input stringlengths 2.65k 237k | output stringclasses 1 value |
|---|---|
<filename>Model.py
import tensorflow as tf
import time
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
class TextClassifier(object):
    """Base class holding model hyper-parameters and the shared input graph.

    Builds the token-id/label placeholders and the word-embedding lookup,
    then calls build_net(), which each subclass overrides to construct its
    own network.  Written against the TensorFlow 1.x graph API.
    """

    def __init__(self,
                 n_hidden=128,
                 num_classes=2,
                 epoch=20,
                 max_seq_len=300,
                 embedding_size=50,
                 learning_rate=0.0005,
                 vocab_size=None,
                 num_layers=1,
                 wordVector=None,
                 preEmbedding=True,
                 isAttention=False,
                 filter_sizes=None,
                 num_filters=256,
                 num_blocks=2,
                 head=8):
        # None default avoids the shared-mutable-default pitfall.
        if filter_sizes is None:
            filter_sizes = [1, 2, 3, 4]
        self.n_hidden = n_hidden  # hidden units per LSTM cell
        self.num_classes = num_classes  # number of target labels
        self.epoch = epoch  # number of training epochs
        self.max_seq_len = max_seq_len  # maximum sequence length
        self.embedding_size = embedding_size  # word-vector dimensionality
        self.learning_rate = learning_rate  # optimizer learning rate
        self.vocab_size = vocab_size  # vocabulary size, sizes the embedding matrix
        self.num_layers = num_layers  # number of stacked LSTM layers
        self.isAttention = isAttention  # whether biLSTM adds the attention mechanism
        self.filter_sizes = filter_sizes  # TextCNN: convolution kernel heights
        self.num_filters = num_filters  # TextCNN: number of kernels per size
        self.output_size = 128  # RCNN: output width of the biLSTM projection
        self.num_blocks = num_blocks  # Transformer: number of encoder blocks
        self.head = head  # Transformer: number of attention heads
        # Model inputs: token-id matrix and one-hot labels.
        self.input_data = tf.placeholder(tf.int32, shape=[None, self.max_seq_len], name="input_data")
        self.y = tf.placeholder(tf.float32, shape=[None, self.num_classes], name="target")
        with tf.name_scope("embedding"):
            embedding = tf.Variable(tf.random_normal(shape=[self.vocab_size, self.embedding_size], dtype=tf.float32),
                                    name="word_embedding")
            if preEmbedding:
                # Replace the random init with pre-trained vectors.
                # NOTE(review): assign() returns the post-assignment tensor in
                # TF1, so the lookup below reads the pre-trained values each
                # time the graph runs — confirm this is the intended idiom.
                embedding = embedding.assign(tf.cast(wordVector, tf.float32))
                tf.logging.info("已使用预训练的词向量")
            self.x = tf.nn.embedding_lookup(embedding, self.input_data)
        self.keep_prob = tf.placeholder(name="keep_prob", dtype=tf.float32)
        # print(self.x.shape) # (?, 300, 50)
        with tf.name_scope("build_net"):
            self.build_net()

    def build_net(self):
        # Overridden by each subclass to build the actual network graph.
        pass

    def get_metrics(self, y_pred, y_true):
        """Return (precision, recall, f1) for one-hot predictions and labels.

        Uses sklearn's default binary averaging, so this assumes
        num_classes == 2.
        """
        y_pred = np.argmax(y_pred, 1)
        y_true = np.argmax(y_true, 1)
        p = precision_score(y_true, y_pred)
        r = recall_score(y_true, y_pred)
        f = f1_score(y_true, y_pred)
        return p, r, f
class Dynamic_LSTM(TextClassifier):
    """Multi-layer unidirectional LSTM classifier over the last time step.

    Fix: the original built the stack with ``[lstmcell] * self.num_layers``,
    which reuses the *same* cell object (and therefore the same weights) in
    every layer and raises a variable-reuse error on TF >= 1.2.  Each layer
    now gets its own freshly constructed cell.
    """

    def build_net(self):
        # Softmax projection parameters.
        weight = tf.get_variable("weight", shape=[self.n_hidden, self.num_classes], dtype=tf.float32,
                                 initializer=tf.random_normal_initializer)
        bias = tf.get_variable("bias", shape=[self.num_classes], dtype=tf.float32, initializer=tf.zeros_initializer)

        def make_cell():
            # One independent LSTM cell (with dropout) per layer.
            cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
            return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

        lstmcell = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(self.num_layers)])
        outputs, states = tf.nn.dynamic_rnn(lstmcell, self.x, dtype=tf.float32)
        # (batch, time, hidden) -> (time, batch, hidden); classify from the last step.
        outputs = tf.transpose(outputs, [1, 0, 2])
        output = outputs[-1]
        output = tf.nn.dropout(output, keep_prob=self.keep_prob)
        self.pred = tf.matmul(output, weight) + bias
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y))
        optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        self.train_op = optimizer.minimize(self.loss)
class BiLSTMandAttention(TextClassifier):
    """Bidirectional LSTM classifier, optionally with an attention layer."""

    def build_net(self):
        # self.x=tf.unstack(self.x,self.max_seq_len,1)
        tf.logging.info("the shape of input tensor:%s" % str(self.x.shape))
        with tf.name_scope("bilstm"):
            def cell_fw():
                # Forward-direction cell (a fresh one per layer).
                cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

            def cell_bw():
                # Backward-direction cell (a fresh one per layer).
                cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

            stack_fw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fw() for _ in range(self.num_layers)],
                                                        state_is_tuple=True)
            stack_bw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_bw() for _ in range(self.num_layers)],
                                                        state_is_tuple=True)
            # outputs is a (forward, backward) tuple of per-step outputs;
            # states holds the final forward/backward cell states.
            outputs, states = tf.nn.bidirectional_dynamic_rnn(stack_fw_cell, stack_bw_cell, self.x, dtype=tf.float32)
        # Optionally pool over time with attention instead of taking the last step.
        if self.isAttention:
            # Sum the two directions -> (batch, time, n_hidden).
            output = outputs[0] + outputs[1]
            with tf.name_scope("attention"):
                output = self.attention(output)  # weighted sum over time steps, fed to softmax
            weight = tf.get_variable("weight", shape=[self.n_hidden, self.num_classes], dtype=tf.float32,
                                     initializer=tf.random_normal_initializer)
            bias = tf.get_variable("bias", shape=[self.num_classes], dtype=tf.float32, initializer=tf.zeros_initializer)
        else:
            # Concatenate directions -> (batch, time, 2 * n_hidden).
            output = tf.concat([outputs[0], outputs[1]], axis=2)
            tf.logging.info("output:%s" % str(output.shape))
            output = tf.transpose(output, [1, 0, 2])
            output = output[-1]  # classify from the last time step
            tf.logging.info("output:%s" % str(output.shape))
            weight = tf.get_variable("weight", shape=[self.n_hidden * 2, self.num_classes], dtype=tf.float32,
                                     initializer=tf.random_normal_initializer)
            bias = tf.get_variable("bias", shape=[self.num_classes], dtype=tf.float32, initializer=tf.zeros_initializer)
        with tf.name_scope("outputs"):
            self.pred = tf.matmul(output, weight) + bias
        with tf.name_scope("loss_train_op"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y))
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.train_op = optimizer.minimize(self.loss)

    def attention(self, H):
        """Attention-pool H (batch, time, n_hidden) into (batch, n_hidden)."""
        # Trainable scoring vector.
        w = tf.Variable(tf.random_normal([self.n_hidden], stddev=0.1), name="attention_w")
        # Non-linear transform of the LSTM outputs.
        M = tf.tanh(H)
        # Score every time step: flatten to (batch*time, n_hidden) and project
        # each step's vector to a single scalar.
        newM = tf.matmul(tf.reshape(M, [-1, self.n_hidden]), tf.reshape(w, [-1, 1]))
        # Back to (batch, time).
        restoreM = tf.reshape(newM, [-1, self.max_seq_len])
        # Softmax-normalize the scores into attention weights (batch, time).
        self.alpha = tf.nn.softmax(restoreM)
        # Weighted sum of H along the time axis via a batched matmul.
        H = tf.transpose(H, [0, 2, 1])
        r = tf.matmul(H, tf.reshape(self.alpha, [-1, self.max_seq_len, 1]))
        # Drop the trailing singleton dimension -> (batch, n_hidden).
        sequeezeR = tf.squeeze(r)
        output = tf.tanh(sequeezeR)
        # Dropout regularization on the pooled representation.
        output = tf.nn.dropout(output, keep_prob=self.keep_prob)
        return output
class TextCNN(TextClassifier):
    """TextCNN: parallel conv + max-pool branches over the embedded sequence.

    Consistency fix: the original overwrote ``self.x`` with the 4-D expanded
    tensor; like CNN_GRU, a local variable is used instead so the shared
    embedding tensor built by the base class is left untouched.
    """

    def build_net(self):
        # Add a channel dimension: (batch, seq, embed) -> (batch, seq, embed, 1).
        x = tf.expand_dims(self.x, axis=-1, name="expand_dim")
        pooling_res = []
        for filter_size in self.filter_sizes:
            # Kernel spans `filter_size` tokens across the full embedding width.
            filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
            weight = tf.Variable(tf.random_normal(shape=filter_shape, stddev=1.0), dtype=tf.float32)
            bias = tf.Variable(tf.random_normal(shape=[self.num_filters], stddev=1.0), dtype=tf.float32)
            conv = tf.nn.conv2d(x, weight, [1, 1, 1, 1], "VALID", name="conv")
            relu = tf.nn.relu(tf.nn.bias_add(conv, bias), name="relu")
            # Max over every valid position -> (batch, 1, 1, num_filters).
            max_pool = tf.nn.max_pool(relu, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1],
                                      padding="VALID", name="maxpool")
            pooling_res.append(max_pool)
        # Fully-connected input width: one pooled vector per filter size.
        num_fc = self.num_filters * len(self.filter_sizes)
        fc_input = tf.concat(pooling_res, axis=3)  # concat along the channel axis
        fc_input = tf.reshape(fc_input, shape=[-1, num_fc])
        fc_input = tf.nn.dropout(fc_input, keep_prob=self.keep_prob)
        with tf.name_scope("output"):
            w = tf.Variable(tf.truncated_normal(shape=[num_fc, self.num_classes], stddev=1.0), dtype=tf.float32,
                            name="w")
            b = tf.Variable(tf.zeros(shape=[self.num_classes]), name='b')
            self.pred = tf.matmul(fc_input, w) + b
        with tf.name_scope("loss_train"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y))
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.train_op = optimizer.minimize(self.loss)
class RCNN(TextClassifier):
    """
    RCNN (RNN + CNN) model:
    1. Concatenate the biLSTM forward/backward outputs with the word vectors
       into a per-token context representation.
    2. Project through tanh and max-pool over time.
    3. Feed the pooled vector to a fully-connected softmax classifier.

    Fix: the original passed ``tf.nn.softmax(pred)`` as the *logits* of
    ``softmax_cross_entropy_with_logits``, applying softmax twice and
    flattening the gradients.  The loss now uses the raw logits; ``self.pred``
    still exposes the softmax probabilities, so callers are unaffected.
    """

    def build_net(self):
        with tf.name_scope("bilstm"):
            def cell_fw():
                cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

            def cell_bw():
                cell = tf.nn.rnn_cell.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

            stack_fw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_fw() for _ in range(self.num_layers)],
                                                        state_is_tuple=True)
            stack_bw_cell = tf.nn.rnn_cell.MultiRNNCell([cell_bw() for _ in range(self.num_layers)],
                                                        state_is_tuple=True)
            (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(stack_fw_cell, stack_bw_cell, self.x,
                                                                             dtype=tf.float32)
        with tf.name_scope("context"):
            tf.logging.info("the shape of foreward vector:%s" % str(output_fw.shape))  # (?, 300, 128)
            shape = [tf.shape(output_fw)[0], 1, tf.shape(output_fw)[2]]
            # Left context: forward outputs shifted right by one step.
            context_left = tf.concat([tf.zeros(shape), output_fw[:, :-1]], axis=1, name="contextleft")
            # Right context: backward outputs shifted left by one step.
            context_right = tf.concat([output_bw[:, 1:], tf.zeros(shape)], axis=1, name="contextright")
            # Final per-token representation: [left, word vector, right]
            # -> (batch, max_seq_len, 2*n_hidden + embedding_size).
            context = tf.concat([context_left, self.x, context_right], axis=2, name="context")
            tf.logging.info("the shape of context_left:%s" % str(context_left.shape))
            tf.logging.info("the shape of context_right:%s" % str(context_right.shape))
            tf.logging.info("the shape of context%s" % str(context.shape))
        with tf.name_scope("rnn_output"):
            size = self.n_hidden * 2 + self.embedding_size
            weight = tf.Variable(tf.random_normal(shape=[size, self.output_size], stddev=1.0), dtype=tf.float32,
                                 name="rnn_output_weight")
            bias = tf.Variable(tf.random_normal(shape=[self.output_size]), dtype=tf.float32, name="rnn_output_bias")
            # einsum projects every time step: (batch, time, size) x (size, out).
            rnn_output = tf.tanh(tf.einsum("aij,jk->aik", context, weight) + bias)
            tf.logging.info("the shape of rnn_output:%s" % str(rnn_output.shape))  # (?, 300, 128)
        with tf.name_scope("max_pool"):
            # Element-wise max over the time axis -> (batch, output_size).
            max_pool = tf.reduce_max(rnn_output, axis=1, name="max_pooling")
            tf.logging.info("the shape of max_pool output:%s" % str(max_pool.shape))
        with tf.name_scope("softmax"):
            w = tf.Variable(tf.truncated_normal([self.output_size, self.num_classes], stddev=1.0), dtype=tf.float32,
                            name="w")
            b = tf.Variable(tf.truncated_normal([self.num_classes]), dtype=tf.float32, name="b")
            logits = tf.matmul(max_pool, w) + b
            self.pred = tf.nn.softmax(logits)
        with tf.name_scope("loss_train"):
            # L2 regularization on the classifier parameters.
            l2_loss = tf.constant(0.0)
            l2_loss += tf.nn.l2_loss(w)
            l2_loss += tf.nn.l2_loss(b)
            # globalStep = tf.Variable(0, name="globalStep", trainable=False)
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y)) + 0.5 * l2_loss
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            # gradAndtvar = optimizer.compute_gradients(self.loss)
            # self.train_op = optimizer.apply_gradients(gradAndtvar, global_step=globalStep)
            self.train_op = optimizer.minimize(self.loss)
class CNN_GRU(TextClassifier):
    """CNN + GRU model: concatenate both branches' features for classification.

    Fix: the original stacked the GRU with ``[cell] * self.num_layers``, which
    reuses the same cell object (and weights) for every layer and fails with a
    variable-reuse error on TF >= 1.2.  Each layer now gets its own cell.
    """

    def build_net(self):
        with tf.name_scope("cnn"):
            # (batch, seq, embed) -> (batch, seq, embed, 1) for conv2d.
            x = tf.expand_dims(self.x, axis=-1, name="expand_dim")
            pooling_res = []
            for filter_size in self.filter_sizes:
                filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
                weight = tf.Variable(tf.truncated_normal(shape=filter_shape), dtype=tf.float32, name="weight")
                bias = tf.Variable(tf.zeros(shape=[self.num_filters]), dtype=tf.float32, name="bias")
                conv = tf.nn.conv2d(x, filter=weight, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                relu = tf.nn.relu(tf.nn.bias_add(conv, bias))
                # Pool over all valid positions -> (batch, 1, 1, num_filters).
                max_pool = tf.nn.max_pool(relu, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1],
                                          strides=[1, 1, 1, 1], padding="VALID", name="pooling")
                pooling_res.append(max_pool)
            num_fc = self.num_filters * len(self.filter_sizes)
            fc_input = tf.concat(pooling_res, axis=3)
            cnn_output = tf.reshape(fc_input, shape=[-1, num_fc])
        with tf.name_scope("GRU"):
            def make_cell():
                # One independent GRU cell (with dropout) per layer.
                cell = tf.nn.rnn_cell.GRUCell(self.n_hidden)
                return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.keep_prob)

            stacked = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(self.num_layers)])
            outputs, _ = tf.nn.dynamic_rnn(stacked, self.x, dtype=tf.float32)
            # Keep the last time step's output as the RNN feature vector.
            outputs = tf.transpose(outputs, [1, 0, 2])
            rnn_output = outputs[-1]
        with tf.name_scope("softmax"):
            # Concatenate CNN and GRU features, then classify.
            fc = tf.concat([cnn_output, rnn_output], 1)
            fc = tf.nn.dropout(fc, keep_prob=self.keep_prob)
            w = tf.Variable(tf.truncated_normal(shape=[self.n_hidden + num_fc, self.num_classes]), dtype=tf.float32,
                            name="w")
            b = tf.Variable(tf.truncated_normal(shape=[self.num_classes]), dtype=tf.float32, name="b")
            self.pred = tf.matmul(fc, w) + b
        with tf.name_scope("loss_train"):
            # L2 regularization on the classifier parameters.
            l2_loss = tf.constant(0.0)
            l2_loss += tf.nn.l2_loss(w)
            l2_loss += tf.nn.l2_loss(b)
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y)) + 0.5 * l2_loss
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.train_op = optimizer.minimize(self.loss)
class Transformer(TextClassifier):
"""
transfromer encoder 用于文本分类
1.position embedding,用one-hot,数据量小
2.block层数设置为2
3.sublayer加dropout正则化
4.前馈层用卷积实现(全连接实现)待定
"""
def _onehot_position_embedding(self, batch_size=64, max_len=300):
"""采取one-hot编码生成位置向量"""
position_embedding = []
for batch in range(batch_size):
x = []
for step in range(max_len):
vector = np.zeros(max_len)
vector[step] = 1
x.append(vector)
position_embedding.append(x)
return np.array(position_embedding, dtype=np.float32)
def _position_embedding(self):
    """Build sin/cos position embeddings as in "Attention Is All You Need".

    Returns a float32 tensor of shape (batch_size, max_seq_len,
    embedding_size), identical for every sample in the batch.
    """
    batch_size = tf.shape(self.x)[0]
    # Position indices 0..max_seq_len-1, tiled across the batch.
    positionIndex = tf.tile(tf.expand_dims(tf.range(self.max_seq_len), 0), [batch_size, 1])
    # Raw angle table: pos / 10000^(2i/d_model) for each position/dimension.
    positionEmbedding = np.array(
        [[pos / np.power(10000, (i - i % 2) / self.embedding_size) for i in range(self.embedding_size)] for pos in
         range(self.max_seq_len)])
    # Apply sin to even dimensions and cos to odd dimensions.
    positionEmbedding[:, 0::2] = np.sin(positionEmbedding[:, 0::2])
    positionEmbedding[:, 1::2] = np.cos(positionEmbedding[:, 1::2])
    # Convert the numpy table to a tensor.
    positionEmbedding_ = tf.cast(positionEmbedding, dtype=tf.float32)
    # Gather per-batch rows -> (batch, time, embedding_size).
    positionEmbedding = tf.nn.embedding_lookup(positionEmbedding_, positionIndex)
    return positionEmbedding
def _multihead_attention(self, raw_inputs, queries, keys, size=None, causality=False):
    """Multi-head scaled dot-product attention with padding mask.

    :param raw_inputs: original (pre-projection) inputs, used only to derive
                       the padding mask (all-zero vectors are padding)
    :param queries: tensor used to compute attention scores; equal to `keys`
                    for self-attention
    :param keys: word embedding + position embedding tensor
    :param size: output width of the Q/K/V dense projections
                 (defaults to the query width)
    :param causality: unused in this implementation
    :return: attended tensor with residual connection and layer norm applied
    """
    if size is None:
        size = queries.shape[-1]
    # Project then split into heads (the paper splits before projecting;
    # the result is equivalent up to parameterization).
    Q = tf.layers.dense(queries, size, activation=tf.nn.relu)
    K = tf.layers.dense(keys, size, activation=tf.nn.relu)
    V = tf.layers.dense(keys, size, activation=tf.nn.relu)
    Q_ = tf.concat(tf.split(Q, self.head, axis=-1), axis=0)
    K_ = tf.concat(tf.split(K, self.head, axis=-1), axis=0)
    V_ = tf.concat(tf.split(V, self.head, axis=-1), axis=0)
    # Shapes after the head split, e.g. (64, 300, 350) -> (320, 300, 70).
    tf.logging.info("the shape of Q:%s" % str(Q.shape))
    tf.logging.info("the shape of Q_:%s" % str(Q_.shape))
    # Q.K^T similarity -> [batch_size*num_heads, q_len, k_len].
    similary = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
    # Scale by sqrt(d_k).
    scaled_similary = similary / (K_.get_shape().as_list()[-1] ** 0.5)
    # Padding mask: positions whose raw input vector sums to 0 are padding
    # and must receive ~zero attention weight.
    key_mask = tf.sign(tf.abs(tf.reduce_sum(raw_inputs, axis=-1)))  # [batch_size, time_step]
    # Tile across heads -> [batch_size*num_heads, key_len].
    key_mask = tf.tile(key_mask, [self.head, 1])
    # Expand across query positions -> [batch_size*num_heads, q_len, k_len].
    key_mask = tf.tile(tf.expand_dims(key_mask, 1), [1, queries.shape[1], 1])
    tf.logging.info("the shape of key_mask:%s" % str(key_mask.shape))
    # Large negative fill so softmax sends masked positions to ~0.
    # NOTE(review): -2**(32+1) is -2^33; reference implementations use
    # -2**32 + 1 — both are effectively -inf here, but confirm the intent.
    paddings = tf.ones_like(scaled_similary) * (-2 ** (32 + 1))
    # Replace scores at padded keys with the large negative value.
    masked_similary = tf.where(tf.equal(key_mask, 0), paddings, scaled_similary)
    # Attention weights.
    weight = tf.nn.softmax(masked_similary)
    # Weighted sum -> [batch_size*num_heads, time_step, size/num_heads].
    outputs = tf.matmul(weight, V_)
    # Re-merge the heads back onto the feature axis.
    outputs = tf.concat(tf.split(outputs, self.head, 0), axis=2)
    outputs = tf.nn.dropout(outputs, keep_prob=self.keep_prob)
    # Residual connection around the sublayer...
    outputs += queries
    # ...followed by layer normalization.
    outputs = self._layer_normalization(outputs)
    return outputs
def _layer_normalization(self, inputs, epsilon=1e-8):
    """Layer Normalization (not batch norm): normalize over the last axis.

    Fixes: (a) the docstring mislabeled this as Batch Normalization — the
    statistics are per-example over the feature axis, which is layer norm;
    (b) the original added 1.0 to the variance before the square root, which
    heavily dampens the normalization — the conventional numerical
    stabilizer is a tiny epsilon (default 1e-8, overridable for
    backward-compatible tuning).

    :param inputs: tensor of shape [batch_size, time_step, features]
    :param epsilon: small constant added to the variance for stability
    :return: normalized tensor, rescaled by learnable gamma/beta
    """
    input_shape = inputs.shape
    param_shape = input_shape[-1]
    # Mean/variance over the last axis -> [batch_size, time_step, 1].
    mean, var = tf.nn.moments(inputs, [-1], keep_dims=True)
    beta = tf.Variable(tf.zeros(param_shape))
    gamma = tf.Variable(tf.ones(param_shape))
    normalized = (inputs - mean) / ((var + epsilon) ** 0.5)
    # Learnable rescale and shift.
    outputs = gamma * normalized + beta
    return outputs
def _feed_forward(self, inputs):
    """Position-wise feed-forward sublayer with residual + layer norm.

    Implemented as two kernel-size-1 conv1d layers (equivalent to dense
    layers applied per time step).  The outer width must equal the input's
    last dimension so the residual addition is valid — here
    embedding_size + max_seq_len, matching the word+position concatenation
    built in build_net.
    """
    filters = [128, self.embedding_size + self.max_seq_len]  # [inner width, outer width]
    # Inner layer with ReLU.
    param = {"inputs": inputs, "filters": filters[0], "kernel_size": 1, "activation": tf.nn.relu, "use_bias": True}
    outputs = tf.layers.conv1d(**param)
    # Outer layer, linear.
    param = {"inputs": outputs, "filters": filters[1], "kernel_size": 1, "activation": None, "use_bias": True}
    outputs = tf.layers.conv1d(**param)
    # Residual connection and layer normalization.
    outputs += inputs
    outputs = self._layer_normalization(outputs)
    return outputs
def build_net(self):
with tf.name_scope("input"):
position_embedding = self._onehot_position_embedding(batch_size=64, | |
<filename>lib/galaxy/visualization/plugins/config_parser.py
from six import string_types
import galaxy.model
from galaxy import util
import logging
log = logging.getLogger(__name__)
class ParsingException(ValueError):
    """Raised when the visualizations-framework configuration XML file
    cannot be parsed.
    """
class VisualizationsConfigParser(object):
    """
    Class that parses a visualizations configuration XML file.

    Each visualization will get the following info:
        - how to load a visualization:
            -- how to find the proper template
            -- how to convert query string into DB models
        - when/how to generate a link to the visualization
            -- what provides the data
            -- what information needs to be added to the query string

    Fix: parse_visualization stored the `link_text` XML *element* instead of
    its text content; it now stores `link_text.text` (the display string).
    """
    #: what are the allowed 'entry_point_type' for entry_point elements
    ALLOWED_ENTRY_POINT_TYPES = ['mako', 'html', 'script']
    #: what are the allowed href targets when clicking on a visualization anchor
    VALID_RENDER_TARGETS = ['galaxy_main', '_top', '_blank']

    def __init__(self):
        # what parsers should be used for sub-components
        self.data_source_parser = DataSourceParser()
        self.param_parser = ParamParser()
        self.param_modifier_parser = ParamModifierParser()

    def parse_file(self, xml_filepath):
        """
        Parse the given XML file for visualizations data.

        :returns: visualization config dictionary
        """
        xml_tree = util.parse_xml(xml_filepath)
        visualization = self.parse_visualization(xml_tree.getroot())
        return visualization

    def parse_visualization(self, xml_tree):
        """
        Parse the template, name, and any data_sources and params from the
        given `xml_tree` for a visualization.

        :returns: a config dictionary, or None if the plugin is disabled
        :raises ParsingException: on a missing name or missing data_sources
        """
        returned = {}
        # main tag specifies plugin type (visualization or
        # interactive_enviornment).
        returned['plugin_type'] = xml_tree.tag
        # a text display name for end user links
        returned['name'] = xml_tree.attrib.get('name', None)
        if not returned['name']:
            raise ParsingException('visualization needs a name attribute')
        # allow manually turning off a vis by checking for a disabled property
        if 'disabled' in xml_tree.attrib:
            log.info('Visualizations plugin disabled: %s. Skipping...', returned['name'])
            return None
        # record the embeddable flag - defaults to false
        # this is a design by contract promise that the visualization can be rendered inside another page
        # often by rendering only a DOM fragment. Since this is an advanced feature that requires a bit more
        # work from the creator's side - it defaults to False
        returned['embeddable'] = False
        if 'embeddable' in xml_tree.attrib:
            returned['embeddable'] = xml_tree.attrib.get('embeddable', False) == 'true'
        # a (for now) text description of what the visualization does
        description = xml_tree.find('description')
        returned['description'] = description.text.strip() if description is not None else None
        # data_sources are the kinds of objects/data associated with the visualization
        # e.g. views on HDAs can use this to find out what visualizations are applicable to them
        data_sources = []
        data_sources_confs = xml_tree.find('data_sources')
        for data_source_conf in data_sources_confs.findall('data_source'):
            data_source = self.data_source_parser.parse(data_source_conf)
            if data_source:
                data_sources.append(data_source)
        # data_sources are required: without one the visualization can never apply
        if not data_sources:
            raise ParsingException('No valid data_sources for visualization')
        returned['data_sources'] = data_sources
        # TODO: this is effectively required due to param_confs.findall( 'param' )
        # parameters spell out how to convert query string params into resources and data
        # that will be parsed, fetched, etc. and passed to the template
        # list or dict? ordered or not?
        params = {}
        param_confs = xml_tree.find('params')
        param_elements = param_confs.findall('param') if param_confs is not None else []
        for param_conf in param_elements:
            param = self.param_parser.parse(param_conf)
            if param:
                params[param_conf.text] = param
        # params are not required
        if params:
            returned['params'] = params
        # param modifiers provide extra information for other params (e.g. hda_ldda='hda' -> dataset_id is an hda id)
        # store these modifiers in a 2-level dictionary { target_param: { param_modifier_key: { param_mod_data }
        # ugh - wish we didn't need these
        param_modifiers = {}
        param_modifier_elements = param_confs.findall('param_modifier') if param_confs is not None else []
        for param_modifier_conf in param_modifier_elements:
            param_modifier = self.param_modifier_parser.parse(param_modifier_conf)
            # param modifiers map accrd. to the params they modify (for faster lookup)
            target_param = param_modifier_conf.get('modifies')
            param_modifier_key = param_modifier_conf.text
            if param_modifier and target_param in params:
                # multiple params can modify a single, other param,
                # so store in a sub-dict, initializing if this is the first
                if target_param not in param_modifiers:
                    param_modifiers[target_param] = {}
                param_modifiers[target_param][param_modifier_key] = param_modifier
        # not required
        if param_modifiers:
            returned['param_modifiers'] = param_modifiers
        # entry_point: how will this plugin render/load? mako, script tag, or static html file?
        returned['entry_point'] = self.parse_entry_point(xml_tree)
        # link_text: the string to use for the text of any links/anchors to this visualization
        # (store the text content, not the Element object)
        link_text = xml_tree.find('link_text')
        if link_text is not None and link_text.text:
            returned['link_text'] = link_text.text
        # render_target: where in the browser to open the rendered visualization
        # defaults to: galaxy_main
        render_target = xml_tree.find('render_target')
        if((render_target is not None and render_target.text) and
                (render_target.text in self.VALID_RENDER_TARGETS)):
            returned['render_target'] = render_target.text
        else:
            returned['render_target'] = 'galaxy_main'
        # consider unifying the above into its own element and parsing method
        return returned

    def parse_entry_point(self, xml_tree):
        """
        Parse the config file for an appropriate entry point: a mako template, a script tag,
        or an html file, returning as dictionary with: `type`, `file`, and `attr`ibutes of
        the element.

        :raises ParsingException: if neither template nor entry_point is
            present, or the entry_point type is unknown
        """
        # (older) mako-only syntax: the template to use in rendering the visualization
        template = xml_tree.find('template')
        if template is not None and template.text:
            log.info('template syntax is deprecated: use entry_point instead')
            return {
                'type' : 'mako',
                'file' : template.text,
                'attr' : {}
            }
        # need one of the two: (the deprecated) template or entry_point
        entry_point = xml_tree.find('entry_point')
        if entry_point is None:
            raise ParsingException('template or entry_point required')
        # parse by returning a sub-object and simply copying any attributes unused here
        entry_point_attrib = entry_point.attrib.copy()
        entry_point_type = entry_point_attrib.pop('entry_point_type', 'mako')
        if entry_point_type not in self.ALLOWED_ENTRY_POINT_TYPES:
            raise ParsingException('Unknown entry_point type: ' + entry_point_type)
        return {
            'type' : entry_point_type,
            'file' : entry_point.text,
            'attr' : entry_point_attrib
        }
# -------------------------------------------------------------------
class DataSourceParser(object):
"""
Component class of VisualizationsConfigParser that parses data_source elements
within visualization elements.
data_sources are (in the extreme) any object that can be used to produce
data for the visualization to consume (e.g. HDAs, LDDAs, Jobs, Users, etc.).
There can be more than one data_source associated with a visualization.
"""
# these are the allowed classes to associate visualizations with (as strings)
# any model_class element not in this list will throw a parsing ParsingExcepion
ALLOWED_MODEL_CLASSES = [
'Visualization',
'HistoryDatasetAssociation',
'LibraryDatasetDatasetAssociation'
]
ATTRIBUTE_SPLIT_CHAR = '.'
# these are the allowed object attributes to use in data source tests
# any attribute element not in this list will throw a parsing ParsingExcepion
ALLOWED_DATA_SOURCE_ATTRIBUTES = [
'datatype'
]
def parse(self, xml_tree):
    """
    Return a visualization data_source dictionary parsed from the given
    XML element.
    """
    # model_class is required (only the first element is used); fail early
    # if it cannot be resolved.
    model_class = self.parse_model_class(xml_tree.find('model_class'))
    if not model_class:
        raise ParsingException('data_source needs a model class')
    parsed = {
        # actual galaxy model class the visualization applies to
        'model_class': model_class,
        # optional boolean tests: 'is the visualization usable by this object?'
        # (an empty list means: default to isinstance(object, model_class))
        'tests': self.parse_tests(xml_tree.findall('test')),
        # to_params tell the registry which query params to derive from the object
        'to_params': {},
    }
    to_params = self.parse_to_params(xml_tree.findall('to_param'))
    if to_params:
        parsed['to_params'] = to_params
    return parsed
def parse_model_class(self, xml_tree):
    """
    Convert an xml model_class element into the corresponding galaxy model
    class, or None when the attribute is not found on galaxy.model.

    The element is required, only the first one is used, and its text must
    be listed in ALLOWED_MODEL_CLASSES.
    """
    if xml_tree is None or not xml_tree.text:
        raise ParsingException('data_source entry requires a model_class')
    class_name = xml_tree.text
    if class_name not in self.ALLOWED_MODEL_CLASSES:
        # log.debug( 'available data_source model_classes: %s' %( str( self.ALLOWED_MODEL_CLASSES ) ) )
        raise ParsingException('Invalid data_source model_class: %s' % (class_name))
    # Resolve against the model module; None signals an empty data_source.
    return getattr(galaxy.model, class_name, None)
def _build_getattr_lambda(self, attr_name_list):
"""
Recursively builds a compound lambda function of getattr's
from the attribute names given in `attr_name_list`.
"""
if len(attr_name_list) == 0:
# identity - if list is empty, return object itself
return lambda o: o
next_attr_name = attr_name_list[-1]
if len(attr_name_list) == 1:
# recursive base case
return lambda o: getattr(o, next_attr_name)
# recursive case
return lambda o: getattr(self._build_getattr_lambda(attr_name_list[:-1])(o), next_attr_name)
def parse_tests(self, xml_tree_list):
"""
Returns a list of test dictionaries that the registry can use
against a given object to determine if the visualization can be
used with the object.
"""
# | |
ssl.wrap_socket(self.socket,
ssl_version=self.ssl_version,
do_handshake_on_connect=False,
ca_certs=self.ca_certs,
cert_reqs=cert_policy)
if hasattr(self.socket, 'socket'):
# We are using a testing socket, so preserve the top
# layer of wrapping.
self.socket.socket = ssl_socket
else:
self.socket = ssl_socket
self.socket.do_handshake()
self.set_socket(self.socket)
return True
else:
log.warning("Tried to enable TLS, but ssl module not found.")
return False
def _start_keepalive(self, event):
    """Schedule periodic whitespace pings to keep the connection alive.

    May be disabled by setting::

        self.whitespace_keepalive = False

    The keepalive interval can be set using::

        self.whitespace_keepalive_interval = 300
    """
    def _send_whitespace():
        # Only ping when nothing else is queued to go out.
        if self.send_queue.empty():
            self.send_raw(' ')

    self.schedule('Whitespace Keepalive',
                  self.whitespace_keepalive_interval,
                  _send_whitespace,
                  repeat=True)
def _end_keepalive(self, event):
    """Cancel the scheduled whitespace keepalive pings."""
    self.scheduler.remove('Whitespace Keepalive')
def start_stream_handler(self, xml):
    """Hook invoked once the stream header has been sent.

    Subclasses override this to perform initialization actions such as
    handshakes; the base implementation intentionally does nothing.
    """
def register_stanza(self, stanza_class):
    """Register a stanza class as a known root stanza.

    A root stanza appears as a direct child of the stream's root element.
    Substanzas of a root stanza do not need registering here — use
    register_stanza_plugin() from sleekxmpp.xmlstream.stanzabase for those.

    Unregistered stanzas are not converted into stanza objects, though
    handlers and matchers may still process them.

    :param stanza_class: The top-level stanza object's class.
    """
    self.__root_stanza.append(stanza_class)
def remove_stanza(self, stanza_class):
    """Remove a stanza class from the set of known root stanzas.

    A root stanza is one that appears as a direct child of the stream's
    root element.  Stanzas that are not registered will not be converted
    into stanza objects, but may still be processed using handlers and
    matchers.

    Fix: ``__root_stanza`` is a list (see register_stanza), so the old
    ``del self.__root_stanza[stanza_class]`` raised TypeError because a
    class is not a valid list index.  Use list.remove() instead, which
    deletes the registered entry (raising ValueError if it was never
    registered).
    """
    self.__root_stanza.remove(stanza_class)
def add_handler(self, mask, pointer, name=None, disposable=False,
                threaded=False, filter=False, instream=False):
    """Shortcut for registering a handler from an XML mask.

    Prefer :meth:`register_handler()` for new code.

    :param mask: An XML snippet matching the structure of the
                 stanzas that will be passed to this handler.
    :param pointer: The handler function itself.
    :parm name: A unique name for the handler. A name will
                be generated if one is not provided.
    :param disposable: Indicates if the handler should be discarded
                       after one use.
    :param threaded: **DEPRECATED** — kept for backwards compatibility.
    :param filter: **DEPRECATED** — kept for backwards compatibility.
    :param instream: Indicates if the handler should execute during
                     stream processing and not during normal event
                     processing.
    """
    # The matcher/handler classes are imported at call sites elsewhere to
    # avoid circular dependencies.
    if name is None:
        # Generate a unique fallback name.
        name = 'add_handler_%s' % self.getNewId()
    callback = XMLCallback(name, MatchXMLMask(mask), pointer,
                           once=disposable, instream=instream)
    self.registerHandler(callback)
def register_handler(self, handler, before=None, after=None):
    """Add a stream event handler executed when a matching stanza arrives.

    A handler already bound to a stream is ignored.

    :param handler: The :class:`~sleekxmpp.xmlstream.handler.base.BaseHandler`
                    derived object to execute.
    """
    if handler.stream is not None:
        return
    self.__handlers.append(handler)
    # Keep only a weak reference so the handler does not pin the stream.
    handler.stream = weakref.ref(self)
def remove_handler(self, name):
    """Remove the first stream event handler registered under `name`.

    :param name: The name of the handler.
    :returns: True if a handler was found and removed, False otherwise.
    """
    for position, handler in enumerate(self.__handlers):
        if handler.name == name:
            self.__handlers.pop(position)
            return True
    return False
def get_dns_records(self, domain, port=None):
    """Get the DNS A records for a domain.

    :param domain: The domain in question.
    :param port: If the results don't include a port, use this one.
    :returns: a list of ((host, port), priority, weight) tuples; falls back
        to [((domain, port), 0, 0)] when resolution fails or dnspython is
        not installed.
    """
    if port is None:
        port = self.default_port
    if DNSPYTHON:
        resolver = dns.resolver.get_default_resolver()
        self.configure_dns(resolver, domain=domain, port=port)
        try:
            answers = resolver.query(domain, dns.rdatatype.A)
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            # No records: let the OS try the literal domain name.
            log.warning("No A records for %s", domain)
            return [((domain, port), 0, 0)]
        except dns.exception.Timeout:
            log.warning("DNS resolution timed out " + \
                        "for A record of %s", domain)
            return [((domain, port), 0, 0)]
        else:
            # A records carry no priority/weight; use zeros.
            return [((ans.address, port), 0, 0) for ans in answers]
    else:
        log.warning("dnspython is not installed -- " + \
                    "relying on OS A record resolution")
        self.configure_dns(None, domain=domain, port=port)
        return [((domain, port), 0, 0)]
def pick_dns_answer(self, domain, port=None):
    """Pick a server and port from DNS answers.

    Gets DNS answers if none available.
    Removes used answer from available answers.

    :param domain: The domain in question.
    :param port: If the results don't include a port, use this one.
    :return: The chosen ``(host, port)`` address pair.
    """
    if not self.dns_answers:
        self.dns_answers = self.get_dns_records(domain, port)
    # Only answers sharing the best (numerically lowest) priority are
    # eligible; among those, one is picked at random, weighted by the
    # answer's weight via cumulative-sum keys in `addresses`.
    addresses = {}
    intmax = 0
    topprio = 65535
    for answer in self.dns_answers:
        topprio = min(topprio, answer[1])
    for answer in self.dns_answers:
        if answer[1] == topprio:
            intmax += answer[2]
            addresses[intmax] = answer[0]
    # python3 returns a generator for dictionary keys
    items = sorted(addresses.keys())
    picked = random.randint(0, intmax)
    for item in items:
        if picked <= item:
            address = addresses[item]
            break
    # BUG FIX: the original compared self.dns_answers[0] (a whole
    # (addr, priority, weight) tuple) against `address` (just the addr
    # pair). That never matched, so the loop always ran to completion
    # and the *last* answer was popped instead of the one actually
    # used. Compare each answer's address component instead.
    for idx, answer in enumerate(self.dns_answers):
        if answer[0] == address:
            break
    self.dns_answers.pop(idx)
    log.debug("Trying to connect to %s:%s", *address)
    return address
def add_event_handler(self, name, pointer,
                      threaded=False, disposable=False):
    """Add a custom event handler that will be executed whenever
    its event is manually triggered.

    :param name: The name of the event that will trigger
                 this handler.
    :param pointer: The function to execute.
    :param threaded: If set to ``True``, the handler will execute
                     in its own thread. Defaults to ``False``.
    :param disposable: If set to ``True``, the handler will be
                       discarded after one use. Defaults to ``False``.
    """
    # setdefault replaces the non-idiomatic "if not name in ..." guard
    # and avoids looking the key up twice.
    self.__event_handlers.setdefault(name, []).append(
            (pointer, threaded, disposable))
def del_event_handler(self, name, pointer):
    """Remove a function as a handler for an event.

    :param name: The name of the event.
    :param pointer: The function to remove as a handler.
    """
    # Idiom fix: "name not in" instead of "not name in".
    if name not in self.__event_handlers:
        return
    # Keep only the handlers that do not use the given function
    # pointer (replaces a nested filter-function definition).
    self.__event_handlers[name] = [
            handler for handler in self.__event_handlers[name]
            if handler[0] != pointer]
def event_handled(self, name):
    """Returns the number of registered handlers for an event.

    :param name: The name of the event to check.
    :return: The handler count; ``0`` for unknown events.
    """
    handlers = self.__event_handlers.get(name, [])
    return len(handlers)
def event(self, name, data=None, direct=False):
    """Manually trigger a custom event.

    :param name: The name of the event to trigger.
    :param data: Data that will be passed to each event handler.
                 Defaults to an empty dictionary, but is usually
                 a stanza object.
    :param direct: Runs the event directly if True, skipping the
                   event queue. All event handlers will run in the
                   same thread.
    """
    # BUG FIX: the original used a mutable default argument
    # (data={}), which is shared across every call of this method.
    # A None sentinel with an in-body default preserves behavior.
    if data is None:
        data = {}
    handlers = self.__event_handlers.get(name, [])
    for handler in handlers:
        #TODO: Data should not be copied, but should be read only,
        #      but this might break current code so it's left for future.
        out_data = copy.copy(data) if len(handlers) > 1 else data
        old_exception = getattr(data, 'exception', None)
        if direct:
            try:
                handler[0](out_data)
            except Exception as e:
                error_msg = 'Error processing event handler: %s'
                log.exception(error_msg, str(handler[0]))
                if old_exception:
                    old_exception(e)
                else:
                    self.exception(e)
        else:
            self.event_queue.put(('event', handler, out_data))
        if handler[2]:
            # If the handler is disposable, we will go ahead and
            # remove it now instead of waiting for it to be
            # processed in the queue.
            with self.__event_handlers_lock:
                try:
                    h_index = self.__event_handlers[name].index(handler)
                    self.__event_handlers[name].pop(h_index)
                except (KeyError, ValueError):
                    # Narrowed from a bare except: the handler (or the
                    # whole event entry) was already removed by a
                    # concurrent trigger; nothing left to do.
                    pass
def schedule(self, name, seconds, callback, args=None,
             kwargs=None, repeat=False):
    """Schedule a callback function to execute after a given delay.

    Thin pass-through to the stream's scheduler; results are delivered
    via the stream's event queue.

    :param name: A unique name for the scheduled callback.
    :param seconds: The time in seconds to wait before executing.
    :param callback: A pointer to the function to execute.
    :param args: A tuple of arguments to pass to the function.
    :param kwargs: A dictionary of keyword arguments to pass to
                   the function.
    :param repeat: Flag indicating if the scheduled event should
                   be reset and repeat after executing.
    """
    self.scheduler.add(name, seconds, callback, args, kwargs,
                       repeat, qpointer=self.event_queue)
def incoming_filter(self, xml):
    """Filter incoming XML objects before they are processed.

    Possible uses include remapping namespaces, or correcting elements
    from sources with incorrect behavior.

    Meant to be overridden. This default implementation is the
    identity function: the object is returned unmodified.

    :param xml: The incoming XML object.
    :return: The (possibly modified) XML object.
    """
    return xml
def send(self, data, mask=None, timeout=None, now=False):
"""A wrapper for :meth:`send_raw()` for sending stanza objects.
May optionally block until an expected response is received.
:param data: The :class:`~sleekxmpp.xmlstream.stanzabase.ElementBase`
stanza to send on the stream.
:param mask: **DEPRECATED**
An XML string snippet matching the structure
of the expected response. Execution will block
in this thread until the response is received
or a timeout occurs.
:param int timeout: Time in seconds to wait for a response before
continuing. Defaults to :attr:`response_timeout`.
:param bool now: Indicates if the send queue should be skipped,
| |
import sys
import asyncio
from uuid import uuid4
from datetime import datetime
from preggy import expect
from jetengine import (
Document,
StringField,
BooleanField,
ListField,
EmbeddedDocumentField,
ReferenceField,
DESCENDING,
URLField,
DateTimeField,
UUIDField,
IntField,
JsonField,
)
from jetengine.errors import InvalidDocumentError, LoadReferencesRequiredError, UniqueKeyViolationError
from tests import AsyncTestCase, async_test
class User(Document):
    """Sample user document used as a fixture throughout this test suite."""

    email = StringField(required=True)
    # Callable default: evaluated each time a new document is created.
    first_name = StringField(max_length=50, default=lambda: "Bernardo")
    last_name = StringField(max_length=50, default="Heynemann")
    is_admin = BooleanField(default=True)
    website = URLField(default="http://google.com/")
    # Timestamp is maintained automatically on both insert and update.
    updated_at = DateTimeField(required=True, auto_now_on_insert=True, auto_now_on_update=True)
    # unique + sparse — presumably uniqueness is only enforced when the
    # value is set, mirroring MongoDB sparse indexes; verify in jetengine.
    facebook_id = StringField(unique=True, sparse=True)

    def __repr__(self):
        return "%s %s <%s>" % (self.first_name, self.last_name, self.email)
class Employee(User):
    """User subclass used to exercise document inheritance."""

    emp_number = StringField()
class Comment(Document):
    """Comment fixture with a (default, lazy) reference to its author."""

    text = StringField(required=True)
    user = ReferenceField(User, required=True)
class CommentNotLazy(Document):
    """Comment fixture with eager reference loading (__lazy__ = False)."""

    __lazy__ = False

    text = StringField(required=True)
    user = ReferenceField(User, required=True)
class Post(Document):
    """Blog-post fixture embedding a list of Comment documents."""

    title = StringField(required=True)
    body = StringField(required=True)
    comments = ListField(EmbeddedDocumentField(Comment))
class TestDocument(AsyncTestCase):
def setUp(self):
    """Drop every collection these tests use so each test starts clean."""
    super(TestDocument, self).setUp()
    self.drop_coll("User")
    self.drop_coll("Employee")
    self.drop_coll("Post")
    self.drop_coll("Comment")
    self.drop_coll("CommentNotLazy")
def test_has_proper_collection(self):
    """The default collection name is the document class name."""
    assert User.__collection__ == "User"
def test_has_proper_objects(self):
    """Document.objects exposes a jetengine QuerySet."""
    import jetengine.queryset

    expect(isinstance(User.objects, jetengine.queryset.QuerySet)).to_be_true()
@async_test
@asyncio.coroutine
def test_can_create_new_instance(self):
    """save() persists a new document and round-trips all set fields."""
    user = User(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    result = yield from user.save()

    expect(result._id).not_to_be_null()
    expect(result.email).to_equal("<EMAIL>")
    expect(result.first_name).to_equal("Bernardo")
    expect(result.last_name).to_equal("Heynemann")
    # facebook_id was never set and has no default.
    expect(result.facebook_id).to_be_null()
@async_test
@asyncio.coroutine
def test_can_create_new_instance_with_defaults(self):
    """Unset fields fall back to their declared defaults on save."""
    user = User(email="<EMAIL>")
    result = yield from user.save()

    expect(result._id).not_to_be_null()
    expect(result.email).to_equal("<EMAIL>")
    expect(result.first_name).to_equal("Bernardo")
    expect(result.last_name).to_equal("Heynemann")
    expect(result.is_admin).to_be_true()
@async_test
@asyncio.coroutine
def test_can_create_new_instance_with_defaults_and_db_fields(self):
    """Defaults also apply to fields stored under a custom db_field name."""
    class Model(Document):
        last_name = StringField(db_field="db_last", default="Heynemann")
        first_name = StringField(db_field="db_first", default=lambda: "Bernardo")

    # NOTE(review): unlike the other drop helpers this call is not
    # yielded — confirm drop_coll_async is not a coroutine.
    self.drop_coll_async(Model.__collection__)

    model = Model()
    result = yield from model.save()

    expect(result._id).not_to_be_null()
    expect(result.first_name).to_equal("Bernardo")
    expect(result.last_name).to_equal("Heynemann")
@async_test
@asyncio.coroutine
def test_creating_invalid_instance_fails(self):
    """An invalid URL value makes both save() and objects.create() raise."""
    user = User(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", website="bla")
    try:
        yield from user.save()
    except InvalidDocumentError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of("Field 'website' must be valid.")
    else:
        assert False, "Should not have gotten this far"

    try:
        user = yield from User.objects.create(
            email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", website="bla"
        )
    except InvalidDocumentError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of("Field 'website' must be valid.")
    else:
        assert False, "Should not have gotten this far"
@async_test
@asyncio.coroutine
def test_can_create_employee(self):
    """Saving a subclass document persists both inherited and own fields."""
    user = Employee(
        email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", emp_number="Employee"
    )
    result = yield from user.save()

    expect(result._id).not_to_be_null()
    expect(result.email).to_equal("<EMAIL>")
    expect(result.first_name).to_equal("Bernardo")
    expect(result.last_name).to_equal("Heynemann")
    expect(result.emp_number).to_equal("Employee")
def test_duplicate_fields(self):
    """Redeclaring an inherited field raises at class-definition time."""
    try:
        class DuplicateField(User):
            email = StringField(required=True)
    except InvalidDocumentError:
        e = sys.exc_info()[1]
        expect(e).to_have_an_error_message_of("Multiple db_fields defined for: email ")
    else:
        assert False, "Should not have gotten this far."
@async_test
@asyncio.coroutine
def test_can_update_employee(self):
    """A field changed before the first save() is what gets persisted."""
    user = Employee(
        email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", emp_number="Employee"
    )
    user.emp_number = "12345"
    result = yield from user.save()

    expect(result._id).not_to_be_null()
    expect(result.email).to_equal("<EMAIL>")
    expect(result.first_name).to_equal("Bernardo")
    expect(result.last_name).to_equal("Heynemann")
    expect(result.emp_number).to_equal("12345")
@async_test
@asyncio.coroutine
def test_can_get_instance(self):
    """objects.get(ObjectId) retrieves the saved document with all fields."""
    user = Employee(
        email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", emp_number="Employee"
    )
    yield from user.save()

    retrieved_user = yield from Employee.objects.get(user._id)

    expect(retrieved_user._id).to_equal(user._id)
    expect(retrieved_user.email).to_equal("<EMAIL>")
    expect(retrieved_user.first_name).to_equal("Bernardo")
    expect(retrieved_user.last_name).to_equal("Heynemann")
    expect(retrieved_user.emp_number).to_equal("Employee")
    expect(retrieved_user.is_admin).to_be_true()
    expect(retrieved_user.facebook_id).to_be_null()
@async_test
@asyncio.coroutine
def test_can_get_instance_with_id_string(self):
    """objects.get() also accepts the document id as a string."""
    user = Employee(
        email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", emp_number="Employee"
    )
    yield from user.save()

    retrieved_user = yield from Employee.objects.get(str(user._id))

    expect(retrieved_user._id).to_equal(user._id)
    expect(retrieved_user.email).to_equal("<EMAIL>")
    expect(retrieved_user.first_name).to_equal("Bernardo")
    expect(retrieved_user.last_name).to_equal("Heynemann")
    expect(retrieved_user.emp_number).to_equal("Employee")
    expect(retrieved_user.is_admin).to_be_true()
@async_test
@asyncio.coroutine
def test_after_updated_get_proper_data(self):
    """A second save() after mutation updates the stored document."""
    user = Employee(
        email="<EMAIL>", first_name="Bernardo", last_name="Heynemann", emp_number="Employee"
    )
    yield from user.save()

    user.emp_number = "12345"
    yield from user.save()

    retrieved_user = yield from Employee.objects.get(user._id)

    expect(retrieved_user._id).to_equal(user._id)
    expect(retrieved_user.email).to_equal("<EMAIL>")
    expect(retrieved_user.first_name).to_equal("Bernardo")
    expect(retrieved_user.last_name).to_equal("Heynemann")
    expect(retrieved_user.emp_number).to_equal("12345")
@async_test
@asyncio.coroutine
def test_can_find_proper_document(self):
    """filter(email=...) returns only the matching document."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")

    users = yield from User.objects.filter(email="<EMAIL>").find_all()

    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)

    first_user = users[0]
    expect(first_user.first_name).to_equal("Someone")
    expect(first_user.last_name).to_equal("Else")
    expect(first_user.email).to_equal("<EMAIL>")
@async_test
@asyncio.coroutine
def test_can_find_with_multiple_filters(self):
    """Chained filter()/filter_not() calls combine with AND semantics."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    last_user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Silva")

    # filter and filter not
    users = yield from User.objects.filter(email="<EMAIL>").filter_not(first_name="Someone").find_all()
    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)
    first_user = users[0]
    expect(first_user._id).to_equal(user._id)

    # filter and filter not for Q
    from jetengine import Q

    users = yield from User.objects.filter(email="<EMAIL>").filter_not(Q(first_name="Someone")).find_all()
    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)
    first_user = users[0]
    expect(first_user._id).to_equal(user._id)

    # filter not and filter not
    users = yield from User.objects.filter_not(last_name="Heynemann").filter_not(first_name="Someone").find_all()
    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)
    first_user = users[0]
    expect(first_user._id).to_equal(last_user._id)

    # filter and filter
    users = yield from User.objects.filter(last_name="Silva").filter(first_name="Bernardo").find_all()
    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)
    expect(users[0]._id).to_equal(last_user._id)
@async_test
@asyncio.coroutine
def test_can_limit_number_of_documents(self):
    """limit(1) caps the result set at one document."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")

    users = yield from User.objects.limit(1).find_all()

    expect(users).to_be_instance_of(list)
    expect(users).to_length(1)

    first_user = users[0]
    expect(first_user.first_name).to_equal("Bernardo")
    expect(first_user.last_name).to_equal("Heynemann")
    expect(first_user.email).to_equal("<EMAIL>")
def test_cant_order_for_invalid_field(self):
    """order_by() rejects field names the document does not declare."""
    try:
        User.objects.order_by("invalid_field")
    except ValueError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of(
            "Invalid order by field 'invalid_field': Field not found in 'User'."
        )
    else:
        assert False, "Should not have gotten this far"
@async_test
@asyncio.coroutine
def test_can_order_documents(self):
    """order_by(name, DESCENDING) sorts the results accordingly."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")

    users = yield from User.objects.order_by("first_name", DESCENDING).find_all()

    expect(users).to_be_instance_of(list)
    expect(users).to_length(2)

    first_user = users[0]
    expect(first_user.first_name).to_equal("Someone")
    expect(first_user.last_name).to_equal("Else")
    expect(first_user.email).to_equal("<EMAIL>")
@async_test
@asyncio.coroutine
def test_can_order_documents_by_actual_field(self):
    """order_by() also accepts the field object itself (User.first_name)."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")

    users_cursor = User.objects.order_by(User.first_name, DESCENDING)
    users = yield from users_cursor.find_all()

    expect(users).to_be_instance_of(list)
    expect(users).to_length(2)

    first_user = users[0]
    expect(first_user.first_name).to_equal("Someone")
    expect(first_user.last_name).to_equal("Else")
    expect(first_user.email).to_equal("<EMAIL>")
@async_test
@asyncio.coroutine
def test_can_count_documents(self):
    """count() works unfiltered, on a filtered cursor, and on no matches."""
    yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")
    yield from User.objects.create(email="<EMAIL>", first_name="Someone", last_name="Else")

    user_count = yield from User.objects.count()
    expect(user_count).to_equal(2)

    user_count_cursor = User.objects.filter(email="<EMAIL>")
    user_count = yield from user_count_cursor.count()
    expect(user_count).to_equal(1)

    user_count = yield from User.objects.filter(email="<EMAIL>").count()
    expect(user_count).to_equal(0)
@async_test
@asyncio.coroutine
def test_saving_without_required_fields_raises(self):
    """save() on a document missing a required field raises
    InvalidDocumentError with a message naming the field."""
    user = Employee(first_name="Bernardo", last_name="Heynemann", emp_number="Employee")
    try:
        yield from user.save()
    except InvalidDocumentError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of("Field 'email' is required.")
    else:
        # BUG FIX: without this else clause the test silently passed
        # when save() did NOT raise. Every sibling try/except test in
        # this file fails explicitly in that case; match them.
        assert False, "Should not have gotten this far"
@async_test
@asyncio.coroutine
def test_can_save_and_get_reference_with_lazy(self):
    """load_references() resolves a lazy reference on an embedded document."""
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")

    post = yield from Post.objects.create(title="Testing post", body="Testing post body")

    comment = Comment(text="Comment text for lazy test", user=user)
    post.comments.append(comment)
    yield from post.save()

    loaded_post = yield from Post.objects.get(post._id)

    result = yield from loaded_post.comments[0].load_references()
    expect(result["loaded_reference_count"]).to_equal(1)
@async_test
@asyncio.coroutine
def test_can_save_and_get_specific_reference_with_lazy(self):
    """load_references(fields=[...]) loads only the named references;
    accessing an unloaded one raises LoadReferencesRequiredError."""
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")

    class ReferenceFieldClass(Document):
        ref1 = ReferenceField(User)
        ref2 = ReferenceField(User)
        ref3 = ReferenceField(User)

    ref = yield from ReferenceFieldClass.objects.create(ref1=user, ref2=user, ref3=user)

    loaded_ref = yield from ReferenceFieldClass.objects.get(ref._id)

    result = yield from loaded_ref.load_references(fields=["ref1"])
    expect(result["loaded_reference_count"]).to_equal(1)
    expect(loaded_ref.ref1._id).to_equal(user._id)

    try:
        assert loaded_ref.ref2._id
    except LoadReferencesRequiredError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of(
            "The property 'ref2' can't be accessed before calling 'load_references' on its instance first "
            "(ReferenceFieldClass) or setting __lazy__ to False in the ReferenceFieldClass class."
        )
    else:
        assert False, "Should not have gotten this far"
@async_test
@asyncio.coroutine
def test_can_save_and_get_reference_with_find_all(self):
    """find_all(lazy=False) eagerly resolves references for each result."""
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")

    class ReferenceFieldClass(Document):
        __collection__ = "TestFindAllReferenceField"
        ref1 = ReferenceField(User)
        num = IntField(default=10)

    yield from ReferenceFieldClass.objects.delete()

    yield from ReferenceFieldClass.objects.create(ref1=user)
    yield from ReferenceFieldClass.objects.create(ref1=user)
    yield from ReferenceFieldClass.objects.create(ref1=user)

    result = yield from ReferenceFieldClass.objects.find_all(lazy=False)
    expect(result).to_length(3)
    expect(result[0].ref1._id).to_equal(user._id)

    ref_cursor = ReferenceFieldClass.objects.filter(num=20)
    result = yield from ref_cursor.find_all(lazy=False)
    expect(result).to_length(0)
@async_test
@asyncio.coroutine
def test_can_save_and_get_reference_without_lazy(self):
    """With __lazy__ = False the reference is usable right after get/find."""
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")

    comment = CommentNotLazy(text="Comment text", user=user)
    yield from comment.save()

    loaded_comment = yield from CommentNotLazy.objects.get(comment._id)
    expect(loaded_comment).not_to_be_null()
    expect(loaded_comment.user._id).to_equal(user._id)

    loaded_comments = yield from CommentNotLazy.objects.find_all()
    expect(loaded_comments).to_length(1)
    expect(loaded_comments[0].user._id).to_equal(user._id)
@async_test
@asyncio.coroutine
def test_can_save_and_retrieve_blog_post(self):
    """End-to-end: embedded comments round-trip; the lazy user reference
    raises until load_references() is called, then resolves fully."""
    user = yield from User.objects.create(email="<EMAIL>", first_name="Bernardo", last_name="Heynemann")

    post = yield from Post.objects.create(title="Testing post", body="Testing post body")

    post.comments.append(Comment(text="Comment text for blog post", user=user))
    yield from post.save()

    loaded_post = yield from Post.objects.get(post._id)

    expect(loaded_post).not_to_be_null()
    expect(loaded_post._id).to_equal(post._id)
    expect(loaded_post.title).to_equal("Testing post")
    expect(loaded_post.body).to_equal("Testing post body")
    expect(loaded_post.comments).to_length(1)
    expect(loaded_post.comments[0].text).to_equal("Comment text for blog post")

    try:
        loaded_post.comments[0].user
    except LoadReferencesRequiredError:
        err = sys.exc_info()[1]
        expected = (
            "The property 'user' can't be accessed before calling 'load_references' "
            + "on its instance first (Comment) or setting __lazy__ to False in the Comment class."
        )
        expect(err).to_have_an_error_message_of(expected)
    else:
        assert False, "Should not have gotten this far"

    result = yield from loaded_post.comments[0].load_references()
    loaded_reference_count = result["loaded_reference_count"]
    expect(loaded_reference_count).to_equal(1)

    expect(loaded_post.comments[0].user).to_be_instance_of(User)
    expect(loaded_post.comments[0].user._id).to_equal(user._id)
    expect(loaded_post.comments[0].user.email).to_equal("<EMAIL>")
    expect(loaded_post.comments[0].user.first_name).to_equal("Bernardo")
    expect(loaded_post.comments[0].user.last_name).to_equal("Heynemann")
@async_test
@asyncio.coroutine
def test_saving_a_loaded_post_updates_the_post(self):
    """Re-saving an existing document updates in place (no duplicate row)."""
    class LoadedPost(Document):
        uuid = UUIDField(default=uuid4)

    uuid = uuid4()
    post = yield from LoadedPost.objects.create(uuid=uuid)

    saved_post = yield from post.save()

    posts = yield from LoadedPost.objects.filter(uuid=uuid).find_all()

    expect(posts).to_length(1)
    expect(posts[0]._id).to_equal(post._id)
    expect(posts[0]._id).to_equal(saved_post._id)
@async_test
@asyncio.coroutine
def test_saving_uses_default(self):
    """create() with no arguments fills fields from their defaults."""
    class LoadedPost(Document):
        uuid = UUIDField(default=uuid4)

    post = yield from LoadedPost.objects.create()

    expect(post.uuid).not_to_be_null()
@async_test
@asyncio.coroutine
def test_getting_by_field(self):
    """objects.get(field=value) looks up by an arbitrary field, with the
    UUID accepted in string form."""
    class LoadedPost(Document):
        uuid = UUIDField(default=uuid4)

    uuid = uuid4()
    post = yield from LoadedPost.objects.create(uuid=uuid)

    loaded_post = yield from LoadedPost.objects.get(uuid=str(uuid))

    expect(loaded_post).not_to_be_null()
    expect(loaded_post._id).to_equal(post._id)
def test_querying_by_invalid_operator(self):
    """An unknown __operator suffix in filter() raises ValueError."""
    try:
        User.objects.filter(email__invalid="test")
    except ValueError:
        err = sys.exc_info()[1]
        expect(err).to_have_an_error_message_of(
            "Invalid filter 'email__invalid': Invalid operator (if this is a sub-property, "
            "then it must be used in embedded document fields)."
        )
    else:
        assert False, "Should not have gotten this far"
@async_test
@asyncio.coroutine
def test_querying_by_lower_than(self):
    """The __lt operator works through both filter() and get()."""
    class Test(Document):
        __collection__ = "LowerThan"
        test = IntField()

    yield from Test.objects.delete()

    test = yield from Test.objects.create(test=10)
    yield from Test.objects.create(test=15)

    loaded_tests = yield from Test.objects.filter(test__lt=12).find_all()
    expect(loaded_tests).to_length(1)
    expect(loaded_tests[0]._id).to_equal(test._id)

    loaded_test = yield from Test.objects.get(test__lt=12)
    expect(loaded_test).not_to_be_null()
    expect(loaded_test._id).to_equal(test._id)
@async_test
@asyncio.coroutine
def test_querying_by_greater_than(self):
    """The __gt operator works through both filter() and get()."""
    class Test(Document):
        __collection__ = "GreaterThan"
        test = IntField()

    yield from Test.objects.delete()

    yield from Test.objects.create(test=10)
    test = yield from Test.objects.create(test=15)

    loaded_tests = yield from Test.objects.filter(test__gt=12).find_all()
    expect(loaded_tests).to_length(1)
    expect(loaded_tests[0]._id).to_equal(test._id)

    loaded_test = yield from Test.objects.get(test__gt=12)
    expect(loaded_test).not_to_be_null()
    expect(loaded_test._id).to_equal(test._id)
@async_test
@asyncio.coroutine
def test_querying_by_greater_than_or_equal(self):
    """The __gte operator includes the boundary value."""
    class Test(Document):
        __collection__ = "GreaterThanOrEqual"
        test = IntField()

    yield from Test.objects.delete()

    test = yield from Test.objects.create(test=10)
    test2 = yield from Test.objects.create(test=15)

    loaded_tests = yield from Test.objects.filter(test__gte=12).find_all()
    expect(loaded_tests).to_length(1)
    expect(loaded_tests[0]._id).to_equal(test2._id)

    loaded_tests = yield from Test.objects.filter(test__gte=10).find_all()
    expect(loaded_tests).to_length(2)
    expect(loaded_tests[0]._id).to_equal(test._id)
    expect(loaded_tests[1]._id).to_equal(test2._id)
@async_test
@asyncio.coroutine
def test_querying_by_lesser_than_or_equal(self):
class Test(Document):
__collection__ = "LesserThanOrEqual"
test = IntField()
yield from Test.objects.delete()
test = yield from Test.objects.create(test=10)
test2 = yield from Test.objects.create(test=15)
loaded_tests = yield from Test.objects.filter(test__lte=12).find_all()
expect(loaded_tests).to_length(1)
| |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/EEC.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/EEC
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from copy import deepcopy
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Simulation.EEC.get_machine_from_parent import get_machine_from_parent
except ImportError as error:
get_machine_from_parent = error
try:
from ..Methods.Simulation.EEC.comp_R1 import comp_R1
except ImportError as error:
comp_R1 = error
try:
from ..Methods.Simulation.EEC.comp_skin_effect import comp_skin_effect
except ImportError as error:
comp_skin_effect = error
try:
from ..Methods.Simulation.EEC.comp_parameters import comp_parameters
except ImportError as error:
comp_parameters = error
try:
from ..Methods.Simulation.EEC.update_from_ref import update_from_ref
except ImportError as error:
update_from_ref = error
try:
from ..Methods.Simulation.EEC.solve import solve
except ImportError as error:
solve = error
try:
from ..Methods.Simulation.EEC.solve_PWM import solve_PWM
except ImportError as error:
solve_PWM = error
try:
from ..Methods.Simulation.EEC.comp_joule_losses import comp_joule_losses
except ImportError as error:
comp_joule_losses = error
try:
from ..Methods.Simulation.EEC.comp_fluxlinkage import comp_fluxlinkage
except ImportError as error:
comp_fluxlinkage = error
from numpy import isnan
from ._check import InitUnKnowClassError
class EEC(FrozenClass):
    """Equivalent Electrical Circuit abstract class"""

    VERSION = 1

    # Each optional method import above may have failed with ImportError.
    # When it did, the corresponding class attribute is replaced by a
    # property whose getter re-raises that ImportError on first use, so
    # the missing dependency only matters if the method is actually
    # called. Otherwise the imported function is bound as the method.
    # Check ImportError to remove unnecessary dependencies in unused method
    # cf Methods.Simulation.EEC.get_machine_from_parent
    if isinstance(get_machine_from_parent, ImportError):
        get_machine_from_parent = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method get_machine_from_parent: "
                    + str(get_machine_from_parent)
                )
            )
        )
    else:
        get_machine_from_parent = get_machine_from_parent
    # cf Methods.Simulation.EEC.comp_R1
    if isinstance(comp_R1, ImportError):
        comp_R1 = property(
            fget=lambda x: raise_(
                ImportError("Can't use EEC method comp_R1: " + str(comp_R1))
            )
        )
    else:
        comp_R1 = comp_R1
    # cf Methods.Simulation.EEC.comp_skin_effect
    if isinstance(comp_skin_effect, ImportError):
        comp_skin_effect = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method comp_skin_effect: " + str(comp_skin_effect)
                )
            )
        )
    else:
        comp_skin_effect = comp_skin_effect
    # cf Methods.Simulation.EEC.comp_parameters
    if isinstance(comp_parameters, ImportError):
        comp_parameters = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method comp_parameters: " + str(comp_parameters)
                )
            )
        )
    else:
        comp_parameters = comp_parameters
    # cf Methods.Simulation.EEC.update_from_ref
    if isinstance(update_from_ref, ImportError):
        update_from_ref = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method update_from_ref: " + str(update_from_ref)
                )
            )
        )
    else:
        update_from_ref = update_from_ref
    # cf Methods.Simulation.EEC.solve
    if isinstance(solve, ImportError):
        solve = property(
            fget=lambda x: raise_(
                ImportError("Can't use EEC method solve: " + str(solve))
            )
        )
    else:
        solve = solve
    # cf Methods.Simulation.EEC.solve_PWM
    if isinstance(solve_PWM, ImportError):
        solve_PWM = property(
            fget=lambda x: raise_(
                ImportError("Can't use EEC method solve_PWM: " + str(solve_PWM))
            )
        )
    else:
        solve_PWM = solve_PWM
    # cf Methods.Simulation.EEC.comp_joule_losses
    if isinstance(comp_joule_losses, ImportError):
        comp_joule_losses = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method comp_joule_losses: " + str(comp_joule_losses)
                )
            )
        )
    else:
        comp_joule_losses = comp_joule_losses
    # cf Methods.Simulation.EEC.comp_fluxlinkage
    if isinstance(comp_fluxlinkage, ImportError):
        comp_fluxlinkage = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use EEC method comp_fluxlinkage: " + str(comp_fluxlinkage)
                )
            )
        )
    else:
        comp_fluxlinkage = comp_fluxlinkage
    # generic save method is available in all object
    save = save
    # get_logger method is available in all object
    get_logger = get_logger
def __init__(
    self,
    type_skin_effect=1,
    OP=None,
    Tsta=20,
    Trot=20,
    Xkr_skinS=1,
    Xke_skinS=1,
    Xkr_skinR=1,
    Xke_skinR=1,
    R1=None,
    fluxlink=None,
    init_dict=None,
    init_str=None,
):
    """Constructor of the class. Can be use in three ways :
    - __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
        for pyleecan type, -1 will call the default constructor
    - __init__ (init_dict = d) d must be a dictionary with property names as keys
    - __init__ (init_str = s) s must be a string
        s is the file path to load

    ndarray or list can be given for Vector and Matrix
    object or dict can be given for pyleecan Object"""

    if init_str is not None:  # Load the init_dict from a file path
        init_dict = load_init_dict(init_str)[1]
    if init_dict is not None:  # Initialisation by dict
        assert type(init_dict) is dict
        # Overwrite each default value with the init_dict content when
        # the key is present; dict.get keeps the current value otherwise.
        type_skin_effect = init_dict.get("type_skin_effect", type_skin_effect)
        OP = init_dict.get("OP", OP)
        Tsta = init_dict.get("Tsta", Tsta)
        Trot = init_dict.get("Trot", Trot)
        Xkr_skinS = init_dict.get("Xkr_skinS", Xkr_skinS)
        Xke_skinS = init_dict.get("Xke_skinS", Xke_skinS)
        Xkr_skinR = init_dict.get("Xkr_skinR", Xkr_skinR)
        Xke_skinR = init_dict.get("Xke_skinR", Xke_skinR)
        R1 = init_dict.get("R1", R1)
        fluxlink = init_dict.get("fluxlink", fluxlink)
    # Set the properties (value check and convertion are done in setter)
    self.parent = None
    self.type_skin_effect = type_skin_effect
    self.OP = OP
    self.Tsta = Tsta
    self.Trot = Trot
    self.Xkr_skinS = Xkr_skinS
    self.Xke_skinS = Xke_skinS
    self.Xkr_skinR = Xkr_skinR
    self.Xke_skinR = Xke_skinR
    self.R1 = R1
    self.fluxlink = fluxlink

    # The class is frozen, for now it's impossible to add new properties
    self._freeze()
def __str__(self):
    """Convert this object in a readeable string (for print)"""

    EEC_str = ""
    if self.parent is None:
        EEC_str += "parent = None " + linesep
    else:
        EEC_str += "parent = " + str(type(self.parent)) + " object" + linesep
    EEC_str += "type_skin_effect = " + str(self.type_skin_effect) + linesep
    if self.OP is not None:
        # Indent the nested object's repr one tab level.
        tmp = self.OP.__str__().replace(linesep, linesep + "\t").rstrip("\t")
        EEC_str += "OP = " + tmp
    else:
        EEC_str += "OP = None" + linesep + linesep
    EEC_str += "Tsta = " + str(self.Tsta) + linesep
    EEC_str += "Trot = " + str(self.Trot) + linesep
    EEC_str += "Xkr_skinS = " + str(self.Xkr_skinS) + linesep
    EEC_str += "Xke_skinS = " + str(self.Xke_skinS) + linesep
    EEC_str += "Xkr_skinR = " + str(self.Xkr_skinR) + linesep
    EEC_str += "Xke_skinR = " + str(self.Xke_skinR) + linesep
    EEC_str += "R1 = " + str(self.R1) + linesep
    if self.fluxlink is not None:
        tmp = self.fluxlink.__str__().replace(linesep, linesep + "\t").rstrip("\t")
        EEC_str += "fluxlink = " + tmp
    else:
        EEC_str += "fluxlink = None" + linesep + linesep
    return EEC_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.type_skin_effect != self.type_skin_effect:
return False
if other.OP != self.OP:
return False
if other.Tsta != self.Tsta:
return False
if other.Trot != self.Trot:
return False
if other.Xkr_skinS != self.Xkr_skinS:
return False
if other.Xke_skinS != self.Xke_skinS:
return False
if other.Xkr_skinR != self.Xkr_skinR:
return False
if other.Xke_skinR != self.Xke_skinR:
return False
if other.R1 != self.R1:
return False
if other.fluxlink != self.fluxlink:
return False
return True
def compare(self, other, name="self", ignore_list=None, is_add_value=False):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if other._type_skin_effect != self._type_skin_effect:
if is_add_value:
val_str = (
" (self="
+ str(self._type_skin_effect)
+ ", other="
+ str(other._type_skin_effect)
+ ")"
)
diff_list.append(name + ".type_skin_effect" + val_str)
else:
diff_list.append(name + ".type_skin_effect")
if (other.OP is None and self.OP is not None) or (
other.OP is not None and self.OP is None
):
diff_list.append(name + ".OP None mismatch")
elif self.OP is not None:
diff_list.extend(
self.OP.compare(
other.OP,
name=name + ".OP",
ignore_list=ignore_list,
is_add_value=is_add_value,
)
)
if (
other._Tsta is not None
and self._Tsta is not None
and isnan(other._Tsta)
and isnan(self._Tsta)
):
pass
elif other._Tsta != self._Tsta:
if is_add_value:
val_str = (
" (self=" + str(self._Tsta) + ", other=" + str(other._Tsta) + ")"
)
diff_list.append(name + ".Tsta" + val_str)
else:
diff_list.append(name + ".Tsta")
if (
other._Trot is not None
and self._Trot is not None
and isnan(other._Trot)
and isnan(self._Trot)
):
pass
elif other._Trot != self._Trot:
if is_add_value:
val_str = (
" (self=" + str(self._Trot) + ", other=" + str(other._Trot) + ")"
)
diff_list.append(name + ".Trot" + val_str)
else:
diff_list.append(name + ".Trot")
if (
other._Xkr_skinS is not None
and self._Xkr_skinS is not None
and isnan(other._Xkr_skinS)
and isnan(self._Xkr_skinS)
):
pass
elif other._Xkr_skinS != self._Xkr_skinS:
if is_add_value:
val_str = (
" (self="
+ str(self._Xkr_skinS)
+ ", other="
+ str(other._Xkr_skinS)
+ ")"
)
diff_list.append(name + ".Xkr_skinS" + val_str)
else:
diff_list.append(name + ".Xkr_skinS")
if (
other._Xke_skinS is not None
and self._Xke_skinS is not None
and isnan(other._Xke_skinS)
and isnan(self._Xke_skinS)
):
pass
elif other._Xke_skinS != self._Xke_skinS:
if is_add_value:
val_str = (
" (self="
+ str(self._Xke_skinS)
+ ", other="
+ str(other._Xke_skinS)
+ ")"
)
diff_list.append(name + ".Xke_skinS" + val_str)
else:
diff_list.append(name + ".Xke_skinS")
if (
other._Xkr_skinR is not None
and self._Xkr_skinR is not None
and isnan(other._Xkr_skinR)
and isnan(self._Xkr_skinR)
):
pass
elif other._Xkr_skinR != self._Xkr_skinR:
if is_add_value:
val_str = (
" (self="
| |
m.get('zjbzxbm') is not None:
self.zjbzxbm = m.get('zjbzxbm')
if m.get('xingming') is not None:
self.xingming = m.get('xingming')
if m.get('zjhm') is not None:
self.zjhm = m.get('zjhm')
if m.get('dkhtbh') is not None:
self.dkhtbh = m.get('dkhtbh')
return self
class CpfDataUsageLogVO(TeaModel):
    """Data-usage log entry: who used which data item, when, and for what purpose."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('user_name', 'usage_time', 'data_desc', 'purpose', 'biz_id')

    def __init__(
        self,
        user_name: str = None,
        usage_time: str = None,
        data_desc: str = None,
        purpose: str = None,
        biz_id: str = None,
    ):
        # name of the consuming party
        self.user_name = user_name
        # time of usage
        self.usage_time = usage_time
        # description of the data item
        self.data_desc = data_desc
        # purpose of use
        self.purpose = purpose
        # business serial number
        self.biz_id = biz_id

    def validate(self):
        """Require every field to be present."""
        for key in self._KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class CertificationInitResponse(TeaModel):
    """Response returned when an identity-verification session is initialized."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('certify_id', 'outer_order_no', 'scene_id')

    def __init__(
        self,
        certify_id: str = None,
        outer_order_no: str = None,
        scene_id: str = None,
    ):
        # unique ID of the identity-verification session
        self.certify_id = certify_id
        # merchant's unique request ID
        self.outer_order_no = outer_order_no
        # scene ID
        self.scene_id = scene_id

    def validate(self):
        """Require every field to be present."""
        for key in self._KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class TdmVerifyLogVO(TeaModel):
    """Identity-verification log record: status code plus a result description."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('status', 'remark')

    def __init__(
        self,
        status: str = None,
        remark: str = None,
    ):
        # 1: verification created; 2: verification passed; 3: verification failed
        self.status = status
        # textual description of the verification result
        self.remark = remark

    def validate(self):
        """Require every field to be present."""
        for key in self._KEYS:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class OpenCpfCertRequest(TeaModel):
    """Request for issuing a housing-provident-fund certificate document."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('auth_token', 'product_instance_id', 'terminal_identity',
             'issue_cert_type', 'provider_id', 'data_owner_identity_type',
             'data_owner_identity', 'data_owner_name', 'extend_params')
    # fields that must be present for the request to be valid
    _REQUIRED = ('terminal_identity', 'issue_cert_type', 'provider_id',
                 'data_owner_identity_type', 'data_owner_identity',
                 'data_owner_name')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        issue_cert_type: str = None,
        provider_id: str = None,
        data_owner_identity_type: str = None,
        data_owner_identity: str = None,
        data_owner_name: str = None,
        extend_params: str = None,
    ):
        # authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # terminal (client) ID
        self.terminal_identity = terminal_identity
        # type of certificate to issue
        self.issue_cert_type = issue_cert_type
        # housing provident fund center ID
        self.provider_id = provider_id
        # ID document type
        self.data_owner_identity_type = data_owner_identity_type
        # data owner ID (national ID number)
        self.data_owner_identity = data_owner_identity
        # data owner name (real name)
        self.data_owner_name = data_owner_name
        # serialized request extension parameters
        self.extend_params = extend_params

    def validate(self):
        """Require each mandatory field."""
        for key in self._REQUIRED:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class OpenCpfCertResponse(TeaModel):
    """Response to a certificate-issuing request."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('req_msg_id', 'result_code', 'result_msg', 'issue_id',
             'deposit_cert')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        issue_id: str = None,
        deposit_cert: str = None,
    ):
        # unique request ID, for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; "OK" generally means the call succeeded
        self.result_code = result_code
        # textual description of the error, if any
        self.result_msg = result_msg
        # certificate file ID
        self.issue_id = issue_id
        # URL from which the certificate file can be fetched
        self.deposit_cert = deposit_cert

    def validate(self):
        """Responses carry no required fields."""
        pass

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfCertRequest(TeaModel):
    """Paged request listing previously issued certificates for one person."""

    # attribute names double as the wire keys used by to_map/from_map
    _KEYS = ('auth_token', 'product_instance_id', 'terminal_identity',
             'data_owner_identity', 'issue_cert_type', 'current_page',
             'page_size', 'option_time')
    # fields that must be present for the request to be valid
    _REQUIRED = ('terminal_identity', 'data_owner_identity', 'issue_cert_type')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        terminal_identity: str = None,
        data_owner_identity: str = None,
        issue_cert_type: str = None,
        current_page: int = None,
        page_size: int = None,
        option_time: str = None,
    ):
        # authorization token under OAuth mode
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # terminal (client) ID
        self.terminal_identity = terminal_identity
        # national ID number
        self.data_owner_identity = data_owner_identity
        # certificate type
        self.issue_cert_type = issue_cert_type
        # current page number, defaults to 1
        self.current_page = current_page
        # number of entries per page, defaults to 10
        self.page_size = page_size
        # time-filter enum, defaults to "all"
        self.option_time = option_time

    def validate(self):
        """Require each mandatory field."""
        for key in self._REQUIRED:
            self.validate_required(getattr(self, key), key)

    def to_map(self):
        """Serialize to a dict, omitting unset fields."""
        result = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict, ignoring absent keys; return self."""
        m = m or dict()
        for key in self._KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        return self
class ListCpfCertResponse(TeaModel):
    """Paged response listing previously issued certificates."""

    # scalar attributes whose names double as wire keys; cert_list is
    # handled separately because its entries are nested models
    _PLAIN_KEYS = ('req_msg_id', 'result_code', 'result_msg', 'current',
                   'page_size', 'total')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        current: int = None,
        page_size: int = None,
        total: int = None,
        cert_list: List[CertSummary] = None,
    ):
        # unique request ID, for tracing and troubleshooting
        self.req_msg_id = req_msg_id
        # result code; "OK" generally means the call succeeded
        self.result_code = result_code
        # textual description of the error, if any
        self.result_msg = result_msg
        # current page number
        self.current = current
        # number of entries per page
        self.page_size = page_size
        # total record count
        self.total = total
        # list of issued-certificate summaries
        self.cert_list = cert_list

    def validate(self):
        """Recursively validate each certificate entry, if any."""
        for entry in self.cert_list or []:
            if entry:
                entry.validate()

    def to_map(self):
        """Serialize to a dict; cert_list entries are serialized recursively."""
        result = dict()
        for key in self._PLAIN_KEYS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        result['cert_list'] = []
        if self.cert_list is not None:
            for entry in self.cert_list:
                result['cert_list'].append(entry.to_map() if entry else None)
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict; cert_list entries are deserialized
        into CertSummary models. Return self."""
        m = m or dict()
        for key in self._PLAIN_KEYS:
            if m.get(key) is not None:
                setattr(self, key, m.get(key))
        self.cert_list = []
        if m.get('cert_list') is not None:
            for raw in m.get('cert_list'):
                self.cert_list.append(CertSummary().from_map(raw))
        return self
class CheckCpfCertRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
biz_id: str = None,
type: str = None,
| |
import numpy as np
import pandas as pd
import theano.tensor as T
from random import shuffle
from theano import shared, function
from patsy import dmatrix
from collections import defaultdict
class MainClauseModel(object):
def __init__(self, nlatfeats=8, alpha=1., discount=None, beta=0.5, gamma=0.9,
delta=2., orthogonality_penalty=0., nonparametric=False):
'''
Parameters
----------
nlatfeats : int
Number of latent features for each verb; the default of 8 is
the number of unique subcat frames in the data
alpha : float (positive)
Beta process hyperparameter as specified in Teh et al. 2007
"Stick-breaking Construction for the Indian Buffet Process";
changes meaning based on Pitman-Yor discount hyperparameter
(see Teh et al. 2007, p.3)
discount : float (unit) or None
If discount is a float, it must satisfy alpha > -discount
beta : float (positive)
If parametric=True, concetration parameter for verb-specific
beta draws based on beta process sample; if nonparametric=False,
hyperparameter of a Beta(beta, beta); in the latter case, beta
should be on (0,1), otherwise the verb representations are
unidentifiable, since their is a flat prior on the selection
probability
gamma : float (positive)
Hyperparameter of a beta distribution on the projection matrix
delta : float (positive)
Hyperparameter of a beta distribution on the verb feature
probability matrix
orthogonality_penalty : float (positive)
How much to penalize for singularity
nonparametric : bool
Whether to use a nonparametric prior
divergence_weight : float (0 to negative infinity) (ADDED)
How much to weight the either-or bias. If 0, no either-or bias.
'''
self.nlatfeats = nlatfeats
self.alpha = alpha
self.discount = discount
self.beta = beta
self.gamma = gamma
self.delta = delta
self.orthogonality_penalty = orthogonality_penalty
self.nonparametric = nonparametric
self.divergence_weight = -1
self._validate_params()
self._ident = ''.join(np.random.choice(9, size=10).astype(str))
def _validate_params(self):
if self.discount is not None:
self._pitman_yor = True
try:
assert self.alpha > -self.discount
except AssertionError:
raise ValueError('alpha must be greater than -discount')
else:
self._pitman_yor = False
    def _initialize_model(self, data, stochastic):
        """Wire up the full model for `data`: counters, shared-variable
        representations, the symbolic loss, and the updater function.
        The call order matters: each step reads graph nodes built by the
        previous one."""
        self.data = data
        self._initialize_counter()
        self._initialize_reps()
        self._initialize_loss()
        self._initialize_updaters(stochastic)
    def _initialize_counter(self):
        """Create symbolic per-verb counter and identity matrix.
        NOTE(review): neither appears to be read or updated elsewhere in
        this chunk — confirm they are still needed."""
        self._verbcount = T.zeros(self.data.n('verb'))
        self._verbeye = T.eye(self.data.n('verb'))
    def _initialize_reps(self):
        """Create the shared Theano variables for all model representations.

        Builds (each squashed to (0, 1) via sigmoid):
        - nu/mu: stick-breaking weights (nonparametric case only)
        - verbreps: verb-by-latent-feature representations
        - projection: latent-feature-by-surface-feature projection
        - verbfeatprob: verb-by-latent-feature selection probabilities
        and derives `_featureprob`, the per-surface-feature probability.
        """
        self._reps = {}
        if self.nonparametric:
            # nu_aux = np.array([2.]+[-1.]*(self.nlatfeats-1))
            nu_aux = np.array([0.]*self.nlatfeats)
            self._reps['nu'] = shared(nu_aux, name='nu')
            # stick-breaking construction: mu_k = prod_{j<=k} sigmoid(nu_j)
            self._nu = T.nnet.sigmoid(self._reps['nu'])
            self._mu = T.cumprod(self._nu)
        # small random logits for the non-clause-type verbs
        verbreps_aux = np.random.normal(0., 1e-2, size=[self.data.n('verb')-self.data.n('clausetype'),
                                                        self.nlatfeats])
        projection_aux = np.random.normal(0., 1e-2, size=[self.nlatfeats, self.data.n('feature')])
        # -4 logit ~ low initial selection probability after sigmoid
        verbfeatprob_aux = np.zeros([self.data.n('verb')-self.data.n('clausetype'), self.nlatfeats])-4.
        if self.data.n('clausetype'):
            try:
                assert self.data.n('clausetype') <= self.nlatfeats
            except AssertionError:
                raise ValueError('nlatfeats must be greater than or equal to the number of clausetypes')
            # clause-type rows get pinned, near-one-hot representations via
            # +/-inf logits (these are the DECLARATIVE/IMPERATIVE rows that
            # later code treats specially at indices 0 and 1)
            ctype_ident = (1.-1e-10)*np.eye(self.data.n('clausetype'))
            ct_aux_vr = np.log(ctype_ident)-np.log(1.-ctype_ident)
            ct_aux_vr = np.concatenate([ct_aux_vr, -np.inf*np.ones([self.data.n('clausetype'),
                                                                    self.nlatfeats-self.data.n('clausetype')])],
                                       axis=1)
            ct_aux_vfp = np.inf*np.ones([self.data.n('clausetype'), self.nlatfeats])
            verbreps_aux = np.concatenate([ct_aux_vr, verbreps_aux])
            verbfeatprob_aux = np.concatenate([ct_aux_vfp, verbfeatprob_aux])
        self._reps['verbreps'] = shared(verbreps_aux, name='verbreps')
        self._reps['projection'] = shared(projection_aux, name='projection')
        self._reps['verbfeatprob'] = shared(verbfeatprob_aux, name='verbfeatprob')
        # squash raw logits into (0, 1)
        self._verbreps = T.nnet.sigmoid(self._reps['verbreps'])
        self._projection = T.nnet.sigmoid(self._reps['projection'])
        self._verbfeatprob = T.nnet.sigmoid(self._reps['verbfeatprob'])
        # noisy-or over latent features: a surface feature fires unless every
        # latent feature fails to select and project to it
        softand = self._verbfeatprob[:,:,None]*self._verbreps[:,:,None]*self._projection[None,:,:]
        self._featureprob = 1.-T.prod(1.-softand, axis=1)
    # Added to White et al. model: divergence function. Calculates JS-divergence (cf. SciPy version which yields the square root value)
    def _get_js_divergence(self):
        """Return per-verb Jensen-Shannon divergence between the verb's
        assertive (column 0) and requestive (column 1) selection probabilities.

        requestProb enters as (1 - requestProb), pairing "asserts" with
        "does not request". Entries 0 and 1 (the DECLARATIVE/IMPERATIVE
        clause-type rows) are pinned to 0 to avoid NaNs from their
        +/-inf logits.
        """
        vr = self._verbreps
        assertProb = vr[:, 0] #s_v,belief
        requestProb = vr[:, 1] #s_v,desire
        # mixture distributions of the two (Bernoulli) columns
        m0 = (assertProb + 1-requestProb)/2
        m1 = (1-assertProb + requestProb)/2
        kl_assert = (assertProb * T.log(assertProb / m0)
                     + (1-assertProb) * T.log((1-assertProb) / m1))
        kl_request = ((1-requestProb) * T.log((1-requestProb) / m0)
                      + requestProb * T.log(requestProb / m1))
        js = ((kl_assert + kl_request) / 2 )**1
        # Above code leads to NaN error for verbs 0 and 1 (DECLARATIVE & IMPERATIVE), probably because of how Theano deals with floating point representations
        # These should be 0. Stipulate them as such.
        # cf. https://stackoverflow.com/questions/31919818/theano-sqrt-returning-nan-values.
        js = T.set_subtensor(js[0], 0.) # try ... js[tuple([0,])], 0...
        js = T.set_subtensor(js[1], 0.)
        return js
    # Added to White et al. model: divergence function. Calculates KL-divergence
    def _get_kl_divergence(self):
        """Return per-verb symmetrized KL divergence between the verb's
        assertive (column 0) and requestive (column 1) selection probabilities.

        As in `_get_js_divergence`, requestProb enters flipped as
        (1 - requestProb); entries 0 and 1 (clause-type rows) are pinned
        to 0 to avoid NaNs from their +/-inf logits.
        """
        vr = self._verbreps
        assertProb = vr[:, 0]
        requestProb = vr[:, 1]
        # symmetrized KL between the (assert) and (1 - request) Bernoullis
        kl_assert = (assertProb * T.log(assertProb / (1-requestProb))
                     + (1-assertProb) * T.log((1-assertProb) / requestProb))
        kl_request = ((1-requestProb) * T.log((1-requestProb) / assertProb)
                      + requestProb * T.log(requestProb / (1-assertProb)))
        kl = ((kl_assert + kl_request) / 2 )**1
        # Above code leads to NaN error for verbs 0 and 1 (DECLARATIVE & IMPERATIVE), probably because of how Theano deals with floating point representations
        # These should be 0. Stipulate them as such.
        # cf. https://stackoverflow.com/questions/31919818/theano-sqrt-returning-nan-values.
        kl = T.set_subtensor(kl[0], 0.) # try ... js[tuple([0,])], 0...
        kl = T.set_subtensor(kl[1], 0.)
        return kl
    def _initialize_loss(self):
        """Assemble the symbolic objective: log-priors on all representation
        matrices, an optional orthogonality penalty, the data log-likelihood,
        and a per-iteration variant that adds the divergence ("either-or")
        bias.

        `_total_loss`/`_itr_loss` are maximization objectives — the updaters
        take gradients of their negations (see `_initialize_updaters`).
        """
        # Beta(gamma, gamma) log-density of the projection matrix (up to a constant)
        self._log_projection_prior = (self.gamma-1.)*T.log(self._projection) +\
                                     (self.gamma-1.)*T.log(1.-self._projection)
        # Beta(delta, delta) log-density of the verb feature probability matrix
        self._log_verbfeatureprob_prior = (self.delta-1.)*T.log(self._verbfeatprob) +\
                                          (self.delta-1.)*T.log(1.-self._verbfeatprob)
        # self._log_verbfeatureprob_prior = -T.log(self._verbfeatprob)/T.log(self._verbreps)
        if self.nonparametric:
            def betaln(alpha, beta):
                # log of the Beta function
                return T.gammaln(alpha) + T.gammaln(beta) - T.gammaln(alpha+beta)
            if self._pitman_yor:
                upper_a = self.alpha + self.nlatfeats*self.discount
                upper_b = 1.-self.discount
            else:
                upper_a = 1.
                upper_b = self.alpha
            # prior on the stick-breaking weights nu
            self._log_upper_prior = (upper_a-1.)*T.log(self._nu) +\
                                    (upper_b-1.)*T.log(1.-self._nu) -\
                                    betaln(upper_a, upper_b)
            # verb-specific draws concentrated around the stick weights mu
            lower_a = self.beta*self._mu
            lower_b = self.beta*(1.-self._mu)
            self._log_lower_prior = (lower_a-1.)[None,:]*T.log(self._verbreps) +\
                                    (lower_b-1.)[None,:]*T.log(1.-self._verbreps) -\
                                    betaln(lower_a, lower_b)[None,:]
            # each prior term is normalized by the number of entries it sums over
            self._prior = T.sum(self._log_upper_prior)/self.nlatfeats +\
                          T.sum(self._log_lower_prior)/(self.data.n('verb')*self.nlatfeats) +\
                          T.sum(self._log_projection_prior)/(self.data.n('feature')*self.nlatfeats)+\
                          T.sum(self._log_verbfeatureprob_prior)/(self.data.n('verb')*self.nlatfeats)
        else:
            # parametric case: Beta(beta, beta) prior directly on the verb reps
            self._log_verbreps_prior = (self.beta-1.)*T.log(self._verbreps) +\
                                       (self.beta-1.)*T.log(1.-self._verbreps)
            self._prior = T.sum(self._log_verbreps_prior)/(self.data.n('verb')*self.nlatfeats) +\
                          T.sum(self._log_projection_prior)/(self.data.n('feature')*self.nlatfeats)+\
                          T.sum(self._log_verbfeatureprob_prior)/(self.data.n('verb')*self.nlatfeats)
        if self.orthogonality_penalty:
            # penalize off-diagonal mass of verbreps' Gram matrix (non-orthogonality)
            verbrep2 = T.dot(self._verbreps.T, self._verbreps)
            verbrep2_rawsum = T.sum(T.square(verbrep2 - verbrep2*T.identity_like(verbrep2)))
            self._orthogonality_penalty = -self.orthogonality_penalty*\
                                          verbrep2_rawsum/(self.nlatfeats*self.data.n('verb'))
        else:
            self._orthogonality_penalty = 0.
        # Bernoulli log-likelihood of the observed surface features
        p = self._featureprob[self.data.verb]
        k = self.data.features
        # r = 1./self._verbreps.sum(axis=1)[self.data.verb,None]
        #self._ll_per_feature = k*T.log(p)+r*T.log(1.-p)+T.gammaln(k+r)-T.gammaln(k+1)-T.gammaln(r)
        self._ll_per_feature = k*T.log(p)+(1.-k)*T.log(1.-p) # log likelihood, by defn. negative (log 1 = 0)
        self._total_ll = T.sum(self._ll_per_feature)/(self.data.verb.shape[0]*\
                                                      self.data.n('feature'))
        self._total_loss = self._prior+self._orthogonality_penalty+self._total_ll
        # indices of the data rows used in one stochastic update
        self._itr = T.ivector('itr')
        ## Added to White et al. model
        # Option A: mean of JS divergence for observed verbs
        self._divergence = T.mean(self._get_js_divergence()[self.data.verb][self._itr])*self.divergence_weight
        # Option B: mean of KL divergence for observed verbs
        # self._divergence = T.mean(self._get_kl_divergence()[self.data.verb][self._itr])*self.divergence_weight
        # Other options:
        # T.mean(self._get_js_divergence()) # Option A1: mean of JS divergence for ALL verbs, regardless of verbs observed for the particular utterance
        # T.mean(self._get_kl_divergence()) # Option B1: mean of KL divergence for ALL verbs, regardless of verbs observed for the particular utterance
        self._itr_ll = T.sum(self._ll_per_feature[self._itr])/self.data.n('feature')
        self._itr_loss = self._prior+self._orthogonality_penalty+self._itr_ll + self._divergence
        # Subtract divergence. Effectively, we are taking the raw log-likelihood (_ll_per_feature), a negative value, and adjusting it by this divergence score. Both JSD and KLD yield a positive value. Since the model tries to maximize log-likelihood, we want the adjusted log-likelihood to be lower when the divergence score is high. One way to do so is adjust divergence with a negative weight, effectively subtracting divergence from log-likelihood.
    def _initialize_updaters(self, stochastic):
        """Build the AdaGrad update function over all shared representations.

        If `stochastic`, gradients come from the per-iteration objective
        (`_itr_loss`, which includes the divergence bias); otherwise from
        the full-data objective. The compiled function `updater_ada` takes
        the data-row indices for one sentence and returns
        (total_ll, itr_ll, verbreps, projection, divergence).
        """
        update_dict_ada = []
        self.rep_grad_hist_t = {}
        for name, rep in self._reps.items():
            # negate: the objective is maximized, the updater descends
            if stochastic:
                rep_grad = T.grad(-self._itr_loss, rep)
            else:
                rep_grad = T.grad(-self._total_loss, rep)
            if name in ['verbreps', 'projection', 'verbfeatprob']:
                # freeze gradients that would push already-saturated logits
                # (|rep| > 10) further into saturation
                rep_grad = T.switch((rep>10)*(rep_grad<0),
                                    T.zeros_like(rep_grad),
                                    rep_grad)
                rep_grad = T.switch((rep<-10)*(rep_grad>0),
                                    T.zeros_like(rep_grad),
                                    rep_grad)
            # Incorporating divergence causes verbreps gradients for DECLARATIVE and IMPERATIVE to equal NaN; so replace NaN with 0s (declaratives and imperative gradients don't change)
            rep_grad = T.switch(T.isnan(rep_grad), 0., rep_grad)
            # AdaGrad accumulator, initialized to ones
            self.rep_grad_hist_t[name] = shared(np.ones(rep.shape.eval()),
                                                name=name+'_hist'+self._ident)
            # AdaGrad scaling: divide by sqrt of accumulated squared gradients
            rep_grad_adj = rep_grad / (T.sqrt(self.rep_grad_hist_t[name]))
            learning_rate = 2.# if name != 'nu' else 1e-20
            update_dict_ada += [(self.rep_grad_hist_t[name], self.rep_grad_hist_t[name] +\
                                 T.power(rep_grad, 2)),
                                (rep, rep - learning_rate*rep_grad_adj)]
        self.updater_ada = function(inputs=[self._itr],
                                    outputs=[self._total_ll, self._itr_ll,
                                             self._verbreps, self._projection, self._divergence],
                                    updates=update_dict_ada,
                                    name='updater_ada'+self._ident)
    def _fit(self, sentid, nupdates, verbose):
        """Run `nupdates` AdaGrad updates for each sentence id in `sentid`,
        snapshotting the verb/projection representations every 10 sentences."""
        for j, sid in enumerate(sentid):
            idx = self.data.sentence(sid)
            for i in range(nupdates):
                # NOTE(review): updater_ada's first two outputs are
                # _total_ll/_itr_ll — log-likelihoods, despite the *_loss names.
                total_loss, itr_loss, verbreps, projection, divergence = self.updater_ada(idx)
            if not j % 10:
                self._verbreps_hist.append(verbreps)
                self._projection_hist.append(projection)
            if verbose:
                verb_list = list(self.data.categories('verb')[np.array(self.data.verb)[idx]])
                # NOTE(review): verb_list is printed twice below — looks
                # unintentional; confirm before cleaning up.
                print('\n', j, '\tloss', np.round(total_loss, 3), '\titr_loss',\
                    np.round(itr_loss,3), '\tdiverge', np.round(divergence, 7), '\t', verb_list,'\n',
                    '\t', verb_list,'\t verb ID', np.array(self.data.verb)[idx]
                    )
def fit(self, data, nepochs=0, niters=20000, nupdates=1,
stochastic=True, verbose=True):
self._initialize_model(data, stochastic)
sentid = list(self.data.categories('sentenceid'))
self._verbreps_hist = []
self._projection_hist = []
if nepochs:
for e in range(nepochs):
shuffle(sentid)
if verbose:
print(e)
self._fit(sentid, nupdates, verbose)
else:
order = np.random.choice(sentid, size=niters)
self._fit(order, nupdates, verbose)
return self
@property
def verbreps(self):
return pd.DataFrame(T.nnet.sigmoid(self._reps['verbreps']).eval(),
index=self.data.categories('verb'))
@property
def verbfeatprob(self):
return pd.DataFrame(T.nnet.sigmoid(self._reps['verbfeatprob']).eval(),
index=self.data.categories('verb'))
@property
def projection(self):
return pd.DataFrame(T.nnet.sigmoid(self._reps['projection']).eval(),
columns=self.data.feature_names)
@property
def verbreps_history(self):
reps = []
for t, r in enumerate(self._verbreps_hist):
r = pd.DataFrame(r)
r['verb'] = self.data.categories('verb')
r['sentence'] = t
reps.append(r)
return pd.concat(reps)
@property
def projection_history(self):
reps = []
for t, r in enumerate(self._projection_hist):
r = pd.DataFrame(r)
r.columns = self.data.feature_names
r['sentence'] = t
reps.append(r)
return pd.concat(reps)
@property
def feature_prob(self):
featprob = pd.DataFrame(self._featureprob.eval(), | |
import unittest
from unittest.mock import Mock
from pyats.topology import loader, Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.junos.show_ospf3 import ShowOspf3Interface, \
ShowOspf3Overview, \
ShowOspf3OverviewExtensive, \
ShowOspf3NeighborExtensive, \
ShowOspf3NeighborDetail, \
ShowOspf3Neighbor,\
ShowOspf3Database,\
ShowOspf3InterfaceExtensive, \
ShowOspf3DatabaseExternalExtensive, \
ShowOspf3DatabaseExtensive
class TestShowOspf3Interface(unittest.TestCase):
    """ Unit tests for:
            * show ospf3 interface
    """
    maxDiff = None
    device = Device(name='test-device')
    # simulated device returning no CLI output
    empty_output = {'execute.return_value': ''}
    # raw CLI output for the golden (expected-success) case
    golden_output = {'execute.return_value': '''
        show ospf3 interface | no-more
        Interface State Area DR ID BDR ID Nbrs
        ge-0/0/0.0 PtToPt 0.0.0.8 0.0.0.0 0.0.0.0 1
        ge-0/0/1.0 PtToPt 0.0.0.8 0.0.0.0 0.0.0.0 1
        lo0.0 DR 0.0.0.8 10.189.5.252 0.0.0.0 0
    '''}
    # structure the parser is expected to produce from golden_output
    golden_parsed_output = {
        "ospf3-interface-information": {
            "ospf3-interface": [
                {
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/0.0",
                    "neighbor-count": "1",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-state": "PtToPt"
                },
                {
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/1.0",
                    "neighbor-count": "1",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-state": "PtToPt"
                },
                {
                    "bdr-id": "0.0.0.0",
                    "dr-id": "10.189.5.252",
                    "interface-name": "lo0.0",
                    "neighbor-count": "0",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-state": "DR"
                }
            ]
        }
    }
    def test_empty(self):
        # parser must raise when the device returns no output
        self.device = Mock(**self.empty_output)
        obj = ShowOspf3Interface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()
    def test_golden(self):
        # parsed structure must match the expected golden dict exactly
        self.device = Mock(**self.golden_output)
        obj = ShowOspf3Interface(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3NeighborExtensive(unittest.TestCase):
    """ Unit tests for:
            * show ospf3 neighbor extensive
    """
    device = Device(name='aDevice')
    maxDiff = None
    # simulated device returning no CLI output
    empty_output = {'execute.return_value': ''}
    # raw CLI output for the golden (expected-success) case
    golden_output = {'execute.return_value': '''
        show ospf3 neighbor extensive
        ID Interface State Pri Dead
        10.189.5.253 ge-0/0/0.0 Full 128 35
        Neighbor-address fe80::250:56ff:fe8d:53c0
        Area 0.0.0.8, opt 0x13, OSPF3-Intf-Index 2
        DR-ID 0.0.0.0, BDR-ID 0.0.0.0
        Up 3w0d 17:07:00, adjacent 3w0d 17:07:00
        10.169.14.240 ge-0/0/1.0 Full 128 33
        Neighbor-address fe80::250:56ff:fe8d:72bd
        Area 0.0.0.8, opt 0x13, OSPF3-Intf-Index 3
        DR-ID 0.0.0.0, BDR-ID 0.0.0.0
        Up 3w0d 17:06:59, adjacent 3w0d 17:06:55
    '''}
    # structure the parser is expected to produce from golden_output
    golden_parsed_output = {
        "ospf3-neighbor-information": {
            "ospf3-neighbor": [
                {
                    "activity-timer": "35",
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/0.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:53c0",
                    "neighbor-adjacency-time": {
                        "#text": "3w0d 17:07:00"
                    },
                    "neighbor-id": "10.189.5.253",
                    "neighbor-priority": "128",
                    "neighbor-up-time": {
                        "#text": "3w0d 17:07:00"
                    },
                    "options": "0x13",
                    "ospf-area": "0.0.0.8",
                    "ospf-neighbor-state": "Full",
                    "ospf3-interface-index": "2"
                },
                {
                    "activity-timer": "33",
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/1.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:72bd",
                    "neighbor-adjacency-time": {
                        "#text": "3w0d 17:06:55"
                    },
                    "neighbor-id": "10.169.14.240",
                    "neighbor-priority": "128",
                    "neighbor-up-time": {
                        "#text": "3w0d 17:06:59"
                    },
                    "options": "0x13",
                    "ospf-area": "0.0.0.8",
                    "ospf-neighbor-state": "Full",
                    "ospf3-interface-index": "3"
                }
            ]
        }
    }
    def test_empty(self):
        # parser must raise when the device returns no output
        self.device = Mock(**self.empty_output)
        obj = ShowOspf3NeighborExtensive(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()
    def test_golden(self):
        # parsed structure must match the expected golden dict exactly
        self.device = Mock(**self.golden_output)
        obj = ShowOspf3NeighborExtensive(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3NeighborDetail(unittest.TestCase):
    """ Unit tests for:
            * show ospf3 neighbor detail
    """
    # NOTE(review): docstring previously said 'extensive'; this class tests
    # ShowOspf3NeighborDetail against 'show ospf3 neighbor detail' output.

    device = Device(name='aDevice')
    maxDiff = None

    # Empty device output; the parser is expected to raise
    # SchemaEmptyParserError for this.
    empty_output = {'execute.return_value': ''}

    # Raw device output for 'show ospf3 neighbor detail'.
    golden_output = {'execute.return_value': '''
        show ospf3 neighbor detail
        ID               Interface              State     Pri   Dead
        10.189.5.253     ge-0/0/0.0             Full      128     34
          Neighbor-address fe80::250:56ff:fe8d:53c0
          Area 0.0.0.8, opt 0x13, OSPF3-Intf-Index 2
          DR-ID 0.0.0.0, BDR-ID 0.0.0.0
          Up 3w0d 17:06:45, adjacent 3w0d 17:06:45
        10.169.14.240    ge-0/0/1.0             Full      128     31
          Neighbor-address fe80::250:56ff:fe8d:72bd
          Area 0.0.0.8, opt 0x13, OSPF3-Intf-Index 3
          DR-ID 0.0.0.0, BDR-ID 0.0.0.0
          Up 3w0d 17:06:44, adjacent 3w0d 17:06:40
    '''}

    # Expected structured result for golden_output above.
    golden_parsed_output = {
        "ospf3-neighbor-information": {
            "ospf3-neighbor": [
                {
                    "activity-timer": "34",
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/0.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:53c0",
                    "neighbor-adjacency-time": {
                        "#text": "3w0d 17:06:45"
                    },
                    "neighbor-id": "10.189.5.253",
                    "neighbor-priority": "128",
                    "neighbor-up-time": {
                        "#text": "3w0d 17:06:45"
                    },
                    "options": "0x13",
                    "ospf-area": "0.0.0.8",
                    "ospf-neighbor-state": "Full",
                    "ospf3-interface-index": "2"
                },
                {
                    "activity-timer": "31",
                    "bdr-id": "0.0.0.0",
                    "dr-id": "0.0.0.0",
                    "interface-name": "ge-0/0/1.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:72bd",
                    "neighbor-adjacency-time": {
                        "#text": "3w0d 17:06:40"
                    },
                    "neighbor-id": "10.169.14.240",
                    "neighbor-priority": "128",
                    "neighbor-up-time": {
                        "#text": "3w0d 17:06:44"
                    },
                    "options": "0x13",
                    "ospf-area": "0.0.0.8",
                    "ospf-neighbor-state": "Full",
                    "ospf3-interface-index": "3"
                }
            ]
        }
    }

    def test_empty(self):
        # Empty output must raise rather than return an empty dict.
        self.device = Mock(**self.empty_output)
        obj = ShowOspf3NeighborDetail(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()

    def test_golden(self):
        # Parsing the golden output must reproduce the expected structure.
        self.device = Mock(**self.golden_output)
        obj = ShowOspf3NeighborDetail(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3Neighbor(unittest.TestCase):
    """ Unit tests for:
            * show ospf3 neighbor
    """

    device = Device(name='aDevice')
    maxDiff = None

    # Empty device output; the parser is expected to raise
    # SchemaEmptyParserError for this.
    empty_output = {'execute.return_value': ''}

    # Raw device output for 'show ospf3 neighbor' (summary form: no
    # area/DR/uptime detail lines, only the neighbor address follows).
    golden_output = {'execute.return_value': '''
        show ospf3 neighbor
        ID               Interface              State     Pri   Dead
        10.189.5.253     ge-0/0/0.0             Full      128     35
          Neighbor-address fe80::250:56ff:fe8d:53c0
        10.169.14.240    ge-0/0/1.0             Full      128     33
          Neighbor-address fe80::250:56ff:fe8d:72bd
    '''}

    # Expected structured result for golden_output above.
    golden_parsed_output = {
        "ospf3-neighbor-information": {
            "ospf3-neighbor": [
                {
                    "activity-timer": "35",
                    "interface-name": "ge-0/0/0.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:53c0",
                    "neighbor-id": "10.189.5.253",
                    "neighbor-priority": "128",
                    "ospf-neighbor-state": "Full"
                },
                {
                    "activity-timer": "33",
                    "interface-name": "ge-0/0/1.0",
                    "neighbor-address": "fe80::250:56ff:fe8d:72bd",
                    "neighbor-id": "10.169.14.240",
                    "neighbor-priority": "128",
                    "ospf-neighbor-state": "Full"
                }
            ]
        }
    }

    def test_empty(self):
        # Empty output must raise rather than return an empty dict.
        self.device = Mock(**self.empty_output)
        obj = ShowOspf3Neighbor(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()

    def test_golden(self):
        # Parsing the golden output must reproduce the expected structure.
        self.device = Mock(**self.golden_output)
        obj = ShowOspf3Neighbor(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3Database(unittest.TestCase):
maxDiff = None
device = Device(name='test-device')
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value':
'''
OSPF3 database, Area 0.0.0.8
Type ID Adv Rtr Seq Age Cksum Len
Router 0.0.0.0 10.34.2.250 0x800018ed 2407 0xaf2d 56
Router 0.0.0.0 10.34.2.251 0x80001841 532 0x1d57 56
Router 0.0.0.0 10.169.14.240 0x80001a0b 2956 0x52ba 72
Router 0.0.0.0 10.169.14.241 0x800018b7 1259 0x94a3 72
Router *0.0.0.0 10.189.5.252 0x80001890 913 0xae6c 56
Router 0.0.0.0 10.189.5.253 0x8000182a 915 0x8fdc 56
IntraArPfx 0.0.0.1 10.34.2.250 0x8000178c 1657 0xc4fc 76
IntraArPfx 0.0.0.1 10.34.2.251 0x8000178b 907 0x9e2d 76
IntraArPfx 0.0.0.1 10.169.14.240 0x80001808 2683 0x6948 88
IntraArPfx 0.0.0.1 10.169.14.241 0x800017e6 926 0xa81e 88
IntraArPfx *0.0.0.1 10.189.5.252 0x8000178a 1413 0x9b24 76
IntraArPfx 0.0.0.1 10.189.5.253 0x80001788 415 0x8820 76
OSPF3 AS SCOPE link state database
Type ID Adv Rtr Seq Age Cksum Len
Extern 0.0.0.1 10.34.2.250 0x8000178e 1282 0x3c81 28
Extern 0.0.0.3 10.34.2.250 0x8000178e 907 0x21bf 44
Extern 0.0.0.4 10.34.2.250 0x80000246 2783 0xcc71 44
Extern 0.0.0.1 10.34.2.251 0x80001789 1282 0x4081 28
Extern 0.0.0.2 10.34.2.251 0x80001788 2782 0x17d0 44
Extern 0.0.0.3 10.34.2.251 0x80000246 157 0xea52 44
Extern 0.0.0.18 10.169.14.240 0x80000349 1592 0xbddb 28
Extern 0.0.0.19 10.169.14.240 0x8000034d 774 0x3603 44
Extern 0.0.0.22 10.169.14.240 0x800002b9 2138 0xab95 44
Extern 0.0.0.23 10.169.14.240 0x80000247 501 0x7049 44
Extern 0.0.0.24 10.169.14.240 0x80000246 2410 0x4e6c 44
Extern 0.0.0.9 10.169.14.241 0x800002f0 2593 0xd341 44
Extern 0.0.0.10 10.169.14.241 0x80000246 593 0xd4f2 44
Extern 0.0.0.11 10.169.14.241 0x80000245 2926 0xe6df 44
Extern *0.0.0.1 10.189.5.252 0x8000063f 1913 0x3ff4 44
Extern 0.0.0.1 10.189.5.253 0x80000e1e 1915 0x7dcd 44
OSPF3 Link-Local database, interface ge-0/0/0.0 Area 0.0.0.8
Type ID Adv Rtr Seq Age Cksum Len
Link *0.0.0.2 10.189.5.252 0x8000178a 413 0xae5c 56
Link 0.0.0.2 10.189.5.253 0x80001787 2415 0x13d7 56
OSPF3 Link-Local database, interface ge-0/0/1.0 Area 0.0.0.8
Type ID Adv Rtr Seq Age Cksum Len
Link 0.0.0.3 10.169.14.240 0x8000179e 1047 0xbe92 56
Link *0.0.0.3 10.189.5.252 0x80001789 2913 0x607c 56
OSPF3 Link-Local database, interface lo0.0 Area 0.0.0.8
Type ID Adv Rtr Seq Age Cksum Len
Link *0.0.0.1 10.189.5.252 0x8000178b 2413 0xa440 44
'''}
golden_parsed_output = {
"ospf3-database-information": {
"ospf3-area-header": {
"ospf-area": "0.0.0.8"
},
"ospf3-database": [
{
"advertising-router": "10.34.2.250",
"age": "2407",
"checksum": "0xaf2d",
"lsa-id": "0.0.0.0",
"lsa-length": "56",
"lsa-type": "Router",
"sequence-number": "0x800018ed"
},
{
"advertising-router": "10.34.2.251",
"age": "532",
"checksum": "0x1d57",
"lsa-id": "0.0.0.0",
"lsa-length": "56",
"lsa-type": "Router",
"sequence-number": "0x80001841"
},
{
"advertising-router": "10.169.14.240",
"age": "2956",
"checksum": "0x52ba",
"lsa-id": "0.0.0.0",
"lsa-length": "72",
"lsa-type": "Router",
"sequence-number": "0x80001a0b"
},
{
"advertising-router": "10.169.14.241",
"age": "1259",
"checksum": "0x94a3",
"lsa-id": "0.0.0.0",
"lsa-length": "72",
"lsa-type": "Router",
"sequence-number": "0x800018b7"
},
{
"advertising-router": "10.189.5.252",
"age": "913",
"checksum": "0xae6c",
"lsa-id": "0.0.0.0",
"lsa-length": "56",
"lsa-type": "Router",
"our-entry": True,
"sequence-number": "0x80001890"
},
{
"advertising-router": "10.189.5.253",
"age": "915",
"checksum": "0x8fdc",
"lsa-id": "0.0.0.0",
"lsa-length": "56",
"lsa-type": "Router",
"sequence-number": "0x8000182a"
},
{
"advertising-router": "10.34.2.250",
"age": "1657",
"checksum": "0xc4fc",
"lsa-id": "0.0.0.1",
"lsa-length": "76",
"lsa-type": "IntraArPfx",
"sequence-number": "0x8000178c"
},
{
"advertising-router": "10.34.2.251",
"age": "907",
"checksum": "0x9e2d",
"lsa-id": "0.0.0.1",
"lsa-length": "76",
"lsa-type": "IntraArPfx",
"sequence-number": "0x8000178b"
},
{
"advertising-router": "10.169.14.240",
"age": "2683",
"checksum": "0x6948",
"lsa-id": "0.0.0.1",
"lsa-length": "88",
"lsa-type": "IntraArPfx",
"sequence-number": "0x80001808"
},
{
"advertising-router": "10.169.14.241",
"age": "926",
"checksum": "0xa81e",
"lsa-id": "0.0.0.1",
"lsa-length": "88",
"lsa-type": "IntraArPfx",
"sequence-number": "0x800017e6"
},
{
"advertising-router": "10.189.5.252",
"age": "1413",
"checksum": "0x9b24",
"lsa-id": "0.0.0.1",
"lsa-length": "76",
"lsa-type": "IntraArPfx",
"our-entry": True,
"sequence-number": "0x8000178a"
},
{
"advertising-router": "10.189.5.253",
"age": "415",
"checksum": "0x8820",
"lsa-id": "0.0.0.1",
"lsa-length": "76",
"lsa-type": "IntraArPfx",
"sequence-number": "0x80001788"
},
{
"advertising-router": "10.34.2.250",
"age": "1282",
"checksum": "0x3c81",
"lsa-id": "0.0.0.1",
"lsa-length": "28",
"lsa-type": "Extern",
"sequence-number": "0x8000178e"
},
{
"advertising-router": "10.34.2.250",
"age": "907",
"checksum": "0x21bf",
"lsa-id": "0.0.0.3",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x8000178e"
},
{
"advertising-router": "10.34.2.250",
"age": "2783",
"checksum": "0xcc71",
"lsa-id": "0.0.0.4",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000246"
},
{
"advertising-router": "10.34.2.251",
"age": "1282",
"checksum": "0x4081",
"lsa-id": "0.0.0.1",
"lsa-length": "28",
"lsa-type": "Extern",
"sequence-number": "0x80001789"
},
{
"advertising-router": "10.34.2.251",
"age": "2782",
"checksum": "0x17d0",
"lsa-id": "0.0.0.2",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80001788"
},
{
"advertising-router": "10.34.2.251",
"age": "157",
"checksum": "0xea52",
"lsa-id": "0.0.0.3",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000246"
},
{
"advertising-router": "10.169.14.240",
"age": "1592",
"checksum": "0xbddb",
"lsa-id": "0.0.0.18",
"lsa-length": "28",
"lsa-type": "Extern",
"sequence-number": "0x80000349"
},
{
"advertising-router": "10.169.14.240",
"age": "774",
| |
<gh_stars>1-10
#!/usr/bin/env python
# stdlib imports
from xml.dom import minidom
from collections import OrderedDict
import os.path
# third party imports
import numpy as np
from scipy.special import erfc
import shapely
from mapio.grid2d import Grid2D
# local imports
from losspager.utils.country import Country
from losspager.utils.probs import calcEmpiricalProbFromRange
from losspager.utils.exception import PagerException
# TODO: What should these values be? Mean loss rates for all countries?
DEFAULT_THETA = 16.0
DEFAULT_BETA = 0.15
DEFAULT_L2G = 1.0
DEFAULT_ALPHA = 1.0
class LossModel(object):
    """Loss model defined by an array of per-MMI loss rates.

    Rich comparisons (<, <=, ==, >, >=) order models by "deadliness", i.e.
    by the area under the loss-rate curve over MMI 5-9 (see getArea()).
    """

    def __init__(self, name, rates, l2g, alpha=None):
        """Create a loss model from an array of loss rates at MMI 1-10.

        :param name:
          Name (usually two letter country code) for model.
        :param rates:
          Array-like float values 10 elements in length.
        :param l2g:
          Float value defining the value of the L2G norm calculated when model
          was derived.
        :param alpha:
          Float value defining the alpha (economic correction factor) value
          for the model. Not specified/used for fatality models.
        :returns:
          LossModel instance.
        """
        self._name = name
        # Keep a private copy as a numpy array: this guards against
        # caller-side mutation and makes the fancy indexing in
        # getLossRates() work even when a plain Python list is passed in
        # (the original 'rates[:]' copy broke list inputs there).
        self._rates = np.asarray(rates).copy()
        self._l2g = l2g
        self._alpha = alpha

    def __repr__(self):
        """Return string representation of loss model (rates for MMI 5-9)."""
        mmirange = np.arange(5, 10)
        rates = self.getLossRates(mmirange)
        reprstr = ''
        for i in range(0, len(mmirange)):
            mmi = mmirange[i]
            rate = rates[i]
            # Express each rate as "1 in N" with thousands separators.
            reprstr += 'MMI %i: 1 in %s\n' % (mmi, format(int(1.0 / rate), ",d"))
        return reprstr

    def getLossRates(self, mmirange):
        """Get the loss rates at each of the input MMI values.

        :param mmirange:
          Array-like range of (integer) MMI values, 1-10.
        :returns:
          Array of loss rates for input MMI values.
        """
        # MMI values are 1-based; convert to 0-based indices into the
        # 10-element rate array.
        idx = np.asarray(mmirange, dtype=int) - 1
        return self._rates[idx]

    @property
    def name(self):
        """Return the name associated with this model.

        :returns:
          The name associated with this model.
        """
        return self._name

    @property
    def theta(self):
        """Return the theta value associated with this model.

        NOTE: _theta is only set by subclasses (Loglinear/LognormalModel);
        accessing this on a bare LossModel raises AttributeError.

        :returns:
          The theta associated with this model.
        """
        return self._theta

    @property
    def beta(self):
        """Return the beta value associated with this model.

        NOTE: _beta is only set by subclasses (Loglinear/LognormalModel);
        accessing this on a bare LossModel raises AttributeError.

        :returns:
          The beta value associated with this model.
        """
        return self._beta

    @property
    def alpha(self):
        """Return the alpha value associated with this model (may be None).

        :returns:
          The alpha value associated with this model (may be None).
        """
        return self._alpha

    @property
    def l2g(self):
        """Return the L2G value associated with this model.

        :returns:
          The L2G value associated with this model.
        """
        return self._l2g

    def getLosses(self, exp_pop, mmirange, rates=None):
        """Calculate losses given input arrays of population exposures and MMI values.

        :param exp_pop:
          Array of population exposed at mmirange values.
        :param mmirange:
          Array of MMI values exp_pop is exposed to.
        :param rates:
          Array of loss rates which, if specified, will be used instead of
          this model's rates.
        :returns:
          Scalar floating point number of losses.
        """
        if rates is None:
            rates = self.getLossRates(mmirange)
        # nansum: NaN population cells (e.g. ocean) contribute zero losses.
        deaths = np.nansum(rates * exp_pop)
        return deaths

    def getArea(self):
        """Calculate the area under the loss rate curve (defined for MMI 5-9).

        Used internally for model to model comparisons.

        :returns:
          Area under the loss rate curve (defined for MMI 5-9).
        """
        mmirange = np.arange(5, 10)
        rates = self.getLossRates(mmirange)
        area = np.trapz(rates, mmirange)
        return area

    # The comparison methods below previously returned None (implicitly)
    # on the false path; they now return a real bool, which is
    # backward-compatible in any boolean context.
    def __lt__(self, other):
        """Is this model less deadly than other model?

        :param other:
          Another LossModel instance.
        :returns:
          True if this model is less deadly than other model, else False.
        """
        return self.getArea() < other.getArea()

    def __le__(self, other):
        """Is this model less than or just as deadly as other model?

        :param other:
          Another LossModel instance.
        :returns:
          True if this model is less than or just as deadly as other model,
          else False.
        """
        return self.getArea() <= other.getArea()

    def __eq__(self, other):
        """Is this model equally deadly as other model?

        :param other:
          Another LossModel instance.
        :returns:
          True if this model is equally deadly as other model, else False.
        """
        return self.getArea() == other.getArea()

    def __gt__(self, other):
        """Is this model more deadly than other model?

        :param other:
          Another LossModel instance.
        :returns:
          True if this model is more deadly than other model, else False.
        """
        return self.getArea() > other.getArea()

    def __ge__(self, other):
        """Is this model greater than or just as deadly as other model?

        :param other:
          Another LossModel instance.
        :returns:
          True if this model is greater than or just as deadly as other
          model, else False.
        """
        return self.getArea() >= other.getArea()
class LoglinearModel(LossModel):
    """Loglinear loss model (defined by theta/beta (or mu/sigma) values.
    """

    def __init__(self, name, theta, beta, l2g, alpha=None):
        """Instantiate Loglinear Loss object.

        :param name:
          Name (usually two letter country code) for model.
        :param theta:
          Float value defining the theta (or mu) value for the model.
        :param beta:
          Float value defining the beta (or sigma) value for the model.
        :param l2g:
          Float value defining the value of the L2G norm calculated when model
          was derived.
        :param alpha:
          Float value defining the alpha (economic correction factor) value
          for the model. Not specified/used for fatality models.
        :returns:
          LoglinearModel instance.
        """
        self._name = name
        self._theta = theta
        self._beta = beta
        self._l2g = l2g
        self._alpha = alpha

    def getLossRates(self, mmirange):
        """Get the loss rates at each of input MMI values.

        :param mmirange:
          Array-like range of MMI values at which loss rates will be calculated.
        :returns:
          Array of loss rates for input MMI values.
        """
        mmi = np.array(mmirange)
        # Bug fix: the original line was
        #   yy = numpy.power(10, (theta - (mmi*beta)))
        # which raised NameError -- 'numpy' is imported as 'np' in this
        # module, and theta/beta live on the instance, not as locals.
        yy = np.power(10.0, self._theta - (mmi * self._beta))
        return yy
class LognormalModel(LossModel):
    """Lognormal loss model (defined by theta/beta (or mu/sigma) values.
    """

    def __init__(self, name, theta, beta, l2g, alpha=None):
        """Instantiate Lognormal Loss object.

        :param name:
          Name (usually two letter country code) for model.
        :param theta:
          Float value defining the theta (or mu) value for the model.
        :param beta:
          Float value defining the beta (or sigma) value for the model.
        :param l2g:
          Float value defining the value of the L2G norm calculated when model
          was derived.
        :param alpha:
          Float value defining the alpha (economic correction factor) value
          for the model. Not specified/used for fatality models.
        :returns:
          LognormalModel instance.
        """
        self._name = name
        self._theta = theta
        self._beta = beta
        self._l2g = l2g
        self._alpha = alpha

    def getLossRates(self, mmirange):
        """Get the loss rates at each of input MMI values.

        :param mmirange:
          Array-like range of MMI values at which loss rates will be calculated.
        :returns:
          Array of loss rates for input MMI values.
        """
        mmi = np.array(mmirange)
        # Fix: operate on the converted array 'mmi' -- the original divided
        # the raw 'mmirange' argument, which fails when callers pass a
        # plain Python list (list / float is undefined).
        xx = np.log(mmi / self._theta) / self._beta
        # Lognormal CDF evaluated via the complementary error function.
        yy = 0.5 * erfc(-xx / np.sqrt(2))
        return yy
class EmpiricalLoss(object):
"""Container class for multiple LognormalModel objects.
"""
def __init__(self, model_list, losstype='fatality'):
"""Instantiate EmpiricalLoss class.
:param model_list:
List of LognormalModel objects. The names of these will be used as keys for the getModel() method.
:param losstype:
One of 'fatality' or 'economic'.
:returns:
EmpiricalLoss instance.
"""
if losstype not in ['fatality', 'economic']:
raise PagerException('losstype must be one of ("fatality","economic").')
self._loss_type = losstype
self._model_dict = {}
for model in model_list:
self._model_dict[model.name] = model
self._country = Country() # object that can translate between different ISO country representations.
self._overrides = {} # dictionary of manually set rates (not necessarily lognormal)
def getModel(self, ccode):
"""Return the LognormalModel associated with given country code,
or a default model if country code not found.
:param ccode:
Usually two letter ISO country code.
:returns:
LognormalModel instance containing model for input country code, or a default model.
"""
ccode = ccode.upper()
default = LognormalModel('default', DEFAULT_THETA, DEFAULT_BETA, DEFAULT_L2G, alpha=DEFAULT_ALPHA)
if ccode in self._model_dict:
return self._model_dict[ccode]
else:
return default
@classmethod
def fromDefaultFatality(cls):
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this module?
fatxml = os.path.join(homedir, '..', 'data', 'fatality.xml')
return cls.fromXML(fatxml)
@classmethod
def fromDefaultEconomic(cls):
homedir = os.path.dirname(os.path.abspath(__file__)) # where is this module?
econxml = os.path.join(homedir, '..', 'data', 'economy.xml')
return cls.fromXML(econxml)
@classmethod
def fromXML(cls, xmlfile):
"""Load country-specific models from an XML file of the form:
<?xml version="1.0" encoding="US-ASCII" standalone="yes"?>
<models vstr="2.2" type="fatality">
<model ccode="AF" theta="11.613073" beta="0.180683" gnormvalue="1.0"/>
</models>
or
<?xml version="1.0" encoding="US-ASCII" standalone="yes"?>
<models vstr="1.3" type="economic">
<model alpha="15.065400" beta="0.100000" gnormvalue="4.113200" ccode="AF"/>
</models>
:param xmlfile:
XML file containing model parameters (see above).
:returns:
EmpiricalLoss instance.
"""
root = minidom.parse(xmlfile)
rootmodels = root.getElementsByTagName('models')[0]
models = rootmodels.getElementsByTagName('model')
losstype = rootmodels.getAttribute('type')
model_list = []
for model in models:
key = model.getAttribute('ccode')
theta = float(model.getAttribute('theta'))
beta = float(model.getAttribute('beta'))
l2g = float(model.getAttribute('gnormvalue'))
if model.hasAttribute('alpha'):
alpha = float(model.getAttribute('alpha'))
else:
alpha | |
<filename>mql/qprim.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from grquoting import quote, unquote
from itertools import chain
from utils import (valid_value_op, valid_timestamp_op, valid_timestamp,
valid_history_op, valid_guid, ReadMode, WriteMode,
PrepareMode, CheckMode, Missing, ResultDict, incr_subseq)
from error import (MQLResultError, MQLInternalError, MQLInternalParseError,
MQLValueAlreadyInUseError, MQLTooManyValuesForUniqueQuery)
from pymql.log import LOG
_boolean_name = {True: 'true', False: 'false'}
_make_boolean = {'true': True, 'false': False}
class QueryPrimitive(object):
"""
A single primitive in a query. May be a link or a node depending
on left, right and typeguid.
QueryPrimitives deal with unadorned graph names (e.g. scope, not
@scope or :scope) although they do provide a convienence
constructor that matches and strips the '@:' prefix
They deal with # sign guids -- only the actual graph query
generator removes # signs.
A QueryPrimitive holds a reference (self.query) to the query node
that created it but does not use it, and particularly does not use
the attributes of the query
In fact the only use QueryPrimitive makes of self.query is to
create result.query for the benefit of the query processing in
LowQuery
"""
# these may be
# - a single literal guid
# - a list of literal guids
# - an empty dict (aka a query)
# - a reference to another QueryPrimitive
pointers = set(['typeguid', 'scope'])
connectors = set(['left', 'right'])
# 'guid' must be a single or set of literal guids (not a reference to another QP)
guid_field = set(['guid'])
# not interested in 'name'
# must be a literal string. see valid_values.
# 'previous' may eventually be a pointer, but the graph doesn't
# support it yet
values = set(['datatype', 'value', 'timestamp', 'live', 'previous', 'next'])
directives = set(['optional', 'sort', 'pagesize', 'newest', 'oldest'])
# the user must not specify these directly.
result_field = set(['result'])
# used for ordering
derived = set(['index'])
cursor_field = set(['cursor'])
comparator_field = set(['comparator'])
# these are used to generate key and unique clauses
writeinsns = set(
['insert', 'delete', 'update', 'link', 'unlink', 'ensure', 'ensurechild'])
# mapping from python => graphd types
# note we can't map from stuff like string->'url', so use this
# structure wisely.
make_datatypes = {
int: 'integer',
long: 'integer',
str: 'string',
float: 'float',
bool: 'boolean',
unicode: 'string'
}
# mapping from graphd => python types - for use with isinstance,
# so tuples are allowed here.
# note 'url' and 'bytestring' are in here, because graphd can
# remember that
check_datatypes = {
'integer': (int, long),
'float': float,
'url': str,
'string': basestring,
'boolean': bool,
'bytestring': str,
'timestamp': str,
'null': type(None)
}
check_comparators = set(['octet', 'number', 'datetime'])
# parent is the pointer where I find the parent. Can be 'left' or 'right'
# It is not possible to construct a query containing "... (<-scope ..."
#
# If I am the contents of my parent, parent is None, and container is a direct pointer to the parent, not an indirect reference
#
# type is what the parent calls me. I will use this to infer typeguid if it is needed and not specified.
# id is a namespace name for me.
#
# reverse implies when someone calls add_parent() they get right and add_child() gets left.
#
# contents is the list of primitives that link to me and are not my parent or a pointer in me
# (i.e. everything that I contain in the graph query without an -> on my side)
#
# children are the set of pointers I use to actually point to other things.
#
# child is the guid of my child. If I see :child, I will not generate a real subquery.
# just left=:child or right=:child. This may abruptly curtail the query.
# note that :child is always the same as @guid at the same level.
#
# these are the other meaningful slots - in QP that we don't understand
# but may be passed. Note that parent, children, contents and vars
# are never given to us directly.
#
# internal_use = set(['parent','children','contents','child','container','vars','valueops','timestampops'])
# default allows a (unique) link to be arbitarily queried during a write, but a default value
# to be substituted if a value is not found.
special = set(['type', 'id', 'reverse', 'unique', 'default'])
input_fields = guid_field | pointers | values | directives | derived | writeinsns | special | comparator_field
all_fields = input_fields | result_field | connectors | cursor_field
graphfields = guid_field | pointers | connectors | values | directives | result_field | cursor_field | comparator_field
results = pointers | values
result_pointers = (
pointers | guid_field | connectors | set(('previous', 'next')))
allowed_state_transitions = {
None: ('insert', 'delete', 'ensurechild', 'ensure', 'link', 'unlink',
'match', 'unique', 'default', 'order_read', 'order_info',
'namespace_info'),
'link': ('create', 'found'),
'ensurechild': ('create', 'found'),
'ensure': ('create', 'found'),
'default': ('found',),
'match': ('found',),
'insert': ('create',),
'delete': ('remove', 'notpresent'),
'unlink': ('remove', 'notpresent'),
'remove': ('written',),
'create': ('written',),
'unique': ('unique_check', 'update_check', 'replace_check'),
'replace_check': ('update_check', 'checked'),
'update_check': ('checked',),
'unique_check': ('duplicate', 'checked'),
'namespace_info': ('namespace_unique', 'namespace_regular'),
'order_read': ('order_found', 'order_missing'),
'order_found': ('create', 'found'),
'order_missing': ('create',),
'order_info': ('checked',),
'namespace_unique': ('done',),
'namespace_regular': ('done',),
'duplicate': ('done',),
'checked': ('done',),
'written': ('done',),
'notpresent': ('done',),
'found': ('done',)
}
    def __init__(self, prefix, qdict, mode):
        """Initialize a QueryPrimitive for one node/link of a parsed query.

        :param prefix: which facet of qdict this primitive represents --
            '@' (node), ':' (link), '=' (unique component), '?'/'&' (order
            attachment/order query) or '~' (namespace unique check).
        :param qdict: the query dict node that created this primitive; kept
            around as self.query but otherwise not consulted here.
        :param mode: ReadMode, WriteMode or CheckMode.
        """
        # Pre-set every known field to None so that the duplicate-attribute
        # checks in constrain() (getattr(self, k) is not None) are safe.
        for field in self.all_fields:
            setattr(self, field, None)
        # we always ask for the guid of everything.
        self.result = ['guid']
        self.vars = {}
        self.contents = []
        self.children = []
        self.container = None
        self.parent = None
        self.ordered = None
        self.order_info = []
        self.child = None
        # operator buckets populated by constrain() from keys like value<=.
        self.valueops = {}
        self.timestampops = {}
        self.history_ops = {}
        # where the unique checks are run
        self.unique_checks = []
        self.unique_namespace_info = None
        self.unique_namespace_checks = []
        # mode is ReadMode, WriteMode or CheckMode
        self.mode = mode
        self.state = None
        # these start out as Missing, not just None (aka unspecified)
        self.left = Missing
        self.right = Missing
        self.access_control_ok = False
        self.prefix = prefix
        self.query = qdict
        # this says - "There is only one slot for me in the query"
        # it only makes sense for links - nodes must always be unique
        # (as they are the direct slot of a link or another node)
        self.query_unique = (prefix == '@' or qdict.list is None)
        # keep the key this is referred to around as well.
        if qdict.key is not None:
            self.query_key = qdict.key
        # Apply all prefix-matching keys of qdict to this primitive.
        self.constrain(prefix, qdict)

    # prefix is '@' (nodes) ':' (links),
    # '=' for unique components,
    # '?' for order attachments and '&' for order queries (icky icky)
    # '~' is for namespace unique checks,
    def constrain(self, prefix, qdict):
        """Fold every key of qdict that starts with `prefix` into self.

        Keys are dispatched by kind: '$'-variables, value/timestamp/history
        comparison operators, and ordinary fields. Raises MQLInternalError
        for an unknown prefix and MQLInternalParseError for duplicates.
        """
        if prefix not in '@:=?&~':
            raise MQLInternalError(
                self.query, 'invalid prefix %(prefix)s', prefix=prefix)
        for fullkey, v in qdict.iteritems():
            # only keys belonging to this primitive's facet are relevant.
            if fullkey[0] != prefix:
                continue
            k = fullkey[1:]
            if k[0] == '$':
                # query variable (e.g. '$var'): collected, not validated here.
                self.vars[k] = v
            elif valid_value_op(k):
                # value comparison operator; at most one of each kind.
                if k in self.valueops:
                    raise MQLInternalParseError(
                        self.query, 'Duplicate comparison operator %(key)s', key=fullkey)
                self.valueops[k] = self.check_comparison_op(k, v)
            elif valid_timestamp_op(k):
                # timestamp comparison operator; at most one of each kind.
                if k in self.timestampops:
                    raise MQLInternalParseError(
                        self.query, 'Duplicate timestamp operator %(key)s', key=fullkey)
                self.timestampops[k] = self.check_timestamp_op(k, v)
            elif valid_history_op(k):
                # history operator; at most one of each kind.
                if k in self.history_ops:
                    raise MQLInternalParseError(
                        self.query, 'Duplicate history operator %(key)s', key=fullkey)
                self.history_ops[k] = self.check_history_op(k, v)
            else:
                # ordinary field: normalize it, reject duplicates, record it.
                newv = self.transform_field(k, v)
                if newv is Missing:
                    # Missing means "nothing to set" -- silently skip.
                    pass
                else:
                    if getattr(self, k) is not None:
                        raise MQLInternalParseError(
                            self.query,
                            'Duplicate attribute %(key)s',
                            key=fullkey,
                            value=v,
                            duplicate=newv)
                    # pointer/value fields are also requested in the result.
                    if k in self.results:
                        self.result.append(k)
                    setattr(self, k, newv)
        # infer or verify datatype now that value/datatype are both known.
        self.check_or_make_datatype()
        if self.mode in (WriteMode, CheckMode):
            self.check_write_directives()
def check_or_make_datatype(self):
# the value and the datatype must agree. The value must agree
# with the comparison operators which must agree with each
# other and restrict the possible output.
# first check the existing value, if requested
if self.value is not | |
(667760*
mckin**8*q_cut**3*sE)/mbkin**14 - (1415408*mckin**10*q_cut**3*sE)/
mbkin**16 - (2723568*mckin**12*q_cut**3*sE)/mbkin**18 -
(698608*mckin**14*q_cut**3*sE)/mbkin**20 - (7168*mckin**16*q_cut**3*sE)/
mbkin**22 + (78384*q_cut**4*sE)/mbkin**8 + (1397952*mckin**2*q_cut**4*
sE)/mbkin**10 + (4097136*mckin**4*q_cut**4*sE)/mbkin**12 +
(3924288*mckin**6*q_cut**4*sE)/mbkin**14 + (4126192*mckin**8*q_cut**4*sE)/
mbkin**16 + (3239904*mckin**10*q_cut**4*sE)/mbkin**18 +
(575280*mckin**12*q_cut**4*sE)/mbkin**20 + (3488*mckin**14*q_cut**4*sE)/
mbkin**22 - (9328*q_cut**5*sE)/mbkin**10 - (115056*mckin**2*q_cut**5*sE)/
mbkin**12 - (386496*mckin**4*q_cut**5*sE)/mbkin**14 - (386944*
mckin**6*q_cut**5*sE)/mbkin**16 - (361392*mckin**8*q_cut**5*sE)/
mbkin**18 - (63792*mckin**10*q_cut**5*sE)/mbkin**20 - (3136*mckin**12*
q_cut**5*sE)/mbkin**22 + (1200*q_cut**6*sE)/mbkin**12 - (24960*mckin**2*
q_cut**6*sE)/mbkin**14 - (55200*mckin**4*q_cut**6*sE)/mbkin**16 -
(51840*mckin**6*q_cut**6*sE)/mbkin**18 + (19760*mckin**8*q_cut**6*sE)/
mbkin**20 - (1120*mckin**10*q_cut**6*sE)/mbkin**22 + (6960*q_cut**7*sE)/
mbkin**14 - (5904*mckin**2*q_cut**7*sE)/mbkin**16 - (15888*mckin**4*
q_cut**7*sE)/mbkin**18 + (5232*mckin**6*q_cut**7*sE)/mbkin**20 +
(15296*mckin**8*q_cut**7*sE)/mbkin**22 - (6112*q_cut**8*sE)/mbkin**16 -
(12048*mckin**2*q_cut**8*sE)/mbkin**18 - (34848*mckin**4*q_cut**8*sE)/
mbkin**20 - (15472*mckin**6*q_cut**8*sE)/mbkin**22 - (1008*q_cut**9*sE)/
mbkin**18 + (3664*mckin**2*q_cut**9*sE)/mbkin**20 + (4672*mckin**4*
q_cut**9*sE)/mbkin**22 - (560*mckin**2*q_cut**10*sE)/mbkin**22 +
(320*q_cut**11*sE)/mbkin**22 - 6084*sqB - (51628*mckin**2*sqB)/
mbkin**2 + (647540*mckin**4*sqB)/mbkin**4 + (2185704*mckin**6*
sqB)/mbkin**6 - (1160436*mckin**8*sqB)/mbkin**8 - (3871392*
mckin**10*sqB)/mbkin**10 - (5292*mckin**12*sqB)/mbkin**12 +
(1785960*mckin**14*sqB)/mbkin**14 + (538296*mckin**16*sqB)/
mbkin**16 - (48564*mckin**18*sqB)/mbkin**18 - (14024*mckin**20*
sqB)/mbkin**20 - (80*mckin**22*sqB)/mbkin**22 + (14649*q_cut*sqB)/
mbkin**2 + (230462*mckin**2*q_cut*sqB)/mbkin**4 - (478107*mckin**4*
q_cut*sqB)/mbkin**6 - (7544880*mckin**6*q_cut*sqB)/mbkin**8 -
(17208996*mckin**8*q_cut*sqB)/mbkin**10 - (15625344*mckin**10*q_cut*
sqB)/mbkin**12 - (5968452*mckin**12*q_cut*sqB)/mbkin**14 -
(316608*mckin**14*q_cut*sqB)/mbkin**16 + (291675*mckin**16*q_cut*sqB)/
mbkin**18 + (35826*mckin**18*q_cut*sqB)/mbkin**20 + (175*mckin**20*
q_cut*sqB)/mbkin**22 - (3789*q_cut**2*sqB)/mbkin**4 - (117945*mckin**2*
q_cut**2*sqB)/mbkin**6 - (543048*mckin**4*q_cut**2*sqB)/mbkin**8 -
(622812*mckin**6*q_cut**2*sqB)/mbkin**10 - (569172*mckin**8*q_cut**2*sqB)/
mbkin**12 - (1021296*mckin**10*q_cut**2*sqB)/mbkin**14 -
(789072*mckin**12*q_cut**2*sqB)/mbkin**16 - (201876*mckin**14*q_cut**2*
sqB)/mbkin**18 - (11799*mckin**16*q_cut**2*sqB)/mbkin**20 +
(9*mckin**18*q_cut**2*sqB)/mbkin**22 - (13812*q_cut**3*sqB)/mbkin**6 -
(270534*mckin**2*q_cut**3*sqB)/mbkin**8 - (1041540*mckin**4*q_cut**3*sqB)/
mbkin**10 - (1476162*mckin**6*q_cut**3*sqB)/mbkin**12 - (1406192*
mckin**8*q_cut**3*sqB)/mbkin**14 - (991022*mckin**10*q_cut**3*sqB)/
mbkin**16 - (329844*mckin**12*q_cut**3*sqB)/mbkin**18 -
(33178*mckin**14*q_cut**3*sqB)/mbkin**20 - (196*mckin**16*q_cut**3*sqB)/
mbkin**22 + (9996*q_cut**4*sqB)/mbkin**8 + (239034*mckin**2*q_cut**4*
sqB)/mbkin**10 + (1181778*mckin**4*q_cut**4*sqB)/mbkin**12 +
(1946208*mckin**6*q_cut**4*sqB)/mbkin**14 + (1272532*mckin**8*q_cut**4*
sqB)/mbkin**16 + (332286*mckin**10*q_cut**4*sqB)/mbkin**18 +
(26154*mckin**12*q_cut**4*sqB)/mbkin**20 + (92*mckin**14*q_cut**4*sqB)/
mbkin**22 - (442*q_cut**5*sqB)/mbkin**10 - (22842*mckin**2*q_cut**5*sqB)/
mbkin**12 - (87144*mckin**4*q_cut**5*sqB)/mbkin**14 - (111472*
mckin**6*q_cut**5*sqB)/mbkin**16 - (31992*mckin**8*q_cut**5*sqB)/
mbkin**18 - (2910*mckin**10*q_cut**5*sqB)/mbkin**20 - (70*mckin**12*
q_cut**5*sqB)/mbkin**22 - (330*q_cut**6*sqB)/mbkin**12 - (4020*mckin**2*
q_cut**6*sqB)/mbkin**14 - (10620*mckin**4*q_cut**6*sqB)/mbkin**16 -
(5640*mckin**6*q_cut**6*sqB)/mbkin**18 + (800*mckin**8*q_cut**6*sqB)/
mbkin**20 - (70*mckin**10*q_cut**6*sqB)/mbkin**22 - (672*q_cut**7*sqB)/
mbkin**14 - (2298*mckin**2*q_cut**7*sqB)/mbkin**16 - (4992*mckin**4*
q_cut**7*sqB)/mbkin**18 + (810*mckin**6*q_cut**7*sqB)/mbkin**20 +
(416*mckin**8*q_cut**7*sqB)/mbkin**22 + (1016*q_cut**8*sqB)/mbkin**16 -
(606*mckin**2*q_cut**8*sqB)/mbkin**18 - (1986*mckin**4*q_cut**8*sqB)/
mbkin**20 - (364*mckin**6*q_cut**8*sqB)/mbkin**22 - (447*q_cut**9*sqB)/
mbkin**18 + (412*mckin**2*q_cut**9*sqB)/mbkin**20 + (103*mckin**4*
q_cut**9*sqB)/mbkin**22 - (105*q_cut**10*sqB)/mbkin**20 - (35*mckin**2*
q_cut**10*sqB)/mbkin**22 + (20*q_cut**11*sqB)/mbkin**22)))/mbkin**6) -
6*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)*
(16*(-((-1 + mckin**2/mbkin**2)**4*(-487 + (1378*mckin**2)/mbkin**2 +
(103211*mckin**4)/mbkin**4 - (451836*mckin**6)/mbkin**6 -
(2360958*mckin**8)/mbkin**8 + (4610472*mckin**10)/mbkin**10 +
(11668476*mckin**12)/mbkin**12 + (5391216*mckin**14)/mbkin**14 -
(326961*mckin**16)/mbkin**16 - (795314*mckin**18)/mbkin**18 -
(332527*mckin**20)/mbkin**20 - (1436*mckin**22)/mbkin**22 +
(3726*mckin**24)/mbkin**24)) + (2*(-1 + mckin**2/mbkin**2)**2*
(-1464 + (878*mckin**2)/mbkin**2 + (264747*mckin**4)/mbkin**4 -
(654866*mckin**6)/mbkin**6 - (6024249*mckin**8)/mbkin**8 +
(5029272*mckin**10)/mbkin**10 + (32640138*mckin**12)/mbkin**12 +
(33450960*mckin**14)/mbkin**14 + (10105578*mckin**16)/mbkin**16 -
(1628862*mckin**18)/mbkin**18 - (2359709*mckin**20)/mbkin**20 -
(806502*mckin**22)/mbkin**22 + (8687*mckin**24)/mbkin**24 +
(11232*mckin**26)/mbkin**26)*q_cut)/mbkin**2 +
((6355 + (7208*mckin**2)/mbkin**2 - (936656*mckin**4)/mbkin**4 +
(1373882*mckin**6)/mbkin**6 + (17299235*mckin**8)/mbkin**8 -
(10086138*mckin**10)/mbkin**10 - (79195350*mckin**12)/mbkin**12 -
(110084676*mckin**14)/mbkin**14 - (87026223*mckin**16)/mbkin**16 -
(28155028*mckin**18)/mbkin**18 + (5642380*mckin**20)/mbkin**20 +
(8425514*mckin**22)/mbkin**22 + (2715769*mckin**24)/mbkin**24 -
(80762*mckin**26)/mbkin**26 - (48870*mckin**28)/mbkin**28)*q_cut**2)/
mbkin**4 + (2*(-1960 - (9526*mckin**2)/mbkin**2 + (177979*mckin**4)/
mbkin**4 + (103540*mckin**6)/mbkin**6 - (2959273*mckin**8)/
mbkin**8 + (220386*mckin**10)/mbkin**10 + (17116804*mckin**12)/
mbkin**12 + (16837442*mckin**14)/mbkin**14 - (700248*mckin**16)/
mbkin**16 - (6799396*mckin**18)/mbkin**18 - (3305847*mckin**20)/
mbkin**20 - (420862*mckin**22)/mbkin**22 + (77361*mckin**24)/
mbkin**24 + (15120*mckin**26)/mbkin**26)*q_cut**3)/mbkin**6 +
((-6845 - (37942*mckin**2)/mbkin**2 + (791885*mckin**4)/mbkin**4 +
(1728634*mckin**6)/mbkin**6 - (5957916*mckin**8)/mbkin**8 -
(15444608*mckin**10)/mbkin**10 - (18941330*mckin**12)/mbkin**12 -
(18044424*mckin**14)/mbkin**14 - (12783065*mckin**16)/mbkin**16 -
(7164330*mckin**18)/mbkin**18 - (1748003*mckin**20)/mbkin**20 +
(205406*mckin**22)/mbkin**22 + (52650*mckin**24)/mbkin**24)*q_cut**4)/
mbkin**8 - (4*(-3434 - (26020*mckin**2)/mbkin**2 + (254961*mckin**4)/
mbkin**4 + (749581*mckin**6)/mbkin**6 - (1690441*mckin**8)/
mbkin**8 - (6219248*mckin**10)/mbkin**10 - (8487200*mckin**12)/
mbkin**12 - (6481863*mckin**14)/mbkin**14 - (2731315*mckin**16)/
mbkin**16 - (318150*mckin**18)/mbkin**18 + (155525*mckin**20)/
mbkin**20 + (26532*mckin**22)/mbkin**22)*q_cut**5)/mbkin**10 +
((-6969 - (50096*mckin**2)/mbkin**2 + (479318*mckin**4)/mbkin**4 +
(1199560*mckin**6)/mbkin**6 - (3991626*mckin**8)/mbkin**8 -
(11924636*mckin**10)/mbkin**10 - (10756192*mckin**12)/mbkin**12 -
(4517112*mckin**14)/mbkin**14 - (393005*mckin**16)/mbkin**16 +
(317212*mckin**18)/mbkin**18 + (54882*mckin**20)/mbkin**20)*q_cut**6)/
mbkin**12 + (4*(-960 - (14642*mckin**2)/mbkin**2 - (31541*mckin**4)/
mbkin**4 + (94173*mckin**6)/mbkin**6 + (501370*mckin**8)/
mbkin**8 + (725465*mckin**10)/mbkin**10 + (578564*mckin**12)/
mbkin**12 + (295434*mckin**14)/mbkin**14 + (75253*mckin**16)/
mbkin**16 + (7200*mckin**18)/mbkin**18)*q_cut**7)/mbkin**14 -
((-6645 - (78502*mckin**2)/mbkin**2 - (8337*mckin**4)/mbkin**4 +
(664936*mckin**6)/mbkin**6 + (1551613*mckin**8)/mbkin**8 +
(1896026*mckin**10)/mbkin**10 + (1248611*mckin**12)/mbkin**12 +
(395792*mckin**14)/mbkin**14 + (54090*mckin**16)/mbkin**16)*q_cut**8)/
mbkin**16 + (2*(-1760 - (16554*mckin**2)/mbkin**2 + (17955*mckin**4)/
mbkin**4 + (167540*mckin**6)/mbkin**6 + (275800*mckin**8)/
mbkin**8 + (197764*mckin**10)/mbkin**10 + (71587*mckin**12)/
mbkin**12 + (16560*mckin**14)/mbkin**14)*q_cut**9)/mbkin**18 -
((-777 - (4296*mckin**2)/mbkin**2 + (18684*mckin**4)/mbkin**4 +
(39826*mckin**6)/mbkin**6 + (6697*mckin**8)/mbkin**8 +
(6210*mckin**10)/mbkin**10 + (8946*mckin**12)/mbkin**12)*q_cut**10)/
mbkin**20 - (2*(-136 - (442*mckin**2)/mbkin**2 - (79*mckin**4)/
mbkin**4 + (5286*mckin**6)/mbkin**6 + (5739*mckin**8)/mbkin**8 +
(2448*mckin**10)/mbkin**10)*q_cut**11)/mbkin**22 +
(5*(-83 - (138*mckin**2)/mbkin**2 + (735*mckin**4)/mbkin**4 +
(1766*mckin**6)/mbkin**6 + (1494*mckin**8)/mbkin**8)*q_cut**12)/
mbkin**24 + (200*(mbkin**6 + mbkin**4*mckin**2 - 14*mbkin**2*
mckin**4 - 18*mckin**6)*q_cut**13)/mbkin**32 -
(35*(mbkin**4 - 18*mckin**4)*q_cut**14)/mbkin**32)*rE +
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)*((5040*mckin**2*muG**2)/mbkin**2 -
(17868*mckin**4*muG**2)/mbkin**4 - (215088*mckin**6*muG**2)/mbkin**6 +
(1321848*mckin**8*muG**2)/mbkin**8 - (532800*mckin**10*muG**2)/
mbkin**10 + (2895012*mckin**12*muG**2)/mbkin**12 +
(26575200*mckin**14*muG**2)/mbkin**14 - (40119408*mckin**16*muG**2)/
mbkin**16 - (19286928*mckin**18*muG**2)/mbkin**18 +
(31501980*mckin**20*muG**2)/mbkin**20 + (485328*mckin**22*muG**2)/
mbkin**22 - (2758152*mckin**24*muG**2)/mbkin**24 +
(108576*mckin**26*muG**2)/mbkin**26 + (37260*mckin**28*muG**2)/
mbkin**28 + (1680*mckin**2*muG*mupi)/mbkin**2 -
(15972*mckin**4*muG*mupi)/mbkin**4 - (151440*mckin**6*muG*mupi)/
mbkin**6 + (1496040*mckin**8*muG*mupi)/mbkin**8 -
(734400*mckin**10*muG*mupi)/mbkin**10 - (18498132*mckin**12*muG*
mupi)/mbkin**12 + (1055520*mckin**14*muG*mupi)/mbkin**14 +
(33922224*mckin**16*muG*mupi)/mbkin**16 +
(353232*mckin**18*muG*mupi)/mbkin**18 - (17921580*mckin**20*muG*
mupi)/mbkin**20 - (528528*mckin**22*muG*mupi)/mbkin**22 +
(1024872*mckin**24*muG*mupi)/mbkin**24 + (3936*mckin**26*muG*mupi)/
mbkin**26 - (7452*mckin**28*muG*mupi)/mbkin**28 -
(6720*mckin**2*muG**2*q_cut)/mbkin**4 - (1920*mckin**4*muG**2*q_cut)/
mbkin**6 + (70728*mckin**6*muG**2*q_cut)/mbkin**8 +
(1722336*mckin**8*muG**2*q_cut)/mbkin**10 - (391224*mckin**10*muG**2*q_cut)/
mbkin**12 - (29557776*mckin**12*muG**2*q_cut)/mbkin**14 -
(66936864*mckin**14*muG**2*q_cut)/mbkin**16 - (114608208*mckin**16*muG**2*
q_cut)/mbkin**18 - (53646816*mckin**18*muG**2*q_cut)/mbkin**20 +
(17158224*mckin**20*muG**2*q_cut)/mbkin**22 +
(7600536*mckin**22*muG**2*q_cut)/mbkin**24 - (752976*mckin**24*muG**2*q_cut)/
mbkin**26 - (150120*mckin**26*muG**2*q_cut)/mbkin**28 -
(6720*mckin**2*muG*mupi*q_cut)/mbkin**4 + (42240*mckin**4*muG*mupi*q_cut)/
mbkin**6 + (554712*mckin**6*muG*mupi*q_cut)/mbkin**8 -
(3011040*mckin**8*muG*mupi*q_cut)/mbkin**10 -
(8769960*mckin**10*muG*mupi*q_cut)/mbkin**12 +
(32194512*mckin**12*muG*mupi*q_cut)/mbkin**14 +
(98364960*mckin**14*muG*mupi*q_cut)/mbkin**16 +
(99688464*mckin**16*muG*mupi*q_cut)/mbkin**18 +
(30618720*mckin**18*muG*mupi*q_cut)/mbkin**20 -
(7389840*mckin**20*muG*mupi*q_cut)/mbkin**22 -
(2901432*mckin**22*muG*mupi*q_cut)/mbkin**24 +
(86160*mckin**24*muG*mupi*q_cut)/mbkin**26 + (30024*mckin**26*muG*mupi*
q_cut)/mbkin**28 - (6720*mckin**2*muG**2*q_cut**2)/mbkin**6 +
(33600*mckin**4*muG**2*q_cut**2)/mbkin**8 + (469416*mckin**6*muG**2*q_cut**2)/
mbkin**10 - (866592*mckin**8*muG**2*q_cut**2)/mbkin**12 -
(10653072*mckin**10*muG**2*q_cut**2)/mbkin**14 -
(9608352*mckin**12*muG**2*q_cut**2)/mbkin**16 +
(14150016*mckin**14*muG**2*q_cut**2)/mbkin**18 +
(2844480*mckin**16*muG**2*q_cut**2)/mbkin**20 -
(13364592*mckin**18*muG**2*q_cut**2)/mbkin**22 -
(4050912*mckin**20*muG**2*q_cut**2)/mbkin**24 +
(943128*mckin**22*muG**2*q_cut**2)/mbkin**26 +
(151200*mckin**24*muG**2*q_cut**2)/mbkin**28 +
(6720*mckin**2*muG*mupi*q_cut**2)/mbkin**6 - (20160*mckin**4*muG*mupi*
q_cut**2)/mbkin**8 - (408456*mckin**6*muG*mupi*q_cut**2)/mbkin**10 +
(674208*mckin**8*muG*mupi*q_cut**2)/mbkin**12 +
(6670800*mckin**10*muG*mupi*q_cut**2)/mbkin**14 +
(5097888*mckin**12*muG*mupi*q_cut**2)/mbkin**16 -
(2860416*mckin**14*muG*mupi*q_cut**2)/mbkin**18 +
(3660480*mckin**16*muG*mupi*q_cut**2)/mbkin**20 +
(6068016*mckin**18*muG*mupi*q_cut**2)/mbkin**22 +
(1275744*mckin**20*muG*mupi*q_cut**2)/mbkin**24 -
(176184*mckin**22*muG*mupi*q_cut**2)/mbkin**26 -
(30240*mckin**24*muG*mupi*q_cut**2)/mbkin**28 -
(6720*mckin**2*muG**2*q_cut**3)/mbkin**8 - (29304*mckin**4*muG**2*q_cut**3)/
mbkin**10 + (741624*mckin**6*muG**2*q_cut**3)/mbkin**12 -
(2216208*mckin**8*muG**2*q_cut**3)/mbkin**14 - (30858432*mckin**10*muG**2*
q_cut**3)/mbkin**16 - (65947824*mckin**12*muG**2*q_cut**3)/mbkin**18 -
(68148864*mckin**14*muG**2*q_cut**3)/mbkin**20 -
(32704464*mckin**16*muG**2*q_cut**3)/mbkin**22 -
(2054112*mckin**18*muG**2*q_cut**3)/mbkin**24 +
(1490184*mckin**20*muG**2*q_cut**3)/mbkin**26 +
(150120*mckin**22*muG**2*q_cut**3)/mbkin**28 +
(6720*mckin**2*muG*mupi*q_cut**3)/mbkin**8 - (30696*mckin**4*muG*mupi*
q_cut**3)/mbkin**10 - (616728*mckin**6*muG*mupi*q_cut**3)/mbkin**12 +
(1846224*mckin**8*muG*mupi*q_cut**3)/mbkin**14 +
(16026048*mckin**10*muG*mupi*q_cut**3)/mbkin**16 +
(29689968*mckin**12*muG*mupi*q_cut**3)/mbkin**18 +
(29566464*mckin**14*muG*mupi*q_cut**3)/mbkin**20 +
(14890128*mckin**16*muG*mupi*q_cut**3)/mbkin**22 +
(2012256*mckin**18*muG*mupi*q_cut**3)/mbkin**24 -
(221160*mckin**20*muG*mupi*q_cut**3)/mbkin**26 -
(30024*mckin**22*muG*mupi*q_cut**3)/mbkin**28 +
(43680*mckin**2*muG**2*q_cut**4)/mbkin**10 + (21372*mckin**4*muG**2*q_cut**4)/
mbkin**12 - (2449776*mckin**6*muG**2*q_cut**4)/mbkin**14 -
(764592*mckin**8*muG**2*q_cut**4)/mbkin**16 + (31572960*mckin**10*muG**2*
q_cut**4)/mbkin**18 + (62188536*mckin**12*muG**2*q_cut**4)/mbkin**20 +
(35488272*mckin**14*muG**2*q_cut**4)/mbkin**22 -
(47280*mckin**16*muG**2*q_cut**4)/mbkin**24 - (3628512*mckin**18*muG**2*
q_cut**4)/mbkin**26 - (377460*mckin**20*muG**2*q_cut**4)/mbkin**28 -
(16800*mckin**2*muG*mupi*q_cut**4)/mbkin**10 +
(35508*mckin**4*muG*mupi*q_cut**4)/mbkin**12 +
(1075248*mckin**6*muG*mupi*q_cut**4)/mbkin**14 -
(884880*mckin**8*muG*mupi*q_cut**4)/mbkin**16 -
(16597728*mckin**10*muG*mupi*q_cut**4)/mbkin**18 -
(28130712*mckin**12*muG*mupi*q_cut**4)/mbkin**20 -
(14960208*mckin**14*muG*mupi*q_cut**4)/mbkin**22 -
(1225488*mckin**16*muG*mupi*q_cut**4)/mbkin**24 +
(675168*mckin**18*muG*mupi*q_cut**4)/mbkin**26 +
(75492*mckin**20*muG*mupi*q_cut**4)/mbkin**28 -
(20160*mckin**2*muG**2*q_cut**5)/mbkin**12 + (29568*mckin**4*muG**2*q_cut**5)/
mbkin**14 + (825600*mckin**6*muG**2*q_cut**5)/mbkin**16 -
(2256528*mckin**8*muG**2*q_cut**5)/mbkin**18 - (15292416*mckin**10*muG**2*
q_cut**5)/mbkin**20 - (16239504*mckin**12*muG**2*q_cut**5)/mbkin**22 -
(2731920*mckin**14*muG**2*q_cut**5)/mbkin**24 +
(1086624*mckin**16*muG**2*q_cut**5)/mbkin**26 +
(156240*mckin**18*muG**2*q_cut**5)/mbkin**28 +
(6720*mckin**2*muG*mupi*q_cut**5)/mbkin**12 -
(16128*mckin**4*muG*mupi*q_cut**5)/mbkin**14 -
(317952*mckin**6*muG*mupi*q_cut**5)/mbkin**16 +
(462672*mckin**8*muG*mupi*q_cut**5)/mbkin**18 +
(3914496*mckin**10*muG*mupi*q_cut**5)/mbkin**20 +
(3555792*mckin**12*muG*mupi*q_cut**5)/mbkin**22 +
(480720*mckin**14*muG*mupi*q_cut**5)/mbkin**24 -
(223776*mckin**16*muG*mupi*q_cut**5)/mbkin**26 -
(31248*mckin**18*muG*mupi*q_cut**5)/mbkin**28 -
(33600*mckin**2*muG**2*q_cut**6)/mbkin**14 - (30240*mckin**4*muG**2*q_cut**6)/
mbkin**16 + (2204544*mckin**6*muG**2*q_cut**6)/mbkin**18 +
(9292416*mckin**8*muG**2*q_cut**6)/mbkin**20 + (13925232*mckin**10*muG**2*
q_cut**6)/mbkin**22 + (8644032*mckin**12*muG**2*q_cut**6)/mbkin**24 +
(2204496*mckin**14*muG**2*q_cut**6)/mbkin**26 +
(141120*mckin**16*muG**2*q_cut**6)/mbkin**28 +
(6720*mckin**2*muG*mupi*q_cut**6)/mbkin**14 +
(3360*mckin**4*muG*mupi*q_cut**6)/mbkin**16 - (436224*mckin**6*muG*mupi*
q_cut**6)/mbkin**18 - (1708800*mckin**8*muG*mupi*q_cut**6)/mbkin**20 -
(2377776*mckin**10*muG*mupi*q_cut**6)/mbkin**22 -
(1596096*mckin**12*muG*mupi*q_cut**6)/mbkin**24 -
(431760*mckin**14*muG*mupi*q_cut**6)/mbkin**26 -
(28224*mckin**16*muG*mupi*q_cut**6)/mbkin**28 +
(33600*mckin**2*muG**2*q_cut**7)/mbkin**16 - (14640*mckin**4*muG**2*q_cut**7)/
mbkin**18 - (2029344*mckin**6*muG**2*q_cut**7)/mbkin**20 -
(5885232*mckin**8*muG**2*q_cut**7)/mbkin**22 -
(5477280*mckin**10*muG**2*q_cut**7)/mbkin**24 -
(1841328*mckin**12*muG**2*q_cut**7)/mbkin**26 -
(162000*mckin**14*muG**2*q_cut**7)/mbkin**28 -
(6720*mckin**2*muG*mupi*q_cut**7)/mbkin**16 +
(9840*mckin**4*muG*mupi*q_cut**7)/mbkin**18 + (378144*mckin**6*muG*mupi*
q_cut**7)/mbkin**20 + (1128048*mckin**8*muG*mupi*q_cut**7)/mbkin**22 +
(1040928*mckin**10*muG*mupi*q_cut**7)/mbkin**24 +
(353904*mckin**12*muG*mupi*q_cut**7)/mbkin**26 +
(32400*mckin**14*muG*mupi*q_cut**7)/mbkin**28 -
(8400*mckin**2*muG**2*q_cut**8)/mbkin**18 + (17964*mckin**4*muG**2*q_cut**8)/
mbkin**20 + (495648*mckin**6*muG**2*q_cut**8)/mbkin**22 +
(865080*mckin**8*muG**2*q_cut**8)/mbkin**24 + (354816*mckin**10*muG**2*
q_cut**8)/mbkin**26 + (75780*mckin**12*muG**2*q_cut**8)/mbkin**28 +
(1680*mckin**2*muG*mupi*q_cut**8)/mbkin**18 -
(12924*mckin**4*muG*mupi*q_cut**8)/mbkin**20 -
(67104*mckin**6*muG*mupi*q_cut**8)/mbkin**22 -
(100824*mckin**8*muG*mupi*q_cut**8)/mbkin**24 -
(57600*mckin**10*muG*mupi*q_cut**8)/mbkin**26 -
(15156*mckin**12*muG*mupi*q_cut**8)/mbkin**28 -
(4032*mckin**4*muG**2*q_cut**9)/mbkin**22 + (31608*mckin**6*muG**2*q_cut**9)/
mbkin**24 + (72432*mckin**8*muG**2*q_cut**9)/mbkin**26 -
(17640*mckin**10*muG**2*q_cut**9)/mbkin**28 + (4032*mckin**4*muG*mupi*
q_cut**9)/mbkin**22 - (15000*mckin**6*muG*mupi*q_cut**9)/mbkin**24 -
(13488*mckin**8*muG*mupi*q_cut**9)/mbkin**26 +
(3528*mckin**10*muG*mupi*q_cut**9)/mbkin**28 -
(21600*mckin**4*muG**2*q_cut**10)/mbkin**24 - (60840*mckin**6*muG**2*
q_cut**10)/mbkin**26 - (21600*mckin**8*muG**2*q_cut**10)/mbkin**28 +
(4320*mckin**4*muG*mupi*q_cut**10)/mbkin**24 +
(9480*mckin**6*muG*mupi*q_cut**10)/mbkin**26 +
(4320*mckin**8*muG*mupi*q_cut**10)/mbkin**28 +
(23400*mckin**4*muG**2*q_cut**11)/mbkin**26 + (23400*mckin**6*muG**2*
q_cut**11)/mbkin**28 - (4680*mckin**4*muG*mupi*q_cut**11)/mbkin**26 -
(4680*mckin**6*muG*mupi*q_cut**11)/mbkin**28 -
(6300*mckin**4*muG**2*q_cut**12)/mbkin**28 + (1260*mckin**4*muG*mupi*
q_cut**12)/mbkin**28 - 24*mckin**2*muG*((-1 + mckin**2/mbkin**2)**2*(
-140 + (1051*mckin**2)/mbkin**2 + (14862*mckin**4)/mbkin**4 -
(95997*mckin**6)/mbkin**6 - (145656*mckin**8)/mbkin**8 +
(1346196*mckin**10)/mbkin**10 + (2750088*mckin**12)/mbkin**12 +
(1327128*mckin**14)/mbkin**14 - (125268*mckin**16)/mbkin**16 -
(84199*mckin**18)/mbkin**18 + (914*mckin**20)/mbkin**20 + | |
# repository: dearpatrickzhao/pydataclasses
from __future__ import absolute_import
import inspect
import functools
import itertools
from collections import OrderedDict
import six
import six.moves as sm
class _DataNone(object):
def __setattr__(self, _k, _v):
raise AttributeError('nothing is wrong')
def __getattribute__(self, _k):
return self
def __setitem__(self, _i, _v):
pass
def __getitem__(self, _i):
return self
def __call__(self, *args, **kwargs):
return self
# pylint: disable=non-iterator-returned
def __iter__(self):
yield self
# noinspection PyMethodMayBeStatic
def __next__(self):
return self
# noinspection PyMethodMayBeStatic
def __bool__(self):
return False
__nonzero__ = __bool__
# Shared sentinel instances of the absorbing null object:
#  - InitNone marks field values captured before an instance is booted
#    (see DataCore.__setattr__).
#  - DataNone marks "no value yet", distinct from a legitimate None.
InitNone = _DataNone()
DataNone = _DataNone()
class DataList(list):
    """A ``list`` subclass whose items are lazily materialised and which
    can mirror ("sync") itself into a plain-python ``__origin__`` list.

    Internal configuration (set in ``__init__``):
      __it_cls__ -- class used to build missing items (default DataCore)
      __origin__ -- plain list mirrored when syncing
      __waited__ -- lazy materialisation enabled
      __synced__ -- keep ``__origin__`` in lockstep with this list
      __relink__ -- callback to re-attach ``__origin__`` to the parent
    """

    def __init__(
            self,
            seq=(),
            __it_cls__=None,
            __origin__=None,
            __lazy__=True,
            __sync__=False,
            __link__=None,
    ):
        list.__init__(self, seq)
        self.__it_cls__ = __it_cls__ or DataCore
        self.__origin__ = __origin__
        self.__waited__ = __lazy__
        self.__synced__ = __sync__
        self.__relink__ = __link__
        if self.__len__():
            # Mirror any initial items into __origin__ right away.
            self.__auto_sync__(0, self.__len__())

    def __list__(self):
        # Fresh empty DataList carrying the same configuration.
        return DataList(
            __it_cls__=self.__it_cls__,
            __lazy__=self.__waited__,
            __sync__=self.__synced__,
        )

    # ===== public =====
    def append(self, _item):
        # A synced list may only hold synced DataCore items.
        if (
                self.__synced__ and
                isinstance(_item, DataCore) and
                not _item.__synced__
        ):
            raise ValueError('missing __sync__')
        _len = self.__len__()
        self.__setitem__(_len, _item)

    def extend(self, _iterable):
        for _item in _iterable:
            self.append(_item)

    def insert(self, _index, _item):
        _len = self.__len__()
        list.insert(self, _index, _item)
        self.__auto_sync__(_index, _len + 1)

    def sort(self, _cmp=None, key=None, reverse=False):
        # Accept a Python-2 style comparison function via _cmp.
        if key is None and _cmp is not None:
            key = functools.cmp_to_key(_cmp)
        list.sort(self, key=key, reverse=reverse)
        self.__auto_sync__(0, self.__len__())

    def reverse(self):
        # NOTE(review): assumes __origin__ is list-like; when the list
        # was never synced __origin__ may be None -- confirm callers.
        list.reverse(self)
        self.__origin__.reverse()

    def pop(self, _i=None):
        # NOTE(review): the default None is passed straight through to
        # list.pop(), which requires an integer, so a bare .pop()
        # raises TypeError -- a default of -1 was probably intended.
        _len = self.__len__()
        self.__auto_sync__(_len - 1, _len)
        self.__origin__.pop(_i)
        return list.pop(self, _i)

    def clear(self):
        self.__setitem__(slice(0, self.__len__()), self.__list__())

    def remove(self, _v):
        list.remove(self, _v)
        self.__init_sync__()

    # ===== private =====
    def __iter__(self):
        # Route through __getitem__ so DataNone placeholders are
        # materialised before being yielded.
        for _i, _v in enumerate(list.__iter__(self)):
            _v = self.__getitem__(_i)
            yield _v

    def __setitem__(self, _i, _v):
        if isinstance(_i, slice):
            list.__setitem__(self, _i, _v)
            self.__init_sync__()
        else:
            # Sync before (to grow to index _i) and after (to publish _v).
            self.__auto_sync__(_i, _i + 1)
            list.__setitem__(self, _i, _v)
            self.__auto_sync__(_i, _i + 1)

    def __getitem__(self, _i):
        if isinstance(_i, slice):
            _start, _outer = _i.start, _i.stop
            _start = 0 if _start is None else _start
            _outer = 2 ** 63 - 1 if _outer is None else _outer
            _outer = min(_outer, self.__len__())
            _v = self.__list__()
            for _oi in sm.range(_start, _outer):
                _ov = list.__getitem__(self, _oi)
                _ov = self.__init_item__(_oi, _ov)
                self.__auto_sync__(_oi, _oi + 1)
                # NOTE(review): setting index _oi on the result and then
                # appending the same element looks like it duplicates
                # entries once _oi > 0 -- confirm the intended slice
                # semantics.
                _v.__setitem__(_oi, _ov)
                _ov = _v.__getitem__(_oi)
                _v.append(_ov)
        else:
            self.__auto_sync__(_i, _i + 1)
            _v = list.__getitem__(self, _i)
            _v = self.__init_item__(_i, _v)
            self.__auto_sync__(_i, _i + 1)
        return _v

    def __delitem__(self, _i):
        if isinstance(_i, slice):
            self.__setitem__(_i, [])
        else:
            self.pop(_i)

    def __delslice__(self, _i, _j):
        # Python 2 slice-deletion hook.
        self.__setitem__(slice(_i, _j), [])

    def __setslice__(self, _i, _j, _iterator):
        # Python 2 slice-assignment hook.
        self.__setitem__(slice(_i, _j), _iterator)

    def __getslice__(self, _i, _j):
        # Python 2 slice-read hook.
        return self.__getitem__(slice(_i, _j))

    def __iadd__(self, _iterable):
        raise NotImplementedError

    def __imul__(self, y):
        raise NotImplementedError

    # ===== item, sync =====
    def __init_item__(self, _i, _v):
        """Materialise the DataNone placeholder at index _i, if any."""
        if _v is not DataNone:
            return _v
        _item_cls = self.__it_cls__
        if (
                inspect.isclass(_item_cls) and
                issubclass(_item_cls, DataCore)
        ):
            if self.__synced__:
                _v_origin = _item_cls().__as_dict__(dict, 1)
                _v = _item_cls(_v_origin, __sync__=True)
            elif self.__waited__:
                _v = _item_cls(__lazy__=True)
            else:
                _v = _item_cls()
        else:
            _v = _item_cls()
        list.__setitem__(self, _i, _v)
        return _v

    def __init_sync__(self):
        """Re-sync the whole list after a structural change."""
        _len = self.__len__()
        del self.__origin__[_len:]
        self.__auto_sync__(0, _len)

    def __auto_sync__(self, _start, _outer):
        """Grow to at least _outer items and, when syncing, mirror the
        range [_start, _outer) into __origin__."""
        _len = self.__len__()
        _more = max(0, _outer - _len)
        list.extend(self, [DataNone] * _more)
        if not self.__synced__:
            return
        _origin = self.__origin__
        if (
                _origin is DataNone or
                _origin is None
        ):
            # First sync: create the backing list and hand it to the
            # parent via the relink callback.
            _origin = self.__origin__ = []
            if self.__relink__:
                self.__relink__(self.__origin__)
        _len_o = len(_origin)
        _more_o = max(0, _outer - _len_o)
        _origin.extend([None] * _more_o)
        _start = max(0, min(_start, _len, _len_o))
        _item_cls = self.__it_cls__
        _is_class = inspect.isclass(_item_cls)
        _is_data = _is_class and issubclass(_item_cls, DataCore)
        for _i in sm.range(_start, _outer):
            _obj = list.__getitem__(self, _i)
            _obj = self.__init_item__(_i, _obj)
            # DataCore items are mirrored by their origin dict; plain
            # values are mirrored as-is.
            _origin[_i] = (
                _obj.__origin__ if _is_data and _obj is not DataNone else
                _obj if _obj is not DataNone else
                _item_cls() if _is_class else
                None
            )
class DataAttr(object):
    """Record describing one declared data field."""

    __slots__ = [
        'key',
        'name',
        'value',
        'value_type',
    ]

    def __init__(self, key=None, name=None, value=None, value_type=None):
        """
        :param key: internal attribute name used inside python
        :param name: external attribute name used for input and output
        :param value: python attribute value
        :param value_type: python attribute value type (e.g., a class)
        """
        self.value_type = value_type
        self.value = value
        self.name = name
        self.key = key

    def typed(self):
        """Return True when the field declares an explicit value type."""
        return not (self.value_type is DataNone)
class DataMeta(type):
    """Metaclass driving DataCore's two-phase construction.

    ``__new__`` gives every class its own ``__fields__`` registry;
    ``__call__`` runs after the instance's ``__init__`` and restores the
    real origin/lazy/sync settings that ``__init__`` stashed away in
    ``__slot_1__``..``__slot_3__``.
    """

    def __new__(mcs, name, bases, attrs):
        _class = type.__new__(mcs, name, bases, attrs)
        _class.__fields__ = OrderedDict()
        _class.__ported__ = False
        return _class

    # noinspection PyProtectedMember
    # pylint: disable=protected-access
    def __call__(cls, *args, **kwargs):
        _instance = type.__call__(cls, *args, **kwargs)
        _class = _instance.__class__
        # The first completed construction "ports" the class: its field
        # registry is now final.
        _class.__ported__ = True
        _fields = _class.__fields__
        _instance.__uninit__ = list(six.iterkeys(_fields))
        _instance.__booted__ = True
        if (
                not _instance.__waited__ and
                _instance.__origin__
        ):
            # Eager mode: touch every field to materialise it now.
            for _k in six.iterkeys(_fields):
                getattr(_instance, _k)
        # Restore the construction-time settings stashed by __init__.
        _instance.__origin__ = _instance.__slot_1__
        _instance.__waited__ = _instance.__slot_2__
        _instance.__synced__ = _instance.__slot_3__
        delattr(_instance, '__slot_1__')
        delattr(_instance, '__slot_2__')
        delattr(_instance, '__slot_3__')
        if (
                _instance.__synced__ and
                _instance.__origin__ is not None
        ):
            # Push fixed field values into the origin dict.
            # NOTE(review): keys here are internal field keys, while
            # __link_value__ stores under the external field name --
            # confirm they coincide for fixed fields.
            for _k in six.iterkeys(_fields):
                if _instance.__is_fixed__(_k):
                    _instance.__origin__[_k] = _fields[_k].value
        return _instance
class DataCore(six.with_metaclass(DataMeta, object)):
    """Base class for declarative data objects backed by a plain dict.

    Field access is lazy by default (``__lazy__``) and can be kept in
    sync with the backing ``__origin__`` dict (``__sync__``).
    Construction is completed by DataMeta.__call__, which restores the
    settings stashed here in ``__slot_1__``..``__slot_3__``.
    Helper predicates such as ``__is_property__``/``__is_any__``/
    ``__by_origin__``/``__as_dict__`` are referenced but defined outside
    this excerpt.
    """

    def __init__(self, _origin=None, **_extras):
        # Copying from another DataCore snapshots it as a plain dict.
        if isinstance(_origin, DataCore):
            _origin = _origin.__as_dict__(dict, 1)
        _extras = _extras or dict()
        _waited = _extras.pop('__lazy__', True)
        _synced = _extras.pop('__sync__', False)
        _relink = _extras.pop('__link__', None)
        if _synced:
            # Syncing implies lazy loading and needs a real origin dict.
            _waited = True
            if _origin is None:
                _origin = dict()
        if _extras:
            _origin = _origin or dict()
            _origin.update(_extras)
        # Stash the real settings for DataMeta.__call__ to restore ...
        self.__slot_1__ = _origin
        self.__slot_2__ = _waited
        self.__slot_3__ = _synced
        # ... and finish construction with lazy/sync modes disabled.
        self.__origin__ = _origin
        if _waited:
            self.__origin__ = None
        self.__waited__ = False
        self.__synced__ = False
        self.__relink__ = _relink

    def __new__(cls, *_args, **_kwargs):
        _instance = object.__new__(cls)
        _instance.__booted__ = False
        _instance.__uninit__ = []
        _instance.__locked__ = False
        _class = _instance.__class__
        if not _class.__ported__:
            # First construction of this class: walk class dicts and
            # annotations to register declared fields via __setattr__.
            for _cls in list(_class.__bases__) + [_class]:
                for _k, _v in itertools.chain(
                        six.iteritems(_cls.__dict__),
                        six.iteritems(_cls.__dict__.get('__annotations__', {})),
                ):
                    if (
                            not _k.startswith('_') and
                            _instance.__is_property__(_k, _v)
                    ):
                        setattr(_instance, _k, _v)
        return _instance

    def __setattr__(self, _k, _v):
        if not _k.startswith('_'):
            _cls = self.__class__
            if not _cls.__ported__:
                # Class not finalised yet: record the field declaration.
                if self.__is_property__(_k, _v):
                    self.__init_field__(_k, _v)
                if not self.__booted__:
                    _v = InitNone
            elif _k in _cls.__fields__:
                _v = self.__type_value__(_k, _v)
                if (
                        self.__synced__ and
                        _k not in self.__uninit__
                ):
                    _is_data = isinstance(_v, (DataCore, DataList))
                    _k_in_origin = _k in self.__origin__
                    _ov = (
                        _v if not _is_data else
                        _v.__origin__
                    )
                    # Mirror into the origin unless this would write an
                    # empty container that was never there to begin with.
                    if (
                            _k_in_origin or
                            _ov or
                            _ov in (0, False)
                    ) and not (
                            _is_data and
                            not _v and
                            not _k_in_origin
                    ):
                        self.__link_value__(_k, _ov)
        object.__setattr__(self, _k, _v)

    def __getattribute__(self, _k):
        # Field reads go through lazy materialisation and view mapping.
        if (
                not _k.startswith('_') and
                _k in self.__class__.__fields__
        ):
            _v = self.__real_value__(_k)
            _v = self.__view_value__(_v)
            return _v
        return object.__getattribute__(self, _k)

    # ===== field, value, type =====
    def __init_field__(self, _k, _v):
        """Register a class-level declaration as a DataAttr field."""
        _fields = self.__class__.__fields__
        if (
                self.__is_any__(_v) or
                self.__is_data__(_v) or
                self.__is_list__(_v)
        ):
            # Bare type declaration: no preset value.
            _fields[_k] = DataAttr(_k, _k, DataNone, _v)
        elif isinstance(_v, DataAttr):
            _name = _v.name
            _name = _k if _name is None else _name
            _fields[_k] = DataAttr(_k, _name, _v.value, _v.value_type)
        else:  # preset value, untyped
            _fields[_k] = DataAttr(_k, _k, _v, DataNone)

    def __init_value__(self, _k):
        # Locked while loading so __real_value__ does not recurse.
        self.__locked__ = True
        _v = self.__by_origin__(_k)
        setattr(self, _k, _v)
        self.__locked__ = False

    def __type_value__(self, _k, _v):
        """Coerce _v according to the field's declared value type."""
        _vt = self.__class__.__fields__[_k].value_type
        if self.__is_fixed__(_k):
            pass
        elif self.__is_data__(_vt):
            _vt = self.__data_type__(_vt)
            _v = self.__load_value__(_vt, _v)
            if (
                    inspect.isclass(_vt) and
                    issubclass(_vt, DataCore)
            ):
                _relink = functools.partial(self.__link_value__, _k)
                _v.__relink__ = _relink
        elif self.__is_list__(_vt):
            _v = _v or []
            _item_type = self.__item_type__(_vt)
            _item_type = self.__data_type__(_item_type)
            _items = [self.__load_value__(_item_type, _item) for _item in _v]
            _relink = functools.partial(self.__link_value__, _k)
            _v = DataList(
                _items,
                __it_cls__=_item_type,
                __origin__=_v,
                __lazy__=self.__waited__,
                __sync__=self.__synced__,
                __link__=_relink,
            )
        elif _v is not DataNone and not self.__is_any__(_vt):
            _err = 'invalid value {} for attribute {} with type {}'.format(_v, _k, _vt)
            raise AttributeError(_err)
        if (
                self.__uninit__ and
                _k in self.__uninit__
        ):
            self.__uninit__.remove(_k)
        return _v

    def __load_value__(self, _vt, _v):
        """Instantiate _vt from _v, propagating lazy/sync modes."""
        _vv = None if _v is None or _v is DataNone else _v
        if not (
                inspect.isclass(_vt) and
                issubclass(_vt, DataCore)
        ):
            # Plain types: preserve DataNone/None, otherwise coerce.
            return (
                _v if _v is DataNone else
                None if _vv is None else
                _vt(_vv)
            )
        if self.__synced__:
            _new = _vt(_vv, __sync__=True)
        elif self.__waited__:
            _new = _vt(_vv, __lazy__=True)
        else:
            _new = _vt(_vv)
        return _new

    def __link_value__(self, _k, _v):
        """Write _v into the origin dict under the field's external name."""
        if not self.__synced__:
            return
        if _v is None:
            return
        if self.__origin__ is None:
            # First write: create the origin and attach it upstream.
            self.__origin__ = dict()
            if self.__relink__:
                self.__relink__(self.__origin__)
        _cls = self.__class__
        _name = _cls.__fields__[_k].name
        self.__origin__[_name] = _v

    def __real_value__(self, _k):
        try:
            if (
                    not self.__locked__ and
                    self.__booted__ and
                    self.__uninit__ and
                    _k in self.__uninit__
            ):
                # First read of this field: load it from the origin.
                self.__init_value__(_k)
        except AttributeError:
            # Presumably an access before construction has set the
            # bookkeeping attributes -- fall through to the raw value.
            pass
        return object.__getattribute__(self, _k)

    @classmethod
    def __view_value__(cls, _v):
        # External view: DataNone is presented as plain None.
        return _v if _v is not DataNone else None

    # ===== object, objects =====
    @classmethod
    def __item_type__(cls, _vt):
        # A list field's type is declared as [ItemType].
        return _vt[0]

    @classmethod
    def __data_type__(cls, _vt):
        # Identity here; presumably a hook for subclasses to resolve
        # indirect type references -- confirm against subclasses.
        return _vt

    @classmethod
    def __data__(cls, name=None, value_type=None, default=DataNone):
        """Declare a field with an explicit external name/type/default."""
        return DataAttr(None, name, default, value_type)
| |
-64766,
50336338,
92693,
-1,
50398476,
92727,
50594572,
49945380,
-1,
50660103,
50532192,
-1,
-64762,
50606159,
92695,
-1,
50667893,
92710,
-64760,
50608512,
92694,
-64759,
50803572,
92677,
-64758,
50865891,
92726,
-64757,
50930506,
92719,
-1,
50987846,
92675,
51184405,
50538842,
-1,
51249937,
51122016,
-1,
-64753,
51197397,
92709,
-64752,
51257717,
92721,
-1,
51318624,
92679,
51512084,
51189596,
-1,
-64749,
51451740,
92683,
-1,
51515363,
92684,
-1,
51458932,
92707,
51774236,
51126159,
92689,
51839769,
51709729,
-1,
-64744,
51777640,
92720,
-1,
51839836,
92724,
-64742,
51782474,
92723,
-64741,
51974122,
92685,
-1,
52036422,
92722,
52232993,
51720271,
-1,
52298528,
52167494,
-1,
-64737,
52236128,
92680,
-1,
52298566,
92686,
-1,
52236707,
92725,
52560678,
52176575,
-1,
52626213,
52498272,
-1,
-64732,
52573653,
92703,
-1,
52629344,
92706,
-1,
52573064,
92714,
52888362,
52501635,
-1,
-64728,
52831603,
92718,
-64727,
52893532,
92717,
-1,
52953926,
92682,
53150509,
52836671,
-1,
-64724,
53093747,
92699,
-1,
53150534,
92701,
53347120,
53098241,
-1,
-64721,
53294099,
92705,
-1,
53347142,
92716,
-64719,
53294840,
92728,
-64718,
53490548,
92676,
-64717,
53547945,
92715,
-1,
53612515,
92678,
53805950,
143439,
-1,
53871483,
53740738,
-1,
53936994,
53812697,
-1,
54002501,
53882046,
-1,
54068035,
53942480,
-1,
-64710,
54016358,
7132,
-64709,
54081312,
7116,
-64708,
54144258,
7129,
-64707,
54208904,
7123,
-64706,
54273763,
7112,
-64705,
54337453,
7125,
-64704,
54402212,
7135,
-64703,
54465982,
7107,
-64702,
54531109,
7119,
-1,
54592326,
7105,
-64700,
54011538,
7126,
-1,
54723398,
7128,
54920011,
53944693,
-1,
-64697,
54868326,
7136,
-64696,
54928841,
7127,
-64695,
54989861,
7133,
-64694,
55053594,
7138,
-1,
55116614,
7113,
55313234,
54861711,
-1,
55378769,
55247686,
7124,
55508991,
55321048,
-1,
-64689,
55389442,
7130,
-64688,
55452068,
7114,
-1,
55514558,
7108,
-1,
55314214,
7139,
55771988,
55256803,
7111,
-1,
55712849,
7117,
-64683,
55720294,
7131,
-64682,
55850784,
7115,
-64681,
55915380,
7141,
-64680,
55978376,
7122,
-64679,
56040612,
7134,
-64678,
56105885,
7110,
-64677,
56171154,
7120,
-64676,
56235868,
7140,
-64675,
56300990,
7106,
-64674,
56366117,
7118,
-64673,
56429850,
7121,
-64672,
56494346,
7137,
-64671,
56559398,
7109,
-1,
56623942,
7104,
56820589,
53884649,
-1,
56886117,
56767348,
7150,
-1,
56819852,
7151,
57017192,
56826775,
-1,
-64665,
56959818,
7149,
-1,
57022300,
7147,
57213802,
56954720,
7143,
-1,
57151328,
7145,
-64661,
57157368,
7144,
-64660,
57287498,
7148,
-1,
57349980,
7146,
57541493,
56765630,
-1,
57607028,
57489874,
-1,
57672562,
57550527,
-1,
-64655,
57612520,
7165,
-1,
57673141,
7167,
-64653,
57614759,
7164,
-1,
57809694,
7166,
-1,
57546910,
7142,
58065784,
57485057,
-1,
-64649,
58008823,
7155,
-1,
58070372,
7154,
58327039,
58002384,
-1,
-64646,
58204691,
7152,
-1,
58267056,
7153,
58459005,
53810608,
128704,
-1,
58405680,
128705,
-1,
58405227,
128267,
58655622,
53748085,
-1,
58721157,
58596132,
127974,
58851327,
58663610,
-1,
-64638,
58735013,
128180,
-64637,
58796075,
128183,
-64636,
58855990,
128182,
-1,
58920466,
128181,
-1,
58656088,
127820,
59179916,
58591472,
128118,
59310079,
59113472,
-1,
-64631,
59191328,
128700,
-64630,
59247281,
128036,
-64629,
59312240,
127868,
-1,
59377045,
128124,
59573137,
59115763,
-1,
59638672,
59513636,
-1,
-64625,
59584283,
983056,
-1,
59638371,
128281,
-1,
59585223,
128043,
59900820,
59517274,
-1,
-64621,
59836310,
128136,
-1,
59899997,
128202,
-64619,
59846246,
127936,
-64618,
60038881,
127880,
-1,
60101829,
128708,
60294197,
76122,
-1,
60359724,
60228784,
-1,
60425181,
60300761,
-1,
60490657,
60372440,
-1,
60556192,
60433279,
-1,
60621726,
60500314,
69643,
-1,
60565850,
69644,
60817407,
60562563,
69645,
-1,
60693635,
69646,
-1,
60490566,
69679,
60949415,
60436559,
-1,
61014949,
60895311,
-1,
-64604,
60954046,
69662,
-1,
61014854,
69661,
-64602,
60954046,
69667,
-1,
61145926,
69666,
61342636,
60891978,
69649,
61472767,
61283644,
-1,
-64598,
61353058,
69686,
-64597,
61416086,
69687,
-1,
61480668,
69685,
61670321,
61284725,
-1,
-64594,
61618534,
69660,
-64593,
61677988,
69665,
-64592,
61740069,
69655,
-1,
61801286,
69670,
61998007,
61607158,
-1,
62063541,
61934838,
-1,
-64588,
62002622,
69664,
-1,
62063430,
69663,
-64586,
62002622,
69669,
-1,
62194502,
69668,
62391227,
61932358,
69637,
-64583,
62337908,
69650,
-64582,
62396252,
69648,
-1,
62456646,
69638,
62653375,
62336190,
-1,
-64579,
62598402,
69681,
-64578,
62657982,
69680,
-1,
62718790,
69682,
62915521,
62600052,
69641,
-1,
62862196,
69642,
63046596,
62858943,
-1,
-64573,
62985662,
69672,
-1,
63046470,
69671,
63243207,
62987395,
-1,
-64570,
63184036,
69684,
-1,
63243078,
69678,
63439818,
63183652,
-1,
-64567,
63378878,
69652,
-1,
63439686,
69651,
63636429,
63380112,
-1,
-64564,
63575486,
69659,
-1,
63636294,
69658,
63833039,
63575900,
69639,
-1,
63772508,
69640,
63964114,
63771676,
-1,
-64559,
63903166,
69654,
-1,
63963974,
69653,
64160725,
63899891,
-1,
-64556,
64099774,
69657,
-1,
64160582,
69656,
64357336,
64096033,
-1,
-64553,
64296382,
69674,
-1,
64357190,
69673,
-64551,
64305510,
69676,
-64550,
64498056,
69677,
-64549,
64561069,
69675,
-64548,
64624062,
69683,
-1,
64687968,
69647,
64881658,
60367652,
-1,
64947174,
64827471,
-1,
65012706,
64894721,
-1,
-64543,
64955210,
69715,
-1,
65016016,
69724,
65209317,
64951728,
-1,
-64540,
65153711,
69716,
-1,
65214947,
69725,
-1,
65146984,
69723,
65471467,
64892094,
-1,
65537001,
65411689,
69719,
-1,
65483617,
69728,
65732607,
65475158,
69720,
-1,
65614689,
69729,
65799154,
65409711,
-1,
65864687,
65741642,
-1,
-64530,
65811793,
69717,
-1,
65875110,
69726,
66125823,
65804124,
-1,
-64527,
66008564,
69718,
-1,
66065428,
69727,
66257910,
65742007,
69714,
66387967,
66191360,
-1,
-64523,
66269633,
69733,
-1,
66328401,
69732,
66520056,
66200108,
69722,
-1,
66466657,
69731,
66715647,
66457636,
69721,
-1,
66599231,
69730,
66782221,
64828888,
-1,
66847756,
66725556,
-1,
66913281,
66795184,
-1,
66978815,
66857306,
69694,
-1,
66922842,
69695,
67174399,
66919555,
69696,
-1,
67050627,
69697,
67240965,
66847558,
-1,
-64509,
67187572,
69701,
-64508,
67245916,
69699,
-1,
67306310,
69688,
67503111,
67187572,
69692,
-1,
67449716,
69693,
67634185,
67442524,
69690,
-1,
67573596,
69691,
-64502,
67576650,
69700,
-64501,
67702624,
69698,
-1,
67766209,
69689,
-1,
66787790,
69702,
68027422,
66718966,
-1,
68092956,
67967090,
-1,
68158482,
68038735,
-1,
-64495,
68106390,
69736,
-1,
68163373,
69737,
68355093,
68103358,
-1,
-64492,
68295273,
69740,
-1,
68358742,
69741,
68551704,
68293295,
-1,
-64489,
68494956,
69738,
-1,
68557380,
69739,
-64487,
68500016,
69734,
-64486,
68691127,
69735,
-64485,
68756012,
69743,
-1,
68816932,
69742,
-64483,
68036132,
69704,
-1,
68945286,
69703,
69141540,
67972624,
-1,
-64480,
69088920,
69634,
-64479,
69154074,
69636,
-64478,
69212893,
69635,
-64477,
69273933,
69632,
-1,
69338592,
69633,
69599231,
69085346,
-1,
69600296,
69475459,
-1,
-64473,
69543455,
69709,
-1,
69605659,
69707,
69796907,
69537295,
-1,
-64470,
69743542,
69706,
-1,
69808207,
69705,
-1,
69733451,
69708,
70059057,
60299100,
-1,
70124592,
69995766,
-1,
-64465,
70063225,
127753,
-1,
70127484,
128112,
-1,
70062090,
128188,
70386740,
69996428,
-1,
-64461,
70327112,
983124,
-1,
70388982,
127838,
-1,
70329348,
128148,
70648903,
60234883,
-1,
70714433,
70583273,
-1,
70779963,
70658957,
-1,
70845498,
70717052,
9193,
-1,
70779559,
9197,
-1,
70791891,
9199,
71042109,
70720941,
9194,
-1,
70976167,
9198,
-64450,
70989054,
9195,
-64449,
71118658,
128306,
-64448,
71182655,
10067,
-1,
71241400,
9196,
71435332,
70661142,
-1,
-64445,
71374423,
128153,
-1,
71436389,
128216,
71696383,
71377738,
-1,
-64442,
71579530,
128033,
-1,
71642986,
127804,
71828566,
70591306,
-1,
71894092,
71772172,
-1,
-64438,
71842360,
12730,
-64437,
71900739,
12729,
-1,
71963872,
12728,
72156240,
71836673,
-1,
72221775,
72097804,
128278,
-1,
72155514,
128209,
-1,
72166590,
128218,
72418386,
72097679,
983247,
-1,
72353569,
128163,
-64429,
72366399,
128102,
-64428,
72497253,
127923,
-64427,
72561995,
128144,
-1,
72615444,
128023,
72811617,
71765856,
-1,
72877146,
72752259,
983055,
73007103,
72817795,
128276,
-1,
72876458,
128277,
73073757,
72819061,
-1,
-64420,
73019975,
127857,
-1,
73077808,
983621,
73270368,
73007942,
-1,
-64417,
73216561,
128147,
-1,
73279836,
128059,
-1,
73208057,
127866,
73532522,
72750940,
-1,
73598053,
73476442,
-1,
-64412,
73544078,
127874,
-1,
73600246,
128038,
73794664,
73534688,
-1,
-64409,
73734650,
128692,
-1,
73797472,
128690,
-64407,
73735870,
127921,
-1,
73931823,
128089,
74122353,
73479028,
-1,
74187888,
74067134,
128652,
74253423,
74133583,
-1,
-64402,
74198211,
128101,
-1,
74252453,
128100,
-1,
74187124,
128655,
-1,
74126364,
128027,
74581108,
74070335,
-1,
-64397,
74529277,
983249,
-1,
74592543,
983246,
-64395,
74525886,
983057,
-1,
74721120,
983125,
74909311,
12367,
-1,
74974762,
74842950,
-1,
75040204,
74916103,
-1,
75105715,
74985469,
-1,
75171003,
75047285,
-1,
75236501,
75112821,
-1,
75302025,
75178357,
-1,
75367553,
75236166,
-1,
-64385,
75314036,
983843,
-64384,
75372380,
983840,
-1,
75432774,
983833,
75629699,
75314036,
983836,
-1,
75576180,
983837,
75760773,
75572042,
983841,
-1,
75703114,
983842,
75891847,
75700060,
983834,
-1,
75831132,
983835,
76087295,
75829088,
983838,
-1,
75960160,
983839,
76153997,
75236166,
-1,
-64373,
76100468,
983711,
-64372,
76158812,
983708,
-1,
76219206,
983701,
76416143,
76100468,
983704,
-1,
76362612,
983705,
76547217,
76358474,
983709,
-1,
76489546,
983710,
76678291,
76486492,
983702,
-1,
76617564,
983703,
76873727,
76615520,
983706,
-1,
76746592,
983707,
76940450,
75184447,
-1,
77005978,
76874566,
-1,
-64360,
76952436,
983689,
-64359,
77010780,
983686,
-1,
77071174,
983679,
77268124,
76952436,
983682,
-1,
77214580,
983683,
77399198,
77210442,
983687,
-1,
77341514,
983688,
77530272,
77338460,
983680,
-1,
77469532,
983681,
77725695,
77467488,
983684,
-1,
77598560,
983685,
77792431,
76878876,
-1,
77857959,
77726534,
-1,
-64347,
77804404,
983667,
-64346,
77862748,
983664,
-1,
77923142,
983657,
78120105,
77804404,
983660,
-1,
78066548,
983661,
78251179,
78062410,
983665,
-1,
78193482,
983666,
78382253,
78190428,
| |
if __name__ == "__main__" or __name__ == "test_web_default":
__package__ = 'cloudscheduler.unit_tests.web_tests'
import unittest
import sys
from . import web_test_setup_cleanup as wtsc
from . import web_test_assertions_v2 as wta
from . import web_test_page_objects as pages
from . import web_test_helpers as helpers
class TestWebDefaultCommon(unittest.TestCase):
"""A class for the default tests that should be repeated in all iterations."""
    @classmethod
    def setUpClass(cls):
        """Create the page object and fixtures shared by all tests.

        NOTE(review): assumes cls.driver and cls.gvar are provided by a
        suite-level setup outside this file — confirm.
        """
        cls.page = pages.DefaultsPage(cls.driver, cls.gvar['address'])
        # Group most tests act on; tests that change group settings use
        # the separate '-wig2' group instead.
        cls.group_name = cls.gvar['user'] + '-wig1'
        # Oversized values used to probe database column limits.
        cls.oversize = cls.gvar['oversize']
    def setUp(self):
        """Start each test on the Defaults page of the default group."""
        self.page.get_homepage()
        self.page.switch_default_group(self.group_name)
        self.page.click_top_nav('Defaults')
    def test_web_default_find(self):
        """Smoke test: navigation performed in setUp must succeed."""
        pass
    def test_web_default_update_htcondor_fqdn(self):
        """Change a group's htcondor fqdn and verify it is stored."""
        # Use the secondary group so the shared default group is untouched.
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_fqdn('csv2-dev2.heprc.uvic.ca')
        self.page.click_update_group()
        wta.assertHasAttribute('group', group_name, 'htcondor_fqdn', 'csv2-dev2.heprc.uvic.ca', group=group_name, defaults=True)
    def test_web_default_update_htcondor_fqdn_invalid(self):
        """Try to set an invalid htcondor fqdn; expect an error and no change."""
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_fqdn('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', group_name, 'htcondor_fqdn', 'invalid-web-test', group=group_name, defaults=True)
    def test_web_default_update_htcondor_fqdn_too_long(self):
        """Try to set an htcondor fqdn longer than the varchar_128 column."""
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_fqdn(self.oversize['varchar_128'])
        self.page.click_update_group()
        # Error message commented out in all length tests because the message isn't bold (and therefore is indistinguishable from a success message)
        # TODO: look into this
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', group_name, 'htcondor_fqdn', self.oversize['varchar_128'], group=group_name, defaults=True)
    def test_web_default_update_htcondor_container_hostname(self):
        """Change a group's htcondor container hostname and verify it is stored."""
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_container_hostname(self.gvar['user'] + '-host')
        self.page.click_update_group()
        wta.assertHasAttribute('group', group_name, 'htcondor_container_hostname', self.gvar['user'] + '-host', group=group_name, defaults=True)
def test_web_default_update_htcondor_container_hostname_too_long(self):
# Tries to change a group's htcondor container hostname to one too long for the database
group_name = self.gvar['user'] + '-wig2'
self.page.switch_default_group(group_name)
self.page.click_side_button(group_name)
self.page.click_side_tab('Settings')
self.page.type_htcondor_container_hostname(self.oversize['varchar_128'])
self.page.click_update_group()
#self.assertTrue(self.page.error_message_displayed())
wta.assertHasNotAttribute('group', group_name, 'htcondor_container_hostname', self.oversize['varchar_128'])
    def test_web_default_update_htcondor_other_submitters(self):
        """Change a group's htcondor submitters and verify it is stored."""
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_other_submitters(self.gvar['user'] + '-wiu1')
        self.page.click_update_group()
        wta.assertHasAttribute('group', group_name, 'htcondor_other_submitters', self.gvar['user'] + '-wiu1', group=group_name, defaults=True)
    def test_web_default_update_htcondor_other_submitters_too_long(self):
        """Try to set htcondor submitters to a string too long for the column."""
        group_name = self.gvar['user'] + '-wig2'
        self.page.switch_default_group(group_name)
        self.page.click_side_button(group_name)
        self.page.click_side_tab('Settings')
        self.page.type_htcondor_other_submitters(self.oversize['varchar_128'])
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', group_name, 'htcondor_other_submitters', self.oversize['varchar_128'], group=group_name, defaults=True)
    def test_web_default_update_job_cpus(self):
        """Change the group's default job cpus and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_cpus('8')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'job_cpus', '8', group=self.group_name, defaults=True)
    def test_web_default_update_job_cpus_float(self):
        """Try to set default job cpus to a float; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_cpus('8.5')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_cpus', '8.5', group=self.group_name, defaults=True)
    def test_web_default_update_job_cpus_string(self):
        """Try to set default job cpus to a string; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_cpus('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_cpus', 'invalid-web-test', group=self.group_name, defaults=True)
    def test_web_default_update_job_cpus_too_big(self):
        """Try to set default job cpus to an int too big for the int_11 column."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_cpus(str(self.oversize['int_11']))
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_cpus', str(self.oversize['int_11']), group=self.group_name, defaults=True)
    def test_web_default_update_job_ram(self):
        """Change the group's default job RAM and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_ram('1024')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'job_ram', '1024', group=self.group_name, defaults=True)
    def test_web_default_update_job_ram_float(self):
        """Try to set default job RAM to a float; value must not be stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_ram('1024.5')
        self.page.click_update_group()
        # NOTE(review): unlike job_cpus_float, the error-message assert is
        # disabled here — confirm whether that is intentional.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_ram', '1024.5', group=self.group_name, defaults=True)
    def test_web_default_update_job_ram_string(self):
        """Try to set default job RAM to a string; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_ram('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_ram', 'invalid-web-test', group=self.group_name, defaults=True)
    def test_web_default_update_job_ram_too_big(self):
        """Try to set default job RAM to an int too big for the int_11 column."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_ram(str(self.oversize['int_11']))
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_ram', str(self.oversize['int_11']), group=self.group_name, defaults=True)
    def test_web_default_update_job_disk(self):
        """Change the group's default job disk size and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_disk('4')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'job_disk', '4', group=self.group_name, defaults=True)
def test_web_default_update_job_disk_float(self):
# Tries to change a group's default disk size to a float
self.page.click_side_button(self.group_name)
self.page.click_side_tab('Settings')
self.page.type_job_disk('8.5')
self.page.click_update_group()
self.assertTrue(self.page.error_message_displayed())
wta.assertHasNotAttribute('group', self.group_name, 'job_disk', '4.5', group=self.group_name, defaults=True)
    def test_web_default_update_job_disk_string(self):
        """Try to set default job disk to a string; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_disk('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_disk', 'invalid-web-test', group=self.group_name, defaults=True)
    def test_web_default_update_job_disk_too_big(self):
        """Try to set default job disk to an int too big for the int_11 column."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_disk(str(self.oversize['int_11']))
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_disk', str(self.oversize['int_11']), group=self.group_name, defaults=True)
    def test_web_default_update_job_swap(self):
        """Change the group's default SWAP and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_swap('2')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'job_swap', '2', group=self.group_name, defaults=True)
    def test_web_default_update_job_swap_float(self):
        """Try to set default SWAP to a float; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_swap('2.5')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_swap', '2.5', group=self.group_name, defaults=True)
    def test_web_default_update_job_swap_string(self):
        """Try to set default SWAP to a string; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_swap('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_swap', 'invalid-web-test', group=self.group_name, defaults=True)
    def test_web_default_update_job_swap_too_big(self):
        """Try to set default SWAP to an int too big for the int_11 column."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_job_swap(str(self.oversize['int_11']))
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'job_swap', str(self.oversize['int_11']), group=self.group_name, defaults=True)
    def test_web_default_update_vm_keyname(self):
        """Update the group's default vm keyname and verify it is stored."""
        # Skipped on deployments where keypairs are not accessible.
        helpers.skip_if_flag('keys accessible', self.gvar['keys_accessible'], False)
        keyname = self.gvar['user'] + '-wik1'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.select_vm_keyname(keyname)
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'vm_keyname', keyname, group=self.group_name, defaults=True)
    def test_web_default_update_vm_image(self):
        """Update the group's default vm image and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.select_vm_image('cirros-0.3.5')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'vm_image', 'cirros-0.3.5', group=self.group_name, defaults=True)
    def test_web_default_update_vm_flavor(self):
        """Update the group's default vm flavor and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.select_vm_flavor('s8')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'vm_flavor', 's8', group=self.group_name, defaults=True)
    def test_web_default_update_vm_network(self):
        """Update the group's default vm network and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.select_vm_network('private')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'vm_network', 'private', group=self.group_name, defaults=True)
    def test_web_default_update_vm_keep_alive(self):
        """Update the group's default vm keep-alive time and verify it is stored."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_vm_keep_alive('2048')
        self.page.click_update_group()
        wta.assertHasAttribute('group', self.group_name, 'vm_keep_alive', '2048', group=self.group_name, defaults=True)
    def test_web_default_update_vm_keep_alive_float(self):
        """Try to set vm keep-alive to a float; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_vm_keep_alive('2048.5')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'vm_keep_alive', '2048.5', group=self.group_name, defaults=True)
    def test_web_default_update_vm_keep_alive_string(self):
        """Try to set vm keep-alive to a string; expect an error, no change."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_vm_keep_alive('invalid-web-test')
        self.page.click_update_group()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'vm_keep_alive', 'invalid-web-test', group=self.group_name, defaults=True)
    def test_web_default_update_vm_keep_alive_too_big(self):
        """Try to set vm keep-alive to an int too big for the int_11 column."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Settings')
        self.page.type_vm_keep_alive(str(self.oversize['int_11']))
        self.page.click_update_group()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertHasNotAttribute('group', self.group_name, 'vm_keep_alive', str(self.oversize['int_11']), group=self.group_name, defaults=True)
    def test_web_default_metadata_add(self):
        """Add metadata to the group and verify the tab and record exist."""
        metadata_name = self.gvar['user'] + '-wim3.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.metadata_tab_exists(metadata_name))
        wta.assertExists('metadata', metadata_name, group=self.group_name)
    @unittest.skip("Not working in production (issue 319)")
    def test_web_default_metadata_add_without_name(self):
        """Try to add metadata without a name; an error should be shown."""
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.error_message_displayed())
    @unittest.skip("Not working in production (issue 319)")
    def test_web_default_metadata_add_name_with_symbols(self):
        """Try to add metadata whose name contains symbols; must be rejected."""
        metadata_name = 'inv@|id-web-te$t.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.click_metadata_add()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
    @unittest.skip("Not working in production (issue 319)")
    def test_web_default_metadata_add_name_with_two_dashes(self):
        """Try to add metadata with doubled dashes in its name; must be rejected."""
        metadata_name = 'invalid--web--test.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.click_metadata_add()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
    @unittest.skip("Not working in production (issue 319)")
    def test_web_default_metadata_add_name_with_uppercase(self):
        """Try to add metadata with uppercase letters in its name; must be rejected."""
        metadata_name = 'INVALID-WEB-TEST.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.click_metadata_add()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
    @unittest.skip("Not working in production (issue 319)")
    def test_web_default_metadata_add_name_with_starting_ending_dash(self):
        """Try to add metadata whose name starts/ends with a dash; must be rejected."""
        metadata_name = '-invalid-web-test-.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.click_metadata_add()
        self.assertTrue(self.page.error_message_displayed())
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
    def test_web_default_metadata_add_name_too_long(self):
        """Try to add metadata with a name too long for the varchar_64 column."""
        metadata_name = self.oversize['varchar_64']
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.click_metadata_add()
        # See fqdn_too_long test: length errors are indistinguishable from success.
        #self.assertTrue(self.page.error_message_displayed())
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
    def test_web_default_metadata_add_not_enabled(self):
        """Add metadata with the enabled box toggled off; 'enabled' must be 0."""
        metadata_name = self.gvar['user'] + '-wim4.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        # Toggle the enabled checkbox off before saving.
        self.page.click_metadata_enabled()
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.metadata_tab_exists(metadata_name))
        wta.assertHasAttribute('metadata', metadata_name, 'enabled', '0', group=self.group_name)
    def test_web_default_metadata_add_different_priority_by_typing(self):
        """Add metadata with a typed-in priority and verify it is stored."""
        metadata_name = self.gvar['user'] + '-wim5.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.type_metadata_priority('8')
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.metadata_tab_exists(metadata_name))
        wta.assertHasAttribute('metadata', metadata_name, 'priority', '8', group=self.group_name)
    def test_web_default_metadata_add_different_priority_by_typing_float(self):
        """Try to add metadata with a float typed-in priority; must be rejected."""
        metadata_name = self.gvar['user'] + '-wim8.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.type_metadata_priority('8.5')
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.metadata_priority_popup_exists())
        self.assertFalse(self.page.metadata_tab_exists(metadata_name))
        wta.assertNotExists('metadata', metadata_name, group= self.group_name)
    def test_web_default_metadata_add_different_priority_by_typing_string(self):
        """Try to add metadata with a string typed-in priority; must be rejected."""
        metadata_name = self.gvar['user'] + '-wim8.yaml'
        self.page.click_side_button(self.group_name)
        self.page.click_side_tab('Metadata')
        self.page.click_metadata_new()
        self.page.type_metadata_name(metadata_name)
        self.page.type_metadata_priority('invalid-web-test')
        self.page.type_metadata('sample_key: sample_value')
        self.page.click_metadata_add()
        self.assertTrue(self.page.metadata_priority_popup_exists())
        self.assertFalse(self.page.metadata_tab_exists(metadata_name))
        wta.assertNotExists('metadata', metadata_name, group=self.group_name)
def test_web_default_metadata_add_different_priority_by_typing_too_big(self):
# Tries to add metadata to a group with a priority too big for the database by typing it in the blank
metadata_name = self.gvar['user'] + '-wim8.yaml'
self.page.click_side_button(self.group_name)
self.page.click_side_tab('Metadata')
self.page.click_metadata_new()
self.page.type_metadata_priority(str(self.oversize['int_11']))
self.page.type_metadata('sample_key: sample_value')
self.page.click_metadata_add()
#self.assertTrue(self.page.error_message_displayed())
self.assertFalse(self.page.metadata_tab_exists(metadata_name))
wta.assertNotExists('metadata', | |
indicatorOfTypeOfLevel == 105 and level == 10:
return 'v10'
if table2Version == 2 and indicatorOfParameter == 33 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'u10'
if table2Version == 2 and indicatorOfParameter == 52:
return 'r'
if table2Version == 2 and indicatorOfParameter == 7:
return 'gh'
if table2Version == 2 and indicatorOfParameter == 44:
return 'd'
if table2Version == 2 and indicatorOfParameter == 2:
return 'msl'
if table2Version == 2 and indicatorOfParameter == 43:
return 'vo'
if table2Version == 2 and indicatorOfParameter == 39:
return 'w'
if table2Version == 2 and indicatorOfParameter == 1 and indicatorOfTypeOfLevel == 1:
return 'sp'
if table2Version == 2 and indicatorOfParameter == 51:
return 'q'
if table2Version == 2 and indicatorOfParameter == 34:
return 'v'
if table2Version == 2 and indicatorOfParameter == 33:
return 'u'
if table2Version == 2 and indicatorOfParameter == 11:
return 't'
if table2Version == 2 and indicatorOfParameter == 6:
return 'z'
if table2Version == 2 and indicatorOfParameter == 16 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'mn2t6'
if table2Version == 2 and indicatorOfParameter == 15 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'mx2t6'
if table2Version == 2 and indicatorOfParameter == 4:
return 'pv'
if table2Version == 2 and indicatorOfParameter == 1:
return 'pres'
if table2Version == 2 and indicatorOfParameter == 32:
return 'ws'
if table2Version == 2 and indicatorOfParameter == 13:
return 'pt'
if table2Version == 2 and indicatorOfParameter == 36:
return 'vp'
if table2Version == 2 and indicatorOfParameter == 35:
return 'strf'
if table2Version == 3 and indicatorOfParameter == 61 and indicatorOfTypeOfLevel == 1 and level == 0:
return 'tp'
if table2Version == 3 and indicatorOfParameter == 71:
return 'tcc'
if table2Version == 3 and indicatorOfParameter == 65:
return 'sf'
if table2Version == 3 and indicatorOfParameter == 85:
return 'st'
if table2Version == 3 and indicatorOfParameter == 86:
return 'sm'
if table2Version == 3 and indicatorOfParameter == 7 and indicatorOfTypeOfLevel == 1:
return 'orog'
if table2Version == 3 and indicatorOfParameter == 87:
return 'vegrea'
if table2Version == 3 and indicatorOfParameter == 127:
return 'p3127'
if table2Version == 3 and indicatorOfParameter == 126:
return 'p3126'
if table2Version == 3 and indicatorOfParameter == 125:
return 'p3125'
if table2Version == 3 and indicatorOfParameter == 124:
return 'p3124'
if table2Version == 3 and indicatorOfParameter == 120:
return 'p3120'
if table2Version == 3 and indicatorOfParameter == 119:
return 'p3119'
if table2Version == 3 and indicatorOfParameter == 117:
return 'p3117'
if table2Version == 3 and indicatorOfParameter == 116:
return 'p3116'
if table2Version == 3 and indicatorOfParameter == 115:
return 'p3115'
if table2Version == 3 and indicatorOfParameter == 114:
return 'p3114'
if table2Version == 3 and indicatorOfParameter == 113:
return 'p3113'
if table2Version == 3 and indicatorOfParameter == 112:
return 'p3112'
if table2Version == 3 and indicatorOfParameter == 111:
return 'p3111'
if table2Version == 3 and indicatorOfParameter == 110:
return 'p3110'
if table2Version == 3 and indicatorOfParameter == 109:
return 'p3109'
if table2Version == 3 and indicatorOfParameter == 108:
return 'p3108'
if table2Version == 3 and indicatorOfParameter == 107:
return 'p3107'
if table2Version == 3 and indicatorOfParameter == 106:
return 'p3106'
if table2Version == 3 and indicatorOfParameter == 105:
return 'p3105'
if table2Version == 3 and indicatorOfParameter == 104:
return 'p3104'
if table2Version == 3 and indicatorOfParameter == 103:
return 'p3103'
if table2Version == 3 and indicatorOfParameter == 102:
return 'p3102'
if table2Version == 3 and indicatorOfParameter == 101:
return 'p3101'
if table2Version == 3 and indicatorOfParameter == 100:
return 'p3100'
if table2Version == 3 and indicatorOfParameter == 99:
return 'p3099'
if table2Version == 3 and indicatorOfParameter == 98:
return 'p3098'
if table2Version == 3 and indicatorOfParameter == 97:
return 'p3097'
if table2Version == 3 and indicatorOfParameter == 96:
return 'p3096'
if table2Version == 3 and indicatorOfParameter == 95:
return 'p3095'
if table2Version == 3 and indicatorOfParameter == 94:
return 'p3094'
if table2Version == 3 and indicatorOfParameter == 93:
return 'p3093'
if table2Version == 3 and indicatorOfParameter == 92:
return 'p3092'
if table2Version == 3 and indicatorOfParameter == 91:
return 'p3091'
if table2Version == 3 and indicatorOfParameter == 89:
return 'p3089'
if table2Version == 3 and indicatorOfParameter == 88:
return 'p3088'
if table2Version == 3 and indicatorOfParameter == 86:
return 'p3086'
if table2Version == 3 and indicatorOfParameter == 82:
return 'p3082'
if table2Version == 3 and indicatorOfParameter == 80:
return 'p3080'
if table2Version == 3 and indicatorOfParameter == 77:
return 'p3077'
if table2Version == 3 and indicatorOfParameter == 70:
return 'p3070'
if table2Version == 3 and indicatorOfParameter == 69:
return 'p3069'
if table2Version == 3 and indicatorOfParameter == 68:
return 'p3068'
if table2Version == 3 and indicatorOfParameter == 67:
return 'p3067'
if table2Version == 3 and indicatorOfParameter == 64:
return 'p3064'
if table2Version == 3 and indicatorOfParameter == 63:
return 'p3063'
if table2Version == 3 and indicatorOfParameter == 60:
return 'p3060'
if table2Version == 3 and indicatorOfParameter == 59:
return 'p3059'
if table2Version == 3 and indicatorOfParameter == 56:
return 'p3056'
if table2Version == 3 and indicatorOfParameter == 55:
return 'p3055'
if table2Version == 3 and indicatorOfParameter == 54:
return 'p3054'
if table2Version == 3 and indicatorOfParameter == 53:
return 'p3053'
if table2Version == 3 and indicatorOfParameter == 50:
return 'p3050'
if table2Version == 3 and indicatorOfParameter == 49:
return 'p3049'
if table2Version == 3 and indicatorOfParameter == 48:
return 'p3048'
if table2Version == 3 and indicatorOfParameter == 47:
return 'p3047'
if table2Version == 3 and indicatorOfParameter == 46:
return 'p3046'
if table2Version == 3 and indicatorOfParameter == 45:
return 'p3045'
if table2Version == 3 and indicatorOfParameter == 42:
return 'p3042'
if table2Version == 3 and indicatorOfParameter == 41:
return 'p3041'
if table2Version == 3 and indicatorOfParameter == 38:
return 'p3038'
if table2Version == 3 and indicatorOfParameter == 37:
return 'p3037'
if table2Version == 3 and indicatorOfParameter == 31:
return 'p3031'
if table2Version == 3 and indicatorOfParameter == 30:
return 'p3030'
if table2Version == 3 and indicatorOfParameter == 29:
return 'p3029'
if table2Version == 3 and indicatorOfParameter == 28:
return 'p3028'
if table2Version == 3 and indicatorOfParameter == 27:
return 'p3027'
if table2Version == 3 and indicatorOfParameter == 26:
return 'p3026'
if table2Version == 3 and indicatorOfParameter == 25:
return 'p3025'
if table2Version == 3 and indicatorOfParameter == 24:
return 'p3024'
if table2Version == 3 and indicatorOfParameter == 23:
return 'p3023'
if table2Version == 3 and indicatorOfParameter == 22:
return 'p3022'
if table2Version == 3 and indicatorOfParameter == 21:
return 'p3021'
if table2Version == 3 and indicatorOfParameter == 20:
return 'p3020'
if table2Version == 3 and indicatorOfParameter == 19:
return 'p3019'
if table2Version == 3 and indicatorOfParameter == 18:
return 'p3018'
if table2Version == 3 and indicatorOfParameter == 17:
return 'p3017'
if table2Version == 3 and indicatorOfParameter == 16:
return 'p3016'
if table2Version == 3 and indicatorOfParameter == 15:
return 'p3015'
if table2Version == 3 and indicatorOfParameter == 14:
return 'p3014'
if table2Version == 3 and indicatorOfParameter == 9:
return 'p3009'
if table2Version == 3 and indicatorOfParameter == 8:
return 'p3008'
if table2Version == 3 and indicatorOfParameter == 5:
return 'p3005'
if table2Version == 3 and indicatorOfParameter == 3:
return 'p3003'
if table2Version == 3 and indicatorOfParameter == 12:
return 'p300012'
if table2Version == 2 and indicatorOfParameter == 12:
return 'p300012'
if table2Version == 1 and indicatorOfParameter == 12:
return 'p300012'
if table2Version == 3 and indicatorOfParameter == 84:
return 'al'
if table2Version == 3 and indicatorOfParameter == 76:
return 'p260102'
if table2Version == 3 and indicatorOfParameter == 78:
return 'snoc'
if table2Version == 3 and indicatorOfParameter == 123:
return 'bld'
if table2Version == 3 and indicatorOfParameter == 122:
return 'shf'
if table2Version == 3 and indicatorOfParameter == 121:
return 'lhf'
if table2Version == 3 and indicatorOfParameter == 79:
return 'lssf'
if table2Version == 3 and indicatorOfParameter == 75:
return 'hcc'
if table2Version == 3 and indicatorOfParameter == 74:
return 'mcc'
if | |
"""
Generate CoAs | Cannlytics
Author: <NAME> <<EMAIL>>
Created: 7/22/2021
Updated: 7/23/2021
License: MIT License <https://opensource.org/licenses/MIT>
"""
# Standard packages
import argparse
from ast import literal_eval
import os
from pathlib import Path
from re import sub, findall
from shutil import copyfile
# External packages
import openpyxl
from openpyxl.drawing.image import Image
import pandas as pd
# import qrcode
import win32com.client
import xlwings as xw
from xlwings.utils import rgb_to_int
def calculate_results(sample_data, analysis, mass, dilution_factor=40, correction_factor=10000):
    """Calculate percentage results given raw results,
    dilution factor, and analysis type.
    Args:
        sample_data (dict): A dictionary of sample data.
        analysis (str): An analysis to calculate results for the analysis's analytes.
        mass (float): The recorded sample mass.
        dilution_factor (float): The dilution factor for the sample.
        correction_factor (float): A factor used to appropriately scale values to percentage.
    Returns:
        (dict): An updated dictionary of sample data.
    """
    analytes = get_analytes(analysis)
    for analyte in analytes:
        try:
            raw_value = float(sample_data[analyte])
        except (KeyError, TypeError, ValueError):
            # Skip analytes that are missing from the sample data (KeyError),
            # are None (TypeError), or hold non-numeric text such as 'ND'
            # (ValueError). The original caught only ValueError, so a
            # missing/None analyte crashed the whole CoA run.
            continue
        sample_data[analyte] = ((raw_value * dilution_factor) / mass) / correction_factor
    return sample_data
def create_coa_pdfs(render_file, ws_index_list, output_file, tight=False):
    """Generate PDFs for rendered CoAs by driving Excel through COM.
    Args:
        render_file (str): Path, relative to this script, of the rendered workbook.
        ws_index_list (list): 1-based worksheet indexes to export.
        output_file (str): Path, relative to this script, for the output PDF.
        tight (bool): Optionally fit each exported sheet onto a single page.
    Returns:
        The Excel COM client, so the caller can restore its settings.
    """
    # Launch Excel silently; the caller is responsible for re-enabling
    # screen updating / alerts / events afterwards.
    client = win32com.client.Dispatch('Excel.Application')
    client.Visible = False
    client.ScreenUpdating = False
    client.DisplayAlerts = False
    client.EnableEvents = False
    dir_path = os.path.dirname(os.path.realpath(__file__))
    input_file = os.path.join(dir_path, render_file)
    pdf_file = os.path.join(dir_path, output_file)
    workbook = client.Workbooks.Open(input_file)
    if tight:
        print_area = 'A1:G50'
        for index in ws_index_list:
            worksheet = workbook.Worksheets[index]
            worksheet.PageSetup.Zoom = False
            worksheet.PageSetup.FitToPagesTall = 1
            worksheet.PageSetup.FitToPagesWide = 1
            worksheet.PageSetup.PrintArea = print_area
    try:
        # The worksheet-collection accessor's spelling varies across
        # Excel/pywin32 versions; fall back to the alternate casing.
        # (Narrowed from a bare `except:` so Ctrl-C is not swallowed.)
        workbook.WorkSheets(ws_index_list).Select()
    except Exception:
        workbook.Worksheets(ws_index_list).Select()
    # 0 == xlTypePDF.
    workbook.ActiveSheet.ExportAsFixedFormat(0, pdf_file)
    workbook.Close(False)
    return client
def fill_jinja_references(workbook, data):
    """Fill-in Jinja-style references, iterating over all
    worksheet pages to fill-in all occurrences of each reference 1 by 1.
    Args:
        workbook (Workbook): A Workbook object.
        data (dict): A dictionary of context data.
    """
    # Normalize the context keys once so lookups match the snake-cased
    # reference names found in the sheets.
    context = {snake_case(k): v for k, v in data.items()}
    for sheet in workbook.worksheets:
        for ref_name, cell_refs in get_jinja_references(sheet).items():
            value = context.get(snake_case(ref_name), 0)
            # Falsy values (absent, 0, empty) render as 'MISSING'.
            text = str(value) if value else 'MISSING'
            for cell_ref in cell_refs:
                sheet[cell_ref] = text
def get_jinja_references(sheet):
    """Get Jinja-style references in an Excel worksheet.
    Args:
        sheet (Worksheet): A worksheet to get references from.
    Returns:
        (dict): A map of variable name to the list of cell references
        (e.g. 'A1') where the variable occurs.
    Raises:
        NotImplementedError: If a Jinja `{% for %}` table block is found.
    """
    refs = {}
    for row in sheet.iter_rows():
        for cell in row:
            value = cell.value
            try:
                if value.startswith('{{'):
                    ref = cell.column_letter + str(cell.row)
                    variable = value.replace('{{', '').replace('}}', '').strip()
                    existing_refs = refs.get(variable, [])
                    refs[variable] = [*existing_refs, ref]
                elif value.startswith('{% for'):  # Optional: Handle tables
                    # Bug fix: the original `return NotImplementedError`
                    # returned the exception *class* instead of raising it,
                    # silently handing callers a non-dict value.
                    raise NotImplementedError('Jinja table blocks are not supported.')
            except AttributeError:
                # Non-string cells (numbers, None) have no .startswith.
                continue
    return refs
def get_worksheet_data(sheet, headers):
    """Get the data of a worksheet.
    Args:
        sheet (Worksheet): An openpyxl worksheet object.
        headers (list): A list of headers to map the values.
    Returns:
        list(dict): A list of row dictionaries keyed by `headers`.
    """
    # zip() pairs each header with its cell, stopping at the shorter of
    # the two, exactly as the original loop did.
    return [
        {key: cell.value for key, cell in zip(headers, row)}
        for row in sheet.iter_rows()
    ]
def get_worksheet_data_block(sheet, coords, expand=None):
    """Get a data block.
    Args:
        sheet (Sheet): The worksheet containing the data block.
        coords (str): The inclusive coordinates of the data block.
        expand (str): Optionally expand the range of values.
    Returns:
        (dict): A dictionary of the data in the data block.
    """
    # Each row of the block is a (label, value) pair; snake-case the
    # label so it can be used as a dictionary key.
    rows = sheet.range(coords).options(expand=expand).value
    return {snake_case(row[0]): row[1] for row in rows}
def get_worksheet_headers(sheet):
    """Get the headers of a worksheet.
    Args:
        sheet (Worksheet): An openpyxl worksheet object.
    Returns:
        (list): A list of header values read from the first row.
    """
    # sheet[1] is the first (header) row in openpyxl's 1-based indexing.
    return [cell.value for cell in sheet[1]]
def get_worksheet_indexes(wb, output_pages):
    """Get the indexes for a list of sheet names in a given workbook.
    Args:
        wb (Workbook): The workbook at hand.
        output_pages (list): A list of page names to find indexes for.
    Returns:
        (list): 1-based worksheet indexes, in `output_pages` order.
    """
    # openpyxl's .index() is 0-based; Excel COM expects 1-based indexes.
    return [wb.worksheets.index(wb[name]) + 1 for name in output_pages]
def get_analytes(analysis, limits_file='analytes.xlsx', key='import_key'):
    """Get all analytes for a given analysis."""
    # Read the 'analytes' sheet and keep the requested column for every
    # row belonging to the given analysis.
    workbook = openpyxl.load_workbook(limits_file, data_only=True)
    rows = read_worksheet(workbook, sheetname='analytes')
    return [row[key] for row in rows if row['analysis_key'] == analysis]
def get_analyte_limits(limits_file='analytes.xlsx'):
    """Get analyte limits."""
    workbook = openpyxl.load_workbook(limits_file, data_only=True)
    limits = {}
    # Expose each analyte's LOQ and limit under suffixed keys so they can
    # be merged straight into the sample context.
    for analyte in read_worksheet(workbook, sheetname='analytes'):
        import_key = analyte['import_key']
        limits[f'{import_key}_loq'] = analyte['loq']
        limits[f'{import_key}_limit'] = analyte['limit']
    return limits
def generate_coas(
        import_files,
        output_pages,
        coa_template='./coa_template.xlsm',
        limits=None
):
    """Generate certificates of analysis.
    Args:
        import_files (list): A list of files to import.
        output_pages (list): A list of pages to include in the PDF.
        coa_template (str): The path of the CoA Template.
        limits (dict): A dictionary of limits and LOQ for analytes.
    """
    # Bug fix: the default was a shared mutable dict (`limits={}`).
    if limits is None:
        limits = {}
    # Create CoA folder if one does not exist.
    Path('CoAs').mkdir(parents=True, exist_ok=True)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Create a copy of the template so the original is never modified.
    abs_coa_template = os.path.join(dir_path, coa_template)
    coa_template_copy = abs_coa_template.replace('.xlsm', '_copy.xlsm')
    copyfile(abs_coa_template, coa_template_copy)
    # Track the Excel COM client so its settings can be restored at the end.
    excel = None
    # Iterate over all import files.
    for import_file in import_files:
        # Get all sample, results, client, etc. data.
        all_data = pd.read_excel(import_file)
        # Get all the masses, keyed by snake-cased assay name.
        masses = {}
        for _, row in all_data.iterrows():
            key = snake_case(row['assay'])
            masses[key] = row['test_mass']
        # Aggregate all data for a sample.
        data = all_data.groupby('sample_id', as_index=False).first()
        # Fill in sample details.
        for _, row in data.iterrows():
            # Get sample data as a dictionary, layering in analyte limits.
            sample_data = {**row.to_dict(), **limits}
            sample_id = sample_data['sample_id']
            if not sample_id:  # FIXME: Skip nan
                continue
            # Calculate terpene and cannabinoid results.
            sample_data = calculate_results(
                sample_data,
                analysis='terpenes',
                mass=masses['terpenes'],
                dilution_factor=40
            )
            sample_data = calculate_results(
                sample_data,
                analysis='cannabinoids',
                mass=masses['potency'],
                dilution_factor=40 * 50
            )
            # Iterate over worksheet pages to fill-in
            # all occurrences of each reference 1 by 1.
            template_workbook = openpyxl.load_workbook(coa_template_copy, keep_vba=True)
            fill_jinja_references(template_workbook, sample_data)
            # FIXME: Get output pages dynamically.
            try:
                ws_index_list = get_worksheet_indexes(template_workbook, output_pages)
            except Exception:
                # Narrowed from a bare `except:`; fall back to the
                # historical fixed page indexes.
                if len(output_pages) == 3:
                    ws_index_list = [3, 4, 5]
                else:
                    ws_index_list = [3, 4, 5, 6]
            # Save the rendered template, temporarily.
            abs_render_file = os.path.join(dir_path, f'CoAs/{sample_id}.xlsm')
            template_workbook.save(abs_render_file)
            # Future: Insert signatures / QR code, highlight failures.
            # Create a PDF.
            output_file = f'CoAs/{sample_id}.pdf'
            excel = create_coa_pdfs(abs_render_file, ws_index_list, output_file)
            # Future: Upload the PDF / CoA data, create download links.
    # Remove temporary files.
    os.remove(coa_template_copy)
    # Restore Excel's settings. Guard against an empty `import_files`, in
    # which case Excel was never launched (the original raised NameError).
    if excel is not None:
        excel.ScreenUpdating = True
        excel.DisplayAlerts = True
        excel.EnableEvents = True
# TODO:
# def insert_qr_code(sheet, coords, url):
# """Insert a QR code into a CoA template.
# Args:
# sheet (Worksheet): The worksheet to insert the QR Code
# ref (str): The location to insert the QR code.
# url (str): The URL that the QR code should link.
# Returns:
# """
# qr = qrcode.QRCode(
# version=1,
# error_correction=qrcode.constants.ERROR_CORRECT_L,
# box_size=10,
# border=4,
# )
# qr.add_data(url)
# qr.make(fit=True)
# img = qr.make_image(fill_color='black', back_color='white')
# img.save('qr_code.png')
# logo = Image('qr_code.png')
# logo.height = 150
# logo.width = 150
# sheet.add_image(logo, coords)
# # workbook.save(filename="hello_world_logo.xlsx")
def read_worksheet(workbook, sheetname='Upload'):
    """Read the imported data, iterating over the rows and
    getting value from each cell in row.
    Args:
        workbook (Workbook): An openpyxl workbook to read.
        sheetname (str): The name of the worksheet to read.
    Returns:
        list(dict): A list of row dictionaries keyed by the sheet's
        first-row headers.
    """
    sheet = workbook[sheetname]
    headers = get_worksheet_headers(sheet)
    return get_worksheet_data(sheet, headers)
def run_generate_coas():
    """Call `generate_coas` from an Excel workbook with xlwings."""
    # Initialize the workbook.
    book = xw.Book.caller()
    worksheet = book.sheets.active
    config_sheet = book.sheets['cannlytics.conf']
    config = get_worksheet_data_block(config_sheet, 'A1', expand='table')
    show_status_message(
        worksheet,
        coords=config['status_cell'],
        message='Generating CoAs...',
        background=config['success_color'],
    )
    # Get the parameters.
    import_file = worksheet.range(config['import_file_cell']).value
    if import_file is None:
        show_status_message(
            worksheet,
            coords=config['status_cell'],
            message='Please provide an import file.',
            background=config['error_color']
        )
        return
    coa_template = config['coa_template']
    output_pages = worksheet.range(config['output_pages_cell']).value
    output_pages = [x.strip() for x in output_pages.split(',')]
    limits = get_analyte_limits()
    # Bug fix: `generate_coas` expects a *list* of import files; the
    # original passed the bare cell value (a string), which would be
    # iterated character by character.
    import_files = import_file if isinstance(import_file, list) else [import_file]
    # Generate the CoAs.
    generate_coas(
        import_files,
        output_pages=output_pages,
        coa_template=coa_template,
        limits=limits
    )
    show_status_message(
        worksheet,
        coords=config['status_cell'],
        message='Generated CoAs',
        background=config['success_color'],
    )
def show_status_message(sheet, coords, message, background=None, color=None):
    """Show a status message in an Excel spreadsheet.
    Args:
        sheet (Sheet): The sheet where the status message will be written.
        coords (str): The location of the status message.
        message (str): A status message to write to Excel.
        background (tuple): Optional background color.
        color (tuple): Optional font color.
    """
    status_cell = sheet.range(coords)
    status_cell.value = message
    # Colors arrive as string-encoded tuples from the config sheet, so
    # they are parsed with literal_eval before being applied.
    if background:
        status_cell.color = literal_eval(background)
    if color:
        status_cell.api.Font.Color = rgb_to_int(literal_eval(color))
def snake_case(string):
"""Turn a given string to snake case.
Handles CamelCase, replaces known special characters with
preferred | |
<gh_stars>10-100
"""
Module to provide processing for the block quotes.
"""
import logging
from pymarkdown.container_markdown_token import BlockQuoteMarkdownToken
from pymarkdown.leaf_block_processor import LeafBlockProcessor
from pymarkdown.parser_helper import ParserHelper, PositionMarker
from pymarkdown.parser_logger import ParserLogger
from pymarkdown.stack_token import (
BlockQuoteStackToken,
FencedCodeBlockStackToken,
HtmlBlockStackToken,
IndentedCodeBlockStackToken,
LinkDefinitionStackToken,
ParagraphStackToken,
)
POGGER = ParserLogger(logging.getLogger(__name__))
class BlockQuoteProcessor:
"""
Class to provide processing for the block quotes.
"""
__block_quote_character = ">"
@staticmethod
def is_block_quote_start(
    line_to_parse, start_index, extracted_whitespace, adj_ws=None
):
    """
    Determine if we have the start of a block quote section.
    """
    effective_ws = extracted_whitespace if adj_ws is None else adj_ws
    # More than 3 leading spaces disqualifies a block quote start.
    if not ParserHelper.is_length_less_than_or_equal_to(effective_ws, 3):
        return False
    return ParserHelper.is_character_at_index(
        line_to_parse,
        start_index,
        BlockQuoteProcessor.__block_quote_character,
    )
@staticmethod
def check_for_lazy_handling(
    parser_state,
    this_bq_count,
    stack_bq_count,
    line_to_parse,
    extracted_whitespace,
):
    """
    Check if there is any processing to be handled during the handling of
    lazy continuation lines in block quotes.
    """
    POGGER.debug("__check_for_lazy_handling")
    container_level_tokens = []
    POGGER.debug(
        "this_bq_count>$>>stack_bq_count>>$<<",
        this_bq_count,
        stack_bq_count,
    )
    # Only act when block quotes are open on the stack but the current
    # line supplied no ">" prefixes at all.
    if this_bq_count != 0 or stack_bq_count <= 0:
        return container_level_tokens
    POGGER.debug("haven't processed")
    is_leaf_block_start = LeafBlockProcessor.is_paragraph_ending_leaf_block_start(
        parser_state,
        line_to_parse,
        0,
        extracted_whitespace,
        exclude_thematic_break=True,
    )
    stack_top = parser_state.token_stack[-1]
    if (
        stack_top.is_code_block
        or stack_top.is_html_block
        or is_leaf_block_start
    ):
        # Code/HTML blocks and paragraph-ending leaf starts cannot be
        # lazily continued, so close the open block quotes.
        POGGER.debug("__check_for_lazy_handling>>code block")
        assert not container_level_tokens
        container_level_tokens, _ = parser_state.close_open_blocks_fn(
            parser_state,
            only_these_blocks=[
                BlockQuoteStackToken,
                type(stack_top),
            ],
            include_block_quotes=True,
            was_forced=True,
        )
    return container_level_tokens
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
@staticmethod
def handle_block_quote_block(
    parser_state,
    position_marker,
    extracted_whitespace,
    adj_ws,
    this_bq_count,
    stack_bq_count,
    container_start_bq_count,
):
    """
    Handle the processing of a block quote block.

    If the current line starts a block quote, delegate to
    __handle_block_quote_section to consume the ">" prefixes and collect
    leaf/container tokens; otherwise, while a link-reference definition is
    being gathered, record an empty leading-space entry on the innermost
    open block quote token so its whitespace bookkeeping stays aligned.
    Returns a wide tuple of processing state for the container processor
    (see the return statement for the field order).
    """
    POGGER.debug("handle_block_quote_block>>start")
    # Initialise every result slot up front so all paths return the same
    # tuple shape.
    (
        did_process,
        was_container_start,
        avoid_block_starts,
        did_blank,
        removed_chars_at_start,
        last_block_quote_index,
        end_of_bquote_start_index,
        text_removed_by_container,
        requeue_line_info,
        leaf_tokens,
        container_level_tokens,
    ) = (False, False, False, False, 0, 0, -1, None, None, [], [])
    # Default to the unmodified line/index; overwritten when a block
    # quote section is actually consumed.
    adjusted_text_to_parse, adjusted_index_number = (
        position_marker.text_to_parse,
        position_marker.index_number,
    )
    POGGER.debug(
        "handle_block_quote_block>>text>:$:<", position_marker.text_to_parse
    )
    POGGER.debug(
        "handle_block_quote_block>>extracted_whitespace>:$:<",
        extracted_whitespace,
    )
    POGGER.debug("handle_block_quote_block>>adj_ws>:$:<", adj_ws)
    POGGER.debug(
        "handle_block_quote_block>>was_link_definition_started>:$:<",
        parser_state.token_stack[-1].was_link_definition_started,
    )
    if BlockQuoteProcessor.is_block_quote_start(
        position_marker.text_to_parse,
        position_marker.index_number,
        extracted_whitespace,
        adj_ws=adj_ws,
    ):
        POGGER.debug("handle_block_quote_block>>block-start")
        # Consume the block quote prefix(es) and collect resulting state.
        (
            adjusted_text_to_parse,
            adjusted_index_number,
            leaf_tokens,
            container_level_tokens,
            stack_bq_count,
            alt_this_bq_count,
            removed_chars_at_start,
            did_blank,
            last_block_quote_index,
            text_removed_by_container,
            avoid_block_starts,
            requeue_line_info,
        ) = BlockQuoteProcessor.__handle_block_quote_section(
            parser_state,
            position_marker,
            stack_bq_count,
            extracted_whitespace,
            container_start_bq_count,
        )
        POGGER.debug(">>avoid_block_starts>>$", avoid_block_starts)
        # TODO for nesting, may need to augment with this_bq_count already set.
        this_bq_count = alt_this_bq_count
        POGGER.debug(">>this_bq_count>>$", this_bq_count)
        POGGER.debug(">>did_process>>$", did_process)
        if this_bq_count:
            POGGER.debug(">>>>>>>>>>>>>>>$>>>$", this_bq_count, alt_this_bq_count)
            POGGER.debug("token_stack>$", parser_state.token_stack)
            POGGER.debug("token_document>$", parser_state.token_document)
            POGGER.debug("this_bq_count>$", this_bq_count)
            POGGER.debug("leaf_tokens>$", leaf_tokens)
            POGGER.debug("container_level_tokens>$", container_level_tokens)
            POGGER.debug("adjusted_text_to_parse>$<", adjusted_text_to_parse)
            if this_bq_count + 1 < len(parser_state.token_stack):
                POGGER.debug(
                    "token_stack[x]>$", parser_state.token_stack[this_bq_count + 1]
                )
                # A list token directly inside the deepest quote plus
                # remaining text marks a nested-list start.
                if (
                    parser_state.token_stack[this_bq_count + 1].is_list
                    and adjusted_text_to_parse.strip()
                ):
                    POGGER.debug("\n\nBOOM\n\n")
                    parser_state.nested_list_start = parser_state.token_stack[
                        this_bq_count + 1
                    ]
        # A valid last ">" index means the section was really consumed.
        if last_block_quote_index != -1:
            did_process, end_of_bquote_start_index = True, adjusted_index_number
        else:
            did_process, end_of_bquote_start_index = False, -1
        was_container_start = did_process
    elif parser_state.token_stack[-1].was_link_definition_started:
        # No quote start, but an LRD is in progress: keep the innermost
        # block quote's leading-space records in step with this line.
        stack_index = parser_state.find_last_block_quote_on_stack()
        if stack_index > 0:
            last_block_token = parser_state.token_stack[
                stack_index
            ].matching_markdown_token
            POGGER.debug(
                "handle_block w/ no open>>found>>$",
                last_block_token,
            )
            last_block_token.add_leading_spaces("")
    POGGER.debug("handle_block_quote_block>>end>>did_process>>$", did_process)
    return (
        did_process,
        was_container_start,
        end_of_bquote_start_index,
        this_bq_count,
        stack_bq_count,
        adjusted_text_to_parse,
        adjusted_index_number,
        leaf_tokens,
        container_level_tokens,
        removed_chars_at_start,
        did_blank,
        last_block_quote_index,
        text_removed_by_container,
        avoid_block_starts,
        requeue_line_info,
    )
# pylint: enable=too-many-arguments
# pylint: enable=too-many-locals
@staticmethod
def __count_block_quote_starts(
    line_to_parse,
    start_index,
    stack_bq_count,
    is_top_of_stack_fenced_code_block,
    is_top_of_stack_is_html_block,
):
    """
    Having detected a block quote character (">") on a line, continue to consume
    and count while the block quote pattern is there.

    Returns the tuple (this_bq_count, start_index, adjusted_line,
    last_block_quote_index, avoid_block_starts): the number of ">"
    characters consumed, the index just past the consumed prefix, the line
    with any tabs inside the prefix expanded to spaces, the index just past
    the final ">" (-1 if the scan was undone), and whether later block
    starts should be suppressed.
    """
    # Remember the original start index (osi) and line (oltp) so the scan
    # can be rolled back when the quote depth exceeds the stack depth
    # inside an HTML block.
    (
        osi,
        oltp,
        this_bq_count,
        last_block_quote_index,
        avoid_block_starts,
        adjusted_line,
    ) = (
        start_index,
        line_to_parse[:],
        0,
        -1,
        False,
        line_to_parse,
    )
    if stack_bq_count == 0 and is_top_of_stack_fenced_code_block:
        # Inside a fenced code block with no open quotes, the ">" is
        # literal text: step back and count nothing.
        start_index -= 1
    else:
        # Consume the ">" that triggered this call.
        this_bq_count += 1
        start_index += 1
        last_block_quote_index = start_index
        POGGER.debug(
            "stack_bq_count--$--is_top_of_stack_fenced_code_block--$",
            stack_bq_count,
            is_top_of_stack_fenced_code_block,
        )
        while True:
            # Skip a single whitespace character after the ">", expanding
            # a tab into its equivalent run of spaces first.
            if ParserHelper.is_character_at_index_whitespace(
                adjusted_line, start_index
            ):
                if adjusted_line[start_index] == ParserHelper.tab_character:
                    adjusted_tab_length = ParserHelper.calculate_length(
                        ParserHelper.tab_character, start_index=start_index
                    )
                    POGGER.debug("adj--$--", adjusted_line)
                    parts = [
                        adjusted_line[0:start_index],
                        ParserHelper.repeat_string(
                            ParserHelper.space_character, adjusted_tab_length
                        ),
                        adjusted_line[start_index + 1 :],
                    ]
                    adjusted_line = "".join(parts)
                    POGGER.debug("--$--", adjusted_line)
                start_index += 1
            POGGER.debug(
                "this_bq_count--$--stack_bq_count--$--is_top_of_stack_is_html_block--$",
                this_bq_count,
                stack_bq_count,
                is_top_of_stack_is_html_block,
            )
            if is_top_of_stack_is_html_block:
                if this_bq_count == stack_bq_count:
                    # HTML blocks cannot deepen the quote level; record
                    # whether another ">" follows so block starts can be
                    # avoided, then stop scanning.
                    POGGER.debug(
                        "block quote levels don't increase during html block, ignoring"
                    )
                    avoid_block_starts = ParserHelper.is_character_at_index(
                        adjusted_line,
                        start_index,
                        BlockQuoteProcessor.__block_quote_character,
                    )
                    POGGER.debug("avoid_block_starts=$", avoid_block_starts)
                    break
                if this_bq_count > stack_bq_count:
                    # Deeper than the open stack: rewind to the original
                    # index/line and clamp the count to the stack depth.
                    start_index, adjusted_line, last_block_quote_index = (
                        osi,
                        oltp,
                        -1,
                    )
                    avoid_block_starts, this_bq_count = True, stack_bq_count
                    break
            # A fenced code block consumes at most the open quote depth.
            if is_top_of_stack_fenced_code_block and (
                this_bq_count >= stack_bq_count
            ):
                break
            if start_index == len(adjusted_line):
                POGGER.debug("ran out of line")
                break
            if ParserHelper.is_character_at_index_not(
                adjusted_line,
                start_index,
                BlockQuoteProcessor.__block_quote_character,
            ):
                POGGER.debug("not block>$ of :$:", start_index, adjusted_line)
                break
            # Another ">" found: count it and continue scanning.
            this_bq_count += 1
            start_index += 1
            last_block_quote_index = start_index
        POGGER.debug(
            "__count_block_quote_starts--$--$--",
            start_index,
            adjusted_line,
        )
    return (
        this_bq_count,
        start_index,
        adjusted_line,
        last_block_quote_index,
        avoid_block_starts,
    )
# pylint: disable=too-many-locals, too-many-statements, too-many-branches
@staticmethod
def __handle_block_quote_section(
    parser_state,
    position_marker,
    stack_bq_count,
    extracted_whitespace,
    container_start_bq_count,
):
    """
    Handle the processing of a section clearly identified as having block quotes.

    Returns a 12-tuple:
    (line_to_parse, start_index, leaf_tokens, container_level_tokens,
     stack_bq_count, this_bq_count, removed_chars_at_start, did_blank,
     last_block_quote_index, text_removed_by_container, avoid_block_starts,
     requeue_line_info).
    When a requeue is needed, every other slot in the tuple is None.
    """
    line_to_parse, start_index = (
        position_marker.text_to_parse,
        position_marker.index_number,
    )
    text_removed_by_container, did_blank, removed_chars_at_start = None, False, 0
    leaf_tokens, container_level_tokens, original_start_index = [], [], start_index
    POGGER.debug(
        "IN>__handle_block_quote_section---$<<<",
        line_to_parse,
    )
    POGGER.debug(
        "IN>start_index---$<<<",
        start_index,
    )
    POGGER.debug("stack_bq_count--$", stack_bq_count)
    POGGER.debug("token_stack[-1]--$", parser_state.token_stack[-1])
    POGGER.debug(
        "__handle_block_quote_section---$--$--",
        start_index,
        line_to_parse,
    )
    # Count how many block quote starts lead this line; this also advances
    # start_index past them and may rewrite the line (tab expansion).
    (
        this_bq_count,
        start_index,
        line_to_parse,
        last_block_quote_index,
        avoid_block_starts,
    ) = BlockQuoteProcessor.__count_block_quote_starts(
        line_to_parse,
        start_index,
        stack_bq_count,
        parser_state.token_stack[-1].is_fenced_code_block,
        parser_state.token_stack[-1].is_html_block,
    )
    POGGER.debug(">>container_start_bq_count>>$", container_start_bq_count)
    POGGER.debug(">>this_bq_count>>$", this_bq_count)
    POGGER.debug(">>stack_bq_count>>$", stack_bq_count)
    POGGER.debug(">>start_index>>$", start_index)
    POGGER.debug(">>original_start_index>>$", original_start_index)
    # A last_block_quote_index of -1 means no block quote start was consumed;
    # bail out early with everything unchanged.
    if last_block_quote_index == -1:
        POGGER.debug("BAIL")
        return (
            line_to_parse,
            start_index,
            leaf_tokens,
            container_level_tokens,
            stack_bq_count,
            this_bq_count,
            removed_chars_at_start,
            did_blank,
            last_block_quote_index,
            text_removed_by_container,
            avoid_block_starts,
            None,
        )
    POGGER.debug(">>avoid_block_starts>>$", avoid_block_starts)
    POGGER.debug(
        "__handle_block_quote_section---this_bq_count--$--$--$--",
        this_bq_count,
        start_index,
        line_to_parse,
    )
    POGGER.debug(
        "ORIG-->WS[$]--SI[$]--[$]",
        extracted_whitespace,
        original_start_index,
        parser_state.original_line_to_parse,
    )
    POGGER.debug("NOW -->SI[$]--[$]", start_index, line_to_parse)
    # Block quote starts consumed by an enclosing container count toward the total.
    if container_start_bq_count:
        this_bq_count += container_start_bq_count
        POGGER.debug(">>this_bq_count>>$", this_bq_count)
    if not parser_state.token_stack[-1].is_fenced_code_block:
        POGGER.debug("handle_block_quote_section>>not fenced")
        # Adjust the container stack so it holds exactly this_bq_count quotes.
        (
            container_level_tokens,
            stack_bq_count,
            requeue_line_info,
        ) = BlockQuoteProcessor.__ensure_stack_at_level(
            parser_state,
            this_bq_count,
            stack_bq_count,
            extracted_whitespace,
            position_marker,
            original_start_index,
            container_start_bq_count,
        )
        if requeue_line_info:
            # A requeue was triggered; all other outputs are meaningless.
            return (
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                requeue_line_info,
            )
        # Special case: the container's start count differs from the stack
        # count, so part of the matching token's last leading-space line must
        # be trimmed from the removed text later on.
        special_case = False
        special_case_adjusted_text = None
        if (
            container_start_bq_count
            and stack_bq_count > 1
            and container_start_bq_count != stack_bq_count
        ):
            # Walk the stack to the stack_bq_count-th block quote token.
            stack_index = 1
            block_quote_token_count = 0
            while True:
                if parser_state.token_stack[stack_index].is_block_quote:
                    block_quote_token_count += 1
                    if block_quote_token_count == stack_bq_count:
                        break
                stack_index += 1
                assert stack_index < len(parser_state.token_stack)
            assert stack_index < len(parser_state.token_stack)
            matching_block_quote_token = parser_state.token_stack[
                stack_index
            ].matching_markdown_token
            POGGER.debug(
                "matching_block_quote_token=:$:", matching_block_quote_token
            )
            if "\n" in matching_block_quote_token.leading_spaces:
                # Only the text after the last recorded newline matters.
                last_newline_index = (
                    matching_block_quote_token.leading_spaces.rindex("\n")
                )
                special_case_adjusted_text = (
                    matching_block_quote_token.leading_spaces[
                        last_newline_index + 1 :
                    ]
                )
                special_case = True
        # The text removed by this container: the leading whitespace plus the
        # block quote characters consumed between the marker and start_index.
        removed_text = f"{extracted_whitespace}{line_to_parse[position_marker.index_number : start_index]}"
        POGGER.debug(
            "==EWS[$],OSI[$],SI[$],LTP[$],RT=[$]",
            extracted_whitespace,
            original_start_index,
            position_marker.index_number,
            position_marker.text_to_parse,
            removed_text,
        )
        line_to_parse, removed_chars_at_start = (
            line_to_parse[start_index:],
            start_index,
        )
        POGGER.debug("==REM[$],LTP[$]", removed_text, line_to_parse)
        stack_index, text_removed_by_container = (
            parser_state.find_last_block_quote_on_stack(),
            removed_text,
        )
        assert stack_index != -1
        found_bq_stack_token = parser_state.token_stack[stack_index]
        POGGER.debug("__hbqs>>removed_text>>:$:<", removed_text)
        POGGER.debug(
            "__hbqs>>container_start_bq_count>>$", container_start_bq_count
        )
        POGGER.debug("__hbqs>>original_start_index>>$", original_start_index)
        adjusted_removed_text = (
            removed_text[original_start_index:]
            if container_start_bq_count and original_start_index
            else removed_text
        )
        if (
            container_start_bq_count
            and parser_state.token_stack[stack_index - 1].is_block_quote
        ):
            # Pad with the previous quote's leading spaces so the recorded
            # leading text accounts for everything that was removed.
            count_of_actual_starts = ParserHelper.count_characters_in_text(
                adjusted_removed_text, ">"
            )
            assert count_of_actual_starts != this_bq_count
            adj_leading_spaces = parser_state.token_stack[
                stack_index - 1
            ].matching_markdown_token.leading_spaces
            POGGER.debug(
                "__hbqs>>count_of_actual_starts>>$", count_of_actual_starts
            )
            POGGER.debug("__hbqs>>adj_leading_spaces>>:$:<", adj_leading_spaces)
            POGGER.debug(
                "__hbqs>>len(text_removed_by_container)>>:$:<",
                len(text_removed_by_container),
            )
            while len(text_removed_by_container) > len(
                adj_leading_spaces + adjusted_removed_text
            ):
                adj_leading_spaces += " "
            adjusted_removed_text = adj_leading_spaces + adjusted_removed_text
            POGGER.debug(
                "__hbqs>>adjusted_removed_text>>:$:<", adjusted_removed_text
            )
        POGGER.debug("__hbqs>>adjusted_removed_text>>:$:<", adjusted_removed_text)
        if special_case:
            # Trim the part of the removed text already captured in the
            # matching token's last leading-space entry.
            POGGER.debug(
                "__hbqs>>special_case_adjusted_text>>:$:<",
                special_case_adjusted_text,
            )
            adjusted_removed_text = adjusted_removed_text[
                len(special_case_adjusted_text) :
            ]
            POGGER.debug(
                "__hbqs>>adjusted_removed_text>>:$:<", adjusted_removed_text
            )
        # assert False
        POGGER.debug("__hbqs>>bq>>$", found_bq_stack_token.matching_markdown_token)
        found_bq_stack_token.matching_markdown_token.add_leading_spaces(
            adjusted_removed_text, special_case
        )
        POGGER.debug("__hbqs>>bq>>$", found_bq_stack_token.matching_markdown_token)
        if not line_to_parse.strip():
            # Nothing left after the quote markers: treat as a blank line.
            POGGER.debug("call __handle_block_quote_section>>handle_blank_line")
            POGGER.debug("__hbqs>>this_bq_count>>$", this_bq_count)
            POGGER.debug("__hbqs>>stack_bq_count>>$", stack_bq_count)
            POGGER.debug("__hbqs>>token_stack>>$", parser_state.token_stack)
            possible_list_start_index = this_bq_count + 1
            if (
                possible_list_start_index < len(parser_state.token_stack)
                and parser_state.token_stack[possible_list_start_index].is_list
            ):
                POGGER.debug(
                    "__hbqs>>fgg>>$<<",
                    parser_state.token_stack[possible_list_start_index],
                )
            # Re-anchor the marker past the removed container text.
            adjusted_position_marker = PositionMarker(
                position_marker.line_number,
                len(text_removed_by_container),
                position_marker.text_to_parse,
            )
            did_blank = True
            (leaf_tokens, requeue_line_info) = parser_state.handle_blank_line_fn(
                parser_state,
                line_to_parse,
                from_main_transform=False,
                position_marker=adjusted_position_marker,
            )
            assert not (requeue_line_info and requeue_line_info.lines_to_requeue)
    else:
        POGGER.debug("handle_block_quote_section>>fenced")
        assert start_index >= 0
        removed_text, line_to_parse = (
            line_to_parse[0:start_index],
            line_to_parse[start_index:],
        )
        POGGER.debug("__hbqs>>removed_text>>$", removed_text)
        POGGER.debug("__hbqs>>line_to_parse>>$", line_to_parse)
        POGGER.debug("__hbqs>>this_bq_count>>$", this_bq_count)
        POGGER.debug("__hbqs>>stack_bq_count>>$", stack_bq_count)
        if this_bq_count < stack_bq_count:
            # Fewer quote markers than open quotes: the fenced block (and any
            # extra quote levels) must be closed.
            (container_level_tokens, _,) = parser_state.close_open_blocks_fn(
                parser_state,
                only_these_blocks=[
                    FencedCodeBlockStackToken,
                ],
                was_forced=True,
            )
            stack_bq_count = BlockQuoteProcessor.__decrease_stack_to_level(
                parser_state, this_bq_count, stack_bq_count, container_level_tokens
            )
        stack_index = parser_state.find_last_block_quote_on_stack()
        found_bq_stack_token = parser_state.token_stack[stack_index]
        POGGER.debug(
            "found_bq_stack_token---$<<<",
            found_bq_stack_token,
        )
        found_bq_stack_token.matching_markdown_token.add_leading_spaces(
            removed_text
        )
    POGGER.debug(
        "OUT>__handle_block_quote_section---$<<<",
        line_to_parse,
    )
    return (
        line_to_parse,
        start_index,
        leaf_tokens,
        container_level_tokens,
        stack_bq_count,
        this_bq_count,
        removed_chars_at_start,
        did_blank,
        last_block_quote_index,
        text_removed_by_container,
        avoid_block_starts,
        None,
    )
# pylint: enable=too-many-locals, too-many-statements, too-many-branches
# pylint: disable=too-many-arguments, too-many-statements, too-many-locals
@staticmethod
def __ensure_stack_at_level(
parser_state,
this_bq_count,
stack_bq_count,
extracted_whitespace,
position_marker,
original_start_index,
container_start_bq_count,
):
"""
Ensure that the block quote stack is at the proper level on the stack.
"""
container_level_tokens, stack_increase_needed, stack_decrease_needed = (
[],
False,
False,
)
POGGER.debug(
"__ensure_stack_at_level>>this_bq_count>>$>>stack_bq_count>>$",
this_bq_count,
stack_bq_count,
)
if this_bq_count > stack_bq_count:
POGGER.debug(
"__ensure_stack_at_level>>increase to new level",
)
stack_increase_needed = True
elif this_bq_count < stack_bq_count:
POGGER.debug(
"__ensure_stack_at_level>>possible decrease to new level",
)
top_token_on_stack = parser_state.token_stack[-1]
POGGER.debug("__ensure_stack_at_level>>$", top_token_on_stack)
stack_decrease_needed = (
top_token_on_stack.is_indented_code_block
or top_token_on_stack.is_html_block
)
POGGER.debug(
"__ensure_stack_at_level>>decrease to new level=$",
stack_decrease_needed,
)
if stack_increase_needed or stack_decrease_needed:
POGGER.debug(
"token_stack>>$",
parser_state.token_stack,
)
POGGER.debug("token_document>>$", parser_state.token_document)
(
container_level_tokens,
requeue_line_info,
) = parser_state.close_open_blocks_fn(
parser_state,
only_these_blocks=[
ParagraphStackToken,
IndentedCodeBlockStackToken,
LinkDefinitionStackToken,
HtmlBlockStackToken,
],
was_forced=True,
caller_can_handle_requeue=True,
)
if requeue_line_info and requeue_line_info.lines_to_requeue:
# TODO is this common?
POGGER.debug(
"__ensure_stack_at_level>>lines_to_requeue>>$",
requeue_line_info.lines_to_requeue,
)
| |
channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.max_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v2(
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
include_batch_in_index=False,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
`include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c`
if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
include_batch_in_index: An optional `boolean`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
ksize = _get_sequence(ksize, 2, 3, "ksize")
strides = _get_sequence(strides, 2, 3, "strides")
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
include_batch_in_index=include_batch_in_index,
name=name)
@tf_export(v1=["nn.max_pool_with_argmax"])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
Targmax=None,
name=None,
output_dtype=None,
include_batch_in_index=False):
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
Targmax = deprecated_argument_lookup(
"output_dtype", output_dtype, "Targmax", Targmax)
if Targmax is None:
Targmax = dtypes.int64
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
"""Calculates the compute resources needed for Conv3D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_time = int(filter_shape[0])
filter_height = int(filter_shape[1])
filter_width = int(filter_shape[2])
filter_in_depth = int(filter_shape[3])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_in_depth * filter_time *
filter_height * filter_width * 2))
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
@dispatch.add_dispatch_support
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  Defaults to "xw_plus_b_v1".

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    # Convert each operand up front so the op names land inside this scope.
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Uses the v1 bias_add, unlike xw_plus_b above.
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout noise shape, borrowing known dimensions from `x`."""
  # No explicit noise shape: drop independently per element of x.
  if noise_shape is None:
    return array_ops.shape(x)
  try:
    # Best effort to figure out the intended shape statically.  If it cannot
    # be parsed, pass it through and let the op handle it (in eager mode the
    # exception will surface there).
    static_shape = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    return noise_shape
  if x.shape.dims is not None and len(x.shape.dims) == len(static_shape.dims):
    # Fill each unknown noise dimension with the matching known dim of x.
    merged = [
        x_dim.value
        if static_shape.dims[i].value is None and x_dim.value is not None
        else static_shape.dims[i].value
        for i, x_dim in enumerate(x.shape.dims)
    ]
    return tensor_shape.TensorShape(merged)
  return noise_shape
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None):
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
When converting 1.x code, please use named arguments to ensure behavior stays
consistent.
See also: `tf.keras.layers.Dropout` for a dropout layer.
[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
models. Inputs elements are randomly set to zero (and the other elements are
rescaled). This encourages each node to be independently useful, as it cannot
rely on the output of other nodes.
More precisely: With probability `rate` elements of `x` are set to `0`.
The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
expected value is preserved.
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
array([[2., 0., 0., 2., 2.],
[2., 2., 2., | |
cause the engine to skip any further analysis by this module for this target."""
raise NotImplemented()
def execute_final_analysis(self, analysis):
    """Called to analyze Analysis or Observable objects after all other analysis has completed.

    The default implementation performs no work and returns False.
    NOTE(review): the meaning the engine assigns to the return value is not
    visible here -- confirm against the caller before relying on it."""
    return False
def execute_pre_analysis(self):
    """This is called once at the very beginning of analysis.

    The default implementation does nothing; override to perform setup work."""
    pass
def execute_post_analysis(self):
    """This is called after all analysis work has been performed and no outstanding work is left.

    If the function returns False then the function can possibly get called again if the analysis mode changes.
    If the function returns True then the function will not get called again."""
    # default: consider post-analysis complete after the first invocation
    return True
def execute_threaded(self):
    """This is called on a thread if the module is configured as threaded.

    The default implementation is a no-op; override with the periodic work."""
    pass
def execute_threaded_loop_wrapper(self):
    """Runs the threaded loop, guaranteeing database session cleanup on exit."""
    try:
        self.execute_threaded_loop()
    finally:
        # make sure we remove the session if we created one
        saq.db.remove()
def execute_threaded_loop(self):
    """Repeatedly runs execute_threaded() until an error, cancellation or stop request."""
    while True:
        try:
            self.execute_threaded()
        except Exception as e:
            # a failure in the threaded work ends the loop entirely
            logging.error("{} failed threaded execution on {}: {}".format(self, self.root, e))
            report_exception()
            return
        # sleep in one-second increments between executions so that a
        # cancellation or stop request is noticed promptly
        remaining = self.threaded_execution_frequency
        while (remaining > 0
               and not self.engine.cancel_analysis_flag
               and not self.threaded_execution_stop_event.is_set()):
            time.sleep(1)
            remaining -= 1
        # exit when we're asked to
        if self.engine.cancel_analysis_flag or self.threaded_execution_stop_event.is_set():
            return
def auto_reload(self):
    """Called every N seconds (see auto_reload_frequency in abstract
    engine) in the main process to allow the module to update or change
    configuration."""
    # default: nothing to reload
    return
def should_analyze(self, obj):
    """Put your custom "should I analyze this?" logic in this function.

    The default accepts every object."""
    return True
def execute_maintenance(self):
    """Override this function to provide some kind of maintenance routine that is called every
    maintenance_frequency seconds.  The default implementation does nothing."""
    pass
@property
def maintenance_frequency(self):
    """Returns how often to execute the maintenance function, in seconds, or None to disable (the default.)"""
    return None
def analysis_covered(self, observable):
    """Returns True if the value of this observable has already been analyzed in another observable
    that has an observation time within range of this observable."""
    # for this to have any meaning, the observations must have corresponding times
    if not observable.time:
        return False
    # is this feature enabled for this analysis module?
    if not self.is_grouped_by_time:
        return False
    # the window of time (centered on this observable) in which another
    # observation of the same value counts as covering this one
    start_time = observable.time - self.observation_grouping_time_range
    end_time = observable.time + self.observation_grouping_time_range
    grouping_target_available = False
    # NOTE that we also iterate over the observable we're looking at
    for target_observable in self.root.get_observables_by_type(observable.type):
        if target_observable.value != observable.value:
            continue
        # does this target observables time fall in the range we're looking for?
        if target_observable.time is None:
            continue
        if target_observable.time >= start_time and target_observable.time <= end_time:
            # does this target_observable already have this analysis generated?
            if target_observable.get_analysis(self.generated_analysis_type):
                logging.debug(f"{target_observable} already has analysis for "
                              f"{self.generated_analysis_type} between times {start_time} and {end_time} "
                              f"{observable}")
                return True
            # this target is in range AND is already a grouping target
            # NOTE that we want to keep looking for existing analysis so we don't break out of the loop here
            if target_observable.grouping_target:
                logging.debug(f"{target_observable} detected as grouping target for "
                              f"{self.generated_analysis_type} {observable}")
                grouping_target_available = True
    # if we didn't find anything and the observable we're looking at is a grouping target then this is
    # the one we want to analyze
    if observable.grouping_target:
        logging.debug(f"using {observable} as grouping target for {self.generated_analysis_type}")
        return False
    # if we didn't find anything but we did find another observable in the group that is already a grouping
    # target then we are considered "covered" because *that* observable will get the analysis
    if grouping_target_available:
        return True
    # otherwise we analyze this one
    return False
class TagAnalysisModule(AnalysisModule):
    """These types of modules ignore any exclusion rules."""
    def load_exclusions(self):
        # tag modules never load any exclusions
        pass
    def is_excluded(self, observable):
        # tag analysis applies to every observable regardless of exclusion rules
        return False
class LDAPAnalysisModule(AnalysisModule):
    """An analysis module that uses LDAP.

    Both the primary LDAP directory and an optional Tivoli directory are
    supported; the two query methods share a single search implementation."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # load ldap settings from configuration file
        self.ldap_enabled = saq.CONFIG.getboolean('ldap', 'enabled')
        #self.ldap_uri = saq.CONFIG.get('ldap', 'ldap_uri')
        self.ldap_server = saq.CONFIG.get('ldap', 'ldap_server')
        self.ldap_port = saq.CONFIG.getint('ldap', 'ldap_port') or 389
        self.ldap_bind_user = saq.CONFIG.get('ldap', 'ldap_bind_user')
        self.ldap_bind_password = saq.CONFIG.get('ldap', 'ldap_bind_password')
        self.ldap_base_dn = saq.CONFIG.get('ldap', 'ldap_base_dn')
        # some additional parameters for Tivoli queries
        self.tivoli_ldap_enabled = saq.CONFIG.getboolean('ldap', 'tivoli_enabled')
        self.tivoli_server = saq.CONFIG.get('ldap', 'tivoli_server')
        self.tivoli_ldap_port = saq.CONFIG.getint('ldap', 'tivoli_ldap_port')
        self.tivoli_bind_user = saq.CONFIG.get('ldap', 'tivoli_bind_user')
        # BUG FIX: this previously read the 'ldap_bind_password' option, so a
        # dedicated tivoli password could never take effect.  Fall back to the
        # shared ldap password for configs that never defined the tivoli one.
        self.tivoli_bind_password = saq.CONFIG.get(
            'ldap', 'tivoli_bind_password',
            fallback=saq.CONFIG.get('ldap', 'ldap_bind_password'))
        self.tivoli_base_dn = saq.CONFIG.get('ldap', 'tivoli_base_dn')

    def _execute_ldap_search(self, server, port, base_dn, bind_user,
                             bind_password, auto_bind, query, label):
        """Shared search implementation for both directories.

        Returns the attribute dict of the first matching entry, or None on
        no match or any error (errors are logged as warnings)."""
        # imported lazily so the ldap3 dependency is only required when used
        from ldap3 import Server, Connection, SIMPLE, SYNC, SUBTREE, ALL, ALL_ATTRIBUTES
        import json
        try:
            logging.debug("connecting to {} server {} on port {}".format(label, server, port))
            with Connection(
                Server(server, port=port, get_info=ALL),
                auto_bind=auto_bind,
                client_strategy=SYNC,
                user=bind_user,
                password=bind_password,
                authentication=SIMPLE,
                check_names=True) as c:
                logging.debug("running {} query for ({})".format(label, query))
                c.search(base_dn, '({})'.format(query), SUBTREE, attributes=ALL_ATTRIBUTES)
                # a little hack to move the result into json
                response = json.loads(c.response_to_json())
                if len(response['entries']) < 1:
                    return None
                # XXX not sure about the 0 here, I guess only if we only looking for one thing at a time
                return response['entries'][0]['attributes']
        except Exception as e:
            logging.warning("failed {} query {}: {}".format(label, query, e))
            return None

    def ldap_query(self, query):
        if not self.ldap_enabled:
            return None
        return self._execute_ldap_search(
            self.ldap_server, self.ldap_port, self.ldap_base_dn,
            self.ldap_bind_user, self.ldap_bind_password,
            True, query, 'ldap')

    def tivoli_ldap_query(self, query):
        if not self.tivoli_ldap_enabled:
            return None
        # NOTE(review): auto_bind=False here (vs True for ldap_query) is
        # preserved from the original code -- confirm it is intentional.
        return self._execute_ldap_search(
            self.tivoli_server, self.tivoli_ldap_port, self.tivoli_base_dn,
            self.tivoli_bind_user, self.tivoli_bind_password,
            False, query, 'tivoli ldap')
def splunktime_to_datetime(splunk_time):
    """Convert a splunk time in 2015-02-19T09:50:49.000-05:00 format to a datetime object."""
    assert isinstance(splunk_time, str)
    # delegate to the shared event-time parser so all formats are handled in one place
    return parse_event_time(splunk_time)
def splunktime_to_saqtime(splunk_time):
    """Convert a splunk time in 2015-02-19T09:50:49.000-05:00 format to SAQ time format YYYY-MM-DD HH:MM:SS."""
    assert isinstance(splunk_time, str)
    # parse with the shared event-time parser, then reformat for SAQ
    return parse_event_time(splunk_time).strftime(event_time_format_json_tz)
class SplunkAnalysisModule(AnalysisModule, SplunkQueryObject):
    """An analysis module that uses Splunk."""

    def __init__(self, *args, **kwargs):
        super(SplunkAnalysisModule, self).__init__(*args, **kwargs)
        # pull the global splunk connection settings from the configuration file
        self.enabled = saq.CONFIG.get('splunk', 'enabled')
        self.uri = saq.CONFIG.get('splunk', 'uri')
        self.username = saq.CONFIG.get('splunk', 'username')
        self.password = saq.CONFIG.get('splunk', 'password')
        self.max_result_count = saq.CONFIG.get('splunk', 'max_result_count')

        # a splunk-based module can define its own relative time frame for
        # event collection in its config section; otherwise the global
        # [splunk] section values apply
        def _relative_duration(option):
            if saq.CONFIG.has_option(self.config_section, option):
                return saq.CONFIG.get(self.config_section, option)
            return saq.CONFIG.get('splunk', option)

        self.relative_duration_before = _relative_duration('relative_duration_before')
        self.relative_duration_after = _relative_duration('relative_duration_after')

    @property
    def semaphore_name(self):
        return 'splunk'

    def splunk_query(self, *args, **kwargs):
        """Runs a splunk query behind the shared semaphore."""
        try:
            acquired = self.acquire_semaphore()
            if not acquired:
                if not self.cancel_analysis_flag:
                    logging.warning("unable to acquire semaphore")
                return None
            return self.splunk_query_exec(*args, **kwargs)
        finally:
            self.release_semaphore()

    def splunk_query_exec(self, query, event_time=None):
        assert event_time is None or isinstance(event_time, datetime.datetime)
        if not self.enabled:
            return False
        # with no explicit time range we default to the configured window
        # relative to the event time, assuming the analysis source is an Alert
        if event_time is None:
            logging.debug("no time specified using alert time")
            event_time = self.root.event_time_datetime
        return self.query_relative(query, event_time)

    def handle_cancel_event(self):
        # try to stop any existing splunk query
        self.cancel()
class CarbonBlackAnalysisModule(AnalysisModule):
    """An analysis module that directly queries Carbon Black servers as part of its analysis."""
    @property
    def cb(self):
        """Lazily constructed Carbon Black API client, cached on first access."""
        if not hasattr(self, '_cb'):
            from cbapi import CbEnterpriseResponseAPI
            credentials = os.path.join(saq.SAQ_HOME,
                                       saq.CONFIG['carbon_black']['credential_file'])
            self._cb = CbEnterpriseResponseAPI(credential_file=credentials)
        return self._cb
class ELKAnalysisModule(AnalysisModule):
"""An analysis module that queries ElasticSearch as part of its analysis."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# for relative time searches, how far back and forward do we go?
self.earliest_timedelta = create_timedelta(saq.CONFIG['elk']['relative_duration_before'])
if 'relative_duration_before' in self.config:
self.earliest_timedelta = create_timedelta(self.config['relative_duration_before'])
self.latest_timedelta = create_timedelta(saq.CONFIG['elk']['relative_duration_after'])
if 'relative_duration_after' in self.config:
self.latest_timedelta = create_timedelta(self.config['relative_duration_after'])
# format the elk search uri with the | |
self.visibleChildren = []
self.visibleExits = []
self.visibleSigns = []
self.visibleObstacles = []
self.visibleFires = []
self.visibleHeat = []
self.visibleAgents = []
self.visibleCells = []
self.selected = False
self.nearestSign = None
self.routeHistory = []
self.path = []
self.explorationDirection = None
self.waiting = False
self.waitingTime = 0
self.waitingTimeForChildren = 0
self.initDist = 0
self.optEvacTime = 0
self.target = None
self.stuck = False
self.intoxication = 0
self.unconscious = False
self.dead = False
self.evacuated = False
if self.type == "Adult":
if self.fitness == "Fit":
self.maxSpeed = 2.4
self.maxFreq = 1
else:
self.maxSpeed = 1.2
self.maxFreq = 2
elif self.type == "Elderly":
self.maxSpeed = 0.8
self.maxFreq = 3
elif self.type == "Disabled":
self.maxSpeed = 0.4
self.maxFreq = 6
self.freq = self.maxFreq
if self.freq > 1:
self.offset = round(random.random() * self.freq)
else:
self.offset = 0
self.previousState = "AT_REST"
if self.strategy == "familiarExit" and len(self.knownExits) != 0:
self.state = "EVACUATING"
else:
if self.model.grid.getObject(self.startingLocation, "Cell").type == "room":
self.state = "EXITING_ROOM"
else:
self.state = "EXPLORING"
# Function that checks if and which children are visible
def locateChildren(self):
self.visibleChildren = []
for agentID in self.visibleAgents:
if agentID in self.children:
self.visibleChildren.append(agentID)
# Function that picks the nearest exit from known and available exits:
def pickExit(self, exits, **kwargs):
optimalPath = kwargs.get("optimalPath", False)
visible = kwargs.get("visible", False)
distances = []
# If computing optimal path, take all exits into account
# and ignore all objects/agents, including obstacles
if optimalPath:
for exit in exits:
path = computePath(self.model.grid, self.pos, exit, [], [], [],
ignoreAgents=True)
if path != ["blocked"]:
distances.append((len(path), exit, path))
# Return the nearest exit
if len(distances) > 0:
return (min(distances)[1], min(distances)[2])
else:
return None, None
# Otherwise, consider exit only if it is known to the agent as available
# And take into account agents and obstacles known to the agent
else:
for exit in exits:
if self.knownExits[exit] or visible:
path = computePath(self.model.grid, self.pos, exit,
self.knownFires, self.knownHeat, self.knownObstacles)
if path != ["blocked"]:
distances.append((len(path), exit, path))
# Return the nearest exit and path
if len(distances) > 0:
return (min(distances)[1], min(distances)[2])
else:
return None, None
# Function that updates the list of currently visible signs and exits
def updateVisibility(self):
self.visibleExits = []
self.visibleSigns = []
self.visibleObstacles = []
self.visibleFires = []
self.visibleHeat = []
self.visibleAgents = []
for exit in self.model.exits:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]), exit[0]):
self.visibleExits.append(exit)
for sign in self.model.signs:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]),
(sign[0][0], sign[0][1])):
self.visibleSigns.append(sign)
for obstacle in self.model.obstacles:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]), obstacle):
self.visibleObstacles.append(obstacle)
for fire in self.model.fireList:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]), fire):
self.visibleFires.append(fire)
for heat in self.model.hotCells:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]), heat):
self.visibleHeat.append(heat)
for agent in self.model.activeAgents:
if agent != self:
if isVisible(self.model.grid, (self.pos[0], self.pos[1]), agent.pos):
self.visibleAgents.append(agent.unique_id)
# If the agent is selected, update the list of visible cells to be highlighted
if self.selected:
for x in range(self.model.grid.width):
for y in range(self.model.grid.height):
cell = self.model.grid.visibilityArray[self.pos[0]][self.pos[1]][x][y]
if cell:
self.model.grid.getObject((x, y), "Tile").selected = True
else:
self.model.grid.getObject((x, y), "Tile").selected = False
# Function that updates the list of known exits
def updateExits(self):
for exit in self.visibleExits:
if exit not in self.knownExits:
log("\nNew exit found: " + str(exit))
self.knownExits[exit] = True
return True
# Function that updates the list of known signs
    def updateSigns(self):
        """Register the first newly seen sign; its status mirrors the linked exit."""
        for sign in self.visibleSigns:
            if sign not in self.knownSigns:
                log("\nNew sign discovered: " + str(sign))
                # Determine the status of the sign (blocked or not) based on
                # the status of the related exit
                for exit in self.knownExits:
                    if sign[2] == exit[1]:
                        if self.knownExits[exit] == False:
                            self.knownSigns[sign] = False
                            log("Sign leads to a blocked exit, so it will be ignored.")
                            return
                # no matching blocked exit: sign is considered usable
                self.knownSigns[sign] = True
                # NOTE: only the first new sign is registered per call
                # (presumably intentional, mirrors updateExits -- confirm)
                return
# Function that updates the list of known fires and heat
def updateFires(self):
for fire in self.visibleFires:
if fire not in self.knownFires:
log("New fire discovered: " + str(fire))
self.knownFires.append(fire)
for heat in self.visibleHeat:
if heat not in self.knownHeat:
log("Heat discovered: " + str(heat))
self.knownHeat.append(heat)
# Function that updates the list of known obstacles
def updateObstacles(self):
for obstacle in self.visibleObstacles:
if obstacle not in self.knownObstacles:
log("New obstacle discovered: " + str(obstacle))
self.knownObstacles.append(obstacle)
# A function that changes the status of currently targeted exit to blocked
# and either sets the next nearest exit as target or changes the agent's state
    def considerTargetBlocked(self):
        """Mark the targeted exit (and all signs pointing to it) as blocked,
        then either fall back to exploring or retarget the next nearest exit."""
        log("Path to " + self.target[1] + " is now considered blocked.")
        self.knownExits[self.target] = False
        # Consider all the signs linked to this exit to be blocked as well
        log("All signs leading to " + self.target[1] +
            " will be ignored from now on.")
        for sign in self.knownSigns:
            if sign[2] == self.target[1]:
                self.knownSigns[sign] = False
        # NOTE(review): this branches on previousState rather than the current
        # state -- confirm this is intended and not a typo
        if self.previousState in ["EXPLORING", "FOLLOWING", "EXITING_ROOM"]:
            self.previousState = self.state
            self.state = "EXPLORING"
            log("Switching state to 'EXPLORING'.")
            self.path = []
            self.target = None
            return
        else:
            # otherwise retarget the nearest exit still known to be available
            self.target, self.path = self.pickExit(self.knownExits)
            if self.target != None:
                log("Updated target to " + str(self.target[1]) + " " + str(self.target[0]))
                return self.path
            else:
                # no available exits remain: resume exploration
                log("No known available exits. Switching state to 'EXPLORING'.")
                self.previousState = self.state
                self.state = "EXPLORING"
                self.target = None
                self.path = []
                return []
# A function that picks an exploration direction at random
def pickDirection(self, possibleDirections, prevDirection):
time = 0
# Make a copy of the list of available directions (not blocked by walls)
dirs = list(possibleDirections.keys())
while len(dirs) > 0:
# Pick a random direction from the list
direction = random.choice(dirs)
# Check for blockages and remove blocked cells from the list
for obj in self.model.grid.get_cell_list_contents(possibleDirections[direction]):
if obj.__class__.__name__ in ["Obstacle", "Adult", "Child", "Fire", "Heat"]:
time += 1
dirs.remove(direction)
break
# Try to avoid going back and forth
if time < len(dirs) and direction == OPPOSITE_DIR[prevDirection]:
time += 1
continue
return direction
return None
    def reachCorridor(self):
        """BFS from the agent's position to the nearest reachable corridor cell.

        Obstacle classes are relaxed one at a time (Smoke first, then Heat,
        and so on, always keeping at least two classes) whenever no path is
        found. Returns the path as a list of cells, or ["blocked"] when no
        corridor is reachable even under the most relaxed rules.
        """
        obstacleClasses = ["Obstacle", "Fire", "Adult", "Child", "Heat", "Smoke"]
        log("Computing path to corridor")
        while True:
            # (re)start a breadth-first search with the current obstacle rules
            openList = []
            openList.append((self.pos, None))
            closedList = {}
            path = []
            while len(openList) > 0:
                [curr, parent] = openList.pop(0)
                neighbors = self.model.grid.getObject(curr, "Cell").neighbors
                for neighbor in neighbors:
                    cell = neighbors[neighbor]
                    blocked = False
                    used = False
                    # blocked: the cell contains any currently forbidden object
                    for obj in self.model.grid.get_cell_list_contents(cell):
                        if obj.__class__.__name__ in obstacleClasses:
                            blocked = True
                    # used: the cell is already queued for expansion
                    for entry in openList:
                        if cell in entry:
                            used = True
                            break
                    if self.model.grid.getObject(cell, "Cell").type == "corridor":
                        if not blocked and not used:
                            # goal reached: reconstruct the path via parents
                            closedList[curr] = parent
                            closedList[cell] = curr
                            while True:
                                path.append(cell)
                                cell = closedList[cell]
                                if closedList[cell] == None:
                                    break
                            path.reverse()
                            log("PATH: " + str(path))
                            return path
                    if not blocked and not used:
                        if cell not in closedList:
                            openList.append((cell, curr))
                closedList[curr] = parent
            # no path found: drop the least dangerous obstacle class and retry
            if len(obstacleClasses) > 2:
                del (obstacleClasses[-1])
            else:
                path = ["blocked"]
                log("PATH: " + str(path))
                return path
    def findNearestAdult(self, ratio):
        """BFS outward from the agent for the nearest Adult with spare capacity
        (fewer than ratio + 1 children). Returns that adult's unique_id, or
        None when no suitable adult is reachable."""
        log("Finding the nearest adult.")
        # NOTE(review): the outer loop always returns on its first pass (see
        # the unconditional return below) -- the `while True` appears vestigial
        while True:
            openList = []
            openList.append((self.pos, None))
            closedList = {}
            path = []
            while len(openList) > 0:
                [curr, parent] = openList.pop(0)
                neighbors = self.model.grid.getObject(curr, "Cell").neighbors
                for neighbor in neighbors:
                    cell = neighbors[neighbor]
                    blocked = False
                    used = False
                    hot = False
                    # skip cells already queued for expansion
                    for entry in openList:
                        if cell in entry:
                            used = True
                            break
                    # success: an adult in this cell with capacity to spare
                    for obj in self.model.grid.get_cell_list_contents(cell):
                        if obj.__class__.__name__ == "Adult" and len(obj.children) < ratio + 1:
                            return obj.unique_id
                    if not used:
                        if cell not in closedList:
                            openList.append((cell, curr))
                closedList[curr] = parent
            log("No suitable adult found.")
            return
# Funtion that follows the direction of the sign until reaching a wall
# Then returns the last not-blocked cell on that path
# A function that changes the status of currently followed sign to blocked
# and changes the agent's state to EXPLORING
def considerRouteBlocked(self):
log("Currently followed route is blocked. Switching state to 'EXPLORING'.")
for sign in self.routeHistory:
if sign[2] == self.nearestSign[2]:
self.knownSigns[sign] = False
self.previousState = self.state
self.state = "EXPLORING"
self.target = None
return []
# Function that returns the farthest cell reachable (not blocked by a wall)
# from the sign by walking in a straight line in the indicated direction
def routeFromSign(self, sign):
route = [sign[0]]
while True:
last = route[-1]
try:
route.append(self.model.grid.getObject(last, "Cell").neighbors[sign[1]])
except:
break
return (last, sign[1])
# THE MAIN FUNCTION FOR AGENT'S STEP
def step(self):
log("\n---\n\nAGENT " + str(self.unique_id) + " step beginning.")
log("State: " + self.state + | |
P_dev.max(), np.array(devs).max(), measure )
## write to file
Write2h5( likelihood_file_Full, P, [ KeyFull( measure=measure, redshift=redshift, axis=axis, N_inter=N_inter, L0=L0, **scenario ) for axis in ['P', 'x', 'dev']] )
if not dev:
P = P[:-1]
return P
def FlatPrior( measure='', x=[] ):
    """ return flat prior LikelihoodFunction object for range x """
    flat = np.ones_like(x[:-1])
    no_dev = np.zeros_like(x[:-1])
    return LikelihoodFunction( measure=measure, P=flat, x=x, dev=no_dev, typ='prior' )
def ComputeTelescopeLikelihood( measure='', scenario=False, force=False, progress_bar=False ):
    """
    return the likelihood function for measure expected to be observed by telescope in the given scenario

    P, x and dev are written to likelihood_file_telescope

    Parameters
    ----------
    measure : string,
        measure for which P is computed
    scenario : Scenario,
        scenario carrying the telescope and population to marginalize over
    force : boolean,
        indicate whether full likelihood functions should be computed again (only required once per scenario)
    progress_bar : boolean,
        if True, render a progress bar while iterating redshift bins

    Returns
    -------
    L : LikelihoodFunction object
    """
    if not measure:
        exit( "you must provide a measure. Try: 'DM', 'RM', 'tau'" )
    ## idiom: isinstance instead of comparing constructed types
    if not isinstance( scenario, Scenario ):
        exit( "you must provide a reasonable Scenario" )
    ## prior on redshift is likelihood based on FRB population and telescope selection effects
    ## BUGFIX: population/telescope were referenced as undefined local names;
    ## they live on the scenario object
    if scenario.population == 'flat':
        pi_z = FlatPrior( measure='z', x=redshift_range )
    else:
        scenario_telescope = Scenario( population=scenario.population, telescope=scenario.telescope )
        pi_z = GetLikelihood( measure='z' , scenario=scenario_telescope)
    ## possible solutions for all redshifts are summed, weighed by the prior
    Ls = []
    ## prepare scenario used for full likelihood function at increasing redshift
    tmp = scenario.copy()
    tmp.population = False
    tmp.telescope = False
    ## optionally, provide progress bar
    l = len(redshift_bins)
    ran = trange( l, desc='LikelihoodTelescope {} {}'.format( scenario.telescope, scenario.population ) ) if progress_bar else range( l )
    for i in ran:
        tmp.redshift = redshift_bins[i]
        L = GetLikelihood( measure=measure, scenario=tmp, force=force )
        Ls.append(L)
    ## sum per-redshift likelihoods, weighed by the redshift prior
    L = LikelihoodsAdd( *Ls, weights=pi_z.Probability(), dev_weights=pi_z.dev )
    L.Write()
    return L
### !!! depreceated, remove
def LikelihoodTelescope_old( measure='DM', telescope='Parkes', population='SMD', nside_IGM=4, force=False, dev=False, progress_bar=False, N_inter=False, **scenario ):
    """
    DEPRECATED -- superseded by ComputeTelescopeLikelihood; kept for reference only.

    return the likelihood function for measure expected to be observed by telescope in the given scenario

    P, x and dev are written to likelihood_file_telescope

    Parameters
    ----------
    measure : string,
        measure for which P is computed
    telescope : string,
        observing instrument
    population : string,
        assumed cosmic population
    nside_IGM : integer,
        pixelization of IGM full-sky maps
    force : boolean,
        indicate whether full likelihood functions should be computed again (only required once per scenario)
    dev : boolean,
        indicate whether deviation of P should be returned
    N_inter : boolean
        if False: LoS should definitely entail an intervening galaxy (P_Inter renormalized to 1)
        if True: it is unknown whether galaxies intersect the LoS or not (P_Inter renormalized to NInter(redshift) )

    Returns
    -------
    P, x, (dev) : likelihood function, bin ranges, (deviation)
    """
    ## prior on redshift is likelihood based on FRB population and telescope selection effects
    if population == 'flat':
        Pz = None
    else:
        Pz, zs, devz = GetLikelihood_Redshift( population=population, telescope=telescope, dev=True )
    ## possible solutions for all redshifts are summed, weighed by the prior
    Ps, xs, devs = [], [], []
    # for z in redshift_bins:
    ran = trange( len(redshift_bins), desc='LikelihoodTelescope {} {}'.format( telescope, population ) ) if progress_bar else range( len(redshift_bins) )
    for i in ran:
        z = redshift_bins[i]
        P, x, dev = GetLikelihood_Full( measure=measure, redshift=z, force=force, dev=True, N_inter=N_inter, **scenario )
        Ps.append(P)
        xs.append(x)
        devs.append(dev)
    # NOTE(review): when population == 'flat', Pz is None and zs/devz are never
    # bound, so the next line would raise -- confirm before reviving this
    # deprecated function
    P, x, dev = LikelihoodsAdd( Ps, xs, devs=devs, renormalize=1., weights=Pz*np.diff(zs), dev_weights=devz )
    Write2h5( filename=likelihood_file_telescope, datas=[P,x, dev], keys=[ KeyTelescope( measure=measure, telescope=telescope, population=population, axis=axis, N_inter=N_inter, **scenario) for axis in ['P','x', 'dev'] ] )
    res = [P,x]
    if len(dev)>0:
        res.append(dev)
    return res
### !!! depreceated, remove
def LikelihoodMeasureable( P=[], x=[], dev=[], min=None, max=None ):
    """ returns the renormalized part of full likelihood function that can be measured by telescopes, i. e. min <= x <= max

    DEPRECATED -- marked for removal.
    NOTE: parameters `min` and `max` shadow the builtins; kept for backward
    compatibility with existing callers.
    """
    ## determine number of bins in result, roughly number of bins min <= x <= max
    bins = int(np.sum( np.prod( [x>=min if min else np.ones(len(x)), x<=max if max else np.ones(len(x)) ], axis=0 ) ))
    ## smoothing is not reliable at border values. Here, border value is close to peak in P, hence don't smooth
    ## (the ~17 lines of manual cropping/renormalization that used to follow
    ## this return were unreachable dead code and have been removed)
    return LikelihoodShrink( P=P, x=x, dev=dev, min=min, max=max, renormalize=1, bins=bins, smooth=False )
### !!! depreceated, remove
def LikelihoodRedshift( DMs=[], scenario={}, taus=None, population='flat', telescope='None', dev=False ):
    """
    DEPRECATED -- superseded by LikelihoodRedshiftMeasure; kept for reference only.

    returns likelihood functions of redshift for observed DMs (and taus)
    can be used to obtain estimate and deviation

    Parameters
    ----------
    DMs : array-like
        1D array contain extragalactic component of observed values
    taus : array-like, len(DMs), optional
        temporal smearing observed with DM
    scenario : dictionary
        list of models combined to one scenario
    population : string
        assumed cosmic population of FRBs
    telescope: string
        instrument to observe DMs, RMs and taus
    dev : boolean
        if True, also return deviation of liklihood functions

    Returns
    -------
    [Ps, redshift_range] or [Ps, redshift_range, devs] when dev is True
    """
    Ps = np.zeros( [len(DMs),len(redshift_bins)] )
    devs= Ps.copy()
    ## for each redshift
    for iz, z in enumerate( redshift_bins ):
        ## calculate the likelihood of observed DM
        # Ps[:,iz] = Likelihoods( DMs, *GetLikelihood_Full( measure='DM', redshift=z, density=True, **scenario) )
        Ps[:,iz], devs[:,iz] = Likelihoods( DMs, *GetLikelihood_Full( measure='DM', redshift=z, density=True, dev=True, **scenario), density=True ) ### use probability density to compare same value of DM at different redshifts. Otherwise influenced by different binning
    ## improve redshift estimate with additional information from tau, which is more sensitive to high overdensities in the LoS
    ## procedure is identical, the likelihood functions are multiplied
    if taus is not None:
        Ps_ = np.zeros( [len(DMs),len(redshift_bins)] )
        devs_ = Ps_.copy()
        for iz, z in enumerate(redshift_bins):
            Ps_[:,iz], devs_[:,iz] = Likelihoods( taus, *GetLikelihood_Full( measure='tau', redshift=z, density=True, dev=True, **scenario), density=False ) ### not all tau are measureable. However, here we compare different redshifts in the same scenario, so the amount of tau above tau_min is indeed important and does not affect the likelihood of scenarios. Instead, using LikelihoodObservable here would result in wrong estimates.
        Ps *= Ps_
        # deviations combine in quadrature when likelihoods are multiplied
        devs = np.sqrt( devs**2 + devs_**2 )
        Ps_= 0
    ## consider prior likelihood on redshift according to FRB population and telescope selection effects
    if population == 'flat':
        pi, x, pi_dev = np.array([1.]), np.arange(2), np.zeros(1)
    else:
        pi, x, pi_dev = GetLikelihood_Redshift( population=population, telescope=telescope, dev=True )
    Ps = Ps * np.resize( pi*np.diff(x), [1,len(redshift_bins)] )
    devs = np.sqrt( devs**2 + np.resize( pi_dev**2, [1,len(redshift_bins)] ) )
    ## renormalize to 1 for every DM (only if any P is not zero)
    for P in Ps:
        if np.any( P > 0):
            P /= np.sum( P*np.diff( redshift_range ) )
    # Ps = Ps / np.resize( np.sum( Ps * np.resize( np.diff( redshift_range ), [1,len(redshift_bins)] ), axis=1 ), [len(DMs),1] )
    res = [Ps, redshift_range]
    if dev:
        res.append(devs)
    return res
def LikelihoodRedshiftMeasure( measure='', data=[], scenario=False, measureable=False):
"""
returns likelihood functions of redshift for observed data of measure,
can be used to obtain estimate and deviation
Parameters
----------
measure : string
indicate which measure is probed
data : array-like
1D array contain extragalactic component of observed values
scenario : dictionary
list of models combined to one scenario
prior : boolean
"""
if not measure:
exit( "you must provide a measure. Try: 'DM', 'RM', 'tau'" )
if scenario.redshift:
exit( "requires scenario with telescope and population" )
## prepare scenario for increasing redshift
tmp = scenario.copy()
tmp.population = False
tmp.telescope = False
## container for likelihoods and deviation at incrasing redshift
Ps = np.zeros( [len(DMs),len(redshift_bins)] )
devs= Ps.copy()
## for each redshift
for iz, z in enumerate( redshift_bins ):
tmp.redshift = z
L = GetLikelihood( measure, tmp )
if measureable:
L.Measureable()
Ps[:,iz], devs[:,iz] = L.Likelihoods( DMs, density=True ) ### use probability density | |
0)
# The 84 constraints c3445..c3528 follow two regular index patterns; generate
# them in loops instead of hand-written statements. Component names and
# constraint expressions are identical to the expanded form.
# c3445..c3474:  -0.5*x(1564+k) - 0.5*x(1565+k) + x(3491+k) == 0
for k in range(30):
    setattr(m, 'c%d' % (3445 + k),
            Constraint(expr=- 0.5*getattr(m, 'x%d' % (1564 + k))
                            - 0.5*getattr(m, 'x%d' % (1565 + k))
                            + getattr(m, 'x%d' % (3491 + k)) == 0))
# c3475..c3528:  -0.5*(x(1207+k) + x(1208+k)) - 0.5*(x(1595+k) + x(1596+k))
#                - 0.5*(x(1789+k) + x(1790+k)) + x(3521+k) == 0
for k in range(54):
    setattr(m, 'c%d' % (3475 + k),
            Constraint(expr=- 0.5*getattr(m, 'x%d' % (1207 + k))
                            - 0.5*getattr(m, 'x%d' % (1208 + k))
                            - 0.5*getattr(m, 'x%d' % (1595 + k))
                            - 0.5*getattr(m, 'x%d' % (1596 + k))
                            - 0.5*getattr(m, 'x%d' % (1789 + k))
                            - 0.5*getattr(m, 'x%d' % (1790 + k))
                            + getattr(m, 'x%d' % (3521 + k)) == 0))
m.c3529 = Constraint(expr= - 0.5*m.x1261 - 0.5*m.x1262 - | |
* bufsz)()
driver.cuDeviceGetName(buf, bufsz, self.id)
self.name = buf.value
self.primary_context = None
def get_device_identity(self):
return {
'pci_domain_id': self.PCI_DOMAIN_ID,
'pci_bus_id': self.PCI_BUS_ID,
'pci_device_id': self.PCI_DEVICE_ID,
}
@property
def COMPUTE_CAPABILITY(self):
"""
For backward compatibility
"""
warnings.warn("Deprecated attribute 'COMPUTE_CAPABILITY'; use lower "
"case version", DeprecationWarning)
return self.compute_capability
def __repr__(self):
return "<CUDA device %d '%s'>" % (self.id, self.name)
def __getattr__(self, attr):
"""Read attributes lazily
"""
try:
code = DEVICE_ATTRIBUTES[attr]
except KeyError:
raise AttributeError(attr)
value = c_int()
driver.cuDeviceGetAttribute(byref(value), code, self.id)
setattr(self, attr, value.value)
return value.value
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
if isinstance(other, Device):
return self.id == other.id
return False
def __ne__(self, other):
return not (self == other)
    def get_primary_context(self):
        """
        Returns the primary context for the device.
        Note: it is not pushed to the CPU thread.
        """
        # cached: the primary context is retained at most once per Device
        if self.primary_context is not None:
            return self.primary_context
        met_requirement_for_device(self)
        # create primary context
        hctx = drvapi.cu_context()
        driver.cuDevicePrimaryCtxRetain(byref(hctx), self.id)
        # wrap in a weakly-referencing Context so the Device is not kept alive
        ctx = Context(weakref.proxy(self), hctx)
        self.primary_context = ctx
        return ctx
    def release_primary_context(self):
        """
        Release reference to primary context if it has been retained.
        """
        if self.primary_context:
            # drop the driver-side refcount taken in get_primary_context
            driver.cuDevicePrimaryCtxRelease(self.id)
            self.primary_context = None
    def reset(self):
        """Tear down the retained primary context (if any), then reset the
        device at the driver level."""
        try:
            if self.primary_context is not None:
                self.primary_context.reset()
            self.release_primary_context()
        finally:
            # reset at the driver level
            # (runs even if the Python-side teardown above failed)
            driver.cuDevicePrimaryCtxReset(self.id)
def met_requirement_for_device(device):
    """Raise CudaSupportError when *device* is below the minimum supported
    compute capability."""
    if device.compute_capability >= MIN_REQUIRED_CC:
        return
    message = "%s has compute capability < %s" % (device, MIN_REQUIRED_CC)
    raise CudaSupportError(message)
class BaseCUDAMemoryManager(object, metaclass=ABCMeta):
    """Abstract base class defining the External Memory Management (EMM)
    plugin interface."""

    def __init__(self, *args, **kwargs):
        # A target context is mandatory for every memory manager instance.
        try:
            self.context = kwargs.pop('context')
        except KeyError:
            raise RuntimeError("Memory manager requires a context")

    @abstractmethod
    def memalloc(self, size):
        """Allocate ``size`` bytes of on-device memory in the current context.

        :param size: Size of allocation in bytes
        :type size: int
        :return: A memory pointer instance that owns the allocated memory
        :rtype: :class:`MemoryPointer`
        """

    @abstractmethod
    def memhostalloc(self, size, mapped, portable, wc):
        """Allocate ``size`` bytes of pinned host memory.

        ``mapped`` maps the region into the CUDA address space, ``portable``
        pins it for all contexts rather than only the calling one, and ``wc``
        requests write-combined memory.

        :return: A memory pointer instance owning the allocation; the type
                 depends on whether the region was mapped into device memory.
        :rtype: :class:`MappedMemory` or :class:`PinnedMemory`
        """

    @abstractmethod
    def mempin(self, owner, pointer, size, mapped):
        """Pin ``size`` bytes of already-allocated host memory starting at
        ``pointer`` (owned by ``owner``), optionally mapping the region into
        device memory when ``mapped`` is true.

        :return: A memory pointer instance referring to the pinned region.
        :rtype: :class:`MappedMemory` or :class:`PinnedMemory`
        """

    @abstractmethod
    def initialize(self):
        """Perform any set-up required before this EMM plugin instance can be
        used. Returns None."""

    @abstractmethod
    def get_ipc_handle(self, memory):
        """Return an IPC handle for the given GPU allocation.

        :param memory: Memory for which the IPC handle should be created.
        :type memory: :class:`MemoryPointer`
        :rtype: :class:`IpcHandle`
        """

    @abstractmethod
    def get_memory_info(self):
        """Return ``(free, total)`` memory in bytes for the context.

        May raise :class:`NotImplementedError` when such information is not
        practical to obtain (e.g. for a pool allocator).

        :rtype: :class:`MemoryInfo`
        """

    @abstractmethod
    def reset(self):
        """Clear up all memory allocated in this context. Returns None."""

    @abstractmethod
    def defer_cleanup(self):
        """Return a context manager that defers cleanup while it is active."""

    @property
    @abstractmethod
    def interface_version(self):
        """Integer version of the EMM Plugin interface implemented; always 1
        for implementations of this version of the specification."""
class HostOnlyCUDAMemoryManager(BaseCUDAMemoryManager):
    """Base class for External Memory Management (EMM) Plugins that only
    implement on-device allocation. A subclass need not implement the
    ``memhostalloc`` and ``mempin`` methods.

    This class also implements ``reset`` and ``defer_cleanup`` (see
    :class:`numba.cuda.BaseCUDAMemoryManager`) for its own internal state
    management. If an EMM Plugin based on this class also implements these
    methods, then its implementations of these must also call the method from
    ``super()`` to give ``HostOnlyCUDAMemoryManager`` an opportunity to do the
    necessary work for the host allocations it is managing.

    This class does not implement ``interface_version``, as it will always be
    consistent with the version of Numba in which it is implemented. An EMM
    Plugin subclassing this class should implement ``interface_version``
    instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Live allocations keyed by pointer value (only mapped/managed
        # allocations are tracked here; pinned-only memory is not).
        self.allocations = utils.UniqueDict()
        # Deallocations waiting to be flushed (see _attempt_allocation).
        self.deallocations = _PendingDeallocs()

    def _attempt_allocation(self, allocator):
        """
        Attempt allocation by calling *allocator*. If an out-of-memory error
        is raised, the pending deallocations are flushed and the allocation
        is retried. If it fails in the second attempt, the error is reraised.
        """
        try:
            allocator()
        except CudaAPIError as e:
            # is out-of-memory?
            if e.code == enums.CUDA_ERROR_OUT_OF_MEMORY:
                # clear pending deallocations
                self.deallocations.clear()
                # try again
                allocator()
            else:
                raise

    def memhostalloc(self, size, mapped=False, portable=False,
                     wc=False):
        """Implements the allocation of pinned host memory.

        It is recommended that this method is not overridden by EMM Plugin
        implementations - instead, use the :class:`BaseCUDAMemoryManager`.
        """
        pointer = c_void_p()
        # Translate the boolean options into cuMemHostAlloc flag bits.
        flags = 0
        if mapped:
            flags |= enums.CU_MEMHOSTALLOC_DEVICEMAP
        if portable:
            flags |= enums.CU_MEMHOSTALLOC_PORTABLE
        if wc:
            flags |= enums.CU_MEMHOSTALLOC_WRITECOMBINED

        def allocator():
            driver.cuMemHostAlloc(byref(pointer), size, flags)

        # Only mapped allocations go through the flush-and-retry path;
        # plain pinned allocations call the driver directly.
        if mapped:
            self._attempt_allocation(allocator)
        else:
            allocator()

        finalizer = _hostalloc_finalizer(self, pointer, size, mapped)
        ctx = weakref.proxy(self.context)

        if mapped:
            mem = MappedMemory(ctx, pointer, size, finalizer=finalizer)
            # Track mapped memory so reset() can clean it up.
            self.allocations[mem.handle.value] = mem
            return mem.own()
        else:
            return PinnedMemory(ctx, pointer, size, finalizer=finalizer)

    def mempin(self, owner, pointer, size, mapped=False):
        """Implements the pinning of host memory.

        It is recommended that this method is not overridden by EMM Plugin
        implementations - instead, use the :class:`BaseCUDAMemoryManager`.
        """
        if isinstance(pointer, int):
            pointer = c_void_p(pointer)

        # possible flags are "portable" (between context)
        # and "device-map" (map host memory to device thus no need
        # for memory transfer).
        flags = 0
        if mapped:
            flags |= enums.CU_MEMHOSTREGISTER_DEVICEMAP

        def allocator():
            driver.cuMemHostRegister(pointer, size, flags)

        if mapped:
            self._attempt_allocation(allocator)
        else:
            allocator()

        finalizer = _pin_finalizer(self, pointer, mapped)
        ctx = weakref.proxy(self.context)

        if mapped:
            mem = MappedMemory(ctx, pointer, size, owner=owner,
                               finalizer=finalizer)
            self.allocations[mem.handle.value] = mem
            return mem.own()
        else:
            return PinnedMemory(ctx, pointer, size, owner=owner,
                                finalizer=finalizer)

    def memallocmanaged(self, size, attach_global):
        # Allocate unified (managed) memory, visible to host and device.
        ptr = drvapi.cu_device_ptr()

        def allocator():
            # NOTE(review): this c_uint() placeholder is immediately
            # overwritten by one of the enum values below.
            flags = c_uint()
            if attach_global:
                flags = enums.CU_MEM_ATTACH_GLOBAL
            else:
                flags = enums.CU_MEM_ATTACH_HOST

            driver.cuMemAllocManaged(byref(ptr), size, flags)

        self._attempt_allocation(allocator)

        finalizer = _alloc_finalizer(self, ptr, size)
        ctx = weakref.proxy(self.context)
        mem = ManagedMemory(ctx, ptr, size, finalizer=finalizer)
        self.allocations[ptr.value] = mem
        return mem.own()

    def reset(self):
        """Clears up all host memory (mapped and/or pinned) in the current
        context.

        EMM Plugins that override this method must call ``super().reset()`` to
        ensure that host allocations are also cleaned up."""
        self.allocations.clear()
        self.deallocations.clear()

    @contextlib.contextmanager
    def defer_cleanup(self):
        """Returns a context manager that disables cleanup of mapped or pinned
        host memory in the current context whilst it is active.

        EMM Plugins that override this method must obtain the context manager
        from this method before yielding to ensure that cleanup of host
        allocations is also deferred."""
        with self.deallocations.disable():
            yield
class GetIpcHandleMixin:
    """A class that provides a default implementation of ``get_ipc_handle()``.
    """

    def get_ipc_handle(self, memory):
        """Open an IPC memory handle by using ``cuMemGetAddressRange`` to
        determine the base pointer of the allocation. An IPC handle of type
        ``cu_ipc_mem_handle`` is constructed and initialized with
        ``cuIpcGetMemHandle``. A :class:`numba.cuda.IpcHandle` is returned,
        populated with the underlying ``ipc_mem_handle``.
        """
        # Base address of the allocation that contains *memory*.
        base, end = device_extents(memory)
        ipchandle = drvapi.cu_ipc_mem_handle()
        driver.cuIpcGetMemHandle(byref(ipchandle), base)
        source_info = self.context.device.get_device_identity()
        # Offset of this pointer within the underlying base allocation, so
        # the importing process can reconstruct the exact address.
        offset = memory.handle.value - base

        return IpcHandle(memory, ipchandle, memory.size, source_info,
                         offset=offset)
class NumbaCUDAMemoryManager(GetIpcHandleMixin, HostOnlyCUDAMemoryManager):
"""Internal on-device memory management for Numba. This is implemented using
the EMM Plugin interface, but is not part of the public API."""
def initialize(self):
# Set the memory capacity of *deallocations* as the memory manager
# becomes active for the first time
if self.deallocations.memory_capacity == _SizeNotSet:
self.deallocations.memory_capacity = self.get_memory_info().total
def memalloc(self, size):
ptr = drvapi.cu_device_ptr()
def allocator():
driver.cuMemAlloc(byref(ptr), size)
self._attempt_allocation(allocator)
finalizer = _alloc_finalizer(self, ptr, size)
ctx = weakref.proxy(self.context)
mem = AutoFreePointer(ctx, ptr, size, finalizer=finalizer)
self.allocations[ptr.value] = mem
| |
<filename>fhir/resources/DSTU2/observation.py
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Observation
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import fhirtypes
from .backboneelement import BackboneElement
from .domainresource import DomainResource
class Observation(DomainResource):
    """Measurements and simple assertions.

    Measurements and simple assertions made about a patient, device or other
    subject.

    The ``effective[x]`` and ``value[x]`` field groups are FHIR choice types:
    at most one member of each group may be set, enforced by
    ``validate_one_of_many`` below.
    """

    resource_type = Field("Observation", const=True)

    comments: fhirtypes.String = Field(
        None,
        alias="comments",
        title="Type `str`.",
        description="Comments about result.",
    )

    status: fhirtypes.Code = Field(
        None,
        alias="status",
        title="Type `Code`.",
        description="registered | preliminary | final | amended +.",
    )

    bodySite: fhirtypes.CodeableConceptType = Field(
        None,
        alias="bodySite",
        title="Type `CodeableConcept`.",
        description="Observed body part.",
    )

    category: fhirtypes.CodeableConceptType = Field(
        None,
        alias="category",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="Classification of type of observation.",
    )

    code: fhirtypes.CodeableConceptType = Field(
        None,
        alias="code",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="Type of observation (code / type).",
    )

    component: ListType[fhirtypes.ObservationComponentType] = Field(
        None,
        alias="component",
        title="List of `ObservationComponent` items (represented as `dict` in JSON).",
        description="Component results.",
    )

    dataAbsentReason: fhirtypes.CodeableConceptType = Field(
        None,
        alias="dataAbsentReason",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="Why the result is missing.",
    )

    device: fhirtypes.ReferenceType = Field(
        None,
        alias="device",
        title=(
            "Type `Reference` referencing `Device, DeviceMetric`"
            " (represented as `dict` in JSON)."
        ),
        description="(Measurement) Device.",
    )

    # --- effective[x] choice group -------------------------------------
    effectiveDateTime: fhirtypes.DateTime = Field(
        None,
        alias="effectiveDateTime",
        title="Type `DateTime` .",
        description="Clinically relevant time/time-period for observation.",
        one_of_many="effective",  # Choice of Data Types. i.e effective[x]
        one_of_many_required=False,
    )

    effectivePeriod: fhirtypes.PeriodType = Field(
        None,
        alias="effectivePeriod",
        title="Type `Period` (represented as `dict` in JSON).",
        description="Clinically relevant time/time-period for observation.",
        one_of_many="effective",  # Choice of Data Types. i.e effective[x]
        one_of_many_required=False,
    )

    encounter: fhirtypes.ReferenceType = Field(
        None,
        alias="encounter",
        title="Type `Reference` referencing `Encounter` (represented as `dict` in JSON).",
        description="Healthcare event during which this observation is made.",
    )

    interpretation: fhirtypes.CodeableConceptType = Field(
        None,
        alias="interpretation",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="High, low, normal, etc.",
    )

    issued: fhirtypes.Instant = Field(
        None,
        alias="issued",
        title="Type `Instant`.",
        description="Date/Time this was made available.",
    )

    method: fhirtypes.CodeableConceptType = Field(
        None,
        alias="method",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="How it was done.",
    )

    performer: ListType[fhirtypes.ReferenceType] = Field(
        None,
        alias="performer",
        title=(
            "List of `Reference` items referencing `Practitioner, "
            "Organization, Patient, RelatedPerson` (represented as `dict` in JSON)."
        ),
        description="Who is responsible for the observation.",
    )

    identifier: ListType[fhirtypes.IdentifierType] = Field(
        None,
        alias="identifier",
        title="List of `Identifier` items (represented as `dict` in JSON).",
        description="Unique Id for this particular observation.",
    )

    referenceRange: ListType[fhirtypes.ObservationReferenceRangeType] = Field(
        None,
        alias="referenceRange",
        title="List of `ObservationReferenceRange` items (represented as `dict` in JSON).",
        description="Provides guide for interpretation.",
    )

    related: ListType[fhirtypes.ObservationRelatedType] = Field(
        None,
        alias="related",
        title="List of `ObservationRelated` items (represented as `dict` in JSON).",
        description="Resource related to this observation.",
    )

    specimen: fhirtypes.ReferenceType = Field(
        None,
        alias="specimen",
        title="Type `Reference` referencing `Specimen` (represented as `dict` in JSON).",
        description="Specimen used for this observation.",
    )

    subject: fhirtypes.ReferenceType = Field(
        None,
        alias="subject",
        title=(
            "Type `Reference` referencing `Patient, Group, Device, "
            "Location` (represented as `dict` in JSON)."
        ),
        description="Who and/or what this is about.",
    )

    # --- value[x] choice group -----------------------------------------
    valueAttachment: fhirtypes.AttachmentType = Field(
        None,
        alias="valueAttachment",
        title="Type `Attachment` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
        None,
        alias="valueCodeableConcept",
        title="Type `CodeableConcept` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueDateTime: fhirtypes.DateTime = Field(
        None,
        alias="valueDateTime",
        title="Type `DateTime`.",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valuePeriod: fhirtypes.PeriodType = Field(
        None,
        alias="valuePeriod",
        title="Type `Period` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueQuantity: fhirtypes.QuantityType = Field(
        None,
        alias="valueQuantity",
        title="Type `Quantity` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueRange: fhirtypes.RangeType = Field(
        None,
        alias="valueRange",
        title="Type `Range` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueRatio: fhirtypes.RatioType = Field(
        None,
        alias="valueRatio",
        title="Type `Ratio` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueSampledData: fhirtypes.SampledDataType = Field(
        None,
        alias="valueSampledData",
        title="Type `SampledData` (represented as `dict` in JSON).",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueString: fhirtypes.String = Field(
        None,
        alias="valueString",
        title="Type `String`.",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    valueTime: fhirtypes.Time = Field(
        None,
        alias="valueTime",
        title="Type `Time`.",
        description="Actual result.",
        one_of_many="value",  # Choice of Data Types. i.e value[x]
        one_of_many_required=False,
    )

    @root_validator(pre=True)
    def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """https://www.hl7.org/fhir/formats.html#choice
        A few elements have a choice of more than one data type for their content.
        All such elements have a name that takes the form nnn[x].
        The "nnn" part of the name is constant, and the "[x]" is replaced with
        the title-cased name of the type that is actually used.
        The table view shows each of these names explicitly.

        Elements that have a choice of data type cannot repeat - they must have a
        maximum cardinality of 1. When constructing an instance of an element with a
        choice of types, the authoring system must create a single element with a
        data type chosen from among the list of permitted data types.
        """
        one_of_many_fields = {
            "effective": ["effectiveDateTime", "effectivePeriod"],
            "value": [
                "valueQuantity",
                "valueCodeableConcept",
                "valueString",
                "valueRange",
                "valueRatio",
                "valueSampledData",
                "valueAttachment",
                "valueTime",
                "valueDateTime",
                "valuePeriod",
            ],
        }
        for prefix, fields in one_of_many_fields.items():
            # Sanity-check that this table agrees with the Field declarations.
            assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
            required = (
                cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
                is True
            )
            found = False
            # Reject payloads that set more than one member of the group.
            for field in fields:
                if field in values and values[field] is not None:
                    if found is True:
                        raise ValueError(
                            "Any of one field value is expected from "
                            f"this list {fields}, but got multiple!"
                        )
                    else:
                        found = True
            if required is True and found is False:
                raise ValueError(f"Expect any of field value from this list {fields}.")

        return values
class ObservationComponent(BackboneElement):
"""Component results.
Some observations have multiple component observations. These component
observations are expressed as separate code value pairs that share the same
attributes. Examples include systolic and diastolic component observations
for blood pressure measurement and multiple component observations for
genetics observations.
"""
resource_type = Field("ObservationComponent", const=True)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Type of component observation (code / type).",
)
dataAbsentReason: fhirtypes.CodeableConceptType = Field(
None,
alias="dataAbsentReason",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Why the component result is missing.",
)
referenceRange: ListType[fhirtypes.ObservationReferenceRangeType] = Field(
None,
alias="referenceRange",
title="List of `ObservationReferenceRange` items (represented as `dict` in JSON).",
description="Provides guide for interpretation of component result.",
)
valueAttachment: fhirtypes.AttachmentType = Field(
None,
alias="valueAttachment",
title="Type `Attachment` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="valueCodeableConcept",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueDateTime: fhirtypes.DateTime = Field(
None,
alias="valueDateTime",
title="Type `DateTime`.",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valuePeriod: fhirtypes.PeriodType = Field(
None,
alias="valuePeriod",
title="Type `Period` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueQuantity: fhirtypes.QuantityType = Field(
None,
alias="valueQuantity",
title="Type `Quantity` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueRange: fhirtypes.RangeType = Field(
None,
alias="valueRange",
title="Type `Range` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueRatio: fhirtypes.RatioType = Field(
None,
alias="valueRatio",
title="Type `Ratio` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueSampledData: fhirtypes.SampledDataType = Field(
None,
alias="valueSampledData",
title="Type `SampledData` (represented as `dict` in JSON).",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueString: fhirtypes.String = Field(
None,
alias="valueString",
title="Type `String`.",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
valueTime: fhirtypes.Time = Field(
None,
alias="valueTime",
title="Type `Time`.",
description="Actual component result.",
one_of_many="value", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
| |
<filename>brainex/experiments/harvest_setup.py
import math
import os
import random
import shutil
import time
from datetime import datetime
from logging import warning
import numpy as np
import pandas as pd
# spark_location = '/Users/Leo/spark-2.4.3-bin-hadoop2.7' # Set your own
# java8_location = '/Library/Java/JavaVirtualMachines/jdk1.8.0_151.jdk/Contents/Home/jre'
# os.environ['JAVA_HOME'] = java8_location
# findspark.init(spark_home=spark_location)
from brainex.experiments.harvests import experiment_BrainEX, experiment_GENEX
from brainex.utils.gxe_utils import from_csv
########################################################################################################################
# eu_exclude = ['ChlorineConcentration',
# 'ElectricDevices',
# 'Haptics',
# 'InsectEPGRegularTrain',
# 'Lightning2',
# 'Meat',
# 'Trace',
# ]
########################################################################################################################
def experiment_genex(mp_args, data, output, feature_num, num_sample, query_split,
                     dist_type, _lb_opt, _radius, use_spark: bool, loi_range: float, st: float, paa_seg: float):
    """Run one Genex-vs-brute-force-vs-PAA query experiment on a dataset.

    Builds a Genex engine from the CSV at *data*, clusters it, generates
    ``query_split * num_rows`` random queries, and for each query runs the
    Genex query, the brute-force query and the pure-PAA query, recording
    timings and distance errors row-by-row into a DataFrame saved at *output*.

    :param mp_args: dict with 'num_worker', 'driver_mem', 'max_result_mem'
    :param data: path of the input CSV
    :param output: path of the result CSV
    :param query_split: fraction of rows to use as queries (min 1 query)
    :param loi_range: +/- fraction around the max sequence length that bounds
        the length of interest
    :return: the (stopped) Genex engine, for inspection by the caller
    """
    # create gxdb from a csv file
    # set up where to save the results
    result_headers = np.array(
        [['cluster_time', 'paa_build_time',
          'query',
          'bf_time', 'paa_time', 'gx_time',
          'dist_diff_btw_paa_bf', 'dist_diff_btw_gx_bf',
          'bf_dist', 'bf_match',
          'paa_dist', 'paa_match',
          'gx_dist', 'gx_match',
          'num_rows', 'num_cols_max', 'num_cols_median', 'data_size', 'num_query']])
    result_df = pd.DataFrame(columns=result_headers[0, :])

    gxe = from_csv(data, num_worker=mp_args['num_worker'], driver_mem=mp_args['driver_mem'],
                   max_result_mem=mp_args['max_result_mem'],
                   feature_num=feature_num, use_spark=use_spark, _rows_to_consider=num_sample,
                   header=None)
    num_rows = len(gxe.data_raw)
    num_query = max(int(query_split * num_rows), 1)
    # NOTE(review): num_query is clamped to >= 1 above, so this assert can
    # never fire; kept for parity with the original behavior.
    try:
        assert num_query > 0
    except AssertionError:
        raise Exception('Number of query with given query_split yields zero query sequence, try increase query_split')
    loi = (int(gxe.get_max_seq_len() * (1 - loi_range)), int(gxe.get_max_seq_len() * (1 + loi_range)))
    print('Number of rows is ' + str(num_rows))
    print('Max seq len is ' + str(gxe.get_max_seq_len()))

    result_df = result_df.append({'num_rows': num_rows}, ignore_index=True)  # use append to create the first row
    result_df['num_cols_max'] = gxe.get_max_seq_len()
    result_df['num_cols_median'] = np.median(gxe.get_seq_length_list())
    result_df['data_size'] = gxe.get_data_size()
    result_df['num_query'] = num_query

    print('Generating query of max seq len ...')
    # generate the query sets
    query_set = list()
    # get the number of subsequences
    # randomly pick a sequence as the query from the query sequence, make sure the picked sequence is in the input list
    # this query'id must exist in the database
    for i in range(num_query):
        # seed per query index for reproducibility across runs
        random.seed(i)
        query_len = random.choice(list(range(loi[0], loi[1])))
        this_query = gxe.get_random_seq_of_len(query_len, seed=i)
        query_set.append(this_query)
        print('Adding to query set: ' + str(this_query))

    print('Performing clustering ...')
    print('Using dist_type = ' + str(dist_type))
    print('Using loi offset of ' + str(loi_range))
    print('Building length of interest is ' + str(loi))
    print('Building Similarity Threshold is ' + str(st))
    cluster_start_time = time.time()
    gxe.build(st=st, dist_type=dist_type, loi=loi)
    cluster_time = time.time() - cluster_start_time
    print('Clustering took ' + str(cluster_time) + ' sec')

    # randomly pick a sequence as the query from the query sequence, make sure the picked sequence is in the input list
    # this query'id must exist in the database
    print('Preparing PAA Subsequences')
    start = time.time()
    gxe.build_piecewise(paa_seg, _dummy_slicing=True)
    paa_build_time = time.time() - start
    print('Prepare PAA subsequences took ' + str(paa_build_time))

    result_df = result_df.append({'cluster_time': cluster_time,
                                  'paa_build_time': paa_build_time
                                  }, ignore_index=True)
    overall_diff_gxbf_list = []
    overall_diff_paabf_list = []

    print('Evaluating ...')
    for i, q in enumerate(query_set):
        print('Dataset: ' + data + ' - dist_type: ' + dist_type + '- Querying #' + str(i) + ' of ' + str(
            len(query_set)) + '; query = ' + str(q))
        start = time.time()
        print('Running Genex Query ...')
        query_result_gx = gxe.query(query=q, best_k=15, _lb_opt=_lb_opt, _radius=_radius)
        # print('...Not Actually running... Simulating results!')
        # query_result_gx = [(0.0, [1, 2, 3])] * 15
        gx_time = time.time() - start
        print('Genex query took ' + str(gx_time) + ' sec')

        start = time.time()
        print('Running Brute Force Query ...')
        query_result_bf = gxe.query_brute_force(query=q, best_k=15, _use_cache=False)
        # print('...Not Actually running... Simulating results!')
        # query_result_bf = [(0.0, [1, 2, 3])] * 15
        bf_time = time.time() - start
        print('Brute force query took ' + str(bf_time) + ' sec')

        # Pure PAA Query
        start = time.time()
        print('Running Pure PAA Query ...')
        query_result_paa = gxe.query_brute_force(query=q, best_k=15, _use_cache=False, _piecewise=True)
        paa_time = time.time() - start
        print('Pure PAA query took ' + str(paa_time) + ' sec')

        # save the results
        print('Saving results for query #' + str(i) + ' of ' + str(len(query_set)))
        result_df = result_df.append({'query': str(q), 'bf_time': bf_time, 'paa_time': paa_time, 'gx_time': gx_time},
                                     ignore_index=True)
        # one row per rank position: compare each method's distance to brute force
        for bf_r, paa_r, gx_r in zip(query_result_bf, query_result_paa, query_result_gx):
            diff_gxbf = abs(gx_r[0] - bf_r[0])
            diff_paabf = abs(paa_r[0] - bf_r[0])
            overall_diff_gxbf_list.append(diff_gxbf)
            overall_diff_paabf_list.append(diff_paabf)
            result_df = result_df.append({'dist_diff_btw_paa_bf': diff_paabf,
                                          'dist_diff_btw_gx_bf': diff_gxbf,
                                          'bf_dist': bf_r[0], 'bf_match': bf_r[1],
                                          'paa_dist': paa_r[0], 'paa_match': paa_r[1],
                                          'gx_dist': gx_r[0], 'gx_match': gx_r[1],
                                          }, ignore_index=True)
        print('Current GX error is ' + str(np.mean(overall_diff_gxbf_list)))
        print('Current PAA error is ' + str(np.mean(overall_diff_paabf_list)))
        # checkpoint the partial results after every query
        result_df.to_csv(output)

    print('Done')
    print('Result saved to ' + output)
    # NOTE(review): this final save is redundant when the loop ran (the last
    # iteration already wrote the same frame), but harmless and idempotent.
    result_df.to_csv(output)
    # terminate the spark session
    gxe.stop()
    return gxe
def generate_exp_set_from_root(root, output, exclude_list, dist_type: str, notes: str, soi):
    """Build experiment configs for every UCR dataset under *root* whose
    ``DataFrame.size`` falls within *soi* (size of interest).

    A dated output directory is created under *output* (any previous folder
    with the same name is wiped first). The returned configs are sorted by
    ascending dataset size so smaller experiments run first.

    :param root: directory containing one sub-directory per UCR dataset
    :param output: directory in which the run's output folder is created
    :param exclude_list: dataset names to skip
    :param dist_type: distance type recorded in each config
    :param notes: tag appended to the output folder name
    :param soi: (min_size, max_size) bounds on the dataset's DataFrame size
    :return: list of config dicts consumable by the experiment runners
    :raises Exception: if no dataset satisfies *soi*
    """
    today = datetime.now()
    output_dir_path = os.path.join(output, today.strftime("%b-%d-%Y-") + str(today.hour) + '-N-' + notes)
    if not os.path.exists(output_dir_path):
        print('Creating output path: ' + output_dir_path)
        os.mkdir(output_dir_path)
    else:
        print('Output folder already exist, overwriting')
        shutil.rmtree(output_dir_path, ignore_errors=False, onerror=None)
        os.mkdir(output_dir_path)

    config_list = []
    dataset_list = get_dataset_train_path(root, exclude_list)
    for d_name, dataset_path in dataset_list.items():
        # check dataset size against the size-of-interest bounds
        df = pd.read_csv(dataset_path, sep='\t', header=None)
        if df.size < soi[0] or df.size > soi[1]:
            continue
        print('Distance type - ' + dist_type + ', adding ' + dataset_path)
        config_list.append((df.size, {  # keep the size so we can sort below
            'data': dataset_path,
            'output': os.path.join(output_dir_path, d_name + '_' + dist_type),
            'feature_num': 1,  # IMPORTANT this should be 1 for the UCR archive
            'dist_type': dist_type
        }))
    config_list.sort(key=lambda x: x[0])  # sort by dataset size, smallest first
    config_list = [x[1] for x in config_list]  # drop the size sort key
    if len(config_list) < 1:
        raise Exception('No dataset satisfied the given soi')
    print('Added ' + str(len(config_list)) + ' datasets with the given soi')
    return config_list
def generate_ex_set_GENEX(root, output, dist_type: str, take=66):
    """Build GENEX experiment configs from paired dataset/query files found
    under *root*, sorted by ascending dataset size and truncated to the
    first *take* entries."""
    today = datetime.now()
    output_dir_path = os.path.join(output, today.strftime("%b-%d-%Y-") + str(today.hour))
    if os.path.exists(output_dir_path):
        print('Output folder already exist, overwriting')
        shutil.rmtree(output_dir_path, ignore_errors=False, onerror=None)
    else:
        print('Creating output path: ' + output_dir_path)
    os.mkdir(output_dir_path)

    sized_configs = []
    for dataset, queryset in get_dataset_GENEX(root):
        d_name = dataset.split('_')[0].split('/')[-1]
        print('Distance type - ' + dist_type + ', adding ' + d_name)
        frame = pd.read_csv(dataset, header=None)
        sized_configs.append((frame.size, {  # keep the size as the sort key
            'dataset': dataset,
            'queryset': queryset,
            'output': os.path.join(output_dir_path, d_name + '_' + dist_type),
            'feature_num': 1,  # IMPORTANT this should be 1 for the UCR archive
            'dist_type': dist_type
        }))
    sized_configs.sort(key=lambda pair: pair[0])
    config_list = [cfg for _, cfg in sized_configs][:take]
    if len(config_list) < 1:
        raise Exception('No file found in given directory')
    print('Added ' + str(len(config_list)) + ' datasets with the given soi')
    return config_list
def run_exp_set(exp_set, mp_args, num_sample, query_split, cases_split,
                _lb_opt, radius, use_spark, loi_range, st, n_segment, best_ks, test_option='BrainEX'):
    """Dispatch every experiment config in *exp_set* to the runner selected
    by *test_option* (matched case-insensitively).

    :param exp_set: list of config dicts (see generate_exp_set_from_root)
    :param test_option: one of the names in ``options`` below
    :raises Exception: if *test_option* is not recognized
    """
    # Fix: this list previously omitted the three BrainEX options even though
    # they are handled below, so the error message misled users. Keep it in
    # sync with the branches.
    options = ['regular', 'DSS', 'dynamic', 'BrainEX',
               'BrainEXwithoutGenex', 'BrainEXwithSAXPAAOnly']
    opt = test_option.lower()  # hoisted: comparisons below are case-insensitive
    for i, es in enumerate(exp_set):
        print('$$ Running experiment set: ' + str(i) + ' of ' + str(len(exp_set)))
        if opt == 'dss':
            experiment_genex_dss(mp_args, **es, num_sample=num_sample, query_split=query_split, cases_split=cases_split,
                                 _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                                 paa_seg=n_segment)
        elif opt == 'regular':
            experiment_genex(mp_args, **es, num_sample=num_sample, query_split=query_split,
                             _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                             paa_seg=n_segment)
        elif opt == 'dynamic':
            experiment_genex_dynamic(mp_args, **es, num_sample=num_sample, query_split=query_split,
                                     _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                                     paa_seg=n_segment)
        elif opt == 'brainex':
            experiment_BrainEX(mp_args, **es, num_sample=num_sample, query_split=query_split,
                               _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                               n_segment=n_segment, best_ks=best_ks, run_genex=True)
        elif opt == 'brainexwithoutgenex':
            experiment_BrainEX(mp_args, **es, num_sample=num_sample, query_split=query_split,
                               _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                               n_segment=n_segment, best_ks=best_ks, run_genex=False)
        elif opt == 'brainexwithsaxpaaonly':
            experiment_BrainEX(mp_args, **es, num_sample=num_sample, query_split=query_split,
                               _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, loi_range=loi_range, st=st,
                               n_segment=n_segment, best_ks=best_ks, run_genex=False, run_brainex=False,
                               run_bruteforce=False)
        else:
            raise Exception('Unrecognized test option, it must be one of the following: ' + str(options))
def run_exp_set_GENEX(exp_set, mp_args, _lb_opt, radius, use_spark, st):
    """Run the GENEX experiment once for each configuration in *exp_set*."""
    total = len(exp_set)
    for index, config in enumerate(exp_set):
        print('$$ Running experiment set: ' + str(index) + ' of ' + str(total))
        experiment_GENEX(mp_args, **config, _lb_opt=_lb_opt, _radius=radius, use_spark=use_spark, st=st,)
def get_dataset_train_path(root, exclude_list):
    """Map each dataset name under *root* to the path of its UCR
    ``<name>_TRAIN.tsv`` file.

    Datasets listed in *exclude_list* are skipped. A missing train file is
    logged as a warning but the entry is still returned, matching the
    caller's expectation that it decides what to do with missing files.

    :param root: directory containing one sub-directory per dataset
    :param exclude_list: dataset names to skip
    :return: dict of ``{dataset_name: train_file_path}``
    :raises AssertionError: if a non-excluded entry under *root* is not a
        directory
    """
    trailing = '_TRAIN.tsv'
    data_path_list = {}
    for name in os.listdir(root):
        if name in exclude_list:
            continue
        assert os.path.isdir(os.path.join(root, name))
        this_path = os.path.join(root, name, name + trailing)
        # Fix: the original wrapped an `assert` in try/except purely to emit
        # this warning; a plain existence check says the same thing without
        # abusing assert (which vanishes under `python -O`).
        if not os.path.isfile(this_path):
            warning('File not exist: ' + this_path)
        data_path_list[name] = this_path
    return data_path_list
def get_dataset_GENEX(root):
    """Pair up the extension-less files directly under *root* as
    ``(dataset, queryset)`` tuples, in sorted-name order.

    Files whose names contain a dot are ignored. The file at each even
    sorted index is treated as a dataset and the following file as its
    query set.
    """
    names = [entry for entry in sorted(os.listdir(root)) if '.' not in entry]
    pairs = []
    for idx in range(0, len(names), 2):
        dataset_path = os.path.join(root, names[idx])
        queryset_path = os.path.join(root, names[idx + 1])
        pairs.append((dataset_path, queryset_path))
    return pairs
def experiment_genex_dss(mp_args, data, output, feature_num, num_sample, query_split, cases_split,
dist_type, _lb_opt, _radius, use_spark: bool, loi_range: float, st: float, paa_seg: float):
# set up where to save the results
result_headers = np.array(
[['gx_cluster_time', 'dssGx_cluster_time', 'paa_build_time',
'query',
'bf_time', 'paa_time', 'gx_time', 'dssGx_time',
'dist_diff_btw_paa_bf', 'dist_diff_btw_gx_bf', 'dist_diff_btw_dssGx_bf',
'bf_dist', 'bf_match',
'paa_dist', 'paa_match',
'gx_dist', 'gx_match',
'dssGx_dist', 'dssGx_match',
'data_size', 'num_query']])
result_df = pd.DataFrame(columns=result_headers[0, :])
# only take one time series at a time
data_df = pd.read_csv(data, sep='\t', header=None)
| |
#!/usr/bin/env python
"""
Tools to work with data from different spectrographs: CARMENES, HARPS, HARPN.
Uses functions from `harpsutils` and `carmenesutils`.
"""
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import pandas as pd
from . import carmenesutils
from . import harpsutils
from . import expresutils
###############################################################################

# Spectrograph data
# -----------------

# Resolution
# Spectral resolution per instrument ID (see also `dicnord`, `dicoref` below).
dicres = {
    'CARM_VIS': 94600,
    'CARM_NIR': 80400,
    'HARPS': 115000,
    'HARPN': 115000,
    'EXPRES': 150000,
}
# Number of orders
dicnord = {
    'CARM_VIS': 61,
    'CARM_NIR': 28,
    'HARPS': 72,
    'HARPN': 69,
    'EXPRES': 86,
}


def inst_nord(inst, carmnirsplit=True, notfound=None, verb=True):
    """Get number of orders for instrument `inst`.

    Parameters
    ----------
    inst : str
        Instrument identifier (a key of `dicnord`).
    carmnirsplit : bool (default True)
        Multiply CARM_NIR orders by 2. Usually use CARM_NIR orders split by
        half, so have double of orders.
    notfound :
        Value returned when `inst` is not a known instrument.
    verb : bool (default True)
        Print a message when `inst` is not found.

    Returns
    -------
    nord : int, or `notfound` for an unknown instrument.
    """
    try:
        nord = dicnord[inst]
    except KeyError:  # was a bare `except:`, which hid unrelated errors
        if verb:
            print('Instrument {} not available, return {}'.format(inst, notfound))
        return notfound
    if inst == 'CARM_NIR' and carmnirsplit:
        nord *= 2  # orders are split at the center -> double the count
    return nord
# Reference order
dicoref = {
    'CARM_VIS': 36,
    'CARM_NIR': 11,  # This is the double (split-order) index already
    'HARPS': 55,
    'HARPN': 55,
    'EXPRES': 60,  # ?
}


def inst_oref(inst, carmnirsplit=True, notfound=None, verb=True):
    """Get reference order for instrument `inst`.

    Parameters
    ----------
    inst : str
        Instrument identifier (a key of `dicoref`).
    carmnirsplit : bool (default True)
        Multiply CARM_NIR `oref` by 2. Usually use CARM_NIR orders split by
        half, so have double of orders. The stored value already assumes the
        split; it is halved when `carmnirsplit` is False.
    notfound :
        Value returned when `inst` is not a known instrument.
    verb : bool (default True)
        Print a message when `inst` is not found.

    Returns
    -------
    oref : int, or `notfound` for an unknown instrument.
    """
    try:
        oref = dicoref[inst]
    except KeyError:  # was a bare `except:`, which hid unrelated errors
        if verb:
            print('Instrument {} not available, return {}'.format(inst, notfound))
        return notfound
    if inst == 'CARM_NIR' and not carmnirsplit:
        oref //= 2  # stored value assumes split orders; halve for unsplit
    return oref
# RV per pixel [km/s]
dicrvpixmed = {
    'CARM_VIS': 1.258,
    'CARM_NIR': 1.356,
    'HARPS': 0.820,
    'HARPN': 0.820,
    'EXPRES': 0.500,
}


# NOTE(review): this function is redefined further down in this module (the
# later definition returns the spectral sampling from `dictspix`), so this
# definition is shadowed at import time — confirm which one callers expect.
def inst_rvpixmed(inst, notfound=None, verb=True):
    """Get the median delta RV per pixel [km/s] for instrument `inst`.

    Parameters
    ----------
    inst : str
        Instrument identifier (a key of `dicrvpixmed`).
    notfound :
        Value returned when `inst` is not a known instrument.
    verb : bool (default True)
        Print a message when `inst` is not found.

    Returns
    -------
    rvpixmed : float, or `notfound` for an unknown instrument.
    """
    try:
        rvpixmed = dicrvpixmed[inst]
    except KeyError:  # was a bare `except:`, which hid unrelated errors
        if verb:
            print('Instrument {} not available, return {}'.format(inst, notfound))
        return notfound
    return rvpixmed
# Spectral sampling s [pix/SE] (SE: spectral element, ~ FWHM)
dictspix = {
    'CARM_VIS': 2.5,
    'CARM_NIR': 2.8,
    'HARPS': 3.2,
    'HARPN': 3.2,
    'EXPRES': 3.6,  # 4.0
}


# FIXME(review): this redefinition SHADOWS the earlier `inst_rvpixmed`
# (median RV per pixel) — it looks like a copy-paste slip and was probably
# meant to have its own name (e.g. `inst_tspix`). Renaming would change the
# module's public interface, so it is only flagged here.
def inst_rvpixmed(inst, notfound=None, verb=True):
    """Get sampling [pix/SE] for instrument `inst`.

    Parameters
    ----------
    inst : str
        Instrument identifier (a key of `dictspix`).
    notfound :
        Value returned when `inst` is not a known instrument.
    verb : bool (default True)
        Print a message when `inst` is not found.

    Returns
    -------
    tspix : float, or `notfound` for an unknown instrument.
    """
    try:
        tspix = dictspix[inst]
    except KeyError:  # was a bare `except:`, which hid unrelated errors
        if verb:
            print('Instrument {} not available, return {}'.format(inst, notfound))
        return notfound
    return tspix
###############################################################################
# Reduced spectra
# ---------------
# Read reduced spectrum
def fitsred_read(filin, inst,
                 # CARMENES
                 carmnirdiv=True,
                 # HARPS/N
                 harpblaze=True, dirblaze=None, filblaze=None,
                 # EXPRES
                 expresw='bary_excalibur',
                 ):
    """Read a reduced spectrum file for instrument `inst`.

    Parameters
    ----------
    filin : str
        Path to the reduced spectrum file.
    inst : str
        One of 'CARM_VIS', 'CARM_NIR', 'HARPS', 'HARPN', 'EXPRES'.
    carmnirdiv : bool, default True
        If True, divide the orders by the discontinuity at the center. If not, leave them as read from the FITS. Only works for `inst='CARM_NIR'`.
    harpblaze : bool, default True
        If True, get the blaze function from the corresponding file (see `dirblaze` and `filblaze`). If False, the output corresponding to the blaze, `c`, is an array of ones with the shape of `f`.
    dirblaze : str, default None
        Directory containing the blaze file. If None (default), assume the blaze file is in the same directory as `filin`.
    filblaze : str, default None
        Blaze file name. If None (default), get the file name from the header.
    expresw : str, default 'bary_excalibur'
        Which EXPRES wavelength array to expose as `w`.

    Returns
    -------
    w, f, sf, c :
        Wavelength, flux, flux uncertainty and blaze as read from the file.
        NOTE(review): for HARPS/N, `sf` is filled with zeros (no uncertainty
        is read by `drs_e2dsred_read`).
    header :
        Header of the main extension.
    dataextra : dict
        Instrument-specific extras (empty except for EXPRES).
    """
    if inst == 'CARM_VIS':
        w, f, sf, c, header = carmenesutils.caracal_fitsred_read(filin)
        dataextra = {}

    elif inst == 'CARM_NIR':
        w, f, sf, c, header = carmenesutils.caracal_fitsred_read(filin)
        dataextra = {}
        if carmnirdiv:
            # Split each NIR order at its central discontinuity.
            # w, f, sf, c = carmenesutils.caracal_fitsrednir_divide_ords(w=w, f=f, sf=sf, c=c)
            a = carmenesutils.caracal_fitsrednir_divide_ords(w=w, f=f, sf=sf, c=c)
            w, f, sf, c = a['w'], a['f'], a['sf'], a['c']

    elif inst == 'HARPS' or inst == 'HARPN':
        w, f, c, header, _ = harpsutils.drs_e2dsred_read(filin, readblaze=harpblaze, dirblaze=dirblaze, filblaze=filblaze, inst=inst)
        # No uncertainty array available here; placeholder of zeros.
        sf = np.zeros_like(w)
        dataextra = {}

    elif inst == 'EXPRES':
        w, wcb, we, wecb, mwecb, f, sf, c, b, mf, header, header1, header2 = expresutils.drs_fitsred_read(filin)
        # Select which of the wavelength solutions to expose as `w`.
        # NOTE(review): 'wavelength' -> wcb and 'bary_wavelength' -> w look
        # possibly swapped — confirm against `expresutils.drs_fitsred_read`.
        if expresw == 'bary_excalibur':
            w = wecb
        elif expresw == 'excalibur':
            w = we
        elif expresw == 'bary_wavelength':
            w = w
        elif expresw == 'wavelength':
            w = wcb
        dataextra = {'blaze': b, 'pixel_mask': mf, 'excalibur_mask': mwecb, 'header1': header1, 'header2': header2}

    return w, f, sf, c, header, dataextra
# -----------------------------------------------------------------------------
# Values from header
# ------------------
# Get BJD from header
def header_bjd_lisobs(lisobs, inst, name='bjd', notfound=np.nan, ext=0):
    """
    Get the BJD from the header of the observations in `lisobs`.

    Parameters
    ----------
    name : str or None (default 'bjd')
        Change the pandas dataframe column name to `name`. If `None`, keep
        the header keyword as the column name.

    Returns
    -------
    lisbjd : pandas DataFrame returned by the instrument-specific reader.

    Raises
    ------
    ValueError
        If `inst` is not a supported instrument (previously this crashed
        with an UnboundLocalError on `lisbjd`).
    """
    if inst == 'CARM_VIS' or inst == 'CARM_NIR':
        lisbjd = carmenesutils.caracal_bjd_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
    elif inst == 'HARPS' or inst == 'HARPN':
        lisbjd = harpsutils.drs_bjd_lisobs(lisobs, inst, notfound=notfound, ext=ext, name=name)
    elif inst == 'EXPRES':
        lisbjd = expresutils.drs_bjd_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
    else:
        raise ValueError('Instrument {} not supported'.format(inst))
    return lisbjd
# Get readout noise RON from header
def header_ron_lisobs(lisobs, inst, name='ron', notfound=np.nan, ext=0):
    """
    Get the RON (readout noise) from the header of the observations in `lisobs`.

    Parameters
    ----------
    name : str or None (default 'ron')
        Change the pandas dataframe column name to `name`. If `None`, keep the
        header keyword as the column name.

    Returns
    -------
    lisron : pandas DataFrame.

    Raises
    ------
    ValueError
        If `inst` is not a supported instrument (previously this crashed
        with an UnboundLocalError on `lisron`).
    """
    if inst == 'CARM_VIS':
        lisron = carmenesutils.caracal_ron_lisobs_vis(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisron.rename(columns={'E_READN1': name}, inplace=True)
    elif inst == 'CARM_NIR':
        lisron = carmenesutils.caracal_ron_lisobs_nir(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisron.rename(columns={'E_READN': name}, inplace=True)
    elif inst == 'HARPS' or inst == 'HARPN':
        lisron = harpsutils.drs_ron_lisobs(lisobs, inst, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            # NOTE(review): `headerkwinst` may return np.nan (outfail), which
            # would break the string concatenation below — confirm upstream.
            kwinst = harpsutils.headerkwinst(inst, outfail=np.nan)
            lisron.rename(columns={kwinst + 'DRS CCD SIGDET': name}, inplace=True)
    elif inst == 'EXPRES':
        # TODO: RON not extracted for EXPRES yet; set to 0 for now.
        lisron = pd.DataFrame(np.zeros_like(lisobs, dtype=float), columns=[name], index=lisobs)
    else:
        raise ValueError('Instrument {} not supported'.format(inst))
    return lisron
# Get exposure time from header
def header_exptime_lisobs(lisobs, inst, name='exptime', notfound=np.nan, ext=0):
    """
    Get the exposure time from the header of the observations in `lisobs`.

    Parameters
    ----------
    name : str or None (default 'exptime')
        Change the pandas dataframe column name to `name`. If `None`, keep
        the header keyword as the column name.

    Returns
    -------
    lisexptime : pandas DataFrame.

    Raises
    ------
    ValueError
        If `inst` is not a supported instrument (previously this crashed
        with an UnboundLocalError on `lisexptime`).
    """
    if inst == 'CARM_VIS' or inst == 'CARM_NIR':
        lisexptime = carmenesutils.caracal_exptime_lisobs(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisexptime.rename(columns={'EXPTIME': name}, inplace=True)
    elif inst == 'HARPS' or inst == 'HARPN':
        lisexptime = harpsutils.drs_exptime_lisobs(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisexptime.rename(columns={'EXPTIME': name}, inplace=True)
    # Consistency fix: was a plain `if`, unlike the `elif` chain used by the
    # sibling header_* functions (same behavior, clearer intent).
    elif inst == 'EXPRES':
        lisexptime = expresutils.drs_exptime_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
    else:
        raise ValueError('Instrument {} not supported'.format(inst))
    return lisexptime
# Get airmass time from header
def header_airmass_lisobs(lisobs, inst, name='airmass', notfound=np.nan, ext=0):
    """
    Get the airmass from the header of the observations in `lisobs`.

    Parameters
    ----------
    name : str or None (default 'airmass')
        Change the pandas dataframe column name to `name`. If `None`, keep
        the header keyword as the column name.

    Returns
    -------
    lisairmass : pandas DataFrame.

    Raises
    ------
    ValueError
        If `inst` is not a supported instrument (previously this crashed
        with an UnboundLocalError on `lisairmass`).
    """
    if inst == 'CARM_VIS' or inst == 'CARM_NIR':
        lisairmass = carmenesutils.caracal_airmass_lisobs(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisairmass.rename(columns={'AIRMASS': name}, inplace=True)
    elif inst == 'HARPS' or inst == 'HARPN':
        lisairmass = harpsutils.drs_airmass_lisobs(lisobs, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            lisairmass.rename(columns={'AIRMASS': name}, inplace=True)
    elif inst == 'EXPRES':
        lisairmass = expresutils.drs_airmass_lisobs(lisobs, notfound=notfound, ext=ext, name=name)
    else:
        raise ValueError('Instrument {} not supported'.format(inst))
    return lisairmass
# Get SNR from header
def header_snr_lisobs(lisobs, inst, name='snro', ords=None, notfound=np.nan, ext=0):
    """
    Get the SNR from the header of the orders `ords` for the observations in `lisobs`.

    Parameters
    ----------
    name : {'ord', 'snro'} or None
        Change to pandas dataframe column name. If `ord`, change to the order
        number (an int, e.g. 36). If `snro`, change to e.g. `snro36`. If None,
        keep the header keyword as the column name.

    Returns
    -------
    lissnr : pandas DataFrame.

    Raises
    ------
    ValueError
        If `name` is not 'ord', 'snro' or None (previously this crashed with
        a NameError on `changecol`), or if `inst` is not a supported
        instrument (previously an UnboundLocalError on `lissnr`).
    """
    if name is not None and name not in ('ord', 'snro'):
        raise ValueError("name must be 'ord', 'snro' or None, got {}".format(name))
    if inst == 'CARM_VIS' or inst == 'CARM_NIR':
        lissnr = carmenesutils.caracal_snr_lisobs(lisobs, ords=ords, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            if name == 'ord':
                changecol = {i: int(i.replace('HIERARCH CARACAL FOX SNR ', '')) for i in lissnr.columns}
            elif name == 'snro':
                changecol = {i: i.replace('HIERARCH CARACAL FOX SNR ', 'snro') for i in lissnr.columns}
            lissnr.rename(columns=changecol, inplace=True)
    elif inst == 'HARPS' or inst == 'HARPN':
        lissnr = harpsutils.drs_snr_lisobs(lisobs, ords=ords, notfound=notfound, ext=ext)
        # Change column names
        if name is not None:
            kwinst = harpsutils.headerkwinst(inst, outfail=np.nan)
            if name == 'ord':
                changecol = {i: int(i.replace('{}DRS SPE EXT SN'.format(kwinst), '')) for i in lissnr.columns}
            elif name == 'snro':
                changecol = {i: i.replace('{}DRS SPE EXT SN'.format(kwinst), 'snro') for i in lissnr.columns}
            lissnr.rename(columns=changecol, inplace=True)
    elif inst == 'EXPRES':
        # EXPRES S/N not in FITS header, get from spectrum
        lissnr = expresutils.drs_snr_lisobs(lisobs, ords, name=name)
    else:
        raise ValueError('Instrument {} not supported'.format(inst))
    return lissnr
# Get RV corrections from header
def header_rvcorrection_lisobs(lisobs, inst, name='shift', notfound=np.nan, ext=0):
| |
import os
import time
import csv
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.nn.utils import clip_grad_norm_ as clip_grad_norm
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from utils import tokenization, optimization, constants, misc
from utils.data import *
from utils.evaluator import BLEUEvaluator
def get_transfer_data(data_dir, data_name):
    """Load a two-label style-transfer split from `<data_name>.0` / `<data_name>.1`.

    args:
        data_dir: str
        data_name: str
    return:
        data: dict of {"src_str": list of str, "lab": list of int}
        Sentences from file `.0` (label 0) come first, then file `.1` (label 1).
    """
    def _read_lines(path):
        # One stripped sentence per line; iterate the file directly instead
        # of materializing it with readlines().
        with open(path, 'r') as f:
            return [line.strip() for line in f]

    src_0 = _read_lines(os.path.join(data_dir, data_name + ".0"))
    src_1 = _read_lines(os.path.join(data_dir, data_name + ".1"))
    src = src_0 + src_1
    lab = [0] * len(src_0) + [1] * len(src_1)
    assert len(src) == len(lab)
    data = {"src_str": src, "lab": lab}
    print("%s data has been loaded" % data_name)
    for lab_val, count in enumerate(np.bincount(data["lab"])):
        print("number of label %d: %d" % (lab_val, count))
    return data
def load_and_cache_data(args, data_name, tokenizer):
    """Load transfer data, tokenize it, and cache the result with torch.save.

    The cache file name encodes the SOS/EOS/mask options so different
    configurations do not collide.

    return:
        data: dict of {"src_str": list of str,
                       "src_ind": list of int,
                       "lab": list of int}
    """
    sos_str = "_sos" if args.use_sos else ""
    eos_str = "_eos" if args.use_eos else ""
    mask_str = "_mask" if "mask" in args.vocab_file_name else ""
    cached_data_file = os.path.join(
        args.data_dir,
        f"cached_transfer_{data_name}{sos_str}{eos_str}{mask_str}"
    )
    if os.path.exists(cached_data_file) and not args.overwrite_cache:
        print("Loading data from cached data file %s" % cached_data_file)
        data = torch.load(cached_data_file)
    else:
        print("Creating cached data file from data at %s" % cached_data_file)
        data = get_transfer_data(args.data_dir, data_name)
        # Optional SOS/EOS wrappers applied uniformly — replaces four
        # near-identical copy-pasted branches with one loop.
        id_prefix = [tokenizer.SOS_ID] if args.use_sos else []
        id_suffix = [tokenizer.EOS_ID] if args.use_eos else []
        tok_prefix = [tokenizer.SOS_TOKEN] if args.use_sos else []
        tok_suffix = [tokenizer.EOS_TOKEN] if args.use_eos else []
        index_src = []
        str_src = []
        for text in data['src_str']:
            index_src.append(id_prefix + tokenizer.encode(text) + id_suffix)
            # ' '.join of a single-element list is the text itself, so the
            # no-SOS/no-EOS case is unchanged.
            str_src.append(' '.join(tok_prefix + [text] + tok_suffix))
        data['src_ind'] = index_src
        data['src_str'] = str_src
        torch.save(data, cached_data_file)
    return data
def lambda_schedule(num_iter, start=0.0, stop=1.0, ratio=0.1):
    """Linear warm-up schedule of length `num_iter`.

    Values ramp linearly from `start` to `stop` over the first `ratio`
    fraction of iterations, then stay at `stop`.

    Bug fix: `start` was previously ignored (the ramp always began at 0);
    with the default `start=0.0` the output is unchanged.
    """
    lambdas = np.ones(num_iter) * stop
    warmup = num_iter * ratio
    for i in range(int(warmup)):
        lambdas[i] = start + (stop - start) * i / warmup
    return lambdas
class BasicTrainer:
    """Base trainer: wires up dataloaders, optimizer and LR scheduler.

    Subclasses must implement `train`, `evaluate` and `test`.
    """
    def __init__(self, args, model, train_data=None, dev_data=None, test_data=None,
                 tokenizer=None):
        self.args = args
        self.model = model
        self.optimizer = None
        self.scheduler = None
        # Build a dataloader only for the splits that were provided.
        self.train_dataloader = self.get_dataloader(train_data, "train")\
            if train_data else None
        self.dev_dataloader = self.get_dataloader(dev_data, "dev")\
            if dev_data else None
        self.test_dataloader = self.get_dataloader(test_data, "test")\
            if test_data else None
        if self.train_dataloader:
            self.optimizer, self.scheduler = self.get_optimizer()

    def get_dataloader(self, data, data_name):
        """Wrap `data` (dict with "src_ind" and "lab") in a DataLoader."""
        args = self.args
        if data_name == "train":
            shuffle = args.shuffle
            batch_size = args.batch_size
        else:
            # Evaluation splits are never shuffled.
            shuffle = False
            batch_size = args.batch_size
        dataset = ClassifierDataset(data["src_ind"], data["lab"])
        # Bug fix: previously `args.batch_size` was passed directly,
        # silently ignoring the `batch_size` selected above.
        dataloader = DataLoader(dataset=dataset,
                                batch_size=batch_size,
                                shuffle=shuffle,
                                num_workers=args.num_workers,
                                collate_fn=ClassifierPaddingCollate)
        return dataloader

    def get_optimizer(self):
        """Create the optimizer and a constant (optionally decaying) LR schedule."""
        args = self.args
        model = self.model
        train_dataloader = self.train_dataloader
        optimizer = optimization.get_optim(args, model.parameters())
        num_steps = len(train_dataloader) * args.num_train_epochs
        args.num_steps = num_steps
        print("Total number of steps: %d" % num_steps)
        decay_step = len(train_dataloader) * args.decay_epoch
        if args.decay_epoch > 0:
            print("Step when lr starts to decay: %d" % decay_step)
            scheduler = optimization.get_constant_schedule_with_linear_decay(
                optimizer, decay_step=decay_step, num_training_steps=num_steps
            )
        else:
            scheduler = optimization.get_constant_schedule(optimizer)
        return optimizer, scheduler

    def save_checkpoint(self, path):
        """Persist model weights only; optimizer/scheduler state is not saved."""
        torch.save(self.model.state_dict(), os.path.join(path, "model_state_dict.pt"))
        return

    def train(self):
        raise NotImplementedError()

    def evaluate(self):
        raise NotImplementedError()

    def test(self):
        raise NotImplementedError()

    def save_train_result(self, train_record, eval_record):
        """Log the best BLEU and plot BLEU/loss curves into `args.output_dir`.

        Returns (best_bleu, step_of_best_bleu).
        """
        args = self.args
        train_loss_record = train_record
        eval_bleu_record, eval_gs_record = eval_record
        best_bleu = np.max(eval_bleu_record)
        step_of_best_bleu = eval_gs_record[np.argmax(eval_bleu_record)]
        print("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
        with open(os.path.join(args.output_dir, "training_result.log"), 'w') as f:
            f.write("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
        plt.figure()
        plt.xlabel("step")
        plt.ylabel("BLEU")
        plt.plot(eval_gs_record, eval_bleu_record)
        plt.tight_layout()
        plt.savefig(os.path.join(args.output_dir, "bleu.pdf"), format='pdf')
        plt.figure()
        plt.xlabel("step")
        plt.ylabel("loss")
        plt.plot(list(range(len(train_loss_record))), train_loss_record)
        plt.tight_layout()
        plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
        return best_bleu, step_of_best_bleu
class TransferModelTrainer(BasicTrainer):
    def __init__(self, args, model, train_data=None, dev_data=None,
                 test_data=None, **kwargs):
        """Trainer for the transfer model.

        Loads a pre-trained classifier (frozen), builds an optimizer over the
        non-classifier/non-LM parameters, loads a CNN evaluation classifier,
        and resolves the dev/test reference and data file paths.
        """
        super().__init__(
            args, model, train_data, dev_data, test_data
        )
        self.tokenizer = kwargs["tokenizer"]
        if self.args.cls_model_path:
            # NOTE(review): "form" -> "from" typo in this log message.
            print(f"Load classifier model form {self.args.cls_model_path}")
            self.model.classifier.load_state_dict(
                torch.load(
                    os.path.join(self.args.cls_model_path, "model_state_dict.pt")
                )
            )
            # Classifier weights stay fixed during transfer training.
            self.model.freeze_cls()
        # args.cls_weight = 0.05
        # args.ca_weight = 0.0
        # args.bt_weight = 1.0
        self.use_caw_schedule = False
        # Drop the optimizer/scheduler built by BasicTrainer: they cover all
        # model parameters, but here only a subset must be optimized.
        del self.optimizer
        del self.scheduler
        if self.train_dataloader:
            params = []
            for k, v in self.model.named_parameters():
                # print("%s: %s" % (k, str(v.shape)))
                # Exclude the frozen classifier and LM parameters.
                if "classifier" in k or "lm" in k:
                    print("not optimize %s" % k)
                else:
                    print("add params of %s to optimizer" % k)
                    params.append(v)
            self.optimizer, self.scheduler\
                = self.get_optimizer(params)
        # torch.autograd.set_detect_anomaly(True)
        # Separate CNN classifier used for evaluation only (kept in eval mode).
        self.clf_model = torch.load(args.cnn_clf_path).to(args.device)
        self.clf_model.eval()
        self.dev_ref_path_list = getattr(args, "dev_ref_path_list", None)
        self.test_ref_path_list = getattr(args, "test_ref_path_list", None)
        if self.test_ref_path_list is None:
            self.test_ref_path_list = self.args.ref_list
        print("self.dev_ref_path_list is")
        print(self.dev_ref_path_list)
        print("self.test_ref_path_list is")
        print(self.test_ref_path_list)
        # With BPE, the raw (self-reference) files carry a "self_ref." prefix.
        if not self.args.use_bpe:
            self.dev_data_path_list = [
                [os.path.join(self.args.data_dir, f"dev.{i}")] for i in range(2)
            ]
            self.test_data_path_list = [
                [os.path.join(self.args.data_dir, f"test.{i}")] for i in range(2)
            ]
        else:
            self.dev_data_path_list = [
                [os.path.join(self.args.data_dir, f"self_ref.dev.{i}")] for i in range(2)
            ]
            self.test_data_path_list = [
                [os.path.join(self.args.data_dir, f"self_ref.test.{i}")] for i in range(2)
            ]
        print("self.dev_data_path_list is")
        print(self.dev_data_path_list)
        print("self.test_data_path_list is")
        print(self.test_data_path_list)
def get_optimizer(self, params=None):
args = self.args
if params is None:
print("return because params is None")
return None, None
# params = self.model.parameters()
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, params)
num_steps = len(train_dataloader) * args.num_train_epochs // args.grad_accum_interval
args.num_steps = num_steps
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def train(self, train_dataloader=None):
print("\n### TRAINING BEGINS ###")
args = self.args
model = self.model
optimizer = self.optimizer
scheduler = self.scheduler
train_dataloader = train_dataloader if train_dataloader else self.train_dataloader
model.train()
loss_record = [] # loss at global_step 0, 1, 2 ...
dev_metric_record = []
global_step_record_for_eval = []
global_step = 0
pad_id = args.pad_id
grad_accum_interval = args.grad_accum_interval
log_loss = 0.0
num_iters_per_epoch = len(train_dataloader)
normalizer = min(num_iters_per_epoch, grad_accum_interval)
cls_w = args.cls_weight
print("cls_w is", cls_w)
if self.use_caw_schedule:
start = 0.0
stop = args.ca_weight
ratio = 0.5
ca_w_list = lambda_schedule(args.num_steps,
start=start, stop=stop, ratio=ratio)
print(f"ca_w uses schedule (start={start}, stop={stop}, ratio={ratio})")
ca_w = ca_w_list[0]
else:
ca_w = args.ca_weight
print("ca_w is", ca_w)
bt_w = args.bt_weight
print("bt_w is", bt_w)
model.zero_grad()
if args.freeze_emb_at_beginning:
model.freeze_emb()
start_time = time.time()
for ep in range(args.num_train_epochs):
if ep == args.unfreeze_at_ep and args.freeze_emb_at_beginning:
model.unfreeze_emb()
for step, batch in enumerate(train_dataloader):
src, lab, src_len = batch
# print(f"ep:{ep}, step: {step}, src.shape[1] is", src.shape[1])
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src = sorted_src.to(args.device)
sorted_src_len = sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
try:
sorted_src_pad_mask = sorted_src==pad_id
sorted_loss_tuple, sorted_output_tuple,\
sorted_algin = model(sorted_src, sorted_src_len,
sorted_lab, sorted_src_pad_mask)
sorted_rec_loss, sorted_bt_loss,\
sorted_src_cls_loss, sorted_soft_out_cls_loss,\
sorted_out_cls_loss, sorted_ca_loss = sorted_loss_tuple
sorted_output, sorted_output_len = sorted_output_tuple
rec_loss = sorted_rec_loss.mean()
bt_loss = sorted_bt_loss.mean()
src_cls_loss = sorted_src_cls_loss.mean()
soft_out_cls_loss = sorted_soft_out_cls_loss.mean()
out_cls_loss = sorted_out_cls_loss.mean()
ca_loss = sorted_ca_loss.mean()
loss = rec_loss + bt_w * bt_loss\
+ cls_w * soft_out_cls_loss + ca_w * ca_loss
loss /= normalizer
loss.backward()
if (step+1) % grad_accum_interval == 0 or\
(grad_accum_interval >= num_iters_per_epoch and
(step+1) == num_iters_per_epoch):
g = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
loss_record.append(log_loss)
# global_step += 1
log_loss = 0.0
if global_step > 0 and global_step % args.log_interval == 0:
print(
f"epoch: {ep} "\
f"step: {global_step} "\
f"loss: {loss.item() * normalizer:.4f} "\
f"rec_loss: {rec_loss.item():.4f} "\
f"bt_loss: {bt_loss.item():.4f} "\
f"src_cls_loss: {src_cls_loss.item():.4f} "\
f"soft_out_cls_loss: {soft_out_cls_loss.item():.4f} "\
f"out_cls_loss: {out_cls_loss.item():.4f} "\
f"ca_loss: {ca_loss.item():.4f} "\
f"||g||: {g:.2f} "\
f"ca_w: {ca_w:.4f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
if global_step > 0 and global_step % args.eval_interval == 0:
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = checkpoint_output_dir
print("dev")
dev_metric = self.evaluate()
dev_metric_record.append(dev_metric)
global_step_record_for_eval.append(global_step)
args.output_dir = org_output_dir
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
model.train()
global_step += 1
if self.use_caw_schedule:
ca_w = ca_w_list[global_step]
else:
log_loss += loss.item()
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
raise e
# gpu_profile(frame=sys._getframe(), event='line', arg=None)
print("### TRAINING ENDS ###\n")
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = | |
<reponame>gideonbros/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ProjectSearchesV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_project_search(self, owner, project, body, **kwargs): # noqa: E501
"""Create project search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_project_search(owner, project, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project under namesapce (required)
:param V1Search body: Search body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Search
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_project_search_with_http_info(owner, project, body, **kwargs) # noqa: E501
    def create_project_search_with_http_info(self, owner, project, body, **kwargs):  # noqa: E501
        """Create project search  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_project_search_with_http_info(owner, project, body, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str project: Project under namesapce (required)
        :param V1Search body: Search body (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Search, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # `locals()` captures the named arguments plus the `kwargs` dict; the
        # generated validation code below unpacks and checks them by name.
        local_var_params = locals()

        all_params = [
            'owner',
            'project',
            'body'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject any keyword argument this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_project_search" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                       local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `create_project_search`")  # noqa: E501
        # verify the required parameter 'project' is set
        if self.api_client.client_side_validation and ('project' not in local_var_params or  # noqa: E501
                                                       local_var_params['project'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project` when calling `create_project_search`")  # noqa: E501
        # verify the required parameter 'body' is set
        if self.api_client.client_side_validation and ('body' not in local_var_params or  # noqa: E501
                                                       local_var_params['body'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `body` when calling `create_project_search`")  # noqa: E501

        collection_formats = {}

        # Path parameters are substituted into the URL template below.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'project' in local_var_params:
            path_params['project'] = local_var_params['project']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501

        return self.api_client.call_api(
            '/api/v1/{owner}/{project}/searches', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Search',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_project_search(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Delete project search # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_project_search(owner, entity, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str entity: Entity: project name, hub name, registry name, ... (required)
:param str uuid: Uuid identifier of the sub-entity (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_project_search_with_http_info(owner, entity, uuid, **kwargs) # noqa: E501
    def delete_project_search_with_http_info(self, owner, entity, uuid, **kwargs):  # noqa: E501
        """Delete project search  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_project_search_with_http_info(owner, entity, uuid, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str owner: Owner of the namespace (required)
        :param str entity: Entity: project name, hub name, registry name, ... (required)
        :param str uuid: Uuid identifier of the sub-entity (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local variable is bound,
        # so it contains exactly the call arguments (self, owner, entity,
        # uuid, kwargs). Do not move this line.
        local_var_params = locals()
        # Names of positional/keyword parameters this endpoint accepts.
        all_params = [
            'owner',
            'entity',
            'uuid'
        ]
        # Generic request-control options accepted by every endpoint.
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # parameter dict so lookups below are uniform.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_project_search" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                        local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `delete_project_search`")  # noqa: E501
        # verify the required parameter 'entity' is set
        if self.api_client.client_side_validation and ('entity' not in local_var_params or  # noqa: E501
                                                        local_var_params['entity'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `entity` when calling `delete_project_search`")  # noqa: E501
        # verify the required parameter 'uuid' is set
        if self.api_client.client_side_validation and ('uuid' not in local_var_params or  # noqa: E501
                                                       local_var_params['uuid'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `uuid` when calling `delete_project_search`")  # noqa: E501
        collection_formats = {}
        # Substitutions for the templated URL path below.
        path_params = {}
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'entity' in local_var_params:
            path_params['entity'] = local_var_params['entity']  # noqa: E501
        if 'uuid' in local_var_params:
            path_params['uuid'] = local_var_params['uuid']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501
        # response_type=None: a successful DELETE returns no body.
        return self.api_client.call_api(
            '/api/v1/{owner}/{entity}/searches/{uuid}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_project_search(self, owner, entity, uuid, **kwargs): # noqa: E501
"""Get project search # noqa: E501
| |
# -*- coding: utf-8 -*-
from .database import BaseDbManager
from . import models
from sqlalchemy import distinct
from pandas import read_sql
from collections import Iterable
class QueryManager(BaseDbManager):
"""Query interface to database."""
def _limit_and_df(self, query, limit, as_df=False):
"""adds a limit (limit==None := no limit) to any query and allow a return as pandas.DataFrame
:param bool as_df: if is set to True results return as pandas.DataFrame
:param `sqlalchemy.orm.query.Query` query: SQL Alchemy query
:param int or tuple[int] limit: maximum number of results
:return: query result of pyuniprot.manager.models.XY objects
"""
if limit:
if isinstance(limit, int):
query = query.limit(limit)
if isinstance(limit, Iterable) and len(limit) == 2 and [int, int] == [type(x) for x in limit]:
page, page_size = limit
query = query.limit(page_size)
query = query.offset(page * page_size)
if as_df:
results = read_sql(query.statement, self.engine)
else:
results = query.all()
return results
@classmethod
def _model_query(cls, query_obj, search4, model_attrib):
if isinstance(search4, str):
query_obj = query_obj.filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.filter(model_attrib.in_(search4))
return query_obj
def get_obo_string(self, taxid=None, limit=None):
q = self.session.query(models.Entry)
if limit:
q = q.limit(limit)
if taxid:
q = q.filter(models.Entry.taxid == taxid)
obo_string = ''
for entry in q.all():
obo_string += '\n[Term]\nid: SWISSPROT:{}\n'.format(entry.accessions[0])
if len(entry.accessions) > 1:
for accession in entry.accessions[1:]:
obo_string += 'alt_id: {}\n'.format(accession)
obo_string += 'name: {}\n'.format(entry.recommended_full_name)
for alternative_name in entry.alternative_full_names + entry.alternative_short_names:
obo_string += 'synonym: "{}" EXACT ALTERNATIVE_NAME []\n'.format(alternative_name.name)
obo_string += 'synonym: "{}" EXACT GENE_NAME []\n'.format(entry.gene_name)
for xref in entry.db_references:
if xref.type_ in ['GO', 'HGNC']:
xref.identifier = ':'.join(xref.identifier.split(':')[1:])
obo_string += 'xref: {}:{}\n'.format(xref.type_, xref.identifier.replace('\\', '\\\\'))
return obo_string
def get_model_queries(self, query_obj, model_queries_config):
for search4, model_attrib in model_queries_config:
if search4 is not None:
query_obj = self._model_query(query_obj, search4, model_attrib)
return query_obj
def get_one_to_many_queries(self, query_obj, one_to_many_queries):
for search4, model_attrib in one_to_many_queries:
if search4 is not None:
query_obj = self._one_to_many_query(query_obj, search4, model_attrib)
return query_obj
def get_many_to_many_queries(self, query_obj, many_to_many_queries_config):
for search4, model_attrib, many2many_attrib in many_to_many_queries_config:
if search4 is not None:
query_obj = self._many_to_many_query(query_obj, search4, model_attrib, many2many_attrib)
return query_obj
@classmethod
def _many_to_many_query(cls, query_obj, search4, join_attrib, many2many_attrib):
if isinstance(search4, str):
query_obj = query_obj.join(join_attrib).filter(many2many_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.join(join_attrib).filter(many2many_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.join(join_attrib).filter(many2many_attrib.in_(search4))
return query_obj
@classmethod
def _one_to_many_query(cls, query_obj, search4, model_attrib):
"""extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
"""
model = model_attrib.parent.class_
if isinstance(search4, str):
query_obj = query_obj.join(model).filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.join(model).filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.join(model).filter(model_attrib.in_(search4))
return query_obj
    def keyword(self, name=None, identifier=None, entry_name=None, limit=None, as_df=False):
        """Method to query :class:`.models.Keyword` objects in database
        :param name: keyword name(s)
        :type name: str or tuple(str) or None
        :param identifier: keyword identifier(s)
        :type identifier: str or tuple(str) or None
        :param entry_name: name(s) in :class:`.models.Entry`
        :type entry_name: str or tuple(str) or None
        :param limit:
            - if `isinstance(limit,int)==True` -> limit
            - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
            - if limit == None -> all results
        :type limit: int or tuple(int) or None
        :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
        :return:
            - if `as_df == False` -> list(:class:`.models.Keyword`)
            - if `as_df == True` -> :class:`pandas.DataFrame`
        :rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
        """
        q = self.session.query(models.Keyword)
        # Direct column filters on the Keyword model itself.
        model_queries_config = (
            (name, models.Keyword.name),
            (identifier, models.Keyword.identifier)
        )
        q = self.get_model_queries(q, model_queries_config)
        # keyword <-> entry is many-to-many; filter by the linked Entry.name.
        q = self.get_many_to_many_queries(q, ((entry_name, models.Keyword.entries, models.Entry.name),))
        return self._limit_and_df(q, limit, as_df)
    def entry(self,
              name=None,
              dataset=None,
              recommended_full_name=None,
              recommended_short_name=None,
              gene_name=None,
              taxid=None,
              accession=None,
              organism_host=None,
              feature_type=None,
              function_=None,
              ec_number=None,
              db_reference=None,
              alternative_name=None,
              disease_comment=None,
              disease_name=None,
              tissue_specificity=None,
              pmid=None,
              keyword=None,
              subcellular_location=None,
              tissue_in_reference=None,
              sequence=None,
              limit=None,
              as_df=False):
        """Method to query :class:`.models.Entry` objects in database
        An entry is the root element in UniProt datasets. Everything is linked to entry and can be accessed from
        :param name: UniProt entry name(s)
        :type name: str or tuple(str) or None
        :param dataset: Swiss-Prot or TrEMBL
        :type dataset: str or tuple(str) or None
        :param recommended_full_name: recommended full protein name(s)
        :type recommended_full_name: str or tuple(str) or None
        :param recommended_short_name: recommended short protein name(s)
        :type recommended_short_name: str or tuple(str) or None
        :param tissue_in_reference: tissue(s) mentioned in reference
        :type tissue_in_reference: str or tuple(str) or None
        :param subcellular_location: subcellular location(s)
        :type subcellular_location: str or tuple(str) or None
        :param keyword: keyword(s)
        :type keyword: str or tuple(str) or None
        :param pmid: PubMed identifier(s)
        :type pmid: int or tuple(int) or None
        :param tissue_specificity: tissue specificit(y/ies)
        :type tissue_specificity: str or tuple(str) or None
        :param disease_comment: disease_comment(s)
        :type disease_comment: str or tuple(str) or None
        :param alternative_name: alternative name(s)
        :type alternative_name: str or tuple(str) or None
        :param db_reference: cross reference identifier(s)
        :type db_reference: str or tuple(str) or None
        :param ec_number: enzyme classification number(s), e.g. 1.1.1.1
        :type ec_number: str or tuple(str) or None
        :param function_: description of protein function(s)
        :type function_: str or tuple(str) or None
        :param feature_type: feature type(s)
        :type feature_type: str or tuple(str) or None
        :param organism_host: organism host(s) as taxid(s)
        :type organism_host: int or tuple(int) or None
        :param accession: UniProt accession number(s)
        :type accession: str or tuple(str) or None
        :param disease_name: disease name(s)
        :type disease_name: str or tuple(str) or None
        :param gene_name: gene name(s)
        :type gene_name: str or tuple(str) or None
        :param taxid: NCBI taxonomy identifier(s)
        :type taxid: int or tuple(int) or None
        :param sequence: Amino acid sequence(s)
        :type sequence: str or tuple(str) or None
        :param limit:
            - if `isinstance(limit,int)==True` -> limit
            - if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
            - if limit == None -> all results
        :type limit: int or tuple(int) or None
        :param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
        :return:
            - if `as_df == False` -> list(:class:`.models.Entry`)
            - if `as_df == True` -> :class:`pandas.DataFrame`
        :rtype: list(:class:`.models.Entry`) or :class:`pandas.DataFrame`
        """
        q = self.session.query(models.Entry)
        # Filters on columns of the Entry model itself.
        model_queries_config = (
            (dataset, models.Entry.dataset),
            (name, models.Entry.name),
            (recommended_full_name, models.Entry.recommended_full_name),
            (recommended_short_name, models.Entry.recommended_short_name),
            (gene_name, models.Entry.gene_name),
            (taxid, models.Entry.taxid),
        )
        q = self.get_model_queries(q, model_queries_config)
        # Filters requiring a join to a child table (one entry -> many rows).
        one_to_many_queries_config = (
            (accession, models.Accession.accession),
            (organism_host, models.OrganismHost.taxid),
            (feature_type, models.Feature.type_),
            (function_, models.Function.text),
            (ec_number, models.ECNumber.ec_number),
            (db_reference, models.DbReference.identifier),
            (alternative_name, models.AlternativeFullName.name),
            (disease_comment, models.DiseaseComment.comment),
            (tissue_specificity, models.TissueSpecificity.comment),
            (sequence, models.Sequence.sequence),
        )
        q = self.get_one_to_many_queries(q, one_to_many_queries_config)
        # Filters requiring a join across an association table.
        many_to_many_queries_config = (
            (pmid, models.Entry.pmids, models.Pmid.pmid),
            (keyword, models.Entry.keywords, models.Keyword.name),
            (subcellular_location, models.Entry.subcellular_locations, models.SubcellularLocation.location),
            (tissue_in_reference, models.Entry.tissue_in_references, models.TissueInReference.tissue)
        )
        q = self.get_many_to_many_queries(q, many_to_many_queries_config)
        # disease_name needs a two-step join (Entry -> DiseaseComment -> Disease),
        # which the generic helpers above cannot express.
        if disease_name:
            q = q.join(models.Entry.disease_comments).join(models.DiseaseComment.disease)
            if isinstance(disease_name, str):
                q = q.filter(models.Disease.name.like(disease_name))
            elif isinstance(disease_name, Iterable):
                q = q.filter(models.Disease.name.in_(disease_name))
        return self._limit_and_df(q, limit, as_df)
def disease(self,
identifier=None,
ref_id=None,
ref_type=None,
name=None,
acronym=None,
description=None,
entry_name=None,
limit=None,
as_df=False
):
"""Method to query :class:`.models.Disease` objects in database
:param identifier: disease UniProt identifier(s)
:type identifier: str or tuple(str) or None
:param ref_id: identifier(s) of referenced database
:type ref_id: str or tuple(str) or None
:param ref_type: database name(s)
:type ref_type: str or tuple(str) or None
:param name: disease name(s)
:type name: str or tuple(str) or None
:param acronym: disease acronym(s)
:type acronym: str or tuple(str) or None
:param description: disease description(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Disease`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Disease`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Disease)
model_queries_config = (
(identifier, models.Disease.identifier),
(ref_id, models.Disease.ref_id),
(ref_type, models.Disease.ref_type),
(name, models.Disease.name),
(acronym, models.Disease.acronym),
(description, models.Disease.description)
)
q = self.get_model_queries(q, model_queries_config)
if entry_name:
q = q.session.query(models.Disease).join(models.DiseaseComment).join(models.Entry)
if isinstance(entry_name, str):
q = q.filter(models.Entry.name == entry_name)
elif isinstance(entry_name, Iterable):
q = q.filter(models.Entry.name.in_(entry_name))
return self._limit_and_df(q, limit, as_df)
def disease_comment(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DiseaseComment` objects in database
:param comment: Comment(s) to disease
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DiseaseComment`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DiseaseComment`) or :class:`pandas.DataFrame`
"""
q | |
<filename>myhabitatagent.py
import argparse
import habitat
import random
import numpy as np
import scipy
import os
import cv2
import time
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from habitat.utils.visualizations import maps
from gibsonagents.expert import Expert
from gibsonagents.pathplanners import Dstar_planner, Astar3D, VI_planner
from gibsonagents.classic_mapping import rotate_2d, ClassicMapping, map_path_for_sim
from utils.dotdict import dotdict
from utils.tfrecordfeatures import tf_bytes_feature, tf_int64_feature, sequence_feature_wrapper # tf_bytelist_feature
from habitat_utils import load_map_from_file, encode_image_to_png, get_model_id_from_episode, get_floor_from_json
from vin import grid_actions_from_trajectory, project_state_and_goal_to_smaller_map
import quaternion
from multiprocessing import Queue, Process
import atexit
import platform
from arguments import parse_args
import tensorflow as tf
from train import get_brain, get_tf_config
from common_net import load_from_file, count_number_trainable_params
from visualize.visualize_habitat_training import plot_viewpoints, plot_target_and_path, mapping_visualizer
from gen_habitat_data import actions_from_trajectory
from gen_planner_data import rotate_map_and_poses, Transform2D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.gridspec as gridspec
try:
import ipdb as pdb
except:
import pdb
# # Fix multiprocessing on mac OSX
# if platform.system() == "Darwin":
# import multiprocessing
# multiprocessing.set_start_method('spawn')
# --- Module-level configuration constants for the agent / evaluation run. ---
# Many carry previously-used values in trailing comments for reference.
ACTION_SOURCE = "plan" #"expert" # "plan"
# START_WITH_SPIN = False # True
SPIN_TARGET = np.deg2rad(370) # np.deg2rad(270) # np.deg2rad(360 - 70)
SPIN_DIRECTION = 1 # 1 for same direction as target, -1 for opposite direction. Opposite is better if target < 360
PLANNER_SINGLE_THREAD = False
PLANNER_STOP_THREAD_EACH_EPISODE = False
# COST_SETTING = 0 # 2
# SOFT_COST_MAP = True
PLANNER2D_TIMEOUT = 200 # 200. # 0.08
# PLANNER3D_TIMEOUT = 2.5 # 1.5 # 200. # 0.08 - ------------------
RECOVER_ON_COLLISION = True
COLLISION_DISTANCE_THRESHOLD = 0.6 # 0.8
MAX_SHORTCUT_TURNS = 2 # was 1 in submission
NEAR_TARGET_COLLISION_STOP_DISTANCE = 5. # when colliding within this radius to the goal, stop instead
# Patch map with collisions and around target.
TARGET_MAP_MARGIN = 2
OBSTACLE_DOWNWEIGHT_DISTANCE = 20 # from top, smaller the further
OBSTACLE_DOWNWEIGHT_SCALARS = (0.3, 0.8) # (0.3, 0.8)
EXTRA_STEPS_WHEN_EXPANDING_MAP = 30
# --- Debug / visualization switches. ---
SUPRESS_EXCEPTIONS = False
INTERACTIVE_ON_EXCEPTIONS = True
PLOT_EVERY_N_STEP = -1
PRINT_TIMES = True
INTERACTIVE_PLOT = True
PLOT_PROCESS = False # True
SAVE_VIDEO = True # will save params.num_video number of videos, or all if interactive
USE_ASSERTS = False
# Earlier values: 42 * 60 * 60 - 3 * 60 * 60 # 30 * 60 - 5 * 60
TOTAL_TIME_LIMIT = 42 * 60 * 60 - 30 * 60 # challenge gave up at 38h and finished at 39h so 120 minutes should be enough. Even more recently, all giveup finished in 6 mins.
# 42 hours = 2520 mins for 1000-2000 episodes.
# Average episode time should be < 75.6 sec
ERROR_ON_TIMEOUT = False # True
SKIP_FIRST_N_FOR_TEST = -1 # 10
VIDEO_FRAME_SKIP = 1 # 6
VIDEO_FPS = 5 # 5 # 30
VIDEO_LARGE_PLOT = False
VIDEO_DETAILED = True
DEBUG_DUMMY_ACTIONS_ONLY = False
SKIP_FIRST_N = -1 # 1000
SKIP_AFTER_N = -1 # 1500
SKIP_MAP_SHAPE_MISMATCH = True
# --- Speed-test overrides. ---
REPLACE_WITH_RANDOM_ACTIONS = False
EXIT_AFTER_N_STEPS_FOR_SPEED_TEST = -1
FAKE_INPUT_FOR_SPEED_TEST = False
MAX_MAP_SIZE_FOR_SPEED_TEST = False
# DATA GENERATION - for both sim scenarios and real spot
DATA_TYPE = "scenario"
SAVE_DATA_EVERY_N = 1 # 4
DATA_FIRST_STEP_ONLY = True
DATA_MAX_TRAJLEN = 50 # when DATA_FIRST_STEP_ONLY == False
DATA_INCLUDE_NONPLANNED_ACTIONS = False
DATA_USE_LAST_SEGMENT = False # when map is smaller use either the last or the first trajectory segment
# DATA_SEPARATE_FILES = True # for real spot data
DATA_SEPARATE_FILES = False # for simulated scenario data
def giveup_settings(giveup_setting):
    """Return the give-up policy constants for a named setting.

    :param str giveup_setting: one of "1", "2", "3", "4", "5", "6", "7", "8",
        "data300", "never", "always", "fast"
    :return: tuple ``(GIVE_UP_NO_PROGRESS_STEPS, NO_PROGRESS_THRESHOLD,
        GIVE_UP_NUM_COLLISIONS, GIVE_UP_STEP_AND_DISTANCE,
        GIVE_UP_TIME_AND_REDUCTION)``
    :raises ValueError: for an unknown setting name
    """
    # Give up settings - submission
    if giveup_setting == "1":
        GIVE_UP_NO_PROGRESS_STEPS = 90  # 100
        NO_PROGRESS_THRESHOLD = 15
        GIVE_UP_NUM_COLLISIONS = 6  # 100 # TODO increase TODO increase later distances
        GIVE_UP_STEP_AND_DISTANCE = [[0, 340], [150, 220], [300, 150], [400, 100]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = [[3.5, 100], [4., 120], [5., 300], [6., 400]]  # in minutes ! and distance reduction from beginning
    # Give up settings - more agressive for submission2
    elif giveup_setting == "2":
        GIVE_UP_NO_PROGRESS_STEPS = 90  # 100
        NO_PROGRESS_THRESHOLD = 15
        GIVE_UP_NUM_COLLISIONS = 6
        GIVE_UP_STEP_AND_DISTANCE = [[0, 340], [150, 220], [300, 100], [400, 50]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = [[3.5, 100], [4., 120], [5., 300], [6., 400]]  # in minutes ! and distance reduction from beginning
    # Relaxed giveup settings for local evaluation (3)
    # BUGFIX: this branch was previously a duplicate `== "2"` test and
    # therefore unreachable dead code; per its own comment it is setting "3".
    elif giveup_setting == "3":
        GIVE_UP_NO_PROGRESS_STEPS = 100  # 100
        NO_PROGRESS_THRESHOLD = 12
        GIVE_UP_NUM_COLLISIONS = 8  # 100 # TODO increase TODO increase later distances
        GIVE_UP_STEP_AND_DISTANCE = [[0, 440], [150, 320], [300, 250], [400, 150]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = [[10., 100], [15., 120], [20., 300], [30., 400]]  # in minutes ! and distance reduction from beginning
    # Almost never give up -- august submission4
    elif giveup_setting == "4":
        GIVE_UP_NO_PROGRESS_STEPS = 100
        NO_PROGRESS_THRESHOLD = 12
        GIVE_UP_NUM_COLLISIONS = 20
        GIVE_UP_STEP_AND_DISTANCE = [[0, 440], [150, 300], [200, 250], [250, 200], [300, 150], [350, 100], [400, 40]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []  # [10., 100], [15., 120], [20., 300], [30., 400]] # in minutes ! and distance reduction from beginning
    elif giveup_setting == "5":
        # Almost never give up -- sept submission5
        GIVE_UP_NO_PROGRESS_STEPS = 100
        NO_PROGRESS_THRESHOLD = 10
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = [[200, 300], [300, 200], [400, 100]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []
    # Almost never give up -- sept submission6
    elif giveup_setting == "6":
        GIVE_UP_NO_PROGRESS_STEPS = 120
        NO_PROGRESS_THRESHOLD = 10
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = [[200, 400], [300, 250], [400, 150],
                                     [450, 100]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []
    # Almost never give up -- sept submission7
    elif giveup_setting == "7":
        GIVE_UP_NO_PROGRESS_STEPS = 120
        NO_PROGRESS_THRESHOLD = 10
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = [[200, 500], [300, 300], [400, 175], [450, 100]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []
    # Almost never give up -- nov submission8
    elif giveup_setting == "8":
        GIVE_UP_NO_PROGRESS_STEPS = 1000
        NO_PROGRESS_THRESHOLD = 1
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = [[200, 400], [300, 240], [400, 160]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []
    # No giveup but 300-step limit, for data generation
    elif giveup_setting == "data300":
        GIVE_UP_NO_PROGRESS_STEPS = 1000
        NO_PROGRESS_THRESHOLD = 1
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = [[300, 1], ]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []  # in minutes ! and distance reduction from beginning
    # No giveup
    elif giveup_setting == "never":
        GIVE_UP_NO_PROGRESS_STEPS = 1000  # 100
        NO_PROGRESS_THRESHOLD = 1
        GIVE_UP_NUM_COLLISIONS = 1000
        GIVE_UP_STEP_AND_DISTANCE = []  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []  # in minutes ! and distance reduction from beginning
    # Give up immediately
    elif giveup_setting == "always":
        GIVE_UP_NO_PROGRESS_STEPS = 1  # 100
        NO_PROGRESS_THRESHOLD = 1
        GIVE_UP_NUM_COLLISIONS = 1
        GIVE_UP_STEP_AND_DISTANCE = [[0, 1]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = []  # in minutes ! and distance reduction from beginning
    # Very agressive for fast testing
    elif giveup_setting == "fast":
        GIVE_UP_NO_PROGRESS_STEPS = 50  # 100
        NO_PROGRESS_THRESHOLD = 15
        GIVE_UP_NUM_COLLISIONS = 1
        GIVE_UP_STEP_AND_DISTANCE = [[0, 340], [100, 200], [200, 100], [300, 50]]  # NOTE if changing first threshold also change max map size.
        GIVE_UP_TIME_AND_REDUCTION = [[3.5, 100], [4., 120], [5., 300], [6., 400]]  # in minutes ! and distance reduction from beginning
    else:
        raise ValueError('Unknown giveup_setting %s'%giveup_setting)
    return GIVE_UP_NO_PROGRESS_STEPS, NO_PROGRESS_THRESHOLD, GIVE_UP_NUM_COLLISIONS, GIVE_UP_STEP_AND_DISTANCE, GIVE_UP_TIME_AND_REDUCTION
class DSLAMAgent(habitat.Agent):
def __init__(self, task_config, params, env=None, logdir='./temp/', tfwriters=()):
self.start_time = time.time()
self._POSSIBLE_ACTIONS = task_config.TASK.POSSIBLE_ACTIONS
self.step_i = 0
self.episode_i = -2
self.env = env
self.task_config = task_config
self.tfwriters = tfwriters
self.num_data_entries = 0
if env is None:
self.follower = None
assert ACTION_SOURCE != "expert"
else:
self.follower = ShortestPathFollower(env._sim, 0.36/2., False)
# if len(params.gpu) > 0 and int(params.gpu[0]) > 4:
# print ("Try to explicitly disable gpu")
# try:
# tf.config.experimental.set_visible_devices([], 'GPU')
# except Exception as e:
# print("Exception " + str(e))
print (params)
self.params = params
# Giveup setting
self.GIVE_UP_NO_PROGRESS_STEPS, self.NO_PROGRESS_THRESHOLD, self.GIVE_UP_NUM_COLLISIONS, \
self.GIVE_UP_STEP_AND_DISTANCE, self.GIVE_UP_TIME_AND_REDUCTION = giveup_settings(params.giveup)
if INTERACTIVE_PLOT or self.params.interactive_video:
plt.ion()
assert params.sim in ['habitat', 'spot', 'spotsmall', 'spotsmall2', 'habitat2021']
self.map_source = self.params.agent_map_source
self.pose_source = self.params.agent_pose_source
self.action_source = ACTION_SOURCE
self.max_confidence = 0.96 # 0.98
self.confidence_threshold = None # (0.2, 0.01) # (0.35, 0.05)
self.use_custom_visibility = (self.params.visibility_mask in [2, 20, 21])
assert self.params.agent_map_source in ['true', 'true-saved', 'true-saved-sampled', 'true-saved-hrsampled',
'true-partial', 'true-partial-sampled', 'pred']
assert self.params.agent_pose_source in ['slam', 'slam-truestart', 'true']
_, gpuname = get_tf_config(devices=params.gpu) # sets CUDA_VISIBLE_DEVICES
if params.skip_slam:
print ("SKIP SLAM overwritting particles and removing noise.")
assert | |
== 'gaussProc':
pct += 1 + gwb_popparam_ndims
elif args.gwbSpecModel == 'turnover':
if args.gwb_fb2env is not None:
pct += 2
elif args.gwb_fb2env is None:
pct += 3
elif args.gwbSpecModel == 'gpEnvInterp':
pct += 2
if args.incCorr:
pct += num_corr_params
if args.gwbModelSelect:
pct += 1
if args.incGWline:
pct += 4
if args.ecc_search:
pct += 12
elif not args.ecc_search:
pct += 11
# psr distances
pct += len(psr)
# psrterm gamma0
pct += len(psr)
ind = np.unique(np.random.randint(0, len(psr), 1))
for ii in ind:
q[pct+ii] = np.random.uniform(pmin[pct+ii], pmax[pct+ii])
qxy += 0
return q, qxy
# cgw psrterm l0
def drawFromCGWModelIndexPrior(parameters, iter, beta):
    """PTMCMC jump: redraw a single CGW parameter uniformly from its prior.

    Walks the global parameter vector in its construction order, accumulating
    ``pct`` (the running offset) over every enabled noise/signal component
    that precedes the CGW block; the entry at the resulting offset
    (presumably the CGW model-selection index, per the function name —
    verify against the parameter-vector layout) is redrawn from
    U(pmin, pmax).

    NOTE(review): relies on module globals (args, psr, pmin, pmax, bands,
    nmodes_*, num_corr_params, gwb_popparam_ndims). Any change to the
    parameter-vector layout must be mirrored here, statement for statement.

    :param parameters: current parameter vector (numpy array)
    :param iter: sampler iteration (unused; required by the jump signature)
    :param beta: chain inverse temperature (unused)
    :return: (proposed parameter vector, log transition-probability ratio)
    """
    # post-jump parameters
    q = parameters.copy()
    # transition probability
    qxy = 0
    npsr = len(psr)
    pct = 0
    # per-pulsar red noise
    if not args.fixRed:
        if args.redSpecModel == 'powerlaw':
            pct = 2*npsr
        elif args.redSpecModel == 'spectrum':
            pct = npsr*nmodes_red
    # per-pulsar DM variations
    if args.incDM and not args.fixDM:
        if args.dmSpecModel == 'powerlaw':
            pct += 2*npsr
        elif args.dmSpecModel == 'spectrum':
            pct += npsr*nmodes_dm
    # per-system white-noise parameters
    if args.varyWhite:
        for ii,p in enumerate(psr):
            systems = p.sysflagdict[args.sysflag_target]
            pct += 2*len(systems)
            pct += len(p.sysflagdict['nano-f'].keys())
    # band, clock, common-mode, ephemeris and dipole processes
    if args.incBand:
        if args.bandSpecModel == 'powerlaw':
            pct += 2*(len(bands)-1)
        elif args.bandSpecModel == 'spectrum':
            pct += (len(bands)-1)*nmodes_band
    if args.incClk:
        if args.clkSpecModel == 'powerlaw':
            pct += 2
        elif args.clkSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incCm:
        if args.cmSpecModel == 'powerlaw':
            pct += 2
        elif args.cmSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incEph and not args.jplBasis:
        if args.ephSpecModel == 'powerlaw':
            pct += 6
        elif args.ephSpecModel == 'spectrum':
            pct += 3*nmodes_eph
    if args.incDip:
        if args.dipSpecModel == 'powerlaw':
            pct += 2
        elif args.dipSpecModel == 'spectrum':
            pct += nmodes_red
    # gravitational-wave background parameters
    if args.incGWB:
        if args.gwbSpecModel == 'powerlaw':
            pct += 1
            if args.fix_slope is None:
                pct += 1
        elif args.gwbSpecModel == 'spectrum':
            pct += nmodes_red
            if args.gwbPrior == 'gaussProc':
                pct += 1 + gwb_popparam_ndims
        elif args.gwbSpecModel == 'turnover':
            if args.gwb_fb2env is not None:
                pct += 2
            elif args.gwb_fb2env is None:
                pct += 3
        elif args.gwbSpecModel == 'gpEnvInterp':
            pct += 2
        if args.incCorr:
            pct += num_corr_params
        if args.gwbModelSelect:
            pct += 1
    if args.incGWline:
        pct += 4
    # CGW source parameters (12 with eccentricity search, 11 otherwise)
    if args.ecc_search:
        pct += 12
    elif not args.ecc_search:
        pct += 11
    if args.psrTerm:
        # psr distances
        pct += len(psr)
        # psrterm gamma0
        pct += len(psr)
        # psrterm l0
        pct += len(psr)
    q[pct] = np.random.uniform(pmin[pct], pmax[pct])
    qxy += 0
    return q, qxy
# bwm draws
def drawFromBWMPrior(parameters, iter, beta):
    """PTMCMC jump: redraw one BWM (burst-with-memory) parameter from its prior.

    Accumulates ``pct`` over every enabled model component preceding the BWM
    block in the global parameter vector, then redraws one of the five BWM
    parameters (burst_mjd, burst_amp, phi, costheta, gwpol) uniformly from
    U(pmin, pmax).

    NOTE(review): relies on module globals (args, psr, pmin, pmax, bands,
    nmodes_*, num_corr_params, gwb_popparam_ndims). The offset arithmetic
    must stay in lock-step with the parameter-vector construction.

    :param parameters: current parameter vector (numpy array)
    :param iter: sampler iteration (unused; required by the jump signature)
    :param beta: chain inverse temperature (unused)
    :return: (proposed parameter vector, log transition-probability ratio)
    """
    # post-jump parameters
    q = parameters.copy()
    # transition probability
    qxy = 0
    npsr = len(psr)
    pct = 0
    # per-pulsar red noise
    if not args.fixRed:
        if args.redSpecModel == 'powerlaw':
            pct = 2*npsr
        elif args.redSpecModel == 'spectrum':
            pct = npsr*nmodes_red
    # per-pulsar DM variations
    if args.incDM and not args.fixDM:
        if args.dmSpecModel == 'powerlaw':
            pct += 2*npsr
        elif args.dmSpecModel == 'spectrum':
            pct += npsr*nmodes_dm
    # per-system white-noise parameters
    if args.varyWhite:
        for ii,p in enumerate(psr):
            systems = p.sysflagdict[args.sysflag_target]
            pct += 2*len(systems)
            pct += len(p.sysflagdict['nano-f'].keys())
    # band, clock, common-mode, ephemeris and dipole processes
    if args.incBand:
        if args.bandSpecModel == 'powerlaw':
            pct += 2*(len(bands)-1)
        elif args.bandSpecModel == 'spectrum':
            pct += (len(bands)-1)*nmodes_band
    if args.incClk:
        if args.clkSpecModel == 'powerlaw':
            pct += 2
        elif args.clkSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incCm:
        if args.cmSpecModel == 'powerlaw':
            pct += 2
        elif args.cmSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incEph and not args.jplBasis:
        if args.ephSpecModel == 'powerlaw':
            pct += 6
        elif args.ephSpecModel == 'spectrum':
            pct += 3*nmodes_eph
    if args.incDip:
        if args.dipSpecModel == 'powerlaw':
            pct += 2
        elif args.dipSpecModel == 'spectrum':
            pct += nmodes_red
    # gravitational-wave background parameters
    if args.incGWB:
        if args.gwbSpecModel == 'powerlaw':
            pct += 1
            if args.fix_slope is None:
                pct += 1
        elif args.gwbSpecModel == 'spectrum':
            pct += nmodes_red
            if args.gwbPrior == 'gaussProc':
                pct += 1 + gwb_popparam_ndims
        elif args.gwbSpecModel == 'turnover':
            if args.gwb_fb2env is not None:
                pct += 2
            elif args.gwb_fb2env is None:
                pct += 3
        elif args.gwbSpecModel == 'gpEnvInterp':
            pct += 2
        if args.incCorr:
            pct += num_corr_params
        if args.gwbModelSelect:
            pct += 1
    if args.incGWline:
        pct += 4
    # continuous-wave source parameters, if also searched
    if args.cgw_search:
        pct += 11
        if args.ecc_search:
            pct += 1
        if args.psrTerm:
            pct += 3*len(psr)
        if args.cgwModelSelect:
            pct += 1
    # burst_mjd, burst_amp, phi, costheta, gwpol
    ind = np.unique(np.random.randint(0, 5, 1))
    for ii in ind:
        q[pct+ii] = np.random.uniform(pmin[pct+ii], pmax[pct+ii])
    qxy += 0
    return q, qxy
# bwm model index draws
def drawFromBWMModelIndexPrior(parameters, iter, beta):
    """MCMC jump proposal: redraw the BWM model-indexing parameter.

    Computes the offset ``pct`` of the BWM block with the same layout walk
    as ``drawFromBWMPrior``, skips the 5 BWM shape parameters, and redraws
    the trailing model-index parameter uniformly from its prior.

    :param parameters: current sample (array-like parameter vector)
    :param iter: current iteration number (unused; sampler API requirement)
    :param beta: chain inverse temperature (unused; sampler API requirement)
    :return: tuple (proposed parameter vector, log proposal-density ratio)
    """
    # post-jump parameters
    q = parameters.copy()
    # transition probability
    qxy = 0
    npsr = len(psr)
    # ``pct`` accumulates the sizes of all preceding model blocks; the
    # if-chain mirrors the parameter-vector layout (see drawFromBWMPrior).
    pct = 0
    if not args.fixRed:
        if args.redSpecModel == 'powerlaw':
            pct = 2*npsr
        elif args.redSpecModel == 'spectrum':
            pct = npsr*nmodes_red
    if args.incDM and not args.fixDM:
        if args.dmSpecModel == 'powerlaw':
            pct += 2*npsr
        elif args.dmSpecModel == 'spectrum':
            pct += npsr*nmodes_dm
    if args.varyWhite:
        for ii,p in enumerate(psr):
            systems = p.sysflagdict[args.sysflag_target]
            pct += 2*len(systems)
            pct += len(p.sysflagdict['nano-f'].keys())
    if args.incBand:
        if args.bandSpecModel == 'powerlaw':
            pct += 2*(len(bands)-1)
        elif args.bandSpecModel == 'spectrum':
            pct += (len(bands)-1)*nmodes_band
    if args.incClk:
        if args.clkSpecModel == 'powerlaw':
            pct += 2
        elif args.clkSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incCm:
        if args.cmSpecModel == 'powerlaw':
            pct += 2
        elif args.cmSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incEph and not args.jplBasis:
        if args.ephSpecModel == 'powerlaw':
            pct += 6
        elif args.ephSpecModel == 'spectrum':
            pct += 3*nmodes_eph
    if args.incDip:
        if args.dipSpecModel == 'powerlaw':
            pct += 2
        elif args.dipSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incGWB:
        if args.gwbSpecModel == 'powerlaw':
            pct += 1
            if args.fix_slope is None:
                pct += 1
        elif args.gwbSpecModel == 'spectrum':
            pct += nmodes_red
            if args.gwbPrior == 'gaussProc':
                pct += 1 + gwb_popparam_ndims
        elif args.gwbSpecModel == 'turnover':
            if args.gwb_fb2env is not None:
                pct += 2
            elif args.gwb_fb2env is None:
                pct += 3
        elif args.gwbSpecModel == 'gpEnvInterp':
            pct += 2
        if args.incCorr:
            pct += num_corr_params
        if args.gwbModelSelect:
            pct += 1
    if args.incGWline:
        pct += 4
    if args.cgw_search:
        pct += 11
        if args.ecc_search:
            pct += 1
        if args.psrTerm:
            pct += 3*len(psr)
        if args.cgwModelSelect:
            pct += 1
    # skip over the 5 BWM shape parameters to reach the indexing parameter
    pct += 5
    # indexing parameter is at end of list
    q[pct] = np.random.uniform(pmin[pct], pmax[pct])
    # symmetric prior draw: proposal ratio contributes 0
    qxy += 0
    return q, qxy
# ephmeris quadratic fisher draws
def drawFromEphemQuadPrior(parameters, iter, beta):
    """MCMC jump proposal: redraw one ephemeris-quadratic parameter.

    Computes the offset ``pct`` of the ephemeris-quadratic block, which sits
    after the (optional) BWM block, then redraws one of its 9 parameters
    uniformly from its prior.

    :param parameters: current sample (array-like parameter vector)
    :param iter: current iteration number (unused; sampler API requirement)
    :param beta: chain inverse temperature (unused; sampler API requirement)
    :return: tuple (proposed parameter vector, log proposal-density ratio)
    """
    # post-jump parameters
    q = parameters.copy()
    # transition probability
    qxy = 0
    npsr = len(psr)
    # ``pct`` accumulates the sizes of all preceding model blocks; the
    # if-chain mirrors the parameter-vector layout (see drawFromBWMPrior).
    pct = 0
    if not args.fixRed:
        if args.redSpecModel == 'powerlaw':
            pct = 2*npsr
        elif args.redSpecModel == 'spectrum':
            pct = npsr*nmodes_red
    if args.incDM and not args.fixDM:
        if args.dmSpecModel == 'powerlaw':
            pct += 2*npsr
        elif args.dmSpecModel == 'spectrum':
            pct += npsr*nmodes_dm
    if args.varyWhite:
        for ii,p in enumerate(psr):
            systems = p.sysflagdict[args.sysflag_target]
            pct += 2*len(systems)
            pct += len(p.sysflagdict['nano-f'].keys())
    if args.incBand:
        if args.bandSpecModel == 'powerlaw':
            pct += 2*(len(bands)-1)
        elif args.bandSpecModel == 'spectrum':
            pct += (len(bands)-1)*nmodes_band
    if args.incClk:
        if args.clkSpecModel == 'powerlaw':
            pct += 2
        elif args.clkSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incCm:
        if args.cmSpecModel == 'powerlaw':
            pct += 2
        elif args.cmSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incEph and not args.jplBasis:
        if args.ephSpecModel == 'powerlaw':
            pct += 6
        elif args.ephSpecModel == 'spectrum':
            pct += 3*nmodes_eph
    if args.incDip:
        if args.dipSpecModel == 'powerlaw':
            pct += 2
        elif args.dipSpecModel == 'spectrum':
            pct += nmodes_red
    if args.incGWB:
        if args.gwbSpecModel == 'powerlaw':
            pct += 1
            if args.fix_slope is None:
                pct += 1
        elif args.gwbSpecModel == 'spectrum':
            pct += nmodes_red
            if args.gwbPrior == 'gaussProc':
                pct += 1 + gwb_popparam_ndims
        elif args.gwbSpecModel == 'turnover':
            if args.gwb_fb2env is not None:
                pct += 2
            elif args.gwb_fb2env is None:
                pct += 3
        elif args.gwbSpecModel == 'gpEnvInterp':
            pct += 2
        if args.incCorr:
            pct += num_corr_params
        if args.gwbModelSelect:
            pct += 1
    if args.incGWline:
        pct += 4
    if args.cgw_search:
        pct += 11
        if args.ecc_search:
            pct += 1
        if args.psrTerm:
            pct += 3*len(psr)
        if args.cgwModelSelect:
            pct += 1
    # skip the BWM block if present (+1 when its model-index param is on)
    if args.bwm_search:
        if args.bwm_model_select:
            pct += 6
        else:
            pct += 5
    # a single random index in [0, 9); np.unique of a size-1 draw yields a
    # length-1 array used here for fancy indexing into q/pmin/pmax
    ind = np.unique(np.random.randint(0, 9, 1))
    q[pct+ind] = np.random.uniform(pmin[pct+ind], pmax[pct+ind])
    # symmetric prior draw: proposal ratio contributes 0
    qxy += 0
    return q, qxy
# ephmeris quadratic fisher draws
def drawFromEphemQuadFisherPrior(parameters, iter, beta):
# post-jump parameters
q = parameters.copy()
# transition probability
qxy = 0
npsr = len(psr)
pct = 0
if not args.fixRed:
if args.redSpecModel == 'powerlaw':
pct = 2*npsr
elif args.redSpecModel == 'spectrum':
pct = npsr*nmodes_red
if args.incDM and not args.fixDM:
if args.dmSpecModel == 'powerlaw':
pct += 2*npsr
elif args.dmSpecModel == 'spectrum':
pct += npsr*nmodes_dm
if args.varyWhite:
for ii,p in enumerate(psr):
systems = p.sysflagdict[args.sysflag_target]
pct += 2*len(systems)
pct += len(p.sysflagdict['nano-f'].keys())
if args.incBand:
if args.bandSpecModel == 'powerlaw':
| |
# <reponame>thisch/mypy
"""Generate C code for a Python C extension module from Python source code."""
# FIXME: Basically nothing in this file operates on the level of a
# single module and it should be renamed.
from collections import OrderedDict
from typing import List, Tuple, Dict, Iterable, Set, TypeVar, Optional
from mypy.build import BuildSource, BuildResult, build
from mypy.errors import CompileError
from mypy.options import Options
from mypyc import genops
from mypyc.common import PREFIX, TOP_LEVEL_NAME, INT_PREFIX, MODULE_PREFIX, shared_lib_name
from mypyc.emit import EmitterContext, Emitter, HeaderDeclaration
from mypyc.emitfunc import generate_native_function, native_function_header
from mypyc.emitclass import generate_class_type_decl, generate_class
from mypyc.emitwrapper import (
generate_wrapper_function, wrapper_function_header,
)
from mypyc.ops import (
FuncIR, ClassIR, ModuleIR, ModuleIRs, LiteralsMap, RType, RTuple
)
from mypyc.options import CompilerOptions
from mypyc.uninit import insert_uninit_checks
from mypyc.refcount import insert_ref_count_opcodes
from mypyc.exceptions import insert_exception_handling
from mypyc.namegen import NameGenerator, exported_name
from mypyc.errors import Errors
# All of the modules being compiled are divided into "groups". A group
# is a set of modules that are placed into the same shared library.
# Two common configurations are that every module is placed in a group
# by itself (fully separate compilation) and that every module is
# placed in the same group (fully whole-program compilation), but we
# support finer-grained control of the group as well.
#
# In fully whole-program compilation, we will generate N+1 extension
# modules: one shim per module and one shared library containing all
# the actual code.
# In fully separate compilation, we (unfortunately) will generate 2*N
# extension modules: one shim per module and also one library containg
# each module's actual code. (This might be fixable in the future,
# but allows a clean separation between setup of the export tables
# (see generate_export_table) and running module top levels.)
#
# A group is represented as a list of BuildSources containing all of
# its modules along with the name of the group. (Which can be None
# only if we are compiling only a single group with a single file in it
# and not using shared libraries).
# A group: the modules it contains plus the group (shared library) name.
# The name may be None only for single-file compilation without shared libs.
Group = Tuple[List[BuildSource], Optional[str]]
Groups = List[Group]
# A list of (file name, file contents) pairs.
FileContents = List[Tuple[str, str]]
class MarkedDeclaration:
    """Header declaration paired with a mutable mark (used for topological sort)."""
    def __init__(self, declaration: 'HeaderDeclaration', mark: bool) -> None:
        self.declaration = declaration
        # Honor the caller-supplied initial mark; previously the ``mark``
        # argument was accepted but ignored (always reset to False).
        self.mark = mark
def parse_and_typecheck(sources: List[BuildSource], options: Options,
                        alt_lib_path: Optional[str] = None) -> BuildResult:
    """Run the mypy front end over *sources*; raise CompileError on failure."""
    assert options.strict_optional, 'strict_optional must be turned on'
    build_result = build(sources=sources,
                         options=options,
                         alt_lib_path=alt_lib_path)
    if build_result.errors:
        raise CompileError(build_result.errors)
    return build_result
def compile_modules_to_c(
        result: BuildResult,
        compiler_options: CompilerOptions,
        errors: Errors,
        groups: Groups,
) -> Tuple[ModuleIRs, List[FileContents]]:
    """Compile Python module(s) to the source of Python C extension modules.

    This generates the source code for the "shared library" module
    for each group. The shim modules are generated in mypyc.build.
    Each shared library module provides, for each module in its group,
    a PyCapsule containing an initialization function.
    Additionally, it provides a capsule containing an export table of
    pointers to all of the group's functions and static variables.

    Arguments:
        result: The BuildResult from the mypy front-end
        compiler_options: The compilation options
        errors: Where to report any errors encountered
        groups: The groups that we are compiling. See documentation of Groups type above.

    Returns the IR of the modules and a list containing the generated files for each group.
    """
    module_names = [source.module for group_sources, _ in groups for source in group_sources]
    file_nodes = [result.files[name] for name in module_names]
    # Construct a map from modules to what group they belong to
    group_map = {}
    for group, lib_name in groups:
        for source in group:
            group_map[source.module] = lib_name
    # Generate basic IR, with missing exception and refcount handling.
    mapper = genops.Mapper(group_map)
    modules = genops.build_ir(file_nodes, result.graph, result.types,
                              mapper,
                              compiler_options, errors)
    # Bail out before the lowering passes if IR generation reported errors.
    if errors.num_errors > 0:
        return modules, []
    # Insert uninit checks.
    for module in modules.values():
        for fn in module.functions:
            insert_uninit_checks(fn)
    # Insert exception handling.
    for module in modules.values():
        for fn in module.functions:
            insert_exception_handling(fn)
    # Insert refcount handling (must run after exception handling).
    for module in modules.values():
        for fn in module.functions:
            insert_ref_count_opcodes(fn)
    source_paths = {module_name: result.files[module_name].path
                    for module_name in module_names}
    names = NameGenerator([[source.module for source in sources] for sources, _ in groups])
    # Generate C code for each compilation group. Each group will be
    # compiled into a separate extension module.
    ctext = []
    for group_sources, group_name in groups:
        group_modules = [(source.module, modules[source.module]) for source in group_sources]
        literals = mapper.literals[group_name]
        generator = GroupGenerator(
            literals, group_modules, source_paths, group_name, group_map, names,
            compiler_options.multi_file
        )
        ctext.append(generator.generate_c_for_modules())
    return modules, ctext
def generate_function_declaration(fn: FuncIR, emitter: Emitter) -> None:
    """Register header declarations for a function's native entry point and wrapper."""
    # The native (C-level) entry point is exported so that other groups can
    # reach it through the export table.
    emitter.context.declarations[emitter.native_function_name(fn.decl)] = HeaderDeclaration(
        '{};'.format(native_function_header(fn.decl, emitter)),
        needs_export=True)
    # The module top level has no Python-visible wrapper function.
    if fn.name != TOP_LEVEL_NAME:
        emitter.context.declarations[PREFIX + fn.cname(emitter.names)] = HeaderDeclaration(
            '{};'.format(wrapper_function_header(fn, emitter.names)))
def encode_as_c_string(s: str) -> Tuple[str, int]:
    """Produce a utf-8 encoded, escaped, quoted C string and its size from a string"""
    data = s.encode('utf-8')
    # repr() of a bytes object yields backslash escapes; strip the leading
    # ``b'`` and trailing quote, then escape double quotes for C.
    body = repr(data)[2:-1].replace('"', '\\"')
    return '"' + body + '"', len(data)
def encode_bytes_as_c_string(b: bytes) -> Tuple[str, int]:
    """Produce a single-escaped, quoted C string and its size from a bytes"""
    # Strip the ``b'`` prefix and closing quote from the bytes repr, then
    # escape double quotes for C.
    body = repr(b)[2:-1].replace('"', '\\"')
    return '"' + body + '"', len(b)
def pointerize(decl: str, name: str) -> str:
    """Given a C decl and its name, modify it to be a declaration to a pointer."""
    # Only handles the declaration shapes we emit, not arbitrary C.
    if '(' not in decl:
        # Non-function declaration: just prefix the name with '*'.
        return decl.replace(name, '*{}'.format(name))
    # Function declaration: turn the name into a parenthesized pointer.
    return decl.replace(name, '(*{})'.format(name))
class GroupGenerator:
    def __init__(self,
                 literals: LiteralsMap,
                 modules: List[Tuple[str, ModuleIR]],
                 source_paths: Dict[str, str],
                 group_name: Optional[str],
                 group_map: Dict[str, Optional[str]],
                 names: NameGenerator,
                 multi_file: bool) -> None:
        """Generator for C source for a compilation group.

        The code for a compilation group contains an internal and an
        external .h file, and then one .c if not in multi_file mode or
        one .c file per module if in multi_file mode.)

        Arguments:
            literals: The literals declared in this group
            modules: (name, ir) pairs for each module in the group
            source_paths: Map from module names to source file paths
            group_name: The name of the group (or None if this is single-module compilation)
            group_map: A map of modules to their group names
            names: The name generator for the compilation
            multi_file: Whether to put each module in its own source file regardless
                        of group structure.
        """
        self.literals = literals
        self.modules = modules
        self.source_paths = source_paths
        # Shared emitter context for all files generated for this group.
        self.context = EmitterContext(names, group_name, group_map)
        self.names = names
        # Initializations of globals to simple values that we can't
        # do statically because the windows loader is bad.
        self.simple_inits = []  # type: List[Tuple[str, str]]
        self.group_name = group_name
        # A shared library is only produced when the group has a name.
        self.use_shared_lib = group_name is not None
        self.multi_file = multi_file
@property
def group_suffix(self) -> str:
return '_' + self.group_name if self.group_name else ''
def generate_c_for_modules(self) -> List[Tuple[str, str]]:
file_contents = []
multi_file = self.use_shared_lib and self.multi_file
base_emitter = Emitter(self.context)
# When not in multi-file mode we just include the runtime
# library c files to reduce the number of compiler invocations
# needed
if not self.multi_file:
base_emitter.emit_line('#include "CPy.c"')
base_emitter.emit_line('#include "getargs.c"')
base_emitter.emit_line('#include "__native{}.h"'.format(self.group_suffix))
base_emitter.emit_line('#include "__native_internal{}.h"'.format(self.group_suffix))
emitter = base_emitter
for (_, literal), identifier in self.literals.items():
if isinstance(literal, int):
symbol = emitter.static_name(identifier, None)
self.declare_global('CPyTagged ', symbol)
else:
self.declare_static_pyobject(identifier, emitter)
for module_name, module in self.modules:
if multi_file:
emitter = Emitter(self.context)
emitter.emit_line('#include "__native{}.h"'.format(self.group_suffix))
emitter.emit_line('#include "__native_internal{}.h"'.format(self.group_suffix))
self.declare_module(module_name, emitter)
self.declare_internal_globals(module_name, emitter)
self.declare_imports(module.imports, emitter)
for cl in module.classes:
if cl.is_ext_class:
generate_class(cl, module_name, emitter)
# Generate Python extension module definitions and module initialization functions.
self.generate_module_def(emitter, module_name, module)
for fn in module.functions:
emitter.emit_line()
generate_native_function(fn, emitter, self.source_paths[module_name], module_name)
if fn.name != TOP_LEVEL_NAME:
emitter.emit_line()
generate_wrapper_function(
fn, emitter, self.source_paths[module_name], module_name)
if multi_file:
name = ('__native_{}.c'.format(emitter.names.private_name(module_name)))
file_contents.append((name, ''.join(emitter.fragments)))
# The external header file contains type declarations while
# the internal contains declarations of functions and objects
# (which are shared between shared libraries via dynamic
# exports tables and not accessed directly.)
ext_declarations = Emitter(self.context)
ext_declarations.emit_line('#ifndef MYPYC_NATIVE{}_H'.format(self.group_suffix))
ext_declarations.emit_line('#define MYPYC_NATIVE{}_H'.format(self.group_suffix))
ext_declarations.emit_line('#include <Python.h>')
ext_declarations.emit_line('#include <CPy.h>')
declarations = Emitter(self.context)
declarations.emit_line('#ifndef MYPYC_NATIVE_INTERNAL{}_H'.format(self.group_suffix))
declarations.emit_line('#define MYPYC_NATIVE_INTERNAL{}_H'.format(self.group_suffix))
declarations.emit_line('#include <Python.h>')
declarations.emit_line('#include <CPy.h>')
declarations.emit_line('#include "__native{}.h"'.format(self.group_suffix))
declarations.emit_line()
declarations.emit_line('int CPyGlobalsInit(void);')
declarations.emit_line()
for module_name, module in self.modules:
self.declare_finals(module_name, module.final_names, declarations)
for cl in module.classes:
generate_class_type_decl(cl, emitter, ext_declarations, declarations)
for fn | |
import bz2 as zmodule
import collections
import datetime
import enum
import functools
import glob
# import gzip as zmodule
import itertools
import json
import logging
import os
import sys
import time
from concurrent.futures import ProcessPoolExecutor
try:
import ujson
except ImportError:
ujson = json
import sqlalchemy as sqla
from sqlalchemy.sql import func as sqla_func
from . import search_algorithm
from .driver import Driver, make_driver
from .puzzle_type import PuzzleType, rot_puzzle_type
from .config import config_getkey
from .utils import (
UNDEFINED,
grouper, ProgressBar, round_robin, timing,
)
# Public API exported by ``from ... import *``.
__all__ = [
    'dpdb_get_dbdir',
    'dpdb_set_dbdir',
    'dpdb_list_meta_filenames',
    'dpdb_read_meta',
    'dpdb_write_meta',
    'dpdb_setup',
    'dpdb_get_dbs',
    'DpdbInfo',
    'dpdb_clear',
    'dpdb_register',
    'dpdb_get_info',
    'dpdb_get_db',
    'dpdb_get_memdb',
    'dpdb_list',
    'DpdbInitMode',
    'dpdb_create',
    'dpdb_make_heuristic',
]
# Module-level logger.
LOG = logging.getLogger(__name__)
# Default on-disk location of the pattern databases, next to this module.
DEFAULT_DBDIR = os.path.join(os.path.dirname(__file__), "databases")
# Active database directory; changed via dpdb_set_dbdir().
DBDIR = DEFAULT_DBDIR
def dpdb_get_dbdir():
    """Return the currently active database directory."""
    # Reading a module global needs no ``global`` statement.
    return DBDIR
def dpdb_set_dbdir(dbdir=None):
    """Set the active database directory; None restores the default."""
    global DBDIR
    target = DEFAULT_DBDIR if dbdir is None else dbdir
    DBDIR = os.path.normpath(os.path.abspath(target))
def dpdb_list_meta_filenames(dbdir=None):
    """Yield the paths of all ``*.meta`` files in *dbdir* (default: active dbdir)."""
    if dbdir is None:
        dbdir = dpdb_get_dbdir()
    yield from glob.glob(os.path.join(dbdir, "*.meta"))
def dpdb_read_meta(dpdb_meta_filename):
    """Load a DpdbInfo from a json ``.meta`` file."""
    with open(dpdb_meta_filename, "r") as fh:
        meta = ujson.load(fh)
    return DpdbInfo(**meta)
def dpdb_write_meta(dpdb_info, dpdb_meta_filename=None):
    """Serialize *dpdb_info* to json; derive the filename from the info if absent."""
    dct = dpdb_info.as_dict()
    if dpdb_meta_filename is None:
        dpdb_meta_filename = os.path.join(
            dpdb_get_dbdir(),
            "dp.{puzzle_type}.{size}:{label}.meta".format(**dct))
    with open(dpdb_meta_filename, "w") as fh:
        fh.write(ujson.dumps(dct, sort_keys=True, indent=4) + '\n')
def dpdb_setup(dbdir=UNDEFINED):
    """Reset the registry and (re)load every database found in *dbdir*."""
    if dbdir is UNDEFINED:
        # Fall back to the configured directory.
        dbdir = config_getkey('dpdb.dbdir')
    dpdb_clear()
    dpdb_set_dbdir(dbdir)
    for meta_filename in dpdb_list_meta_filenames():
        dpdb_register(dpdb_read_meta(meta_filename))
def dpdb_rotate_info(dpdb_info):
    """Build, register and persist the rotated variant of *dpdb_info*."""
    rotated_info = Dpdb(dpdb_info).rotate_cache()
    dpdb_register(rotated_info)
    dpdb_write_meta(rotated_info)
def dpdb_get_dbs(dbkeys=None, puzzle_type=None):
    """Return an OrderedDict mapping 'size:label' keys to registered DpdbInfo.

    If *dbkeys* is given, only those keys are returned (KeyError if one is
    unknown); otherwise every registered database is included.
    """
    available = collections.OrderedDict(
        ('{}:{}'.format(info.size, info.label), info)
        for info in dpdb_list(puzzle_type=puzzle_type))
    selected = list(dbkeys) if dbkeys is not None else list(available)
    return collections.OrderedDict((key, available[key]) for key in selected)
class DpdbInfo(collections.namedtuple('DpdbInfoBase', 'puzzle_type size label patterns filenames')):
    """Immutable description of a disjoint-pattern database.

    ``patterns`` is a tuple of tuples of board positions; ``filenames`` are
    the per-pattern cache files derived from the other fields.
    """

    def __new__(cls, puzzle_type, size, label, patterns):
        # Accept the puzzle type by name as well as by enum member.
        if isinstance(puzzle_type, str):
            puzzle_type = getattr(PuzzleType, puzzle_type)
        if isinstance(patterns, str):
            # Textual form: one int per cell; cells sharing the same value
            # belong to the same pattern.  The group with the smallest value
            # is dropped — presumably the "not in any pattern" cells; TODO
            # confirm a 0-group is always present.
            data = [int(i) for i in patterns.split()]
            dct = collections.defaultdict(list)
            for i, v in enumerate(data):
                dct[v].append(i)
            items = sorted(dct.items(), key=lambda x: x[0])
            patterns = [item[1] for item in items[1:]]
        patterns = tuple(tuple(pattern) for pattern in patterns)
        # goal_tr is the identity permutation here, so this loop validates
        # that no board position appears in more than one pattern.
        goal_tr = list(range(size * size))
        vlist = []
        for pattern in patterns:
            for idx in pattern:
                vlist.append(goal_tr[idx])
        if len(vlist) != len(set(vlist)):
            raise ValueError("invalid patterns {} for size {}".format(patterns, size))
        # One cache file per pattern, located in the active db directory.
        fmt_filename = os.path.join(dpdb_get_dbdir(), 'dp.{puzzle_type.name}.{size}:{label}.cache.{pattern_id}')
        filenames = tuple(fmt_filename.format(puzzle_type=puzzle_type, size=size, label=label, pattern_id=pattern_id) for pattern_id, _ in enumerate(patterns))
        return super().__new__(
            cls,
            puzzle_type=puzzle_type,
            size=size,
            label=label,
            patterns=patterns,
            filenames=filenames)

    def fqname(self):
        """Fully-qualified human-readable name, e.g. ``TYPE-4:mylabel``."""
        return "{pt}-{size}:{label}".format(
            pt=self.puzzle_type.name,
            size=self.size,
            label=self.label)

    def rotated(self):
        """Return the DpdbInfo for the rotated variant of this database."""
        driver = make_driver(self.size)
        rot_patterns = tuple(driver.rotate(pattern) for pattern in self.patterns)
        return self.__class__(
            puzzle_type=rot_puzzle_type(self.puzzle_type),
            size=self.size,
            label=self.label,
            patterns=rot_patterns)

    def exists(self):
        """True when every pattern cache file is present on disk."""
        return all(os.path.exists(filename) for filename in self.filenames)

    def as_dict(self):
        # ``filenames`` is intentionally omitted: it is derived state.
        return {
            "puzzle_type": self.puzzle_type.name,
            "size": self.size,
            "label": self.label,
            "patterns": self.patterns}
class Dpdb(object):
    """Disjoint-pattern database stored in a sqlite table.

    One row per (pattern_id, pattern_start_id) holding the solve cost for
    every possible blank-cursor position.  Supports buffered inserts, cost
    queries, and import/export/rotation of json cache files.
    """

    def __init__(self, dpdb_info, *, max_buffer_size=500, key_type=None, db_filename=None):
        # TODO: remove key_type (it is always 'string' in practice)
        self._dpdb_info = dpdb_info
        driver = make_driver(dpdb_info.size)
        if db_filename is None:
            pt = driver.puzzle_type.name
            db_filename = os.path.join(dpdb_get_dbdir(), 'dp.{pt}.{di.size}:{di.label}.sqlite'.format(pt=pt, di=dpdb_info))
        else:
            db_filename = os.path.abspath(db_filename)
        self._db_filename = db_filename
        self._uri = "sqlite:///" + db_filename
        self._engine = sqla.create_engine(self._uri)
        metadata = sqla.MetaData()
        size = self._dpdb_info.size
        puzzle_type = self._dpdb_info.puzzle_type
        # NOTE(review): unused local kept in case rot_puzzle_type has
        # registration side effects — confirm and drop if pure.
        r_puzzle_type = rot_puzzle_type(puzzle_type)
        max_pattern_len = max(len(pattern) for pattern in dpdb_info.patterns)
        max_index = dpdb_info.size ** 2
        # Smallest power of 10 >= the number of cells; used to pack a
        # position tuple into one integer without digit collisions.
        max_index_pwr10 = 1
        while max_index_pwr10 < max_index:
            max_index_pwr10 *= 10
        if key_type is None:
            # Use integer keys only when they fit comfortably inside
            # sqlite's signed 64-bit integer range.
            if max_index_pwr10 ** max_pattern_len <= 2 ** 60:
                key_type = 'int'
            else:
                key_type = 'string'
        if key_type == 'int':
            f_pattern_start_id = [max_index_pwr10 ** i for i in range(max_pattern_len - 1, -1, -1)]

            def make_key(pattern_id, pattern_start_id):
                # Positional base-10^k packing of the pattern cells.
                result = 0
                for pi, fi in zip(pattern_start_id, f_pattern_start_id):
                    result += pi * fi
                return result
            sqla_type = sqla.BigInteger
        else:
            def make_key(pattern_id, pattern_start_id):
                return ','.join(str(p) for p in pattern_start_id)
            sqla_type = sqla.String
        self.make_key = make_key
        # One cost column per possible blank-cursor position.
        self._cursor_columns = tuple(
            sqla.Column("cursor_{}".format(cursor), sqla.Integer, default=0, primary_key=False)
            for cursor in range(size * size))
        self._costs_table = sqla.Table(
            "costs_{size}_{label}".format(size=self._dpdb_info.size, label=self._dpdb_info.label),
            metadata,
            sqla.Column('pattern_id', sqla.Integer,
                        primary_key=True, autoincrement=False),
            sqla.Column('pattern_start_id', sqla_type,
                        primary_key=True, autoincrement=False),
            sqla.Index("p_id_index", "pattern_id", "pattern_start_id"),
            *self._cursor_columns,
        )
        self._buffer = []
        self._max_buffer_size = max_buffer_size
        self._key_type = key_type
        self._connection = None

    def rotated(self):
        """Return a Dpdb instance for the rotated variant of this database."""
        return self.__class__(
            dpdb_info=self._dpdb_info.rotated(),
            max_buffer_size=self._max_buffer_size,
            key_type=self._key_type,
            db_filename=self._db_filename)

    def merge(self, *db_filenames):
        """Copy all cost rows from the given sqlite files into this database."""
        for db_filename in db_filenames:
            with timing("dpdb[{}] merge {} -> {}".format(self.dpdb_info.fqname(), db_filename, self._engine)):
                dpdb = self.__class__(self._dpdb_info, db_filename=db_filename)
                self.put_costs(dpdb.get_all_costs())

    def export_cache(self):
        """Dump every pattern's cost rows from sqlite to its cache file."""
        if not self.db_exists():
            raise ValueError("missing db {}".format(self._uri))
        dpdb_info = self._dpdb_info
        with timing("dpdb[{}] export_cache".format(dpdb_info.fqname())):
            for pattern_id, dummy_pattern_filename in enumerate(dpdb_info.filenames):
                pattern_costs_data = []
                for dummy_pattern_id, pattern_start_id, *cost_data in self.get_pattern_costs(pattern_id):
                    pattern_costs_data.append((pattern_start_id, tuple(cost_data)))
                self._write_cache_file(dpdb_info, pattern_id, pattern_costs_data)

    @classmethod
    def _read_cache_file(cls, dpdb_info, pattern_id):
        """Read one pattern's compressed json cache file."""
        pattern_filename = dpdb_info.filenames[pattern_id]
        with timing("dpdb[{}] reading cache file {}".format(dpdb_info.fqname(), pattern_filename)):
            with zmodule.open(pattern_filename, "rt") as fh:
                return ujson.load(fh)

    @classmethod
    def _write_cache_file(cls, dpdb_info, pattern_id, pattern_costs_data):
        """Write one pattern's compressed json cache file."""
        pattern_filename = dpdb_info.filenames[pattern_id]
        with timing("dpdb[{}] writing cache file {}".format(dpdb_info.fqname(), pattern_filename)):
            with zmodule.open(pattern_filename, "wb") as fh:
                fh.write(bytes(ujson.dumps(pattern_costs_data, ensure_ascii=True), 'ascii'))

    def import_cache(self):
        """Load every pattern cache file into the sqlite costs table."""
        dpdb_info = self._dpdb_info
        if not self.cache_exists():
            raise ValueError("missing cache {}".format(dpdb_info.filenames))
        with timing("dpdb[{}] import_cache".format(dpdb_info.fqname())):
            engine = self._engine
            table = self._costs_table
            iquery = table.insert()
            for pattern_id, pattern_filename in enumerate(dpdb_info.filenames):
                pattern_costs_data = self._read_cache_file(dpdb_info, pattern_id)
                with engine.begin() as connection:
                    # BUG FIX: this loop previously iterated the undefined
                    # name ``pattern_cost_data`` and raised NameError.
                    for pattern_start_id, cost_data in pattern_costs_data:
                        data = (pattern_id, pattern_start_id) + tuple(cost_data)
                        connection.execute(iquery.values(data))

    def rotate_cache(self):
        """Rewrite every cache file for the 180-degree-rotated database.

        Returns the DpdbInfo of the rotated database.
        """
        rot_dpdb = self.rotated()
        dpdb_info = self._dpdb_info
        with timing("dpdb[{}] rotate_cache".format(dpdb_info.fqname())):
            rot_dpdb_info = rot_dpdb.dpdb_info
            make_key = self.make_key
            rot_make_key = rot_dpdb.make_key
            size = dpdb_info.size
            driver = make_driver(size)
            rot180_k = driver.rot180_k
            positions = list(range(size * size))
            for pattern_id, (pattern, rot_pattern) in enumerate(zip(dpdb_info.patterns, rot_dpdb_info.patterns)):
                pindex = list(range(len(pattern)))
                pindex_permutations = list(itertools.permutations(pindex, len(pattern)))
                pattern_costs = dict(self._read_cache_file(dpdb_info, pattern_id))
                rot_pattern_cost_data = []
                with timing("dpdb[{}] rotate pattern[{}]".format(dpdb_info.fqname(), pattern_id)):
                    # Enumerate every placement of the pattern cells, remap
                    # both the key and the per-cursor costs through rot180_k.
                    for pattern_position in itertools.combinations(positions, len(pattern)):
                        for pindex_permutation in pindex_permutations:
                            pattern_start = tuple(pattern_position[pidx] for pidx in pindex_permutation)
                            pattern_start_id = make_key(pattern_id, pattern_start)
                            rot_pattern_start_id = rot_make_key(pattern_id, [rot180_k[x] for x in pattern_start])
                            cost_data = pattern_costs.pop(pattern_start_id)
                            rot_cost_data = [cost_data[rk] for rk in rot180_k]
                            rot_pattern_cost_data.append((rot_pattern_start_id, rot_cost_data))
                            # print("{} -> {}".format(pattern_start_id, rot_pattern_start_id))
                self._write_cache_file(rot_dpdb_info, pattern_id, rot_pattern_cost_data)
        return rot_dpdb_info

    @property
    def dpdb_info(self):
        return self._dpdb_info

    def cache_exists(self):
        """True when every pattern cache file is present on disk."""
        return self._dpdb_info.exists()

    def load_cache(self):
        """Return a list of {pattern_start_id: cost_data} dicts, one per pattern."""
        dpdb_info = self._dpdb_info
        if self.cache_exists():
            with timing("dpdb[{}] load_cache".format(dpdb_info.fqname())):
                costs = []
                for pattern_id, pattern_filename in enumerate(dpdb_info.filenames):
                    costs.append(dict(self._read_cache_file(dpdb_info, pattern_id)))
                return costs
        else:
            raise ValueError("missing cache {}".format(dpdb_info.filenames))

    def db_exists(self):
        """True when the costs table already exists in the sqlite file."""
        for tablename in [self._costs_table.name]:
            if not self._engine.has_table(tablename):
                return False
        return True

    def drop(self):
        """Drop the costs table if it exists."""
        with self._engine.begin() as connection:
            self._costs_table.drop(bind=connection, checkfirst=True)

    def create(self):
        """Create the costs table if it does not exist yet."""
        with self._engine.begin() as connection:
            self._costs_table.create(bind=connection, checkfirst=True)

    def insert_bulk(self, groups):
        """Insert (pattern_id, pattern_start_id, {cursor: cost}) rows in one transaction."""
        table = self._costs_table
        iquery = table.insert()
        with self._engine.begin() as connection:
            for pattern_id, pattern_start_id, costs in groups:
                vdata = {'pattern_id': pattern_id, 'pattern_start_id': pattern_start_id}
                for cursor, cost in costs.items():
                    vdata[self._cursor_columns[cursor].name] = cost
                connection.execute(iquery.values(vdata))

    def buffer_costs(self, pattern_id, pattern_start_id, costs):
        """Queue a row for insertion; flush automatically when the buffer is full."""
        self._buffer.append((pattern_id, pattern_start_id, costs))
        if len(self._buffer) >= self._max_buffer_size:
            self.buffer_flush()

    def buffer_flush(self):
        """Write any buffered rows to the database."""
        if self._buffer:
            self.insert_bulk(self._buffer)
            self._buffer.clear()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Make sure buffered rows hit the database even on error exit.
        self.buffer_flush()
        return False

    def _setup_conn(self):
        # NOTE(review): stores a context manager without entering it and is
        # never read by this class — looks vestigial; confirm before removal.
        self._connection = self._engine.begin()

    def get_cost(self, pattern_id, pattern_start_id, cursor):
        """Return the cost for one (pattern, placement, cursor) triple."""
        t = self._costs_table
        with self._engine.begin() as connection:
            col = self._cursor_columns[cursor]
            query = sqla.select([col]).where(sqla.and_(t.c.pattern_id == pattern_id, t.c.pattern_start_id == pattern_start_id))
            return connection.execute(query).fetchone()[0]

    def get_cost_bulk(self, cursor, p_ids):
        """Return the summed cost over the given (pattern_id, pattern_start_id) pairs.

        Returns None implicitly when *p_ids* is empty.
        """
        t = self._costs_table
        if p_ids:
            where_condition = sqla.or_(*[sqla.and_(t.c.pattern_id == p_id, t.c.pattern_start_id == ps_id) for p_id, ps_id in p_ids])
            col = self._cursor_columns[cursor]
            query = sqla.select([sqla_func.sum(col)]).where(where_condition)
            with self._engine.begin() as connection:
                return connection.execute(query).fetchone()[0]

    def put_costs(self, costs):
        """Insert pre-built full rows (as produced by get_all_costs)."""
        engine = self._engine
        table = self._costs_table
        iquery = table.insert()
        with timing("dpdb[{}] put_costs".format(self.dpdb_info.fqname())):
            with engine.begin() as connection:
                for cost_data in costs:
                    connection.execute(iquery.values(cost_data))

    def get_pattern_costs(self, pattern_id):
        """Return all rows for one pattern as a list of tuples."""
        engine = self._engine
        table = self._costs_table
        costs = []
        with timing("dpdb[{}] get_pattern_costs[{}]".format(self.dpdb_info.fqname(), pattern_id)):
            with engine.begin() as connection:
                query = sqla.select([table]).where(table.c.pattern_id == pattern_id)
                for result in connection.execute(query):
                    costs.append(tuple(result))
        return costs

    def get_all_costs(self):
        """Return every row of the costs table as a list of tuples."""
        engine = self._engine
        table = self._costs_table
        costs = []
        with timing("dpdb[{}] get_all_costs".format(self.dpdb_info.fqname())):
            with engine.begin() as connection:
                query = sqla.select([table])
                for result in connection.execute(query):
                    costs.append(tuple(result))
        return costs

    def get_records(self, pattern_id):
        """Yield every pattern_start_id stored for *pattern_id*."""
        t = self._costs_table
        with self._engine.begin() as connection:
            query = sqla.select([t.c.pattern_start_id]).where(t.c.pattern_id == pattern_id)
            for entry in connection.execute(query):
                yield entry[0]

    def get_costs(self, pattern_id, pattern_start_id):
        """Return {cursor: cost} for one placement, or None if not stored."""
        t = self._costs_table
        with self._engine.begin() as connection:
            query = sqla.select(self._cursor_columns).where(sqla.and_(t.c.pattern_id == pattern_id, t.c.pattern_start_id == pattern_start_id))
            result = connection.execute(query).fetchone()
            if result:
                costs_d = {}
                for cursor, column in enumerate(self._cursor_columns):
                    costs_d[cursor] = result[column.name]
                return costs_d
            else:
                return None
# Global registry: (puzzle_type, size) -> OrderedDict(label -> DpdbInfo).
DISJOINT_PATTERNS_DB = collections.defaultdict(collections.OrderedDict)
def dpdb_clear():
    """Empty the global pattern-database registry."""
    # clear() mutates in place; no ``global`` statement is required.
    DISJOINT_PATTERNS_DB.clear()
def dpdb_register(dpdb_info):
    """Register *dpdb_info* under its (puzzle_type, size) key and return it."""
    registry = DISJOINT_PATTERNS_DB[(dpdb_info.puzzle_type, dpdb_info.size)]
    registry[dpdb_info.label] = dpdb_info
    # Most recently registered databases take precedence during iteration.
    registry.move_to_end(dpdb_info.label, last=False)
    return dpdb_info
def dpdb_iter_infos(size, label=None, puzzle_type=None):
    """Yield registered DpdbInfo objects for *size* (optionally a single *label*)."""
    if puzzle_type is None:
        puzzle_type = config_getkey("puzzle_type")
    # .get on the defaultdict does not auto-create missing entries.
    registry = DISJOINT_PATTERNS_DB.get((puzzle_type, size))
    if registry is None:
        return
    if label is not None:
        yield registry[label]
        return
    for info in registry.values():
        # Skip databases whose cache files are missing on disk.
        if info.exists():
            yield info
def dpdb_get_info(size, label=None, puzzle_type=None):
global DISJOINT_PATTERNS_DB
if puzzle_type is None:
puzzle_type | |
over=True)
if ps_t >= 1500:
game.game_instance.player.close_dialogue()
return True
return False
    def is_priority(self) -> bool:
        # This animation never preempts others in the queue.
        return False
class LaunchPokemonAnimation(Animation):
    """Animation shown when a pokemon enters the battle (wild, bot or player)."""

    def __init__(self, mem: BattlePlayer, case: int, team_n: int, enemy: bool):
        self.mem = mem
        self.start_time = utils.current_milli_time()
        self.case = case
        self.team_n = team_n
        self.enemy = enemy
        # Single latch: True until the dialogue has been opened once.
        self.bool_matrix = [True] * 1

    def tick(self, display: pygame.Surface) -> bool:
        """Open the launch dialogue once, close it after 1500 ms.

        Returns True when the animation is finished.
        """
        ps_t = utils.current_milli_time() - self.start_time
        if self.bool_matrix[0]:
            self.bool_matrix[0] = False
            # The three cases differ only in translation key and text vars;
            # this was previously duplicated three times.
            poke_name = self.mem.get_pks()[self.team_n].get_name(True)
            if self.mem.wild:
                key, text_var = "battle.wild_spawn", [poke_name]
            elif self.mem.bot:
                key, text_var = "battle.launch_pokemon.bot", [self.mem.disp.name, poke_name]
            else:
                key, text_var = "battle.launch_pokemon.self", [poke_name]
            d = hud.Dialog(key, speed=20, none_skip=True, style=2, need_morph_text=True,
                           text_var=text_var)
            game.game_instance.player.open_dialogue(d, over=True)
        if ps_t >= 1500:
            game.game_instance.player.close_dialogue()
            return True
        return False

    def is_priority(self) -> bool:
        # NOTE(review): returns 2 despite the ``bool`` annotation; the sibling
        # animation returns False, so callers may rely either on truthiness or
        # on an integer priority level — confirm before normalizing to True.
        return 2
class TryCatchAnimation(Animation):
    """Full poke-ball capture attempt: throw arc, absorb flash, up to three
    shakes, then either the catch-success or the break-out sub-animation.

    ``self.init`` is a bitmask of phases already started: bit0 throw,
    bit1 absorb, bit2 break, bit3 catch, bit (4 + n) shake number n.
    ``self.start_time`` holds the start timestamps of phases 0-3.
    """

    def __init__(self, bat: 'Battle', poke_ball: 'item.pokeball.Pokeball'):
        self.poke_ball = poke_ball
        self.bat = bat
        # A capture always targets the first enemy pokemon.
        self.target_poke = bat.get_team(True)[0]
        self.launcher = bat.get_poke_pose(False, 0, True)
        self.target = bat.get_poke_pose(True, 0, True)
        # Shake count is rolled up front; 4 means the capture succeeds.
        self.nb_shake = self.poke_ball.try_catch(self.target_poke)
        self.init = 0
        self.start_time = [0] * 4  # start times: throw, absorb, break, catch

    def tick(self, display: Surface) -> bool:
        """Advance the capture animation; return True when it is finished."""
        if not (self.init & 0b1):
            # First frame: record the throw time and play the throw sound.
            self.init |= 0b1
            self.start_time[0] = utils.current_milli_time()
            sound_manager.start_in_first_empty_taunt(sounds.BALL_THROW)
        ps_t = utils.current_milli_time() - self.start_time[0]
        # Ball's resting point: slightly above the target pokemon.
        x2, y2 = self.target[0], self.target[1] - 30
        if ps_t <= 1000:
            # Flight phase (0-1000 ms): straight line launcher->target plus a
            # small parabolic term that vanishes at both endpoints.
            x1, y1 = self.launcher[0] + 40, self.launcher[1] - 50
            a = (y2 - y1) / (x2 - x1)
            b = y1 - a * x1
            max_delta_x = x2 - x1
            if ps_t % 1200 < 1000:
                x = min(((ps_t % 1200) / 1000), 1) * max_delta_x + x1
                y = (a * x + b) + (0.002 * (x - x1) * (x - x2))
                # Ball sprite is 22x22; offset so (x, y) is its center.
                display.blit(self.poke_ball.image, (x - 11, y - 11))
        else:
            if ps_t < 1600:
                # Absorb phase (1000-1600 ms): shrink the target into the ball.
                self.draw_absorb(display)
                display.blit(self.poke_ball.image, (x2 - 11, y2 - 11))
            else:
                # Shake phase: one shake per 1000 ms after the absorb.
                current_shake = (ps_t - 1600) // 1000
                if current_shake >= self.nb_shake or current_shake >= 3:
                    if self.nb_shake == 4:
                        v = self.draw_catch(display)
                        return v
                    else:
                        return self.draw_break(display)
                else:
                    self.draw_check(display, current_shake, (ps_t - 1600) % 1000)
        return False

    def draw_catch(self, display: Surface) -> bool:
        """Play the success dialogue/sound; return True after 2000 ms."""
        if not (self.init & 0b1000):
            self.init |= 0b1000
            self.start_time[3] = utils.current_milli_time()
            game.game_instance.player.open_dialogue(hud.Dialog('battle.catch.success', need_morph_text=True,
                                                               speed_skip=False, speed=1, style=2, none_skip=True,
                                                               text_var=[self.target_poke.get_name(True)]))
            sound_manager.start_in_first_empty_taunt(sounds.CATCH)
        ps_t = utils.current_milli_time() - self.start_time[3]
        x2, y2 = self.target[0], self.target[1] - 30
        display.blit(self.poke_ball.image, (x2 - 11, y2 - 11))
        if ps_t > 2000:
            # Mark the battle as a capture and remember which ball was used.
            self.bat.is_catch = True
            self.target_poke.poke_ball = self.poke_ball
            game.game_instance.player.close_dialogue()
            return True
        return False

    def draw_break(self, display: Surface) -> bool:
        """Pokemon escapes: grow it back out of the ball over 600 ms."""
        if not (self.init & 0b100):
            self.init |= 0b100
            self.start_time[2] = utils.current_milli_time()
            # Message depends on how many shakes happened before the escape.
            game.game_instance.player.open_dialogue(hud.Dialog(f'battle.catch.fail_{self.nb_shake}',
                                                               need_morph_text=True, speed_skip=False,
                                                               speed=1, style=2, none_skip=True))
            sound_manager.start_in_first_empty_taunt(sounds.BALL_EXIT)
        ps_t = utils.current_milli_time() - self.start_time[2]
        if ps_t < 600:
            # Suppress the normal battle sprite and draw a growing blue ghost.
            self.target_poke.ram_data["battle_render"] = False
            resize = ps_t / 600
            im = utils.color_image(self.target_poke.get_front_image(2).copy(), (19, 143, 209, 200))
            im = pygame.transform.scale(im, (int(im.get_size()[0] * resize), int(im.get_size()[1] * resize)))
            p_c = self.bat.get_poke_pose(True, 0, size_edit=(resize, resize))
            display.blit(im, (p_c[0], p_c[1]))
        if ps_t > 600:
            self.target_poke.ram_data["battle_render"] = True
            return True
        return False

    def draw_check(self, display: Surface, check_n: int, ps_t):
        """Draw shake number *check_n*; *ps_t* is ms elapsed within this shake."""
        if not (self.init & (2 ** (4 + check_n))):
            self.init |= (2 ** (4 + check_n))
            game.game_instance.player.open_dialogue(hud.Dialog([f'{check_n + 1}...'], speed_skip=False, speed=1,
                                                               style=2, none_skip=True))
            sound_manager.start_in_first_empty_taunt(sounds.BALL_SHAKE)
        x2, y2 = self.target[0], self.target[1] - 30
        # Rock the ball: 0° -> 22.5° -> 45° -> 22.5° -> 0° across 600 ms.
        rotate = 0
        if ps_t <= 150 or ps_t > 600:
            rotate = 0
        elif ps_t <= 300 or 450 < ps_t <= 600:
            rotate = 22.5
        elif ps_t <= 450:
            rotate = 45
        im = self.poke_ball.image
        if rotate != 0:
            # Alternate tilt direction on every other shake.
            if check_n % 2:
                rotate = -rotate
            im = pygame.transform.rotate(im, rotate)
        display.blit(im, (x2 - 11, y2 - 11))

    def draw_absorb(self, display: Surface):
        """Shrink the target pokemon into the ball over 600 ms."""
        if not (self.init & 0b10):
            self.init |= 0b10
            self.start_time[1] = utils.current_milli_time()
        ps_t = utils.current_milli_time() - self.start_time[1]
        if ps_t < 600:
            self.target_poke.ram_data["battle_render"] = False
            resize = 1 - ps_t / 600
            im = utils.color_image(self.target_poke.get_front_image(2).copy(), (19, 143, 209, 200))
            im = pygame.transform.scale(im, (int(im.get_size()[0] * resize), int(im.get_size()[1] * resize)))
            p_c = self.bat.get_poke_pose(True, 0, size_edit=(resize, resize))
            display.blit(im, (p_c[0], p_c[1]))

    def is_priority(self) -> int:
        # High priority: the capture sequence preempts other animations.
        return 10
class CatchSuccess(Animation):
    """Post-capture flow: register the catch in the pokedex, then ask the
    player whether the new pokemon goes to the PC or into the team."""

    def __init__(self, poke: 'player_pokemon.PlayerPokemon'):
        self.poke = poke
        self.init = False            # ask() not yet shown
        self.start = 0
        self.bg = pygame.image.load('assets/textures/battle/bg/evolution.png')
        self.question_answer = None  # 0 = send to PC, 1 = add to team
        self.need_end = False
        self.player = game.game_instance.player
        # Capturing also marks the species as caught in the pokedex.
        game.game_instance.set_pokedex_catch(poke.id_)

    def tick(self, display: Surface) -> bool:
        """Render the choice screen; return True once the flow is finished."""
        if not self.init:
            self.init = True
            self.ask()
        if self.need_end:
            return True
        display.blit(self.bg, (0, 0))
        # Center the pokemon sprite on (530, 300).
        display.blit(im := self.poke.get_front_image(4), (530 - im.get_size()[0] // 2, 300 - im.get_size()[1] // 2))
        if self.question_answer is not None:
            # Answer 0: send the pokemon to the PC.
            if self.question_answer == 0:
                self.question_answer = None
                self.player.pc.add_first_case_empty(self.poke)
                self.player.open_dialogue(
                    hud.Dialog('battle.catch.success.ask.send_to_pc.message', text_var=[self.poke.get_name(True)],
                               callback=self.end_callback, speed=1, need_morph_text=True, style=2)
                )
                return False
            # Answer 1: add to the team, swapping a member out if full.
            elif self.question_answer == 1:
                self.question_answer = None
                print(self.player.get_non_null_team_number())
                if self.player.get_non_null_team_number() < 6:
                    self.player.team[5] = self.poke
                    self.player.normalize_team()
                    self.player.open_dialogue(
                        hud.Dialog('battle.catch.success.ask.include_to_team.message', need_morph_text=True,
                                   text_var=[self.poke.get_name(True)], callback=self.end_callback, style=2, speed=1)
                    )
                else:
                    # Team full: let the player pick which member to replace;
                    # cancelling reopens the original question (self.ask).
                    self.player.open_menu(menu__.TeamMenu(self.player, self.ask, self.switch_pokemon))
        return False

    def switch_pokemon(self, i):
        """Replace team slot *i* with the new pokemon (old one goes to the PC)."""
        self.player.close_menu()
        self.player.move_pokemon_to_pc(i)
        self.player.team[5] = self.poke
        self.player.normalize_team()
        self.player.open_dialogue(
            hud.Dialog('battle.catch.success.ask.include_to_team.message', text_var=[self.poke.get_name(True)],
                       callback=self.end_callback, need_morph_text=True, speed=1, style=2)
        )

    def ask(self):
        """Open the PC-or-team question dialogue."""
        self.player.close_menu()
        self.question_answer = None
        ask = game.game_instance.get_message("battle.catch.success.ask.send_to_pc"), \
              game.game_instance.get_message("battle.catch.success.ask.include_to_team")
        game.game_instance.player.open_dialogue(
            hud.QuestionDialog('battle.catch.success.ask', self.callback, ask, speed=1, style=2, need_morph_text=True,
                               text_var=[self.poke.get_name(True)]))

    def end_callback(self):
        # Closing the confirmation dialogue ends the whole animation.
        self.player.close_dialogue()
        self.need_end = True

    def callback(self, value, index):
        # Store the chosen option index; tick() acts on it next frame.
        self.question_answer = index
class Battle(object):
# END_BASE_POINT: Tuple[int, int] = (SURFACE_SIZE[0] - 390, 0)
def __init__(self, ally: BattleTeam, enemy: BattleTeam,
wild: bool, animation: Callable[[], StartAnimation] = WildAnimation,
base: Tuple[int, int, int, int] = GRASS_PLATE_BASE, bg: 'background.BackGround' = background.FOREST,
sound: 'sounds.Sound' = sounds.BATTLE_DPP_TRAINER
):
self.match_size = len(ally.case), len(enemy.case)
self.__ally_team = ally
self.__enemy_team = enemy
self.sound: 'sounds.Sound' = sound
self.nb_ally = len(self.__ally_team.case)
self.nb_enemy = len(self.__enemy_team.case)
self.__ally: List[Optional['player_pokemon.PlayerPokemon']] = [None] * self.nb_ally
self.__enemy: List[Optional['player_pokemon.PlayerPokemon']] = [None] * self.nb_enemy
self.__xp_per_case: List[set[int]] = [set()] * self.nb_enemy
self.nb_not_bot: Tuple[int] = self.__ally_team.get_none_bot().case_number
self.selected_not_bot: int = min(self.nb_not_bot)
self.player_queue = []
self.base_size = BASE_SIZES[max(self.nb_ally, self.nb_enemy) - 1]
self.wild = wild
self.__start_time = utils.current_milli_time()
self.poke_ball = pygame.transform.scale(item.items.POKE_BALL.image,
(game.SURFACE_SIZE[1] // 8, game.SURFACE_SIZE[1] // 8))
self.start_sound: pygame.mixer.Sound = pygame.mixer.Sound('assets/sound/music/pokemon-start-battle.mp3')
self.animation: StartAnimation = animation()
self.bool_matrix = [True] * 5
self.base: Union[Tuple[int, int, int, int], pygame.Surface] = base
self.bg: 'background.BackGround' = bg
self.bg_image: Optional[pygame.Surface] = None
self.selected_y = [0, 3]
self.selected_x = [0, 3]
self.menu_action: List[Callable[[], NoReturn]] = [None, None]
self.status = 0
self.turn_count = 0
self.current_play_ability: Optional[PlayAbility] = None
self.queue_play_ability: List[PlayAbility] = []
self.current_animation: Optional[Animation] = None
self.current_animation_callback: Optional[Callable[[], NoReturn]] = None
self.current_ab: Optional['player_pokemon.PokemonAbility'] = None
self.rac: Optional['RenderAbilityCallback'] = None
self.run_away_c = 0
self.need_run_away = False
self.evolution_table = [None] * 6
self.multi_turn_ab: dict[str, tuple[ability.AbstractAbility, int, bool, int]] = {}
self.is_catch: Any = False
def appear_pokemon(self, enemy: bool, case: int):
if enemy:
new_set = set()
for c_n in self.__ally_team.get_none_bot().case_number:
if (team_n := self.get_poke_n_from_case(False, c_n)) is not None:
new_set.add(team_n)
self.__xp_per_case[case] = new_set
elif case in self.__ally_team.get_none_bot().case_number:
team_n = self.get_poke_n_from_case(False, case)
if team_n is not None:
for xp_case in self.__xp_per_case:
xp_case.add(team_n)
def get_poke_n_from_case(self, enemy: bool, case: int) -> Optional[int]:
team = self.__enemy_team if enemy else self.__ally_team
poke = (self.__enemy if enemy else self.__ally)[case]
if poke is None: return None
pks = team.case[case].get_pks()
for i in range(len(pks)):
if pks[i] and pks[i] == poke:
return i
return None
def get_team(self, enemy) -> List[Optional['player_pokemon.PlayerPokemon']]:
return self.__enemy if enemy else self.__ally
def get_team_obj(self, enemy) -> BattleTeam:
return self.__enemy_team if enemy else self.__ally_team
    def load_asset(self):
        """Load every sound and image asset the battle needs, and reset the
        combat status of all participating pokemon."""
        self.sound.load()
        sounds.HIT_NORMAL_DAMAGE.load()
        sounds.HIT_NOT_VERY_EFFECTIVE.load()
        sounds.HIT_SUPER_EFFECTIVE.load()
        sounds.BLOCK.load()
        self.animation.load_asset()
        # Crop the platform sprite from the shared sheet (``self.base`` was the
        # crop rect until now; it becomes a Surface here).
        self.base = utils.get_part_i(pygame.image.load('assets/textures/battle/base_2.png'), self.base, self.base_size)
        self.bg_image = pygame.transform.scale(pygame.image.load(self.bg.bg_path), game.SURFACE_SIZE)
        self.button_text = [game.game_instance.get_message(t) for t in ['attack', 'team', 'bag', 'run_away']]
        # self.arrow = utils.get_part_i(menu.MENU_IMAGE, (0, 64, 22, 91), (33, 41))
        self.arrow = utils.ARROW
        # Fresh combat state + ability assets for every pokemon on both sides.
        for m in self.__ally_team.members + self.__enemy_team.members:
            for p in m.get_pks():
                if p:
                    p.reset_combat_status()
                    for a in p.ability:
                        if a:
                            a.ability.load_assets()
    def unload_assets(self):
        """Release every sound asset acquired for the battle and reset the
        combat status of all participating pokemon (called from ``__del__``)."""
        # Other assets are released automatically when the Battle object
        # itself is deleted.
        print("battle unload_assets")  # debug trace
        self.sound.un_load()
        sounds.HIT_NORMAL_DAMAGE.un_load()
        sounds.HIT_NOT_VERY_EFFECTIVE.un_load()
        sounds.HIT_SUPER_EFFECTIVE.un_load()
        sounds.BALL_SHAKE.un_load()
        sounds.BALL_EXIT.un_load()
        sounds.BALL_THROW.un_load()
        sounds.CATCH.un_load()
        sounds.EVOLUTION.un_load()
        game.POKE_CACHE.clear()
        sounds.unload_poke_sound()
        for m in self.__ally_team.members + self.__enemy_team.members:
            for p in m.get_pks():
                if p:
                    p.reset_combat_status()
                    for a in p.ability:
                        if a:
                            a.ability.unload_assets()
    def __del__(self):
        # Free the battle's assets when the object is garbage-collected.
        # NOTE(review): relying on __del__ for cleanup is fragile — it may run
        # late, or not at all at interpreter shutdown.  Confirm callers do not
        # depend on deterministic unload timing.
        self.unload_assets()
def need_render(self):
return utils.current_milli_time() - self.__start_time <= self.animation.get_during()[0]
def draw_base(self, display: pygame.Surface, x, enemy: bool, i=0):
if enemy:
display.blit(self.base, (x, self.bg.enemy_base_coord[self.nb_enemy - 1][i][1]))
else:
display.blit(self.base, (x, self.bg.ally_base_coord[self.nb_ally - 1][i][1]))
    def draw_bg(self, display: pygame.Surface):
        """Paint the pre-scaled battle background (set up by load_asset)."""
        display.blit(self.bg_image, (0, 0))
INFO_enemy = 830, 580, 330
INFO_ally = 60, 310, 560
def get_poke_pose(self, enemy: bool, i: int, simple: bool = False, size_edit=(1, | |
k-point and band for reconstructing
the wavefunction. For non-spin-polarized, the first index corresponds
to the kpoint and the second corresponds to the band (e.g.
self.coeffs[kp][b] corresponds to k-point kp and band b). For
spin-polarized calculations, the first index is for the spin.
If the calculation was non-collinear, then self.coeffs[kp][b] will have
two columns (one for each component of the spinor).
Acknowledgments:
This code is based upon the Fortran program, WaveTrans, written by
<NAME> and <NAME> from the Dept. of Physics at Carnegie
Mellon University. To see the original work, please visit:
https://www.andrew.cmu.edu/user/feenstra/wavetrans/
Author: <NAME>
"""
    def __init__(self, filename="WAVECAR", verbose=False, precision="normal", vasp_type=None):
        """
        Information is extracted from the given WAVECAR.

        Args:
            filename (str): input file (default: WAVECAR)
            verbose (bool): determines whether processing information is shown
            precision (str): determines how fine the fft mesh is (normal or
                accurate), only the first letter matters
            vasp_type (str): determines the VASP type that is used, allowed
                values are ['std', 'gam', 'ncl']
                (only first letter is required)

        Raises:
            ValueError: if ``vasp_type`` is not one of the allowed values, if
                the record tag in the file is unknown, or if the plane-wave
                count contradicts the given ``vasp_type``.
        """
        self.filename = filename
        if not (vasp_type is None or vasp_type.lower()[0] in ["s", "g", "n"]):
            raise ValueError(f"invalid vasp_type {vasp_type}")
        self.vasp_type = vasp_type

        # c = 0.26246582250210965422
        # 2m/hbar^2 in agreement with VASP
        self._C = 0.262465831
        with open(self.filename, "rb") as f:
            # read the header information
            recl, spin, rtag = np.fromfile(f, dtype=np.float64, count=3).astype(np.int_)
            if verbose:
                print(f"recl={recl}, spin={spin}, rtag={rtag}")
            recl8 = int(recl / 8)  # record length in units of float64
            self.spin = spin

            # check to make sure we have precision correct
            if rtag not in (45200, 45210, 53300, 53310):
                # note that rtag=45200 and 45210 may not work if file was actually
                # generated by old version of VASP, since that would write eigenvalues
                # and occupations in way that does not span FORTRAN records, but
                # reader below appears to assume that record boundaries can be ignored
                # (see OUTWAV vs. OUTWAV_4 in vasp fileio.F)
                raise ValueError(f"invalid rtag of {rtag}")

            # padding to end of fortran REC=1
            np.fromfile(f, dtype=np.float64, count=(recl8 - 3))

            # extract kpoint, bands, energy, and lattice information
            self.nk, self.nb, self.encut = np.fromfile(f, dtype=np.float64, count=3).astype(np.int_)
            self.a = np.fromfile(f, dtype=np.float64, count=9).reshape((3, 3))
            self.efermi = np.fromfile(f, dtype=np.float64, count=1)[0]
            if verbose:
                print(
                    "kpoints = {}, bands = {}, energy cutoff = {}, fermi "
                    "energy= {:.04f}\n".format(self.nk, self.nb, self.encut, self.efermi)
                )
                print(f"primitive lattice vectors = \n{self.a}")

            self.vol = np.dot(self.a[0, :], np.cross(self.a[1, :], self.a[2, :]))
            if verbose:
                print(f"volume = {self.vol}\n")

            # calculate reciprocal lattice
            b = np.array(
                [
                    np.cross(self.a[1, :], self.a[2, :]),
                    np.cross(self.a[2, :], self.a[0, :]),
                    np.cross(self.a[0, :], self.a[1, :]),
                ]
            )
            b = 2 * np.pi * b / self.vol
            self.b = b
            if verbose:
                print(f"reciprocal lattice vectors = \n{b}")
                print(f"reciprocal lattice vector magnitudes = \n{np.linalg.norm(b, axis=1)}\n")

            # calculate maximum number of b vectors in each direction
            self._generate_nbmax()
            if verbose:
                print(f"max number of G values = {self._nbmax}\n\n")
            self.ng = self._nbmax * 3 if precision.lower()[0] == "n" else self._nbmax * 4

            # padding to end of fortran REC=2
            np.fromfile(f, dtype=np.float64, count=recl8 - 13)

            # reading records
            self.Gpoints = [None for _ in range(self.nk)]
            self.kpoints = []
            if spin == 2:
                # spin-polarized: an extra leading spin index on coeffs/energies
                self.coeffs = [[[None for i in range(self.nb)] for j in range(self.nk)] for _ in range(spin)]
                self.band_energy = [[] for _ in range(spin)]
            else:
                self.coeffs = [[None for i in range(self.nb)] for j in range(self.nk)]
                self.band_energy = []

            for ispin in range(spin):
                if verbose:
                    print(f"reading spin {ispin}")

                for ink in range(self.nk):
                    # information for this kpoint
                    nplane = int(np.fromfile(f, dtype=np.float64, count=1)[0])
                    kpoint = np.fromfile(f, dtype=np.float64, count=3)

                    if ispin == 0:
                        self.kpoints.append(kpoint)
                    else:
                        # the spin-down block must visit k-points in the same order
                        assert np.allclose(self.kpoints[ink], kpoint)

                    if verbose:
                        print(f"kpoint {ink: 4} with {nplane: 5} plane waves at {kpoint}")

                    # energy and occupation information
                    enocc = np.fromfile(f, dtype=np.float64, count=3 * self.nb).reshape((self.nb, 3))
                    if spin == 2:
                        self.band_energy[ispin].append(enocc)
                    else:
                        self.band_energy.append(enocc)

                    if verbose:
                        print("enocc =\n", enocc[:, [0, 2]])

                    # padding to end of record that contains nplane, kpoints, evals and occs
                    np.fromfile(f, dtype=np.float64, count=(recl8 - 4 - 3 * self.nb) % recl8)

                    if self.vasp_type is None:
                        # auto-detect the VASP flavor: try gamma-only first,
                        # then fall back to std/ncl based on the G-point count
                        (
                            self.Gpoints[ink],
                            extra_gpoints,
                            extra_coeff_inds,
                        ) = self._generate_G_points(kpoint, gamma=True)
                        if len(self.Gpoints[ink]) == nplane:
                            self.vasp_type = "gam"
                        else:
                            (
                                self.Gpoints[ink],
                                extra_gpoints,
                                extra_coeff_inds,
                            ) = self._generate_G_points(kpoint, gamma=False)
                            self.vasp_type = "std" if len(self.Gpoints[ink]) == nplane else "ncl"

                        if verbose:
                            print("\ndetermined vasp_type =", self.vasp_type, "\n")
                    else:
                        (
                            self.Gpoints[ink],
                            extra_gpoints,
                            extra_coeff_inds,
                        ) = self._generate_G_points(kpoint, gamma=(self.vasp_type.lower()[0] == "g"))
                        if len(self.Gpoints[ink]) != nplane and 2 * len(self.Gpoints[ink]) != nplane:
                            raise ValueError(
                                f"Incorrect value of vasp_type given ({vasp_type})."
                                " Please open an issue if you are certain this WAVECAR"
                                " was generated with the given vasp_type."
                            )

                    self.Gpoints[ink] = np.array(self.Gpoints[ink] + extra_gpoints, dtype=np.float64)

                    # extract coefficients
                    for inb in range(self.nb):
                        if rtag in (45200, 53300):
                            # single-precision coefficients
                            data = np.fromfile(f, dtype=np.complex64, count=nplane)
                            np.fromfile(f, dtype=np.float64, count=recl8 - nplane)
                        elif rtag in (45210, 53310):
                            # this should handle double precision coefficients
                            # but I don't have a WAVECAR to test it with
                            data = np.fromfile(f, dtype=np.complex128, count=nplane)
                            np.fromfile(f, dtype=np.float64, count=recl8 - 2 * nplane)
                        # ``data`` is always bound here: rtag was validated above.

                        extra_coeffs = []
                        if len(extra_coeff_inds) > 0:
                            # reconstruct extra coefficients missing from gamma-only executable WAVECAR
                            for G_ind in extra_coeff_inds:
                                # no idea where this factor of sqrt(2) comes from, but empirically
                                # it appears to be necessary
                                data[G_ind] /= np.sqrt(2)
                                extra_coeffs.append(np.conj(data[G_ind]))

                        if spin == 2:
                            self.coeffs[ispin][ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex64)
                        else:
                            # NOTE(review): complex128 here vs complex64 in the
                            # spin-polarized branch looks inconsistent — confirm
                            # against upstream before changing either.
                            self.coeffs[ink][inb] = np.array(list(data) + extra_coeffs, dtype=np.complex128)

                        if self.vasp_type.lower()[0] == "n":
                            # non-collinear: split into the two spinor components
                            self.coeffs[ink][inb].shape = (2, nplane // 2)
def _generate_nbmax(self) -> None:
"""
Helper function that determines maximum number of b vectors for
each direction.
This algorithm is adapted from WaveTrans (see Class docstring). There
should be no reason for this function to be called outside of
initialization.
"""
bmag = np.linalg.norm(self.b, axis=1)
b = self.b
# calculate maximum integers in each direction for G
phi12 = np.arccos(np.dot(b[0, :], b[1, :]) / (bmag[0] * bmag[1]))
sphi123 = np.dot(b[2, :], np.cross(b[0, :], b[1, :])) / (bmag[2] * np.linalg.norm(np.cross(b[0, :], b[1, :])))
nbmaxA = np.sqrt(self.encut * self._C) / bmag
nbmaxA[0] /= np.abs(np.sin(phi12))
nbmaxA[1] /= np.abs(np.sin(phi12))
nbmaxA[2] /= np.abs(sphi123)
nbmaxA += 1
phi13 = np.arccos(np.dot(b[0, :], b[2, :]) / (bmag[0] * bmag[2]))
sphi123 = np.dot(b[1, :], np.cross(b[0, :], b[2, :])) / (bmag[1] * np.linalg.norm(np.cross(b[0, :], b[2, :])))
nbmaxB = np.sqrt(self.encut * self._C) / bmag
nbmaxB[0] /= np.abs(np.sin(phi13))
nbmaxB[1] /= np.abs(sphi123)
nbmaxB[2] /= np.abs(np.sin(phi13))
nbmaxB += 1
phi23 = np.arccos(np.dot(b[1, :], b[2, :]) / (bmag[1] * bmag[2]))
sphi123 = np.dot(b[0, :], np.cross(b[1, :], b[2, :])) / (bmag[0] * np.linalg.norm(np.cross(b[1, :], b[2, :])))
nbmaxC = np.sqrt(self.encut * self._C) / bmag
nbmaxC[0] /= np.abs(sphi123)
nbmaxC[1] /= np.abs(np.sin(phi23))
nbmaxC[2] /= np.abs(np.sin(phi23))
nbmaxC += 1
self._nbmax = np.max([nbmaxA, nbmaxB, nbmaxC], axis=0).astype(np.int_)
def _generate_G_points(self, kpoint: np.ndarray, gamma: bool = False) -> Tuple[List, List, List]:
"""
Helper function to generate G-points based on nbmax.
This function iterates over possible G-point values and determines
if the energy is less than G_{cut}. Valid values are appended to
the output array. This function should not be called outside of
initialization.
Args:
kpoint (np.array): the array containing the current k-point value
gamma (bool): determines if G points for gamma-point only executable
should be generated
Returns:
a list containing valid G-points
"""
if gamma:
kmax = self._nbmax[0] + 1
else:
kmax = 2 * self._nbmax[0] + 1
gpoints = []
extra_gpoints = []
extra_coeff_inds = []
G_ind = 0
for i in range(2 * self._nbmax[2] + 1):
i3 = i - 2 * self._nbmax[2] - 1 if i > self._nbmax[2] else i
for j in range(2 * self._nbmax[1] + 1):
j2 = j - 2 * self._nbmax[1] - 1 if j > self._nbmax[1] else j
for k in range(kmax):
k1 = k - 2 * self._nbmax[0] - 1 if k > self._nbmax[0] else k
if gamma and ((k1 == 0 and j2 < 0) or (k1 == 0 and j2 == 0 and i3 < 0)):
continue
G = np.array([k1, j2, i3])
v = kpoint + G
g = np.linalg.norm(np.dot(v, self.b))
E = g ** 2 / self._C
if E < self.encut:
gpoints.append(G)
if gamma and (k1, j2, i3) != | |
4.58780657862546*(m.x793*m.x791 + m.x911*m.x909)
- 1.37932553964677*(m.x793*m.x909 - m.x791*m.x911))*m.b967 + m.x388 == 0)
m.c390 = Constraint(expr=-(9.96252401138967*(m.x744**2 + m.x862**2) - 9.97442401138967*(m.x744*m.x745 + m.x862*m.x863)
- 2.50682766776177*(m.x744*m.x863 - m.x745*m.x862))*m.b968 + m.x389 == 0)
m.c391 = Constraint(expr=-(9.96252401138967*(m.x745**2 + m.x863**2) - 9.97442401138967*(m.x745*m.x744 + m.x863*m.x862)
- 2.50682766776177*(m.x745*m.x862 - m.x744*m.x863))*m.b968 + m.x390 == 0)
m.c392 = Constraint(expr=-(4.71134077250522*(m.x802**2 + m.x920**2) - 4.73359077250522*(m.x802*m.x803 + m.x920*m.x921)
- 0.645448153550856*(m.x802*m.x921 - m.x803*m.x920))*m.b969 + m.x391 == 0)
m.c393 = Constraint(expr=-(4.71134077250522*(m.x803**2 + m.x921**2) - 4.73359077250522*(m.x803*m.x802 + m.x921*m.x920)
- 0.645448153550856*(m.x803*m.x920 - m.x802*m.x921))*m.b969 + m.x392 == 0)
m.c394 = Constraint(expr=-(4.78152424551287*(m.x762**2 + m.x880**2) - 4.80512424551287*(m.x762*m.x764 + m.x880*m.x882)
- 1.52797866219748*(m.x762*m.x882 - m.x764*m.x880))*m.b970 + m.x393 == 0)
m.c395 = Constraint(expr=-(4.78152424551287*(m.x764**2 + m.x882**2) - 4.80512424551287*(m.x764*m.x762 + m.x882*m.x880)
- 1.52797866219748*(m.x764*m.x880 - m.x762*m.x882))*m.b970 + m.x394 == 0)
m.c396 = Constraint(expr=-(5.21198752353358*(m.x787**2 + m.x905**2) - 5.23420752353358*(m.x787*m.x788 + m.x905*m.x906)
- 1.29692030860888*(m.x787*m.x906 - m.x788*m.x905))*m.b971 + m.x395 == 0)
m.c397 = Constraint(expr=-(5.21198752353358*(m.x788**2 + m.x906**2) - 5.23420752353358*(m.x788*m.x787 + m.x906*m.x905)
- 1.29692030860888*(m.x788*m.x905 - m.x787*m.x906))*m.b971 + m.x396 == 0)
m.c398 = Constraint(expr=-(8.15318665711629*(m.x735**2 + m.x853**2) - 8.16808665711629*(m.x735*m.x736 + m.x853*m.x854)
- 1.7592802030712*(m.x735*m.x854 - m.x736*m.x853))*m.b972 + m.x397 == 0)
m.c399 = Constraint(expr=-(8.15318665711629*(m.x736**2 + m.x854**2) - 8.16808665711629*(m.x736*m.x735 + m.x854*m.x853)
- 1.7592802030712*(m.x736*m.x853 - m.x735*m.x854))*m.b972 + m.x398 == 0)
m.c400 = Constraint(expr=-(25.3720831842391*(m.x777**2 + m.x895**2) - 25.3769831842391*(m.x777*m.x778 + m.x895*m.x896)
- 5.56133886803537*(m.x777*m.x896 - m.x778*m.x895))*m.b973 + m.x399 == 0)
m.c401 = Constraint(expr=-(25.3720831842391*(m.x778**2 + m.x896**2) - 25.3769831842391*(m.x778*m.x777 + m.x896*m.x895)
- 5.56133886803537*(m.x778*m.x895 - m.x777*m.x896))*m.b973 + m.x400 == 0)
m.c402 = Constraint(expr=-(19.4402247352902*(m.x724**2 + m.x842**2) - 19.6972247352902*(m.x724*m.x746 + m.x842*m.x864)
- 1.68442536922819*(m.x724*m.x864 - m.x746*m.x842))*m.b974 + m.x401 == 0)
m.c403 = Constraint(expr=-(19.4402247352902*(m.x746**2 + m.x864**2) - 19.6972247352902*(m.x746*m.x724 + m.x864*m.x842)
- 1.68442536922819*(m.x746*m.x842 - m.x724*m.x864))*m.b974 + m.x402 == 0)
m.c404 = Constraint(expr=-(10.2981147641153*(m.x796**2 + m.x914**2) - 10.3108147641153*(m.x796*m.x813 + m.x914*m.x931)
- 2.02021317112751*(m.x796*m.x931 - m.x813*m.x914))*m.b975 + m.x403 == 0)
m.c405 = Constraint(expr=-(10.2981147641153*(m.x813**2 + m.x931**2) - 10.3108147641153*(m.x813*m.x796 + m.x931*m.x914)
- 2.02021317112751*(m.x813*m.x914 - m.x796*m.x931))*m.b975 + m.x404 == 0)
m.c406 = Constraint(expr=-(7.33655397253797*(m.x731**2 + m.x849**2) - 7.35252397253797*(m.x731*m.x749 + m.x849*m.x867)
- 2.24594783727044*(m.x731*m.x867 - m.x749*m.x849))*m.b976 + m.x405 == 0)
m.c407 = Constraint(expr=-(7.33655397253797*(m.x749**2 + m.x867**2) - 7.35252397253797*(m.x749*m.x731 + m.x867*m.x849)
- 2.24594783727044*(m.x749*m.x849 - m.x731*m.x867))*m.b976 + m.x406 == 0)
m.c408 = Constraint(expr=-(9.83960654704895*(m.x737**2 + m.x855**2) - 9.85190654704895*(m.x737*m.x738 + m.x855*m.x856)
- 2.12273037972498*(m.x737*m.x856 - m.x738*m.x855))*m.b977 + m.x407 == 0)
m.c409 = Constraint(expr=-(9.83960654704895*(m.x738**2 + m.x856**2) - 9.85190654704895*(m.x738*m.x737 + m.x856*m.x855)
- 2.12273037972498*(m.x738*m.x855 - m.x737*m.x856))*m.b977 + m.x408 == 0)
m.c410 = Constraint(expr=-(5.81671035829726*(m.x758**2 + m.x876**2) - 5.90271035829726*(m.x758*m.x765 + m.x876*m.x883)
- 1.30663712265713*(m.x758*m.x883 - m.x765*m.x876))*m.b978 + m.x409 == 0)
m.c411 = Constraint(expr=-(5.81671035829726*(m.x765**2 + m.x883**2) - 5.90271035829726*(m.x765*m.x758 + m.x883*m.x876)
- 1.30663712265713*(m.x765*m.x876 - m.x758*m.x883))*m.b978 + m.x410 == 0)
m.c412 = Constraint(expr=-(12.1195465333368*(m.x743**2 + m.x861**2) - 12.1291765333368*(m.x743*m.x748 + m.x861*m.x866)
- 3.67891579620413*(m.x743*m.x866 - m.x748*m.x861))*m.b979 + m.x411 == 0)
m.c413 = Constraint(expr=-(12.1195465333368*(m.x748**2 + m.x866**2) - 12.1291765333368*(m.x748*m.x743 + m.x866*m.x861)
- 3.67891579620413*(m.x748*m.x861 - m.x743*m.x866))*m.b979 + m.x412 == 0)
m.c414 = Constraint(expr=-(11.2519603646185*(m.x728**2 + m.x846**2) - 11.2626603646185*(m.x728*m.x732 + m.x846*m.x850)
- 2.86293045239703*(m.x728*m.x850 - m.x732*m.x846))*m.b980 + m.x413 == 0)
m.c415 = Constraint(expr=-(11.2519603646185*(m.x732**2 + m.x850**2) - 11.2626603646185*(m.x732*m.x728 + m.x850*m.x846)
- 2.86293045239703*(m.x732*m.x846 - m.x728*m.x850))*m.b980 + m.x414 == 0)
m.c416 = Constraint(expr=-(17.2285497460475*(m.x798**2 + m.x916**2) - 17.2557497460475*(m.x798*m.x812 + m.x916*m.x930)
- 5.27439897898054*(m.x798*m.x930 - m.x812*m.x916))*m.b981 + m.x415 == 0)
m.c417 = Constraint(expr=-(17.2285497460475*(m.x812**2 + m.x930**2) - 17.2557497460475*(m.x812*m.x798 + m.x930*m.x916)
- 5.27439897898054*(m.x812*m.x916 - m.x798*m.x930))*m.b981 + m.x416 == 0)
m.c418 = Constraint(expr=-(7.39686599330554*(m.x785**2 + m.x903**2) - 7.45786599330554*(m.x785*m.x786 + m.x903*m.x904)
- 1.76170062833989*(m.x785*m.x904 - m.x786*m.x903))*m.b982 + m.x417 == 0)
m.c419 = Constraint(expr=-(7.39686599330554*(m.x786**2 + m.x904**2) - 7.45786599330554*(m.x786*m.x785 + m.x904*m.x903)
- 1.76170062833989*(m.x786*m.x903 - m.x785*m.x904))*m.b982 + m.x418 == 0)
m.c420 = Constraint(expr=-(99.0325203668178*(m.x750**2 + m.x868**2) - 99.0374403668178*(m.x750*m.x753 + m.x868*m.x871)
- 26.9718986530908*(m.x750*m.x871 - m.x753*m.x868))*m.b983 + m.x419 == 0)
m.c421 = Constraint(expr=-(99.0325203668178*(m.x753**2 + m.x871**2) - 99.0374403668178*(m.x753*m.x750 + m.x871*m.x868)
- 26.9718986530908*(m.x753*m.x868 - m.x750*m.x871))*m.b983 + m.x420 == 0)
m.c422 = Constraint(expr=-(6.90576599749886*(m.x786**2 + m.x904**2) - 6.92260599749886*(m.x786*m.x790 + m.x904*m.x908)
- 2.09823507558355*(m.x786*m.x908 - m.x790*m.x904))*m.b984 + m.x421 == 0)
m.c423 = Constraint(expr=-(6.90576599749886*(m.x790**2 + m.x908**2) - 6.92260599749886*(m.x790*m.x786 + m.x908*m.x904)
- 2.09823507558355*(m.x790*m.x904 - m.x786*m.x908))*m.b984 + m.x422 == 0)
m.c424 = Constraint(expr=-(6.46979183848307*(m.x749**2 + m.x867**2) - 6.48809183848307*(m.x749*m.x753 + m.x867*m.x871)
- 1.89616768519047*(m.x749*m.x871 - m.x753*m.x867))*m.b985 + m.x423 == 0)
m.c425 = Constraint(expr=-(6.46979183848307*(m.x753**2 + m.x871**2) - 6.48809183848307*(m.x753*m.x749 + m.x871*m.x867)
- 1.89616768519047*(m.x753*m.x867 - m.x749*m.x871))*m.b985 + m.x424 == 0)
m.c426 = Constraint(expr=-(5.19741810528226*(m.x732**2 + m.x850**2) - 5.22071810528226*(m.x732*m.x733 + m.x850*m.x851)
- 1.31604998323051*(m.x732*m.x851 - m.x733*m.x850))*m.b986 + m.x425 == 0)
m.c427 = Constraint(expr=-(5.19741810528226*(m.x733**2 + m.x851**2) - 5.22071810528226*(m.x733*m.x732 + m.x851*m.x850)
- 1.31604998323051*(m.x733*m.x850 - m.x732*m.x851))*m.b986 + m.x426 == 0)
m.c428 = Constraint(expr=-(5.83915044252515*(m.x733**2 + m.x851**2) - 5.85910044252515*(m.x733*m.x747 + m.x851*m.x865)
- 1.7768481188464*(m.x733*m.x865 - m.x747*m.x851))*m.b987 + m.x427 == 0)
m.c429 = Constraint(expr=-(5.83915044252515*(m.x747**2 + m.x865**2) - 5.85910044252515*(m.x747*m.x733 + m.x865*m.x851)
- 1.7768481188464*(m.x747*m.x851 - m.x733*m.x865))*m.b987 + m.x428 == 0)
# Auto-generated Pyomo constraints (do not hand-edit coefficients).
# The constraints come in pairs (c430/c431, c432/c433, ...): both lines of a
# pair use the same three coefficients, with the roles of the two variable
# groups swapped, are gated by one binary variable m.b###, and are equated
# (with sign flipped) to a continuous variable m.x###.
m.c430 = Constraint(expr=-(13.4607066223809*(m.x721**2 + m.x839**2) - 13.4693966223809*(m.x721*m.x727 + m.x839*m.x845)
    - 4.00921922924242*(m.x721*m.x845 - m.x727*m.x839))*m.b988 + m.x429 == 0)
m.c431 = Constraint(expr=-(13.4607066223809*(m.x727**2 + m.x845**2) - 13.4693966223809*(m.x727*m.x721 + m.x845*m.x839)
    - 4.00921922924242*(m.x727*m.x839 - m.x721*m.x845))*m.b988 + m.x430 == 0)
m.c432 = Constraint(expr=-(2.37907356631141*(m.x740**2 + m.x858**2) - 2.43006356631141*(m.x740*m.x786 + m.x858*m.x904)
    - 0.0130508881690115*(m.x740*m.x904 - m.x786*m.x858))*m.b989 + m.x431 == 0)
m.c433 = Constraint(expr=-(2.37907356631141*(m.x786**2 + m.x904**2) - 2.43006356631141*(m.x786*m.x740 + m.x904*m.x858)
    - 0.0130508881690115*(m.x786*m.x858 - m.x740*m.x904))*m.b989 + m.x432 == 0)
m.c434 = Constraint(expr=-(12.7605925999582*(m.x800**2 + m.x918**2) - 12.7667625999582*(m.x800*m.x801 + m.x918*m.x919)
    - 6.01491779280401*(m.x800*m.x919 - m.x801*m.x918))*m.b990 + m.x433 == 0)
m.c435 = Constraint(expr=-(12.7605925999582*(m.x801**2 + m.x919**2) - 12.7667625999582*(m.x801*m.x800 + m.x919*m.x918)
    - 6.01491779280401*(m.x801*m.x918 - m.x800*m.x919))*m.b990 + m.x434 == 0)
m.c436 = Constraint(expr=-(61.7194917822263*(m.x781**2 + m.x899**2) - 62.0384917822263*(m.x781*m.x784 + m.x899*m.x902)
    - 5.35081991621702*(m.x781*m.x902 - m.x784*m.x899))*m.b991 + m.x435 == 0)
m.c437 = Constraint(expr=-(61.7194917822263*(m.x784**2 + m.x902**2) - 62.0384917822263*(m.x784*m.x781 + m.x902*m.x899)
    - 5.35081991621702*(m.x784*m.x899 - m.x781*m.x902))*m.b991 + m.x436 == 0)
m.c438 = Constraint(expr=-(4.3925013610028*(m.x771**2 + m.x889**2) - 4.4207313610028*(m.x771*m.x775 + m.x889*m.x893) -
    0.970799162177584*(m.x771*m.x893 - m.x775*m.x889))*m.b992 + m.x437 == 0)
m.c439 = Constraint(expr=-(4.3925013610028*(m.x775**2 + m.x893**2) - 4.4207313610028*(m.x775*m.x771 + m.x893*m.x889) -
    0.970799162177584*(m.x775*m.x889 - m.x771*m.x893))*m.b992 + m.x438 == 0)
m.c440 = Constraint(expr=-(10.7824630999353*(m.x808**2 + m.x926**2) - 10.7933630999353*(m.x808*m.x809 + m.x926*m.x927)
    - 3.28382981106523*(m.x808*m.x927 - m.x809*m.x926))*m.b993 + m.x439 == 0)
m.c441 = Constraint(expr=-(10.7824630999353*(m.x809**2 + m.x927**2) - 10.7933630999353*(m.x809*m.x808 + m.x927*m.x926)
    - 3.28382981106523*(m.x809*m.x926 - m.x808*m.x927))*m.b993 + m.x440 == 0)
m.c442 = Constraint(expr=-(9.38124994570534*(m.x782**2 + m.x900**2) - 9.39465994570534*(m.x782*m.x783 + m.x900*m.x901)
    - 2.0733042638798*(m.x782*m.x901 - m.x783*m.x900))*m.b994 + m.x441 == 0)
m.c443 = Constraint(expr=-(9.38124994570534*(m.x783**2 + m.x901**2) - 9.39465994570534*(m.x783*m.x782 + m.x901*m.x900)
    - 2.0733042638798*(m.x783*m.x900 - m.x782*m.x901))*m.b994 + m.x442 == 0)
m.c444 = Constraint(expr=-(7.32108849115694*(m.x785**2 + m.x903**2) - 7.38308849115694*(m.x785*m.x791 + m.x903*m.x909)
    - 2.45094331058898*(m.x785*m.x909 - m.x791*m.x903))*m.b995 + m.x443 == 0)
m.c445 = Constraint(expr=-(7.32108849115694*(m.x791**2 + m.x909**2) - 7.38308849115694*(m.x791*m.x785 + m.x909*m.x903)
    - 2.45094331058898*(m.x791*m.x903 - m.x785*m.x909))*m.b995 + m.x444 == 0)
m.c446 = Constraint(expr=-(45.841703735783*(m.x722**2 + m.x840**2) - 45.844453735783*(m.x722*m.x723 + m.x840*m.x841) -
    10.1166366657329*(m.x722*m.x841 - m.x723*m.x840))*m.b996 + m.x445 == 0)
m.c447 = Constraint(expr=-(45.841703735783*(m.x723**2 + m.x841**2) - 45.844453735783*(m.x723*m.x722 + m.x841*m.x840) -
    10.1166366657329*(m.x723*m.x840 - m.x722*m.x841))*m.b996 + m.x446 == 0)
m.c448 = Constraint(expr=-(9.0016783596171*(m.x785**2 + m.x903**2) - 9.0535783596171*(m.x785*m.x793 + m.x903*m.x911) -
    2.76985714170464*(m.x785*m.x911 - m.x793*m.x903))*m.b997 + m.x447 == 0)
m.c449 = Constraint(expr=-(9.0016783596171*(m.x793**2 + m.x911**2) - 9.0535783596171*(m.x793*m.x785 + m.x911*m.x903) -
    2.76985714170464*(m.x793*m.x903 - m.x785*m.x911))*m.b997 + m.x448 == 0)
m.c450 = Constraint(expr=-(12.3453090712835*(m.x767**2 + m.x885**2) - 12.3542490712835*(m.x767*m.x774 + m.x885*m.x892)
    - 4.38154869704769*(m.x767*m.x892 - m.x774*m.x885))*m.b998 + m.x449 == 0)
m.c451 = Constraint(expr=-(12.3453090712835*(m.x774**2 + m.x892**2) - 12.3542490712835*(m.x774*m.x767 + m.x892*m.x885)
    - 4.38154869704769*(m.x774*m.x885 - m.x767*m.x892))*m.b998 + m.x450 == 0)
m.c452 = Constraint(expr=-(5.82182833912975*(m.x741**2 + m.x859**2) - 5.91002833912975*(m.x741*m.x743 + m.x859*m.x861)
    - 1.15299939376887*(m.x741*m.x861 - m.x743*m.x859))*m.b999 + m.x451 == 0)
m.c453 = Constraint(expr=-(5.82182833912975*(m.x743**2 + m.x861**2) - 5.91002833912975*(m.x743*m.x741 + m.x861*m.x859)
    - 1.15299939376887*(m.x743*m.x859 - m.x741*m.x861))*m.b999 + m.x452 == 0)
m.c454 = Constraint(expr=-(5.24800773318305*(m.x819**2 + m.x937**2) - 5.27105773318305*(m.x819*m.x826 + m.x937*m.x944)
    - 1.13561784367419*(m.x819*m.x944 - m.x826*m.x937))*m.b1000 + m.x453 == 0)
m.c455 = Constraint(expr=-(5.24800773318305*(m.x826**2 + m.x944**2) - 5.27105773318305*(m.x826*m.x819 + m.x944*m.x937)
    - 1.13561784367419*(m.x826*m.x937 - m.x819*m.x944))*m.b1000 + m.x454 == 0)
m.c456 = Constraint(expr=-(9.15403548600215*(m.x717**2 + m.x835**2) - 9.16673548600215*(m.x717*m.x718 + m.x835*m.x836)
    - 2.78030115341206*(m.x717*m.x836 - m.x718*m.x835))*m.b1001 + m.x455 == 0)
m.c457 = Constraint(expr=-(9.15403548600215*(m.x718**2 + m.x836**2) - 9.16673548600215*(m.x718*m.x717 + m.x836*m.x835)
    - 2.78030115341206*(m.x718*m.x835 - m.x717*m.x836))*m.b1001 + m.x456 == 0)
m.c458 = Constraint(expr=-(8.62798516151017*(m.x753**2 + m.x871**2) - 8.64148516151017*(m.x753*m.x755 + m.x871*m.x873)
    - 2.61690258192902*(m.x753*m.x873 - m.x755*m.x871))*m.b1002 + m.x457 == 0)
m.c459 = Constraint(expr=-(8.62798516151017*(m.x755**2 + m.x873**2) - 8.64148516151017*(m.x755*m.x753 + m.x873*m.x871)
    - 2.61690258192902*(m.x755*m.x871 - m.x753*m.x873))*m.b1002 + m.x458 == 0)
m.c460 = Constraint(expr=-(22.5547722109855*(m.x790**2 + m.x908**2) - 22.5599422109855*(m.x790*m.x791 + m.x908*m.x909)
    - 6.83466229544634*(m.x790*m.x909 - m.x791*m.x908))*m.b1003 + m.x459 == 0)
m.c461 = Constraint(expr=-(22.5547722109855*(m.x791**2 + m.x909**2) - 22.5599422109855*(m.x791*m.x790 + m.x909*m.x908)
    - 6.83466229544634*(m.x791*m.x908 - m.x790*m.x909))*m.b1003 + m.x460 == 0)
m.c462 = Constraint(expr=-(20.9477941936629*(m.x731**2 + m.x849**2) - 20.9699941936629*(m.x731*m.x733 + m.x849*m.x851)
    - 6.33418588916134*(m.x731*m.x851 - m.x733*m.x849))*m.b1004 + m.x461 == 0)
m.c463 = Constraint(expr=-(20.9477941936629*(m.x733**2 + m.x851**2) - 20.9699941936629*(m.x733*m.x731 + m.x851*m.x849)
    - 6.33418588916134*(m.x733*m.x849 - m.x731*m.x851))*m.b1004 + m.x462 == 0)
m.c464 = Constraint(expr=-(9.42714779711218*(m.x801**2 + m.x919**2) - 9.44094779711218*(m.x801*m.x804 + m.x919*m.x922)
    - 1.85116623472788*(m.x801*m.x922 - m.x804*m.x919))*m.b1005 + m.x463 == 0)
m.c465 = Constraint(expr=-(9.42714779711218*(m.x804**2 + m.x922**2) - 9.44094779711218*(m.x804*m.x801 + m.x922*m.x919)
    - 1.85116623472788*(m.x804*m.x919 - m.x801*m.x922))*m.b1005 + m.x464 == 0)
m.c466 = Constraint(expr=-(9.1808520700796*(m.x772**2 + m.x890**2) - 9.1929520700796*(m.x772*m.x774 + m.x890*m.x892) -
    3.26416414082537*(m.x772*m.x892 - m.x774*m.x890))*m.b1006 + m.x465 == 0)
m.c467 = Constraint(expr=-(9.1808520700796*(m.x774**2 + m.x892**2) - 9.1929520700796*(m.x774*m.x772 + m.x892*m.x890) -
    3.26416414082537*(m.x774*m.x890 - m.x772*m.x892))*m.b1006 + m.x466 == 0)
m.c468 = Constraint(expr=-(32.6619364312854*(m.x780**2 + m.x898**2) - 32.8519364312854*(m.x780*m.x781 + m.x898*m.x899)
- 2.92621552980655*(m.x780*m.x899 - m.x781*m.x898))*m.b1007 + | |
#!/usr/bin/python3
from bcc import BPF
intrucode="""
BPF_PERF_OUTPUT(events);
//#undef DEBUG
//#define DEBUG
#define DDS_RECORD 1
#define SOCK_RECORD 2
#define FID_CREATE_TOPIC 40
#define FID_CREATE_DDSWRITER 41
#define FID_CREATE_DDSREADER 42
#define FID_VWRITER_NEW 43
#define FID_DDSWRITER_WRITE 1
#define FID_WRITER_WRITE 2
#define FID_RTPS_WRITE 3
#define FID_DDSREADER_READ 6
#define FID_DDSREADER_TAKE 7
#define FID_DO_PACKET 8
#define FID_GROUPWRITE 9
#define FID_DDSREADER_FLUSH_COPY 10
#define FID_SOCK_SEND 20
#define FID_IP_SEND 21
#define FID_SOCK_RECV 30
#define FID_RECV_UDP 31
#ifdef asm_inline
#undef asm_inline
#define asm_inline asm
#endif
#include <linux/sched.h>
#include <linux/stddef.h>
#include <net/inet_sock.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/ip.h>
// DDS topic name record.  Keyed in tName_map by several kinds of u64 keys:
// a pid_tgid while a create call is in flight, or the address of a topic /
// writer / reader / v_message object once known.
typedef struct topic_info_t {
    char name[64];
} topic_info;
BPF_HASH(tName_map, u64, topic_info);
// Mirror of the OpenSplice v_gid global identifier (system / local / serial).
typedef struct v_gid_t {
    u32 systemId;
    u32 localId;
    u32 serial;
} v_gid;
// Partial mirror of the OpenSplice v_message layout; only used for
// offsetof() and for reading the fields below from user memory.
// NOTE(review): field offsets must match the userspace OpenSplice build —
// verify against the installed headers.
typedef struct v_message_t {
    u32 v_node;
    u64 allocTime;
    u32 sequenceNumber;
    u32 transactionId;
    u64 writeTime;
    v_gid writerGID;
    v_gid writerInstanceGID;
    u64 qos;
} v_message;
// (writer GID, sequence number) pair used to correlate one sample across
// the write/transmit/receive probes.
typedef struct trace_id_t {
    v_gid gid;
    u32 seqNum;
} traceId;
BPF_HASH(traceId_map, u64, traceId);
// One event record pushed to userspace via the `events` perf buffer.
typedef struct bpf_data_t {
    u64 ts;            // submit timestamp (ns)
    u64 sts;           // function-entry timestamp (ns)
    u64 ets;           // function-return timestamp (ns)
    u64 pid;           // pid_tgid of the traced task
    // char comm[TASK_COMM_LEN];
    char comm[32];     // task command name
    char tName[20];    // DDS topic name (truncated to 20 bytes)
    u8 recordType;     // DDS_RECORD or SOCK_RECORD
    u8 fun_ID;         // FID_* identifying the instrumented function
    u8 fun_ret;        // 0 = entry probe, 1 = return probe
    u64 arg1;
    u64 arg2;
    u64 arg3;
    u64 arg4;
    u64 arg5;
    u64 arg6;
    u64 ret;           // function return value / object address
    u64 link;
    u64 seqNum;        // DDS sample sequence number
    u32 gid_sys;       // writer GID, split into its three parts
    u32 gid_local;
    u32 gid_seria;
} bpf_data;
// In-flight event records, keyed by FID + pid_tgid (see insert_bpf_data).
BPF_HASH(data_map, u64, bpf_data);
// Function-entry timestamps, keyed by FID + pid_tgid (see Start_TS).
BPF_HASH(ts_map, u64, u64);
// Record an entry timestamp for function `id`, keyed by id + pid_tgid so
// concurrent tasks do not collide.  Paired with End_TS() in the uretprobe.
static void Start_TS (u64 id) {
    u64 ts = bpf_ktime_get_ns();
    u64 pid = bpf_get_current_pid_tgid();
    id += pid;   // per-task key: function id + pid_tgid
    ts_map.update(&id, &ts);
}
// Fetch-and-delete the timestamp stored by Start_TS() for `id`; the current
// time is written to *e_ts.  Returns the start time, or 0 if none was found
// (e.g. the return probe fired without a matching entry probe).
static u64 End_TS (u64 id, u64 * e_ts) {
    *e_ts = bpf_ktime_get_ns();
    u64 pid = bpf_get_current_pid_tgid();
    id += pid;
    u64* s_ts_p = ts_map.lookup(&id);
    if (s_ts_p) {
        u64 s_ts;
        s_ts = *s_ts_p;
        ts_map.delete(&id);
        return s_ts;
    }
    return 0;
}
// Copy the topic name and trace id recorded under key `id` (a pid_tgid or
// object address) into `data`.  Map entries are left in place; callers that
// own them also call drop_topic_info().
static void get_topic_info (u64 id, bpf_data* data) {
    topic_info* t_info_p = tName_map.lookup(&id);
    if (t_info_p) {
        // BUG FIX: data->tName is char[20]; the original passed a size of 64
        // and could overflow into the fields that follow tName in bpf_data.
        // Bound the copy by the real destination size, which also matches
        // every other tName copy in this program (they all use 20).
        bpf_probe_read_str(data->tName, sizeof(data->tName), t_info_p->name);
    }
    traceId * trace_id_p = traceId_map.lookup(&id);
    if (trace_id_p) {
        traceId trace_id = *trace_id_p;
        data->gid_sys = trace_id.gid.systemId;
        data->gid_local = trace_id.gid.localId;
        data->gid_seria = trace_id.gid.serial;
        data->seqNum = trace_id.seqNum;
    }
}
// Delete any topic name / trace id stored under key `id`.
static void drop_topic_info (u64 id) {
    topic_info* t_info_p = tName_map.lookup(&id);
    if (t_info_p) {
        tName_map.delete(&id);
    }
    traceId * trace_id_p = traceId_map.lookup(&id);
    if (trace_id_p) {
        traceId_map.delete(&id);
    }
}
// Stash a partially filled event record for the current task, keyed by
// FID + pid_tgid, to be completed and submitted by the return probe.
static void insert_bpf_data(u64 id, bpf_data* data) {
    u64 pid = bpf_get_current_pid_tgid();
    id += pid;
    data_map.update(&id, data);
}
// Fetch-and-delete the record stored by insert_bpf_data() for this task.
// Returns 0 if no entry exists.
// NOTE(review): the returned pointer refers to map storage that was just
// deleted; callers read it immediately afterwards.  Presumably safe in
// practice for this single-task access pattern, but worth confirming
// against the BPF map lifetime rules.
static bpf_data* get_bpf_data(u64 id) {
    u64 pid = bpf_get_current_pid_tgid();
    id += pid;
    bpf_data* data_p = data_map.lookup(&id);
    if (data_p) {
        data_map.delete(&id);
        return data_p;
    }
    return 0;
}
/*************************************************************************************************/
/** **/
/** This part record OpenSplice DDS topic information. **/
/** **/
/*************************************************************************************************/
/* =======================================================================
Instrumented function: DDS_DomainParticipant_create_topic
======================================================================= */
// uprobe on DDS_DomainParticipant_create_topic: arg2 is the topic name.
// Record the (truncated) name under the caller's pid_tgid until the return
// probe learns the topic's address.
int T_GetTopicName(struct pt_regs *ctx) { // 2:topic name; 3: type_name; ret: topic pointer
    topic_info topic = {};
    u64 tName_p = PT_REGS_PARM2(ctx);
    u64 pid = bpf_get_current_pid_tgid();
    bpf_probe_read_str(topic.name, 20, (const char *)tName_p);
    tName_map.update(&pid, &topic);
    return 0;
}
// uretprobe: re-key the name recorded above from pid_tgid to the returned
// topic pointer, so later writer/reader creation can find it by topic.
int T_MapTopic2TopicName(struct pt_regs *ctx){ // ret: topic
    u64 pid = bpf_get_current_pid_tgid();
    topic_info* t_info_p = tName_map.lookup(&pid);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 topic_p = PT_REGS_RC(ctx);
        tName_map.update(&topic_p, &topic);
        tName_map.delete(&pid);
#ifdef DEBUG
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ret = topic_p;
        data.ts = bpf_ktime_get_ns();
        data.pid = pid;
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_CREATE_TOPIC;
        data.fun_ret = 1;
        bpf_probe_read_str(data.tName, 20, (const char *)t_info_p->name);
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
/* =======================================================================
Instrumented function: DDS_Publisher_create_datawriter
======================================================================= */
// uprobe on DDS_Publisher_create_datawriter: arg2 is the topic pointer.
// Re-key its recorded name to the caller's pid_tgid while the call runs.
int W_MapPID2Topic(struct pt_regs *ctx) { // 2:topic; ret: writer
    u64 topic_p = PT_REGS_PARM2(ctx);
    topic_info* t_info_p = tName_map.lookup(&topic_p);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 pid = bpf_get_current_pid_tgid();
        tName_map.update(&pid, &topic);
    }
    return 0;
}
// uretprobe: re-key the name from pid_tgid to the returned DDS writer.
int W_MapWriter2TopicName(struct pt_regs *ctx) { // 2:topic; ret: writer
    u64 pid = bpf_get_current_pid_tgid();
    topic_info* t_info_p;
    t_info_p = tName_map.lookup(&pid);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 writer = PT_REGS_RC(ctx);
        tName_map.update(&writer, &topic);
        tName_map.delete(&pid);
#ifdef DEBUG
        //topic_info topic = *t_info_p;
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ts = bpf_ktime_get_ns();
        data.pid = pid;
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_CREATE_DDSWRITER;
        data.fun_ret = 1;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        data.ret = writer;
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
// uretprobe on v_writerNew: also record the name under the kernel-side
// v_writer address.  The pid_tgid entry is deliberately kept (the outer
// create_datawriter return probe still needs it).
int W_MapVWriter2TopicName (struct pt_regs *ctx) { //ret: v_writer
    u64 pid = bpf_get_current_pid_tgid();
    topic_info* t_info_p;
    t_info_p = tName_map.lookup(&pid);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 v_writer = PT_REGS_RC(ctx);
        tName_map.update(&v_writer, &topic);
#ifdef DEBUG
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ts = bpf_ktime_get_ns();
        data.pid = pid;
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_VWRITER_NEW;
        data.fun_ret = 1;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        data.ret = v_writer;
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
/* =======================================================================
Instrumented function: DDS_Subscriber_create_datareader
======================================================================= */
// uprobe on DDS_Subscriber_create_datareader: arg2 is the topic pointer.
// Re-key its recorded name to the caller's pid_tgid while the call runs.
int R_MapPID2Topic(struct pt_regs *ctx) { // 2:topic; ret: reader
    u64 topic_p = PT_REGS_PARM2(ctx);
    topic_info* t_info_p = tName_map.lookup(&topic_p);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 pid = bpf_get_current_pid_tgid();
        tName_map.update(&pid, &topic);
    }
    return 0;
}
// uretprobe: re-key the name from pid_tgid to the returned DDS reader.
int R_MapReader2TopicName(struct pt_regs *ctx) { // 2:topic; ret: reader_p
    u64 pid = bpf_get_current_pid_tgid();
    topic_info* t_info_p = tName_map.lookup(&pid);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        u64 reader = PT_REGS_RC(ctx);
        tName_map.update(&reader, &topic);
        tName_map.delete(&pid);
#ifdef DEBUG
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ts = bpf_ktime_get_ns();
        data.pid = pid;
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_CREATE_DDSREADER;
        data.fun_ret = 1;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        data.ret = reader;
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
// uretprobe on v_dataReaderNewBySQL: also record the name under the
// returned v_reader address (pid_tgid entry intentionally kept, mirroring
// W_MapVWriter2TopicName).
int uretprobe_v_dataReaderNewBySQL (struct pt_regs *ctx) {
    u64 pid = bpf_get_current_pid_tgid();
    topic_info* t_info_p = tName_map.lookup(&pid);
    if (t_info_p) {
        u64 v_reader = PT_REGS_RC(ctx);
        tName_map.update(&v_reader, t_info_p);
#ifdef DEBUG
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ts = bpf_ktime_get_ns();
        data.pid = pid;
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_VWRITER_NEW;   // NOTE(review): reuses the writer FID for a reader event — confirm intended
        data.fun_ret = 1;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
/*************************************************************************************************/
/** **/
/** This part record write/read and its corresponding v_message. **/
/** **/
/*************************************************************************************************/
/* =======================================================================
Instrumented function: DDS_DataWriter_write
======================================================================= */
// uprobe on DDS_DataWriter_write: start the latency clock for this call.
int uprobe_DDS_DataWriter_write(struct pt_regs *ctx) {
    u64 writer = PT_REGS_PARM1(ctx); // DDS_DataWriter (unused outside DEBUG builds)
    Start_TS(FID_DDSWRITER_WRITE);
#ifdef DEBUG
    bpf_data data = {};
    data.recordType = DDS_RECORD;
    data.ts = bpf_ktime_get_ns();
    data.pid = bpf_get_current_pid_tgid();
    bpf_get_current_comm(&(data.comm), sizeof(data.comm));
    data.fun_ID = FID_DDSWRITER_WRITE;
    data.fun_ret = 0;
    events.perf_submit(ctx, &data, sizeof(data));
#endif
    return 0;
}
// uretprobe: emit one event with entry/exit timestamps plus the topic name
// and trace id that writerWrite() recorded under this pid_tgid mid-call.
int uretprobe_DDS_DataWriter_write(struct pt_regs *ctx) {
    u64 sts, ets;
    sts = End_TS(FID_DDSWRITER_WRITE, &ets);
    bpf_data data = {};
    data.recordType = DDS_RECORD;
    data.ts = bpf_ktime_get_ns();
    data.pid = bpf_get_current_pid_tgid();
    bpf_get_current_comm(&(data.comm), sizeof(data.comm));
    data.fun_ID = FID_DDSWRITER_WRITE;
    data.fun_ret = 1;
    data.sts = sts;
    data.ets = ets;
    get_topic_info (data.pid, &data);   // fill tName/gid/seqNum recorded during the call
    drop_topic_info (data.pid);         // this probe owns the pid-keyed entries
    events.perf_submit(ctx, &data, sizeof(data));
    return 0;
}
/* =======================================================================
Instrumented function: writerWrite
======================================================================= */
// uprobe on writerWrite (kernel-side write path): arg1 is the v_writer,
// arg3 the v_message being written.  Propagate the topic name from the
// v_writer to both the v_message address and the pid_tgid, and record the
// sample's (writer GID, sequence number) under the same two keys so the
// transmit probes and the outer return probe can correlate this sample.
int writerWrite (struct pt_regs *ctx) {
    u64 v_writer = PT_REGS_PARM1(ctx);
    topic_info* t_info_p = tName_map.lookup(&v_writer);
    if (t_info_p) {
        u64 pid = bpf_get_current_pid_tgid();
        topic_info topic = *t_info_p;
        u64 v_mess_p = PT_REGS_PARM3(ctx);
        v_message v_mess;
        bpf_probe_read(&v_mess, sizeof(v_message), (const void *) v_mess_p);
        tName_map.update(&v_mess_p, &topic);
        tName_map.update(&pid, &topic);
        traceId trace_id;
        // Read only the two correlating fields straight from user memory at
        // their struct offsets (layout must match the v_message mirror above).
        bpf_probe_read(&trace_id.gid, sizeof(v_gid), (const void *) v_mess_p + offsetof(v_message, writerGID));
        bpf_probe_read(&trace_id.seqNum, sizeof(u32), (const void *) v_mess_p + offsetof(v_message, sequenceNumber));
        traceId_map.update(&v_mess_p, &trace_id);
        traceId_map.update(&pid, &trace_id);
#ifdef DEBUG
        bpf_data data = {};
        data.recordType = DDS_RECORD;
        data.ts = bpf_ktime_get_ns();
        data.pid = bpf_get_current_pid_tgid();
        bpf_get_current_comm(&(data.comm), sizeof(data.comm));
        data.fun_ID = FID_WRITER_WRITE;
        data.fun_ret = 0;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        data.gid_sys = v_mess.writerGID.systemId;
        data.gid_local = v_mess.writerGID.localId;
        data.gid_seria = v_mess.writerGID.serial;
        data.seqNum = v_mess.sequenceNumber;
        events.perf_submit(ctx, &data, sizeof(data));
#endif
    }
    return 0;
}
/* =======================================================================
Instrumented function: rtps_Write
======================================================================= */
// uprobe on rtps_Write: arg3 is the v_message.  Move the topic name and
// trace id recorded by writerWrite() from the v_message key to the pid_tgid
// key, and stash an event record for the return probe to complete.
int uprobe_rtps_Write(struct pt_regs *ctx){ // (xp, &sender, message)
    Start_TS(FID_RTPS_WRITE);
    bpf_data data = {};
    u64 pid = bpf_get_current_pid_tgid();
    // FIX: the original declared `v_gid gid = *((v_gid*)PT_REGS_PARM2(ctx));`
    // here — a direct dereference of a userspace pointer, which BPF does not
    // permit (it must go through bpf_probe_read) — and never used the value.
    // The unused, unsafe read is removed.
    u64 v_mess_p = PT_REGS_PARM3(ctx); //v_message
    topic_info* t_info_p = tName_map.lookup(&v_mess_p);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        tName_map.update(&pid, &topic);
        tName_map.delete(&v_mess_p);
    }
    traceId * trace_id_p = traceId_map.lookup(&v_mess_p);
    if (trace_id_p) {
        traceId trace_id = *trace_id_p;
        data.gid_sys = trace_id.gid.systemId;
        data.gid_local = trace_id.gid.localId;
        data.gid_seria = trace_id.gid.serial;
        data.seqNum = trace_id.seqNum;
        traceId_map.update(&pid, &trace_id);
        traceId_map.delete(&v_mess_p);
    }
    data.ret = v_mess_p;
    data.recordType = DDS_RECORD;
    data.ts = bpf_ktime_get_ns();
    data.pid = pid;
    bpf_get_current_comm(&(data.comm), sizeof(data.comm));
    data.fun_ID = FID_RTPS_WRITE;
    insert_bpf_data(FID_RTPS_WRITE,&data);   // submitted by uretprobe_rtps_Write
    //events.perf_submit(ctx, &data, sizeof(data));
    return 0;
}
// uretprobe on rtps_Write: complete the record stashed by the entry probe
// with entry/exit timestamps and submit it — but only when a writer GID was
// found, i.e. the sample was correlated to a traced write.
int uretprobe_rtps_Write(struct pt_regs *ctx){ // (xp, &sender, message)
    u64 sts, ets;
    sts = End_TS(FID_RTPS_WRITE, &ets);
    bpf_data* data_p = get_bpf_data(FID_RTPS_WRITE);
    if (data_p == 0) return 0;   // no matching entry probe fired
    data_p->ets = ets;
    data_p->sts = sts;
    if (data_p->gid_sys)
        events.perf_submit(ctx, data_p, sizeof(*data_p));
    return 0;
}
/* =======================================================================
This one process DDS DataReader Vmessage information
======================================================================= */
//DDS_DataReader_read
//DDS_DataReader_read
// uprobe on DDS_DataReader_take: arg1 is the reader.  Start the latency
// clock and, when the reader's topic is known, stash a record (carrying the
// topic name) for the return probe.  NOTE(review): when the topic is not in
// tName_map no record is stashed, so the return probe emits nothing.
int uprobe_DDS_DataReader_take(struct pt_regs *ctx) {
    u64 reader = PT_REGS_PARM1(ctx);
    Start_TS(FID_DDSREADER_TAKE);
    bpf_data data = {};
    topic_info* t_info_p = tName_map.lookup(&reader);
    if (t_info_p) {
        topic_info topic = *t_info_p;
        bpf_probe_read_str(data.tName, 20, t_info_p->name);
        insert_bpf_data(FID_DDSREADER_TAKE, &data);
    }
#ifdef DEBUG
    bpf_data dg_data = {};
    dg_data = data;
    dg_data.recordType = DDS_RECORD;
    dg_data.ts = bpf_ktime_get_ns();
    dg_data.pid = bpf_get_current_pid_tgid();
    bpf_get_current_comm(&(dg_data.comm), sizeof(dg_data.comm));
    dg_data.fun_ID = FID_DDSREADER_TAKE;
    dg_data.fun_ret = 0;
    events.perf_submit(ctx, &dg_data, sizeof(dg_data));
#endif
    return 0;
}
int uretprobe_DDS_DataReader_take(struct pt_regs *ctx) {
u64 sts, ets;
sts = End_TS(FID_DDSREADER_TAKE, &ets);
u64 pid = bpf_get_current_pid_tgid();
u64 id = pid + FID_DDSREADER_TAKE;
bpf_data * data_p = get_bpf_data(FID_DDSREADER_TAKE);
if (data_p == 0) return 0;
data_p->recordType = DDS_RECORD;
data_p->ts = bpf_ktime_get_ns();
data_p->pid = pid;
bpf_get_current_comm(&(data_p->comm), sizeof(data_p->comm));
data_p->fun_ID = FID_DDSREADER_TAKE;
data_p->fun_ret = 1;
data_p->sts = sts;
data_p->ets | |
# <gh_stars>0  (scraper artifact — commented out; not valid Python)
from sysdfiles import IniFile, NetworkFile, ServiceFile
from tests import *
class TestIniFiles:
def __init__(self):
self.is_setup = False
# =========================================================================
# setUp
# =========================================================================
def setUp(self):
assert not self.is_setup
print('')
self.test_info = TestInfo()
self.ini_file_name1 = os.path.join(os.path.dirname(__file__), 'test1.network')
self.ini_file_name2 = os.path.join(os.path.dirname(__file__), 'test2.network')
self.ini = IniFile(self.ini_file_name1)
self.is_setup = True
# =========================================================================
# tearDown
# =========================================================================
def tearDown(self):
assert self.is_setup
print('')
if self.test_info.num_errors == 1:
print('1 error')
else:
print('{0:d} errors'.format(self.test_info.num_errors))
self.is_setup = False
# =========================================================================
# test_00_files
# =========================================================================
def test_00_files(self):
assert self.is_setup
print('')
self.ini.save(self.ini_file_name2)
assert 0 == compare_files(self.test_info, self.ini_file_name1, self.ini_file_name2)
# =========================================================================
# test_05_formatted_data
# =========================================================================
    def test_05_formatted_data(self):
        """Exercise IniFile's formatted-value converters: durations (str<->sec),
        byte sizes (str<->nb), bit rates (str<->bps) and file modes (str<->fm).
        Each check_* helper converts, compares against the expected value, and
        (where a 4th argument is given) checks the canonical re-formatting."""
        assert self.is_setup
        print('')
        # Duration parsing: every accepted unit spelling, canonicalized form last.
        assert 0 == check_str_to_sec(self.test_info, '1years', IniFile.SECONDS_PER_YEAR, '1y')
        assert 0 == check_str_to_sec(self.test_info, '1year', IniFile.SECONDS_PER_YEAR, '1y')
        assert 0 == check_str_to_sec(self.test_info, '1y', IniFile.SECONDS_PER_YEAR)
        assert 0 == check_str_to_sec(self.test_info, '1months', IniFile.SECONDS_PER_MONTH, '1M')
        assert 0 == check_str_to_sec(self.test_info, '1month', IniFile.SECONDS_PER_MONTH, '1M')
        assert 0 == check_str_to_sec(self.test_info, '1M', IniFile.SECONDS_PER_MONTH)
        assert 0 == check_str_to_sec(self.test_info, '1weeks', IniFile.SECONDS_PER_WEEK, '1w')
        assert 0 == check_str_to_sec(self.test_info, '1week', IniFile.SECONDS_PER_WEEK, '1w')
        assert 0 == check_str_to_sec(self.test_info, '1w', IniFile.SECONDS_PER_WEEK)
        assert 0 == check_str_to_sec(self.test_info, '1days', IniFile.SECONDS_PER_DAY, '1d')
        assert 0 == check_str_to_sec(self.test_info, '1day', IniFile.SECONDS_PER_DAY, '1d')
        assert 0 == check_str_to_sec(self.test_info, '1d', IniFile.SECONDS_PER_DAY)
        assert 0 == check_str_to_sec(self.test_info, '1hours', IniFile.SECONDS_PER_HOUR, '1h')
        assert 0 == check_str_to_sec(self.test_info, '1hour', IniFile.SECONDS_PER_HOUR, '1h')
        assert 0 == check_str_to_sec(self.test_info, '1hr', IniFile.SECONDS_PER_HOUR, '1h')
        assert 0 == check_str_to_sec(self.test_info, '1h', IniFile.SECONDS_PER_HOUR)
        assert 0 == check_str_to_sec(self.test_info, '1minutes', 60, '1m')
        assert 0 == check_str_to_sec(self.test_info, '1minute', 60, '1m')
        assert 0 == check_str_to_sec(self.test_info, '1min', 60, '1m')
        assert 0 == check_str_to_sec(self.test_info, '1m', 60)
        assert 0 == check_str_to_sec(self.test_info, '1seconds', 1, '1s')
        assert 0 == check_str_to_sec(self.test_info, '1second', 1, '1s')
        assert 0 == check_str_to_sec(self.test_info, '1sec', 1, '1s')
        assert 0 == check_str_to_sec(self.test_info, '1s', 1)
        # Bare numbers default to seconds; zero and 'infinity' are special-cased.
        assert 0 == check_str_to_sec(self.test_info, '1', 1, '1s')
        assert 0 == check_str_to_sec(self.test_info, '0s', 0)
        assert 0 == check_str_to_sec(self.test_info, '0', 0, '0s')
        assert 0 == check_str_to_sec(self.test_info, 'infinity', IniFile.SECONDS_INFINITY)
        assert 0 == check_str_to_sec(self.test_info, '1msec', IniFile.SECONDS_PER_MS, '1ms')
        assert 0 == check_str_to_sec(self.test_info, '1ms', IniFile.SECONDS_PER_MS)
        assert 0 == check_str_to_sec(self.test_info, '1usec', IniFile.SECONDS_PER_US, '1us')
        assert 0 == check_str_to_sec(self.test_info, '1us', IniFile.SECONDS_PER_US)
        assert 0 == check_str_to_sec(self.test_info, '1nsec', IniFile.SECONDS_PER_NS, '1ns')
        assert 0 == check_str_to_sec(self.test_info, '1ns', IniFile.SECONDS_PER_NS)
        # Trailing argument overrides the default unit for bare numbers.
        assert 0 == check_str_to_sec(self.test_info, '1', IniFile.SECONDS_PER_NS, '1ns', IniFile.SECONDS_PER_NS)
        # Mixed/compound durations are summed and re-rendered in canonical order.
        assert 0 == check_str_to_sec(self.test_info, '1d24h1440m86401s', IniFile.SECONDS_PER_DAY * 4 + 1, '4d 1s')
        sec = IniFile.SECONDS_PER_WEEK + IniFile.SECONDS_PER_DAY + IniFile.SECONDS_PER_HOUR +\
            IniFile.SECONDS_PER_MINUTE + 1
        assert 0 == check_str_to_sec(self.test_info, '1w 1d 1h 1m 1s', sec)
        assert 0 == check_str_to_sec(self.test_info, ' 1 d 1w 1 s 1 m 1h', sec, '1w 1d 1h 1m 1s')
        # Duration formatting (seconds -> canonical string).
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_INFINITY, 'infinity')
        assert 0 == check_sec_to_str(self.test_info, 1, '1s')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_YEAR, '1y')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_MONTH, '1M')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_WEEK, '1w')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_DAY, '1d')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_HOUR, '1h')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_MS, '1ms')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_US, '1us')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_NS, '1ns')
        assert 0 == check_sec_to_str(self.test_info, IniFile.SECONDS_PER_WEEK +
                                     IniFile.SECONDS_PER_DAY + IniFile.SECONDS_PER_HOUR +
                                     IniFile.SECONDS_PER_MINUTE + 1, '1w 1d 1h 1m 1s')
        # Byte-size parsing/formatting (K/M/G/T/P suffixes, powers of 1024).
        assert 0 == check_str_to_nb(self.test_info, '1', 1)
        assert 0 == check_str_to_nb(self.test_info, '1K', IniFile.KILOBYTE)
        assert 0 == check_str_to_nb(self.test_info, str(IniFile.KILOBYTE), IniFile.KILOBYTE, '1K')
        assert 0 == check_str_to_nb(self.test_info, '1M', IniFile.MEGABYTE)
        assert 0 == check_str_to_nb(self.test_info, str(IniFile.MEGABYTE), IniFile.MEGABYTE, '1M')
        assert 0 == check_str_to_nb(self.test_info, '1G', IniFile.GIGABYTE)
        assert 0 == check_str_to_nb(self.test_info, str(IniFile.GIGABYTE), IniFile.GIGABYTE, '1G')
        assert 0 == check_str_to_nb(self.test_info, '1T', IniFile.TERABYTE)
        assert 0 == check_str_to_nb(self.test_info, str(IniFile.TERABYTE), IniFile.TERABYTE, '1T')
        assert 0 == check_str_to_nb(self.test_info, '1P', IniFile.PETABYTE)
        assert 0 == check_str_to_nb(self.test_info, str(IniFile.PETABYTE), IniFile.PETABYTE, '1P')
        assert 0 == check_nb_to_str(self.test_info, 1, '1')
        # Non-exact multiples stay numeric.
        assert 0 == check_nb_to_str(self.test_info, IniFile.KILOBYTE + 1, str(IniFile.KILOBYTE + 1))
        assert 0 == check_nb_to_str(self.test_info, IniFile.KILOBYTE, '1K')
        assert 0 == check_nb_to_str(self.test_info, IniFile.MEGABYTE, '1M')
        assert 0 == check_nb_to_str(self.test_info, IniFile.GIGABYTE, '1G')
        assert 0 == check_nb_to_str(self.test_info, IniFile.TERABYTE, '1T')
        assert 0 == check_nb_to_str(self.test_info, IniFile.PETABYTE, '1P')
        # Bit-rate parsing/formatting (K/M/G suffixes, powers of 1000).
        assert 0 == check_str_to_bps(self.test_info, '1', 1)
        assert 0 == check_str_to_bps(self.test_info, '1K', IniFile.THOUSAND)
        assert 0 == check_str_to_bps(self.test_info, str(IniFile.THOUSAND), IniFile.THOUSAND, '1K')
        assert 0 == check_str_to_bps(self.test_info, '1M', IniFile.MILLION)
        assert 0 == check_str_to_bps(self.test_info, str(IniFile.MILLION), IniFile.MILLION, '1M')
        assert 0 == check_str_to_bps(self.test_info, '1G', IniFile.BILLION)
        assert 0 == check_str_to_bps(self.test_info, str(IniFile.BILLION), IniFile.BILLION, '1G')
        assert 0 == check_bps_to_str(self.test_info, 1, '1')
        assert 0 == check_bps_to_str(self.test_info, IniFile.THOUSAND + 1, str(IniFile.THOUSAND + 1))
        assert 0 == check_bps_to_str(self.test_info, IniFile.THOUSAND, '1K')
        assert 0 == check_bps_to_str(self.test_info, IniFile.MILLION, '1M')
        assert 0 == check_bps_to_str(self.test_info, IniFile.BILLION, '1G')
        # File-mode parsing/formatting (octal string <-> int).
        assert 0 == check_str_to_fm(self.test_info, '0755', 0o755)
        assert 0 == check_fm_to_str(self.test_info, 0o755, '0755')
# =========================================================================
# test_10_section
# =========================================================================
def test_10_section(self):
assert self.is_setup
print('')
section, options = self.ini.get_section('Match')
assert 0 == check_section(self.test_info, section, options, 'Match')
section, options = self.ini.get_section('match')
assert 0 == check_section(self.test_info, section, options, 'Match')
section, options = self.ini.get_section('[Match]')
assert 0 == check_section(self.test_info, section, options, 'Match')
section, options = self.ini.get_section('Section')
assert 0 == check_section(self.test_info, section, options, None)
section, options = self.ini.add_section('Section')
assert 0 == check_section(self.test_info, section, options, 'Section')
section, options = self.ini.add_section('Section')
assert 0 == check_section(self.test_info, section, options, 'Section')
self.ini.remove_section('Section')
section, options = self.ini.get_section('Section')
assert 0 == check_section(self.test_info, section, options, None)
# =========================================================================
# test_15_option
# =========================================================================
    def test_15_option(self):
        """Exercise get/set/remove_option: case-insensitive lookup, scalar and
        list values, overwriting, integer coercion to string, and switching a
        key between scalar and list forms.  Order matters — later asserts
        depend on the state left by earlier set/remove calls."""
        assert self.is_setup
        print('')
        # Option names are matched case-insensitively.
        section, options = self.ini.get_option('Network', 'Address')
        assert 0 == check_option(self.test_info, section, options, 'Network', 'Address', '192.168.0.1/24')
        section, options = self.ini.get_option('Network', 'address')
        assert 0 == check_option(self.test_info, section, options, 'Network', 'Address', '192.168.0.1/24')
        section, options = self.ini.get_option('Network', 'aDdReSs')
        assert 0 == check_option(self.test_info, section, options, 'Network', 'Address', '192.168.0.1/24')
        # Repeated keys come back as a list.
        section, options = self.ini.get_option('Network', 'DNS')
        assert 0 == check_option(self.test_info, section, options, 'Network', 'DNS', ['8.8.8.8', '8.8.4.4'])
        # Missing option in an existing section vs. missing section.
        section, options = self.ini.get_option('Network', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Network', 'Test', None)
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, None, None, None)
        # set_option creates the section, overwrites values, and stringifies ints.
        self.ini.set_option('Section', 'Test', 'value')
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', 'value')
        self.ini.set_option('Section', 'Test', 'different')
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', 'different')
        self.ini.set_option('Section', 'Test', 1234)
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', '1234')
        self.ini.remove_option('Section', 'Test')
        section, option = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, option, 'Section', 'Test', None)
        # List values: set, shrink, replace with scalar, and back to a list.
        value = ['one', 'two', 'three']
        self.ini.set_option('Section', 'Test', value)
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', value)
        value = ['four', 'five']
        self.ini.set_option('Section', 'Test', value)
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', value)
        self.ini.set_option('Section', 'Test', 'value')
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', 'value')
        value = ['one', 'two', 'three']
        self.ini.set_option('Section', 'Test', value)
        section, options = self.ini.get_option('Section', 'Test')
        assert 0 == check_option(self.test_info, section, options, 'Section', 'Test', value)
# =========================================================================
# test_20_str
# =========================================================================
def test_20_str(self):
assert self.is_setup
print('')
value = self.ini.get_str('Section', 'TestStr')
assert 0 == check_str(self.test_info, value, 'Section', 'TestStr', None)
self.ini.set_str('Section', 'TestStr', 'value')
value = self.ini.get_str('Section', 'TestStr')
assert 0 == check_str(self.test_info, value, 'Section', 'TestStr', 'value')
self.ini.set_str('Section', 'TestStr', 'junk')
value = self.ini.get_str('Section', 'TestStr')
assert 0 == check_str(self.test_info, value, 'Section', 'TestStr', 'junk')
self.ini.set_str('Section', 'TestStr', '')
value = self.ini.get_str('Section', 'TestStr')
assert 0 == check_str(self.test_info, value, 'Section', 'TestStr', '')
self.ini.set_str('Section', 'TestStr', None)
value = self.ini.get_str('Section', 'TestStr')
assert 0 == check_str(self.test_info, value, 'Section', 'TestStr', None)
# =========================================================================
# test_25_bool
# =========================================================================
def test_25_bool(self):
    """Exercise boolean option storage and the string/bool conversions."""
    assert self.is_setup
    print('')
    # An option that was never written reads back as None.
    fetched = self.ini.get_bool('Section', 'TestBool')
    assert 0 == check_str(self.test_info, fetched, 'Section', 'TestBool', None)
    # Booleans are persisted as the strings 'yes' / 'no'.
    for flag, stored in ((True, 'yes'), (False, 'no')):
        self.ini.set_bool('Section', 'TestBool', flag)
        fetched = self.ini.get_str('Section', 'TestBool')
        assert 0 == check_str(self.test_info, fetched, 'Section', 'TestBool', stored)
    # Truthy spellings parse as True; anything unrecognized parses as False.
    for text, parsed in (('True', 'True'),
                         ('Yes', 'True'),
                         ('YES', 'True'),
                         ('On', 'True'),
                         ('1', 'True'),
                         ('junk', 'False')):
        self.ini.set_str('Section', 'TestBool', text)
        fetched = self.ini.get_bool('Section', 'TestBool')
        assert 0 == check_str(self.test_info, str(fetched), 'Section', 'TestBool', parsed)
    # Storing None clears the option again.
    self.ini.set_bool('Section', 'TestBool', None)
    fetched = self.ini.get_str('Section', 'TestBool')
    assert 0 == check_str(self.test_info, fetched, 'Section', 'TestBool', None)
# =========================================================================
# test_30_int
# =========================================================================
def test_30_int(self):
assert self.is_setup
print('')
value = self.ini.get_int('Section', 'TestInt')
assert 0 == check_str(self.test_info, value, 'Section', 'TestInt', None)
self.ini.set_int('Section', 'TestInt', 0)
value = self.ini.get_str('Section', 'TestInt')
assert 0 == check_str(self.test_info, value, 'Section', 'TestInt', | |
#########################################################################################
#########################################################################################
#########################################################################################
import numpy as np
from math import pi, sin, cos
# 2.1 DIMENSIONAL LIMITS AND CONSIDERATIONS
def sec2_1_1_c1(condition, w, t, stiff_type='SL', D=0.0):
    '''Maximum flat-width-to-thickness ratio (ASCE 8, clause 2.1.1-1).

    Parameters
    ----------
    condition : str
        Case of clause 2.1.1-1 to apply:
        'i'  : stiffened element with one edge stiffener (Is > Ia).
        'ii' : stiffened element stiffened along both edges (webs).
        'iii': unstiffened element, or Is < Ia.
    w : float
        Flat width of the element.
    t : float
        Thickness of the element.
    stiff_type : str
        'SL' for a simple-lip stiffener, 'OTHER' for any other stiffener
        type (only meaningful for condition 'i').
    D : float
        Overall lip depth; required when ``stiff_type == 'OTHER'``.

    Returns
    -------
    ratio_adm_1 : float
        Maximum admissible flat-width-to-thickness ratio.
    midC : dict
        Intermediate results: always 'ratio_1' (w/t); additionally
        'ratio_lip' (D/w) when ``stiff_type == 'OTHER'``.

    Raises
    ------
    Exception
        If ``condition`` or ``stiff_type`` is not recognized, or if
        D/w >= 0.8 when ``stiff_type == 'OTHER'``.
    '''
    ratio_1 = w / t
    midC = {'ratio_1': ratio_1}
    if condition == 'i':
        if stiff_type == 'SL':
            ratio_adm_1 = 50
        elif stiff_type == 'OTHER':
            ratio_lip = D / w
            # BUGFIX: update midC instead of replacing it, so 'ratio_1'
            # is reported for this path as well.
            midC['ratio_lip'] = ratio_lip
            if ratio_lip < 0.8:
                ratio_adm_1 = 90
            else:
                # BUGFIX: the original message wrongly reported an
                # unrecognized condition; the actual failure is D/w >= 0.8.
                print('Clausula 2.1.1-1. Se requiere D/w < 0.8. D/w =', ratio_lip)
                raise Exception('>> Analisis abortado <<')
        else:
            print('Clausula 2.1.1-1. No se reconone el tipo de rigidizador:', stiff_type)
            raise Exception('>> Analisis abortado <<')
    elif condition == 'ii':
        ratio_adm_1 = 400
    elif condition == 'iii':
        ratio_adm_1 = 50
    else:
        print('Clausula 2.1.1-1. No se reconone la condicion:', condition)
        raise Exception('>> Analisis abortado <<')
    return ratio_adm_1, midC
def sec2_1_1_c3(L, wf):
    '''Shear-lag check for unusually short spans carrying concentrated loads
    (ASCE 8, clause 2.1.1-3).

    Parameters
    ----------
    L : float
        Span of the member (see the definition in the ASCE-8 code).
    wf : float
        Flange width projected beyond the web.

    Returns
    -------
    ratio_3 : float
        Maximum allowed ratio of effective design width to actual width
        for any flange, in compression or tension.
    midC : dict
        Intermediate results (none for this clause).
    '''
    # Short spans (L < 30*wf) are penalized via TABLE 1; otherwise no
    # reduction applies.
    ratio_3 = TABLE1(L, wf) if L < 30 * wf else 1.0
    return ratio_3, {}
def TABLE1(L, wf):
    '''TABLE 1 of ASCE 8: maximum allowable ratio of effective design
    width to actual width for short, wide flanges.

    Parameters
    ----------
    L : float
        Member length.
    wf : float
        Flange width projected beyond the web.

    Returns
    -------
    r : float
        Allowed ratio of effective design width to actual width,
        linearly interpolated on L/wf (clamped at the table ends).

    Tests
    -----
    >>> round(TABLE1(L=170, wf=10), 3)
    0.875
    '''
    # Table rows in ascending L/wf order, as numpy.interp requires.
    length_ratios = [6, 8, 10, 12, 14, 16, 18, 20, 25, 30]
    width_ratios = [0.55, 0.67, 0.73, 0.78, 0.82, 0.86, 0.89, 0.91, 0.96, 1.00]
    return np.interp(L / wf, length_ratios, width_ratios)
def sec2_1_2(h, t, reinforced='NO', condition='i'):
    '''Maximum web depth-to-thickness ratio (ASCE 8, clause 2.1.2).

    Parameters
    ----------
    h : float
        Flat depth of the web measured in its own plane.
    t : float
        Web thickness.
    reinforced : str
        'NO' for an unreinforced web (clause 2.1.2-1), 'YES' for a web
        with stiffeners (clause 2.1.2-2).
    condition : str
        'i' or 'ii', selecting the sub-case of clause 2.1.2-2; only used
        when ``reinforced == 'YES'``.

    Returns
    -------
    ratio_adm : float
        Maximum admissible depth-to-thickness ratio.
    midC : dict
        Intermediate results: 'ratio h/t'.

    Raises
    ------
    ValueError
        If ``reinforced`` or ``condition`` is not recognized.

    Tests
    -----
    >>> ratio_adm, midC = sec2_1_2(h=300, t=5, reinforced='YES', condition='i')
    >>> print('ratio_adm: {:{fmt}} | ratio: {m[ratio h/t]:{fmt}}'.format(ratio_adm, m = midC, fmt = '.2f'))
    ratio_adm: 260.00 | ratio: 60.00
    '''
    ratio = h / t
    # BUGFIX: unrecognized arguments previously fell through and raised
    # an opaque UnboundLocalError on return; reject them explicitly.
    if reinforced == 'NO':
        ratio_adm = 200
    elif reinforced == 'YES':
        if condition == 'i':
            ratio_adm = 260
        elif condition == 'ii':
            ratio_adm = 300
        else:
            raise ValueError('Clausula 2.1.2. No se reconoce la condicion: {}'.format(condition))
    else:
        raise ValueError('Clausula 2.1.2. No se reconoce el refuerzo: {}'.format(reinforced))
    midC = {'ratio h/t': ratio}
    return ratio_adm, midC
# 2.2 EFFECTIVE WIDTH OF STIFFENED ELEMENTS
def sec2_2_1(w, t, f, E, k=4):
    '''Effective width of a uniformly compressed stiffened element
    (ASCE 8, clause 2.2.1), for load-capacity or deflection checks.

    Parameters
    ----------
    w : float
        Flat width of the element (see figure 3, ASCE 8).
    t : float
        Element thickness.
    f : float
        Stress acting on the element.
    E : float
        Modulus of elasticity.
    k : float
        Plate buckling coefficient for the element under consideration.

    Returns
    -------
    b_eff : float
        Effective width of the stiffened element under uniform compression.
    midC : dict
        Intermediate results: 'esbeltez' (slenderness) and 'rho'
        (width-reduction factor).
    '''
    slenderness = E_2_2_1_e4(w=w, t=t, f=f, E=E, k=k)
    reduction = E_2_2_1_e3(esbeltez=slenderness)
    return w * reduction, {'esbeltez': slenderness, 'rho': reduction}
def E_2_2_1_e3(esbeltez):
    '''Width-reduction factor rho of equation 2.2.1-3.

    Parameters
    ----------
    esbeltez : float
        Element slenderness per equation 2.2.1-4.

    Returns
    -------
    rho : float
        Effective-width reduction factor; 1.0 for stocky elements.

    Tests
    -----
    >>> round(E_2_2_1_e3(esbeltez=0.83), 2)
    0.89
    '''
    # Elements at or below the 0.673 slenderness limit are fully effective.
    if esbeltez > 0.673:
        return (1 - 0.22 / esbeltez) / esbeltez
    return 1.0
def E_2_2_1_e4(w, t, f, E, k):
    '''Slenderness factor of equation 2.2.1-4.

    Parameters
    ----------
    w : float
        Flat width of the element (see figure 3, ASCE 8).
    t : float
        Element thickness.
    f : float
        Stress acting on the element.
    E : float
        Modulus of elasticity.
    k : float
        Plate buckling coefficient for the element under consideration.

    Returns
    -------
    esbeltez : float
        Slenderness of the element under consideration.

    Tests
    -----
    >>> round(E_2_2_1_e4(w= 50, t= 1, k=4, f= 200, E= 200e3), 2)
    0.83
    '''
    flat_ratio = w / t
    stress_ratio = (f / E) ** 0.5
    return (1.052 / k ** 0.5) * flat_ratio * stress_ratio
def sec2_2_2(w, t, f1, f2, E0, k=4):
    '''Effective widths of webs and stiffened elements under a stress
    gradient (ASCE 8, clause 2.2.2), for load-capacity or deflection checks.

    Parameters
    ----------
    w : float
        Web depth or flat width of the element (see figure 3, ASCE 8).
    t : float
        Thickness of the web or element.
    f1 : float
        Maximum compressive stress on the element (figure 2, ASCE 8);
        compression positive, tension negative.
    f2 : float
        Minimum stress on the element (tension or compression).
    E0 : float
        Initial modulus of elasticity.
    k : float
        NOTE(review): this argument is ignored — the clause mandates the
        plate buckling coefficient computed from psi below, which
        overwrites it. Kept only for signature compatibility.

    Returns
    -------
    b_eff_1 : float
        Effective segment measured from the neutral axis (figure 2, ASCE 8).
    b_eff_2 : float
        Effective segment measured from the compressed edge of the element.
    midC : dict
        Intermediate results {b_e, k, psi}:
        b_e: effective width per 2.2.1 with the computed k and f1;
        k: plate buckling coefficient; psi: ratio f2/f1.

    Test
    ----
    Example 2.1 - C-section
    >>> b_eff_1, b_eff_2, midC = sec2_2_2(w=5.692, t=0.060, f1=47.48, f2=-45.77, E0=27000, k=4)
    >>> print('b1_eff: {:{fmt}} | b2_eff: {:{fmt}} | b_e: {m[b_e]:{fmt}} | k: {m[k]:{fmt}} | psi: {m[psi]:{fmt}}'.format(b_eff_1, b_eff_2, m = midC, fmt = '.3f'))
    b1_eff: 1.232 | b2_eff: 2.442 | b_e: 4.884 | k: 23.079 | psi: -0.964
    '''
    psi = f2 / f1
    # Plate buckling coefficient per clause 2.2.2 (replaces the k argument).
    k = 4 + 2 * (1 - psi) ** 3 + 2 * (1 - psi)
    b_e, _ = sec2_2_1(w=w, t=t, f=f1, E=E0, k=k)
    b_eff_1 = b_e / (3 - psi)
    b_eff_2 = b_e / 2 if psi <= -0.236 else b_e - b_eff_1
    return b_eff_1, b_eff_2, {'b_e': b_e, 'k': k, 'psi': psi}
# 2.3 EFFECTIVE WIDTH OF UNSTIFFENED ELEMENTS
def sec2_3_1(w, t, f, E, k = 0.5):
'''Uniformly Compressed Unstiffened Elements. Load Capacity Determination or Deflection Determination.
Parameters
----------
w: float,
ancho plano del elemento (ver figura 3 - ASCE 8).
t: float,
espsesor del elemento.
f: float
tension sobre el elemento (ver figura 3 - ASCE 8).
E: float,
modulo de elasticidad.
k: float,
coeficiente de pandeo en placas para el elemento en consideracion.
Returns
-------
b : | |
AWS provider credential options to scope the cluster's kubeconfig authentication when using a non-default credential chain.
This is required for certain auth scenarios. For example:
- Creating and using a new AWS provider instance, or
- Setting the AWS_PROFILE environment variable, or
- Using a named profile configured on the AWS provider via:
`pulumi config set aws:profile <profileName>`
See for more details:
- https://www.pulumi.com/docs/reference/pkg/nodejs/pulumi/aws/#Provider
- https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/
- https://www.pulumi.com/docs/intro/cloud-providers/aws/#configuration
- https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
"""
return pulumi.get(self, "provider_credential_opts")
# --- Generated Pulumi input-property accessors (getter/setter pairs). ---
# Each getter returns the stored input via pulumi.get; each setter stores
# the (possibly still-unresolved) pulumi.Input value via pulumi.set.
@provider_credential_opts.setter
def provider_credential_opts(self, value: Optional[pulumi.Input['KubeconfigOptionsArgs']]):
    pulumi.set(self, "provider_credential_opts", value)

@property
@pulumi.getter
def proxy(self) -> Optional[pulumi.Input[str]]:
    """
    The HTTP(S) proxy to use within a proxied environment.
    The proxy is used during cluster creation, and OIDC configuration.
    This is an alternative option to setting the proxy environment variables: HTTP(S)_PROXY and/or http(s)_proxy.
    This option is required iff the proxy environment variables are not set.
    Format: <protocol>://<host>:<port>
    Auth Format: <protocol>://<username>:<password>@<host>:<port>
    Ex:
    - "http://proxy.example.com:3128"
    - "https://proxy.example.com"
    - "http://username:password@proxy.example.com:3128"
    """
    return pulumi.get(self, "proxy")

@proxy.setter
def proxy(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "proxy", value)

@property
@pulumi.getter(name="publicAccessCidrs")
def public_access_cidrs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Indicates which CIDR blocks can access the Amazon EKS public API server endpoint.
    """
    return pulumi.get(self, "public_access_cidrs")

@public_access_cidrs.setter
def public_access_cidrs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "public_access_cidrs", value)

@property
@pulumi.getter(name="publicSubnetIds")
def public_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The set of public subnets to use for the worker node groups on the EKS cluster. These subnets are automatically tagged by EKS for Kubernetes purposes.
    If `vpcId` is not set, the cluster will use the AWS account's default VPC subnets.
    Worker network architecture options:
    - Private-only: Only set `privateSubnetIds`.
    - Default workers to run in a private subnet. In this setting, Kubernetes cannot create public, internet-facing load balancers for your pods.
    - Public-only: Only set `publicSubnetIds`.
    - Default workers to run in a public subnet.
    - Mixed (recommended): Set both `privateSubnetIds` and `publicSubnetIds`.
    - Default all worker nodes to run in private subnets, and use the public subnets for internet-facing load balancers.
    See for more details: https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html.Note: The use of `subnetIds`, along with `publicSubnetIds` and/or `privateSubnetIds` is mutually exclusive. The use of `publicSubnetIds` and `privateSubnetIds` is encouraged.
    """
    return pulumi.get(self, "public_subnet_ids")

@public_subnet_ids.setter
def public_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "public_subnet_ids", value)

@property
@pulumi.getter(name="roleMappings")
def role_mappings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RoleMappingArgs']]]]:
    """
    Optional mappings from AWS IAM roles to Kubernetes users and groups.
    """
    return pulumi.get(self, "role_mappings")

@role_mappings.setter
def role_mappings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RoleMappingArgs']]]]):
    pulumi.set(self, "role_mappings", value)

@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> Optional[pulumi.Input['pulumi_aws.iam.Role']]:
    """
    IAM Service Role for EKS to use to manage the cluster.
    """
    return pulumi.get(self, "service_role")

@service_role.setter
def service_role(self, value: Optional[pulumi.Input['pulumi_aws.iam.Role']]):
    pulumi.set(self, "service_role", value)

@property
@pulumi.getter(name="skipDefaultNodeGroup")
def skip_default_node_group(self) -> Optional[pulumi.Input[bool]]:
    """
    If this toggle is set to true, the EKS cluster will be created without node group attached. Defaults to false, unless `fargate` input is provided.
    """
    return pulumi.get(self, "skip_default_node_group")

@skip_default_node_group.setter
def skip_default_node_group(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "skip_default_node_group", value)

@property
@pulumi.getter(name="storageClasses")
def storage_classes(self) -> Optional[pulumi.Input[Union[str, Mapping[str, pulumi.Input['StorageClassArgs']]]]]:
    """
    An optional set of StorageClasses to enable for the cluster. If this is a single volume type rather than a map, a single StorageClass will be created for that volume type.
    Note: As of Kubernetes v1.11+ on EKS, a default `gp2` storage class will always be created automatically for the cluster by the EKS service. See https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html
    """
    return pulumi.get(self, "storage_classes")

@storage_classes.setter
def storage_classes(self, value: Optional[pulumi.Input[Union[str, Mapping[str, pulumi.Input['StorageClassArgs']]]]]):
    pulumi.set(self, "storage_classes", value)

@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The set of all subnets, public and private, to use for the worker node groups on the EKS cluster. These subnets are automatically tagged by EKS for Kubernetes purposes.
    If `vpcId` is not set, the cluster will use the AWS account's default VPC subnets.
    If the list of subnets includes both public and private subnets, the worker nodes will only be attached to the private subnets, and the public subnets will be used for internet-facing load balancers.
    See for more details: https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html.
    Note: The use of `subnetIds`, along with `publicSubnetIds` and/or `privateSubnetIds` is mutually exclusive. The use of `publicSubnetIds` and `privateSubnetIds` is encouraged.
    """
    return pulumi.get(self, "subnet_ids")

@subnet_ids.setter
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "subnet_ids", value)

@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    Key-value mapping of tags that are automatically applied to all AWS resources directly under management with this cluster, which support tagging.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "tags", value)

@property
@pulumi.getter(name="useDefaultVpcCni")
def use_default_vpc_cni(self) -> Optional[pulumi.Input[bool]]:
    """
    Use the default VPC CNI instead of creating a custom one. Should not be used in conjunction with `vpcCniOptions`.
    """
    return pulumi.get(self, "use_default_vpc_cni")

@use_default_vpc_cni.setter
def use_default_vpc_cni(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "use_default_vpc_cni", value)

@property
@pulumi.getter(name="userMappings")
def user_mappings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserMappingArgs']]]]:
    """
    Optional mappings from AWS IAM users to Kubernetes users and groups.
    """
    return pulumi.get(self, "user_mappings")

@user_mappings.setter
def user_mappings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserMappingArgs']]]]):
    pulumi.set(self, "user_mappings", value)

@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
    """
    Desired Kubernetes master / control plane version. If you do not specify a value, the latest available version is used.
    """
    return pulumi.get(self, "version")

@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "version", value)

@property
@pulumi.getter(name="vpcCniOptions")
def vpc_cni_options(self) -> Optional[pulumi.Input['VpcCniOptionsArgs']]:
    """
    The configuration of the Amazon VPC CNI plugin for this instance. Defaults are described in the documentation for the VpcCniOptions type.
    """
    return pulumi.get(self, "vpc_cni_options")

@vpc_cni_options.setter
def vpc_cni_options(self, value: Optional[pulumi.Input['VpcCniOptionsArgs']]):
    pulumi.set(self, "vpc_cni_options", value)

@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> Optional[pulumi.Input[str]]:
    """
    The VPC in which to create the cluster and its worker nodes. If unset, the cluster will be created in the default VPC.
    """
    return pulumi.get(self, "vpc_id")

@vpc_id.setter
def vpc_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "vpc_id", value)
class Cluster(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_security_group: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']] = None,
cluster_security_group_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
cluster_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
create_oidc_provider: Optional[pulumi.Input[bool]] = None,
creation_role_provider: Optional[pulumi.Input[pulumi.InputType['CreationRoleProviderArgs']]] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
enabled_cluster_log_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
encrypt_root_block_device: Optional[pulumi.Input[bool]] = None,
encryption_config_key_arn: Optional[pulumi.Input[str]] = None,
endpoint_private_access: Optional[pulumi.Input[bool]] = None,
endpoint_public_access: Optional[pulumi.Input[bool]] = None,
fargate: Optional[pulumi.Input[Union[bool, pulumi.InputType['FargateProfileArgs']]]] = None,
gpu: Optional[pulumi.Input[bool]] = None,
instance_profile_name: Optional[pulumi.Input[str]] = None,
instance_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
instance_roles: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.iam.Role']]]] = None,
instance_type: Optional[pulumi.Input[str]] = None,
kubernetes_service_ip_address_range: Optional[pulumi.Input[str]] = None,
max_size: Optional[pulumi.Input[int]] = None,
min_size: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
node_ami_id: Optional[pulumi.Input[str]] = None,
node_associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
node_group_options: Optional[pulumi.Input[pulumi.InputType['ClusterNodeGroupOptionsArgs']]] = None,
node_public_key: Optional[pulumi.Input[str]] = None,
node_root_volume_size: Optional[pulumi.Input[int]] = None,
node_security_group_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
node_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_user_data: Optional[pulumi.Input[str]] = None,
private_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
provider_credential_opts: Optional[pulumi.Input[pulumi.InputType['KubeconfigOptionsArgs']]] = None,
proxy: Optional[pulumi.Input[str]] = None,
public_access_cidrs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
public_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
role_mappings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RoleMappingArgs']]]]] = None,
service_role: Optional[pulumi.Input['pulumi_aws.iam.Role']] = None,
skip_default_node_group: Optional[pulumi.Input[bool]] = None,
storage_classes: Optional[pulumi.Input[Union[str, Mapping[str, pulumi.Input[pulumi.InputType['StorageClassArgs']]]]]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
use_default_vpc_cni: Optional[pulumi.Input[bool]] = None,
user_mappings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserMappingArgs']]]]] = None,
version: Optional[pulumi.Input[str]] = None,
vpc_cni_options: Optional[pulumi.Input[pulumi.InputType['VpcCniOptionsArgs']]] = None,
vpc_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Cluster is a component that wraps the AWS and Kubernetes resources necessary to run an EKS cluster, its worker nodes, its optional StorageClasses, and an optional deployment of the Kubernetes Dashboard.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input['pulumi_aws.ec2.SecurityGroup'] cluster_security_group: The security group to use for the cluster API endpoint. If not provided, a new security group will be created with full internet egress and ingress from node groups.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] cluster_security_group_tags: The tags to apply to the cluster security group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] cluster_tags: The tags to apply to the EKS cluster.
:param pulumi.Input[bool] create_oidc_provider: Indicates whether an IAM OIDC Provider is created for the EKS cluster.
The OIDC provider is used in the cluster in combination with k8s Service Account annotations to provide IAM roles at the k8s Pod level.
See for more details:
- https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
- https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html
- https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/
- https://www.pulumi.com/docs/reference/pkg/nodejs/pulumi/aws/eks/#enabling-iam-roles-for-service-accounts
:param pulumi.Input[pulumi.InputType['CreationRoleProviderArgs']] creation_role_provider: The IAM Role Provider used to create & authenticate against the EKS cluster. This role is given `[system:masters]` permission in K8S, See: https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
:param pulumi.Input[int] desired_capacity: The number of worker nodes that should be running in the cluster. | |
#!/usr/bin/python
#
# Copyright (c) 2016 <NAME>, <<EMAIL>>
# <NAME>, <<EMAIL>>
# Copyright (c) 2018 <NAME>, III (@jeking3) <<EMAIL>>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: declares this module as a community-supported
# preview under metadata schema version 1.1.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine
version_added: "2.1"
short_description: Manage Azure virtual machines
description:
- Manage and configure virtual machines (VMs) and associated resources on Azure.
- Requires a resource group containing at least one virtual network with at least one subnet.
- Supports images from the Azure Marketplace, which can be discovered with M(azure_rm_virtualmachineimage_facts).
- Supports custom images since Ansible 2.5.
- To use I(custom_data) on a Linux image, the image must have cloud-init enabled. If cloud-init is not enabled, I(custom_data) is ignored.
options:
resource_group:
description:
- Name of the resource group containing the VM.
required: true
name:
description:
- Name of the VM.
required: true
custom_data:
description:
- Data made available to the VM and used by C(cloud-init).
- Only used on Linux images with C(cloud-init) enabled.
- Consult U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview) for cloud-init ready images.
- To enable cloud-init on a Linux image, follow U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
version_added: "2.5"
state:
description:
- State of the VM.
- Set to C(present) to create a VM with the configuration specified by other options, or to update the configuration of an existing VM.
- Set to C(absent) to remove a VM.
- Does not affect power state. Use I(started)/I(allocated)/I(restarted) parameters to change the power state of a VM.
default: present
choices:
- absent
- present
started:
description:
- Whether the VM is started or stopped.
- Set to (true) with I(state=present) to start the VM.
- Set to C(false) to stop the VM.
default: true
type: bool
allocated:
description:
- Whether the VM is allocated or deallocated, only useful with I(state=present).
default: True
type: bool
generalized:
description:
- Whether the VM is generalized or not.
- Set to C(true) with I(state=present) to generalize the VM.
- Generalizing a VM is irreversible.
type: bool
version_added: "2.8"
restarted:
description:
- Set to C(true) with I(state=present) to restart a running VM.
type: bool
location:
description:
- Valid Azure location for the VM. Defaults to location of the resource group.
short_hostname:
description:
- Name assigned internally to the host. On a Linux VM this is the name returned by the C(hostname) command.
- When creating a VM, short_hostname defaults to I(name).
vm_size:
description:
- A valid Azure VM size value. For example, C(Standard_D4).
- Choices vary depending on the subscription and location. Check your subscription for available choices.
- Required when creating a VM.
admin_username:
description:
- Admin username used to access the VM after it is created.
- Required when creating a VM.
admin_password:
description:
- Password for the admin username.
- Not required if the I(os_type=Linux) and SSH password authentication is disabled by setting I(ssh_password_enabled=false).
ssh_password_enabled:
description:
- Whether to enable or disable SSH passwords.
- When I(os_type=Linux), set to C(false) to disable SSH password authentication and require use of SSH keys.
default: true
type: bool
ssh_public_keys:
description:
- For I(os_type=Linux) provide a list of SSH keys.
- Accepts a list of dicts where each dictionary contains two keys, I(path) and I(key_data).
- Set I(path) to the default location of the authorized_keys files. For example, I(path=/home/<admin username>/.ssh/authorized_keys).
- Set I(key_data) to the actual value of the public key.
image:
description:
- The image used to build the VM.
- For custom images, the name of the image. To narrow the search to a specific resource group, a dict with the keys I(name) and I(resource_group).
- For Marketplace images, a dict with the keys I(publisher), I(offer), I(sku), and I(version).
- Set I(version=latest) to get the most recent version of a given image.
required: true
availability_set:
description:
- Name or ID of an existing availability set to add the VM to. The I(availability_set) should be in the same resource group as VM.
version_added: "2.5"
storage_account_name:
description:
- Name of a storage account that supports creation of VHD blobs.
- If not specified for a new VM, a new storage account named <vm name>01 will be created using storage type C(Standard_LRS).
aliases:
- storage_account
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs.
- If not specified, a default container will be created.
default: vhds
aliases:
- storage_container
storage_blob_name:
description:
- Name of the storage blob used to hold the OS disk image of the VM.
- Must end with '.vhd'.
- If not specified, defaults to the VM name + '.vhd'.
aliases:
- storage_blob
managed_disk_type:
description:
- Managed OS disk type.
- Create OS disk with managed disk if defined.
- If not defined, the OS disk will be created with virtual hard disk (VHD).
choices:
- Standard_LRS
- StandardSSD_LRS
- Premium_LRS
version_added: "2.4"
os_disk_name:
description:
- OS disk name.
version_added: "2.8"
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
aliases:
- disk_caching
os_disk_size_gb:
description:
- Type of OS disk size in GB.
version_added: "2.7"
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
data_disks:
description:
- Describes list of data disks.
- Use M(azure_rm_mangeddisk) to manage the specific disk.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk.
- This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
required: true
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks.
- This value cannot be larger than C(1023) GB.
- Size can be changed only when the virtual machine is deallocated.
- Not sure when I(managed_disk_id) defined.
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type.
- Only used when OS disk created with managed disk.
choices:
- Standard_LRS
- StandardSSD_LRS
- Premium_LRS
version_added: "2.4"
storage_account_name:
description:
- Name of an existing storage account that supports creation of VHD blobs.
- If not specified for a new VM, a new storage account started with I(name) will be created using storage type C(Standard_LRS).
- Only used when OS disk created with virtual hard disk (VHD).
- Used when I(managed_disk_type) not defined.
- Cannot be updated unless I(lun) updated.
version_added: "2.4"
storage_container_name:
description:
- Name of the container to use within the storage account to store VHD blobs.
- If no name is specified a default container named 'vhds' will created.
- Only used when OS disk created with virtual hard disk (VHD).
- Used when I(managed_disk_type) not defined.
- Cannot be updated unless I(lun) updated.
default: vhds
version_added: "2.4"
storage_blob_name:
description:
- Name of the storage blob used to hold the OS disk image of the VM.
- Must end with '.vhd'.
- Default to the I(name) + timestamp + I(lun) + '.vhd'.
- Only used when OS disk created with virtual hard disk (VHD).
- Used when I(managed_disk_type) not defined.
- Cannot be updated unless I(lun) updated.
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
public_ip_allocation_method:
description:
- Allocation method for the public IP of the VM.
- Used only if a network interface is not specified.
- When set to C(Dynamic), the public IP address may change any time the VM is rebooted or power cycled.
- The C(Disabled) choice was added in Ansible 2.6.
choices:
- Dynamic
- Static
- Disabled
default: Static
aliases:
- public_ip_allocation
open_ports:
description:
- List of ports to open in the security group for the VM, when a security group and network interface are created with a VM.
- For Linux hosts, defaults to allowing inbound TCP connections to port 22.
- For Windows hosts, defaults to opening ports 3389 and 5986.
network_interface_names:
description:
- | |
"""Embedded DSL for assembling logic circuits.
Embedded domain-specific combinator library for
assembling abstract definitions of logic circuits
and synthesizing circuits from those definitions.
"""
from __future__ import annotations
from typing import Sequence
import doctest
from parts import parts
from circuit import op, gate, circuit, signature
class bit():
    """
    Class for representing an abstract bit. Such a bit
    can be interpreted concretely as a value, but it is
    also used to keep track of relationships between
    operators and to represent the wires within a
    circuit built up out of those operators.

    >>> bit.hook_operation(lambda o, v, *args: None)
    >>> bit.circuit(circuit())
    >>> b = output(input(1).and_(input(1)))
    >>> b.value == bit.circuit().evaluate([1,1])[0]
    True
    >>> def make_hook(bit_):
    ...     def hook(o, v, *args):
    ...         return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args]))
    ...     return hook
    >>> bit.hook_operation(make_hook(bit))
    >>> bit.circuit(circuit())
    >>> b = output(input(0).and_(input(0)))
    >>> b.value == bit.circuit().evaluate([0,0])[0]
    True
    """
    # Shared circuit currently being assembled; set/retrieved via bit.circuit().
    _circuit = None
    # Optional global hook invoked on every operation (see hook_operation()).
    _hook_operation = None
@staticmethod
def circuit(circuit_=None):
    """Install a new active circuit, or finalize and return the current one.

    With an argument, the supplied circuit becomes the shared build target
    and None is returned. With no argument, the current circuit is pruned,
    topologically sorted, and returned.
    """
    if circuit_ is None:
        bit._circuit.prune_and_topological_sort_stable()
        return bit._circuit
    bit._circuit = circuit_
    return None
@staticmethod
def hook_operation(hook=None):
    """Install (or clear, by passing None) the global hook that
    `bit.operation` consults before constructing its result."""
    bit._hook_operation = hook
@staticmethod
def operation(o, *args):
    """Apply operator `o` to the argument bits and return the result bit.

    A bare integer second operand is promoted to a constant bit. If an
    operation hook is installed and returns a non-None value, that value
    is used as the result instead of the default construction.
    """
    operands = list(args)
    # Ensure a two-argument call has a `bit` (not an int) as its second operand.
    if len(operands) == 2 and isinstance(operands[1], int):
        operands[1] = constant(operands[1])

    # Concrete value of the operation applied to the operand values.
    value = o(*(a.value for a in operands))

    # Give the installed hook (if any) a chance to supply the result.
    hook = bit._hook_operation
    if hook is not None:
        hooked = hook(o, value, *operands)
        if hooked is not None:
            return hooked

    input_gates = [a.gate for a in operands]
    return bit.constructor(*operands)(value, bit.gate(o, input_gates))
@staticmethod
def constructor(b1, b2=None):
    """Choose the class used to build a result bit (currently always `bit`)."""
    # The inference code below is not currently in use.
    """
    if isinstance(b1, input_one) and isinstance(b2, input_one):
        return input_one
    elif isinstance(b1, input_two) and isinstance(b2, input_two):
        return input_two
    elif isinstance(b1, (input_one, input_two)) and b2 is None:
        return type(b1)
    else:
        return bit
    """
    return bit
@staticmethod
def gate(operation, igs):
    """Add a gate for `operation` with input gates `igs` to the active circuit."""
    # Note: instances also carry a `self.gate` attribute (a gate object),
    # distinct from this static helper.
    return bit._circuit.gate(operation, igs)
def __init__(self, value, gate_=None):
    """Wrap a concrete bit `value` and its circuit wire `gate_`.

    When `gate_` is omitted, a fresh gate is allocated from the shared circuit.
    """
    self.value = value
    self.gate = bit._circuit.gate() if gate_ is None else gate_
def __int__(self):
    """Return the concrete 0/1 value of this bit."""
    return self.value
def not_(self):
    """Logical negation (NOT) of this bit, as a new bit.

    >>> results = []
    >>> for x in [0, 1]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).not_())
    ...     results.append(int(b) == bit.circuit().evaluate([x])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.not_, self)
def __invert__(self):
    """Operator ``~``: logical negation (NOT).

    >>> results = []
    >>> for x in [0, 1]:
    ...     bit.circuit(circuit())
    ...     b = output(~input(x))
    ...     results.append(int(b) == bit.circuit().evaluate([x])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.not_, self)
def __rsub__(self, other):
    """Reflected subtraction: ``1 - b`` is logical negation of ``b``.

    >>> results = []
    >>> for x in [0, 1]:
    ...     bit.circuit(circuit())
    ...     b = output(1 - input(x))
    ...     results.append(int(b) == bit.circuit().evaluate([x])[0])
    >>> all(results)
    True
    >>> bit.circuit(circuit())
    >>> 2 - input(0)
    Traceback (most recent call last):
      ...
    ValueError: can only subtract a bit from the integer 1
    """
    # Guard clause: only `1 - bit` is meaningful.
    if other != 1:
        raise ValueError('can only subtract a bit from the integer 1')
    return bit.operation(op.not_, self)
def and_(self, other):
    """Conjunction (AND) of this bit and `other`, as a new bit.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).and_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.and_, self, other)
def __and__(self, other):
    """Operator ``&``: conjunction (AND).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) & input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.and_, self, other)
def __rand__(self, other):
    """Reflected ``&``: allows an integer left operand, e.g. ``0 & b``.

    >>> bit.circuit(circuit())
    >>> b = 0 & constant(1)
    >>> b.value
    0
    """
    # Promote an integer left operand to a constant bit first.
    if isinstance(other, int):
        other = constant(other)
    return self & other
def nimp(self, other):
    """Non-implication (op.nimp_) of this bit and `other`; aliased by ``>``.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nimp(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nimp_, self, other)
def nimp_(self, other):
    """Non-implication (op.nimp_); synonym of :meth:`nimp`.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nimp_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nimp_, self, other)
def __gt__(self, other):
    """Operator ``>``: alias for :meth:`nimp` (non-implication).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) > input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return self.nimp(other)
def nif(self, other):
    """Converse non-implication (op.nif_) of this bit and `other`; aliased by ``<``.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nif(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nif_, self, other)
def nif_(self, other):
    """Converse non-implication (op.nif_); synonym of :meth:`nif`.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nif_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nif_, self, other)
def __lt__(self, other):
    """Operator ``<``: alias for :meth:`nif` (converse non-implication).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) < input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return self.nif(other)
def xor(self, other):
    """Exclusive disjunction (XOR) of this bit and `other`, as a new bit.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).xor(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xor_, self, other)
def xor_(self, other):
    """Exclusive disjunction (XOR); synonym of :meth:`xor`.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).xor_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xor_, self, other)
def __xor__(self, other):
    """Operator ``^``: exclusive disjunction (XOR).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) ^ input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xor_, self, other)
def __rxor__(self, other):
    """Reflected ``^``: allows an integer left operand, e.g. ``1 ^ b``.

    >>> bit.circuit(circuit())
    >>> b = 1 ^ constant(0)
    >>> b.value
    1
    """
    # Promote an integer left operand to a constant bit first.
    if isinstance(other, int):
        other = constant(other)
    return self ^ other
def or_(self, other):
    """Disjunction (OR) of this bit and `other`, as a new bit.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).or_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.or_, self, other)
def __or__(self, other):
    """Operator ``|``: disjunction (OR).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) | input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.or_, self, other)
def __ror__(self, other):
    """Reflected ``|``: allows an integer left operand, e.g. ``1 | b``.

    >>> bit.circuit(circuit())
    >>> b = 1 | constant(0)
    >>> b.value
    1
    """
    # Promote an integer left operand to a constant bit first.
    if isinstance(other, int):
        other = constant(other)
    return self | other
def nor(self, other):
    """Joint denial (NOR) of this bit and `other`, as a new bit.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nor(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nor_, self, other)
def nor_(self, other):
    """Joint denial (NOR); synonym of :meth:`nor`.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).nor_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nor_, self, other)
def __mod__(self, other):
    """Operator ``%``: joint denial (NOR).

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) % input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.nor_, self, other)
def xnor(self, other):
    """Biconditional (XNOR) of this bit and `other`, as a new bit.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).xnor(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xnor_, self, other)
def xnor_(self, other):
    """Biconditional (XNOR); synonym of :meth:`xnor`.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x).xnor_(input(y)))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xnor_, self, other)
def __eq__(self, other):
    """Operator ``==``: biconditional (XNOR).

    Returns a `bit`, not a bool — this intentionally repurposes equality
    for circuit construction.
    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable by default — confirm bits are never used as dict/set keys.

    >>> results = []
    >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    ...     bit.circuit(circuit())
    ...     b = output(input(x) == input(y))
    ...     results.append(int(b) == bit.circuit().evaluate([x,y])[0])
    >>> all(results)
    True
    """
    return bit.operation(op.xnor_, self, other)
def if_(self, other):
"""
>>> | |
import os.path
import numpy as np
import pandas as pd
from sklearn import linear_model, metrics
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib.backends.backend_pdf import PdfPages
class DataLoader:
    """Helpers to load and explore the ADNI MRI dataset and its demographics table."""

    MRI_FOLDER = 'MRI'                                  # MRI folder name
    MRI_DATASET = 'img_array_train_6k_'                 # training MRI package file stem
    MRI_VALIDSET = 'img_array_valid_6k_'                # validation MRI package file stem
    MRI_TESTSET = 'img_array_test_6k_'                  # test MRI package file stem
    MRI_DATASET_EXT = '.npy'                            # MRI package file extension
    IMAGES_PER_SCAN = 62                                # slices stored per MRI scan
    IMAGES_DIMENSION = 96                               # square slice side length, in pixels
    DEMO_MASTER = 'adni_demographic_master_kaggle.csv'  # CSV base lookup table
    FIGURE_FOLDER = 'figures'
    RUN_FOLDER = 'run'
    SAVE_FIGURE = True                                  # when True, figures are also saved to PDF
@staticmethod
def load_table(os_path='media/user/'):
    """Load the csv lookup table located on the usb key.

    File adni_demographic_master_kaggle.csv contains multiple fields.
    * train_valid_test : Field interpreted as Training 0, Validation 1, Test 2
    * only_date : MRI scan date, as format %Y%m%d (Year, month, day, e.g.: 20051021)
    * diagnosis : Field interpreted as : Normal 1, MCI (Mild Cognitive Impairment) 2, AD (Alzheimer's Disease) 3
    * sex : Field interpreted as : Female = 'F', Male = 'M'
    * age_at_scan : Age of the patient when MRI scan was performed

    Returns a pandas DataFrame, or None when the file cannot be opened.
    """
    # Build csv filename
    csv_filename = os.path.join(os_path, DataLoader.DEMO_MASTER)
    # Load CSV file to process subject data
    try:
        return pd.read_csv(csv_filename)
    except OSError:
        # Handle error in case csv_filename is not found
        print("ERROR - LOAD_FILE - No file", csv_filename, "found")
        return
@staticmethod
def load_MRI_dataset_part(os_path='media/user/', nbr=1):
""" Load part of dataset to display simple preview. nbr indicate the amount of packages
"""
# Check if nbr arg is positive and bigger than one
if nbr < 1:
print("ERROR - LOAD_MRI_DATASET - Bad argument nbr", nbr)
return
try:
# Build filename and load first file
strFileName = DataLoader.MRI_DATASET + str(1) + DataLoader.MRI_DATASET_EXT
pathMRI = os.path.join(os_path, DataLoader.MRI_FOLDER, strFileName)
print('Loading...', pathMRI)
data = np.load(pathMRI)
# Do same over all dataset with nmax = nbr
for i in range(nbr-1):
strFileName = DataLoader.MRI_DATASET + str(i+2) + DataLoader.MRI_DATASET_EXT
pathMRI = os.path.join(os_path, DataLoader.MRI_FOLDER, strFileName)
print('Loading...', pathMRI)
data = np.concatenate((data, np.load(pathMRI)), axis=0)
except OSError:
# Handle eror in case of csv_filename not found
print("ERROR - LOAD_MRI_DATASET - No file", pathMRI, "found")
return
# Must gave a integer value of patients, therefore uncomplete dataframe are dropped
Nfull = data.shape[0]//DataLoader.IMAGES_PER_SCAN
# Compute number of dropped images
Ndropped = data.shape[0] - Nfull*DataLoader.IMAGES_PER_SCAN
# Return only complete data (e.g. len(data) is a multiple of IMAGES_PER_SCAN)
data = data[0:Nfull*DataLoader.IMAGES_PER_SCAN, :, :]
print('Data loaded with shape=', data.shape, '\nNomber patients=', Nfull, '\nImages dropped=', Ndropped)
return data
@staticmethod
def load_MRI_dataset_part_per_patient(os_path='media/user/', nbr=1):
""" Load part of dataset to display simple preview. nbr indicate the amount of packages. The shape is [nb of patient, nb of images (62), image dim 1 (96), image dim 2 (96)]
"""
# Check if nbr arg is positive and bigger than one
if nbr < 1:
print("ERROR - LOAD_MRI_DATASET - Bad argument nbr", nbr)
return
data = DataLoader.load_MRI_dataset_part(os_path=os_path, nbr=nbr)
data2 = np.reshape(data,[data.shape[0]//DataLoader.IMAGES_PER_SCAN, DataLoader.IMAGES_PER_SCAN,
DataLoader.IMAGES_DIMENSION, DataLoader.IMAGES_DIMENSION])
return data2
@staticmethod
def load_MRI_dataset_part_id(os_path='media/user/', nbr=1, id_image = [32], set_type='train', merged=True):
    """Load `nbr` packages keeping only the slice indices in `id_image` per scan.

    The value of id_image indicates the images kept. 0 <= id_image < IMAGES_PER_SCAN.
    When `merged` is True the selected slices of one patient are stacked
    vertically into a single image.
    NOTE(review): `id_image=[32]` is a mutable default argument — shared
    across calls; safe only if never mutated. Verify before changing.
    """
    # Check if nbr arg is positive and bigger than one
    if nbr < 1:
        print("ERROR - load_MRI_dataset_part_mid - Bad argument nbr", nbr)
        return
    # Keep track of total data length to compute correct index references
    # across package boundaries (packages need not align with scans).
    len_data_tot = 0
    try:
        # Take range 0 to IMAGES_PER_SCAN. Then subtract id_image. Therefore the interval will be:
        # -id_image to IMAGES_PER_SCAN-id_image. Every multiple of IMAGES_PER_SCAN is an image to keep.
        # The indices are the locations of the zeros after the modulo operation.
        data_frame = np.zeros((0, DataLoader.IMAGES_DIMENSION, DataLoader.IMAGES_DIMENSION))
        for i in range(nbr):
            # Build path to data
            pathMRI = DataLoader.getNextPath(os_path, i+1, set_type)
            # Print progression
            print('Loading...', pathMRI, ' ... current images=', data_frame.shape[0])
            data = np.load(pathMRI)
            len_data = len(data)
            id_all = np.zeros(0)
            for j, val_id in enumerate(id_image):
                # Same index operation as before; here len_data_tot must be taken into account
                ids = np.nonzero(np.mod(
                    np.linspace(0,len_data-1,len_data) - id_image[j] + len_data_tot,
                    DataLoader.IMAGES_PER_SCAN) == 0)[0]
                id_all = np.append(id_all, ids)
            data_frame = np.concatenate((data_frame, data[np.sort(id_all.astype(int))]), axis=0)
            # Set new data total length
            len_data_tot += len_data
    except OSError:
        # Handle error in case the package file is not found
        print("ERROR - LOAD_MRI_DATASET - No file", pathMRI, "found")
        return
    # Reshape to get corresponding size (multiple of id_image)
    n_patient_full = data_frame.shape[0]//len(id_image)
    data_frame = data_frame[:n_patient_full*len(id_image)]
    data_frame = np.reshape(data_frame,
        [n_patient_full, len(id_image), DataLoader.IMAGES_DIMENSION, DataLoader.IMAGES_DIMENSION])
    if merged:
        # Merge the selected slices of each patient into a single tall image
        data_frame = np.reshape(data_frame,
            [n_patient_full, len(id_image)*DataLoader.IMAGES_DIMENSION, DataLoader.IMAGES_DIMENSION])
    return data_frame
@staticmethod
def getNextPath(os_path='media/user/', index=1, set_type='train'):
""" Build path to MRI file off the form : path/to/mri/MRI_DATASET+index+MRI_DATASET_EXT
"""
str_dataset = DataLoader.MRI_DATASET
if set_type is 'valid':
str_dataset = DataLoader.MRI_VALIDSET
elif set_type is 'test':
str_dataset = DataLoader.MRI_TESTSET
strFileName = str_dataset + str(index) + DataLoader.MRI_DATASET_EXT
return os.path.join(os_path, DataLoader.MRI_FOLDER, strFileName)
@staticmethod
def clean_data(patients=None):
""" Cleaning data
* train_valid_test : int to category (TRAINING, VALIDATION, TEST)
* only_date : str to date (%Y%m%d)
* diagnosis : int to category (NORMAL, MCI, AD)
"""
# Check if patient is an empty matrix
if patients is None:
print('ERROR - CLEAN_DATA - patients is None')
return
# Set catergory for train_valid_test as : Training = 0, Validation = 1, Test = 2
patients['train_valid_test'] = patients['train_valid_test'].astype('category')
patients['train_valid_test'].cat.categories = ['TRAINING', 'VALIDATION', 'TEST']
# Convert to date format
patients['only_date'] = pd.to_datetime(patients['only_date'], format='%Y%m%d', errors='ignore')
# Set catergory for cognitively as : Normal = 1, MCI = 2, AD = 3
patients['diagnosis'] = patients['diagnosis'].astype('category')
patients['diagnosis'].cat.categories = ['NORMAL', 'MCI', 'AD']
return patients
@staticmethod
def print_correlation(patients=None):
""" Print correlation between diagnosis and age/sex
"""
# Check if patient is an empty matrix
if patients is None:
print('ERROR - PRINT_HEAD - patients is None')
return
# Convert 'F'=1 and 'M'=0 for genders
patient_sex_num = patients['sex'].replace({'F': 1, 'M': 2})
# Get categorie and set values 'NORMAL'=1, 'MCI'=2, 'AD'=3
patient_diag_num = patients['diagnosis'].copy()
patient_diag_num.cat.categories = [1, 2, 3]
# Get correlation between AD and age
corr_age = patients['age_at_scan'].corr(patient_diag_num, method='pearson')
# Get correlation between AD and sex
corr_sex = patient_sex_num.corr(patient_diag_num, method='pearson')
# Print results
strCorr = 'Correlation Diagnosis - Age: ' + str(np.round(corr_age, 2))
strCorr += '\nCorrelation Diagnosis - Sex: ' + str(np.round(corr_sex, 2))
print(strCorr)
@staticmethod
def plot_pie_info(patients):
    """Display pie chart info about
    * Train/Validation/Test repartition
    * Diagnosis repartition in percentage
    * Gender repartition in percentage

    Shows the figure; also saves it as PDF when SAVE_FIGURE is True.
    """
    if patients is None:
        print('ERROR - PLOT_PIE_INFO - patients is None')
        return
    # Display data repartition (overall), three pies side by side
    fig = plt.figure(figsize=(12,4));
    plt.subplot(1,3,1)
    # Train/Valid/Test repartition pie chart
    plt.pie(patients['train_valid_test'].value_counts(), explode=(0.1, 0.1, 0.1), labels=patients['train_valid_test'].cat.categories,
        autopct='%1.1f%%', startangle=0);
    plt.title('Train/Valid/Test', fontsize=18 );
    plt.subplot(1,3,2)
    # Diagnosis repartition pie chart
    plt.pie(patients['diagnosis'].value_counts(), explode=(0.1, 0.1, 0.1), labels=patients['diagnosis'].cat.categories,
        autopct='%1.1f%%', startangle=90);
    plt.title('Diagnosis', fontsize=18 );
    plt.subplot(1,3,3)
    # Sex (gender) repartition pie chart
    plt.pie(patients['sex'].value_counts(), explode=(0.1, 0.1), labels=['Female', 'Male'],
        autopct='%1.1f%%', startangle=90);
    plt.title('Gender', fontsize=18); plt.show()
    # Save as PDF file if wanted
    if DataLoader.SAVE_FIGURE:
        DataLoader.save_plot(fig, 'pie_diag_gender.pdf')
@staticmethod
def plot_age_distrib(patients=None):
""" Display age distribution for all 3 diagnosis cases (Normal, MCI, AD). Since the data does not have a
normal distribution the stddev is nor relevant. It will only indicate how 'speard' are the data
"""
# Check if patient is an empty matrix
if patients is None:
print('ERROR - PLOT_AGE_DISTRIB - patients is None')
return
# Get min/max range to display plots
xMin = np.min(patients['age_at_scan']); xMax = np.max(patients['age_at_scan']);
# Get basic data repartition features (not gaussian, but nformation about spread)
patients_normal = patients[patients['diagnosis']=='NORMAL']['age_at_scan']
patients_mci = patients[patients['diagnosis']=='MCI']['age_at_scan']
patients_ad = patients[patients['diagnosis']=='AD']['age_at_scan']
mean_normal = np.mean(patients_normal); stddev_normal = np.std(patients_normal)
mean_mci = np.mean(patients_mci); stddev_mci = np.std(patients_mci)
mean_ad = np.mean(patients_ad); stddev_ad = np.std(patients_ad)
# Get for each diagnosis spectific value for men and female. Data will appear as stacked bars
patients_normal_f = patients[np.logical_and(patients['diagnosis']=='NORMAL', patients['sex']=='F')]['age_at_scan']
patients_normal_m = patients[np.logical_and(patients['diagnosis']=='NORMAL', patients['sex']=='M')]['age_at_scan']
patients_mci_f = patients[np.logical_and(patients['diagnosis']=='MCI', patients['sex']=='F')]['age_at_scan']
patients_mci_m = patients[np.logical_and(patients['diagnosis']=='MCI', patients['sex']=='M')]['age_at_scan']
patients_ad_f = patients[np.logical_and(patients['diagnosis']=='AD', patients['sex']=='F')]['age_at_scan']
patients_ad_m = patients[np.logical_and(patients['diagnosis']=='AD', patients['sex']=='M')]['age_at_scan']
| |
self.make_behavior(UnloadInvetoryToChest)
yield self.make_behavior(CollectLayingAround, itemstack=self.itemstack)
for recipe in self.item_recipes:
log.msg('collect with recipe %s' % recipe)
if not recipe.is_obtainable:
continue
if recipe.mine_recipe:
yield self.make_behavior(CollectMine, itemstack=self.itemstack, recipe=recipe)
if recipe.craft_recipe:
yield self.make_behavior(CollectCraft, itemstack=self.itemstack, recipe=recipe)
if recipe.smelt_recipe:
yield self.make_behavior(CollectSmelt, itemstack=self.itemstack, recipe=recipe)
if recipe.brew_recipe:
yield self.make_behavior(CollectBrew, itemstack=self.itemstack, recipe=recipe)
if recipe.mobkill_recipe:
yield self.make_behavior(CollectMobKill, itemstack=self.itemstack, recipe=recipe)
class UnloadInvetoryToChest(BTSelector):
    """Selector that empties the bot's inventory.

    NOTE(review): the class name misspells "Inventory"; renaming would
    break external references, so it is kept as-is.
    """
    def __init__(self, **kwargs):
        super(UnloadInvetoryToChest, self).__init__(**kwargs)
        self.name = 'unload inventory'

    def is_valid(self):
        # Always applicable.
        return True

    def choices(self):
        """ for now just dump it """
        yield self.make_behavior(DropInventory)
class CollectLayingAround(BTSelector):
    """Selector that picks up matching itemstacks already lying on the ground."""

    def __init__(self, itemstack=None, **kwargs):
        super(CollectLayingAround, self).__init__(**kwargs)
        self.itemstack = itemstack  # the stack type we are trying to collect
        self.name = 'collect around %s' % self.itemstack

    def is_valid(self):
        # Only worth running while matching item entities remain nearby.
        if not self.is_entities_around:
            return False
        return True

    def setup(self):
        # Snapshot item entities within 48 blocks matching the wanted stack.
        self.entities_around = [ent for ent in self.blackboard.entities_in_distance(self.blackboard.bot_object.position, distance=48) if ent.is_itemstack and self.itemstack.is_same(ent.itemstack)]

    @property
    def is_entities_around(self):
        return len(self.entities_around) > 0

    def get_closest_entity(self):
        # Sort by squared distance to the bot and pop the nearest candidate.
        self.entities_around = sorted(self.entities_around, key=lambda e: (e.position - self.blackboard.bot_object.position).size_pow)
        return self.entities_around.pop(0)

    def choices(self):
        while self.is_entities_around:
            ent = self.get_closest_entity()
            # Entity may have despawned or been picked up since setup().
            if not self.blackboard.entities_has_entity_eid(ent.eid):
                continue
            yield self.make_behavior(CollectEntity, entity=ent)
class CollectEntity(BTSequencer):
    """ future work, unconnected

    Sequencer: walk to a dropped item entity and pick it up.
    """
    def __init__(self, entity=None, **kwargs):
        super(CollectEntity, self).__init__(**kwargs)
        self.entity = entity  # the item entity to collect

    def is_valid(self):
        # Valid only while the entity still exists in the world.
        return self.blackboard.entities_has_entity_eid(self.entity.eid)

    def choices(self):
        # Walk into the entity's (slightly expanded) bounding box, then pick up.
        yield self.make_behavior(GetTo, bb=self.entity.expand(1, 0.5, 1))
        yield self.make_behavior(PickUp)
class CollectMine(BTSequencer):
    """Sequencer: obtain an itemstack by mining the recipe's block."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CollectMine, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.recipe = recipe
        self.name = 'mine %s' % self.itemstack
        self.blocks_around = []

    def is_valid(self):
        # Truthy when setup() found candidate blocks (returns the list itself).
        return self.blocks_around

    def setup(self):
        # Determine whether we already hold a suitable tool, and scan for
        # matching blocks near the bot.
        _, self.have_tool, self.mine_tool = self.blackboard.inventory_tool_for_block(self.recipe.block)
        self.blocks_around = self.blackboard.blocks_around(self.blackboard.bot_object.position, block_number=self.recipe.block.number, block_filter=self.recipe.block_filter)

    def choices(self):
        # Collect the minimal tool first if we do not have one.
        if not self.have_tool:
            yield self.make_behavior(Collect, itemstack=self.blackboard.inventory_min_tool_for_block(self.recipe.block))
        for block in self.blocks_around:
            yield self.make_behavior(GetTo, digtarget=block.coords)
            if not self.have_tool:
                yield self.make_behavior(Collect, itemstack=self.blackboard.inventory_min_tool_for_block(self.recipe.block))
            yield self.make_behavior(InventorySelectActive, itemstack=self.mine_tool)
            yield self.make_behavior(DigBlock, block=block)
            yield self.make_behavior(WaitForDrop, block=block, itemstack=self.recipe.itemstack, drop_everytime=self.recipe.drop_everytime)
            # Only the first reachable block is mined per run.
            break
class CollectCraft(BTSequencer):
    """Sequencer: gather missing ingredients, then craft the itemstack."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CollectCraft, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.recipe = recipe

    def is_valid(self):
        return True

    @property
    def missing_ingredient(self):
        # First recipe resource the inventory does not cover; None when
        # everything is available. Re-evaluated on each access.
        for itemstack in self.recipe.resources:
            if not self.blackboard.inventory_player.has_item_count(itemstack):
                log.msg("missing ingredient %s for crafting" % itemstack)
                return itemstack

    def choices(self):
        while self.missing_ingredient is not None:
            yield self.make_behavior(Collect, itemstack=self.missing_ingredient)
        yield self.make_behavior(CraftItem, itemstack=self.itemstack, recipe=self.recipe)
class CollectSmelt(BTSelector):
    """Placeholder collector: smelting is not implemented yet (always invalid)."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CollectSmelt, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.name = 'smelt %s' % self.itemstack

    def is_valid(self):
        """ smelting later today """
        return False
class CollectBrew(BTSelector):
    """Placeholder collector: brewing is not implemented yet (always invalid)."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CollectBrew, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.name = 'brew %s' % self.itemstack

    def is_valid(self):
        """ brewing not today """
        return False
class CollectMobKill(BTSelector):
    """Placeholder collector: mob killing is not implemented yet (always invalid)."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CollectMobKill, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.name = 'kill for %s' % self.itemstack

    def is_valid(self):
        """ it will take time before the bot will kill mobs """
        return False
class CraftItem(BTSelector):
    """Selector: craft an itemstack, at a crafting table when the recipe needs one."""

    def __init__(self, itemstack=None, recipe=None, **kwargs):
        super(CraftItem, self).__init__(**kwargs)
        self.itemstack = itemstack
        self.recipe = recipe
        self.name = 'craft %s' % self.itemstack
        # NOTE(review): sorted_tables appears unused in this class — verify
        # before removing.
        self.sorted_tables = None
        log.msg(self.name)

    def is_valid(self):
        # All recipe resources must be in the inventory; when a bench is
        # needed, at least one crafting table must be in range.
        for itemstack in self.recipe.resources:
            if not self.blackboard.inventory_player.has_item_count(itemstack):
                log.msg("don't have %s for crafting" % itemstack)
                return False
        return not self.recipe.need_bench or self.is_tables_around

    def setup(self):
        if self.recipe.need_bench:
            self.crafting_tables_around = list(self.blackboard.blocks_around(self.blackboard.bot_object.position, block_number=blocks.CraftingTable.number, distance=80))
            log.msg("There are %d crafting table around" % len(self.crafting_tables_around))

    @property
    def is_tables_around(self):
        # NOTE(review): crafting_tables_around is only assigned in setup()
        # when need_bench is true — confirm setup() runs before is_valid().
        return len(self.crafting_tables_around) > 0

    def get_closest_table(self):
        # Sort by squared distance to the bot and pop the nearest table.
        self.crafting_tables_around = sorted(self.crafting_tables_around, key=lambda b: (b.coords - self.blackboard.bot_object.position).size_pow)
        return self.crafting_tables_around.pop(0)

    def choices(self):
        if self.recipe.need_bench:
            while self.is_tables_around:
                table = self.get_closest_table()
                yield self.make_behavior(CraftItemAtTable, recipe=self.recipe, craftingtable=table)
        else:
            yield self.make_behavior(CraftItemInventory, recipe=self.recipe)
class CraftItemAtTable(BTSequencer):
    """Sequencer: travel to a specific crafting table and craft there."""

    def __init__(self, recipe=None, craftingtable=None, **kwargs):
        super(CraftItemAtTable, self).__init__(**kwargs)
        self.recipe = recipe
        self.craftingtable = craftingtable
        self.name = "go to %s and craft %s" % (craftingtable, recipe)
        log.msg(self.name)

    def is_valid(self):
        # NOTE(review): dig_positions is assigned in setup() — confirm
        # setup() runs before is_valid().
        return len(self.dig_positions) > 0

    def setup(self):
        # Positions from which the table block can be reached/used.
        self.dig_positions = self.blackboard.positions_to_dig(self.craftingtable.coords)

    def choices(self):
        yield self.make_behavior(TravelTo, coords=self.craftingtable.coords, multiple_goals=self.dig_positions)
        yield self.make_behavior(CraftItemTable, recipe=self.recipe, craftingtable=self.craftingtable)
class TravelTo(BTSequencer):
    """Sequencer: A*-plan a path to a goal and walk it step by step.

    Exactly one goal form is used: explicit coords (optionally with
    multiple_goals), or a bounding box `bb`.
    """

    def __init__(self, coords=None, bb=None, multiple_goals=None, shorten_path_by=0, **kwargs):
        super(TravelTo, self).__init__(**kwargs)
        self.travel_coords = coords
        self.travel_bb = bb
        self.travel_multiple_goals = multiple_goals
        self.shorten_path_by = shorten_path_by  # drop this many trailing steps
        self.path = None  # filled by setup() when A* succeeds
        log.msg(self.name)

    @property
    def name(self):
        if self.travel_coords is not None:
            return 'travel to %s from %s' % (self.blackboard.get_block_coords(self.travel_coords), self.blackboard.bot_standing_on_block(self.blackboard.bot_object))
        else:
            return 'travel to %s from %s' % (self.travel_bb.bottom_center, self.blackboard.bot_standing_on_block(self.blackboard.bot_object))

    def is_valid(self):
        # Only valid when setup() managed to plan a path.
        return self.path is not None

    @inlineCallbacks
    def setup(self):
        # Wait (yielding to the reactor) until the bot stands on a known block.
        sb = self.blackboard.bot_standing_on_block(self.blackboard.bot_object)
        while sb is None:
            yield utils.reactor_break()
            sb = self.blackboard.bot_standing_on_block(self.blackboard.bot_object)
        else:
            # while/else: this branch always runs here (the loop has no break).
            # Pick the A* variant matching the goal form; run it cooperatively.
            if self.travel_multiple_goals is not None:
                d = cooperate(AStarMultiCoords(dimension=self.blackboard.dimension,
                    start_coords=sb.coords,
                    goal_coords=self.travel_coords,
                    multiple_goals=self.travel_multiple_goals)).whenDone()
            elif self.travel_coords is not None:
                d = cooperate(AStarCoords(dimension=self.blackboard.dimension,
                    start_coords=sb.coords,
                    goal_coords=self.travel_coords)).whenDone()
            else:
                d = cooperate(AStarBBCol(dimension=self.blackboard.dimension,
                    start_coords=sb.coords,
                    bb=self.travel_bb)).whenDone()
            d.addErrback(logbot.exit_on_error)
            astar = yield d
            if astar.path is not None:
                # Accept the path only if the bot did not move while planning.
                current_start = self.blackboard.bot_standing_on_block(self.blackboard.bot_object)
                if sb == current_start:
                    self.path = astar.path
                    if self.shorten_path_by > 0:
                        self.path = self.path[self.shorten_path_by:]
                    self.start_coords = current_start.coords

    def choices(self):
        # Path is stored goal-first; walk it from the start.
        for step in reversed(self.path):
            yield self.make_behavior(MoveTo, start=self.start_coords, target=step.coords)
            self.start_coords = step.coords
class MoveTo(BTAction):
    """Action: move the bot one path step, from `start` to `target` coords.

    Handles walking, jumping up single-block elevations, and
    ladder/water movement. Succeeds once the bot's base is in the target
    state and touching its platform; fails when the step becomes impassable.
    """

    def __init__(self, target=None, start=None, **kwargs):
        super(MoveTo, self).__init__(**kwargs)
        self.target_coords = target
        self.start_coords = start
        self.was_at_target = False  # set once the bot's center reached the target column
        self.hold_position_flag = False
        self.name = 'move to %s' % str(self.target_coords)

    def _check_status(self, b_obj):
        # Re-derive grid states each tick: the world may have changed.
        gs = GridSpace(self.blackboard.grid)
        self.start_state = gs.get_state_coords(self.start_coords)
        self.target_state = gs.get_state_coords(self.target_coords)
        go = gs.can_go(self.start_state, self.target_state)
        if not go:
            log.msg('cannot go between %s %s' % (self.start_state, self.target_state))
            return Status.failure
        if not self.was_at_target:
            self.was_at_target = self.target_state.vertical_center_in(b_obj.position)
        if self.target_state.base_in(b_obj.aabb) and self.target_state.touch_platform(b_obj.position):
            return Status.success
        return Status.running

    def action(self):
        b_obj = self.blackboard.bot_object
        self.status = self._check_status(b_obj)
        if self.status != Status.running:
            return
        on_ladder = self.blackboard.bot_is_on_ladder(b_obj)
        in_water = self.blackboard.bot_is_in_water(b_obj)
        if on_ladder or in_water:
            # Climbing/swimming: jump to ascend, sneak to descend on ladders.
            elev = self.target_state.platform_y - b_obj.y
            if fops.gt(elev, 0):
                self.jump(b_obj)
                self.move(b_obj)
            elif fops.lt(elev, 0):
                self.move(b_obj)
            else:
                if on_ladder:
                    self.sneak(b_obj)
                self.move(b_obj)
        elif self.blackboard.bot_is_standing(b_obj):
            elev = self.target_state.platform_y - b_obj.y
            if fops.lte(elev, 0):
                self.move(b_obj)
            elif fops.gt(elev, 0):
                # Jump only while still on the start block, then keep moving.
                if self.start_state.base_in(b_obj.aabb):
                    self.jump(b_obj)
                self.move(b_obj)
        else:
            # Airborne: keep steering toward the target.
            self.move(b_obj)

    def move(self, b_obj):
        # Steer horizontally toward the target column; stop turning once there.
        direction = utils.Vector2D(self.target_state.center_x - b_obj.x, self.target_state.center_z - b_obj.z)
        direction.normalize()
        if not self.was_at_target:
            self.blackboard.bot_turn_to_direction(b_obj, direction.x, 0, direction.z)
        b_obj.direction = direction

    def jump(self, b_obj):
        b_obj.is_jumping = True

    def sneak(self, b_obj):
        self.blackboard.bot_start_sneaking(b_obj)
class PeekAtPlayer(BTAction):
    """Action: turn the bot's head toward a player's eye level for one tick."""

    def __init__(self, player=None, **kwargs):
        super(PeekAtPlayer, self).__init__(**kwargs)
        self.player = player
        self.name = 'peek at player %s' % player.username

    def on_start(self):
        self.blackboard.bot_turn_to_point(self.blackboard.bot_object, self.player.position_eyelevel)

    def action(self):
        # Succeed after at least one tick has elapsed.
        if self.duration_ticks > 0:
            self.status = Status.success
class ShowCursor(BTAction):
    """Action: look at (and announce) the block the commanding player points at."""

    def __init__(self, player=None, **kwargs):
        super(ShowCursor, self).__init__(**kwargs)
        self.player = player
        self.name = 'show player %s cursor' % config.COMMANDER

    def on_start(self):
        # Raycast along the player's view direction to find the aimed block.
        player_look_vector = utils.yaw_pitch_to_vector(self.player.yaw, self.player.pitch)
        looking_at_block = self.blackboard.grid_raycast_to_block(self.player.position_eyelevel, player_look_vector)
        # Only react when the aimed block changed since last time.
        if self.blackboard.last_look_at_block != looking_at_block:
            self.blackboard.last_look_at_block = looking_at_block
            if looking_at_block.number == 0:
                # Air: nothing within raycast range.
                self.blackboard.send_chat_message("cursor too far")
                self.blackboard.bot_turn_to_vector(self.blackboard.bot_object, player_look_vector)
            else:
                self.blackboard.send_chat_message("cursor at %s %s" % (looking_at_block.name, looking_at_block.coords))
                self.blackboard.bot_turn_to_point(self.blackboard.bot_object, looking_at_block.coords.offset(0.5, 0.5, 0.5))

    def action(self):
        # Succeed after at least one tick has elapsed.
        if self.duration_ticks > 0:
            self.status = Status.success
class CraftItemBase(BTAction):
    """Base behavior-tree action that crafts an item from a recipe.

    Subclasses are expected to set ``self.inventory`` and
    ``self.inventory_man`` in their ``on_start`` (player inventory vs.
    crafting table window).
    """
    def __init__(self, recipe=None, **kwargs):
        super(CraftItemBase, self).__init__(**kwargs)
        # Recipe with a .plan of itemstacks and a resulting .itemstack.
        self.recipe = recipe
    def on_end(self):
        # Close the inventory window only after a successful craft.
        if self.status == Status.success:
            self.inventory_man.close()
    @inlineCallbacks
    def action(self):
        """Run each crafting click in order, awaiting server confirmation.

        Every yielded click resolves to a boolean confirmation; the action
        fails on the first unconfirmed click, succeeds when all complete.
        """
        for click in self.craftsteps():
            confirmed = yield click
            if confirmed not in [True, False]:
                raise Exception("confirmed transaction got to be boolean")
            if not confirmed:
                log.msg("bad news, inventory transaction not confirmed by the server")
                self.status = Status.failure
                return
        self.status = Status.success
    def craftsteps(self):
        """Yield the deferred inventory clicks that perform the craft."""
        for crafting_offset, itemstack in enumerate(self.recipe.plan):
            if itemstack is None:  # crafting spot in recipe is empty
                continue
            # Pick up the ingredient, then place it in the craft grid slot.
            yield self.inventory_man.cursor_hold(itemstack)
            yield self.put_craftoffset_slot(crafting_offset)
        self.inventory_man.set_crafted_item(self.recipe)
        # Make sure the cursor is empty before taking the result.
        while not self.inventory_man.is_cursor_empty:
            yield self.inventory_man.empty_cursor()
        yield self.get_crafted_item()
        self.inventory_man.erase_craft_slots()
        # Empty the cursor again after picking up the crafted item.
        while not self.inventory_man.is_cursor_empty:
            yield self.inventory_man.empty_cursor()
        self.inventory_man.increment_collected(self.recipe.itemstack)
    def put_craftoffset_slot(self, offset):
        # Translate a recipe grid offset into a window slot and right-click it.
        slot = self.inventory.crafting_offset_as_slot(offset)
        return self.inventory_man.right_click_slot(slot)
    def get_crafted_item(self):
        # Click the output slot to pick up the crafted result.
        return self.inventory_man.click_slot(self.inventory.crafted_slot)
class CraftItemInventory(CraftItemBase):
    """Craft a recipe inside the player's own inventory window."""

    def __init__(self, **kwargs):
        super(CraftItemInventory, self).__init__(**kwargs)
        self.name = "%s in inventory" % self.recipe
        log.msg(self.name)

    def on_start(self):
        # Operate directly on the player's inventory.
        inv = self.blackboard.inventory_player
        self.inventory = inv
        self.inventory_man = InventoryManipulation(inventory=inv, blackboard=self.blackboard)
class CraftItemTable(CraftItemBase):
    """Craft a recipe using a crafting table block in the world."""

    def __init__(self, craftingtable=None, **kwargs):
        super(CraftItemTable, self).__init__(**kwargs)
        self.craftingtable = craftingtable
        self.name = "%s on %s" % (self.recipe, self.craftingtable)
        log.msg(self.name)

    @inlineCallbacks
    def on_start(self):
        # Send a block-placement packet at the table, then wait for the
        # server to open the table's inventory window.
        table = self.craftingtable
        held = self.blackboard.inventory_player.active_item()
        data = {
            "x": table.x,
            "y": table.y,
            "z": table.z,
            "face": 0,
            "slotdata": self.blackboard.itemstack_as_slotdata(itemstack=held),
            "cursor_x": 8,
            "cursor_y": 8,
            "cursor_z": 8,
        }
        self.blackboard.send_packet("player block placement", data)
        self.inventory = yield self.blackboard.receive_inventory()
        self.inventory_man = InventoryManipulation(inventory=self.inventory, blackboard=self.blackboard)
class DropInventory(BTAction):
    """Behavior-tree action that throws away every item the bot carries."""

    def __init__(self, **kwargs):
        super(DropInventory, self).__init__(**kwargs)
        self.name = "drop inventory"

    def on_start(self):
        self.inventory_man = InventoryManipulation(
            inventory=self.blackboard.inventory_player, blackboard=self.blackboard)

    def dropsteps(self):
        """Yield the deferred clicks that pick up and drop each stack."""
        for slot, stack in self.inventory_man.inventory.slot_items():
            yield self.inventory_man.click_slot(slot)
            yield self.inventory_man.click_drop()
            log.msg('dropped %s' % stack)

    @inlineCallbacks
    def action(self):
        for step in self.dropsteps():
            ok = yield step
            if not ok:
                log.err("bad news, inventory transaction not confirmed by the server, click slot")
                self.status = Status.failure
                return
        self.status = Status.success

    def on_end(self):
        # Only close the window when everything was dropped successfully.
        if self.status == Status.success:
            self.inventory_man.close()
class InventorySelectActive(BTAction):
def __init__(self, itemstack=None, **kwargs):
    """Action that makes *itemstack* the bot's actively held item."""
    super(InventorySelectActive, self).__init__(**kwargs)
    self.itemstack = itemstack
    self.name = "hold item %s" % self.itemstack
@classmethod
def parse_parameters(cls, itemname):
    """Look up *itemname* in the item database; None when unknown."""
    try:
        return items.item_db.item_by_name(itemname)
    except KeyError:
        return None
def on_start(self):
    # Manipulate the player's own inventory window.
    self.inventory_man = InventoryManipulation(inventory=self.blackboard.inventory_player, blackboard=self.blackboard)
@inlineCallbacks
def action(self):
if not self.inventory_man.has_item(self.itemstack):
self.status = Status.failure
self.blackboard.send_chat_message("don't have %s in my inventory" % self.itemstack.name)
return
if self.inventory_man.item_active(self.itemstack):
self.status = Status.success
return
active_slot = self.inventory_man.item_at_active_slot(self.itemstack)
if active_slot is not None:
self.inventory_man.set_active_slot(active_slot)
self.status = Status.success
return
slot_position = self.inventory_man.slot_at_item(self.itemstack)
confirmed = yield self.inventory_man.click_slot(slot_position)
if not confirmed:
log.msg("bad news, inventory transaction not | |
},
'muon-electron mass ratio': {
'quantity': 'muon-electron mass ratio',
'unit': '',
'value': '206.7682826',
'uncertainty': '0.000 0046'
},
'muon g factor': {
'quantity': 'muon g factor',
'unit': '',
'value': '-2.0023318418',
'uncertainty': '0.000 000 0013'
},
'muon mag. mom.': {
'quantity': 'muon mag. mom.',
'unit': 'J T^{-1}',
'value': '-4.49044826e-26',
'uncertainty': '0.000 000 10 e-26'
},
'muon mag. mom. anomaly': {
'quantity': 'muon mag. mom. anomaly',
'unit': '',
'value': '1.16592089e-3',
'uncertainty': '0.000 000 63 e-3'
},
'muon mag. mom. to bohr magneton ratio': {
'quantity': 'muon mag. mom. to Bohr magneton ratio',
'unit': '',
'value': '-4.84197048e-3',
'uncertainty': '0.000 000 11 e-3'
},
'muon mag. mom. to nuclear magneton ratio': {
'quantity': 'muon mag. mom. to nuclear magneton ratio',
'unit': '',
'value': '-8.89059705',
'uncertainty': '0.000 000 20'
},
'muon mass': {
'quantity': 'muon mass',
'unit': 'kg',
'value': '1.883531594e-28',
'uncertainty': '0.000 000 048 e-28'
},
'muon mass energy equivalent': {
'quantity': 'muon mass energy equivalent',
'unit': 'J',
'value': '1.692833774e-11',
'uncertainty': '0.000 000 043 e-11'
},
'muon mass energy equivalent in mev': {
'quantity': 'muon mass energy equivalent in MeV',
'unit': 'MeV',
'value': '105.6583745',
'uncertainty': '0.000 0024'
},
'muon mass in u': {
'quantity': 'muon mass in u',
'unit': 'u',
'value': '0.1134289257',
'uncertainty': '0.000 000 0025'
},
'muon molar mass': {
'quantity': 'muon molar mass',
'unit': 'kg mol^{-1}',
'value': '0.1134289257e-3',
'uncertainty': '0.000 000 0025 e-3'
},
'muon-neutron mass ratio': {
'quantity': 'muon-neutron mass ratio',
'unit': '',
'value': '0.1124545167',
'uncertainty': '0.000 000 0025'
},
'muon-proton mag. mom. ratio': {
'quantity': 'muon-proton mag. mom. ratio',
'unit': '',
'value': '-3.183345142',
'uncertainty': '0.000 000 071'
},
'muon-proton mass ratio': {
'quantity': 'muon-proton mass ratio',
'unit': '',
'value': '0.1126095262',
'uncertainty': '0.000 000 0025'
},
'muon-tau mass ratio': {
'quantity': 'muon-tau mass ratio',
'unit': '',
'value': '5.94649e-2',
'uncertainty': '0.000 54 e-2'
},
'natural unit of action': {
'quantity': 'natural unit of action',
'unit': 'J s',
'value': '1.054571800e-34',
'uncertainty': '0.000 000 013 e-34'
},
'natural unit of action in ev s': {
'quantity': 'natural unit of action in eV s',
'unit': 'eV s',
'value': '6.582119514e-16',
'uncertainty': '0.000 000 040 e-16'
},
'natural unit of energy': {
'quantity': 'natural unit of energy',
'unit': 'J',
'value': '8.18710565e-14',
'uncertainty': '0.000 000 10 e-14'
},
'natural unit of energy in mev': {
'quantity': 'natural unit of energy in MeV',
'unit': 'MeV',
'value': '0.5109989461',
'uncertainty': '0.000 000 0031'
},
'natural unit of length': {
'quantity': 'natural unit of length',
'unit': 'm',
'value': '386.15926764e-15',
'uncertainty': '0.000 000 18 e-15'
},
'natural unit of mass': {
'quantity': 'natural unit of mass',
'unit': 'kg',
'value': '9.10938356e-31',
'uncertainty': '0.000 000 11 e-31'
},
'natural unit of mom.um': {
'quantity': 'natural unit of mom.um',
'unit': 'kg m s^{-1}',
'value': '2.730924488e-22',
'uncertainty': '0.000 000 034 e-22'
},
'natural unit of mom.um in mev/c': {
'quantity': 'natural unit of mom.um in MeV/c',
'unit': 'MeV/c',
'value': '0.5109989461',
'uncertainty': '0.000 000 0031'
},
'natural unit of time': {
'quantity': 'natural unit of time',
'unit': 's',
'value': '1.28808866712e-21',
'uncertainty': '0.000 000 000 58 e-21'
},
'natural unit of velocity': {
'quantity': 'natural unit of velocity',
'unit': 'm s^{-1}',
'value': '299792458',
'uncertainty': '(exact)'
},
'neutron compton wavelength': {
'quantity': 'neutron Compton wavelength',
'unit': 'm',
'value': '1.31959090481e-15',
'uncertainty': '0.000 000 000 88 e-15'
},
'neutron compton wavelength over 2 pi': {
'quantity': 'neutron Compton wavelength over 2 pi',
'unit': 'm',
'value': '0.21001941536e-15',
'uncertainty': '0.000 000 000 14 e-15'
},
'neutron-electron mag. mom. ratio': {
'quantity': 'neutron-electron mag. mom. ratio',
'unit': '',
'value': '1.04066882e-3',
'uncertainty': '0.000 000 25 e-3'
},
'neutron-electron mass ratio': {
'quantity': 'neutron-electron mass ratio',
'unit': '',
'value': '1838.68366158',
'uncertainty': '0.000 000 90'
},
'neutron g factor': {
'quantity': 'neutron g factor',
'unit': '',
'value': '-3.82608545',
'uncertainty': '0.000 000 90'
},
'neutron gyromag. ratio': {
'quantity': 'neutron gyromag. ratio',
'unit': 's^{-1} T^{-1}',
'value': '1.83247172e8',
'uncertainty': '0.000 000 43 e8'
},
'neutron gyromag. ratio over 2 pi': {
'quantity': 'neutron gyromag. ratio over 2 pi',
'unit': 'MHz T^{-1}',
'value': '29.1646933',
'uncertainty': '0.000 0069'
},
'neutron mag. mom.': {
'quantity': 'neutron mag. mom.',
'unit': 'J T^{-1}',
'value': '-0.96623650e-26',
'uncertainty': '0.000 000 23 e-26'
},
'neutron mag. mom. to bohr magneton ratio': {
'quantity': 'neutron mag. mom. to Bohr magneton ratio',
'unit': '',
'value': '-1.04187563e-3',
'uncertainty': '0.000 000 25 e-3'
},
'neutron mag. mom. to nuclear magneton ratio': {
'quantity': 'neutron mag. mom. to nuclear magneton ratio',
'unit': '',
'value': '-1.91304273',
'uncertainty': '0.000 000 45'
},
'neutron mass': {
'quantity': 'neutron mass',
'unit': 'kg',
'value': '1.674927471e-27',
'uncertainty': '0.000 000 021 e-27'
},
'neutron mass energy equivalent': {
'quantity': 'neutron mass energy equivalent',
'unit': 'J',
'value': '1.505349739e-10',
'uncertainty': '0.000 000 019 e-10'
},
'neutron mass energy equivalent in mev': {
'quantity': 'neutron mass energy equivalent in MeV',
'unit': 'MeV',
'value': '939.5654133',
'uncertainty': '0.000 0058'
},
'neutron mass in u': {
'quantity': 'neutron mass in u',
'unit': 'u',
'value': '1.00866491588',
'uncertainty': '0.000 000 000 49'
},
'neutron molar mass': {
'quantity': 'neutron molar mass',
'unit': 'kg mol^{-1}',
'value': '1.00866491588e-3',
'uncertainty': '0.000 000 000 49 e-3'
},
'neutron-muon mass ratio': {
'quantity': 'neutron-muon mass ratio',
'unit': '',
'value': '8.89248408',
'uncertainty': '0.000 000 20'
},
'neutron-proton mag. mom. ratio': {
'quantity': 'neutron-proton mag. mom. ratio',
'unit': '',
'value': '-0.68497934',
'uncertainty': '0.000 000 16'
},
'neutron-proton mass difference': {
'quantity': 'neutron-proton mass difference',
'unit': '',
'value': '2.30557377e-30',
'uncertainty': '0.000 000 85 e-30'
},
'neutron-proton mass difference energy equivalent': {
'quantity': 'neutron-proton mass difference energy equivalent',
'unit': '',
'value': '2.07214637e-13',
'uncertainty': '0.000 000 76 e-13'
},
'neutron-proton mass difference energy equivalent in mev': {
'quantity':
'neutron-proton mass difference energy equivalent in MeV',
'unit': '',
'value': '1.29333205',
'uncertainty': '0.000 000 48'
},
'neutron-proton mass difference in u': {
'quantity': 'neutron-proton mass difference in u',
'unit': '',
'value': '0.00138844900',
'uncertainty': '0.000 000 000 51'
},
'neutron-proton mass ratio': {
'quantity': 'neutron-proton mass ratio',
'unit': '',
'value': '1.00137841898',
'uncertainty': '0.000 000 000 51'
},
'neutron-tau mass ratio': {
'quantity': 'neutron-tau mass ratio',
'unit': '',
'value': '0.528790',
'uncertainty': '0.000 048'
},
'neutron to shielded proton mag. mom. ratio': {
'quantity': 'neutron to shielded proton mag. mom. ratio',
'unit': '',
'value': '-0.68499694',
'uncertainty': '0.000 000 16'
},
'newtonian constant of gravitation': {
'quantity': 'Newtonian constant of gravitation',
'unit': 'm^3 kg^{-1} s^{-2}',
'value': '6.67408e-11',
'uncertainty': '0.000 31 e-11'
},
'newtonian constant of gravitation over h-bar c': {
'quantity': 'Newtonian constant of gravitation over h-bar c',
'unit': '(GeV/c^2)^-2',
'value': '6.70861e-39',
'uncertainty': '0.000 31 e-39'
},
'nuclear magneton': {
'quantity': 'nuclear magneton',
'unit': 'J T^{-1}',
'value': '5.050783699e-27',
'uncertainty': '0.000 000 031 e-27'
},
'nuclear magneton in ev/t': {
'quantity': 'nuclear magneton in eV/T',
'unit': 'eV T^{-1}',
'value': '3.1524512550e-8',
'uncertainty': '0.000 000 0015 e-8'
},
'nuclear magneton in inverse meters per tesla': {
'quantity': 'nuclear magneton in inverse meters per tesla',
'unit': 'm^{-1} T^{-1}',
'value': '2.542623432e-2',
'uncertainty': '0.000 000 016 e-2'
},
'nuclear magneton in k/t': {
'quantity': 'nuclear magneton in K/T',
'unit': 'K T^{-1}',
'value': '3.6582690e-4',
'uncertainty': '0.000 0021 e-4'
},
'nuclear magneton in mhz/t': {
'quantity': 'nuclear magneton in MHz/T',
'unit': 'MHz T^{-1}',
'value': '7.622593285',
'uncertainty': '0.000 000 047'
},
'planck constant': {
'quantity': 'Planck constant',
'unit': 'J s',
'value': '6.626070040e-34',
'uncertainty': '0.000 000 081 e-34'
},
'planck constant in ev s': {
'quantity': 'Planck constant in eV s',
'unit': 'eV s',
'value': '4.135667662e-15',
'uncertainty': '0.000 000 025 e-15'
},
'planck constant over 2 pi': {
'quantity': 'Planck constant over 2 pi',
'unit': 'J s',
'value': '1.054571800e-34',
'uncertainty': '0.000 000 013 e-34'
},
'planck constant over 2 pi in ev s': {
'quantity': 'Planck constant over 2 pi in eV s',
'unit': 'eV s',
'value': '6.582119514e-16',
'uncertainty': '0.000 000 040 e-16'
},
'planck constant over 2 pi times c in mev fm': {
'quantity': 'Planck constant over 2 pi times c in MeV fm',
'unit': 'MeV fm',
'value': '197.3269788',
'uncertainty': '0.000 0012'
},
'planck length': {
'quantity': 'Planck length',
'unit': 'm',
'value': '1.616229e-35',
'uncertainty': | |
# -*- coding: utf-8 -*-
r'''This module contains implementations of common regularizers.
In ``theanets`` regularizers are thought of as additional terms that get
combined with the :class:`Loss <theanets.losses.Loss>` for a model at
optimization time. Regularizer terms in the loss are usually used to "encourage"
a model to behave in a particular way---for example, the pattern and arrangement
of learned features can be changed by including a sparsity (L1-norm) regularizer
on the hidden unit activations, or units can randomly be dropped out (set to
zero) while running the model.
'''
import fnmatch
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from . import layers
from . import util
def from_kwargs(graph, **kwargs):
    '''Construct common regularizers from a set of keyword arguments.

    Keyword arguments not listed below will be passed to
    :func:`Regularizer.build` if they specify the name of a registered
    :class:`Regularizer`.

    Parameters
    ----------
    graph : :class:`theanets.graph.Network`
        A network graph to regularize.
    regularizers : dict or tuple/list of :class:`Regularizer`, optional
        If this is a list or a tuple, the contents of the list will be
        returned as the regularizers. This is to permit custom lists of
        regularizers to be passed easily. If this is a dict, its contents
        will be added to the other keyword arguments passed in.
    rng : int or theano RandomStreams, optional
        If an integer is provided, it will be used to seed the random number
        generators for the dropout or noise regularizers. If a theano
        RandomStreams object is provided, it will be used directly.
        Defaults to 13.
    input_dropout : float, optional
        Apply dropout to input layers, with this rate. Defaults to 0.
    hidden_dropout : float, optional
        Apply dropout to hidden layers, with this rate. Defaults to 0.
    output_dropout : float, optional
        Apply dropout to the output layer, with this rate. Defaults to 0.
    input_noise : float, optional
        Apply noise to input layers, with this standard deviation.
        Defaults to 0.
    hidden_noise : float, optional
        Apply noise to hidden layers, with this standard deviation.
        Defaults to 0.
    output_noise : float, optional
        Apply noise to the output layer, with this standard deviation.
        Defaults to 0.

    Returns
    -------
    regs : list of :class:`Regularizer`
        A list of regularizers to apply to the given network graph.
    '''
    if 'regularizers' in kwargs:
        regs = kwargs['regularizers']
        if isinstance(regs, (tuple, list)):
            return regs
        if isinstance(regs, dict):
            kwargs.update(regs)
    regs = []
    rng = kwargs.get('rng', 13)

    # BUGFIX(style): the helper was previously named `pattern` and then
    # shadowed by the `for pattern, w in spec.items()` loop variable below;
    # rename both so the helper stays valid throughout the function.
    def output_names(ls):
        return tuple(l.output_name for l in ls)

    inputs = output_names([l for l in graph.layers if isinstance(l, layers.Input)])
    hiddens = output_names(graph.layers[1:-1])
    outputs = output_names([graph.layers[-1]])

    # Create regularizers for the different types of canned dropout.
    spec = {inputs: kwargs.get('input_dropout', 0),
            hiddens: kwargs.get('hidden_dropout', 0),
            outputs: kwargs.get('output_dropout', 0)}
    spec.update(kwargs.get('dropout', {}))
    for patt, w in spec.items():
        if w:
            regs.append(BernoulliDropout(pattern=patt, weight=w, rng=rng))

    # Create regularizers for the different types of canned noise.
    spec = {inputs: kwargs.get('input_noise', 0),
            hiddens: kwargs.get('hidden_noise', 0),
            outputs: kwargs.get('output_noise', 0)}
    spec.update(kwargs.get('noise', {}))
    for patt, w in spec.items():
        if w:
            regs.append(GaussianNoise(pattern=patt, weight=w, rng=rng))

    # Create regularizers based on other (registered) keyword arguments.
    for key, value in kwargs.items():
        if Regularizer.is_registered(key):
            if not isinstance(value, dict):
                value = dict(weight=value)
            regs.append(Regularizer.build(key, **value))
    return regs
class Regularizer(util.Registrar(str('Base'), (), {})):
    r'''Base class for neural-network regularizers.

    A concrete regularizer typically implements either :func:`modify_graph`
    (to change the computation graph itself, e.g. dropout) or :func:`loss`
    (to add a penalty term to the model loss), but normally not both.

    Parameters
    ----------
    pattern : str
        Shell-style glob selecting the parameters or outputs this
        regularizer applies to.
    weight : float
        Strength of this regularizer in the overall model loss.

    Attributes
    ----------
    pattern : str
        Glob pattern of matching parameters/outputs.
    weight : float
        Regularization strength.
    '''

    def __init__(self, pattern='*', weight=0.):
        self.pattern = pattern
        self.weight = weight

    def log(self):
        '''Write a diagnostic description of this regularizer to the log.'''
        util.log('regularizer: {0.weight} * '
                 '{0.__class__.__name__}({0.pattern})', self)

    def modify_graph(self, outputs):
        '''Change the outputs of one layer while the graph is being built.

        Parameters
        ----------
        outputs : dict of Theano expressions
            Mutable map from fully-scoped output names of a single layer to
            their Theano expressions; any changes made here are retained by
            the caller.

        Notes
        -----
        Called at graph-construction time. For example,
        :class:`BernoulliDropout` replaces matching outputs with "masked"
        expressions whose elements are randomly zeroed on each evaluation.
        Regularizers that need to alter the structure of the computation
        graph should override this method.
        '''
        pass

    def loss(self, layers, outputs):
        '''Return a scalar term to add to the model loss (0 by default).

        Parameters
        ----------
        layers : list of :class:`theanets.layers.Layer`
            Layers of the model being regularized.
        outputs : dict of Theano expressions
            Map from fully-scoped names to every layer output in the graph.
        '''
        return 0.
class WeightL2(Regularizer):
    r'''Decay model weights with a squared (L2 / Frobenius) norm penalty.

    Adds the following term to the network loss:

    .. math::
        \frac{1}{|\Omega|} \sum_{i \in \Omega} \|W_i\|_F^2

    where :math:`\Omega` is the set of matching weight parameters and
    :math:`\|\cdot\|_F` is the Frobenius norm (sum of squared elements).
    Discouraging large weights tends to reduce overfitting.

    Examples
    --------
    Enable with the ``weight_l2`` or ``weight_decay`` keyword arguments at
    training or prediction time:

    >>> net = theanets.Regression(...)
    >>> net.train(..., weight_decay=0.1)
    >>> net.train(..., weight_decay=dict(weight=0.1, pattern='hid[23].w'))
    >>> net.predict(..., weight_decay=0.1)

    A scalar value gives the regularizer weight; a dict is passed as
    keyword arguments to the constructor.

    References
    ----------
    .. [Moo95] "A simple weight decay can improve generalization."
       NIPS 4, 950-957 (1995).
    '''

    __extra_registration_keys__ = ['weight_l2', 'weight_decay']

    def loss(self, layers, outputs):
        # Only penalize matrix-shaped (>= 2-d) parameters, i.e. weights.
        weights = [p for _, p in util.params_matching(layers, self.pattern)
                   if p.ndim > 1]
        if not weights:
            return 0
        total = sum((w * w).mean() for w in weights)
        return total / len(weights)
class WeightL1(Regularizer):
r'''Decay the weights in a model using an L1 norm penalty.
Notes
-----
This regularizer implements the :func:`loss` method to add the following
term to the network's loss function:
.. math::
\frac{1}{|\Omega|} \sum_{i \in \Omega} \|W_i\|_1
where :math:`\Omega` is a set of "matching" weight parameters, and the L1
norm :math`\|\cdot\|_1` is the sum of the absolute values of the elements in
the matrix.
This regularizer tends to encourage the weights in a model to be zero.
Nonzero weights are used only when they are able to reduce the other
components of the loss (e.g., the squared reconstruction error).
Examples
--------
This regularizer can be specified at training or test time by providing the
``weight_l1`` or ``weight_sparsity`` keyword arguments:
>>> net = theanets.Regression(...)
To use this regularizer at training time:
>>> net.train(..., weight_sparsity=0.1)
By default all (2-dimensional) weights in the model are penalized. To
include only some weights:
>>> net.train(..., weight_sparsity=dict(weight=0.1, pattern='hid[23].w'))
To use this | |
from .mixins import GroupRequiredMixin
from rest_framework.response import Response
from rest_framework.views import APIView
import datetime, time
import pandas as pd
import sys, os
import numpy as np
import re
from pandas.tseries.offsets import BDay
import scipy.stats
import igraph
# Prefer the vendored relative import; fall back to the installed package
# when this module is run outside its package. Only ImportError is expected
# here -- a bare `except:` would hide unrelated failures.
try:
    from .semutils.analytics.portfolio.metrics import calculate_drawdowns
except ImportError:
    from semutils.analytics.portfolio.metrics import calculate_drawdowns

# Directory of the pre-computed parquet/HDF data files served by these views.
APP_ROOT = os.path.realpath(os.path.dirname(__file__))
DataDir = os.path.join(APP_ROOT, 'data')
# #for debug
# from .mixins import GroupRequiredMixin
# class APIView(object):
# pass
# DataDir = 'data_prod'
class TradingView(GroupRequiredMixin, APIView):
    """Portfolio dashboard endpoint.

    GET returns equity curves for the portfolio and its benchmarks plus a
    table of summary statistics (returns, volatility, Sharpe/Sortino,
    drawdowns) built from pre-computed parquet files in ``DataDir``.
    """

    group_required = ['trading']

    def get(self, request, format=None):
        # Daily account history with precomputed PnL returns.
        ah = pd.read_parquet(os.path.join(DataDir, 'account_history.parquet'))
        ah['Portfolio_daily_return'] = ah.PnlReturn
        ah['Portfolio_equity_curve'] = (1 + ah.CumPnl)
        benchmarks = ['SP500', 'SP400', 'SP600']
        for b in benchmarks:
            b_data = pd.read_parquet(os.path.join(DataDir, b + '.parquet'))
            # Align benchmark daily returns to the account's trade dates.
            ah[b + '_daily_return'] = ah.TradeDate.map(b_data.IDX_PRICE.pct_change())
            ah[b + '_equity_curve'] = (1 + ah[b + '_daily_return']).cumprod()
        # '+' already yields a fresh list, so no `[x for x in benchmarks]`
        # copy is needed (was a redundant comprehension).
        stats_cols = ['Portfolio'] + benchmarks
        stats = pd.DataFrame(columns=stats_cols)
        for c in stats_cols:
            daily_ret = ah[c + '_daily_return']
            stats.loc['Cumulative Return (bps)', c] = "{0:.0f}".format((ah[c + '_equity_curve'].iloc[-1] - 1) * 10000)
            stats.loc['Winning Days (%)', c] = "{0:.0%}".format((daily_ret > 0).mean())
            stats.loc['Min Return (bps)', c] = "{0:.0f}".format(daily_ret.min() * 10000)
            stats.loc['Max Return (bps)', c] = "{0:.0f}".format(daily_ret.max() * 10000)
            stats.loc['Mean Return (bps)', c] = "{0:.0f}".format(daily_ret.mean() * 10000)
            stats.loc['Std Dev Return (bps)', c] = "{0:.0f}".format(daily_ret.std() * 10000)
            stats.loc['Skew', c] = "{0:.1f}".format(scipy.stats.skew(daily_ret))
            stats.loc['Kurtosis', c] = "{0:.1f}".format(scipy.stats.kurtosis(daily_ret))
            stats.loc['Volatility - Annualized (%)', c] = "{0:.1%}".format(np.sqrt(252) * daily_ret.std())
            stats.loc['Sharpe - Annualized', c] = "{0:.1f}".format(np.sqrt(252) * daily_ret.mean() / daily_ret.std())
            # Sortino uses downside deviation: returns clipped at 0.
            stats.loc['Sortino - Annualized', c] = "{0:.1f}".format(
                np.sqrt(252) * daily_ret.mean() / daily_ret.clip(upper=0).std())
            drawdown_series, max_drawdown, drawdown_dur, max_drawdown_dur = calculate_drawdowns(ah[c + '_equity_curve'])
            stats.loc['Max Drawdown (bps)', c] = "{0:.0f}".format(max_drawdown * 10000)
            stats.loc['Max Drawdown Days', c] = "{0:.0f}".format(max_drawdown_dur)
        stats.index.name = 'Metric'
        StartingDate = ah.TradeDate.iloc[0]
        EndingDate = ah.TradeDate.iloc[-1]
        # Charts expect epoch seconds; convert each timestamp's timetuple.
        ah['TradeDate'] = ah['TradeDate'].apply(lambda x: time.mktime(x.timetuple()))
        stats.reset_index(inplace=True)
        # build context
        context = {'StartingDate': StartingDate.strftime("%m/%d/%Y"),
                   'EndingDate': EndingDate.strftime("%m/%d/%Y"),
                   'StartingNAV': '${:,}'.format(int(round(ah.SOD_Nav.iloc[0], 0))),
                   'EndingNAV': '${:,}'.format(int(round(ah.EOD_Nav.iloc[-1], 0))),
                   'TimeWeightedReturn': '{:.2%}'.format(ah.Portfolio_equity_curve.iloc[-1] - 1),
                   'chart_data_strategy': ah[['TradeDate', 'Portfolio_equity_curve']].values.tolist(),
                   'chart_data_benchmark': ah[['TradeDate', 'SP500_equity_curve']].values.tolist(),
                   'benchmark_name': 'SP500',
                   'stats': stats.to_dict(orient='records'),
                   'file_type': "html",
                   "title": "Dashboard"}
        return Response(context)
class TradingExposuresView(GroupRequiredMixin, APIView):
    """Latest gross/net portfolio exposure broken down by sector and industry."""
    group_required = ['trading']
    def get(self, request, format=None):
        ## ticker matching doesn't work well. Needs to be converted to CUSIP
        pos = pd.read_parquet(os.path.join(DataDir, 'nav_portfolio.parquet'))
        pos = pos.drop(['Sector'],axis=1)
        # Re-attach sector/industry classification from the security master.
        sm = pd.read_parquet(os.path.join(DataDir, 'sec_master.parquet'))
        pos = pos.merge(sm, on='sec_id', how='left')
        # Per-day NAV = sum of market values; used to turn values into weights.
        daily_nav = pos.groupby('data_date').MarketValueBase.sum()
        pos['nav'] = pos.data_date.map(daily_nav)
        #######NEED TO FIX CASH ############
        pos['weight'] = pos.MarketValueBase / pos.nav
        pos['weight_abs'] = pos.weight.abs()
        # Industry level: gross = sum of |weights|, net = signed sum.
        gross_ind = pos.groupby(['data_date', 'Sector', 'Industry']).weight_abs.sum().to_frame(
            'Gross')
        net_ind = pos.groupby(['data_date', 'Sector', 'Industry']).weight.sum().to_frame(
            'Net_unadj')
        net_ind = net_ind.join(gross_ind)
        # Net exposure expressed as a fraction of gross.
        net_ind['Net'] = net_ind['Net_unadj'] / net_ind['Gross']
        # 5/20 trading-day changes in net exposure (~1 week / ~1 month).
        net_ind['Net - 1wk delta'] = net_ind.groupby(level=['Sector', 'Industry'])['Net'].diff(
            5).fillna(0)
        net_ind['Net - 1mo delta'] = net_ind.groupby(level=['Sector', 'Industry'])['Net'].diff(
            20).fillna(0)
        net_ind.reset_index(level=['Sector', 'Industry'], drop=False, inplace=True)
        # Same aggregation at the sector level, labelled Industry='All'.
        gross_sec = pos.groupby(['data_date', 'Sector']).weight_abs.sum().to_frame('Gross')
        net_sec = pos.groupby(['data_date', 'Sector']).weight.sum().to_frame('Net_unadj')
        net_sec = net_sec.join(gross_sec)
        net_sec['Net'] = net_sec['Net_unadj'] / net_sec['Gross']
        net_sec['Net - 1wk delta'] = net_sec.groupby(level=['Sector'])['Net'].diff(5).fillna(0)
        net_sec['Net - 1mo delta'] = net_sec.groupby(level=['Sector'])['Net'].diff(20).fillna(0)
        net_sec.reset_index(level=['Sector'], drop=False, inplace=True)
        net_sec['Industry'] = 'All'
        # Keep only the most recent snapshot.
        max_date = pos.data_date.max()
        exposures = pd.concat([net_ind.loc[max_date], net_sec.loc[max_date]], ignore_index=True)
        exposures = exposures.drop('Net_unadj', axis=1)
        # build context
        context = {'data': exposures.to_dict(orient='records')}
        return Response(context)
class SignalsLatestView(APIView):
    """Latest per-ticker signals with basic classification columns."""

    def get(self, request, format=None):
        filepath = os.path.join(DataDir, 'equities_signals_latest.parquet')
        signals = pd.read_parquet(filepath)
        signals = signals[
            ['data_date', 'ticker', 'market_cap', 'Sector', 'Industry', 'SignalConfidence',
             'SignalDirection']]
        # Explicit assignment instead of chained `inplace=True` fillna:
        # inplace on a selected column can silently operate on a copy
        # (pandas SettingWithCopy), leaving NaNs in the frame.
        signals['market_cap'] = signals['market_cap'].fillna(0)
        # Drop rows without a sector classification.
        signals = signals[signals.Sector.notnull()]
        # build context
        context = {'data': signals.to_dict(orient='records')}
        return Response(context)
class SignalsSecIndView(APIView):
    """Sector/industry aggregated signals, excluding blank and index rows."""

    def get(self, request, format=None):
        path = os.path.join(DataDir, 'equities_signals_sec_ind.parquet')
        frame = pd.read_parquet(path)
        keep = ~frame.Sector.isin(['', 'Index'])
        frame = frame[keep]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsSectorTableView(APIView):
    """Latest signals filtered to the sector given in the POST body."""

    def post(self, request, format=None):
        sector = request.data['sector']
        path = os.path.join(DataDir, 'equities_signals_latest.parquet')
        # NOTE: could push the filter into the read via a `where` clause.
        frame = pd.read_parquet(path)
        frame = frame[frame.Sector == sector]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsIndustryTableView(APIView):
    """Latest signals filtered to the industry given in the POST body."""

    def post(self, request, format=None):
        industry = request.data['industry']
        path = os.path.join(DataDir, 'equities_signals_latest.parquet')
        # NOTE: could push the filter into the read via a `where` clause.
        frame = pd.read_parquet(path)
        frame = frame[frame.Industry == industry]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsTickerView(APIView):
    """Signal history for one ticker, optionally with SEC insider-trading data.

    POST body: ``ticker`` (string) and ``include_it_data`` (truthy flag).
    Responds with ``signal_data_found: False`` when the ticker is unknown,
    ambiguous, or has no signal rows.
    """

    def post(self, request, format=None):
        ticker = request.data['ticker']
        include_it_data = request.data['include_it_data']
        ticker = ticker.upper()
        ## find company name and cik
        sm = pd.read_parquet(os.path.join(DataDir, 'sec_master.parquet'))
        sm = sm[sm.ticker == ticker]
        if len(sm) == 1:
            comp_name = sm.iloc[0].proper_name
            cik = sm.iloc[0].cik
        else:
            # Ticker unknown, or matches multiple security-master rows.
            return Response({'signal_data_found': False})
        filepath = os.path.join(DataDir, 'equities_signals_full.hdf')
        signal_data_columns = ['data_date', 'market_cap', 'ticker', 'Sector', 'Industry', 'close',
                               'adj_close', 'SignalConfidence']
        signals = pd.read_hdf(filepath, 'table', where='ticker=="%s"' % ticker)[signal_data_columns]
        signals = signals[signals.SignalConfidence.notnull()]
        ## Check if signal data exists
        if not len(signals):
            return Response({'signal_data_found': False})
        # build context
        context = {'ticker': ticker, 'Name': comp_name, 'CIK': cik,
                   'Sector': signals.Sector.iloc[-1],
                   'Industry': signals.Industry.iloc[-1],
                   'Market Cap': signals.market_cap.iloc[-1],
                   'signal_data': signals[['data_date', 'adj_close', 'SignalConfidence']].to_dict(orient='records'),
                   'signal_data_found': True}
        if include_it_data:
            if pd.isnull(cik):
                # No CIK means no SEC filings lookup is possible.
                # (Removed a dead `it_data = pd.DataFrame()` assignment that
                # was never read.)
                context['it_data_found'] = False
                return Response(context)
            # get cik forms
            filepath = os.path.join(DataDir, 'sec_forms_ownership_source_full.hdf')
            forms = pd.read_hdf(filepath, 'table', where='IssuerCIK == "%s"' % cik)
            forms.sort_values('AcceptedDate', ascending=False, inplace=True)
            # Keep only rows flagged as a valid purchase or sale.
            forms = forms[(forms.valid_purchase + forms.valid_sale) != 0]
            forms['Direction'] = 'Buy'
            forms['Direction'] = forms.Direction.where(forms.valid_purchase == 1, 'Sell')
            # NOTE(review): LDG/HO/RB transaction types are excluded --
            # presumably non-market transactions; confirm against the
            # forms-ingestion pipeline.
            forms = forms[~forms.TransType.isin(['LDG', 'HO', 'RB'])]
            cols = ['SECAccNumber', 'URL', 'AcceptedDate', 'FilerName', 'InsiderTitle',
                    'Director', 'TenPercentOwner', 'TransType', 'DollarValue', 'Direction']
            forms = forms[cols].copy()
            forms.reset_index(inplace=True, drop=True)
            forms['tableIndex'] = forms.index
            forms['AcceptedDateDate'] = pd.to_datetime(forms.AcceptedDate.apply(lambda x: x.date()))
            # Join filings onto the signal dates they were accepted on.
            graph_markers = signals.merge(forms, left_on='data_date', right_on='AcceptedDateDate')
            def add_count(x):
                # Sequential 0..n-1 counter within a group.
                return (pd.Series(index = x.index,data = range(len(x))))
            # Number same-day, same-direction filings so chart markers stack.
            graph_markers['marker_count'] = graph_markers.groupby(['data_date','Direction'],as_index=False,group_keys=False).apply(lambda x: add_count(x))
            graph_markers['marker_count'] = graph_markers['marker_count'] + 1
            graph_markers = graph_markers[
                ['data_date', 'tableIndex', 'FilerName', 'TransType', 'DollarValue', 'Direction','adj_close','marker_count']]
            graph_markers.fillna(0, inplace=True)
            forms.fillna(0, inplace=True)
            context['graph_markers'] = graph_markers.to_dict(orient='records')
            context['forms_table'] = forms.to_dict(orient='records')
        return Response(context)
class CorrelationView(APIView):
    """
    Serve cross-equity correlation analytics from pre-computed files.

    POST body (``request.data``) must contain:
      - ``aggregation``: bar size (minutes) used when the files were built
      - ``lookback``: lookback label embedded in the file names
      - ``corr_threshold``: minimum correlation magnitude to keep
      - ``graph``: falsy -> return the dislocations table,
        truthy -> return minimum-spanning-tree graph data (nodes/edges)

    NOTE(review): reads CSVs under ``DataDir + '/correlation_network_files/'``;
    presumably produced by an offline batch job -- confirm they are kept fresh.
    """

    def post(self, request, format=None):
        """Return either a dislocations table or MST graph data as JSON."""
        aggregation = request.data['aggregation']
        lookback = request.data['lookback']
        corr_threshold = request.data['corr_threshold']
        graph = request.data['graph']
        if not graph:
            # Table mode: load pair "dislocations" and keep rows whose
            # correlation weight clears the threshold.
            dislocations = pd.read_csv(DataDir + '/correlation_network_files/dislocations_' + str(
                aggregation) + 'minute_' + lookback + '_lookback.csv')
            dislocations = dislocations[dislocations.weight >= corr_threshold].reset_index(drop=True)
            dislocations = dislocations[['ticker1', 'ticker2', 'weight',
                                         'comp1_H_1day_abs_return', 'comp2_H_1day_abs_return', 'delta_1day',
                                         'comp1_H_3day_abs_return', 'comp2_H_3day_abs_return', 'delta_3day',
                                         'comp1_H_5day_abs_return', 'comp2_H_5day_abs_return', 'delta_5day']]
            # Order rows by absolute 5-day delta, largest first.
            dislocations = dislocations.reindex(dislocations.delta_5day.abs().sort_values(ascending=False).index)
            context = {'data': dislocations.to_dict(orient='records')}
        else:
            # Graph mode: build a minimum spanning tree over the correlation
            # matrix and return node/edge records for the front-end.
            df_corrmat = pd.read_csv(DataDir + '/correlation_network_files/corr_matrix_' + str(
                aggregation) + 'minute_' + lookback + '_lookback.csv').set_index(keys=['Unnamed: 0'], drop=True)
            df_nodes = pd.read_csv(DataDir + '/correlation_network_files/node_info.csv')
            # Assign an integer node_id to every ticker in the matrix.
            node_list = pd.DataFrame(df_corrmat.index.tolist()).reset_index(drop=False).rename(
                columns={'index': 'node_id', 0: 'ticker'})
            # Flatten the square matrix into (ticker1, ticker2, weight) rows.
            df_list = df_corrmat.unstack()
            df_list = pd.DataFrame(df_list, columns=['weight'])
            df_list.index.names = ['ticker1', 'ticker2']
            df_list = df_list.reset_index(drop=False)
            # Drop self-correlations (the diagonal, weight == 1).
            df_list = df_list[df_list.weight != 1].copy()
            # Replace ticker labels with their integer node ids.
            df_list = pd.merge(df_list, node_list, left_on=['ticker1'], right_on=['ticker'], how='outer').drop(
                labels=['ticker1', 'ticker'], axis=1).rename(columns={'node_id': 'node1'})
            df_list = pd.merge(df_list, node_list, left_on=['ticker2'], right_on=['ticker'], how='outer').drop(
                labels=['ticker2', 'ticker'], axis=1).rename(columns={'node_id': 'node2'})
            df_list = df_list[['node1', 'node2', 'weight']].copy()
            # Keep edges whose magnitude clears the threshold (both
            # strongly positive and strongly negative correlations qualify).
            df_list = df_list[(df_list.weight >= corr_threshold) | (df_list.weight <= -1 * corr_threshold)].copy()
            edge_list = df_list[['node1', 'node2']].values.tolist()
            g = igraph.Graph()
            g.add_vertices(node_list.node_id.max() + 1)
            g.add_edges(edge_list)
            # MST is computed on |correlation| so sign does not matter.
            weight_list = [abs(i) for i in df_list.weight.tolist()]
            g.es['weight'] = weight_list
            mst_edge_ids = g.spanning_tree(weights=weight_list, return_tree=False)
            mst_edges_list = [g.get_edgelist()[i] for i in mst_edge_ids]
            mst_edges_weights = [g.es['weight'][i] for i in mst_edge_ids]
            mst_edges = pd.DataFrame(mst_edges_list, columns=['node1', 'node2'])
            mst_edges = pd.merge(mst_edges, pd.DataFrame(mst_edges_weights, columns=['weight']), left_index=True,
                                 right_index=True)
            # Map node ids back to ticker symbols for both endpoints.
            mst_edges = pd.merge(mst_edges, node_list, left_on='node1', right_on='node_id').drop(
                labels=['node_id', 'node1'], axis=1)
            mst_edges = pd.merge(mst_edges, node_list, left_on='node2', right_on='node_id').drop(
                labels=['node_id', 'node2'], axis=1)
            mst_edges = mst_edges.rename(columns={'ticker_x': 'ticker1', 'ticker_y': 'ticker2'})
            mst_edges = mst_edges[['ticker1', 'ticker2', 'weight']].copy()
            # mst_edges = pd.merge(mst_edges, df_nodes, left_on='ticker1', right_on='ticker').rename(columns={'comp_name':'comp_name1','Sector':'comp1_sector','Industry':'comp1_industry','Industry Group':'comp1_industry_group'}).drop(labels=['ticker'], axis=1)
            # mst_edges = pd.merge(mst_edges, df_nodes, left_on='ticker2', right_on='ticker').rename(columns={'comp_name':'comp_name2','Sector':'comp2_sector','Industry':'comp2_industry','Industry Group':'comp2_industry_group'}).drop(labels=['ticker'], axis=1)
            # Restrict the node metadata table to tickers present in the MST.
            mst_nodes = list(set(mst_edges.ticker1.unique().tolist() + mst_edges.ticker2.unique().tolist()))
            mst_nodes = df_nodes[df_nodes.ticker.isin(mst_nodes)].reset_index(drop=True)
            # mst_edges.to_csv('./sp500_mst_edges_minute.csv', index=False)
            # mst_nodes.to_csv('./sp500_mst_nodes_minute.csv', index=False)
            nodes, edges = self.create_graph_data(mst_nodes, mst_edges)
            context = {'nodes': nodes.to_dict(orient='records'),
                       'edges': edges.to_dict(orient='records')}
        return Response(context)

    def create_graph_data(self, nodes, edges):
        """
        Convert MST node/edge frames into display-ready graph records.

        :param nodes: DataFrame with 'ticker', 'comp_name', 'Sector',
            'Industry', 'Industry Group' columns.
        :param edges: DataFrame with 'ticker1', 'ticker2', 'weight' columns.
        :return: ``(nodes, edges)`` DataFrames augmented with display fields.

        NOTE(review): the output field names ('title', 'from', 'to', 'width',
        'color') look like the vis.js network format -- confirm the consumer.
        """
        # Sector -> background color for node rendering. Sectors missing
        # from this map get NaN from .map() -- presumably never happens for
        # the S&P 500 universe; verify against node_info.csv.
        colors = {'Industrials': 'LightBlue',
                  'Health Care': 'PaleGoldenRod',
                  'Financials': 'Crimson',
                  'Consumer Staples': 'Lavender',
                  'Consumer Discretionary': 'Wheat',
                  'Utilities': 'GreenYellow',
                  'Information Technology': 'GoldenRod',
                  'Energy': 'WhiteSmoke',
                  'Materials': 'LightSlateGray',
                  'Real Estate': 'Lime',
                  'Telecommunication Services': 'Gold'}
        nodes = nodes.drop('Industry Group', axis=1)
        nodes = nodes.rename(columns={'ticker': 'label', 'comp_name': 'name'})
        # Hover tooltip shown for each node (HTML fragment).
        nodes['title'] = nodes.apply(lambda x: 'Name: %s<br>Sec: %s<br> ind: %s' % (x['name'], x.Sector, x.Industry),
                                     axis=1)
        nodes['color'] = nodes.Sector.map(colors)
        # Placeholder coordinates; layout is presumably done client-side.
        nodes['x'] = 1
        nodes['y'] = nodes['x']
        nodes['id'] = nodes.index + 1
        nodes['radius'] = 10
        nodes['color'] = nodes.color.apply(lambda x: {'background': x})
        # Translate ticker endpoints into node ids for the edge list.
        edges['from'] = edges.ticker1.map(nodes.set_index('label')['id'])
        edges['to'] = edges.ticker2.map(nodes.set_index('label')['id'])
        edges = edges[['from', 'to', 'weight']].copy()
        # 'weight' is re-labelled 'title' so it doubles as the edge tooltip.
        edges.columns = ['from', 'to', 'title']
        edges.title = edges.title.round(2)
        # Edge thickness scales with correlation strength.
        edges['width'] = edges.title * 10
        edges['id'] = edges.index + 1
        edges['color'] = 'black'
        edges['color'] = edges.color.apply(lambda x: {'color': x})
        return (nodes, edges)
class NetworkView(APIView):
def get(self, request, format=None):
colors = {"Computer and Technology": "LightBlue",
"Medical": "PaleGoldenRod",
"Transportation": "Chocolate",
"Business Services": "Crimson",
"Utilities": "Lavender",
"Finance": "Wheat",
"Industrial PRODUCTS": "GreenYellow",
"Multi-Sector Conglomerates": "GoldenRod",
"Auto-Tires-Trucks": "WhiteSmoke",
"Construction": "LightSlateGray",
"Oils-Energy": "Lime",
"Basic Materials": "Magenta",
"Retail-Wholesale": "Gold",
"Consumer Staples": "Orange",
"Aerospace": "Peru",
"Consumer Discretionary": "MintCream"}
nodes = pd.read_csv(DataDir + '/sp500_mst_nodes.csv')
nodes = nodes.drop('zacks_x_ind_desc', axis=1)
nodes = nodes.rename(columns={'ticker': 'label', 'comp_name': 'name', 'Sector': 'Sector',
'Industry': 'industry'})
nodes['title'] = nodes.apply(lambda x: 'Name: %s<br>Sec: %s<br> ind: %s' % (x['name'], x.Sector, x.industry),
axis=1)
nodes['color'] = nodes.Sector.map(colors)
nodes['x'] = 1
nodes['y'] = nodes['x']
nodes['id'] = nodes.index + 1
nodes['radius'] | |
0xFC, 0x3F, 0xEF, 0xFF, 0xFF, 0x81, 0xF0,
0x01, 0x00, 0x00, 0x00, 0x00, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x7F, 0x01, 0xF8, 0x00, 0x80, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x03, 0xFF,
0xFF, 0xFF, 0xFC, 0x7F, 0xFC, 0x3F, 0xFE, 0x00, 0xC0, 0x00, 0x00, 0x00,
0x00, 0x0F, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x0F, 0x80,
0x03, 0x80, 0x00, 0x7C, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0,
0xFF, 0xC0, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0xFF, 0xF1,
0xFF, 0xFF, 0xFF, 0xFE, 0x07, 0xC0, 0x00, 0x0F, 0x00, 0x03, 0x80, 0x00,
0xFC, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x1F, 0xC0, 0x00, 0x00, 0x00,
0x1C, 0x00, 0x80, 0x3F, 0xF0, 0x7F, 0xFE, 0x1F, 0xFC, 0x1F, 0xFF, 0x03,
0xFF, 0x0F, 0xC0, 0x00, 0x1F, 0x00, 0x0F, 0x00, 0x01, 0xFC, 0x1F, 0xFF,
0xFF, 0xFF, 0xFF, 0xF8, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x3F, 0x03, 0xFF,
0xFF, 0xE0, 0xFF, 0x00, 0x1F, 0xC0, 0x07, 0xF8, 0x1F, 0xFF, 0xFF, 0xFF,
0xFF, 0xE0, 0x03, 0xE0, 0x00, 0x78, 0x00, 0x3F, 0x83, 0xFF, 0xFF, 0xFF,
0xDF, 0xFF, 0x00, 0x38, 0x00, 0x00, 0x00, 0x03, 0xFC, 0xFF, 0xFF, 0xFF,
0xF0, 0x01, 0xC0, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFE, 0x00, 0x00, 0x78, 0x00, 0x1F, 0x00, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x7F, 0x03, 0xFF, 0xE7, 0xFF, 0xF0, 0x7F, 0x00,
0x7F, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFC, 0x00, 0x1E, 0x01, 0x0F, 0xF0,
0xFF, 0xF0, 0x3F, 0xE0, 0x20, 0x00, 0x03, 0xE0, 0x03, 0xFF, 0xFF, 0xFF,
0xC3, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8,
0x00, 0x00, 0x00, 0x7E, 0x01, 0xFF, 0xFF, 0xFF, 0xE1, 0xFF, 0x00, 0x3C,
0x03, 0xFF, 0xFE, 0x01, 0xF8, 0x00, 0x78, 0x1F, 0xFF, 0xC0, 0xFF, 0x01,
0xFF, 0x00, 0xFC, 0x1E, 0x0F, 0xE0, 0xFF, 0xDE, 0xFF, 0x83, 0x80, 0x00,
0x0E, 0x00, 0x03, 0xF0, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x10,
0x0F, 0xF0, 0xF8, 0xFE, 0x07, 0xF0, 0x00, 0x00, 0x18, 0x78, 0x79, 0xFE,
0x0C, 0x3F, 0x06, 0x00, 0x01, 0xF0, 0xF0, 0x7F, 0xFF, 0xE1, 0x81, 0xE1,
0xE0, 0x00, 0x07, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xC0, 0x03, 0xFF, 0xF0,
0xFC, 0x3F, 0x1E, 0x00, 0x00, 0x1C, 0x1F, 0x0F, 0xFF, 0xFE, 0x3C
),
"R": (
0xFF, 0xF8, 0x07, 0x0F, 0xFF, 0xF0, 0x00, 0x00, 0x01, 0xF0, 0xFE, 0x3F,
0x87, 0xF3, 0xFF, 0xFC, 0x00, 0x00, 0x1F, 0xFF, 0x00, 0x01, 0xFF, 0xFF,
0xC0, 0x00, 0x01, 0xF0, 0x00, 0x07, 0xFF, 0xFE, 0x00, 0x0F, 0xFF, 0xFF,
0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0x1F, 0xFF, 0xE0,
0x00, 0x00, 0x03, 0xE1, 0xFF, 0xFF, 0x1F, 0xFF, 0xFF, 0xF8, 0x00, 0x00,
0x3F, 0xFF, 0x00, 0x07, 0xFF, 0xFF, 0x00, 0x00, 0x03, 0xC0, 0x00, 0x1F,
0xFF, 0xFF, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0x80, 0x00, 0x00, 0x1F, 0x87, 0xF3, 0xFC,
0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0xFC, 0x00, 0x0F, 0xFF, 0xFE,
0x00, 0x00, 0x07, 0x80, 0x00, 0x3F, 0xFF, 0xFE, 0x00, 0x7F, 0xFF, 0xFF,
0xFF, 0xF0, 0x00, 0x00, 0x03, 0xFF, 0xFF, 0x00, 0x01, 0xFF, 0xFE, 0x00,
0x00, 0x00, 0x7E, 0x1F, 0xCF, 0xF1, 0xFC, 0xFF, 0xFF, 0x80, 0x00, 0x01,
0xFF, 0xF0, 0x00, 0x7F, 0xFF, 0xFC, 0x00, 0x00, 0x7E, 0x00, 0x01, 0xFF,
0xFF, 0xF8, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF,
0xFE, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0xF8, 0x7F, 0xFF, 0xE7,
0xF3, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xF0,
0x00, 0x00, 0x68, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x03, 0xFF, 0xFF, 0xFF,
0xFF, 0xE0, 0x00, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xF8, 0x00,
0x00, 0x00, 0xF0, 0xFF, 0xFF, 0xCF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x03,
0xFF, 0xE0, 0x00, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0xF0, 0x00, 0x07, 0xFF,
0xFF, 0xFC, 0x07, 0xFF, 0xFF, 0xE7, 0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF,
0xFC, 0x00, 0x07, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0xF8, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x01, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xF0,
0x00, 0x00, 0x30, 0x00, 0x01, 0xFF, 0xFF, 0xF8, 0x01, 0xFF, 0xFF, 0xBF,
0xFF, 0xE0, 0x00, 0x00, 0x07, 0xFF, 0xFC, 0x00, 0x07, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0xF8, 0x7F, 0xFF, 0xC7, 0xFB, 0xFF, 0xFE, 0x00, 0x00, 0x01,
0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xF8, 0x00, 0x03, 0xFF,
0xFF, 0xC0, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0xFF,
0xF0, 0x00, 0x3F, 0xFF, 0xE0, 0x00, 0x00, 0x03, 0xE1, 0xFE, 0xFF, 0x1F,
0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x1F, 0xFE, 0x00, 0x07, 0xFF, 0xFF, 0x00,
0x00, 0x03, 0xC0, 0x00, 0x0F, 0xFF, 0xFF, 0x80, 0x1F, 0xFF, 0xFF, 0xFF,
0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x7F, 0xFF, 0xC0, 0x00,
0x00, 0x0F, 0x83, 0xF1, 0xFE, 0x3F, 0x9F, 0xFF, 0xE0, 0x00, 0x00, 0xFF,
0xFC, 0x00, 0x0F, 0xFF, 0xFE, 0x00, 0x00, 0x0F, 0x80, 0x00, 0x3F, 0xFF,
0xFE, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x7F, 0xFF,
0xC0, 0x00, 0x7F, 0xFF, 0xC0, 0x00, 0x00, 0x0F, 0x87, 0xFB, 0xFE, 0x7F,
0xFF, 0xFF, 0xE0, 0x00, 0x00, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0x00,
0x00, 0x0F, 0x00, 0x00, 0x3F, 0xFF, 0xFC, 0x00, 0x0F, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0xFF, 0xFF, 0x80, 0x00,
0x00, 0x1F, 0x0F, 0xF7, 0xFC, 0x7F, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0xFF,
0xF8, 0x00, 0x1F, 0xFF, 0xFC, 0x00, 0x00, 0x1F, 0x00, 0x00, 0xFF, 0xFF,
0xFC, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x01, 0xFF, 0xFF,
0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x3F, 0x1F, 0xEF, 0xF8, 0xFF,
0xFF, 0xFF, 0x80, 0x00, 0x01, 0xFF, 0xF8, 0x00, 0x07, 0xFF, 0xFF, 0x80,
0x00, 0x00, 0x0E, 0x00, 0x6F, 0xFF, 0xFF, 0x80, 0x00, 0xFF, 0xFF, 0xFF,
0xF0, 0x00, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x01, 0xFF, 0xFF, 0x00, 0x00,
0x00, 0x7F, 0x1F, 0xCF, 0xF1, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xFF,
0xF0, 0x00, 0x7F, 0xFF, 0xF0, 0x00, 0x00, 0x3C, 0x00, 0x01, 0xFF
),
"L": (
0xFE, 0xF0, 0x00, 0x00, 0x00, 0xF0, 0xFF, 0xFF, 0xFF, 0xFF, 0x06, 0x00,
0x00, 0x00, 0x1C, 0x1F, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00,
0x02, 0x07, 0xEF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0x00, 0x00, 0x00, 0x00,
0x03, 0x8F, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x01, 0xE1, 0xFF,
0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x38, 0x3F, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x9F, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x1F, 0xFF, 0xFF, 0xFF, 0xFC, 0x80,
0x00, 0x00, 0x03, 0x83, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00,
0x70, 0x7F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F,
0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x07, 0x9F, 0xFF, 0xFF, 0xFF,
0xF8, 0x18, 0x00, 0x00, 0x00, 0x70, 0x7F, 0xFF, 0xFF, 0xFF, 0xF0, 0x00,
0x00, 0x00, 0x00, 0x08, 0x1F, 0xBF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00,
0x00, 0x00, 0x00, 0x00, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
0x03, 0x07, 0xFF, 0xFF, 0xFF, 0xFC, 0x1C, 0x00, 0x00, 0x00, 0x38, 0x7F,
0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x08, 0x1F, 0xCF, 0xFF,
0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x03, 0xCF, 0xFF, 0xFF,
0xFF, 0xFE, 0x00, 0x00, 0x00, 0x03, 0xC1, 0xFF, 0xFF, 0xFF, 0xFE, 0x00,
0x00, 0x00, 0x00, 0x38, 0x3F, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00,
0x00, 0x04, 0x0F, 0xDF, 0xFF, 0xFF, 0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00,
0x00, 0x07, 0x8F, 0xFF, 0xFF, 0xFF, 0xFE, 0x00, 0x00, 0x00, 0x01, 0xC3,
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x0E, 0x1F, 0xFF, 0xFF,
0xFF, 0xFE, 0x00, 0x00, 0x00, 0x00, 0x03, 0x07, 0xFF, 0xFF, 0xFF, 0xFF,
0xF9, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFE,
0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x00, 0x00,
0x00, 0x07, 0x07, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01,
0x83, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x7D, 0xFF, 0xFF,
0xFF, 0xFF, 0xE0, 0x00, 0x00, 0x00, 0x01, 0xC1, 0xFF, | |
: "namednumber",
"number" : "1"
},
"v2m" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"routerDomainIpIgmpVersion" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.33.2.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"none" : {
"nodetype" : "namednumber",
"number" : "0"
},
"igmp_v1" : {
"nodetype" : "namednumber",
"number" : "1"
},
"igmp_v2" : {
"nodetype" : "namednumber",
"number" : "2"
},
"igmp_v3" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"routerDomainIpDvmrp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.33.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"diffservSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34",
}, # node
"diffservState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"diffservMapTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.2",
"status" : "current",
"description" :
"""""",
}, # table
"diffservMapEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.2.1",
"status" : "current",
"linkage" : [
"diffservMapDscp",
],
"description" :
"""An entry in diffservMapTable.""",
}, # row
"diffservMapDscp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""0-63""",
}, # column
"diffservMapPriority" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""0-7""",
}, # column
"diffservPortTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.3",
"status" : "current",
"description" :
"""""",
}, # table
"diffservPortEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.3.1",
"status" : "current",
"linkage" : [
"dot1dBasePort",
],
"description" :
"""An entry in diffservPortTable.""",
}, # row
"diffservPortState" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.34.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"clusterSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35",
}, # node
"clusterManager" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1",
}, # node
"clusterMaxNumOfManager" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"clusterManagerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.2",
"status" : "current",
"description" :
"""""",
}, # table
"clusterManagerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"clusterManagerVid",
],
"description" :
"""An entry in clusterManagerTable.""",
}, # row
"clusterManagerVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterManagerName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"clusterManagerRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.1.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"clusterMembers" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2",
}, # node
"clusterMaxNumOfMember" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"clusterMemberTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.2.2",
"status" : "current",
"description" :
"""""",
}, # table
"clusterMemberEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.2.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"clusterMemberMac",
],
"description" :
"""An entry in clusterMemberTable.""",
}, # row
"clusterMemberMac" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "MacAddress"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"clusterMemberName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterMemberModel" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterMemberPassword" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"clusterMemberRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.2.2.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"clusterCandidates" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.3",
}, # node
"clusterCandidateTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.3.1",
"status" : "current",
"description" :
"""""",
}, # table
"clusterCandidateEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.3.1.1",
"status" : "current",
"linkage" : [
"clusterCandidateMac",
],
"description" :
"""An entry in clusterCandidateTable.""",
}, # row
"clusterCandidateMac" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.3.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterCandidateName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.35.3.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterCandidateModel" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.3.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"clusterStatus" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4",
}, # node
"clusterStatusRole" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"none" : {
"nodetype" : "namednumber",
"number" : "0"
},
"manager" : {
"nodetype" : "namednumber",
"number" : "1"
},
"member" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"clusterStatusManager" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"clsuterStatusMaxNumOfMember" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"clusterStatusMemberTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4.4",
"status" : "current",
"description" :
"""""",
}, # table
"clusterStatusMemberEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.35.4.4.1",
"status" : "current",
"linkage" : [
"clusterStatusMemberMac",
],
"description" :
"""An entry in clusterStatusMemberTable.""",
| |
# otter/worker/launch_server_v1.py
"""
Initial implementation of a version one launch_server_v1 config.
This launch config worker is responsible for:
0) Generating server name and injecting our AS metadata
1) Starting a server
2) Adding the server to a load balancer.
On delete, this worker:
1) Removes the the server from the load balancer(s)
2) Deletes the server
Also no attempt is currently being made to define the public API for
initiating a launch_server job.
"""
import json
import re
from functools import partial
from urllib import urlencode
from pyrsistent import freeze, thaw
from toolz import comp
from twisted.internet.defer import (
DeferredLock, DeferredSemaphore, gatherResults, inlineCallbacks,
returnValue)
from twisted.internet.task import deferLater
from twisted.python.failure import Failure
from otter.auth import public_endpoint_url
from otter.convergence.composition import (
json_to_LBConfigs,
prepare_server_launch_config)
from otter.convergence.model import _servicenet_address
from otter.convergence.steps import UnexpectedServerStatus, set_server_name
from otter.util import logging_treq as treq
from otter.util.config import config_value
from otter.util.deferredutils import delay, log_with_time, retry_and_timeout
from otter.util.hashkey import generate_server_name
from otter.util.http import (
APIError, RequestError, append_segments, check_success, headers,
raise_error_on_code, wrap_request_error)
from otter.util.retry import (
TransientRetryError, compose_retries, exponential_backoff_interval,
random_interval, repeating_interval, retry, retry_times,
terminal_errors_except, transient_errors_except)
from otter.worker._rcv3 import add_to_rcv3, remove_from_rcv3
# Maximum number of attempts when adding/removing nodes on a load balancer.
LB_MAX_RETRIES = 10
# [min, max] bounds (seconds) from which a random retry interval is drawn.
LB_RETRY_INTERVAL_RANGE = [10, 15]
class ServerDeleted(Exception):
    """
    Raised when a server has vanished from Nova unexpectedly.

    The offending server's ID is kept on the instance so callers can
    identify which server disappeared.
    """

    def __init__(self, server_id):
        # Build the message first, then delegate to Exception.
        message = 'Server {server_id} has been deleted unexpectedly.'.format(
            server_id=server_id)
        super(ServerDeleted, self).__init__(message)
        # The opaque Nova ID of the deleted server.
        self.server_id = server_id
def server_details(server_endpoint, auth_token, server_id, log=None):
    """
    Fetch the details of a server as specified by id.

    :param str server_endpoint: A str base URI probably from the service
        catalog.
    :param str auth_token: The auth token.
    :param str server_id: The opaque ID of a server.
    :param log: Optional bound logger passed through to treq.

    :return: A Deferred firing with a dict of the server details; a 404
        from Nova is translated into a :class:`ServerDeleted` failure.
    """
    url = append_segments(server_endpoint, 'servers', server_id)
    deferred = treq.get(url, headers=headers(auth_token), log=log)
    deferred.addCallback(check_success, [200, 203])
    # A 404 means the server no longer exists -> surface as ServerDeleted.
    deferred.addErrback(raise_error_on_code, 404, ServerDeleted(server_id),
                        url, 'server_details')
    deferred.addCallback(treq.json_content)
    return deferred
def wait_for_active(log,
                    server_endpoint,
                    auth_token,
                    server_id,
                    interval=20,
                    timeout=7200,
                    clock=None):
    """
    Wait until the server specified by server_id's status is 'ACTIVE'

    :param log: A bound logger.
    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone auth token.
    :param str server_id: Opaque nova server id.
    :param int interval: Polling interval in seconds. Default: 20.
    :param int timeout: timeout to poll for the server status in seconds.
        Default 7200 (2 hours).
    :param clock: Reactor-like object used for timing; defaults to the
        global reactor (injectable for tests).

    :return: Deferred that fires when the expected status has been seen.
        Errbacks with UnexpectedServerStatus if the server leaves BUILD
        for any state other than ACTIVE, with ServerDeleted if the server
        disappears, or with a timeout failure once ``timeout`` elapses.
    """
    log.msg("Checking instance status every {interval} seconds",
            interval=interval)

    if clock is None:  # pragma: no cover
        from twisted.internet import reactor
        clock = reactor

    # Remember when polling started so elapsed build time can be logged.
    start_time = clock.seconds()

    def poll():
        def check_status(server):
            # 'server' is the parsed JSON body from server_details.
            status = server['server']['status']
            time_building = clock.seconds() - start_time

            if status == 'ACTIVE':
                log.msg(("Server changed from 'BUILD' to 'ACTIVE' within "
                         "{time_building} seconds"),
                        time_building=time_building)
                return server

            elif status != 'BUILD':
                # Any state other than BUILD/ACTIVE (e.g. ERROR) is terminal.
                log.msg("Server changed to '{status}' in {time_building} seconds",
                        time_building=time_building, status=status)
                raise UnexpectedServerStatus(
                    server_id,
                    status,
                    'ACTIVE')

            else:
                raise TransientRetryError()  # just poll again

        sd = server_details(server_endpoint, auth_token, server_id, log=log)
        sd.addCallback(check_status)
        return sd

    timeout_description = ("Waiting for server <{0}> to change from BUILD "
                           "state to ACTIVE state").format(server_id)

    # Retry indefinitely on transient errors until the deadline; the two
    # terminal exceptions propagate immediately.
    return retry_and_timeout(
        poll, timeout,
        can_retry=transient_errors_except(UnexpectedServerStatus, ServerDeleted),
        next_interval=repeating_interval(interval),
        clock=clock,
        deferred_description=timeout_description)
# single global instance of semaphores
_semaphores = {}


def get_sempahore(operation, conf_name):
    """
    Get global semaphore of given operation if configured based on conf_name.
    Otherwise return None

    :param str operation: Operation for which semaphore is required. Must be
        same each time it is called for that operation
    :param str conf_name: Semaphore is returned only if this config exists

    :return: A :obj:`DeferredSemaphore` object corresponding to the operation,
        or ``None`` when the config value is absent.
    """
    existing = _semaphores.get(operation)
    if existing is not None:
        return existing

    # Not cached yet: the semaphore exists only if the config sets a limit.
    limit = config_value(conf_name)
    if limit is None:
        return None

    semaphore = DeferredSemaphore(limit)
    _semaphores[operation] = semaphore
    return semaphore
class ServerCreationRetryError(Exception):
    """
    Raised when Nova responds in a way that defies expectations -- for
    example, when more than one server exists with a given name.
    """
def find_server(server_endpoint, auth_token, server_config, log=None):
    """
    Given a server config, attempts to find a server created with that config.

    Uses the Nova list server details endpoint to filter out any server that
    does not have the exact server name (the filter is a regex, so can filter
    by ``^<name>$``), image ID, and flavor ID (both of which are exact
    filters).

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param log: A bound logger

    :return: Deferred that fires with a server (in the format of a server
        detail response) that matches that server config and creation time, or
        None if none matches
    :raises: :class:`ServerCreationRetryError`
    """
    # 'imageRef' may be absent or explicitly None (boot-from-volume);
    # Nova's filter expects an empty string in either case.
    image = server_config.get('imageRef', '')
    if image is None:
        image = ''

    query_params = {
        'image': image,
        'flavor': server_config['flavorRef'],
        'name': '^{0}$'.format(re.escape(server_config['name']))
    }

    url = '{path}?{query}'.format(
        path=append_segments(server_endpoint, 'servers', 'detail'),
        query=urlencode(query_params))

    def _match_server(list_server_details):
        candidates = list_server_details['servers']

        # More than one match means something is off with Nova.
        if len(candidates) > 1:
            raise ServerCreationRetryError(
                "Nova returned {0} servers that match the same "
                "image/flavor and name {1}.".format(
                    len(candidates), server_config['name']))

        if not candidates:
            return None

        candidate = candidates[0]
        # Same name but different metadata means it's not our server.
        if candidate['metadata'] != server_config['metadata']:
            raise ServerCreationRetryError(
                "Nova found a server of the right name ({name}) but wrong "
                "metadata. Expected {expected_metadata} and got {nova_metadata}"
                .format(expected_metadata=server_config['metadata'],
                        nova_metadata=candidate['metadata'],
                        name=server_config['name']))

        return {'server': candidate}

    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    d.addCallback(_match_server)
    return d
class _NoCreatedServerFound(Exception):
    """
    Exception to be used only to indicate that retrying a create server can be
    attempted. The original create server failure is wrapped so that if there
    are no more retries, the original failure can be propagated.
    """
    def __init__(self, original_failure):
        # Keep the wrapped failure around so that, once the retry budget is
        # exhausted, the caller can propagate the original error instead of
        # this sentinel exception.
        self.original = original_failure
def create_server(server_endpoint, auth_token, server_config, log=None,
clock=None, retries=3, create_failure_delay=5, _treq=None):
"""
Create a new server. If there is an error from Nova from this call,
checks to see if the server was created anyway. If not, will retry the
create ``retries`` times (checking each time if a server).
If the error from Nova is a 400, does not retry, because that implies that
retrying will just result in another 400 (bad args).
If checking to see if the server is created also results in a failure,
does not retry because there might just be something wrong with Nova.
:param str server_endpoint: Server endpoint URI.
:param str auth_token: Keystone Auth Token.
:param dict server_config: Nova server config.
:param: int retries: Number of tries to retry the create.
:param: int create_failure_delay: how much time in seconds to wait after
a create server failure before checking Nova to see if a server
was created
:param log: logger
:type log: :class:`otter.log.bound.BoundLog`
:param _treq: To be used for testing - what treq object to use
:type treq: something with the same api as :obj:`treq`
:return: Deferred that fires with the CreateServer response as a dict.
"""
path = append_segments(server_endpoint, 'servers')
if _treq is None: # pragma: no cover
_treq = treq
if clock is None: # pragma: no cover
from twisted.internet import reactor
clock = reactor
def _check_results(result, propagated_f):
"""
Return the original failure, if checking a server resulted in a
failure too. Returns a wrapped propagated failure, if there were no
servers created, so that the retry utility knows that server creation
can be retried.
"""
if isinstance(result, Failure):
log.msg("Attempt to find a created server in nova resulted in "
"{failure}. Propagating the original create error instead.",
failure=result)
return propagated_f
if result is None:
raise _NoCreatedServerFound(propagated_f)
return result
def _check_server_created(f):
"""
If creating a server failed with anything other than a 400, see if
Nova created a server anyway (a 400 means that the server creation args
were bad, and there is no point in retrying).
If Nova created a server, just return it and pretend that the error
never happened. If it didn't, or if checking resulted in another
failure response, return a failure of some type.
"""
f.trap(APIError)
if f.value.code == 400:
return f
d = deferLater(clock, create_failure_delay, find_server,
server_endpoint, auth_token, server_config, log=log)
d.addBoth(_check_results, f)
return d
def _create_with_delay(to_delay):
d = _treq.post(path, headers=headers(auth_token),
data=json.dumps({'server': server_config}), log=log)
if to_delay:
# Add 1 second delay to space 1 second between server creations
d.addCallback(delay, clock, 1)
return d
def _create_server():
"""
Attempt to create a server, handling spurious non-400 errors from Nova
by seeing if Nova created a server anyway in spite of the error. If so
then create server succeeded.
If not, and if no | |
"""
This file contains the main functionality of the software. It is used to
achieve two primary goals, and is divided into two corresponding sections:
1. Approximate the walker density as a function of time using Monte Carlo
simulations
2. Determine the steady state solution of a random walk on a network using the
methodology developed in our paper
"""
from operator import itemgetter
from scipy.integrate import quad # Use quadrature integration
import numpy as np # General mathematics
import scipy.sparse as sparse # Sparse representation of matrices necessary for larger networks
import scipy.sparse.linalg as linalg # Access to eigenvectors and eigenvalues
"""
Section 1: Monte Carlo simulations
The Monte-Carlo simulations are implemented as follows
1. A large number of random walkers is allowed to make steps on a given network
2. The walker density is averaged among the walkers
The details of the Monte-Carlo simulation are explained in the appendix of our
paper.
"""
def make_step(G, origin, wtd='wtd'):
    """
    This function assumes that a walker is located on node `origin` of a network
    `G`. Furthermore, it assumes that all edges originating from `origin` have
    a data attribute `wtd` which is an instance of a waiting time distribution
    (i.e. an object exposing an `rvs()` sampling method, such as a frozen
    scipy distribution).
    This function draws a sample waiting time from all edges originating from
    `origin` and chooses the edge that gives the smallest waiting time.
    This function returns a tuple (`delta`, `neighbour`), where
    - `delta` is the length of time before the step was executed
    - `neighbour` is the neighbour that the step was made to from `origin`
    """
    # Obtain a list of tuples (time, neighbour).
    # `items()` is used instead of the Python-2-only `iteritems()`; it yields
    # the same (neighbour, data) pairs on both Python 2 and Python 3.
    options = [(data[wtd].rvs(), neighbour) for neighbour, data in G[origin].items()]
    # Select the neighbour with the smallest waiting time
    # The argument `key = itemgetter(0)` ensures that the waiting times are compared
    return min(options, key=itemgetter(0))
def make_steps(G, origin, maxtime, wtd='wtd'):
    """
    Simulate the trajectory of a single walker starting on node `origin` of
    the network `G`. Every edge of `G` must carry a data attribute `wtd`
    holding a waiting time distribution.
    Steps are executed until the elapsed time exceeds `maxtime`; only the
    elapsed time is constrained, the number of steps is NOT. Note that the
    final recorded step may therefore lie beyond `maxtime`.
    Returns a list of tuples (`time`, `node`) recording when each node was
    entered, beginning with (0, `origin`).
    This function is an implementation of Algorithm 1 in Appendix 1 on page
    9 of our paper.
    """
    elapsed = 0                            # simulation clock
    node = origin                          # walker position
    trajectory = [(elapsed, node)]         # the walk starts at the origin at t = 0
    while elapsed <= maxtime:
        # Draw the next transition and advance the clock accordingly.
        waited, node = make_step(G, node, wtd)
        elapsed += waited
        trajectory.append((elapsed, node))
    return trajectory
def steps2probability(steps, delta, bins):
    """
    This function takes a sequence of steps and computes an array representing
    the probability to find a walker on a given node in a range of time
    intervals. The time intervals are uniformly spaced.
    `steps` is the sequence of step tuples obtained from, e.g. calling `make_steps`.
    `delta` is the width of a time step.
    `bins` is the number of bins.
    Hence, the array of probabilities corresponds to a time span
    $[0, `delta` * `bins`]$.
    This function returns a dictionary keyed by node. The values are arrays of
    length `bins` whose $i^{th}$ element represents the probability to find the
    walker on the associated node in the time interval
    $[i * `delta`, (i + 1) * `delta`]$.
    This function is an implementation of the algorithm discussed in appendix
    2 on page 10 of our paper.
    """
    # The ith element of the vector associated with each node shall represent
    # the probability to find the walker at the respective node in the time
    # interval [i, i + 1] * delta
    probabilities = {}  # Declare a dictionary of probabilities
    # Consider all transitions: the walker sits on node `j` from t_j until t_i
    for (t_i, _), (t_j, j) in zip(steps[1:], steps):
        p = probabilities.setdefault(j, np.zeros(bins))  # Get a default probability
        lower = int(t_j / delta)  # Index of the lowest bin involved
        upper = int(t_i / delta)  # Index of the highest bin involved
        # Bug fix: if the residence interval starts at or beyond the end of
        # the observation window [0, delta * bins] (e.g. t_j == delta * bins
        # exactly), `lower` indexes past the array and the unguarded
        # `p[lower] += ...` below raised an IndexError. Such intervals
        # contribute nothing to the window, so skip them.
        if lower >= bins:
            continue
        # Did the step happen in the same bin?
        if lower == upper:
            frac = (t_i - t_j) / delta
            p[lower] += frac
        else:
            # The fractional time spent in the lower bin is given by
            # [(lower + 1) * delta - t_j] / delta and simplifying gives
            lowerfrac = lower + 1 - t_j / delta
            p[lower] += lowerfrac
            # The fractional time spent in the upper bin is given by
            # [t_i - upper * delta] / delta and simplifying gives
            upperfrac = t_i / delta - upper
            if upper < bins:
                p[upper] += upperfrac
            # The number of bins between the lower and upper bins are
            span = upper - lower - 1
            # Fill these with ones if there are bins inbetween.
            # (NumPy clamps slice bounds, so a span reaching past `bins`
            # silently stops at the end of the array.)
            if span > 0:
                p[lower + 1 : lower + 1 + span] = 1
    return probabilities
def probability_moments(probability, bins, run=0, moments=None):
    """
    This function calculates the mean and standard deviation of the probability
    to find a walker on a given node. It does so iteratively such that the
    results of a Monte-Carlo simulation can be discarded after each simulation
    is completed.
    `probability` is the dictionary of probabilities obtained from `steps2probability`
    `bins` is the number of bins passed to `steps2probability`
    `run` is the number of iterations (not to be used explicitly).
    `moments` is a dictionary to keep track of moments (not to be used explicitly).
    This function returns a dictionary keyed by node. The value is a tuple of
    arrays. The first element is an array of means $x$ and the second array is the
    mean of $x^2$, NOT the variance.
    """
    z = np.zeros(bins)  # Default (zero) probability for unvisited nodes
    # Work on a shallow copy so the caller's dictionary is not emptied by the
    # `pop` calls below.
    probability = dict(probability)
    # Use `is None` (identity test) rather than `== None`; `.items()` instead
    # of the Python-2-only `.iteritems()`.
    if moments is None:
        moments = {}  # Create an empty dictionary for the moments
    for node, (mean, mean2) in moments.items():  # Go over each node part of the mean already
        p = probability.pop(node, z)  # Get the probability
        # Calculate the mean, mean square and update iteratively
        mean = (run * mean + p) / (run + 1)
        mean2 = (run * mean2 + p ** 2) / (run + 1)
        moments[node] = (mean, mean2)
    for node, p in probability.items():  # Consider nodes that are not part of the mean already
        moments[node] = (p / (run + 1), p ** 2 / (run + 1))
    return moments
def walk(G, origin, bins, delta, runs, wtd='wtd', debug=False):
"""
This function is a convenience function which calculates probability moments
for a walker starting on node `origin` of the network `G`. The maximal
simulation time is determined by `bins`*`delta`. The behaviour of the walker
is simulated `runs` times.
This function assumes that each edge of the network has a data attribute
`wtd` which is an instance of a probability distribution.
`debug` is a flag which results in the run number being printed if set to True.
This function returns a dictionary keyed by node. The value is a tuple of
arrays. The first element is an array of means $x$ and the second array is the
mean of $x^2$, NOT the variance.
"""
maxtime = bins * delta # The maximum time to run the simulation up to
moments = {} # The moments of the probability distributions
for run in xrange(runs):
steps = make_steps(G, origin, maxtime, wtd) # Make enough steps
probability = steps2probability(steps, delta, bins) # Get the pdf
moments = probability_moments(probability, bins, run, moments)
if debug:
| |
is assumed
.. WARNING::
If the scalar field has already expressions in other charts, it
is the user's responsibility to make sure that the expression
to be added is consistent with them.
EXAMPLES:
Adding scalar field expressions on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', structure='topological')
sage: c_xy.<x,y> = M.chart()
sage: f = M.scalar_field(x^2 + 2*x*y +1)
sage: f._express
{Chart (M, (x, y)): x^2 + 2*x*y + 1}
sage: f.add_expr(3*y)
sage: f._express # the (x,y) expression has been changed:
{Chart (M, (x, y)): 3*y}
sage: c_uv.<u,v> = M.chart()
sage: f.add_expr(cos(u)-sin(v), c_uv)
sage: f._express # random (dict. output); f has now 2 expressions:
{Chart (M, (x, y)): 3*y, Chart (M, (u, v)): cos(u) - sin(v)}
Since zero and one are special elements, their expressions cannot be
changed::
sage: z = M.zero_scalar_field()
sage: z.add_expr(cos(u)-sin(v), c_uv)
Traceback (most recent call last):
...
AssertionError: the expressions of the element zero cannot be changed
sage: one = M.one_scalar_field()
sage: one.add_expr(cos(u)-sin(v), c_uv)
Traceback (most recent call last):
...
AssertionError: the expressions of the element 1 cannot be changed
"""
if self is self.parent().one() or self is self.parent().zero():
raise AssertionError("the expressions of the element "
"{} cannot be changed".format(self._name))
if chart is None:
chart = self._domain._def_chart
self._express[chart] = chart.function(coord_expression)
self._is_zero = False # a priori
self._del_derived()
def add_expr_by_continuation(self, chart, subdomain):
    r"""
    Set coordinate expression in a chart by continuation of the
    coordinate expression in a subchart.

    The continuation is performed by demanding that the coordinate
    expression is identical to that in the restriction of the chart to
    a given subdomain.

    INPUT:

    - ``chart`` -- coordinate chart `(U,(x^i))` in which the expression of
      the scalar field is to set
    - ``subdomain`` -- open subset `V\subset U` in which the expression
      in terms of the restriction of the coordinate chart `(U,(x^i))` to
      `V` is already known or can be evaluated by a change of coordinates.

    EXAMPLES:

    Scalar field on the sphere `S^2`::

        sage: M = Manifold(2, 'S^2', structure='topological')
        sage: U = M.open_subset('U') ; V = M.open_subset('V') # the complement of resp. N pole and S pole
        sage: M.declare_union(U,V)   # S^2 is the union of U and V
        sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart() # stereographic coordinates
        sage: xy_to_uv = c_xy.transition_map(c_uv, (x/(x^2+y^2), y/(x^2+y^2)),
        ....:                 intersection_name='W', restrictions1= x^2+y^2!=0,
        ....:                 restrictions2= u^2+v^2!=0)
        sage: uv_to_xy = xy_to_uv.inverse()
        sage: W = U.intersection(V)  # S^2 minus the two poles
        sage: f = M.scalar_field(atan(x^2+y^2), chart=c_xy, name='f')

    The scalar field has been defined only on the domain covered by the
    chart ``c_xy``, i.e. `U`::

        sage: f.display()
        f: S^2 --> R
        on U: (x, y) |--> arctan(x^2 + y^2)
        on W: (u, v) |--> arctan(1/(u^2 + v^2))

    We note that on `W = U \cap V`, the expression of `f` in terms of
    coordinates `(u,v)` can be deduced from that in the coordinates
    `(x,y)` thanks to the transition map between the two charts::

        sage: f.display(c_uv.restrict(W))
        f: S^2 --> R
        on W: (u, v) |--> arctan(1/(u^2 + v^2))

    We use this fact to extend the definition of `f` to the open
    subset `V`, covered by the chart ``c_uv``::

        sage: f.add_expr_by_continuation(c_uv, W)

    Then, `f` is known on the whole sphere::

        sage: f.display()
        f: S^2 --> R
        on U: (x, y) |--> arctan(x^2 + y^2)
        on V: (u, v) |--> arctan(1/(u^2 + v^2))
    """
    if not chart._domain.is_subset(self._domain):
        raise ValueError("the chart is not defined on a subset of " +
                         "the scalar field domain")
    # Evaluate the expression on the restriction of ``chart`` to
    # ``subdomain`` (this may trigger a change-of-coordinates computation)
    # and promote it to an expression valid on the whole of ``chart``.
    restricted_chart = chart.restrict(subdomain)
    known_expression = self.expr(restricted_chart)
    self._express[chart] = chart.function(known_expression)
    self._is_zero = False # a priori
    self._del_derived()
def set_restriction(self, rst):
    r"""
    Define a restriction of ``self`` to some subdomain.

    INPUT:

    - ``rst`` -- :class:`ScalarField` defined on a subdomain of
      the domain of ``self``

    EXAMPLES::

        sage: M = Manifold(2, 'M') # the 2-dimensional sphere S^2
        sage: U = M.open_subset('U') # complement of the North pole
        sage: c_xy.<x,y> = U.chart() # stereographic coordinates from the North pole
        sage: V = M.open_subset('V') # complement of the South pole
        sage: c_uv.<u,v> = V.chart() # stereographic coordinates from the South pole
        sage: M.declare_union(U,V)   # S^2 is the union of U and V
        sage: f = M.scalar_field(name='f')
        sage: g = U.scalar_field(x^2+y)
        sage: f.set_restriction(g)
        sage: f.display()
        f: M --> R
        on U: (x, y) |--> x^2 + y
        sage: f.restrict(U) == g
        True
    """
    if not isinstance(rst, ScalarField):
        raise TypeError("the argument must be a scalar field")
    if not rst._domain.is_subset(self._domain):
        raise ValueError("the domain of the declared restriction is not " +
                         "a subset of the field's domain")
    # Store an independent copy carrying this field's name, so that later
    # modifications of ``rst`` do not silently alter ``self``.
    subdomain = rst._domain
    restriction = rst.copy()
    restriction.set_name(name=self._name, latex_name=self._latex_name)
    self._restrictions[subdomain] = restriction
    # Import the coordinate expressions of ``rst``, attached to the
    # restrictions of their charts to the relevant intersection.
    for chart, expr in rst._express.items():
        common = chart._domain.intersection(subdomain)
        self._express[chart.restrict(common)] = expr
    self._is_zero = False # a priori
def display(self, chart=None):
    r"""
    Display the expression of the scalar field in a given chart.

    Without any argument, this function displays all known, distinct
    expressions.

    INPUT:

    - ``chart`` -- (default: ``None``) chart with respect to which
      the coordinate expression is to be displayed; if ``None``, the
      display is performed in all the greatest charts in which the
      coordinate expression is known

    The output is either text-formatted (console mode) or LaTeX-formatted
    (notebook mode).

    EXAMPLES:

    Various displays::

        sage: M = Manifold(2, 'M', structure='topological')
        sage: c_xy.<x,y> = M.chart()
        sage: f = M.scalar_field(sqrt(x+1), name='f')
        sage: f.display()
        f: M --> R
        (x, y) |--> sqrt(x + 1)
        sage: latex(f.display())
        \begin{array}{llcl} f:& M & \longrightarrow & \mathbb{R} \\ & \left(x, y\right) & \longmapsto & \sqrt{x + 1} \end{array}
        sage: g = M.scalar_field(function('G')(x, y), name='g')
        sage: g.display()
        g: M --> R
        (x, y) |--> G(x, y)
        sage: latex(g.display())
        \begin{array}{llcl} g:& M & \longrightarrow & \mathbb{R} \\ & \left(x, y\right) & \longmapsto & G\left(x, y\right) \end{array}

    A shortcut of ``display()`` is ``disp()``::

        sage: f.disp()
        f: M --> R
        (x, y) |--> sqrt(x + 1)

    In case the scalar field is piecewise-defined, the ``display()``
    command still outputs all expressions. Each expression displayed
    corresponds to the chart on the greatest domain where this particular
    expression is known::

        sage: U = M.open_subset('U')
        sage: f.set_expr(y^2, c_xy.restrict(U))
        sage: f.display()
        f: M --> R
        on U: (x, y) |--> y^2
        sage: latex(f.display())
        \begin{array}{llcl} f:& M & \longrightarrow & \mathbb{R} \\ \mbox{on}\ U : & \left(x, y\right) & \longmapsto & y^{2} \end{array}
    """
    from sage.misc.latex import latex
    from sage.tensor.modules.format_utilities import FormattedExpansion

    def _display_expression(self, chart, result):
        r"""
        Helper function for :meth:`display`.

        Appends the text and LaTeX renderings of the field's coordinate
        expression in ``chart`` to ``result``; charts in which no
        expression is available are silently skipped.
        """
        try:
            expression = self.coord_function(chart)
            coords = chart[:]
            if len(coords) == 1:
                coords = coords[0]
            if chart._domain == self._domain:
                # Expression valid on the whole domain: indent under the
                # header instead of printing an "on <subdomain>:" prefix.
                if self._name is not None:
                    result._txt += " "
                    result._latex += " & "
            else:
                result._txt += "on " + chart._domain._name + ": "
                result._latex += r"\mbox{on}\ " + latex(chart._domain) + \
                                 r": & "
            result._txt += repr(coords) + " |--> " + repr(expression) + "\n"
            result._latex += latex(coords) + r"& \longmapsto & " + \
                             latex(expression) + r"\\"
        except (TypeError, ValueError):
            # No coordinate expression is known in this chart: skip it.
            pass

    # Name of the base field:
    field = self._domain.base_field()
    field_type = self._domain.base_field_type()
    if field_type == 'real':
        field_name = 'R'
        field_latex_name = r'\mathbb{R}'
    elif field_type == 'complex':
        field_name = 'C'
        field_latex_name = r'\mathbb{C}'
    else:
        field_name = str(field)
        field_latex_name = latex(field)
    # Build the header line "<name>: <domain> --> <field>" in both formats.
    result = FormattedExpansion()
    if self._name is None:
        symbol = ""
    else:
        symbol = self._name + ": "
    result._txt = symbol + self._domain._name + " --> " + field_name + "\n"
    if self._latex_name is None:
        symbol = ""
    else:
        symbol = self._latex_name + ":"
    result._latex = r"\begin{array}{llcl} " + symbol + r"&" + \
                    latex(self._domain) + r"& \longrightarrow & " + \
                    field_latex_name + r" \\"
    if chart is None:
        # Display an expression for each top chart, on the greatest domain
        # among its restrictions where the expression is known.
        for ch in self._domain._top_charts:
            ###
            # Get the greatest domain of top chart restrictions where the
            # expression is known:
            max_dom = None
            for sch in ch._subcharts:
                if max_dom is None:
                    try:
                        self.coord_function(sch)
                        max_dom = sch._domain
                    except (TypeError, ValueError):
                        pass
                elif max_dom.is_subset(sch._domain):
                    # A larger domain with a known expression supersedes
                    # the current candidate.
                    try:
                        self.coord_function(sch)
                        max_dom = sch._domain
                    except (TypeError, ValueError):
                        pass
            if max_dom is not None:
                _display_expression(self, ch.restrict(max_dom), result)
    else:
        _display_expression(self, chart, result)
    # Trim the trailing newline (text) and trailing "\\" (LaTeX) added by
    # the last expression, then close the LaTeX array environment.
    result._txt = result._txt[:-1]
    result._latex = result._latex[:-2] + r"\end{array}"
    return result

disp = display
def restrict(self, subdomain):
r"""
Restriction of | |
Orphaned.
raise
return file
def from_url(
    url:str
    , file_index:int
    , dataset_id:int
    , pillow_save:dict = None
):
    """
    Fetch an image from `url`, persist its raw bytes as a File record, and
    attach an Image record describing its Pillow metadata.

    - `pillow_save`: extra kwargs forwarded to `PIL.Image.save` (e.g. quality).
      Defaults to an empty dict; `None` is the default sentinel so that a
      mutable dict is not shared across calls.
    """
    if (pillow_save is None):
        pillow_save = {}
    # URL format is validated in `from_urls`.
    try:
        # `stream=True` exposes the raw response socket so Pillow can read
        # the bytes directly.
        img = Imaje.open(
            requests.get(url, stream=True).raw
        )
    # Catch `Exception` rather than a bare `except:` so KeyboardInterrupt /
    # SystemExit still propagate; chain the cause for easier debugging.
    except Exception as e:
        raise ValueError(f"\nYikes - Could not open file at this url with Pillow library:\n{url}\n") from e
    shape = {
        'width': img.size[0]
        , 'height':img.size[1]
    }
    # Re-encode the image into an in-memory buffer for storage.
    blob = io.BytesIO()
    img.save(blob, format=img.format, **pillow_save)
    blob = blob.getvalue()
    dataset = Dataset.get_by_id(dataset_id)
    file = File.create(
        blob = blob
        , file_type = File.Image.file_type
        , file_format = img.format
        , file_index = file_index
        , shape = shape
        , source_path = url
        , dataset = dataset
    )
    try:
        image = Image.create(
            mode = img.mode
            , file = file
            , pillow_save = pillow_save
        )
    except:
        # If the Image row cannot be created, remove the File row so no
        # orphaned record is left behind, then re-raise.
        file.delete_instance() # Orphaned.
        raise
    return file
def to_pillow(id:int):
    """Load the stored blob of an image File back into a PIL image object."""
    #https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open
    record = File.get_by_id(id)
    if (record.file_type != 'image'):
        raise ValueError(dedent(f"""
        Yikes - Only `file.file_type='image' can be converted to Pillow images.
        But you provided `file.file_type`: <{record.file_type}>
        """))
    # Wrap the raw bytes in an in-memory buffer that Pillow can open.
    return Imaje.open(io.BytesIO(record.blob))
class Tabular(BaseModel):
    """
    - Do not change `dtype=PickleField()` because we are stringifying the columns.
      I was tempted to do so for types like `np.float`, but we parse the final
      type that Pandas decides to use.
    """
    # Is sequence just a subset of tabular with a file_index?
    columns = JSONField()  # ordered list of column names.
    dtypes = JSONField()  # mapping of column name -> stringified pandas dtype.

    file = ForeignKeyField(File, backref='tabulars')
class Image(BaseModel):
    #https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
    mode = CharField()  # Pillow color mode (e.g. 'RGB', 'L').
    pillow_save = JSONField()  # kwargs that were forwarded to `PIL.Image.save`.

    file = ForeignKeyField(File, backref='images')
class Label(BaseModel):
    """
    - Label accepts multiple columns in case it is already OneHotEncoded (e.g. tensors).
    - At this point, we assume that the Label is always a tabular dataset.
    """
    columns = JSONField()  # list of the Dataset columns that constitute the label.
    column_count = IntegerField()  # cached `len(columns)`.
    unique_classes = JSONField(null=True) # For categoricals and binaries. None for continuous.
    #probabilities = JSONField() #if you were to write back the result of unsupervised for semi-supervised learning.

    dataset = ForeignKeyField(Dataset, backref='labels')

    def from_dataset(dataset_id:int, columns:list):
        """
        Create a Label from the given `columns` of a tabular Dataset.
        Validates that the columns exist, that no duplicate Label exists on
        the Dataset, and (for multi-column labels) that the rows are truly
        One-Hot-Encoded.
        """
        d = Dataset.get_by_id(dataset_id)
        columns = listify(columns)

        if (d.dataset_type != 'tabular'):
            raise ValueError(dedent(f"""
            Yikes - Labels can only be created from `dataset_type='tabular'`.
            But you provided `dataset_type`: <{d.dataset_type}>
            """))
        d_cols = Dataset.Tabular.get_main_tabular(dataset_id).columns

        # Check that the user-provided columns exist.
        all_cols_found = all(col in d_cols for col in columns)
        if not all_cols_found:
            raise ValueError("\nYikes - You specified `columns` that do not exist in the Dataset.\n")

        # Check for duplicates of this label that already exist.
        # Column lists are compared in sorted order so ordering is irrelevant.
        cols_aplha = sorted(columns)
        d_labels = d.labels
        count = d_labels.count()
        if (count > 0):
            for l in d_labels:
                l_id = str(l.id)
                l_cols = l.columns
                l_cols_alpha = sorted(l_cols)
                if cols_aplha == l_cols_alpha:
                    raise ValueError(f"\nYikes - This Dataset already has Label <id:{l_id}> with the same columns.\nCannot create duplicate.\n")

        column_count = len(columns)
        label_df = Dataset.to_pandas(id=dataset_id, columns=columns)
        """
        - When multiple columns are provided, they must be OHE.
        - Figure out column count because classification_binary and associated
          metrics can't be run on > 2 columns.
        - Negative values do not alter type of numpy int64 and float64 arrays.
        """
        if (column_count > 1):
            unique_values = []
            for c in columns:
                uniques = label_df[c].unique()
                unique_values.append(uniques)
                if (len(uniques) == 1):
                    # A constant column is suspicious but not fatal.
                    print(
                        f"Warning - There is only 1 unique value for this label column.\n" \
                        f"Unique value: <{uniques[0]}>\n" \
                        f"Label column: <{c}>\n"
                    )
            # Every distinct value across all columns must be 0/1 (int or float).
            flat_uniques = np.concatenate(unique_values).ravel()
            all_uniques = np.unique(flat_uniques).tolist()

            for i in all_uniques:
                if (
                    ((i == 0) or (i == 1))
                    or
                    ((i == 0.) or (i == 1.))
                ):
                    pass
                else:
                    raise ValueError(dedent(f"""
                    Yikes - When multiple columns are provided, they must be One Hot Encoded:
                    Unique values of your columns were neither (0,1) or (0.,1.) or (0.0,1.0).
                    The columns you provided contained these unique values: {all_uniques}
                    """))
            unique_classes = all_uniques

            del label_df
            # Now check if each row in the labels is truly OHE:
            # exactly one hot (value 1) column per row.
            label_arr = Dataset.to_numpy(id=dataset_id, columns=columns)
            for i, arr in enumerate(label_arr):
                if 1 in arr:
                    arr = list(arr)
                    arr.remove(1)
                    if 1 in arr:
                        raise ValueError(dedent(f"""
                        Yikes - Label row <{i}> is supposed to be an OHE row,
                        but it contains multiple hot columns where value is 1.
                        """))
                else:
                    raise ValueError(dedent(f"""
                    Yikes - Label row <{i}> is supposed to be an OHE row,
                    but it contains no hot columns where value is 1.
                    """))

        elif (column_count == 1):
            # At this point, `label_df` is a single column df that needs to be fetched as a Series.
            col = columns[0]
            label_series = label_df[col]
            label_dtype = label_series.dtype
            if (np.issubdtype(label_dtype, np.floating)):
                # Float labels are treated as continuous: no class list.
                unique_classes = None
            else:
                unique_classes = label_series.unique().tolist()
                class_count = len(unique_classes)
                if (
                    (np.issubdtype(label_dtype, np.signedinteger))
                    or
                    (np.issubdtype(label_dtype, np.unsignedinteger))
                ):
                    if (class_count >= 5):
                        print(
                            f"Tip - Detected `unique_classes >= {class_count}` for an integer Label." \
                            f"If this Label is not meant to be categorical, then we recommend you convert to a float-based dtype." \
                            f"Although you'll still be able to bin these integers when it comes time to make a Splitset."
                        )
                if (class_count == 1):
                    print(
                        f"Tip - Only detected 1 unique label class. Should have 2 or more unique classes." \
                        f"Your Label's only class was: <{unique_classes[0]}>."
                    )

        l = Label.create(
            dataset = d
            , columns = columns
            , column_count = column_count
            , unique_classes = unique_classes
        )
        return l

    def to_pandas(id:int, samples:list=None):
        """Return the label column(s) as a pandas DataFrame."""
        samples = listify(samples)
        l_frame = Label.get_label(id=id, numpy_or_pandas='pandas', samples=samples)
        return l_frame

    def to_numpy(id:int, samples:list=None):
        """Return the label column(s) as a numpy array."""
        samples = listify(samples)
        l_arr = Label.get_label(id=id, numpy_or_pandas='numpy', samples=samples)
        return l_arr

    def get_label(id:int, numpy_or_pandas:str, samples:list=None):
        """Shared fetcher behind `to_pandas` / `to_numpy`."""
        samples = listify(samples)
        l = Label.get_by_id(id)
        l_cols = l.columns
        dataset_id = l.dataset.id

        if (numpy_or_pandas == 'numpy'):
            lf = Dataset.to_numpy(
                id = dataset_id
                , columns = l_cols
                , samples = samples
            )
        elif (numpy_or_pandas == 'pandas'):
            lf = Dataset.to_pandas(
                id = dataset_id
                , columns = l_cols
                , samples = samples
            )
        return lf

    def get_dtypes(
        id:int
    ):
        """Return a {column: dtype} mapping for the label columns."""
        l = Label.get_by_id(id)
        dataset = l.dataset
        l_cols = l.columns
        tabular_dtype = Dataset.Tabular.get_main_tabular(dataset.id).dtypes

        label_dtypes = {}
        for key,value in tabular_dtype.items():
            for col in l_cols:
                if (col == key):
                    label_dtypes[col] = value
                    # Exit `col` loop early because matching `col` found.
                    break
        return label_dtypes
class Featureset(BaseModel):
    """
    - Remember, a Featureset is just a record of the columns being used.
    - Decided not to go w subclasses of Unsupervised and Supervised because that would complicate the SDK for the user,
      and it essentially forked every downstream model into two subclasses.
    - PCA components vary across featuresets. When different columns are used those columns have different component values.
    """
    columns = JSONField(null=True)  # columns included in the features.
    columns_excluded = JSONField(null=True)  # complement of `columns`; None when nothing excluded.

    dataset = ForeignKeyField(Dataset, backref='featuresets')

    def from_dataset(
        dataset_id:int
        , include_columns:list=None
        , exclude_columns:list=None
        #Future: runPCA #,run_pca:boolean=False # triggers PCA analysis of all columns
    ):
        """
        Create a Featureset from a Dataset, optionally narrowing it with
        `include_columns` or `exclude_columns` (mutually exclusive).

        As we get further away from the `Dataset.<Types>` they need less isolation.
        """
        d = Dataset.get_by_id(dataset_id)
        include_columns = listify(include_columns)
        exclude_columns = listify(exclude_columns)

        if (d.dataset_type == 'image'):
            # Just passes the Dataset through for now.
            if (include_columns is not None) or (exclude_columns is not None):
                raise ValueError("\nYikes - The `Dataset.Image` classes supports neither the `include_columns` nor `exclude_columns` arguemnt.\n")
            columns = None
            columns_excluded = None
        elif (d.dataset_type == 'tabular'):
            d_cols = Dataset.Tabular.get_main_tabular(dataset_id).columns
            if (include_columns is not None) and (exclude_columns is not None):
                raise ValueError("\nYikes - You can set either `include_columns` or `exclude_columns`, but not both.\n")

            if (include_columns is not None):
                # check columns exist
                all_cols_found = all(col in d_cols for col in include_columns)
                if not all_cols_found:
                    raise ValueError("\nYikes - You specified `include_columns` that do not exist in the Dataset.\n")
                # inclusion
                columns = include_columns
                # exclusion
                # Bug fix: copy `d_cols` before mutating, so the Tabular
                # instance's own column list is not altered by `.remove()`.
                columns_excluded = list(d_cols)
                for col in include_columns:
                    columns_excluded.remove(col)
            elif (exclude_columns is not None):
                all_cols_found = all(col in d_cols for col in exclude_columns)
                if not all_cols_found:
                    raise ValueError("\nYikes - You specified `exclude_columns` that do not exist in the Dataset.\n")
                # exclusion
                columns_excluded = exclude_columns
                # inclusion
                # Bug fix: copy before mutating (see above).
                columns = list(d_cols)
                for col in exclude_columns:
                    columns.remove(col)
                if not columns:
                    raise ValueError("\nYikes - You cannot exclude every column in the Dataset. For there will be nothing to analyze.\n")
            else:
                columns = list(d_cols)
                columns_excluded = None

        """
        Check that this Dataset does not already have a Featureset that is exactly the same.
        There are less entries in `excluded_columns` so maybe it's faster to compare that.
        Comparison is done on sorted lists so column order is irrelevant.
        """
        if columns_excluded is not None:
            cols_alpha = sorted(columns_excluded)
        else:
            cols_alpha = None
        d_featuresets = d.featuresets
        count = d_featuresets.count()
        if count > 0:
            for f in d_featuresets:
                f_id = str(f.id)
                f_cols = f.columns_excluded
                if f_cols is not None:
                    f_cols_alpha = sorted(f_cols)
                else:
                    f_cols_alpha = None
                if cols_alpha == f_cols_alpha:
                    raise ValueError(dedent(f"""
                    Yikes - This Dataset already has Featureset <id:{f_id}> with the same columns.
                    Cannot create duplicate.
                    """))

        f = Featureset.create(
            dataset = d
            , columns = columns
            , columns_excluded = columns_excluded
        )
        return f

    def to_pandas(id:int, samples:list=None, columns:list=None):
        """Return the feature columns as a pandas DataFrame."""
        samples = listify(samples)
        columns = listify(columns)
        f_frame = Featureset.get_featureset(
            id = id
            , numpy_or_pandas = 'pandas'
            , samples = samples
            , columns = columns
        )
        return f_frame

    def to_numpy(id:int, samples:list=None, columns:list=None):
        """Return the feature columns as a numpy array."""
        samples = listify(samples)
        columns = listify(columns)
        f_arr = Featureset.get_featureset(
            id = id
            , numpy_or_pandas = 'numpy'
            , samples = samples
            , columns = columns
        )
        return f_arr

    def get_featureset(
        id:int
        , numpy_or_pandas:str
        , samples:list = None
        , columns:list = None
    ):
        """Shared fetcher behind `to_pandas` / `to_numpy`; `columns` may narrow the set."""
        f = Featureset.get_by_id(id)
        samples = listify(samples)
        columns = listify(columns)
        f_cols = f.columns

        if (columns is not None):
            for c in columns:
                if c not in f_cols:
                    # Bug fix: this message previously lacked the `f` prefix,
                    # so "{c}" was printed literally instead of the column name.
                    raise ValueError(f"\nYikes - Cannot fetch column '{c}' because it is not in `Featureset.columns`.\n")
            f_cols = columns

        dataset_id = f.dataset.id
        if (numpy_or_pandas == 'numpy'):
            ff = Dataset.to_numpy(
                id = dataset_id
                , columns = f_cols
                , samples = samples
            )
        elif (numpy_or_pandas == 'pandas'):
            ff = Dataset.to_pandas(
                id = dataset_id
                , columns = f_cols
                , samples = samples
            )
        return ff

    def get_dtypes(
        id:int
    ):
        """Return a {column: dtype} mapping for the feature columns."""
        f = Featureset.get_by_id(id)
        dataset = f.dataset
        if (dataset.dataset_type == 'image'):
            raise ValueError("\nYikes - `featureset.dataset.dataset_type=='image'` does not have dtypes.\n")

        f_cols = f.columns
        tabular_dtype = Dataset.Tabular.get_main_tabular(dataset.id).dtypes

        featureset_dtypes = {}
        for key,value in tabular_dtype.items():
            for col in f_cols:
                if (col == key):
                    featureset_dtypes[col] = value
                    # Exit `col` loop early because matching `col` found.
                    break
        return featureset_dtypes

    def make_splitset(
        id:int
        , label_id:int = None
        , size_test:float = None
        , size_validation:float = None
        , bin_count:int = None
    ):
        """Convenience wrapper: build a Splitset from this Featureset."""
        s = Splitset.from_featureset(
            featureset_id = id
            , label_id = label_id
            , size_test = size_test
            , size_validation = size_validation
            , bin_count = bin_count
        )
        return s
class Splitset(BaseModel):
"""
- Belongs to a Featureset, not a Dataset, because the samples selected vary based on the stratification of the features during the split,
and a Featureset already has a Dataset anyways.
- Here the `samples_` attributes contain indices.
-ToDo: store and visualize distributions of each column | |
<filename>c2cgeoportal/tests/functional/test_mapserverproxy.py
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
#
# ^
# |
# |
# |
# +--------------------------------------------------------------+ area1
# | +--------------------------------------------------------+ |
# | | p2 area3 |+45 p3 | |
# | +--------------------------------------------------------+ |
# | area1 | |
# +--------------------------------------------------------------+
# |
# +--------------------------------------------------------------+
# | area2 | |
# +---+--------------------------------------------------------------+-------->
# -100  -90                           |                           +90   +100
# |
# |
# |
# |
# |
# p1 |-45 p4
# |
# |
# +
#
#
# GetMap
# ======
#
# md5sum with 4 points: 61cbb0a6d18b72e4a28c1087019de245
# md5sum with the 2 top points: 0a4fac2209d06c6fa36048c125b1679a
# md5sum with no points: ef33223235b26c782736c88933b35331
#
#
import os
import hashlib
from unittest import TestCase
from nose.plugins.attrib import attr
from sqlalchemy import Column, types
from geoalchemy import GeometryColumn, MultiPoint, GeometryDDL, WKTSpatialElement
import transaction
import sqlahelper
from c2cgeoportal.views.mapserverproxy import MapservProxy
from c2cgeoportal.tests.functional import ( # NOQA
tearDownCommon as tearDownModule,
setUpCommon as setUpModule,
createDummyRequest, mapserv_url)
Base = sqlahelper.get_base()
class TestPoint(Base):
    """Geoalchemy-mapped fixture table holding the test points queried via MapServer."""
    __tablename__ = 'testpoint'
    __table_args__ = {'schema': 'main'}
    # Integer surrogate key.
    id = Column(types.Integer, primary_key=True)
    # MULTIPOINT geometry in the Swiss CH1903/LV03 projection (EPSG:21781).
    the_geom = GeometryColumn(MultiPoint(srid=21781))
    name = Column(types.Unicode)
    city = Column(types.Unicode)
    country = Column(types.Unicode)
# Register geoalchemy DDL so create()/drop() of the table also manage its
# geometry column.
GeometryDDL(TestPoint.__table__)
GETFEATURE_REQUEST = u"""<?xml version='1.0' encoding="UTF-8" ?>
<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" service="WFS" version="1.1.0" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<wfs:Query typeName="feature:%(feature)s" srsName="EPSG:21781" xmlns:feature="http://mapserver.gis.umn.edu/mapserver">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:PropertyIs%(function)s matchCase="false" %(arguments)s>
<ogc:PropertyName>%(property)s</ogc:PropertyName>
<ogc:Literal>%(value)s</ogc:Literal>
</ogc:PropertyIs%(function)s>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>"""
# Pre-rendered GetFeature POST bodies (UTF-8 bytes).
# "substitution" variant: filter name != 'toto' on the substitution layer.
SUBSTITUTION_GETFEATURE_REQUEST = (GETFEATURE_REQUEST % {
    'feature': u'testpoint_substitution',
    'function': u'NotEqualTo',
    'arguments': u'',
    'property': u'name',
    'value': 'toto',
}).encode('utf-8')
# "column restriction" variant: filter name != 'bar' on the
# column-restriction layer.
COLUMN_RESTRICTION_GETFEATURE_REQUEST = (GETFEATURE_REQUEST % {
    'feature': u'testpoint_column_restriction',
    'function': u'NotEqualTo',
    'arguments': u'',
    'property': u'name',
    'value': 'bar',
}).encode('utf-8')
@attr(functional=True)
class TestMapserverproxyView(TestCase):
    def setUp(self):
        """Create the fixture table, four test points, three users/roles and
        three restriction areas (see the ASCII diagram at the top of the file)."""
        from c2cgeoportal.models import User, Role, Layer, RestrictionArea, \
            Functionality, DBSession
        TestPoint.__table__.create(bind=DBSession.bind, checkfirst=True)
        # One point per quadrant: p1 bottom-left, p2 top-left, p3 top-right,
        # p4 bottom-right (names mix ASCII, accents and digits on purpose).
        geom = WKTSpatialElement("MULTIPOINT((-90 -45))", srid=21781)
        p1 = TestPoint(the_geom=geom, name=u'foo', city=u'Lausanne', country=u'Swiss')
        geom = WKTSpatialElement("MULTIPOINT((-90 45))", srid=21781)
        p2 = TestPoint(the_geom=geom, name=u'bar', city=u'Chambéry', country=u'France')
        geom = WKTSpatialElement("MULTIPOINT((90 45))", srid=21781)
        p3 = TestPoint(the_geom=geom, name=u'éàè', city="Paris", country=u'France')
        geom = WKTSpatialElement("MULTIPOINT((90 -45))", srid=21781)
        p4 = TestPoint(the_geom=geom, name=u'123', city='Londre', country=u'UK')
        # Two print templates shared by all three test roles.
        pt1 = Functionality(name=u'print_template', value=u'1 Wohlen A4 portrait')
        pt2 = Functionality(name=u'print_template', value=u'2 Wohlen A3 landscape')
        user1 = User(username=u'__test_user1', password=u'__<PASSWORD>')
        role1 = Role(name=u'__test_role1', description=u'__test_role1', functionalities=[pt1, pt2])
        user1.role = role1
        user1.email = u'Tarenpion'
        user2 = User(username=u'__test_user2', password=u'__<PASSWORD>')
        role2 = Role(name=u'__test_role2', description=u'__test_role2', functionalities=[pt1, pt2])
        user2.role = role2
        user2.email = u'Tarenpion'
        user3 = User(username=u'__test_user3', password=u'__<PASSWORD>')
        role3 = Role(name=u'__test_role3', description=u'__test_role3', functionalities=[pt1, pt2])
        user3.role = role3
        user3.email = u'Tarenpion'
        # Protected layers and their restriction areas:
        #   ra1 (role1): y in [30, 50] -> contains the two top points.
        #   ra2 (role2, role3): y in [0, 20] -> contains no points.
        #   ra3 (role3, readwrite): y in [43, 47] -> contains the two top points.
        layer2 = Layer(u'testpoint_protected', 400, public=False)
        layer3 = Layer(u'testpoint_protected_query_with_collect', public=False)
        area = "POLYGON((-100 30, -100 50, 100 50, 100 30, -100 30))"
        area = WKTSpatialElement(area, srid=21781)
        restricted_area1 = RestrictionArea(u'__test_ra1', u'', [layer2, layer3], [role1], area)
        area = "POLYGON((-100 0, -100 20, 100 20, 100 0, -100 0))"
        area = WKTSpatialElement(area, srid=21781)
        restricted_area2 = RestrictionArea(u'__test_ra2', u'', [layer2, layer3], [role2, role3], area)
        area = "POLYGON((-95 43, -95 47, 95 47, 95 43, -95 43))"
        area = WKTSpatialElement(area, srid=21781)
        restricted_area3 = RestrictionArea(u'__test_ra3', u'', [layer3], [role3], area, readwrite=True)
        # NOTE(review): layer2/layer3 are not added explicitly; presumably they
        # are persisted through the RestrictionArea relationships — confirm.
        DBSession.add_all([
            p1, p2, p3, p4, user1, user2, user3,
            restricted_area1, restricted_area2, restricted_area3
        ])
        DBSession.flush()
        # Keep a couple of row ids around for tests that address points directly.
        self.id_lausanne = p1.id
        self.id_paris = p3.id
        transaction.commit()
    def tearDown(self):
        """Delete every fixture created in setUp and drop the fixture table."""
        from c2cgeoportal.models import User, Role, Layer, RestrictionArea, \
            Functionality, DBSession
        DBSession.query(User).filter(User.username == '__test_user1').delete()
        DBSession.query(User).filter(User.username == '__test_user2').delete()
        DBSession.query(User).filter(User.username == '__test_user3').delete()
        # Detach roles and layers from each restriction area before deleting
        # it, so the association rows are cleared first.
        ra = DBSession.query(RestrictionArea).filter(
            RestrictionArea.name == '__test_ra1'
        ).one()
        ra.roles = []
        ra.layers = []
        DBSession.delete(ra)
        ra = DBSession.query(RestrictionArea).filter(
            RestrictionArea.name == '__test_ra2'
        ).one()
        ra.roles = []
        ra.layers = []
        DBSession.delete(ra)
        ra = DBSession.query(RestrictionArea).filter(
            RestrictionArea.name == '__test_ra3'
        ).one()
        ra.roles = []
        ra.layers = []
        DBSession.delete(ra)
        # Same for roles: drop their functionality links before deletion.
        r = DBSession.query(Role).filter(Role.name == '__test_role1').one()
        r.functionalities = []
        DBSession.delete(r)
        r = DBSession.query(Role).filter(Role.name == '__test_role2').one()
        r.functionalities = []
        DBSession.delete(r)
        r = DBSession.query(Role).filter(Role.name == '__test_role3').one()
        r.functionalities = []
        DBSession.delete(r)
        for f in DBSession.query(Functionality).filter(Functionality.value == u'1 Wohlen A4 portrait').all():
            DBSession.delete(f)
        for f in DBSession.query(Functionality).filter(Functionality.value == u'2 Wohlen A3 landscape').all():
            DBSession.delete(f)
        # Layers are looked up by name because setUp does not keep references.
        for layer in DBSession.query(Layer).filter(Layer.name == 'testpoint_unprotected').all():
            DBSession.delete(layer)  # pragma: nocover
        for layer in DBSession.query(Layer).filter(Layer.name == 'testpoint_protected').all():
            DBSession.delete(layer)
        for layer in DBSession.query(Layer).filter(Layer.name == 'testpoint_protected_query_with_collect').all():
            DBSession.delete(layer)
        transaction.commit()
        TestPoint.__table__.drop(bind=DBSession.bind, checkfirst=True)
def _create_dummy_request(self, username=None):
from c2cgeoportal.models import DBSession, User
request = createDummyRequest({
'mapserv_url': mapserv_url
})
request.params = {"map": os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'c2cgeoportal_test.map'
)}
if username:
request.user = DBSession.query(User) \
.filter_by(username=username).one()
return request
def test_GetLegendGraphic(self):
request = self._create_dummy_request()
request.params.update(dict(service='wms', version='1.1.1',
request='getlegendgraphic',
layer='testpoint_unprotected',
srs='EPSG:21781',
format='image/png',
extraparam=u'with spéciàl chârs'))
response = MapservProxy(request).proxy()
self.assertTrue(response.cache_control.public)
self.assertEqual(response.cache_control.max_age, 1000)
def test_GetFeatureInfo(self):
request = self._create_dummy_request()
request.params.update(dict(
service='wms', version='1.1.1',
request='getfeatureinfo', bbox='-90,-45,90,0',
layers='testpoint_unprotected',
query_layers='testpoint_unprotected',
srs='EPSG:21781', format='image/png',
info_format='application/vnd.ogc.gml',
width='600', height='400', x='0', y='400'
))
response = MapservProxy(request).proxy()
expected_response = """
<?xmlversion="1.0"encoding="UTF-8"?>
<msGMLOutput
xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<testpoint_unprotected_layer>
<gml:name>countries</gml:name>
<testpoint_unprotected_feature>
<gml:boundedBy>
<gml:Box srsName="EPSG:21781">
<gml:coordinates>-90.000000,-45.000000 -90.000000,-45.000000</gml:coordinates>
</gml:Box>
</gml:boundedBy>
<the_geom>
<gml:Point srsName="EPSG:21781">
<gml:coordinates>-90.000000,-45.000000</gml:coordinates>
</gml:Point>
</the_geom>
<name>foo</name>
<city>Lausanne</city>
<country>Swiss</country>
</testpoint_unprotected_feature>
</testpoint_unprotected_layer>
</msGMLOutput>
"""
import re
pattern = re.compile(r'\s+')
expected_response = ''.join(
re.sub(pattern, '', l) for l in expected_response.splitlines()
)
response_body = ''.join(
re.sub(pattern, '', l) for l in response.body.splitlines()
)
self.assertEqual(response_body, expected_response)
self.assertEqual(str(response.cache_control), "no-cache")
def test_GetFeatureInfo_JSONP(self):
request = self._create_dummy_request()
request.params.update(dict(
service='wms', version='1.1.1',
request='getfeatureinfo', bbox='-90,-45,90,0',
layers='testpoint_unprotected',
query_layers='testpoint_unprotected',
srs='EPSG:21781', format='image/png',
info_format='application/vnd.ogc.gml',
width='600', height='400', x='0', y='400',
callback='cb'
))
response = MapservProxy(request).proxy()
expected_response = """
<?xmlversion="1.0"encoding="UTF-8"?>
<msGMLOutput
xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<testpoint_unprotected_layer>
<gml:name>countries</gml:name>
<testpoint_unprotected_feature>
<gml:boundedBy>
<gml:Box srsName="EPSG:21781">
<gml:coordinates>-90.000000,-45.000000 -90.000000,-45.000000</gml:coordinates>
</gml:Box>
</gml:boundedBy>
<the_geom>
<gml:Point srsName="EPSG:21781">
<gml:coordinates>-90.000000,-45.000000</gml:coordinates>
</gml:Point>
</the_geom>
<name>foo</name>
<city>Lausanne</city>
<country>Swiss</country>
</testpoint_unprotected_feature>
</testpoint_unprotected_layer>
</msGMLOutput>
"""
import re
pattern = re.compile(r'\s+')
expected_response = ''.join(
re.sub(pattern, '', l) for l in expected_response.splitlines()
)
expected_response = '%s(\'%s\');' % ('cb', expected_response)
response_body = ''.join(
re.sub(pattern, '', l) for l in response.body.splitlines()
)
self.assertEqual(response_body, expected_response)
self.assertFalse(response.cache_control.public)
self.assertEqual(str(response.cache_control), "no-cache")
def test_GetMap_unprotected_layer_anonymous(self):
request = self._create_dummy_request()
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_unprotected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# 4 points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '61cbb0a6d18b72e4a28c1087019de245')
def test_GetMap_unprotected_layer_user1(self):
request = self._create_dummy_request(username=u'__test_user1')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_unprotected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# 4 points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '61cbb0a6d18b72e4a28c1087019de245')
def test_GetMap_unprotected_layer_user2(self):
request = self._create_dummy_request(username=u'__test_user2')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_unprotected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# 4 points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '61cbb0a6d18b72e4a28c1087019de245')
def test_GetMap_protected_layer_anonymous(self):
request = self._create_dummy_request()
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# empty
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, 'ef33223235b26c782736c88933b35331')
def test_GetMap_protected_layer_user1(self):
request = self._create_dummy_request(username=u'__test_user1')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# two points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '0a4fac2209d06c6fa36048c125b1679a')
def test_GetMap_protected_layer_user2(self):
request = self._create_dummy_request(username=u'__test_user2')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertEqual(str(response.cache_control), "no-cache")
# empty
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, 'ef33223235b26c782736c88933b35331')
def test_GetMap_protected_layer_collect_query_user1(self):
request = self._create_dummy_request(username=u'__test_user1')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected_query_with_collect',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# two points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '0a4fac2209d06c6fa36048c125b1679a')
def test_GetMap_protected_layer_collect_query_user2(self):
request = self._create_dummy_request(username=u'__test_user2')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected_query_with_collect',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# empty
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, 'ef33223235b26c782736c88933b35331')
def test_GetMap_protected_layer_collect_query_user3(self):
request = self._create_dummy_request(username=u'__test_user3')
request.params.update(dict(
service='wms', version='1.1.1', request='getmap',
bbox='-180,-90,180,90', layers='testpoint_protected_query_with_collect',
width='600', height='400', srs='EPSG:21781', format='image/png'
))
response = MapservProxy(request).proxy()
self.assertTrue(response.status_int, 200)
self.assertEqual(str(response.cache_control), "no-cache")
# two points
md5sum = hashlib.md5(response.body).hexdigest()
self.assertEquals(md5sum, '0a4fac2209d06c6fa36048c125b1679a')
def GetFeature_IsEqualTo(self, value):
request = self._create_dummy_request()
request.method = 'POST'
request.body = (GETFEATURE_REQUEST % {
| |
# dictionary.
# It takes one parameter:
# File pointer: A file pointer that points to a JSON file.
import json

# Bug fix: the first comment line above lost its '#' prefix, leaving a bare
# `dictionary.` expression that is a syntax error.
# Opening JSON file and deserializing its content into a Python object.
with open('sample.json', 'r') as openfile:
    # Reading from json file
    json_object = json.load(openfile)
print(json_object)
print(type(json_object))
# XLSX file format
# XLSX is a Microsoft Excel Open XML file format. It also comes under the
# Spreadsheet file format.
# In XLSX data is organized under the cells and columns in a sheet.
# Reading the data from XLSX file
# Let’s load the data from XLSX file and define the sheet name. For loading
# the data you can use the Pandas library in python.
import pandas as pd
import urllib.request
# Download the sample workbook next to the script, then read its first sheet.
urllib.request.urlretrieve("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/file_example_XLSX_10.xlsx", "sample.xlsx")
df = pd.read_excel("sample.xlsx")
# NOTE(review): a bare `df` expression only renders in a Jupyter notebook; in
# a plain script use print(df) — confirm the intended execution environment.
df
# XML file format
# XML (Extensible Markup Language) is a markup language with rules for
# encoding data; it is both human- and machine-readable.
# Pandas has no built-in XML writer here, so the standard library's
# xml.etree.ElementTree module is used to build and save the document.
import xml.etree.ElementTree as ET

# Build the tree: employee -> details -> {firstname, lastname, age}.
employee = ET.Element('employee')
details = ET.SubElement(employee, 'details')
for tag, text in (('firstname', 'Shiv'),
                  ('lastname', 'Mishra'),
                  ('age', '23')):
    ET.SubElement(details, tag).text = text

# Serialize the tree to a new XML file.
tree = ET.ElementTree(employee)
with open("new_sample.xml", "wb") as out_file:
    tree.write(out_file)
# Reading with xml.etree.ElementTree
# Let's have a look at one way to read XML data and put it in a Pandas
# DataFrame. You can see the XML file in the Notepad of your local machine.
import pandas as pd
import xml.etree.ElementTree as etree
# wget https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/Sample-employee-XML-file.xml
# Parse the XML file, collect one row (dict) per employee node, then build
# the DataFrame in a single call.
# Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# accumulating rows in a list and constructing the frame once replaces it
# (and is also O(n) instead of O(n^2)).
tree = etree.parse("Sample-employee-XML-file.xml")
root = tree.getroot()
columns = ["firstname", "lastname", "title", "division", "building", "room"]
rows = []
for node in root:
    rows.append({col: node.find(col).text for col in columns})
# Name kept as-is ("datatframe") because later code references it.
datatframe = pd.DataFrame(rows, columns=columns)
datatframe
# Save Data
# Correspondingly, Pandas enables us to save the dataset to csv by using the
# dataframe.to_csv() method, you can add the file path and name along with
# quotation marks in the brackets. For example, if you would save the
# dataframe df as employee.csv to your local machine, you may use the syntax
# below:
datatframe.to_csv("employee.csv", index=False)
# We can also read and save other file formats, we can use similar functions
# to pd.read_csv() and df.to_csv() for other data formats, the functions are
# listed in the following table:
# Read/Save Other Data Formats
# Binary File Format
# "Binary" files are any files where the format isn't made up of readable
# characters. It contain formatting information that only certain
# applications or processors can understand. While humans can read text
# files, binary files must be run on the appropriate software or processor
# before humans can read them. Binary files can range from image files
# like JPEGs or GIFs, audio files like MP3s or binary document formats like
# Word or PDF.
# Let's see how to read an Image file.
#
# Reading the Image file
# Python supports very powerful tools when comes to image processing.
# Let’s see how to process the images using PILlibrary. PIL is the Python
# Imaging Library which provides the python interpreter with image editing
# capabilities.
# importing PIL
from PIL import Image
import urllib.request
# Downloading dataset
urllib.request.urlretrieve("https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg", "dog.jpg")
# Read image
img = Image.open('dog.jpg')
# Output Images
# NOTE(review): display() is an IPython/Jupyter builtin and is undefined in a
# plain Python script (use img.show() there) — confirm execution environment.
display(img)
# Data Analysis
# In this section, you will learn how to approach data acquisition in
# various ways, and obtain necessary insights from a dataset. By the end of
# this lab, you will successfully load the data into Jupyter Notebook, and
# gain some fundamental insights via Pandas Library. In our case, the
# Diabetes Dataset is an online source, and it is in CSV (comma separated
# value) format. Let's use this dataset as an example to practice data
# reading.
# About this Dataset
# Context This dataset is originally from the National Institute of Diabetes
# and Digestive and Kidney Diseases. The objective of the dataset is to
# diagnostically predict whether or not a patient has diabetes, based on
# certain diagnostic measurements included in the dataset. Several
# constraints were placed on the selection of these instances from a larger
# database. In particular, all patients here are females at least 21 years
# old of Pima Indian heritage.
# Content The datasets consists of several medical predictor variables and
# one target variable, Outcome. Predictor variables includes the number of
# pregnancies the patient has had, their BMI, insulin level, age, and so on.
# We have 768 rows and 9 columns. The first 8 columns represent the features
# and the last column represent the target/label.
# Import pandas library
import pandas as pd
# pandas can read a CSV straight from a URL; no local download needed.
path = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/diabetes.csv"
df = pd.read_csv(path)
# After reading the dataset, we can use the dataframe.head(n) method to
# check the top n rows of the dataframe; where n is an integer. Contrary to
# dataframe.head(n), dataframe.tail(n) will show you the bottom n rows of
# the dataframe.
# show the first 5 rows using dataframe.head() method
print("The first 5 rows of the dataframe")
# NOTE(review): bare df.head(5) only renders in a notebook; print() it in a script.
df.head(5)
# To view the dimensions of the dataframe, we use the .shape parameter.
df.shape
# Statistical Overview of dataset
df.info()
# This method prints information about a DataFrame including the index
# dtype and columns, non-null values and memory usage.
df.describe()
# Pandas describe() is used to view some basic statistical details like
# percentile, mean, std etc. of a data frame or a series of numeric values.
# When this method is applied to a series of string, it returns a different
# output
# Identify and handle missing values
# We use Python's built-in functions to identify these missing values. There
# are two methods to detect missing data:
# .isnull()
# .notnull()
# The output is a boolean value indicating whether the value that is passed
# into the argument is in fact missing data.
# missing_data is a boolean DataFrame of the same shape as df.
missing_data = df.isnull()
missing_data.head(5)
# "True" stands for missing value, while "False" stands for not missing
# value.
# Count missing values in each column
# Using a for loop in Python, we can quickly figure out the number of
# missing values in each column. As mentioned above, "True" represents a
# missing value, "False" means the value is present in the dataset. In the
# body of the for loop the method ".value_counts()" counts the number of
# "True" values.
for column in missing_data.columns.values.tolist():
    print(column)
    print (missing_data[column].value_counts())
    print("")
# As you can see above there is no missing values in the dataset.
# Correct data format
# Check all data is in the correct format (int, float, text or other).
# In Pandas, we use
# .dtype() to check the data type
# .astype() to change the data type
# Numerical variables should have type 'float' or 'int'.
df.dtypes
# As we can see above, All columns have the correct data type.
# **Visualization** is one of the best way to get insights from the dataset.
# **Seaborn** and **Matplotlib** are two of Python's most powerful
# visualization libraries.
# import libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Bug fix: value_counts() orders classes by frequency (majority class
# first), so pairing it with the fixed ('Diabetic', 'Not Diabetic') tuple
# mislabels the slices whenever the majority class is Outcome == 0. Select
# the counts for each class explicitly instead.
# (Assumes Outcome 1 = diabetic, 0 = not diabetic — TODO confirm encoding.)
labels = 'Diabetic', 'Not Diabetic'
outcome_counts = df['Outcome'].value_counts()
plt.pie([outcome_counts.get(1, 0), outcome_counts.get(0, 0)],
        labels=labels, autopct='%0.02f%%')
plt.legend()
plt.show()
# As you can | |
numexamples = 800, startdate = 1780, enddate = 2010)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
if command == 'optimize_fifty_years':
# this option creates a model that can be used for comparison to
# the model of fictional prestige, which spans only 1850-1950
c_range = [.0001]
featurestart = 2450
featureend = 2700
featurestep = 50
generalmetapath, general_docids = select_subset_to_model('fiftypost1950', metapath, numexamples = 1500, startdate = 1950, enddate = 2050)
# The number of examples is higher here, because we want this model to be maximally
# accurate, and we're not trying to use this as a guide for other 800-character
# models.
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'test_decades':
c_range = [.0004]
featurestart = 2300
featureend = 2300
featurestep = 100
with open('../dataforR/speechlessdecademodels.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\taccuracy\n')
for dec in range (1790, 2010, 10):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 10
modelname = 'decade' + str(dec)
for i in range(15):
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
f.write(str(dec) + '\t' + str(accuracy) + '\n')
elif command == 'optimize_20c':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1100
featureend = 3000
featurestep = 100
generalmetapath, general_docids = select_subset_to_model('wholetwentieth', metapath,
numexamples = 800, startdate = 1900, enddate = 2000)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'optimize_19c':
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1100
featureend = 3000
featurestep = 100
generalmetapath, general_docids = select_subset_to_model('wholenineteenth', metapath,
numexamples = 800, startdate = 1800, enddate = 1900)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'optimize_thirty':
decade = int(args[2])
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008]
featurestart = 1900
featureend = 3000
featurestep = 100
modelname = 'optimalthirty' + str(decade)
generalmetapath, general_docids = select_subset_to_model(modelname, metapath,
numexamples = 1500, startdate = decade - 10, enddate = decade + 20)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'decade_grid':
# This is the function I finally used. Keeps the number of features
# fixed at 2200, but generates a new lexicon for each decade (and each
# sample of 800 characters within the decade). Tests each decade at
# multiple L2 settings, and records them all, so we can take the
# optimal setting but also figure out how much of a difference that's
# making.
c_range = [.00003, .0001, .0003, .001]
featurestart = 2200
featureend = 2200
featurestep = 100
with open('../dataforR/decadegrid.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\tL2\taccuracy\titer\n')
for dec in range (1790, 2010, 10):
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 10
modelname = 'decade' + str(dec)
for i in range(15):
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(dec) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'decade_grid_for_differentiation_plot':
# This is the function I finally used. Keeps the number of features
# fixed at 2200, but generates a new lexicon for each decade (and each
# sample of 800 characters within the decade). Tests each decade at
# multiple L2 settings, and records them all, so we can take the
# optimal setting but also figure out how much of a difference that's
# making.
c_range = [.0001]
featurestart = 2300
featureend = 2300
featurestep = 100
for dec in range (1790, 2010, 10):
floor = dec - 10
ceiling = dec + 20
modelname = 'thirty' + str(dec)
decademetapath, docids = select_subset_to_model(modelname, metapath, numexamples = 1500,
startdate = floor, enddate = ceiling)
accuracy = crossvalidate_one_model(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
print(str(dec) + '\t' + str(accuracy) + '\n')
elif command == 'auth_specific_charpredict_grid':
# Character-gender prediction accuracy measured separately for male- and
# female-authored books, in twenty-year windows, across a small L2 grid;
# five resamples per (decade, author-gender) cell. Feature count fixed
# at 2200. Results accumulate in auth_specific_charpredict.tsv.
c_range = [.00003, .0001, .0003, .001]
featurestart = 2200
featureend = 2200
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
with open('../dataforR/auth_specific_charpredict.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('decade\tauthgender\tL2\taccuracy\titer\n')
for dec in range (1800, 2000, 20):
# NOTE(review): dec is never 1790 in range(1800, 2000, 20), so the
# branch below is dead code copied from the decade-grid loop; only
# the else arm ever runs. Confirm whether the loop was meant to
# start at 1790.
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 20
for agender in ['m', 'f']:
modelname = agender + 'author' + '_' + str(dec)
for i in range(5):
decademetapath, docids = authgender_subset_to_model(modelname, agender, metapath, numexamples = 800,
startdate = floor, enddate = ceiling)
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(dec) + '\t' + agender + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'predict_authgender':
# Predict AUTHOR gender (rather than character gender) in overlapping
# 17-year windows; nine resamples per window, L2 grid of four settings,
# feature count fixed at 2500. Rows are keyed by the sample's mean date
# (returned by subset_to_predict_authgender), not by the window start.
c_range = [.0001, .0003, .001, .003]
featurestart = 2500
featureend = 2500
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
with open('../dataforR/authgender_predictions.tsv', mode = 'w', encoding = 'utf-8') as f:
f.write('meandate\tL2\taccuracy\titer\n')
for dec in range (1795, 2010, 17):
# NOTE(review): range(1795, 2010, 17) yields 1795, 1812, ... so dec
# never equals 1790; this branch is dead code inherited from the
# decade-grid loop — only the else arm ever runs.
if dec == 1790:
floor = 1780
ceiling = 1800
else:
floor = dec
ceiling = dec + 17
modelname = 'predict_authgender' + '_' + str(dec)
for i in range(9):
decademetapath, meandate = subset_to_predict_authgender(modelname, metapath, num = 400,
startdate = floor, enddate = ceiling)
# note that in this case num is not the total number of male or female examples,
# but the number for each cell of a 2x2 contingency matrix of author gender
# versus character gender so 400 produces 1600 total instances
accuracydict = crossvalidate_across_L2_range(decademetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
for L2setting, accuracy in accuracydict.items():
f.write(str(meandate) + '\t' + str(L2setting) + '\t' + str(accuracy) + '\t' + str(i) + '\n')
elif command == 'optimize_authgender':
# Full grid search — feature counts 800..3600 crossed with a wide range of
# L2 settings — to find the best hyperparameters for predicting author
# gender on one balanced subset spanning the whole period (1780-2010).
c_range = [.000003, .00001, .00003, .00009, .0003, .0009, .002, .004, .008, .03, 1]
featurestart = 800
featureend = 3600
featurestep = 100
metapath = '../metadata/balanced_authgender_subset.csv'
sourcefolder = '/Users/tunder/data/authgender_subset/'
generalmetapath, general_docids = subset_to_predict_authgender('general_authgender', metapath,
num = 400, startdate = 1780, enddate = 2010)
gridsearch_a_model(generalmetapath, sourcefolder, c_range,
featurestart, featureend, featurestep)
elif command == 'onlywomenwriters':
# Train and evaluate a character-gender model restricted to books written
# by women (1800-2000); single L2 setting, feature counts 2500-2600.
# The subset is saved under model name 'onlywomenwritersC'.
c_range = [.0003]
featurestart = 2500
featureend = 2600
featurestep = 100
womensmetapath, docids = authgender_subset_to_model('onlywomenwritersC', 'f', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
elif command == 'onlymalewriters':
c_range = [.0003]
featurestart = 2500
featureend = 2600
featurestep = 100
womensmetapath, docids = authgender_subset_to_model('onlymalewritersC', 'm', metapath, numexamples = 1500, startdate = 1800, enddate = 2000)
gridsearch_a_model(womensmetapath, sourcefolder, c_range, featurestart, featureend, featurestep)
elif command == 'compare_models':
men = ['onlymalewriters', 'onlymalewritersB', 'onlymalewritersC']
women = ['onlywomenwriters', 'onlywomenwritersB', 'onlywomenwritersC']
# test_subset_path, test_docids = select_subset_to_model('test_metadata', metapath, numexamples = 1000, startdate = 1800, enddate = 2000)
test_subset_path = '../models/test_metadata_meta.csv'
generaloutpath = '/Users/tunder/Dropbox/python/character/future_work/appliedmodels/'
masculineperspective = []
feminineperspective = []
for m in men:
modelpath = '../models/' + m + '.pkl'
outpath = generaloutpath + m + '.results'
if not os.path.exists(outpath):
applymodel(modelpath, test_subset_path, outpath)
masculineperspective.append(outpath)
for w in women:
modelpath = '../models/' + w + '.pkl'
outpath = generaloutpath + w + '.results'
if not | |
raise NotImplementedError('Method not implemented!')
def OrderByReference(self, request, context):
"""Get an Order by Pending Order reference (UUID)

Auto-generated (protoc/grpcio) stub: marks the RPC as UNIMPLEMENTED on
the gRPC context and raises; a concrete servicer subclass overrides it.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OrdersByMarket(self, request, context):
"""Get a list of Orders by Market
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OrdersByParty(self, request, context):
"""Get a list of Orders by Party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OrderByID(self, request, context):
"""Get a specific order by order ID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OrderVersionsByID(self, request, context):
"""Get all versions of the order by its orderID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarginLevels(self, request, context):
"""-- Parties --
Get Margin Levels by Party ID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Parties(self, request, context):
"""Get a list of Parties
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PartyByID(self, request, context):
"""Get a Party by ID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PositionsByParty(self, request, context):
"""-- Positions --
Get a list of Positions by Party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LastTrade(self, request, context):
"""-- Trades --
Get latest Trade
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TradesByMarket(self, request, context):
"""Get a list of Trades by Market
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TradesByOrder(self, request, context):
"""Get a list of Trades by Order
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TradesByParty(self, request, context):
"""Get a list of Trades by Party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetProposals(self, request, context):
"""-- Governance --
Get governance data (proposals and votes) for all proposals
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetProposalsByParty(self, request, context):
"""Get governance data (proposals and votes) for proposals by party authoring them
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetVotesByParty(self, request, context):
"""Get votes by party casting them
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNewMarketProposals(self, request, context):
"""Get governance data (proposals and votes) for proposals that aim creating new markets
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetUpdateMarketProposals(self, request, context):
"""Get governance data (proposals and votes) for proposals that aim updating markets
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNetworkParametersProposals(self, request, context):
"""Get governance data (proposals and votes) for proposals that aim updating Vega network parameters
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNewAssetProposals(self, request, context):
"""Get governance data (proposals and votes) for proposals aiming to create new assets
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetProposalByID(self, request, context):
"""Get governance data (proposals and votes) for a proposal located by ID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetProposalByReference(self, request, context):
"""Get governance data (proposals and votes) for a proposal located by reference
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ObserveGovernance(self, request, context):
"""Subscribe to a stream of all governance updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ObservePartyProposals(self, request, context):
"""Subscribe to a stream of proposal updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ObservePartyVotes(self, request, context):
"""Subscribe to a stream of votes cast by a specific party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ObserveProposalVotes(self, request, context):
"""Subscribe to a stream of proposal votes
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ObserveEventBus(self, request_iterator, context):
"""Subscribe to a stream of events from the core
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Statistics(self, request, context):
"""-- Misc --
Get Statistics on Vega
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetVegaTime(self, request, context):
"""Get Time
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AccountsSubscribe(self, request, context):
"""Subscribe to a stream of Accounts
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CandlesSubscribe(self, request, context):
"""Subscribe to a stream of Candles
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarginLevelsSubscribe(self, request, context):
"""Subscribe to a stream of Margin Levels
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarketDepthSubscribe(self, request, context):
"""Subscribe to a stream of Market Depth
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarketDepthUpdatesSubscribe(self, request, context):
"""Subscribe to a stream of Market Depth Price Level Updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MarketsDataSubscribe(self, request, context):
"""Subscribe to a stream of Markets Data
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OrdersSubscribe(self, request, context):
"""Subscribe to a stream of Orders
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PositionsSubscribe(self, request, context):
"""Subscribe to a stream of Positions
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TradesSubscribe(self, request, context):
"""Subscribe to a stream of Trades
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TransferResponsesSubscribe(self, request, context):
"""Subscribe to a stream of Transfer Responses
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNodeSignaturesAggregate(self, request, context):
"""Get an aggregate of signatures from all the nodes of the network
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AssetByID(self, request, context):
"""Get an asset by its identifier
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Assets(self, request, context):
"""Get a list of all assets on Vega
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EstimateFee(self, request, context):
"""Get an estimate for the fee to be paid for a given order
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EstimateMargin(self, request, context):
"""Get an estimate for the margin required for a new order
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ERC20WithdrawalApproval(self, request, context):
"""Get the bundle approval for an ERC20 withdrawal,
these data are being used to bundle the call to the smart contract on the ethereum bridge
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Withdrawal(self, request, context):
"""Get a withdrawal by its identifier
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Withdrawals(self, request, context):
"""Get withdrawals for a party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Deposit(self, request, context):
"""Get a deposit by its identifier
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Deposits(self, request, context):
"""Get deposits for a party
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NetworkParameters(self, request, context):
"""Get the network parameters
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LiquidityProvisions(self, request, context):
"""Get the liquidity provision orders
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OracleSpec(self, request, context):
"""Get an oracle spec by ID
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OracleSpecs(self, request, context):
"""Get the oracle specs
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def OracleDataBySpec(self, request, context):
"""Get all oracle data
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TradingDataServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'MarketAccounts': grpc.unary_unary_rpc_method_handler(
servicer.MarketAccounts,
request_deserializer=api_dot_trading__pb2.MarketAccountsRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketAccountsResponse.SerializeToString,
),
'PartyAccounts': grpc.unary_unary_rpc_method_handler(
servicer.PartyAccounts,
request_deserializer=api_dot_trading__pb2.PartyAccountsRequest.FromString,
response_serializer=api_dot_trading__pb2.PartyAccountsResponse.SerializeToString,
),
'FeeInfrastructureAccounts': grpc.unary_unary_rpc_method_handler(
servicer.FeeInfrastructureAccounts,
request_deserializer=api_dot_trading__pb2.FeeInfrastructureAccountsRequest.FromString,
response_serializer=api_dot_trading__pb2.FeeInfrastructureAccountsResponse.SerializeToString,
),
'Candles': grpc.unary_unary_rpc_method_handler(
servicer.Candles,
request_deserializer=api_dot_trading__pb2.CandlesRequest.FromString,
response_serializer=api_dot_trading__pb2.CandlesResponse.SerializeToString,
),
'MarketDataByID': grpc.unary_unary_rpc_method_handler(
servicer.MarketDataByID,
request_deserializer=api_dot_trading__pb2.MarketDataByIDRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketDataByIDResponse.SerializeToString,
),
'MarketsData': grpc.unary_unary_rpc_method_handler(
servicer.MarketsData,
request_deserializer=api_dot_trading__pb2.MarketsDataRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketsDataResponse.SerializeToString,
),
'MarketByID': grpc.unary_unary_rpc_method_handler(
servicer.MarketByID,
request_deserializer=api_dot_trading__pb2.MarketByIDRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketByIDResponse.SerializeToString,
),
'MarketDepth': grpc.unary_unary_rpc_method_handler(
servicer.MarketDepth,
request_deserializer=api_dot_trading__pb2.MarketDepthRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketDepthResponse.SerializeToString,
),
'Markets': grpc.unary_unary_rpc_method_handler(
servicer.Markets,
request_deserializer=api_dot_trading__pb2.MarketsRequest.FromString,
response_serializer=api_dot_trading__pb2.MarketsResponse.SerializeToString,
),
'OrderByMarketAndID': grpc.unary_unary_rpc_method_handler(
servicer.OrderByMarketAndID,
request_deserializer=api_dot_trading__pb2.OrderByMarketAndIDRequest.FromString,
response_serializer=api_dot_trading__pb2.OrderByMarketAndIDResponse.SerializeToString,
),
'OrderByReference': grpc.unary_unary_rpc_method_handler(
servicer.OrderByReference,
request_deserializer=api_dot_trading__pb2.OrderByReferenceRequest.FromString,
response_serializer=api_dot_trading__pb2.OrderByReferenceResponse.SerializeToString,
),
'OrdersByMarket': grpc.unary_unary_rpc_method_handler(
servicer.OrdersByMarket,
request_deserializer=api_dot_trading__pb2.OrdersByMarketRequest.FromString,
response_serializer=api_dot_trading__pb2.OrdersByMarketResponse.SerializeToString,
),
'OrdersByParty': grpc.unary_unary_rpc_method_handler(
servicer.OrdersByParty,
request_deserializer=api_dot_trading__pb2.OrdersByPartyRequest.FromString,
response_serializer=api_dot_trading__pb2.OrdersByPartyResponse.SerializeToString,
),
'OrderByID': grpc.unary_unary_rpc_method_handler(
servicer.OrderByID,
request_deserializer=api_dot_trading__pb2.OrderByIDRequest.FromString,
response_serializer=api_dot_trading__pb2.OrderByIDResponse.SerializeToString,
),
'OrderVersionsByID': grpc.unary_unary_rpc_method_handler(
servicer.OrderVersionsByID,
request_deserializer=api_dot_trading__pb2.OrderVersionsByIDRequest.FromString,
response_serializer=api_dot_trading__pb2.OrderVersionsByIDResponse.SerializeToString,
),
'MarginLevels': grpc.unary_unary_rpc_method_handler(
servicer.MarginLevels,
request_deserializer=api_dot_trading__pb2.MarginLevelsRequest.FromString,
response_serializer=api_dot_trading__pb2.MarginLevelsResponse.SerializeToString,
),
'Parties': grpc.unary_unary_rpc_method_handler(
servicer.Parties,
request_deserializer=api_dot_trading__pb2.PartiesRequest.FromString,
response_serializer=api_dot_trading__pb2.PartiesResponse.SerializeToString,
),
'PartyByID': grpc.unary_unary_rpc_method_handler(
servicer.PartyByID,
request_deserializer=api_dot_trading__pb2.PartyByIDRequest.FromString,
response_serializer=api_dot_trading__pb2.PartyByIDResponse.SerializeToString,
),
'PositionsByParty': grpc.unary_unary_rpc_method_handler(
servicer.PositionsByParty,
request_deserializer=api_dot_trading__pb2.PositionsByPartyRequest.FromString,
response_serializer=api_dot_trading__pb2.PositionsByPartyResponse.SerializeToString,
),
'LastTrade': grpc.unary_unary_rpc_method_handler(
servicer.LastTrade,
request_deserializer=api_dot_trading__pb2.LastTradeRequest.FromString,
response_serializer=api_dot_trading__pb2.LastTradeResponse.SerializeToString,
),
'TradesByMarket': grpc.unary_unary_rpc_method_handler(
servicer.TradesByMarket,
request_deserializer=api_dot_trading__pb2.TradesByMarketRequest.FromString,
response_serializer=api_dot_trading__pb2.TradesByMarketResponse.SerializeToString,
),
'TradesByOrder': grpc.unary_unary_rpc_method_handler(
servicer.TradesByOrder,
request_deserializer=api_dot_trading__pb2.TradesByOrderRequest.FromString,
| |
<filename>geos/kml.py
"""
This module serves for generating Google Earth KML files
which create an overlay of tiled web maps.
This module comes with three major classes:
* KMLMaster:
Creates a KML that contains network links to
multiple KMLMapRoots (-> overview over available maps)
* KMLMapRoot:
Root document of a map containing the tiles of the first zoom level.
Can be used standalone to display one specific map.
* KMLRegion:
A region within a KML document containing multiple tiles and four
network links to the next zoom level.
The number of tiles per KML region can be specified with the `log_tiles_per_row`
parameter. The number of tiles per region impacts the number of http requests to the server.
Many tiles per region will reduce the amount of KML documents requested while increasing the
number of tiles loaded at once which may be bad for users with a weak internet connection.
Understanding the `log_tiles_per_row` is likely to require some explanation:
A KMLRegion consists of:
* always four network links to the next zoom level
* a certain number of ground overlays (the actual tile images)
The following constraints apply:
* A KML region is always a square (nrow = ncol)
* the number of ground overlays per row is always a power of two.
`log_tile_per_row` is the log2(tiles per row per region).
Example: `log_tiles_per_row = 0` -> 2**0 = 1 tile per row -> 1 tile per region
Network Links Ground Overlay
--- --- -------
| 1 | 2 | | |
--- --- | 1 |
| 3 | 4 | | |
--- --- -------
Example: `log_tiles_per_row = 1` -> 2**1 = 2 tiles per row -> four tiles per region
Network Links Ground Overlays
--- --- -------
| 1 | 2 | | 1 | 2 |
--- --- --- ---
| 3 | 4 | | 3 | 4 |
--- --- -------
"""
from pykml_geos.factory import KML_ElementMaker as KML
from geos.geometry import *
from lxml import etree
import math
from abc import ABCMeta
from geos.mapsource import walk_mapsources, F_SEP
DEFAULT_MAX_LOD_PIXELS = -1  # KML convention: -1 means "visible up to infinite size"
DEFAULT_MIN_LOD_PIXELS = 128  # a region activates once it covers >= 128 px on screen
MIN_ZOOM_LIMIT = 5 # if minZoom is higher than that, create empty network links.
def kml_element_name(grid_coords, elem_id="KML"):
    """
    Build a unique KML element name from a grid coordinate.

    Args:
        grid_coords (GridCoordinate): coordinate with zoom/x/y attributes
        elem_id (str): prefix identifying the element type

    >>> kml_element_name(GridCoordinate(zoom=5, x=42, y=60), elem_id="NL")
    'NL_5_42_60'
    """
    return "{}_{}_{}_{}".format(
        elem_id, grid_coords.zoom, grid_coords.x, grid_coords.y)
def kml_lat_lon_box(geo_bb):
    """
    Build the cardinal-direction children of a <LatLonBox>/<LatLonAltBox>.

    Args:
        geo_bb (GeographicBB): geographic bounding box with min/max lat-lon

    Returns:
        Tuple: (<north>, <south>, <east>, <west>) KML elements, in that order
    """
    north = KML.north(geo_bb.max.lat)
    south = KML.south(geo_bb.min.lat)
    east = KML.east(geo_bb.max.lon)
    west = KML.west(geo_bb.min.lon)
    return north, south, east, west
def kml_lod(min_lod_pixels=DEFAULT_MIN_LOD_PIXELS, max_lod_pixels=DEFAULT_MAX_LOD_PIXELS):
    """
    Build a KML <Lod> (level of detail) element.

    A Region is visible only while its projected screen size lies between
    <minLodPixels> and <maxLodPixels> (in square pixels); outside that range
    the Region is inactive.
    (see https://developers.google.com/kml/documentation/kml_21tutorial)

    Args:
        min_lod_pixels (int): lower visibility bound in screen pixels
        max_lod_pixels (int): upper visibility bound; -1 means unbounded
    """
    return KML.Lod(
        KML.minLodPixels(min_lod_pixels),
        KML.maxLodPixels(max_lod_pixels),
    )
def kml_region(region_coords, min_lod_pixels=DEFAULT_MIN_LOD_PIXELS,
               max_lod_pixels=DEFAULT_MAX_LOD_PIXELS):
    """
    Build a KML <Region> covering the geographic bounds of *region_coords*.

    Args:
        region_coords (RegionCoordinate): region in the tile grid
        min_lod_pixels (int): see `kml_lod`
        max_lod_pixels (int): see `kml_lod`

    Returns:
        KMLElement: the KML <Region>
    """
    geo_bounds = region_coords.geographic_bounds()
    lod = kml_lod(min_lod_pixels=min_lod_pixels, max_lod_pixels=max_lod_pixels)
    return KML.Region(lod, KML.LatLonAltBox(*kml_lat_lon_box(geo_bounds)))
def kml_network_link(href, name=None, region_coords=None, visible=True):
    """
    Build a KML <NetworkLink> element, optionally bound to a grid region.

    Args:
        href (str): the href attribute of the NetworkLink
        name (str): KML <name>; derived from *region_coords* when omitted
        region_coords (RegionCoordinate): region this link loads on demand
        visible (bool): if True the link appears checked in Google Earth

    Returns:
        KMLElement: the KML <NetworkLink>
    """
    link = KML.NetworkLink()
    if region_coords is not None and name is None:
        name = kml_element_name(region_coords, "NL")
    if name is not None:
        link.append(KML.name(name))
    if region_coords is not None:
        # scale the activation threshold with the number of tiles per row
        lod_floor = DEFAULT_MIN_LOD_PIXELS * (2 ** region_coords.log_tiles_per_row)
        link.append(kml_region(region_coords, min_lod_pixels=lod_floor))
    if not visible:
        link.append(KML.visibility(0))
    link.append(KML.Link(KML.href(href), KML.viewRefreshMode("onRegion")))
    return link
def kml_ground_overlay(tile_coords, tile_url):
    """
    Build a KML <GroundOverlay> displaying one map tile.

    Args:
        tile_coords (TileCoordinate): position of the tile in the grid
        tile_url (str): web URL of the actual tile image

    Returns:
        KMLElement: the KML <GroundOverlay>
    """
    icon = KML.Icon(KML.href(tile_url))
    bounds = KML.LatLonBox(*kml_lat_lon_box(tile_coords.geographic_bounds()))
    return KML.GroundOverlay(
        KML.name(kml_element_name(tile_coords, "GO")),
        # draw higher zoom levels on top of lower ones
        KML.drawOrder(tile_coords.zoom),
        icon,
        bounds,
    )
def kml_folder(name):
    """
    Build a KML <Folder> element labelled with *name*.

    Args:
        name (str): folder name

    Returns:
        KMLElement: the KML <Folder>
    """
    folder = KML.Folder(KML.name(name))
    return folder
class URLFormatter:
    """Build absolute URLs for map resources served by this host."""

    def __init__(self, host, port, url_scheme="http"):
        self.host = host
        self.port = port
        self.url_scheme = url_scheme

    def get_abs_url(self, rel_url):
        """
        Create an absolute url from a relative one.
        >>> url_formatter = URLFormatter("example.com", 80)
        >>> url_formatter.get_abs_url("kml_master.kml")
        'http://example.com:80/kml_master.kml'
        """
        path = rel_url.lstrip("/")
        return "{scheme}://{host}:{port}/{path}".format(
            scheme=self.url_scheme, host=self.host, port=self.port, path=path)

    def get_map_root_url(self, mapsource):
        """URL of the root KML document of *mapsource*."""
        return self.get_abs_url("/maps/{}.kml".format(mapsource.id))

    def get_map_url(self, mapsource, grid_coords):
        """URL of the KML document for one region of *mapsource*."""
        rel = "/maps/{}/{}/{}/{}.kml".format(
            mapsource.id, grid_coords.zoom, grid_coords.x, grid_coords.y)
        return self.get_abs_url(rel)
class KMLMap(metaclass=ABCMeta):
    """Abstract base class representing a KML Document"""

    MIME_TYPE = "application/vnd.google-earth.kml+xml"

    def __init__(self, url_formatter):
        """
        Args:
            url_formatter (URLFormatter): URLFormatter object
        """
        self.url_formatter = url_formatter
        self.kml_doc = KML.Document()
        self.kml_root = KML.kml(self.kml_doc)

    def add_elem(self, kml_elem):
        """Append a single element to the KML document."""
        self.kml_doc.append(kml_elem)

    def add_elems(self, kml_elems):
        """
        Append every element yielded by an iterable.

        Args:
            kml_elems (iterable of KMLElements): any iterator of KML
                elements; may also be another KMLMap instance.
        """
        for elem in kml_elems:
            self.add_elem(elem)

    def get_kml(self):
        """Serialize the document as pretty-printed KML/XML."""
        return etree.tostring(self.kml_root, pretty_print=True,
                              xml_declaration=True)

    def __iter__(self):
        yield from self.kml_doc.iterchildren()
class KMLMaster(KMLMap):
    """KML master document holding NetworkLinks to all maps in the
    mapsource directory."""

    def __init__(self, url_formatter, mapsources):
        """
        Create a KML master document.

        Args:
            mapsources (list of MapSource):
        """
        super().__init__(url_formatter)
        self.map_folders = {}
        for root, folders, maps in walk_mapsources(mapsources):
            self.map_folders[root] = {"folders": folders, "maps": maps}
        self.add_maps(parent=self.kml_doc)

    def add_maps(self, parent, root_path=""):
        """
        Recursively append maps and sub-folders below *root_path*.

        Args:
            parent (KMLElement): element receiving the child folders/maps
            root_path (str): path of *parent* in the folder hierarchy
        """
        node = self.map_folders[root_path]
        for mapsource in node["maps"]:
            parent.append(self.get_network_link(mapsource))
        for subfolder in node["folders"]:
            folder_elem = kml_folder(subfolder)
            parent.append(folder_elem)
            self.add_maps(parent=folder_elem,
                          root_path=F_SEP.join((root_path, subfolder)))

    def get_network_link(self, mapsource):
        """KML <NetworkLink> pointing at the root document of *mapsource*."""
        root_url = self.url_formatter.get_map_root_url(mapsource)
        return kml_network_link(root_url, name=mapsource.name, visible=False)
class KMLMapRoot(KMLMap):
"""Represents the root document of an individual map.
Can be used as standalone KML to display that map only."""
def __init__(self, url_formatter, mapsource, log_tiles_per_row):
"""
Create the root document of an individual map.
Args:
mapsource (MapSource):
log_tiles_per_row (int): see module description. Needs to be in range(0, 5).
Note:
The zoom level of the root document is determined as follows:
The min_zoom level is read from the mapsource. `log_tiles_per_row` defines
a lower bound for min_zoom. This is because e.g. on zoom level 0 we could not have
more than one tile per row per region as there is simply only one tile at that zoom
level.
However, we can run into severe performance issues, if either min_zoom
or log_tiles_per_row are too high. At a zoom level of only 8, a root
document spanning the whole world would already contain (2**8)**2 = 65536 tiles
which will break map display in Google Earth.
Therefore MIN_ZOOM_LIMIT is applied as an upper bound. If the determined min_zoom level
exceeds this limit, empty network links (without GroundOverlay) will be used to adaptively
load the next zoom level(s).
"""
super().__init__(url_formatter)
self.mapsource = mapsource
# NOTE(review): input validation via assert is stripped under `python -O`;
# consider raising ValueError instead.
assert(log_tiles_per_row in range(0, 5))
self.log_tiles_per_row = log_tiles_per_row
# see docstring for explanation
# Despite its name, MIN_ZOOM_LIMIT acts as an UPPER bound on the root
# document's zoom level here.
zoom = min(max(mapsource.min_zoom, log_tiles_per_row), MIN_ZOOM_LIMIT)
n_tiles = 2 ** zoom # tiles per row of the whole document
tiles_per_row = 2 ** self.log_tiles_per_row # tiles per row per region
n_regions = n_tiles//tiles_per_row # regions per row
assert n_tiles % tiles_per_row == 0, "regions per row is not an integer."
# Without a bounding box, iterate the full world grid; otherwise restrict
# region iteration to the mapsource's bbox projected to tile space.
if mapsource.bbox is None:
regions = griditer(0, 0, n_regions)
else:
tile_bounds = mapsource.bbox.to_mercator().to_tile(zoom)
regions = bboxiter(tile_bounds, tiles_per_row)
self.add_elem(KML.name("{} root".format(mapsource.name)))
# Emit one KMLRegion (tiles + next-level network links) per region; the
# KMLRegion iterable is consumed by add_elems.
for x, y in regions:
self.add_elems(KMLRegion(self.url_formatter, self.mapsource,
self.log_tiles_per_row, zoom, x, y))
class KMLRegion(KMLMap):
"""Represents a KML document that is loaded on demand.
It contains the actual tiles as GroundOverlays and contains NetworkLinks
to the next LevelOfDetail."""
def __init__(self, | |
<reponame>aaronvincent/gambit_aaron
#!/usr/bin/env python
#
# GAMBIT: Global and Modular BSM Inference Tool
#*********************************************
# \file
#
# The GAMBIT Colouring-in Book
#
# A script for priming pippi runs and patching
# together their outputs. Designed to allow
# one to highlight specific regions of preferred
# parameter space, and then overplot them
# (e.g. relic density mechanisms).
#
#*********************************************
#
# Authors (add name and date if you modify):
#
# \author <NAME>
# (<EMAIL>)
# \date 2017 Feb
#
#*********************************************
import sys
import yaml
import os
import copy
import re
import datetime
import stat
from multiprocessing import cpu_count
# Global constants
verbose = False                    # chatty progress output while priming
default_contour = 95.4             # default confidence-level contour (percent)
default_contour_style = "Solid"    # ctioga2 line style for region outlines
default_contour_width = 2          # ctioga2 line width for region outlines
default_colour = "blue"            # fallback Blockshading_* colour scheme
default_n_contours = 150           # number of fill contours used to shade a region
n_threads = cpu_count()            # parallel plot jobs in the bulk script
# Checks for the presence of one or more keys in a YAML node
# Checks for the presence of one or more keys in a YAML node
def check_node(node, name, keys):
    """Abort via sys.exit unless *node* contains the required key(s).

    *keys* is either a single key name (which must be present) or a
    sequence of alternatives (at least one must be present).
    """
    if isinstance(keys, basestring):
        error = " Region \""+name+"\" must have key \""+str(keys)+"\"."
        found = keys in node
    else:
        error = " Region \""+name+"\" must have at least one of the keys "+str(keys)+"."
        found = any(k in node for k in keys)
    if not found:
        sys.exit(error)
def usage():
    """Print usage information and terminate the script."""
    sys.exit('\n Usage: colouring [prime/combine] file1 file2\n where file1 is a YAML file that defines the regions you want to find.\n file2 is the pip file that defines the pippi run to modify.\n')
def main(arguments):
#Starting routine
commandLineOptions = { 'prime':prime, 'combine':combine }
if (len(arguments) is not 4):
#Print usage info and quit if pippi is invoked without an argument
usage()
else:
try:
#Check if colouring has been invoked with one of the two known specific commands
command = commandLineOptions[arguments[1]]
command(arguments[2], arguments[3])
except KeyError:
print
print 'Unrecognised colouring mode: '+arguments[1]
usage()
sys.exit()
# Prepare separate pippi runs for each region
# Prepare separate pippi runs for each region
def prime(region_file, pip_file):
    """Generate one pip file per region defined in *region_file*.

    Reads the region definitions (YAML) and a template pip file, then for
    every region in drawing_order writes a copy of the pip file with the
    region's cut, labels, colours and directories substituted in.
    """
    print
    # Load up the regions from the yaml file, and the template for pippi runs from the pip file.
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags -- fine for trusted local config, unsafe on untrusted input.
    regions = yaml.load(file(region_file, 'r'))
    pip = file(pip_file, 'r').read()
    # Get the drawing order
    if regions["drawing_order"]:
        drawing_order = regions["drawing_order"]
    else:
        sys.exit('ERROR: No drawing_order specified in '+region_file)
    # Retrieve the best fit in the base scan.
    #First work out where the best fit file is. Start with the dir.
    # The pip file assigns parse_dir with either single or double quotes;
    # group(2) matches the double-quoted form, group(3) the single-quoted one.
    parse_dir = re.search('parse_dir\s*=\s*("(.*)"|\'(.*)\')', pip)
    parse_dir = parse_dir.group(2 if parse_dir.group(2) else 3)
    # Now infer the filename from the chain name.
    chain_name = re.search('main_chain\s*=\s*("(.*)"|\'(.*)\')', pip)
    chain_name = chain_name.group(2 if chain_name.group(2) else 3)
    # Strip any leading directories and a short (<=3 char) extension.
    chain_name = re.sub(r'.*/|\..?.?.?$', '', chain_name)
    best_filename = parse_dir+"/"+chain_name+".best"
    # Now open the file and read in the best fit.
    # The best-fit value is the second ':'-separated field of the first
    # non-comment line.
    best_file = file(best_filename, 'r')
    for x in best_file.readlines():
        if not x.startswith('#'):
            parts = x.split(':')
            bestFit = float(parts[1])
            break
    # Iterate over the regions defined in the yaml file
    for name in drawing_order:
        if (verbose): print "Priming pippi run for extracting region: "+name
        mechanism = regions[name]
        # Check that the mechanism is sufficiently specified
        check_node(mechanism, name, ["extra_1D_plots", "extra_2D_plots"])
        check_node(mechanism, name, "datastream")
        check_node(mechanism, name, "cut")
        check_node(mechanism, name, "label")
        # Make a new pip file for this region
        newpip = copy.deepcopy(pip)
        # Replace the necessary parts of it, starting with the common section
        contour = default_contour
        if ("contour_levels" in mechanism): contour = mechanism["contour_levels"]
        newpip = re.sub('contour_levels\s*=.*\n', 'contour_levels = '+str(contour)+'\n', newpip)
        if ("extra_1D_plots" in mechanism): newpip = re.sub('oneD_plot_quantities\s*=\s*', 'oneD_plot_quantities = '+str(mechanism["extra_1D_plots"])+' ', newpip)
        if ("extra_2D_plots" in mechanism): newpip = re.sub('twoD_plot_quantities\s*=\s*', 'twoD_plot_quantities = '+str(mechanism["extra_2D_plots"])+' ', newpip)
        newpip = re.sub("bf_lnlike_for_profile_like\s*=\s*", "bf_lnlike_for_profile_like = "+str(bestFit)+" ", newpip)
        # Now the parse section
        newpip = re.sub('parse_dir\s*=.*\n', 'parse_dir = \'parse_'+name+'\'\n', newpip)
        newpip = re.sub('data_ranges\s*=\s*', 'data_ranges = '+mechanism["cut"]+' ', newpip)
        if ("preamble" in mechanism): newpip = re.sub('preamble\s*=.*\n', 'preamble = \''+mechanism["preamble"]+'\'\n', newpip)
        newpip = re.sub('assign_to_pippi_datastream\s*=\s*', 'assign_to_pippi_datastream = '
                        +mechanism["datastream"]+' \\\n ', newpip)
        newpip = re.sub('quantity_labels\s*=\s*', 'quantity_labels = '
                        +mechanism["label"]+' \\\n ', newpip)
        # Now the script section
        newpip = re.sub('script_dir\s*=.*\n', 'script_dir = \'scripts_'+name+'\'\n', newpip)
        colour = default_colour
        if ("colour" in mechanism): colour = mechanism["colour"]
        newpip = re.sub('colour_scheme\s*=.*\n', 'colour_scheme = Blockshading_'+colour+'\n', newpip)
        # Now the plot section
        newpip = re.sub('plot_dir\s*=.*\n', 'plot_dir = \'plots_'+name+'\'\n', newpip)
        # Output the new pip file
        filename = os.path.splitext(pip_file)
        filename = filename[0]+"_"+name+filename[1]
        with open(filename,"w") as f:
            f.write(newpip)
        print "  Generated new pip file "+filename
    print
def combine(region_file, pip_file):
# Load up the regions from the yaml file, and the template for pippi runs from the pip file.
regions = yaml.load(file(region_file, 'r'))
pip = file(pip_file, 'r').read()
# Get the drawing order
if regions["drawing_order"]:
drawing_order = regions["drawing_order"]
else:
sys.exit('ERROR: No drawing_order specified in '+region_file)
# Find the filenames of all the 2D profile likelihood scripts
script_dir = re.search('script_dir\s*=\s*("(.*)"|\'(.*)\')', pip)
script_dir = script_dir.group(2 if script_dir.group(2) else 3)
like2D_scripts = [x for x in os.listdir(script_dir) if "like2D" in x]
# Make a new scripts dir for the combined scripts
combo_dir = script_dir+'_combined'
if not os.path.isdir(combo_dir): os.mkdir(combo_dir)
# Make a new plots dir for the combined plots
plot_dir = re.search('plot_dir\s*=\s*("(.*)"|\'(.*)\')', pip)
plot_dir = plot_dir.group(2 if plot_dir.group(2) else 3)
plot_combo_dir = plot_dir+'_combined'
if not os.path.isdir(plot_combo_dir): os.mkdir(plot_combo_dir)
# Make a bulk script that calls all of the combined plotting scripts
bulkscript_name = 'make_combined_'+re.sub('\.pip', '', pip_file)+'_plots.bsh'
bulkscript = file(bulkscript_name, 'w')
bulkscript.write('cd '+combo_dir+'\n')
# Iterate over the different scripts
for script_index, script_name in enumerate(like2D_scripts):
# Initialise empty lists of the things that will vary with region
marker_commands = []
plot_commands = []
fill_colours = []
contour_commands = []
min_contours = []
region_names = []
regional_script = None
# Iterate over all the regions mentioned in the drawing order
for name in drawing_order:
# Open the script file, cycling if it doesn't exist
fullname = script_dir+'_'+name+'/'+script_name
if not os.path.isfile(fullname): continue
regional_script = file(fullname, 'r').read()
# Save the name of the region
region_names.append(name)
# Save the line style for the region
contour_style = regions[name]["contour_style"] if "contour_style" in regions[name] else default_contour_style
# Extract the marker command
marker_command = re.findall('\s\s--draw-marker.*\n', regional_script)
if marker_command: marker_commands.append(marker_command[0])
# Extract the fill colour
fill_colour = re.findall('--#...', regional_script)
fill_colours.append(fill_colour[1][2:])
# Extract the plot command
plot_command = re.findall('\s\s--plot.*fill-transparency 1.*\n', regional_script)
plot_commands.append(plot_command[0])
# Extract the contour command
contour_command_list = re.findall('\s\s--draw-contour.*\n', regional_script)
min_contour = 1.0
for i,x in enumerate(contour_command_list):
contour_command_list[i] = re.sub('width.*\n', 'width '+str(default_contour_width)+'\\\n', x)
contour_command_list[i] = re.sub('style\s*.*?\s', 'style '+contour_style+' ', contour_command_list[i])
min_contour = min(min_contour, float(re.search('--draw-contour\s(.*?)\s', contour_command_list[i]).group(1)))
contour_commands.append(contour_command_list)
min_contours.append(min_contour)
# If this script is not present for any of the regions, go on to the next one.
if (regional_script is None): continue
# Extract the axis styles from the last regional script
axis_styles = re.findall('\s\s--axis-style\s*[^y]*\n', regional_script)
# Read in the original script
base_script = file(script_dir+'/'+script_name, 'r').readlines()
# Extract preamble from original script
preamble = ''
for x in base_script[2:]:
preamble = preamble+x
if x.startswith(' --xyz-map'): break
# Extract textual annotations from the original script
text = [re.sub('White','Black',x) for x in base_script if x.startswith(' --draw-text') or x.startswith(' --legend')]
outfile = open(combo_dir+'/'+script_name,'w')
outfile.write('#!/usr/bin/env bash\n')
outfile.write('# This plot script created by the pippi scripter \'colouring\' on '+datetime.datetime.now().strftime('%c')+'\n')
# Write the number of fill steps to be used
for region in region_names: outfile.write('n_contours_'+re.sub('-','_',region)+'='+str(default_n_contours)+'\n')
# Write the fist half of the ctioga2 command
outfile.write(preamble)
# Draw the filled regions, using many closely-packed contours
for region, plot_command in enumerate(plot_commands):
outfile.write(' --color \''+fill_colours[region]+'\' \\\n')
outfile.write(plot_command)
outfile.write(' $(echo $(for (( i=0; i<$n_contours_'+re.sub('-','_',region_names[region])+'; i++ )); do echo "--draw-contour '
'$(echo "'+str(min_contours[region])+' + '+str(1.0-min_contours[region])+'*$i/($n_contours_'
+re.sub('-','_',region_names[region])+'-1)" | bc -l) /style Solid /width '+str(default_contour_width)+' "; done)) \\\n')
outfile.write(' --draw-contour 1.000 /style Solid /width '+str(default_contour_width)+'\\\n')
# Draw the actual outline contours on top of all the filled regions
for region, plot_command in enumerate(plot_commands):
outfile.write(plot_command)
for x in contour_commands[region]: outfile.write(x)
# Draw any markers
for marker_command in marker_commands:
outfile.write(marker_command)
# Write any textual annotations, and set the axes up in the manner of the region plots
for x in text: outfile.write(x)
for x in axis_styles: outfile.write(x)
# Add an entry to the bulk plotting script
pdf_name = re.sub("\.bsh", ".pdf", script_name)
bulkscript.write('(echo "./'+script_name+'" && ./' + script_name +
' && gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -dNOPAUSE -dQUIET -dBATCH -sOutputFile='+
'../' + plot_combo_dir + '/' + pdf_name + ' ' + pdf_name +
' && rm ' + pdf_name + ') &\n')
if (script_index%n_threads == n_threads - 1 ): bulkscript.write('wait\n')
# Done! Close the new script file and make it executable for both user and group.
outfile.close
os.chmod(combo_dir+'/'+script_name, stat.S_IRWXU | stat.S_IRWXG)
# All over red rover. Go write a | |
= 0
mock_isfile.return_value = True
mock_call.return_value = 0
status = udocker.FileUtil("tarball.tar").verify_tar()
self.assertTrue(status)
# check tar file True call 1
mock_msg.level = 0
mock_isfile.return_value = True
mock_call.return_value = 1
status = udocker.FileUtil("tarball.tar").verify_tar()
self.assertFalse(status)
# def test_19_copydir(self):
# """Test19 FileUtil.copydir()."""
# pass
@mock.patch('udocker.Config')
@mock.patch('udocker.FileUtil.remove')
def test_20_cleanup(self, mock_remove, mock_config):
    """Test20 FileUtil.cleanup() delete tmp files."""
    udocker.Config = mock_config
    udocker.Config.tmpdir = "/tmp"
    # Two files registered in the trash -> remove() must be called twice.
    udocker.FileUtil.tmptrash = {'file1.txt': None, 'file2.txt': None}
    udocker.FileUtil("").cleanup()
    self.assertEqual(mock_remove.call_count, 2)
@mock.patch('udocker.os.path.isdir')
def test_21_isdir(self, mock_isdir):
    """Test21 FileUtil.isdir()."""
    # isdir() must mirror os.path.isdir for both outcomes.
    for mocked, check in ((True, self.assertTrue), (False, self.assertFalse)):
        mock_isdir.return_value = mocked
        check(udocker.FileUtil("somedir").isdir())
@mock.patch('udocker.os.stat')
def test_22_size(self, mock_stat):
    """Test22 FileUtil.size() get file size."""
    # size() must report os.stat's st_size verbatim.
    expected = 4321
    mock_stat.return_value.st_size = expected
    self.assertEqual(udocker.FileUtil("somefile").size(), expected)
def test_23_getdata(self):
    """Test23 FileUtil.getdata() get file content."""
    # getdata() must return the whole (mocked) file content.
    mocked_open = mock.mock_open(read_data='qwerty')
    with mock.patch(BUILTINS + '.open', mocked_open):
        self.assertEqual(udocker.FileUtil("somefile").getdata(), 'qwerty')
def test_24_get1stline(self):
    """Test24 FileUtil.get1stline()."""
    # Only the first line, without its newline, must be returned.
    content = "\n".join(("line1", "line2", "line3")) + "\n"
    with mock.patch(BUILTINS + '.open', mock.mock_open(read_data=content)):
        self.assertEqual(udocker.FileUtil("somefile").get1stline(), 'line1')
def test_25_putdata(self):
    """Test25 FileUtil.putdata()"""
    # Empty filename -> nothing written, falsy result.
    futil = udocker.FileUtil("somefile")
    futil.filename = ""
    data = futil.putdata("qwerty")
    self.assertFalse(data)
    # Writable (mocked) file -> the written payload is returned.
    with mock.patch(BUILTINS + '.open', mock.mock_open()):
        data = udocker.FileUtil("somefile").putdata("qwerty")
        self.assertEqual(data, 'qwerty')
@mock.patch('udocker.os.path.exists')
def test_26_getvalid_path(self, mock_pexist):
    """Test26 FileUtil.getvalid_path()."""
    # An existing path must be returned unchanged.
    mock_pexist.return_value = True
    self.assertEqual(udocker.FileUtil("/somedir/somefile").getvalid_path(),
                     "/somedir/somefile")
# def test_27__find_exec(self):
# """Test27 FileUtil._find_exec()."""
# pass
@mock.patch('udocker.Uprocess')
def test_28_find_exec(self, mock_call):
    """Test28 FileUtil.find_exec() find executable."""
    # Pairs of (mocked lookup output, expected find_exec result).
    cases = ((None, ""), ("/bin/ls", "/bin/ls"), ("not found", ""))
    for output, expected in cases:
        mock_call.return_value.get_output.return_value = output
        self.assertEqual(udocker.FileUtil("executable").find_exec(), expected)
@mock.patch('udocker.os.path.lexists')
def test_29_find_inpath(self, mock_exists):
    """Test29 FileUtil.find_inpath() file is in a path."""
    # Both a plain path string and a 'PATH='-prefixed one must behave the
    # same: full path when the file exists, empty string otherwise.
    for path in ("/bin:/usr/bin", "PATH=/bin:/usr/bin"):
        mock_exists.return_value = True
        self.assertEqual(udocker.FileUtil("exec").find_inpath(path),
                         "/bin/exec")
        mock_exists.return_value = False
        self.assertEqual(udocker.FileUtil("exec").find_inpath(path), "")
@mock.patch('udocker.FileUtil._register_prefix')
def test_30_list_inpath(self, mock_regpref):
    """Test30 FileUtil.list_inpath()."""
    # The filename must be joined onto each entry of the path string.
    path = "/bin"
    mock_regpref.return_value = None
    futil = udocker.FileUtil("somefile")
    status = futil.list_inpath(path)
    self.assertEqual(status, ["/bin/somefile"])
@mock.patch('udocker.os.rename')
def test_31_rename(self, mock_rename):
    """Test31 FileUtil.rename()."""
    # os.rename succeeding (no exception) -> rename() reports success.
    mock_rename.return_value = None
    status = udocker.FileUtil("somefile").rename("otherfile")
    self.assertTrue(status)
# def test_32__stream2file(self):
# """Test32 FileUtil._stream2file()."""
# pass
# def test_33__file2stream(self):
# """Test33 FileUtil._file2stream()."""
# pass
# def test_34__file2file(self):
# """Test34 FileUtil._file2file()."""
# pass
def test_35_copyto(self):
    """Test35 FileUtil.copyto() file copy."""
    # The copy must succeed for the default mode as well as explicit
    # write ("w") and append ("a") modes.
    with mock.patch(BUILTINS + '.open', mock.mock_open()):
        status = udocker.FileUtil("source").copyto("dest")
        self.assertTrue(status)
        status = udocker.FileUtil("source").copyto("dest", "w")
        self.assertTrue(status)
        status = udocker.FileUtil("source").copyto("dest", "a")
        self.assertTrue(status)
@mock.patch('udocker.os.path.exists')
def test_36_find_file_in_dir(self, mock_exists):
    """Test36 FileUtil.find_file_in_dir()."""
    # Empty candidate list -> nothing found.
    self.assertEqual(udocker.FileUtil("/dir").find_file_in_dir([]), "")
    # No candidate exists on disk -> empty string.
    mock_exists.side_effect = [False, False]
    self.assertEqual(
        udocker.FileUtil("/dir").find_file_in_dir(["F1", "F2"]), "")
    # Second candidate exists -> its full path is returned.
    mock_exists.side_effect = [False, True]
    self.assertEqual(
        udocker.FileUtil("/dir").find_file_in_dir(["F1", "F2"]), "/dir/F2")
@mock.patch('udocker.os.symlink')
@mock.patch('udocker.os.remove')
@mock.patch('udocker.os.stat')
@mock.patch('udocker.os.chmod')
@mock.patch('udocker.os.access')
@mock.patch('udocker.os.path.dirname')
@mock.patch('udocker.os.path.realpath')
@mock.patch('udocker.os.readlink')
def test_37__link_change_apply(self, mock_readlink,
                               mock_realpath, mock_dirname,
                               mock_access, mock_chmod, mock_stat,
                               mock_remove, mock_symlink):
    """Test37 FileUtil._link_change_apply Apply the link convertion."""
    # Writable link location: the link is replaced (remove + symlink)
    # without any chmod.
    mock_readlink.return_value = "/HOST/DIR"
    mock_realpath.return_value = "/HOST/DIR"
    mock_dirname.return_value = "/HOST"
    mock_access.return_value = True
    futil = udocker.FileUtil("/con")
    futil._link_change_apply("/con/lnk_new", "/con/lnk", False)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
    # Unwritable location with force=True: chmod is additionally used
    # before the link is replaced.
    mock_access.return_value = False
    mock_remove.reset_mock()
    mock_symlink.reset_mock()
    futil = udocker.FileUtil("/con")
    futil._link_change_apply("/con/lnk_new", "/con/lnk", True)
    self.assertTrue(mock_chmod.called)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
@mock.patch('udocker.os.symlink')
@mock.patch('udocker.os.remove')
@mock.patch('udocker.os.stat')
@mock.patch('udocker.os.chmod')
@mock.patch('udocker.os.access')
@mock.patch('udocker.os.path.dirname')
@mock.patch('udocker.os.path.realpath')
@mock.patch('udocker.os.readlink')
def test_38__link_set(self, mock_readlink, mock_realpath, mock_dirname,
                      mock_access, mock_chmod, mock_stat, mock_remove,
                      mock_symlink):
    """Test38 FileUtil._link_set()."""
    # Relative link target: nothing to convert.
    mock_readlink.return_value = "X"
    mock_dirname.return_value = "/HOST"
    futil = udocker.FileUtil("/con")
    status = futil._link_set("/con/lnk", "", "/con", False)
    self.assertFalse(status)
    # Target already inside the container root: nothing to convert.
    mock_readlink.return_value = "/con"
    futil = udocker.FileUtil("/con")
    status = futil._link_set("/con/lnk", "", "/con", False)
    self.assertFalse(status)
    # Host-absolute target, force=False: link rewritten, no chmod.
    mock_readlink.return_value = "/HOST/DIR"
    mock_realpath.return_value = "/HOST/DIR"
    mock_remove.reset_mock()
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_set("/con/lnk", "", "/con", False)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
    self.assertFalse(mock_chmod.called)
    self.assertTrue(status)
    # force=True but location writable: chmod still not needed.
    mock_readlink.return_value = "/HOST/DIR"
    mock_realpath.return_value = "/HOST/DIR"
    mock_access.return_value = True
    mock_remove.reset_mock()
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_set("/con/lnk", "", "/con", True)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
    self.assertFalse(mock_chmod.called)
    self.assertTrue(status)
    # force=True and location unwritable: chmod used to gain access.
    mock_readlink.return_value = "/HOST/DIR"
    mock_realpath.return_value = "/HOST/DIR"
    mock_access.return_value = False
    mock_remove.reset_mock()
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_set("/con/lnk", "", "/con", True)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
    self.assertTrue(mock_chmod.called)
    self.assertTrue(status)
@mock.patch('udocker.os.symlink')
@mock.patch('udocker.os.remove')
@mock.patch('udocker.os.stat')
@mock.patch('udocker.os.chmod')
@mock.patch('udocker.os.access')
@mock.patch('udocker.os.path.dirname')
@mock.patch('udocker.os.path.realpath')
@mock.patch('udocker.os.readlink')
def test_39__link_restore(self, mock_readlink, mock_realpath,
                          mock_dirname, mock_access, mock_chmod,
                          mock_stat, mock_remove, mock_symlink):
    """Test39 FileUtil._link_restore()."""
    # NOTE(review): 'mock_symlink.called_with(...)' below is NOT a real
    # assertion -- Mock auto-creates the attribute and the call always
    # returns a truthy Mock, so these assertTrue checks can never fail.
    # They should become 'mock_symlink.assert_called_with(...)' once the
    # expected arguments are confirmed.
    mock_readlink.return_value = "/con/AAA"
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "/con", "/root", False)
    self.assertTrue(status)
    # Target under the container root.
    mock_readlink.return_value = "/con/AAA"
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "/con", "/root", False)
    self.assertTrue(status)
    self.assertTrue(mock_symlink.called_with("/con/lnk", "/AAA"))
    # Target under the (alternative) root prefix.
    mock_readlink.return_value = "/root/BBB"
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "/con", "/root", False)
    self.assertTrue(status)
    self.assertTrue(mock_symlink.called_with("/con/lnk", "/BBB"))
    # Target outside both prefixes: nothing restored.
    mock_readlink.return_value = "/XXX"
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "/con", "/root", False)
    self.assertFalse(status)
    # force=True with accessible location: no chmod needed.
    mock_readlink.return_value = "/root/BBB"
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "/con", "/root", True)
    self.assertTrue(status)
    self.assertTrue(mock_symlink.called_with("/con/lnk", "/BBB"))
    self.assertFalse(mock_chmod.called)
    # force=True with inaccessible location: chmod used to gain access.
    mock_readlink.return_value = "/root/BBB"
    mock_access.return_value = False
    mock_symlink.reset_mock()
    mock_chmod.reset_mock()
    futil = udocker.FileUtil("/con")
    status = futil._link_restore("/con/lnk", "", "/root", True)
    self.assertTrue(status)
    self.assertTrue(mock_symlink.called_with("/con/lnk", "/BBB"))
    self.assertTrue(mock_chmod.called)
    self.assertTrue(mock_remove.called)
    self.assertTrue(mock_symlink.called)
@mock.patch('udocker.FileUtil._link_restore')
@mock.patch('udocker.FileUtil._link_set')
@mock.patch('udocker.Msg')
@mock.patch('udocker.FileUtil._is_safe_prefix')
@mock.patch('udocker.os.lstat')
@mock.patch('udocker.os.path.islink')
@mock.patch('udocker.os.walk')
@mock.patch('udocker.os.path.realpath')
def test_40_links_conv(self, mock_realpath, mock_walk, mock_islink,
                       mock_lstat, mock_is_safe_prefix, mock_msg,
                       mock_link_set, mock_link_restore):
    """Test40 FileUtil.links_conv()."""
    # Unsafe prefix -> conversion is refused.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = False
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    self.assertEqual(status, None)
    # Safe prefix but empty tree -> nothing converted.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_walk.return_value = []
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    self.assertEqual(status, [])
    # Directory containing no files.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_walk.return_value = [("/", [], []), ]
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    self.assertEqual(status, [])
    # Regular (non-link) files are ignored.
    # BUGFIX: the original rebound the parameter ('mock_islink = False'),
    # which configures nothing; the patched os.path.islink must be driven
    # through its return_value.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_islink.return_value = False
    mock_walk.return_value = [("/", [], ["F1", "F2"]), ]
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    self.assertEqual(status, [])
    # Links owned by a different uid are skipped.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_islink.return_value = True
    mock_lstat.return_value.st_uid = 1
    udocker.Config.uid = 0
    mock_walk.return_value = [("/", [], ["F1", "F2"]), ]
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    self.assertEqual(status, [])
    # Matching uid, to-container direction -> _link_restore must not run.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_islink.return_value = True
    mock_lstat.return_value.st_uid = 1
    mock_link_set.reset_mock()
    mock_link_restore.reset_mock()
    udocker.Config.uid = 1
    mock_walk.return_value = [("/", [], ["F1", "F2"]), ]
    status = udocker.FileUtil("/ROOT").links_conv(False, True, "")
    # self.assertTrue(mock_link_set.called)
    self.assertFalse(mock_link_restore.called)
    # Matching uid, from-container direction -> _link_set must not run.
    mock_realpath.return_value = "/ROOT"
    mock_is_safe_prefix.return_value = True
    mock_islink.return_value = True
    mock_lstat.return_value.st_uid = 1
    mock_link_set.reset_mock()
    mock_link_restore.reset_mock()
    udocker.Config.uid = 1
    mock_walk.return_value = [("/", [], ["F1", "F2"]), ]
    status = udocker.FileUtil("/ROOT").links_conv(False, False, "")
    self.assertFalse(mock_link_set.called)
    # self.assertTrue(mock_link_restore.called)
@mock.patch('udocker.os.listdir')
@mock.patch('udocker.os.path.isdir')
@mock.patch('udocker.os.path.basename')
@mock.patch('udocker.os.path.dirname')
@mock.patch('udocker.FileUtil._register_prefix')
def test_41_match(self, mock_regpref, mock_dir, mock_base,
                  mock_isdir, mock_listdir):
    """Test41 FileUtil.match()."""
    # Pattern directory missing -> no matches.
    mock_regpref.return_value = None
    mock_dir.return_value = "/dir"
    mock_base.return_value = "file*"
    mock_isdir.return_value = False
    futil = udocker.FileUtil("/dir")
    status = futil.match()
    self.assertEqual(status, [])
    # Directory present -> entries matching 'file*' are returned as
    # full paths.
    mock_regpref.return_value = None
    mock_dir.return_value = "/dir"
    mock_base.return_value = "file*"
    mock_isdir.return_value = True
    mock_listdir.return_value = ["file1", "file2"]
    futil = udocker.FileUtil("/dir")
    status = futil.match()
    self.assertEqual(status, ["/dir/file1", "/dir/file2"])
class UdockerToolsTestCase(unittest.TestCase):
"""Test UdockerTools() download and setup of tools needed by udocker."""
@classmethod
def setUpClass(cls):
    """Setup test."""
    # Prepare the environment shared by every test in this class.
    set_env()
@mock.patch('udocker.GetURL')
@mock.patch('udocker.LocalRepository')
@mock.patch('udocker.Config')
def test_01_init(self, mock_config, mock_localrepo, mock_geturl):
    """Test01 UdockerTools() constructor."""
    mock_geturl.return_value = None
    # Point the global Config at harmless temporary locations.
    udocker.Config = mock_config
    udocker.Config.tmpdir = "/tmp"
    udocker.Config.tarball = "/tmp/xxx"
    udocker.Config.installinfo = "/tmp/xxx"
    udocker.Config._tarball_release = "0.0.0"
    localrepo = mock_localrepo
    localrepo.bindir = "/bindir"
    utools = udocker.UdockerTools(localrepo)
    # The constructor must keep a reference to the given repository.
    self.assertEqual(utools.localrepo, localrepo)
@mock.patch('udocker.Msg')
@mock.patch('udocker.LocalRepository')
def test_02__instructions(self, mock_localrepo, mock_msg):
    """Test02 UdockerTools._instructions()."""
    utools = udocker.UdockerTools(mock_localrepo)
    utools._instructions()
    # The instructions must be emitted through Msg().out().
    self.assertTrue(mock_msg.return_value.out.called)
@mock.patch('udocker.LocalRepository')
@mock.patch('udocker.FileUtil')
def test_03__get_version(self, futil, mock_localrepo):
    """Test03 UdockerTools._get_version()."""
    # The version string is read through FileUtil.getdata().
    futil.return_value.getdata.return_value = "0.0.0"
    utools = udocker.UdockerTools(mock_localrepo)
    utools._tarball_release = "0.0.0"
    status = utools._get_version("versionfile")
    self.assertEqual(status, "0.0.0")
# @mock.patch('udocker.LocalRepository')
# @mock.patch('udocker.FileUtil')
# def test_04__version_isequal(self, futil, mock_localrepo):
# """Test04 UdockerTools._version_isequal()."""
# futil.return_value.getdata.return_value = "0.0.0"
# utools = udocker.UdockerTools(mock_localrepo)
# utools._tarball_release = "0.0.0"
# status = utools._version_isequal("0.0.0")
# self.assertTrue(status)
# futil.return_value.getdata.return_value = "0.0.0"
# utools = udocker.UdockerTools(mock_localrepo)
# utools._tarball_release = "1.1.1"
# status = utools._version_isequal("0.0.0")
# self.assertFalse(status)
@mock.patch('udocker.UdockerTools._get_version')
@mock.patch('udocker.LocalRepository')
def test_05_is_available(self, mock_localrepo, mock_getver):
    """Test05 UdockerTools.is_available()."""
    # mock_getver.return_value = "0.0.1"
    # utools = udocker.UdockerTools(mock_localrepo)
    # status = utools.is_available()
    # self.assertFalse(status)
    # A sufficiently recent installed version reports availability.
    mock_getver.return_value = "2.0.0"
    utools = udocker.UdockerTools(mock_localrepo)
    status = utools.is_available()
    self.assertTrue(status)
@mock.patch('udocker.GetURL')
@mock.patch('udocker.FileUtil')
@mock.patch('udocker.UdockerTools.__init__')
def test_06__download(self, mock_init, mock_futil, mock_gurl):
    """Test06 UdockerTools._download()."""
    mock_init.return_value = None
    utools = udocker.UdockerTools(None)
    utools.curl = mock_gurl
    utools._tarball = "http://node.domain/filename.tgz"
    hdr = udocker.CurlHeader()
    # Successful HTTP transfer -> the temporary tarball path is returned.
    hdr.data["X-ND-HTTPSTATUS"] = "200"
    hdr.data["X-ND-CURLSTATUS"] = 0
    mock_futil.return_value.mktmp.return_value = "tmptarball"
    mock_gurl.get.return_value = (hdr, "")
    status = utools._download(utools._tarball)
    self.assertEqual(status, "tmptarball")
    # Failed transfer -> empty string.
    hdr.data["X-ND-HTTPSTATUS"] = "400"
    hdr.data["X-ND-CURLSTATUS"] = 1
    status = utools._download(utools._tarball)
    self.assertEqual(status, "")
@mock.patch('udocker.GetURL')
@mock.patch('udocker.os.path.isfile')
@mock.patch('udocker.os.path.realpath')
@mock.patch('udocker.os.path.exists')
@mock.patch('udocker.UdockerTools._download')
def test_07__get_file(self, mock_download, mock_exists,
                      mock_rpath, mock_isfile, mock_gurl):
    """Test07 UdockerTools._get_file()."""
    # Remote URL -> the file is downloaded.
    mock_download.return_value = "file1"
    mock_exists.return_value = False
    mock_isfile.return_value = True
    url = "http://my.com/file1"
    utools = udocker.UdockerTools(None)
    utools.curl = mock_gurl
    status = utools._get_file(url)
    self.assertTrue(mock_download.called)
    self.assertTrue(mock_isfile.called)
    self.assertEqual(status, "file1")
    # Existing local path -> resolved with realpath, no download.
    mock_exists.return_value = True
    mock_isfile.return_value = True
    mock_rpath.return_value = "file1"
    url = "/dir/file1"
    utools = udocker.UdockerTools(None)
    utools.curl = mock_gurl
    status = utools._get_file(url)
    self.assertTrue(mock_rpath.called)
    self.assertTrue(mock_isfile.called)
    self.assertEqual(status, "file1")
@mock.patch('udocker.os.path.isfile')
@mock.patch('udocker.UdockerTools._version_isok')
@mock.patch('udocker.UdockerTools._get_version')
@mock.patch('udocker.Msg')
@mock.patch('udocker.subprocess.call')
@mock.patch('udocker.FileUtil')
@mock.patch('udocker.UdockerTools.__init__')
def test_08__verify_version(self, mock_init, mock_futil, mock_call,
mock_msg, mock_getver, mock_verok,
mock_isfile):
"""Test08 UdockerTools._verify_version()."""
mock_init.return_value = None
mock_isfile.return_value = False
utools = udocker.UdockerTools(None)
mock_futil.return_value.mktmp.return_value = ""
status = utools._verify_version("tarballfile")
self.assertEqual(status, (False, ''))
mock_msg.level = 0
mock_call.return_value = 1
mock_isfile.return_value = True
| |
from visualizerItem import *
from visualizerGraphicItem import *
from modelView import ModelView
class Model(object):
    def __init__(self):
        """Initialize an empty visualizer model with default state."""
        self._windows = []            # attached view windows, refreshed on every change
        self._sockets = []            # attached solver sockets, notified step-wise
        self._items = {}              # kind name -> {id -> non-graphic item}
        self._graphic_items = {}      # kind name -> {id -> graphic item}
        self._new_items = {}          # pending items: (kind, id) -> item, not yet accepted
        self._editable = True         # whether graphic items may be dragged/edited
        self._grid_size = (1, 1)      # (X, Y) extent of the grid, 1-based cells
        self._nodes = [] #pairs of x and y
        self._blocked_nodes = [(1,1)] #pairs of x and y
        self._highways = [] #pairs of x and y
        self._node_ids = {}           # (x, y) -> external node id (or None)
        self._inits = [] #list of unhandled inits
        self._num_steps = 0           # total number of solution steps
        self._current_step = 0        # playback position
        self._displayed_steps = -1    # highest step already reported to sockets
        self._notifier = None         # QTimer used to retry socket notification
def clear(self):
for window in self._windows:
if isinstance(window, ModelView):
window.clear()
self._items = {}
self._graphic_items = {}
self._new_items = {}
self._editable = True
self._grid_size = (1, 1)
self._nodes = [] #pairs of x and y
self._blocked_nodes = [(1,1)] #pairs of x and y
self._highways = [] #pairs of x and y
self._inits = [] #list of unhandled inits
self._num_steps = 0
self._current_step = 0
self._displayed_steps = -1
self.update_windows()
def _add_item2(self, item):
if item is None:
return
dictionarie = self._map_item_to_dictionarie(item, True)
if dictionarie is None:
return
if str(item.get_id()) in dictionarie:
return
key = (item.get_kind_name(), str(item.get_id()))
if key in self._new_items and not ignore_duplicates:
return
item.set_model(self)
if isinstance(item, VisualizerGraphicItem):
item.enable_drag(self._editable)
dictionarie[str(item.get_id())] = item
def add_item(self, item, add_immediately = False):
if add_immediately:
return self._add_item2(item)
if item is None:
return
key = (item.get_kind_name(), str(item.get_id()))
if key in self._new_items:
return
self._new_items[key] = item
def accept_new_items(self, item_kinds = None):
add_items = []
if item_kinds == None:
for item in self._new_items.itervalues():
add_items.append(item)
else:
for item_kind in item_kinds:
for key in self._new_items:
if key[0] == item_kind:
add_items.append(self._new_items[key])
self.discard_new_items(item_kinds)
for item in add_items:
self._add_item2(item)
for socket in self._sockets:
for item in add_items:
socket.model_expanded(item.to_init_str())
#if len(add_items) > 0:
# socket.model_expanded('\n')
def discard_new_items(self, item_kinds = None):
if item_kinds == None:
self._new_items.clear()
return
delete_items = []
for item_kind in item_kinds:
for key in self._new_items:
if key[0] == item_kind:
delete_items.append(key)
for key in delete_items:
del self._new_items[key]
def remove_item(self, item):
if item is None:
return
key = (item.get_kind_name(), str(item.get_id()))
item2 = self._new_items.pop(key, None)
if item2 is not None:
item2.set_model(None)
return
dictionarie = self._map_item_to_dictionarie(item, True)
if dictionarie is None:
return
if str(item.get_id()) not in dictionarie:
return
item.set_model(None)
del dictionarie[str(item.get_id())]
def add_init(self, init):
self._inits.append(str(init) + '.')
def add_window(self, window):
if window not in self._windows:
self._windows.append(window)
def remove_window(self, window):
if window in self._windows:
self._windows.remove(window)
def add_socket(self, socket):
if socket not in self._sockets:
self._sockets.append(socket)
def remove_socket(self, socket):
if socket in self._sockets:
self._sockets.remove(socket)
def add_node(self, x, y, node_id = None):
if (x,y) in self._nodes:
return
self._nodes.append((x, y))
self._node_ids[(x,y)] = node_id
if (x,y) in self._blocked_nodes:
self._blocked_nodes.remove((x,y))
if x > self._grid_size[0] or y > self._grid_size[1]:
self.set_grid_size(max(x, self._grid_size[0]), max(y, self._grid_size[1]))
def add_highway(self, x, y):
self._highways.append((x, y))
def is_node(self, x, y):
return (x, y) in self._nodes
def is_highway(self, x, y):
return (x, y) in self._highways
def remove_node(self, x, y):
if (x,y) not in self._nodes:
return
self._nodes.remove((x, y))
if (x,y) not in self._blocked_nodes:
self._blocked_nodes.append((x,y))
def remove_highway(self, x, y):
if (x,y) not in self._highways:
return
self._highways.remove((x,y))
    def set_grid_size(self, X, Y, enable_nodes = False):
        """Resize the grid to X columns and Y rows (clamped to >= 1).

        Nodes outside the new extent are dropped.  When ``enable_nodes`` is
        set, newly uncovered cells become walkable nodes; otherwise the
        blocked-node list is rebuilt as "every cell that is not a node".

        NOTE(review): shrinking does not clean up _highways or _node_ids
        for dropped cells, and the ``enable_nodes`` branch only covers the
        corner band where BOTH x and y exceed the old extent (not the full
        L-shaped band of new cells) -- confirm whether that is intended.
        """
        if X < 1:
            X = 1
        if Y < 1:
            Y = 1
        to_remove = []
        for node in self._nodes: #collect nodes outside the new extent
            if node[0] > X:
                to_remove.append(node)
            elif node[1] > Y:
                to_remove.append(node)
        for node in to_remove:
            self._nodes.remove(node)
        if enable_nodes:
            # newly added cells become walkable nodes
            for x in range(self._grid_size[0] + 1, X + 1):
                for y in range(self._grid_size[1] + 1, Y + 1):
                    self._nodes.append((x,y))
        else:
            # rebuild blocked list: all cells minus the registered nodes
            self._blocked_nodes = []
            for x in range(1, X+1):
                for y in range(1, Y+1):
                    self._blocked_nodes.append((x,y))
            for node in self._nodes:
                self._blocked_nodes.remove(node)
        self._grid_size = (X, Y)
    def set_editable(self, editable):
        """Toggle edit mode and propagate the drag flag to all graphic items."""
        self._editable = editable
        # Python 2 idiom: itervalues() iterates without building lists.
        for items_dic in self._graphic_items.itervalues():
            for item in items_dic.itervalues():
                item.enable_drag(self._editable)
    def set_num_steps(self, num_steps):
        """Set the total number of solution steps available for playback."""
        self._num_steps = num_steps
def create_item(self, item_kind, ID = None, add_immediately = False):
item = None
if ID is None:
dic = None
if item_kind == 'shelf':
if 'shelf' not in self._graphic_items:
self._graphic_items['shelf'] = {}
dic = self._graphic_items['shelf']
elif item_kind == 'pickingStation':
if 'pickingStation' not in self._graphic_items:
self._graphic_items['pickingStation'] = {}
dic = self._graphic_items['pickingStation']
elif item_kind == 'robot':
if 'robot' not in self._graphic_items:
self._graphic_items['robot'] = {}
dic = self._graphic_items['robot']
elif item_kind == 'order':
if 'order' not in self._graphic_items:
self._graphic_items['order'] = {}
dic = self._graphic_items['order']
elif item_kind == 'checkpoint':
if 'checkpoint' not in self._graphic_items:
self._graphic_items['checkpoint'] = {}
dic = self._graphic_items['checkpoint']
elif item_kind == 'task':
if 'task' not in self._graphic_items:
self._graphic_items['task'] = {}
dic = self._graphic_items['task']
ID = 1
break_loop = False
while not break_loop:
break_loop = True
for key in dic:
if str(key) == str(ID):
ID += 1
break_loop = False
break
ID = str(ID)
if item_kind == 'shelf':
item = Shelf(ID)
elif item_kind == 'pickingStation':
item = PickingStation(ID)
elif item_kind == 'robot':
item = Robot(ID)
elif item_kind == 'order':
item = Order(ID)
elif item_kind == 'checkpoint':
item = Checkpoint(ID)
elif item_kind == 'task':
item = Task(ID)
if item is not None:
self.add_item(item, add_immediately)
return item
    def update(self):
        """Advance the playback by one step and apply all item actions.

        Does nothing while any socket is still waiting or when playback is
        exhausted.  Returns the (possibly advanced) current step.
        """
        if self._current_step > self._num_steps or self._num_steps == 0:
            return self._current_step
        for socket in self._sockets:
            if socket.is_waiting():
                # a solver has not delivered this step yet; try again later
                return self._current_step
        for items_dic in self._items.itervalues():
            for item in items_dic.itervalues():
                item.on_step_update(self._current_step)
        for items_dic in self._graphic_items.itervalues():
            for item in items_dic.itervalues():
                item.do_action(self._current_step)
        # Report the step to the sockets only once (_displayed_steps guard)
        # and only once the final step has been reached.
        if self._displayed_steps < self._current_step and len(self._sockets) > 0 and self._num_steps <= self._current_step:
            self._displayed_steps = self._current_step
            iterator = iter(self._sockets)
            value = iterator.next()  # Python 2 iterator protocol
            value.done_step(self._current_step)
            self.notify_sockets(iterator, value, self._current_step)
        self._current_step += 1
        self.update_windows()
        return self._current_step
    def notify_sockets(self, iterator, value, step):
        """Tell the remaining sockets that *step* is done (asynchronously).

        If *value* (the socket handled last) is still busy, a single-shot
        QTimer re-invokes this method after 100 ms instead of blocking.
        Otherwise the next socket is taken from *iterator* and handled by
        notify_sockets2, which mutually recurses back here.
        """
        if value.is_waiting():
            # replace any pending retry timer with a fresh one
            if self._notifier is not None:
                self._notifier.stop()
            self._notifier = QTimer()
            self._notifier.setSingleShot(True)
            self._notifier.timeout.connect(lambda: self.notify_sockets(iterator, value, step))
            self._notifier.start(100)
            return
        else:
            try:
                value = iterator.next()  # Python 2 iterator protocol
            except StopIteration:
                # all sockets notified
                return
        self.notify_sockets2(iterator, value, step)
    def notify_sockets2(self, iterator, value, step):
        """Deliver done_step(*step*) to *value*, waiting via QTimer if busy.

        Counterpart of notify_sockets: once *value* is ready it is notified
        and control returns to notify_sockets to fetch the next socket.
        """
        if value.is_waiting():
            # replace any pending retry timer with a fresh one
            if self._notifier is not None:
                self._notifier.stop()
            self._notifier = QTimer()
            self._notifier.setSingleShot(True)
            self._notifier.timeout.connect(lambda: self.notify_sockets2(iterator, value, step))
            self._notifier.start(100)
            return
        value.done_step(step)
        self.notify_sockets(iterator, value, step)
def undo(self):
if self._current_step == 0:
return self._current_step
self._current_step -= 1
for items_dic in self._items.itervalues():
for item in items_dic.itervalues():
item.on_step_undo(self._current_step)
for items_dic in self._graphic_items.itervalues():
for item in items_dic.itervalues():
item.undo_action(self._current_step)
self.update_windows()
return self._current_step
    def clear_actions(self):
        """Drop all recorded actions from graphic items and reset step count."""
        for items_dic in self._graphic_items.itervalues():
            for item in items_dic.itervalues():
                item.clear_actions()
        self._num_steps = 0
    def restart(self):
        """Rewind playback to step 0 and restore all items' initial state."""
        for items_dic in self._graphic_items.itervalues():
            for item in items_dic.itervalues():
                item.restart()
        for items_dic in self._items.itervalues():
            for item in items_dic.itervalues():
                item.restart()
        self._current_step = 0
        self.update_windows()
def skip_to_end(self):
if self._editable:
return
while(self.update() <= self._num_steps):
pass
    def filter_items(self, item_kind = None,
                     ID = None, position = None,
                     return_first = False,
                     return_non_buffered = True,
                     return_buffered = False):
        """Collect items matching kind, id and/or position.

        :param item_kind: restrict to this kind name, or None for any kind
        :param ID: restrict to this id (compared as string), or None
        :param position: restrict graphic items to this position; non-graphic
            items are only searched when position is None
        :param return_first: stop at the first match
        :param return_non_buffered: search the registered item dictionaries
        :param return_buffered: also search the pending new-item buffer
        :return: list of matching items
        """
        result = []
        if ID is not None:
            ID = str(ID)
        if return_non_buffered:
            # gather the dictionaries to search in
            search_in = []
            if item_kind is None:
                for items_dic in self._graphic_items.itervalues():
                    search_in.append(items_dic)
                if position is None:
                    # non-graphic items have no position
                    for items_dic in self._items.itervalues():
                        search_in.append(items_dic)
            else:
                if item_kind in self._graphic_items:
                    search_in.append(self._graphic_items[item_kind])
                if position is None:
                    if item_kind in self._items:
                        search_in.append(self._items[item_kind])
            for items_dic in search_in:
                if ID is not None:
                    # direct lookup by id, then (optionally) position check
                    if ID in items_dic:
                        item = items_dic[ID]
                        if position is None:
                            result.append(item)
                            if return_first:
                                return result
                        elif position == item.get_position():
                            result.append(item)
                            if return_first:
                                return result
                else:
                    for item in items_dic.itervalues():
                        if position is None:
                            result.append(item)
                            if return_first:
                                return result
                        elif position == item.get_position():
                            result.append(item)
                            if return_first:
                                return result
        if return_buffered and position is None:
            for key in self._new_items:
                if ((key[0] == item_kind or item_kind is None)
                    and (key[1] == ID or ID is None)):
                    result.append(self._new_items[key])
                    if return_first:
                        # NOTE(review): this returns [None] instead of
                        # `result`, discarding the item just found -- looks
                        # like a bug; confirm the intended sentinel.
                        return [None]
        return result
def contains(self, item):
if item is None:
return False
if item.get_kind_name() in self._graphic_items:
return item.get_id() in self._graphic_items[item.get_kind_name()]
elif item.get_kind_name() in self._items:
return item.get_id() in self._items[item.get_kind_name()]
def update_windows(self):
for window in self._windows:
window.update()
    def to_init_str(self):
        """Serialize the whole model state as a list of ASP init atoms.

        Node and highway object ids are linearized row-major:
        ``id = x + (y - 1) * grid_width``.
        """
        s = []
        for node in self._nodes:
            s.append('init(object(node, '
                     + str(node[0] + (node[1]-1) * self._grid_size[0])
                     + '), value(at, ('
                     + str(node[0]) + ', ' + str(node[1]) + '))).')
        for node in self._highways:
            s.append('init(object(highway, '
                     + str(node[0] + (node[1]-1) * self._grid_size[0])
                     + '), value(at, ('
                     + str(node[0]) + ', ' + str(node[1]) + '))).')
        # registered items serialize themselves
        for items_dic in self._graphic_items.itervalues():
            for item in items_dic.itervalues():
                s.append(item.to_init_str())
        for items_dic in self._items.itervalues():
            for item in items_dic.itervalues():
                s.append(item.to_init_str())
        # raw init atoms queued via add_init()
        for init in self._inits:
            s.append(str(init))
        return s
def save_to_file(self, file_name):
ofile = open(file_name, 'w')
try:
#head
ofile.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
ofile.write('\n%Grid size X: ' + str(self._grid_size[0]))
ofile.write('\n%Grid size Y: ' + str(self._grid_size[1]))
| |
<reponame>khilawar4/airflow<gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Bigtable operators."""
import enum
from typing import Dict, Iterable, List, Optional, Sequence, Union
import google.api_core.exceptions
from google.cloud.bigtable.column_family import GarbageCollectionRule
from google.cloud.bigtable_admin_v2 import enums
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigtable import BigtableHook
class BigtableValidationMixin:
    """Common class for Cloud Bigtable operators for validating required fields."""

    REQUIRED_ATTRIBUTES = []  # type: Iterable[str]

    def _validate_inputs(self):
        """Raise AirflowException for the first required attribute that is empty."""
        empty = next(
            (name for name in self.REQUIRED_ATTRIBUTES if not getattr(self, name)),
            None,
        )
        if empty is not None:
            raise AirflowException(f'Empty parameter: {empty}')
class BigtableCreateInstanceOperator(BaseOperator, BigtableValidationMixin):
    """
    Creates a new Cloud Bigtable instance.
    If the Cloud Bigtable instance with the given ID exists, the operator does not
    compare its configuration
    and immediately succeeds. No changes are made to the existing instance.

    For more details about instance creation have a look at the reference:
    https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance.create

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigtableCreateInstanceOperator`

    :type instance_id: str
    :param instance_id: The ID of the Cloud Bigtable instance to create.
    :type main_cluster_id: str
    :param main_cluster_id: The ID for main cluster for the new instance.
    :type main_cluster_zone: str
    :param main_cluster_zone: The zone for main cluster
        See https://cloud.google.com/bigtable/docs/locations for more details.
    :type project_id: str
    :param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
        the default project_id from the Google Cloud connection is used.
    :type replica_clusters: List[Dict[str, str]]
    :param replica_clusters: (optional) A list of replica clusters for the new
        instance. Each cluster dictionary contains an id and a zone.
        Example: [{"id": "replica-1", "zone": "us-west1-a"}]
    :type replica_cluster_id: str
    :param replica_cluster_id: (deprecated) The ID for replica cluster for the new
        instance.
    :type replica_cluster_zone: str
    :param replica_cluster_zone: (deprecated) The zone for replica cluster.
    :type instance_type: enum.IntEnum
    :param instance_type: (optional) The type of the instance.
    :type instance_display_name: str
    :param instance_display_name: (optional) Human-readable name of the instance. Defaults
        to ``instance_id``.
    :type instance_labels: dict
    :param instance_labels: (optional) Dictionary of labels to associate
        with the instance.
    :type cluster_nodes: int
    :param cluster_nodes: (optional) Number of nodes for cluster.
    :type cluster_storage_type: enum.IntEnum
    :param cluster_storage_type: (optional) The type of storage.
    :type timeout: int
    :param timeout: (optional) timeout (in seconds) for instance creation.
        If None is not specified, Operator will wait indefinitely.
    :param gcp_conn_id: The connection ID to use to connect to Google Cloud.
    :type gcp_conn_id: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # attributes checked by BigtableValidationMixin._validate_inputs()
    REQUIRED_ATTRIBUTES: Iterable[str] = ('instance_id', 'main_cluster_id', 'main_cluster_zone')
    # fields rendered by Airflow's template engine before execute()
    template_fields: Iterable[str] = [
        'project_id',
        'instance_id',
        'main_cluster_id',
        'main_cluster_zone',
        'impersonation_chain',
    ]

    def __init__(
        self,
        *,  # pylint: disable=too-many-arguments
        instance_id: str,
        main_cluster_id: str,
        main_cluster_zone: str,
        project_id: Optional[str] = None,
        replica_clusters: Optional[List[Dict[str, str]]] = None,
        replica_cluster_id: Optional[str] = None,
        replica_cluster_zone: Optional[str] = None,
        instance_display_name: Optional[str] = None,
        instance_type: Optional[enums.Instance.Type] = None,
        instance_labels: Optional[Dict] = None,
        cluster_nodes: Optional[int] = None,
        cluster_storage_type: Optional[enums.StorageType] = None,
        timeout: Optional[float] = None,
        gcp_conn_id: str = 'google_cloud_default',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.project_id = project_id
        self.instance_id = instance_id
        self.main_cluster_id = main_cluster_id
        self.main_cluster_zone = main_cluster_zone
        self.replica_clusters = replica_clusters
        self.replica_cluster_id = replica_cluster_id
        self.replica_cluster_zone = replica_cluster_zone
        self.instance_display_name = instance_display_name
        self.instance_type = instance_type
        self.instance_labels = instance_labels
        self.cluster_nodes = cluster_nodes
        self.cluster_storage_type = cluster_storage_type
        self.timeout = timeout
        # validation reads the attributes above via getattr, so it must run
        # after the assignments
        self._validate_inputs()
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context) -> None:
        """Create the instance via BigtableHook; succeed if it already exists."""
        hook = BigtableHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
        if instance:
            # Based on Instance.__eq__ instance with the same ID and client is
            # considered as equal.
            self.log.info(
                "The instance '%s' already exists in this project. Consider it as created",
                self.instance_id,
            )
            return
        try:
            hook.create_instance(
                project_id=self.project_id,
                instance_id=self.instance_id,
                main_cluster_id=self.main_cluster_id,
                main_cluster_zone=self.main_cluster_zone,
                replica_clusters=self.replica_clusters,
                replica_cluster_id=self.replica_cluster_id,
                replica_cluster_zone=self.replica_cluster_zone,
                instance_display_name=self.instance_display_name,
                instance_type=self.instance_type,
                instance_labels=self.instance_labels,
                cluster_nodes=self.cluster_nodes,
                cluster_storage_type=self.cluster_storage_type,
                timeout=self.timeout,
            )
        except google.api_core.exceptions.GoogleAPICallError as e:
            # log for task visibility, then let the task fail
            self.log.error('An error occurred. Exiting.')
            raise e
class BigtableUpdateInstanceOperator(BaseOperator, BigtableValidationMixin):
    """
    Updates an existing Cloud Bigtable instance.

    For more details about instance creation have a look at the reference:
    https://googleapis.dev/python/bigtable/latest/instance.html#google.cloud.bigtable.instance.Instance.update

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigtableUpdateInstanceOperator`

    :type instance_id: str
    :param instance_id: The ID of the Cloud Bigtable instance to update.
    :type project_id: str
    :param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
        the default project_id from the Google Cloud connection is used.
    :type instance_display_name: str
    :param instance_display_name: (optional) Human-readable name of the instance.
    :type instance_type: enums.Instance.Type or enum.IntEnum
    :param instance_type: (optional) The type of the instance.
    :type instance_labels: dict
    :param instance_labels: (optional) Dictionary of labels to associate
        with the instance.
    :type timeout: int
    :param timeout: (optional) timeout (in seconds) for instance update.
        If None is not specified, Operator will wait indefinitely.
    :param gcp_conn_id: The connection ID to use to connect to Google Cloud.
    :type gcp_conn_id: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # attributes checked by BigtableValidationMixin._validate_inputs()
    REQUIRED_ATTRIBUTES: Iterable[str] = ['instance_id']
    # fields rendered by Airflow's template engine before execute()
    template_fields: Iterable[str] = [
        'project_id',
        'instance_id',
        'impersonation_chain',
    ]

    def __init__(
        self,
        *,
        instance_id: str,
        project_id: Optional[str] = None,
        instance_display_name: Optional[str] = None,
        instance_type: Optional[Union[enums.Instance.Type, enum.IntEnum]] = None,
        instance_labels: Optional[Dict] = None,
        timeout: Optional[float] = None,
        gcp_conn_id: str = 'google_cloud_default',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.project_id = project_id
        self.instance_id = instance_id
        self.instance_display_name = instance_display_name
        self.instance_type = instance_type
        self.instance_labels = instance_labels
        self.timeout = timeout
        # validation reads the attributes above via getattr, so it must run
        # after the assignments
        self._validate_inputs()
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context) -> None:
        """Update the instance via BigtableHook; fail if it does not exist."""
        hook = BigtableHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        instance = hook.get_instance(project_id=self.project_id, instance_id=self.instance_id)
        if not instance:
            raise AirflowException(f"Dependency: instance '{self.instance_id}' does not exist.")
        try:
            hook.update_instance(
                project_id=self.project_id,
                instance_id=self.instance_id,
                instance_display_name=self.instance_display_name,
                instance_type=self.instance_type,
                instance_labels=self.instance_labels,
                timeout=self.timeout,
            )
        except google.api_core.exceptions.GoogleAPICallError as e:
            # log for task visibility, then let the task fail
            self.log.error('An error occurred. Exiting.')
            raise e
class BigtableDeleteInstanceOperator(BaseOperator, BigtableValidationMixin):
"""
Deletes the Cloud Bigtable instance, including its clusters and all related tables.
For more details about deleting instance have a look at the reference:
https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance.delete
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigtableDeleteInstanceOperator`
:type instance_id: str
:param instance_id: The ID of the Cloud Bigtable instance to delete.
:param project_id: Optional, the ID of the Google Cloud project. If set to None or missing,
the default project_id from the Google Cloud connection is used.
:type project_id: str
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If | |
# -*- coding: utf-8 -*-
#
# computeFunctions and -Routines for parallel calculation
# of single trial measures needed for the averaged
# measures like cross spectral densities
#
# Builtin/3rd party package imports
import numpy as np
from scipy.signal import fftconvolve, detrend
from inspect import signature
# backend method imports
from .csd import csd
# syncopy imports
from syncopy.shared.const_def import spectralDTypes
from syncopy.shared.tools import best_match
from syncopy.shared.computational_routine import ComputationalRoutine
from syncopy.shared.kwarg_decorators import unwrap_io
@unwrap_io
def cross_spectra_cF(trl_dat,
                     samplerate=1,
                     nSamples=None,
                     foi=None,
                     taper="hann",
                     taper_opt=None,
                     demean_taper=False,
                     polyremoval=False,
                     timeAxis=0,
                     chunkShape=None,
                     noCompute=False):
    """
    Single trial Fourier cross spectral estimates between all channels
    of the input data. First all the individual Fourier transforms
    are calculated via a (multi-)tapered FFT, then the pairwise
    cross-spectra are computed.

    Averaging over tapers is done implicitly
    for multi-taper analysis with `taper="dpss"`.

    Output consists of all (nChannels x nChannels+1)/2 different complex
    estimates arranged in a symmetric fashion (``CS_ij == CS_ji*``). The
    elements on the main diagonal (`CS_ii`) are the (real) auto-spectra.

    This is NOT the same as what is commonly referred to as
    "cross spectral density" as there is no (time) averaging!!
    Multi-tapering alone is not necessarily sufficient to get enough
    statitstical power for a robust csd estimate. Yet for completeness
    and testing the option `norm=True` will output a single-trial
    coherence estimate.

    Parameters
    ----------
    trl_dat : (K, N) :class:`numpy.ndarray`
        Uniformly sampled multi-channel time-series data
        The 1st dimension is interpreted as the time axis,
        columns represent individual channels.
        Dimensions can be transposed to `(N, K)` with the `timeAxis` parameter.
    samplerate : float
        Samplerate in Hz
    nSamples : int or None
        Absolute length of the (potentially to be padded) signal or
        `None` for no padding
    foi : 1D :class:`numpy.ndarray` or None, optional
        Frequencies of interest (Hz) for output. If desired frequencies
        cannot be matched exactly the closest possible frequencies (respecting
        data length and padding) are used.
    taper : str or None
        Taper function to use, one of scipy.signal.windows
        Set to `None` for no tapering.
    taper_opt : dict, optional
        Additional keyword arguments passed to the `taper` function.
        For multi-tapering with `taper='dpss'` set the keys
        `'Kmax'` and `'NW'`.
        For further details, please refer to the
        `SciPy docs <https://docs.scipy.org/doc/scipy/reference/signal.windows.html>`_
    demean_taper : bool
        Set to `True` to perform de-meaning after tapering
    polyremoval : int or None
        Order of polynomial used for de-trending data in the time domain prior
        to spectral analysis. A value of 0 corresponds to subtracting the mean
        ("de-meaning"), ``polyremoval = 1`` removes linear trends (subtracting the
        least squares fit of a linear polynomial).
        If `polyremoval` is `None`, no de-trending is performed.
    timeAxis : int, optional
        Index of running time axis in `trl_dat` (0 or 1)
    noCompute : bool
        Preprocessing flag. If `True`, do not perform actual calculation but
        instead return expected shape and :class:`numpy.dtype` of output
        array.

    Returns
    -------
    CS_ij : (1, nFreq, N, N) :class:`numpy.ndarray`
        Complex cross spectra for all channel combinations ``i,j``.
        `N` corresponds to number of input channels.

    Notes
    -----
    This method is intended to be used as
    :meth:`~syncopy.shared.computational_routine.ComputationalRoutine.computeFunction`
    inside a :class:`~syncopy.shared.computational_routine.ComputationalRoutine`.
    Thus, input parameters are presumed to be forwarded from a parent metafunction.
    Consequently, this function does **not** perform any error checking and operates
    under the assumption that all inputs have been externally validated and cross-checked.

    See also
    --------
    csd : :func:`~syncopy.connectivity.csd.csd`
             Cross-spectra backend function
    normalize_csd : :func:`~syncopy.connectivity.csd.normalize_csd`
             Coherence from trial averages
    mtmfft : :func:`~syncopy.specest.mtmfft.mtmfft`
             (Multi-)tapered Fourier analysis
    """

    # Re-arrange array if necessary and get dimensional information
    if timeAxis != 0:
        dat = trl_dat.T       # does not copy but creates view of `trl_dat`
    else:
        dat = trl_dat

    # nSamples larger than the trial length implies zero-padding in the backend
    if nSamples is None:
        nSamples = dat.shape[0]
    nChannels = dat.shape[1]

    # frequency bins of the rfft, and the output indices matching `foi`
    freqs = np.fft.rfftfreq(nSamples, 1 / samplerate)

    if foi is not None:
        _, freq_idx = best_match(freqs, foi, squash_duplicates=True)
        nFreq = freq_idx.size
    else:
        freq_idx = slice(None)
        nFreq = freqs.size

    # we always average over tapers here
    outShape = (1, nFreq, nChannels, nChannels)

    # For initialization of computational routine,
    # just return output shape and dtype
    # cross spectra are complex!
    if noCompute:
        return outShape, spectralDTypes["fourier"]

    # detrend
    # NOTE(review): the default ``polyremoval=False`` also satisfies
    # ``polyremoval == 0`` (False == 0 in Python), so de-meaning runs by
    # default even though the docstring says `None` disables de-trending --
    # confirm the frontend always passes an explicit value here.
    if polyremoval == 0:
        # SciPy's overwrite_data not working for type='constant' :/
        dat = detrend(dat, type='constant', axis=0, overwrite_data=True)
    elif polyremoval == 1:
        # NOTE(review): with overwrite_data=True and `dat` possibly being a
        # view (timeAxis != 0), this may mutate the caller's `trl_dat` in
        # place -- TODO confirm this is intended.
        dat = detrend(dat, type='linear', axis=0, overwrite_data=True)

    # compute the full complex cross-spectra via the csd backend
    CS_ij = csd(dat,
                samplerate,
                nSamples,
                taper=taper,
                taper_opt=taper_opt,
                demean_taper=demean_taper)

    # where does freqs go/come from -
    # we will eventually solve this issue..
    # prepend a singleton time axis and restrict to the requested frequencies
    return CS_ij[None, freq_idx, ...]
class ST_CrossSpectra(ComputationalRoutine):

    """
    Compute class that calculates single-trial (multi-)tapered cross spectra
    of :class:`~syncopy.AnalogData` objects

    Sub-class of :class:`~syncopy.shared.computational_routine.ComputationalRoutine`,
    see :doc:`/developer/compute_kernels` for technical details on Syncopy's compute
    classes and metafunctions.

    See also
    --------
    syncopy.connectivityanalysis : parent metafunction
    """

    # the hard wired dimord of the cF
    dimord = ['time', 'freq', 'channel_i', 'channel_j']

    computeFunction = staticmethod(cross_spectra_cF)

    # 1st argument,the data, gets omitted
    valid_kws = list(signature(cross_spectra_cF).parameters.keys())[1:]

    # hardcode some parameter names which got digested from the frontend
    valid_kws += ['tapsmofrq', 'nTaper', 'pad_to_length']

    def process_metadata(self, data, out):
        """Attach trialdefinition, samplerate, channel and freq info to *out*."""
        # Some index gymnastics to get trial begin/end "samples"
        if data.selection is not None:
            chanSec = data.selection.channel
            trl = data.selection.trialdefinition
            # each trial collapses to one output "sample": [row, row+1)
            for row in range(trl.shape[0]):
                trl[row, :2] = [row, row + 1]
        else:
            chanSec = slice(None)
            time = np.arange(len(data.trials))
            time = time.reshape((time.size, 1))
            trl = np.hstack((time, time + 1,
                             np.zeros((len(data.trials), 1)),
                             np.array(data.trialinfo)))

        # Attach constructed trialdef-array (if even necessary)
        if self.keeptrials:
            out.trialdefinition = trl
        else:
            # trials got averaged: a single pseudo-trial remains
            out.trialdefinition = np.array([[0, 1, 0]])

        # Attach remaining meta-data
        out.samplerate = data.samplerate
        out.channel_i = np.array(data.channel[chanSec])
        out.channel_j = np.array(data.channel[chanSec])
        # assumes the frontend stored the matched output frequencies in
        # cfg['foi'] -- TODO confirm
        out.freq = self.cfg['foi']
@unwrap_io
def cross_covariance_cF(trl_dat,
samplerate=1,
polyremoval=0,
timeAxis=0,
norm=False,
chunkShape=None,
noCompute=False,
fullOutput=False):
"""
Single trial covariance estimates between all channels
of the input data. Output consists of all ``(nChannels x nChannels+1)/2``
different estimates arranged in a symmetric fashion
(``COV_ij == COV_ji``). The elements on the
main diagonal (`CS_ii`) are the channel variances.
Parameters
----------
trl_dat : (K, N) :class:`numpy.ndarray`
Uniformly sampled multi-channel time-series data
The 1st dimension is interpreted as the time axis,
columns represent individual channels.
Dimensions can be transposed to `(N, K)` with the `timeAxis` parameter.
samplerate : float
Samplerate in Hz
polyremoval : int or None
Order of polynomial used for de-trending data in the time domain prior
to spectral analysis. A value of 0 corresponds to subtracting the mean
("de-meaning"), ``polyremoval = 1`` removes linear trends (subtracting the
least squares fit of a linear polynomial).
If `polyremoval` is `None`, no de-trending is performed.
timeAxis : int, optional
Index of running time axis in `trl_dat` (0 or 1)
norm : bool, optional
Set to `True` to normalize for single-trial cross-correlation.
noCompute : bool
Preprocessing flag. If `True`, do not perform actual calculation but
instead return expected shape and :class:`numpy.dtype` of output
array.
fullOutput : bool
For backend testing or stand-alone applications, set to `True`
to return also the `lags` array.
Returns
-------
CC_ij : (K, 1, N, N) :class:`numpy.ndarray`
Cross covariance for all channel combinations ``i,j``.
`N` corresponds to number of input channels.
lags : (M,) :class:`numpy.ndarray`
The lag times if `fullOutput=True`
Notes
-----
This method is intended to be used as
:meth:`~syncopy.shared.computational_routine.ComputationalRoutine.computeFunction`
inside a :class:`~syncopy.shared.computational_routine.ComputationalRoutine`.
Thus, input parameters are presumed to be forwarded from a parent metafunction.
Consequently, this function does **not** perform any error checking and operates
under the assumption that all inputs have been externally validated and cross-checked.
"""
# Re-arrange array if necessary and get dimensional information
if timeAxis != 0:
dat = trl_dat.T # does not copy but creates view of `trl_dat`
else:
dat = trl_dat
nSamples = dat.shape[0]
nChannels = dat.shape[1]
# positive lags in time units
if nSamples % 2 == 0:
lags = np.arange(0, nSamples // 2)
else:
lags = np.arange(0, nSamples // 2 + 1)
lags = lags * 1 / samplerate
outShape = (len(lags), 1, nChannels, nChannels)
# For initialization of computational routine,
# just return output shape and dtype
# cross covariances are real!
if noCompute:
return outShape, spectralDTypes["abs"]
# detrend, has to be done after noCompute!
if polyremoval == 0:
# SciPy's overwrite_data not working for type='constant' :/
dat = detrend(dat, type='constant', axis=0, overwrite_data=True)
elif polyremoval == 1:
detrend(dat, type='linear', axis=0, overwrite_data=True)
# re-normalize output | |
<filename>Incrementor.py
import re
import os
import time
import numpy as np
import nltk
from tqdm import tqdm
import utils
import Fruitfly
from Fruitfly import Fruitfly
class Incrementor:
    def __init__(self, corpus_dir, matrix_file,
                 corpus_tokenize=False, corpus_linewise=False, corpus_checkvoc=None,
                 matrix_incremental=True, matrix_maxdims=None, min_count=None, contentwords_only=False,
                 fly_new=False, fly_grow=False, fly_file=None, fly_max_pn=None,
                 verbose=False):
        """
        The purpose of Incrementor objects is to to count co-occurrences of words in text resources and
        maintain a Fruitfly object alongside counting. Both tasks come with several options; most of them
        are realized as attributes of an Incrementor object.
        :param corpus_dir: str -- file path or directory to text resources
        :param matrix_file: str -- file path (without extension) to the co-occurrence count (e.g., for logging)
        :param corpus_tokenize: bool -- option to tokenize the input text
        :param corpus_linewise: bool -- option to count lines separately (not stable; leave this set to False)
        :param corpus_checkvoc: str -- path to a list of prioritized words (1 word/line) in case of size limitations
        :param matrix_incremental: bool -- use the already existing co-occurrence count in matrix_file
        :param matrix_maxdims: int -- limit the size (number of dimensions) of the count (e.g. for faster performance)
        :param min_count: int -- only keep dimensions of words with at least these many occurrences
        :param contentwords_only: bool -- only count tokens tagged with _N,_V,_J, but not _X (requires POS-tagged data)
        :param fly_new: bool -- set up a new Fruitfly object from scratch (only possible with default parameters)
        :param fly_grow: bool -- extend and reduce the Fruitfly's PN layer in parallel to the co-occurrence count
        :param fly_file: str -- file path of the Fruitfly object's config (parameters and connections)
        :param fly_max_pn: int -- limit the number of PNs of the Fruitfly object (doesn't affect matrix_maxdims)
        :param verbose: bool -- comment on current processes via print statements
        :attribute outcols: str -- file path to the count's vocabulary
        :attribute words: [str] -- text resource as a list of words
        :attribute cooc: ndarray [[]] -- co-occurrence matrix
        :attribute words_to_i: {str:int} -- mapping of the count's vocabulary to the count's dimensions
        :attribute i_to_words: {int:str} -- inverse mapping of words_to_i
        :attribute fruitfly: Fruitfly -- the Fruitfly object to be maintained
        :attribute freq: {str:int} -- frequency distribution of tokens in the current text resource (and earlier ones)
        """
        self.verbose = verbose
        self.corpus_dir = corpus_dir
        self.is_tokenize = corpus_tokenize
        self.is_linewise = corpus_linewise
        self.required_voc = corpus_checkvoc
        # derived output file paths: ".dm" holds the matrix, ".cols" its vocabulary
        self.outspace = matrix_file+".dm"
        self.outcols = matrix_file+".cols"
        self.is_incremental = matrix_incremental
        self.max_dims = matrix_maxdims
        self.min_count = min_count
        self.postag_simple = contentwords_only
        self.is_new_fly = fly_new
        self.is_grow_fly = fly_grow
        self.flyfile = fly_file
        self.fly_max_pn = fly_max_pn
        # read the raw corpus first; the other setup steps rely on it
        self.words = self.read_corpus(self.corpus_dir,
                                      tokenize_corpus=self.is_tokenize,
                                      postag_simple=self.postag_simple,
                                      linewise=self.is_linewise,
                                      verbose=self.verbose)
        # load (or freshly create) matrix, vocabulary mappings, and optional Fruitfly
        self.cooc, self.words_to_i, self.i_to_words, self.fruitfly = \
            self.read_incremental_parts(self.outspace,
                                        self.outcols,
                                        self.flyfile,
                                        verbose=self.verbose)
        # words that will be counted (= labels of the final matrix dimensions)
        self.freq = self.freq_dist(self.words,
                                   size_limit=self.max_dims,
                                   required_words_file=self.required_voc,
                                   verbose=self.verbose)
        if self.verbose: print("\tVocabulary size:",len(self.freq),
                               "\n\tTokens (or lines) for cooccurrence count:",len(self.words))
#========== FILE READING
@staticmethod
def read_corpus(indir, tokenize_corpus=False, postag_simple=False, linewise=False, verbose=False):
"""
Read text from a file or directory and apply various pre-processing functions.
Tokenization is optional; set postag_simple=True if the input is POS-tagged with {_N, _V, _J, _X}.
Returns tokens a single list of strings if linewise==False, or else as list of lists of strings.
:param indir: str -- a directory or a single file as text resource
:param tokenize_corpus: bool -- apply nltk.word_tokenize()
:param postag_simple: bool -- re-uppercase POS-tags
:param linewise: bool -- not tested; leave this False
:param verbose: bool -- comment the workings via print statements
:return: [str] or [[str]] -- depending on the value of linewise
"""
# this is for initialization of an Incrementor object without resources
if indir is None:
if verbose: print("No text resources specified. Continuing with empty corpus.")
lines = []
else:
filepaths = []
# list of lists of words
lines = []
# to delete punctuation entries in simple-POS-tagged data (_N, _V, _J, _X)
nonword = re.compile("\W+(_X)?")
# line count
lc = 0
# word count
wc = 0
# for a single file that is passed
if os.path.isfile(indir):
filepaths = [indir]
else:
for (dirpath, dirnames, filenames) in os.walk(indir):
filepaths.extend([dirpath+"/"+f for f in filenames])
for file in filepaths:
try:
if verbose: print("reading text from ",file,"...")
with open(file) as f:
for line in f:
lc += 1
line = line.rstrip().lower()
if tokenize_corpus:
tokens = nltk.word_tokenize(line)
else:
tokens = line.split()
linewords = []
for t in tokens:
# upper-case the POS-tags again
if postag_simple:
t = t[:-1]+t[-1].upper()
# only appends if the token is not a non-word (=ignores punctuation)
if (re.fullmatch(nonword, t) is None):
linewords.append(t)
wc+=1
if verbose and wc%1000000 == 0:
print("\twords read:",wc/1000000,"million",end="\r")
# appends the list of tokens of the current line to the list of lines
lines.append(linewords)
except FileNotFoundError as e:
print(e)
if verbose: print("Finished reading. Number of words:",wc)
if linewise is False:
# flattens to a simple word list
return [w for l in lines for w in l]
else:
return(lines)
def extend_corpus(self, text_resource):
"""
Takes a file path, reads the file's content, and extends the Incrementor object's available text
as well as its frequency distribution. Any options and input parameters (e.g., tokenization) are
handled by the Incrementor object's attributes.
:param text_resource: file path
"""
new_text = self.read_corpus(text_resource,
tokenize_corpus=self.is_tokenize,
postag_simple=self.postag_simple,
linewise=self.is_linewise,
verbose=self.verbose)
self.words.extend(new_text)
new_freq = self.freq_dist(new_text,
size_limit=self.max_dims,
required_words_file=self.required_voc,
# The following prioritizes old freq keys over new freq keys
required_words=self.freq.keys(),
verbose=self.verbose)
# update freq with the new counts
self.freq = self.merge_freqs(self.freq,
new_freq,
required_words_file=self.required_voc,
max_length=self.max_dims)
def read_incremental_parts(self, outspace, outcols, flyfile, verbose=False):
"""
Returns a co-occurrence matrix, a corresponding vocabulary and its index, and a Fruitfly object.
The matrix and the vocabulary can be newly instantiated or taken from existing files.
The Fruitfly object can be optionally created alongside, also either new or from an
existing file. All these options are handled by attributes of the Incrementor object
from which this method is called.
:param outspace: str -- file path to a co-occurrence count
:param outcols: str -- file path to the corresponding vocabulary
:param flyfile: str -- file path to a Fruitfly config (parameters and connections)
:param verbose: bool -- comment on the workings via print statements
:return: ndarray [[]] -- co-occurrence matrix (two axes, each of length n)
:return: {str:int} -- mapping of vocabulary to matrix positions (length: n)
:return: {int:str} -- mapping of matrix indices to vocabulary (length: n)
:return: Fruitfly -- Fruitfly object (or None if not wanted)
"""
if self.is_incremental:
if verbose: print("\nLoading existing co-occurrence count from",outspace,"...")
# returns dict of word : vector
unhashed_space = utils.readDM(outspace)
i_to_words, words_to_i = utils.readCols(outcols)
dimensions = sorted(words_to_i, key=words_to_i.get)
cooc = np.stack(tuple([unhashed_space[w] for w in dimensions]))
else:
cooc = np.array([[]])
words_to_i = {}
i_to_words = {}
if self.is_grow_fly:
if self.is_new_fly:
if verbose: print("creating new fruitfly...")
# default config: (50,40000,6,5,log)
fruitfly = Fruitfly.from_scratch(max_pn_size=self.fly_max_pn)
else:
if verbose: print("loading fruitfly from",flyfile,"...")
fruitfly = Fruitfly.from_config(flyfile)
self.fly_max_pn = fruitfly.max_pn_size
else:
fruitfly = None
return cooc, words_to_i, i_to_words, fruitfly
def freq_dist(self, wordlist, size_limit=None, required_words_file=None, required_words=None, verbose=False):
"""
This method is used to limit the dimensionality of the count matrix, which speeds up processing.
The obtained dictionary is used as vocabulary reference of the current corpus at several processing steps.
For true incrementality, size_limit is None and the dictionary is computed over the currently available corpus.
If size_limit is None, required_words has no effect on the obtained dictionary.
:param wordlist: [str] -- list of (word) tokens from the text resource
:param size_limit: int -- maximum length of the returned frequency distribution
:param required_words_file: str -- file path to a list with prioritized words (regardless of their frequencies)
:param required_words: [str] -- used to pass already existing freq keys if freq needs to be extended
:param verbose: bool -- comment on workings via print statements
:return: {str:int} -- frequency distribution
"""
if verbose: print("creating frequency distribution over",len(wordlist),"tokens...")
freq = {}
# the linewise option is not tested.
if self.is_linewise:
for line in tqdm(wordlist):
for w in line:
if self.postag_simple:
# only counts nouns, verbs, and adjectives/adverbs
if w.endswith(("_N", "_V", "_J")):
if w in freq:
| |
<gh_stars>100-1000
# _*_ coding: utf-8 _*_
import importlib
import typing
from collections import OrderedDict, deque
from copy import copy
from pathlib import Path
from lxml import etree # type: ignore
from lxml.etree import QName # type: ignore
from pydantic.fields import SHAPE_LIST, SHAPE_SINGLETON
if typing.TYPE_CHECKING:
from fhir.resources.fhirabstractmodel import FHIRAbstractModel
from pydantic.fields import ModelField
__author__ = "<NAME><<EMAIL>>"
StrBytes = typing.Union[str, bytes]
StrNone = typing.Union[str, None]
StrBytesNone = typing.Union[str, bytes, None]
DictStr = typing.Dict[str, str]
DictStrBytes = typing.Dict[str, StrBytes]
DictStrNoneKey = typing.Dict[typing.Union[str, None], str]
DictStrBytesNoneKey = typing.Dict[StrNone, StrBytes]
TupleStrKeyVal = typing.Tuple[str, StrBytes]
ROOT_NS = "http://hl7.org/fhir"
XHTML_NS = "http://www.w3.org/1999/xhtml"
EMPTY_VALUE = None
FHIR_ROOT_MODULES: typing.Dict[str, typing.Any] = {
"R4": None,
"STU3": None,
"DSTU2": None,
}
def first_cap(string: str):
    """Return *string* with its first character upper-cased.

    FIX: uses slicing (``string[:1]``) instead of indexing so that an empty
    string is returned unchanged rather than raising IndexError.
    """
    return string[:1].upper() + string[1:]
def xml_represent(type_, val):
    """Render *val* in its XML string form for the given primitive *type_*.

    ``None`` passes through unchanged; booleans map to ``"true"``/``"false"``;
    everything else is delegated to the type's ``to_string``.
    """
    if val is None:
        return val
    if type_ is not bool:
        return type_.to_string(val)
    return "true" if val is True else "false"
def is_primitive_type(type_):
    """Return True if *type_* reports itself primitive, or is ``bool``."""
    reports_primitive = getattr(type_, "is_primitive", lambda: False)()
    if reports_primitive:
        return True
    # plain bool has no ``is_primitive`` hook but counts as primitive
    return type_ is bool
def get_fhir_type_name(type_):
    """Resolve the FHIR type name of *type_*.

    Falls back to ``"boolean"`` for plain ``bool`` and to
    ``"FHIRPrimitiveExtension"`` for the primitive-extension union type;
    re-raises AttributeError for anything else without the hook.
    """
    try:
        return type_.fhir_type_name()
    except AttributeError:
        if type_ is bool:
            return "boolean"
        rep = str(type_)
        looks_like_ext_union = rep.startswith("typing.Union[") and (
            "fhirtypes.FHIRPrimitiveExtensionType" in rep
        )
        if looks_like_ext_union:
            return "FHIRPrimitiveExtension"
        raise
def get_fhir_model_class(type_, check=True):
    """Return the FHIR model class for *type_*.

    When *check* is true, primitive types are rejected with ValueError.
    The lookup is delegated to the release-specific root module.
    """
    if check and is_primitive_type(type_):
        raise ValueError
    root = get_fhir_root_module(type_.__fhir_release__)
    return root.get_fhir_model_class(get_fhir_type_name(type_))
def get_fhir_root_module(fhir_release: str):
    """Import (at most once) and return the ``fhir.resources`` module for *fhir_release*.

    Imported modules are memoized in the module-level FHIR_ROOT_MODULES dict;
    the R4 release lives at the package root, others in a sub-package.
    """
    global FHIR_ROOT_MODULES
    if FHIR_ROOT_MODULES[fhir_release] is None:
        suffix = "" if fhir_release == "R4" else f".{fhir_release}"
        FHIR_ROOT_MODULES[fhir_release] = importlib.import_module(
            "fhir.resources" + suffix
        )
    return FHIR_ROOT_MODULES[fhir_release]
class SimpleNodeStorage:
    """Minimal deque-backed item storage bound to an owning :class:`Node`."""

    __slots__ = ("__storage__", "node")
    if typing.TYPE_CHECKING:
        node: "Node"
        __storage__: deque

    def __init__(self, node):
        """Bind this storage to *node* (must be a Node instance)."""
        assert isinstance(node, Node)
        # object.__setattr__ sidesteps any attribute hooks on subclasses
        object.__setattr__(self, "node", node)
        object.__setattr__(self, "__storage__", deque())

    def __iter__(self):
        """Iterate over the stored items."""
        return iter(self.__storage__)

    def __getitem__(self, index):
        """Positional access to stored items."""
        return self.__storage__[index]

    def __len__(self):
        """Number of stored items."""
        return len(self.__storage__)

    def append(self, item):
        """Store one item at the end."""
        self.__storage__.append(item)

    def extend(self, items):
        """Store every item of *items*, in order."""
        self.__storage__.extend(items)

    def as_list(self):
        """Return the stored items as a plain list."""
        return list(self.__storage__)
class NodeContainer(SimpleNodeStorage):
    """Storage for child nodes: accepts Node instances or raw lxml elements."""

    def __init__(self, node):
        """Bind the container to its owning node."""
        super().__init__(node)

    def append(self, item):
        """Append one child after type-checking it."""
        assert isinstance(item, (Node, etree._Element))
        super().append(item)

    def extend(self, items):
        """Append many children; every item must be a Node or lxml element."""
        if not all(isinstance(i, (Node, etree._Element)) for i in items):
            raise ValueError("value must be instance of ``Node``")
        super().extend(items)
class AttributeContainer(SimpleNodeStorage):
    """Storage for Attribute objects, enforcing the node's allowed-attribute set."""

    def __init__(self, node: "Node"):
        """Bind the container to its owning node."""
        super().__init__(node)

    def _check_allowed(self, item: "Attribute"):
        """Raise ValueError when the node restricts attributes and *item* is not allowed."""
        allowed = self.node._allowed_attrs
        if len(allowed) > 0 and item.name not in allowed:
            raise ValueError(f"'{item.name}' is not allowed attribute.")

    def append(self, item: "Attribute"):
        """Append one attribute after validating type and allowance."""
        assert isinstance(item, Attribute)
        self._check_allowed(item)
        super().append(item)

    def extend(self, items: typing.List["Attribute"]):
        """Append many attributes; all must be Attribute instances and allowed."""
        if not all(isinstance(i, Attribute) for i in items):
            raise ValueError("value must be instance of ``fhirxml.Attribute``")
        for item in items:
            self._check_allowed(item)
        super().extend(items)
class NamespaceContainer(SimpleNodeStorage):
    """Storage restricted to :class:`Namespace` items."""

    def __init__(self, node: "Node"):
        """Bind the container to its owning node."""
        super().__init__(node)

    def append(self, item: "Namespace"):
        """Append one namespace declaration after type-checking it."""
        assert isinstance(item, Namespace)
        super().append(item)

    def extend(self, items: typing.List["Namespace"]):
        """Append many namespace declarations; all must be Namespace instances."""
        if not all(isinstance(i, Namespace) for i in items):
            raise ValueError("value must be instance of fhirxml.Namespace")
        super().extend(items)
class CommentContainer(SimpleNodeStorage):
    """Storage restricted to :class:`Comment` items."""

    def __init__(self, node: "Node"):
        """Bind the container to its owning node."""
        super().__init__(node)

    def append(self, item: "Comment"):
        """Append one comment after type-checking it."""
        assert isinstance(item, Comment)
        super().append(item)

    def extend(self, items: typing.List["Comment"]):
        """Append many comments; all must be Comment instances."""
        if not all(isinstance(i, Comment) for i in items):
            raise ValueError("value must be instance of fhirxml.Comment")
        super().extend(items)
class AttributeValue:
    """An XML attribute value with optional single-quoting on output."""

    def __init__(self, raw: StrBytes, quote: bool = False):
        """Store *raw* as str (bytes are decoded) plus the quoting flag."""
        self.raw: str = raw.decode() if isinstance(raw, bytes) else raw
        self.quote = quote

    def to_xml(self) -> str:
        """Return the value, wrapped in single quotes when requested."""
        if self.quote:
            return "'{0}'".format(self.raw)
        return self.raw

    def __str__(self):
        return self.to_xml()

    @typing.no_type_check
    def __eq__(self, other: "AttributeValue"):
        """Equal when raw text and quoting flag both match."""
        return (self.raw, self.quote) == (other.raw, other.quote)
class Attribute:
    """A single XML attribute: a name plus an optional (possibly wrapped) value."""

    def __init__(
        self,
        name: typing.Union[str, QName],
        value: typing.Union[StrBytes, AttributeValue, None],
    ):
        """Store *name* and *value*; bytes values are decoded to str."""
        self.name: typing.Union[str, QName] = name
        if typing.TYPE_CHECKING:
            self.value: typing.Union[StrBytes, AttributeValue, None]
        # AttributeValue and str pass through; bytes are normalized to str
        if isinstance(value, bytes):
            value = value.decode()
        self.value = value

    def to_xml(self) -> typing.Tuple[str, StrNone]:
        """Return a ``(name, rendered value)`` pair; value may be None."""
        if isinstance(self.value, AttributeValue):
            return self.name, self.value.to_xml()
        if self.value is None:
            return self.name, None
        return self.name, typing.cast(str, self.value)

    def __str__(self):
        """Render as ``name="value"``."""
        return '{0}="{1}"'.format(*self.to_xml())

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.__str__()}>"

    @typing.no_type_check
    def __eq__(self, other: "Attribute"):
        """Equal when name and value both match."""
        return (self.name, self.value) == (other.name, other.value)
class Namespace:
    """An XML namespace declaration: optional prefix plus location URI."""

    def __init__(self, name: StrNone, location: StrBytes):
        """Store prefix *name* (None for the default namespace) and *location*."""
        self.name: StrNone = name
        # normalize bytes locations to str
        self.location = location.decode() if isinstance(location, bytes) else location

    def __str__(self):
        """Render as ``xmlns="..."`` or ``xmlns:prefix="..."``."""
        prefix = f":{self.name}" if self.name else ""
        return 'xmlns{0}="{1}"'.format(prefix, self.location)

    def to_xml(self):
        """Return the ``(prefix, location)`` pair."""
        return self.name, self.location

    def __repr__(self):
        return "<{0} {1}>".format(self.__class__.__name__, self.__str__())

    @typing.no_type_check
    def __eq__(self, other: "Namespace"):
        """Equal when prefix and location both match."""
        return (self.name, self.location) == (other.name, other.location)
class Comment:
    """XML/Node comment"""

    __slots__ = ("_text",)

    def __init__(self, comment: StrBytes):
        """Store the comment text internally as bytes."""
        self._text: bytes = comment.encode() if isinstance(comment, str) else comment

    def to_xml(self) -> etree._Comment:
        """Return an lxml comment element for this text."""
        return etree.Comment(self._text)

    def to_string(self) -> str:
        """Return the comment text decoded to str."""
        return self._text.decode()

    @classmethod
    def from_element(cls, element: etree._Comment) -> "Comment":
        """Build a Comment from an lxml comment element."""
        return cls(element.text)

    def __str__(self):
        return self.to_string()
class Node:
""" """
_allowed_attrs: typing.Set[str] = set()
    def __init__(
        self,
        name: typing.Union[str, QName],
        *,
        value: StrBytes = None,
        text: typing.Union[StrBytes, "Node"] = None,
        attributes: typing.List[Attribute] = None,
        namespaces: typing.List[Namespace] = None,
        comments: typing.List[Comment] = None,
        parent: "Node" = None,
        children: typing.List["Node"] = None,
    ):
        """Construct an XML node.

        :param name: tag name (plain string or lxml ``QName``).
        :param value: content of the node's ``value`` attribute (keyword-only).
        :param text: node text; a Node argument is serialized via ``set_text``.
        :param attributes: initial attributes (validated by AttributeContainer).
        :param namespaces: initial namespace declarations.
        :param comments: initial comments.
        :param parent: parent node, if any (must be a Node).
        :param children: initial child nodes.
        """
        self.name = name
        self._value = None
        self._text = None
        # container objects type-check everything that is added to them
        self.attributes = AttributeContainer(self)
        self.namespaces = NamespaceContainer(self)
        self.comments = CommentContainer(self)
        self.parent = None
        self.children = NodeContainer(self)
        if text:
            self.set_text(text)
        if value:
            self.value = value
        if attributes:
            self.attributes.extend(attributes)
        if namespaces:
            self.namespaces.extend(namespaces)
        if comments:
            self.comments.extend(comments)
        if parent:
            assert isinstance(parent, Node)
            self.parent = parent
        if children:
            self.children.extend(children)
def rename(self, new_name):
"""Rename the current Node name"""
if self.name == new_name:
raise ValueError("Current Node name and provided name are identical!")
self.name = new_name
def add_namespace(
self,
ns: typing.Union[Namespace, StrNone],
location: StrBytes = None,
):
""" """
if isinstance(ns, Namespace):
self.namespaces.append(ns)
return
if location is None:
raise ValueError("'location' value is required.")
self.namespaces.append(Namespace(ns, location))
def add_attribute(self, attr: typing.Union[str, Attribute], value: StrBytes = None):
""" """
if isinstance(attr, Attribute):
self.attributes.append(attr)
return
self.attributes.append(Attribute(attr, value))
    @property
    def text(self):
        """The node's text content (or None when unset)."""
        return self._text

    @text.setter
    def text(self, val):
        # direct assignment; use set_text() when the value may be a Node
        self._text = val
    @property
    def value(self):
        """Content of the node's ``value`` attribute (or None when unset)."""
        return self._value

    @value.setter
    def value(self, val):
        # stored verbatim; rendering happens at serialization time
        self._value = val
def set_text(self, value):
""" """
if isinstance(value, Node):
value = value.to_string(pretty_print=False, xml_declaration=False)
self._text = value
def add_text(self, value, prefix="", suffix=""):
""" """
if isinstance(value, Node):
value = value.to_string(pretty_print=False, xml_declaration=False)
if not isinstance(value, str):
if isinstance(value, bytes):
value = value.decode()
else:
value = str(value)
value = prefix + value + suffix
if not self._text:
self._text = ""
self._text += value
@classmethod
def create(
cls,
name,
*,
value: typing.Union[str, bytes] = None,
text: typing.Union[typing.Union[str, bytes], "Node"] = None,
attrs: typing.Union[
DictStrBytes,
typing.List[typing.Union[Attribute, typing.Tuple[str, StrBytes]]],
] = None,
namespaces: typing.Union[
DictStrBytesNoneKey,
typing.List[typing.Union[Namespace, typing.Tuple[StrNone, StrBytes]]],
] = None,
):
""" """
self = cls(name=name, value=value, text=text)
if attrs:
if isinstance(attrs, dict):
attrs = list(attrs.items())
if isinstance(attrs, list):
for attr in attrs:
if isinstance(attr, tuple):
self.add_attribute(*attr)
else:
self.add_attribute(attr)
else:
raise NotImplementedError
if namespaces:
if isinstance(namespaces, dict):
namespaces = list(namespaces.items())
if isinstance(namespaces, list):
for ns in namespaces:
if isinstance(ns, tuple):
self.add_namespace(*ns)
else:
self.add_namespace(ns)
else:
raise NotImplementedError
return self
    @staticmethod
    def clean_tag(element: etree._Element) -> str:
        """Clean tag name from namespace (returns the QName local part only)."""
        return QName(element.tag).localname
    @classmethod
    def from_element(
        cls,
        element: etree._Element,
        parent: "Node" = None,
        exists_ns: typing.List[Namespace] = None,
        comments: typing.List[Comment] = None,
    ):
        """Recursively build a Node tree from an lxml element.

        Namespaces already visible via ancestors (*exists_ns*) are not
        re-declared on this node; XML comments collected while iterating
        children are handed to the next real child element.
        """
        name = Node.clean_tag(element)
        me = cls(name)
        if element.text:
            me.text = element.text
        # Attributes
        for attr, value in element.attrib.items():
            if attr == "value":
                # the "value" attribute is stored on the node itself
                me.value = value
            else:
                me.add_attribute(attr, value)
        if exists_ns is None:
            exists_ns = []
        if parent is not None:
            # NOTE(review): list += mutates in place, so a caller-supplied
            # exists_ns list grows here as a side effect
            exists_ns += parent.namespaces.as_list()
        # handle namespaces: declare only those not already visible
        for prefix, location in element.nsmap.items():
            ns = Namespace(prefix, location)
            if ns in exists_ns:
                continue
            me.namespaces.append(ns)
            exists_ns.append(ns)
        # handle comments passed down from the parent's iteration
        if comments:
            me.comments.extend(comments)
        # potential comments for children
        child_comments: typing.Optional[typing.List[Comment]] = None
        for child in element:
            if isinstance(child, etree._Comment):
                # buffer comments: they belong to the next element sibling
                if child_comments is None:
                    child_comments = list()
                child_comments.append(Comment.from_element(child))
                continue
            # assumes every non-comment child has a default namespace mapping;
            # child.nsmap[None] would raise KeyError otherwise -- TODO confirm
            if child.nsmap[None] == XHTML_NS:
                # XHTML (narrative) content is kept as a raw lxml element
                me.children.append(child)
                continue
            child_name = Node.clean_tag(child)
            # dispatch to a specialised Node subclass when one is defined
            child_class = globals().get(child_name, Node)
            child_class.from_element(
                child, parent=me, exists_ns=copy(exists_ns), comments=child_comments
            )
            # reset
            child_comments = None
        if parent is not None:
            parent.children.append(me)
        return me
| |
<gh_stars>1-10
import functools
import sqlite3
import threading
import typing
import click
name_id = typing.Union[str, int]
class Database(threading.local):
    """Thread-local SQLite wrapper that proxies cursor methods on itself."""

    # schema bootstrap, executed once per connection
    INIT = """
    CREATE TABLE IF NOT EXISTS tags(id INTEGER PRIMARY KEY, name TEXT UNIQUE);
    CREATE TABLE IF NOT EXISTS tag_files(tag_id INTEGER, file_id INTEGER);
    CREATE TABLE IF NOT EXISTS files(id INTEGER PRIMARY KEY, name TEXT, path TEXT);
    CREATE TABLE IF NOT EXISTS options(name TEXT UNIQUE, value);
    CREATE TABLE IF NOT EXISTS selections(name TEXT UNIQUE, value TEXT);
    """
    # registry of cursor subclasses, filled by _BaseCursor.__init_subclass__
    CURSOR_TYPES = {}

    def __init__(self, db, *args, **kwargs):
        """Open the SQLite database at *db* and ensure the schema exists."""
        self._db = sqlite3.connect(db, *args, **kwargs)
        self._db.executescript(self.INIT)
        self._db.commit()

    def cursor(self):
        """Return a fresh cursor over the underlying connection."""
        return _BaseCursor(self._db)

    def __getattr__(self, item):
        """Proxy unknown attributes to a short-lived cursor.

        Callables are wrapped so the cursor is closed after the call.
        """
        cur = self.cursor()
        target = getattr(cur, item)
        if not callable(target):
            return target

        @functools.wraps(target)
        def call_and_close(*args, **kwargs):
            with cur:
                return target(*args, **kwargs)

        return call_and_close
class _BaseCursor:
    """Thin sqlite3 cursor wrapper: context-manager support plus typed sub-cursors."""

    def __init__(self, db):
        """Wrap *db*: either a raw connection, or another _BaseCursor to share."""
        if isinstance(db, _BaseCursor):
            self._db = db._db
            self._c = db._c
        else:
            self._db = db
            self._c = db.cursor()
        # bind the frequently used cursor/connection methods directly
        cur = self._c
        self.execute = cur.execute
        self.fetchone = cur.fetchone
        self.fetchall = cur.fetchall
        self.fetchmany = cur.fetchmany
        self.executescript = cur.executescript
        self.__iter__ = cur.__iter__
        self.close = cur.close
        self.commit = self._db.commit

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __init_subclass__(cls, **kwargs):
        # register each subclass under its ATTR_NAME for attribute dispatch
        Database.CURSOR_TYPES[cls.ATTR_NAME] = cls

    def _fetch_first(self):
        """Return the first column of every remaining row."""
        return [row[0] for row in self.fetchall()]

    def __getattr__(self, item):
        """Expose ``fetch_first`` and registered sub-cursors as attributes."""
        if item == 'fetch_first':
            return self._fetch_first
        cursor_type = Database.CURSOR_TYPES.get(item)
        if cursor_type is None:
            raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, item))
        return cursor_type(self)
class _SelectionsCursor(_BaseCursor):
    """Cursor for named selections: stored SQL conditions over tags/files."""

    ATTR_NAME = 'selections'

    @click.argument('name')
    @click.argument('value')
    def new(self, name: str, value: str):
        """Create new selection"""
        self.execute("INSERT INTO selections(name, value) VALUES(?, ?)", (name, value))
        self.commit()
        return True

    @click.argument('name')
    def remove(self, name: str):
        """Remove selection by name or id"""
        self.execute("DELETE FROM selections WHERE name = ?", (name, ))
        self.commit()
        return True

    def all_names(self):
        """Show all selection names"""
        self.execute("SELECT name FROM selections;")
        return self.fetch_first()

    @click.argument('src')
    @click.argument('dst')
    def rename(self, src: str, dst: str):
        """Change name of tag from old (may be id) to new"""
        if '__ALL__' in (src, dst):
            return False
        self.execute("UPDATE selections SET name = ? WHERE name = ?", (dst, src))
        self.commit()
        return True

    @click.argument('name')
    def resolve(self, name: str):
        """Return value of selection"""
        self.execute("SELECT value FROM selections WHERE name = ?", (name, ))
        row = self.fetchone()
        return row[0] if row is not None else None

    @click.argument('name')
    def select(self, name: str):
        """Return all elements matches selection"""
        # NOTE(review): the stored selection value is spliced into the SQL with
        # %; selections are treated as trusted SQL fragments by design, so this
        # must never be fed untrusted input.
        self.execute("SELECT files.name FROM tag_files INNER JOIN files ON files.id = tag_files.file_id INNER JOIN tags"
                     " ON tags.id = tag_files.tag_id WHERE %s" % self.resolve(name))
        return self.fetch_first()

    @click.argument('name')
    def exists(self, name: str):
        """Return true if selection exists"""
        return self.resolve(name) is not None

    @click.argument('name')
    @click.argument('file')
    def matches(self, name: str, file: name_id):
        """Return true if selection contains file"""
        # match by id for ints, by name otherwise; see select() for the
        # trusted-SQL-fragment caveat on resolve()
        column = "files.id" if isinstance(file, int) else "files.name"
        self.execute("SELECT files.name FROM tag_files INNER JOIN files ON files.id = tag_files.file_id INNER JOIN "
                     "tags ON tags.id = tag_files.tag_id WHERE %s = ? AND %s" % (column, self.resolve(name)),
                     (file, ))
        return self.fetchone() is not None
class _OptionsCursor(_BaseCursor):
    """Cursor for the key/value ``options`` table."""

    ATTR_NAME = 'options'

    @click.argument('name')
    @click.argument('value')
    def set(self, name: str, value):
        """Set filesystem option"""
        self.execute("INSERT OR REPLACE INTO options(name, value) VALUES(?, ?)", (name, value))
        self.commit()

    @click.argument('name')
    def get(self, name: str):
        """Get filesystem option (None when unset)"""
        self.execute("SELECT value FROM options WHERE name = ?", (name, ))
        row = self.fetchone()
        return None if row is None else row[0]

    @click.argument('name')
    def unset(self, name: str):
        """Remove filesystem option"""
        # FIX: was "REMOVE FROM options ..." -- REMOVE is not an SQL statement,
        # so every call raised sqlite3.OperationalError. DELETE is correct.
        self.execute("DELETE FROM options WHERE name = ?", (name, ))
        self.commit()
class _TagsCursor(_BaseCursor):
    """Cursor for the ``tags`` table; '__ALL__' is a reserved pseudo-tag (id -1)."""

    ATTR_NAME = 'tags'

    @click.argument('name')
    def new(self, name: str):
        """Create new tag"""
        if name == '__ALL__':
            return False  # reserved pseudo-tag
        self.execute("INSERT INTO tags(name) VALUES (?)", (name, ))
        self.commit()
        return True

    @click.argument('name')
    def remove(self, name: name_id):
        """Remove tag by name or id"""
        if name == '__ALL__':
            return False
        tag_id = self.get_id(name)
        # drop the tag's file links before the tag row itself
        self.execute("DELETE FROM tag_files WHERE tag_id = ?", (tag_id, ))
        self.execute("DELETE FROM tags WHERE id = ?", (tag_id, ))
        self.commit()
        return True

    @click.argument('name')
    def get_id(self, name: name_id):
        """Return tag id by name (or id)"""
        if isinstance(name, int):
            return name
        if name == '__ALL__':
            return -1
        self.execute("SELECT id FROM tags WHERE name = ?", (name,))
        row = self.fetchone()
        return row[0] if row is not None else None

    @click.argument('name')
    def get_name(self, name: name_id):
        """Return name of tag by id (or name)"""
        if isinstance(name, str):
            return name
        self.execute("SELECT name FROM tags WHERE id = ?", (name,))
        row = self.fetchone()
        return row[0] if row is not None else None

    def all_names(self):
        """Show all tag names"""
        self.execute("SELECT name FROM tags;")
        return self.fetch_first()

    def all_ids(self):
        """Show all tag ids"""
        self.execute("SELECT id FROM tags;")
        return self.fetch_first()

    @click.argument('src')
    @click.argument('dst')
    def rename(self, src: name_id, dst: str):
        """Change name of tag from old (may be id) to new"""
        if '__ALL__' in (src, dst):
            return False
        column = "id" if isinstance(src, int) else "name"
        self.execute("UPDATE tags SET name = ? WHERE %s = ?" % column, (dst, src))
        self.commit()
        return True

    @click.argument('name')
    def exists(self, name: name_id):
        """Return true if tag exists"""
        return self.get_id(name) is not None
class _FilesCursor(_BaseCursor):
ATTR_NAME = 'files'
    @click.argument('name')
    @click.argument('path')
    def new(self, name: str, path: str):
        """Create new file link mapping *name* to filesystem *path*."""
        self.execute("INSERT INTO files(name, path) VALUES (?, ?)", (name, path))
        self.commit()
        return True
@click.argument('name')
def remove(self, name: name_id):
"""Remove file link by name or id"""
i = self.get_id(name)
self.execute("DELETE FROM tag_files WHERE file_id = ?", (i, ))
self.execute("DELETE FROM files WHERE id = ?", (i, ))
self.commit()
return True
@click.argument('name')
def get_id(self, name: name_id):
"""Return id of file by name (or id)"""
if isinstance(name, int):
return name
self.execute("SELECT id FROM files WHERE name = ?", (name,))
c = self.fetchone()
return None if c is None else c[0]
@click.argument('name')
def get_name(self, name: name_id):
"""Return name of file by id (or name)"""
if isinstance(name, str):
return name
self.execute("SELECT name FROM files WHERE id = ?", (name,))
c = self.fetchone()
return None if c is None else c[0]
    def all_names(self):
        """Return the names of all registered files."""
        self.execute("SELECT name FROM files;")
        return self.fetch_first()
    def all_ids(self):
        """Return the ids of all registered files."""
        self.execute("SELECT id FROM files;")
        return self.fetch_first()
@click.argument('src')
@click.argument('dst')
def rename(self, src: name_id, dst: str):
"""Rename file from old (may be id) to new"""
if isinstance(src, int):
self.execute("UPDATE files SET name = ? WHERE id = ?", (dst, src))
else:
self.execute("UPDATE files SET name = ? WHERE name = ?", (dst, src))
self.commit()
return True
    @click.argument('name')
    def exists(self, name: name_id):
        """Return true if a file with this name (or id) exists."""
        return self.get_id(name) is not None
@click.argument('tag')
def get_by_tag(self, tag: name_id):
"""Return all file name with given tag (name or id)"""
if tag == '__ALL__':
return self.all_names()
t = self.tags.get_id(tag)
self.execute("SELECT files.name FROM files INNER JOIN tag_files ON tag_files.file_id = files.id WHERE "
"tag_files.tag_id = ?", (t, ))
return self.fetch_first()
@click.argument('name')
@click.argument('tag')
def add_tag(self, name: name_id, tag: name_id):
"""Add tag to file"""
if tag == '__ALL__':
return False
self.execute("INSERT INTO tag_files(tag_id, file_id) VALUES(?, ?)",
(self.tags.get_id(tag), self.get_id(name)))
self.commit()
return True
@click.argument('name')
@click.argument('tag')
def remove_tag(self, name: name_id, tag: name_id):
"""Remove tag from file"""
if tag == '__ALL__':
return False
self.execute("DELETE FROM tag_files WHERE tag_id = ? AND file_id = ?",
(self.tags.get_id(tag), self.get_id(name)))
self.commit()
return True
@click.argument('name')
@click.argument('tag')
def has_tag(self, name: name_id, tag: name_id):
"""Return true if file has tag"""
if tag in ('__ALL__', -1):
return True
self.execute("SELECT tag_id FROM tag_files WHERE tag_id = ? AND file_id = ?",
(self.tags.get_id(tag), self.get_id(name)))
return len(self.fetch_first()) > 0
@click.argument('name')
def resolve(self, name: name_id):
"""Return path of file"""
if isinstance(name, int):
self.execute("SELECT path FROM files WHERE id = ?", (name, ))
else:
self.execute("SELECT path FROM files WHERE name = ?", (name, ))
c = self.fetchone()
return None if c is None else c[0]
@click.argument('name')
def get_tags(self, name: name_id):
"""Return all tags of file"""
name = self.get_id(name)
self.execute("SELECT tags.name FROM tag_files INNER JOIN tags ON tags.id = tag_files.tag_id WHERE"
" tag_files.file_id = ?", (name, ))
return self.fetch_first()
@click.argument('name')
@click.argument('tags', nargs=-1)
def set_tags(self, name: name_id, tags: typing.List[name_id]):
"""Replace tags of file"""
name = self.get_id(name)
self.execute("DELETE FROM tag_files WHERE file_id = ?", (name, ))
t = self.tags
for tag in tags:
i = t.get_id(tag)
if i is None:
t.new(tag)
| |
realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/release/dates',
'release_dates',
limit=10000,
release_id=release_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
sort_order=sort_order,
include_release_dates_with_no_data=include_release_dates_with_no_data
)
)
if not df.empty:
df.date = pd.to_datetime(df.date, format='%Y-%m-%d')
return df
def release_series(
self,
release_id: int,
realtime_start: date = None,
realtime_end: date = None,
order_by: enums.OrderBy = enums.OrderBy.series_id,
sort_order: enums.SortOrder = enums.SortOrder.asc,
filter_variable: enums.FilterVariable = None,
filter_value: enums.FilterValue = None,
tag_names: List[str] = None,
exclude_tag_names: List[str] = None
) -> pd.DataFrame:
"""
## Parameters
`release_id`
The id for a release.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`order_by`
Order results by values of the specified attribute.
`sort_order`
Sort results is ascending or descending order for attribute values specified by order_by.
`filter_variable`
The attribute to filter results by.
`filter_value`
The value of the filter_variable attribute to filter results by.
`tag_names`
Tuple of tag names that series match all of.
`exclude_tag_names`
Tuple of tag names that series match none of.
## Description
https://fred.stlouisfed.org/docs/api/fred/release_series.html
Get the series on a release of economic data.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/release/series?release_id=51&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2017-08-01",
"realtime_end": "2017-08-01",
"order_by": "series_id",
"sort_order": "asc",
"count": 57,
"offset": 0,
"limit": 1000,
"seriess": [
{
"id": "BOMTVLM133S",
"realtime_start": "2017-08-01",
"realtime_end": "2017-08-01",
"title": "U.S. Imports of Services - Travel",
"observation_start": "1992-01-01",
"observation_end": "2017-05-01",
"frequency": "Monthly",
"frequency_short": "M",
"units": "Million of Dollars",
"units_short": "Mil. of $",
"seasonal_adjustment": "Seasonally Adjusted",
"seasonal_adjustment_short": "SA",
"last_updated": "2017-07-06 09:34:00-05",
"popularity": 0,
"group_popularity": 0
},
...
]
)
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.release_series(release_id=51).head()
realtime_start realtime_end title observation_start observation_end frequency frequency_short units units_short seasonal_adjustment seasonal_adjustment_short last_updated popularity group_popularity notes
id
BOMTVLM133S 2022-02-05 2022-02-05 U.S. Imports of Services - Travel 1992-01-01 2017-09-01 Monthly M Million of Dollars Mil. of $ Seasonally Adjusted SA 2017-11-03 13:12:15+00:00 1 1 Further information related to the internation...
BOMVGMM133S 2022-02-05 2022-02-05 U.S. Imports of Services: U.S. Government Misc... 1992-01-01 2013-12-01 Monthly M Millions of Dollars Mil. of $ Seasonally Adjusted SA 2014-10-20 14:27:37+00:00 1 1 BEA has introduced new table presentations, in...
BOMVJMM133S 2022-02-05 2022-02-05 U.S. Imports of Services - Direct Defense Expe... 1992-01-01 2013-12-01 Monthly M Millions of Dollars Mil. of $ Seasonally Adjusted SA 2014-10-20 14:26:44+00:00 1 1 BEA has introduced new table presentations, in...
BOMVMPM133S 2022-02-05 2022-02-05 U.S. Imports of Services - Passenger Fares 1992-01-01 2017-09-01 Monthly M Million of Dollars Mil. of $ Seasonally Adjusted SA 2017-11-03 13:12:15+00:00 1 1 Further information related to the internation...
BOMVOMM133S 2022-02-05 2022-02-05 U.S. Imports of Services - Other Private Servi... 1992-01-01 2013-12-01 Monthly M Million of Dollars Mil. of $ Seasonally Adjusted SA 2014-10-20 14:25:54+00:00 1 1 BEA has introduced new table presentations, in...
```
"""
allowed_orders = [
enums.OrderBy.series_id,
enums.OrderBy.title,
enums.OrderBy.units,
enums.OrderBy.frequency,
enums.OrderBy.seasonal_adjustment,
enums.OrderBy.realtime_start,
enums.OrderBy.realtime_end,
enums.OrderBy.last_updated,
enums.OrderBy.observation_start,
enums.OrderBy.observation_end,
enums.OrderBy.popularity,
enums.OrderBy.group_popularity,
]
if order_by not in allowed_orders:
raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
if filter_variable is not None and filter_variable not in enums.FilterVariable:
raise ValueError('Variable allowed_filter_variables ({}) is not one of the values: {}'.format(filter_variable, ', '.join(map(str, enums.FilterVariable))))
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/release/series',
'seriess',
limit=1000,
release_id=release_id,
realtime_start=realtime_start,
realtime_end=realtime_end,
order_by=order_by,
sort_order=sort_order,
filter_variable=filter_variable,
filter_value=filter_value,
tag_names=tag_names,
exclude_tag_names=exclude_tag_names
)
)
date_columns = [
'realtime_start', 'realtime_end',
'observation_start', 'observation_end',
]
if not df.empty:
df[date_columns] = df[date_columns].apply(pd.to_datetime, format='%Y-%m-%d')
df.last_updated = pd.to_datetime(df.last_updated + '00', utc=True, format='%Y-%m-%d %H:%M:%S%z')
df = df.astype(dtype={
'id': 'string',
'title': 'string',
'notes': 'string',
'frequency': 'category',
'frequency_short': 'category',
'units': 'category',
'units_short': 'category',
'seasonal_adjustment': 'category',
'seasonal_adjustment_short': 'category'
}).set_index('id')
return df
def release_sources(self, release_id: int, realtime_start: date = None, realtime_end: date = None) -> pd.DataFrame:
"""
## Parameters
`release_id`
The id for a release.
`realtime_start`
The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
`realtime_end`
The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
## Description
https://fred.stlouisfed.org/docs/api/fred/release_sources.html
Get the sources for a release of economic data.
## API Request (HTTPS GET)
https://api.stlouisfed.org/fred/release/sources?release_id=51&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
## API Response
```json
{
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"sources": [
{
"id": 18,
"realtime_start": "2013-08-14",
"realtime_end": "2013-08-14",
"name": "U.S. Department of Commerce: Bureau of Economic Analysis",
"link": "http://www.bea.gov/"
},
...
]
}
```
## Returns
`pandas.DataFrame`
## Example
```python
>>> fred = FRED(api_key='<KEY>')
>>> fred.release_sources(release_id=51).head()
realtime_start realtime_end name link
id
19 2022-02-05 2022-02-05 U.S. Census Bureau http://www.census.gov/
18 2022-02-05 2022-02-05 U.S. Bureau of Economic Analysis http://www.bea.gov/
"""
if realtime_start is not None and realtime_start < date(1776, 7, 4):
raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
df = pd.DataFrame(
self._client.get(
'/fred/release/sources',
'sources',
release_id=release_id,
realtime_start=realtime_start,
realtime_end=realtime_end
)
)
date_columns = ['realtime_start', 'realtime_end']
if not df.empty:
df[date_columns] = df[date_columns].apply(pd.to_datetime, format='%Y-%m-%d')
df = df.astype(dtype={
'name': 'string',
'link': 'string'
}).set_index('id')
return df
    def release_tags(
            self,
            release_id: int,
            realtime_start: date = None,
            realtime_end: date = None,
            tag_names: List[str] = None,
            tag_group_id: enums.TagGroupID = None,
            search_text: str = None,
            order_by: enums.OrderBy = enums.OrderBy.series_count,
            sort_order: enums.SortOrder = enums.SortOrder.asc
    ) -> pd.DataFrame:
        """
        ## Parameters
        `release_id`
        The id for a release.
        `realtime_start`
        The start of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
        `realtime_end`
        The end of the real-time period. For more information, see [Real-Time Periods](https://fred.stlouisfed.org/docs/api/fred/realtime_period.html).
        `tag_names`
        Tuple of tag names that series match all of.
        `tag_group_id`
        A tag group id to filter tags by type.
        `search_text`
        The words to find matching tags with.
        `order_by`
        Order results by values of the specified attribute.
        `sort_order`
        Sort results is ascending or descending order for attribute values specified by order_by.
        ## Description
        https://fred.stlouisfed.org/docs/api/fred/release_tags.html
        Get the FRED tags for a release.
        Optionally, filter results by tag name, tag group, or search.
        Series are assigned tags and releases.
        Indirectly through series, it is possible to get the tags for a release.
        See the related request fred/release/related_tags.
        ## API Request (HTTPS GET)
        https://api.stlouisfed.org/fred/release/tags?release_id=86&api_key=abcdefghijklmnopqrstuvwxyz123456&file_type=json
        ## API Response
        ```json
        {
            "realtime_start": "2013-08-14",
            "realtime_end": "2013-08-14",
            "order_by": "series_count",
            "sort_order": "desc",
            "count": 13,
            "offset": 0,
            "limit": 1000,
            "tags": [
                {
                    "name": "commercial paper",
                    "group_id": "gen",
                    "notes": "",
                    "created": "2012-03-19 10:40:59-05",
                    "popularity": 55,
                    "series_count": 18
                },
                ...
            ]
        }
        ```
        ## Returns
        `pandas.DataFrame`
        ## Example
        ```python
        >>> fred = FRED(api_key='<KEY>')
        >>> fred.release_tags(release_id=86).head()
                   group_id notes                   created  popularity  series_count
        name
        1-month         gen        2012-02-27 16:18:19+00:00          39             2
        2-month         gen        2012-05-25 16:29:21+00:00          17             2
        owned           gen        2012-06-25 20:04:36+00:00          33             2
        tier-2          gen        2014-02-12 17:18:16+00:00         -13             2
        10-20 days      gen        2014-02-12 17:08:07+00:00         -16             4
        ```
        """
        # This endpoint accepts only a subset of the OrderBy values.
        allowed_orders = [
            enums.OrderBy.series_count,
            enums.OrderBy.popularity,
            enums.OrderBy.created,
            enums.OrderBy.name,
            enums.OrderBy.group_id,
        ]
        if order_by not in allowed_orders:
            raise ValueError('Variable order_by ({}) is not one of the values: {}'.format(order_by, ', '.join(map(str, allowed_orders))))
        # FRED real-time periods cannot start before 1776-07-04, and the
        # period must be non-empty when both endpoints are supplied.
        if realtime_start is not None and realtime_start < date(1776, 7, 4):
            raise ValueError('Variable realtime_start ("{}") is before min date 1776-07-04.'.format(realtime_start))
        if realtime_start is not None and realtime_end is not None and realtime_start > realtime_end:
            raise ValueError('The date set by variable realtime_start ("{}") can not be after the date set by variable realtime_end ("{}").'.format(realtime_start, realtime_end))
        # limit=1000 is the documented maximum page size for this endpoint.
        df = pd.DataFrame(
            self._client.get(
                '/fred/release/tags',
                'tags',
                limit=1000,
                release_id=release_id,
                realtime_start=realtime_start,
                realtime_end=realtime_end,
                tag_names=tag_names,
                tag_group_id=tag_group_id,
                search_text=search_text,
                order_by=order_by,
                sort_order=sort_order
            )
        )
        if not df.empty:
            # The API emits offsets like "-05"; append "00" so the value
            # matches the %z directive (e.g. "-0500").
            df.created = pd.to_datetime(df.created + '00', utc=True, format='%Y-%m-%d %H:%M:%S%z')
            df = df.astype(dtype={
                'name': 'string',
                'notes': 'string',
                'group_id': 'category'
            }).set_index('name')
        return df
def release_related_tags(
self,
release_id: int,
realtime_start: date = None,
| |
<gh_stars>0
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test some ESRI specific translation issues.
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2003, <NAME> <<EMAIL>>
# Copyright (c) 2009-2013, <NAME> <even dot rouault at mines-paris dot org>
# Copyright (c) 2013, <NAME> <kyle at pobox dot com>
# Copyright (c) 2014, Google
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import gdaltest
from osgeo import ogr
from osgeo import osr
import pytest
###############################################################################
# This test verifies that morphToESRI() translates idiosyncratic datum names
# from "EPSG" form to ESRI from when the exception list comes from the
# gdal_datum.csv file.
def test_osr_esri_1():
    """morphToESRI()/morphFromESRI() round-trip idiosyncratic datum names."""
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4202)

    datum = srs.GetAttrValue('DATUM')
    assert datum == 'Australian_Geodetic_Datum_1966', \
        ('Got wrong DATUM name (%s) after EPSG import.' % datum)

    srs.MorphToESRI()
    datum = srs.GetAttrValue('DATUM')
    assert datum == 'D_Australian_1966', \
        ('Got wrong DATUM name (%s) after ESRI morph.' % datum)

    srs.MorphFromESRI()
    datum = srs.GetAttrValue('DATUM')
    assert datum == 'Australian_Geodetic_Datum_1966', \
        ('Got wrong DATUM name (%s) after ESRI unmorph.' % datum)
###############################################################################
# Verify that exact correct form of UTM names is established when
# translating certain GEOGCSes to ESRI format.
def test_osr_esri_2():
    """UTM GEOGCS/PROJCS names take the exact ESRI form after morphToESRI()."""
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32711)
    srs.MorphToESRI()

    geogcs = srs.GetAttrValue('GEOGCS')
    assert geogcs == 'GCS_WGS_1984', \
        ('Got wrong GEOGCS name (%s) after ESRI morph.' % geogcs)

    projcs = srs.GetAttrValue('PROJCS')
    assert projcs == 'WGS_1984_UTM_Zone_11S', \
        ('Got wrong PROJCS name (%s) after ESRI morph.' % projcs)
###############################################################################
# Verify Polar Stereographic translations work properly OGR to ESRI.
def test_osr_esri_4():
    """Polar Stereographic morphs OGR -> ESRI into Stereographic_South_Pole."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["PS Test",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-80.2333],PARAMETER["central_meridian",171],PARAMETER["false_northing",0],UNIT["metre",1]]')
    srs.MorphToESRI()

    projection = srs.GetAttrValue('PROJECTION')
    assert projection == 'Stereographic_South_Pole', \
        ('Got wrong PROJECTION name (%s) after ESRI morph.' % projection)

    # latitude_of_origin must have been mapped onto standard_parallel_1.
    parallel = srs.GetProjParm('standard_parallel_1')
    assert parallel == -80.2333, \
        ('Got wrong parameter value (%g) after ESRI morph.' % parallel)
###############################################################################
# Verify Polar Stereographic translations work properly ESRI to OGR.
def test_osr_esri_5():
    """Polar Stereographic morphs ESRI -> OGR back into Polar_Stereographic."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["PS Test",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Stereographic_South_Pole"],PARAMETER["standard_parallel_1",-80.2333],PARAMETER["central_meridian",171],PARAMETER["scale_factor",0.9999],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]')
    srs.MorphFromESRI()

    projection = srs.GetAttrValue('PROJECTION')
    assert projection == 'Polar_Stereographic', \
        ('Got wrong PROJECTION name (%s) after ESRI morph.' % projection)

    # standard_parallel_1 must have been mapped back onto latitude_of_origin.
    origin = srs.GetProjParm('latitude_of_origin')
    assert origin == -80.2333, \
        ('Got wrong parameter value (%g) after ESRI morph.' % origin)
###############################################################################
# Verify Lambert 2SP with a 1.0 scale factor still gets translated to 2SP
# per bug 187.
def test_osr_esri_6():
    """Lambert 2SP with a 1.0 scale factor still maps to the 2SP variant (bug 187)."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["Texas Centric Mapping System/Lambert Conformal",GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["False_Easting",1500000.0],PARAMETER["False_Northing",5000000.0],PARAMETER["Central_Meridian",-100.0],PARAMETER["Standard_Parallel_1",27.5],PARAMETER["Standard_Parallel_2",35.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Latitude_Of_Origin",18.0],UNIT["Meter",1.0]]')
    srs.MorphFromESRI()

    projection = srs.GetAttrValue('PROJECTION')
    assert projection == 'Lambert_Conformal_Conic_2SP', \
        ('Got wrong PROJECTION name (%s) after ESRI morph, expected 2SP' % projection)
###############################################################################
# Verify that FEET is treated as US survey feet per bug #1533.
def test_osr_esri_7():
    """Verify that the ambiguous unit name FEET is treated as US survey feet (bug #1533)."""
    # Old Arc/Info style .prj definition; "Units FEET" does not say which foot.
    prj = ['Projection STATEPLANE',
           'Fipszone 903',
           'Datum NAD83',
           'Spheroid GRS80',
           'Units FEET',
           'Zunits NO',
           'Xshift 0.0',
           'Yshift 0.0',
           'Parameters ',
           '']
    srs_prj = osr.SpatialReference()
    srs_prj.ImportFromESRI(prj)
    # Reference CRS spelling the unit out explicitly as Foot_US.
    wkt = """PROJCS["NAD83 / Florida North",
    GEOGCS["NAD83",
        DATUM["North_American_Datum_1983",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6269"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4269"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["latitude_of_origin",29],
    PARAMETER["central_meridian",-84.5],
    PARAMETER["standard_parallel_1",30.75],
    PARAMETER["standard_parallel_2",29.5833333333333],
    PARAMETER["false_easting",1968500],
    PARAMETER["false_northing",0],
    UNIT["Foot_US",0.304800609601219],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    srs_wkt = osr.SpatialReference(wkt=wkt)
    if not srs_prj.IsSame(srs_wkt):
        print('got: %s' % srs_prj.ExportToPrettyWkt())
        pytest.fail('old style ESRI projection imported wrong, perhaps linear units?')
###############################################################################
# Verify that handling of numerically specified units (see bug #1533)
def test_osr_esri_8():
    """Verify handling of numerically specified linear units (bug #1533)."""
    # Old Arc/Info style .prj with the unit given as feet-per-metre (1/0.3048).
    prj = ['Projection STATEPLANE',
           'Fipszone 903',
           'Datum NAD83',
           'Spheroid GRS80',
           'Units 3.280839895013123',
           'Zunits NO',
           'Xshift 0.0',
           'Yshift 0.0',
           'Parameters ',
           '']
    srs_prj = osr.SpatialReference()
    srs_prj.ImportFromESRI(prj)
    # Reference CRS: numeric unit becomes "user-defined" 0.3048 m (international foot).
    wkt = """PROJCS["NAD83 / Florida North",
    GEOGCS["NAD83",
        DATUM["North_American_Datum_1983",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6269"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4269"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["latitude_of_origin",29],
    PARAMETER["central_meridian",-84.5],
    PARAMETER["standard_parallel_1",30.75],
    PARAMETER["standard_parallel_2",29.5833333333333],
    PARAMETER["false_easting",1968503.93700787],
    PARAMETER["false_northing",0],
    UNIT["user-defined",0.3048],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    srs_wkt = osr.SpatialReference(wkt=wkt)
    assert srs_prj.IsSame(srs_wkt), \
        'old style ESRI projection imported wrong, perhaps linear units?'
###############################################################################
# Verify Equidistant Conic handling.
def test_osr_esri_9():
    """Verify Equidistant Conic round-trips between ESRI and OGR parameter names."""
    srs = osr.SpatialReference()
    esri_wkt = 'PROJCS["edc",GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Conic"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",-96.0],PARAMETER["Standard_Parallel_1",29.5],PARAMETER["Standard_Parallel_2",45.5],PARAMETER["Latitude_Of_Origin",37.5],UNIT["Meter",1.0]]'
    srs.SetFromUserInput(esri_wkt)
    # ESRI Latitude_Of_Origin/Central_Meridian map to latitude/longitude_of_center.
    expected = 'PROJCS["edc",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Conic"],PARAMETER["latitude_of_center",37.5],PARAMETER["longitude_of_center",-96],PARAMETER["standard_parallel_1",29.5],PARAMETER["standard_parallel_2",45.5],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected Equidistant Conic SRS after morphFromESRI')
    # Morphing back must reproduce the original ESRI WKT exactly.
    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    expected = esri_wkt
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected Equidistant Conic SRS after morphToESRI')
###############################################################################
# Verify Plate_Carree handling.
def test_osr_esri_10():
    """Verify Plate_Carree handling in both morph directions."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["Sphere_Plate_Carree",GEOGCS["GCS_Sphere",DATUM["D_Sphere",SPHEROID["Sphere",6371000.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Plate_Carree"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],UNIT["Meter",1.0]]')
    # ESRI Plate_Carree becomes OGR Equirectangular.
    expected = 'PROJCS["Sphere_Plate_Carree",GEOGCS["Unknown datum based upon the Authalic Sphere",DATUM["Not_specified_based_on_Authalic_Sphere",SPHEROID["Sphere",6371000,0],AUTHORITY["EPSG","6035"]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equirectangular"],PARAMETER["standard_parallel_1",0],PARAMETER["central_meridian",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected Equirectangular SRS after morphFromESRI')
    # Morphing back yields Equidistant_Cylindrical, not the original Plate_Carree.
    expected = 'PROJCS["Sphere_Plate_Carree",GEOGCS["GCS_Sphere",DATUM["D_Sphere",SPHEROID["Sphere",6371000.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Cylindrical"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],PARAMETER["Standard_Parallel_1",0.0],UNIT["Meter",1.0]]'
    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected Equidistant_Cylindrical SRS after morphToESRI')
###############################################################################
# Verify arc/info style TM handling.
def test_osr_esri_11():
    """Verify Arc/Info style Transverse Mercator handling."""
    srs = osr.SpatialReference()
    # Old-style .prj: parameters carry /* comments and DMS-encoded angles.
    srs.ImportFromESRI(['Projection TRANSVERSE',
                        'Datum NAD27',
                        'Spheroid CLARKE1866',
                        'Units METERS',
                        'Zunits NO',
                        'Xshift 0.0',
                        'Yshift 0.0',
                        'Parameters ',
                        '1.0 /* scale factor at central meridian',
                        '-106 56 0.5 /* longitude of central meridian',
                        ' 39 33 30 /* latitude of origin',
                        '0.0 /* false easting (meters)',
                        '0.0 /* false northing (meters)'])
    # Expected result: DMS values converted to decimal degrees.
    expected = 'PROJCS["unnamed",GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.978698213898,AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4267"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",39.5583333333333],PARAMETER["central_meridian",-106.933472222222],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["METERS",1],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected TM SRS after morphFromESRI')
###############################################################################
# Test automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'
def test_osr_esri_12():
    """Automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('ESRI::PROJCS["Lambert Conformal Conic",GEOGCS["grs80",DATUM["D_North_American_1983",SPHEROID["Geodetic_Reference_System_1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]],PROJECTION["Lambert_Conformal_Conic"],PARAMETER["standard_parallel_1",34.33333333333334],PARAMETER["standard_parallel_2",36.16666666666666],PARAMETER["latitude_of_origin",33.75],PARAMETER["central_meridian",-79],PARAMETER["false_easting",609601.22],PARAMETER["false_northing",0],UNIT["Meter",1]]')
    # No MorphFromESRI() is required
    assert srs.GetAttrValue('PROJECTION') == 'Lambert_Conformal_Conic_2SP', \
        ('Got wrong PROJECTION name (%s) after ESRI morph.' %
         srs.GetAttrValue('PROJECTION'))
    assert abs(srs.GetProjParm('standard_parallel_1') - 34.333333333) <= 0.00001, \
        ('Got wrong parameter value (%g) after ESRI morph.' %
         srs.GetProjParm('standard_parallel_1'))
    # BUG FIX: the DATUM check previously only called gdaltest.post_reason(),
    # which records a message but never fails the test, so a wrong DATUM
    # slipped through silently. Assert like the surrounding checks.
    assert srs.GetAttrValue('DATUM') == 'North_American_Datum_1983', \
        ('Got wrong DATUM name (%s) after ESRI morph.' %
         srs.GetAttrValue('DATUM'))
    assert srs.GetAttrValue('UNIT') == 'metre', \
        ('Got wrong UNIT name (%s) after ESRI morph.' % srs.GetAttrValue('UNIT'))
###############################################################################
# Test automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::'
# but read directly from file.
def test_osr_esri_13():
    """Automatic morphing of ESRI-style LCC WKT prefixed with 'ESRI::', read from a file."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('data/lcc_esri.prj')
    # No MorphFromESRI() is required
    assert srs.GetAttrValue('PROJECTION') == 'Lambert_Conformal_Conic_2SP', \
        ('Got wrong PROJECTION name (%s) after ESRI morph.' %
         srs.GetAttrValue('PROJECTION'))
    assert abs(srs.GetProjParm('standard_parallel_1') - 34.333333333) <= 0.00001, \
        ('Got wrong parameter value (%g) after ESRI morph.' %
         srs.GetProjParm('standard_parallel_1'))
    # BUG FIX: the DATUM check previously only called gdaltest.post_reason(),
    # which records a message but never fails the test, so a wrong DATUM
    # slipped through silently. Assert like the surrounding checks.
    assert srs.GetAttrValue('DATUM') == 'North_American_Datum_1983', \
        ('Got wrong DATUM name (%s) after ESRI morph.' %
         srs.GetAttrValue('DATUM'))
    assert srs.GetAttrValue('UNIT') == 'metre', \
        ('Got wrong UNIT name (%s) after ESRI morph.' % srs.GetAttrValue('UNIT'))
###############################################################################
# Verify that state plane epsg authority values are not applied if the
# linear units are changed for old style .prj files (bug #1697)
def test_osr_esri_14():
    """State-plane EPSG authority codes are dropped when linear units change (bug #1697)."""
    # Feet: the CRS no longer matches the EPSG definition, so no authority code.
    feet_prj = ['PROJECTION STATEPLANE',
                'UNITS feet',
                'FIPSZONE 2600',
                'DATUM NAD83',
                'PARAMETERS']
    srs = osr.SpatialReference()
    srs.ImportFromESRI(feet_prj)
    assert srs.GetAuthorityCode('PROJCS') is None, \
        'Get epsg authority code inappropriately.'

    # Metres: identical to the EPSG definition, so the code must be present.
    meter_prj = ['PROJECTION STATEPLANE',
                 'UNITS meter',
                 'FIPSZONE 2600',
                 'DATUM NAD83',
                 'PARAMETERS']
    srs = osr.SpatialReference()
    srs.ImportFromESRI(meter_prj)
    assert srs.GetAuthorityCode('PROJCS') == '32104', \
        'Did not get epsg authority code when expected.'
###############################################################################
# Verify hotine oblique mercator handling, particularly handling
# of the rectified_grid_angle parameter.
def test_osr_esri_15():
    """Verify Hotine Oblique Mercator handling, particularly rectified_grid_angle."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('PROJCS["Bern_1898_Bern_LV03C",GEOGCS["GCS_Bern_1898_Bern",DATUM["D_Bern_1898",SPHEROID["Bessel_1841",6377397.155,299.1528128]],PRIMEM["Bern",7.439583333333333],UNIT["Degree",0.0174532925199433]],PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Azimuth",90.0],PARAMETER["Longitude_Of_Center",0.0],PARAMETER["Latitude_Of_Center",46.95240555555556],UNIT["Meter",1.0]]')
    expected = 'PROJCS["Bern_1898_Bern_LV03C",GEOGCS["GCS_Bern_1898_Bern",DATUM["D_Bern_1898",SPHEROID["Bessel_1841",6377397.155,299.1528128]],PRIMEM["Bern",7.43958333333333],UNIT["Degree",0.0174532925199433]],PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Scale_Factor",1.0],PARAMETER["Azimuth",90.0],PARAMETER["Longitude_Of_Center",0.0],PARAMETER["Latitude_Of_Center",46.9524055555556],UNIT["Meter",1.0]]'
    # morphFromESRI() must synthesize rectified_grid_angle for this projection.
    srs.MorphFromESRI()
    wkt = srs.ExportToWkt()
    assert wkt.find('rectified_grid_angle') != -1, \
        'Did not get rectified_grid_angle as expected.'
    # morphToESRI() must strip it again, restoring the ESRI form.
    srs.MorphToESRI()
    wkt = srs.ExportToWkt()
    assert wkt.find('rectified_grid_angle') == -1, \
        'did not get rectified_grid_angle removed as expected.'
    if wkt != expected:
        print('')
        print('Got: ', wkt)
        print('Expected: ', expected)
        pytest.fail('Did not get expected HOM projection after morphing')
###############################################################################
# Verify translation of equirectangular to equidistant cylindrical with
# cleanup of parameters.
def test_osr_esri_16():
    """Equirectangular (+proj=eqc) exports to ESRI as Equidistant_Cylindrical with clean parameters."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput('+proj=eqc +lat_0=0 +lat_ts=-10 +lon_0=2 +x_0=100000 +y_0=200000 +ellps=sphere')
    srs.MorphToESRI()

    wkt = srs.ExportToWkt()
    expected = 'PROJCS["unknown",GEOGCS["GCS_unknown",DATUM["D_Unknown_based_on_Normal_Sphere_r_6370997_ellipsoid",SPHEROID["Normal_Sphere_r_6370997",6370997.0,0.0]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Equidistant_Cylindrical"],PARAMETER["False_Easting",100000.0],PARAMETER["False_Northing",200000.0],PARAMETER["Central_Meridian",2.0],PARAMETER["Standard_Parallel_1",-10.0],UNIT["Meter",1.0]]'
    assert wkt == expected, 'Did not get expected equidistant cylindrical.'
###############################################################################
# Test LAEA support (#3017)
def test_osr_esri_17():
original = 'PROJCS["ETRS89 / ETRS-LAEA",GEOGCS["ETRS89",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS 1980",6378137,298.257222101]],PRIMEM["Greenwich",0],UNIT["degree",0.01745329251994328]],UNIT["metre",1],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["latitude_of_center",52],PARAMETER["longitude_of_center",10],PARAMETER["false_easting",4321000],PARAMETER["false_northing",3210000]]'
srs = osr.SpatialReference()
srs.SetFromUserInput(original)
expected = 'PROJCS["ETRS89_ETRS_LAEA",GEOGCS["GCS_ETRS_1989",DATUM["D_ETRS_1989",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["False_Easting",4321000.0],PARAMETER["False_Northing",3210000.0],PARAMETER["Central_Meridian",10.0],PARAMETER["Latitude_Of_Origin",52.0],UNIT["Meter",1.0]]'
srs.MorphToESRI()
wkt = srs.ExportToWkt()
if wkt not in (expected, expected.replace('ETRS89_ETRS_LAEA', 'ETRS89_ETRS-LAEA')):
print('')
print('Got: ', wkt)
print('Expected: ', expected)
pytest.fail('Did not get expected LAEA SRS after morphToESRI')
expected = 'PROJCS["ETRS89 / ETRS-LAEA",GEOGCS["ETRS89",DATUM["European_Terrestrial_Reference_System_1989",SPHEROID["GRS 1980",6378137,298.257222101],AUTHORITY["EPSG","6258"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Lambert_Azimuthal_Equal_Area"],PARAMETER["latitude_of_center",52],PARAMETER["longitude_of_center",10],PARAMETER["false_easting",4321000],PARAMETER["false_northing",3210000],UNIT["metre",1],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'
srs.MorphFromESRI()
wkt = srs.ExportToWkt()
if wkt != expected:
print('')
| |
+ t[5].code + ' ' + str(t[6]) + ' ' + str(t[7]) + ' ' + str(t[8]) + ' ' + t[9].code + ' ' + str(t[10]) + ' ' + str(t[11])
def p_list_id(t):
    '''list_id : list_id COMA ID
    | ID'''
    # Append the new identifier to the code built so far for the id list.
    node = GenerarC3D()
    if len(t) == 4:
        node.code += ' '.join([t[1].code, str(t[2]), str(t[3])])
    else:
        node.code += str(t[1])
    t[0] = node
def p_list_vls(t):
    '''list_vls : list_vls COMA exp
    | exp
    | '''
    # Value list: recursive case, single expression, or epsilon.
    node = GenerarC3D()
    if len(t) == 4:
        node.code += ' '.join([t[1].code, str(t[2]), t[3].code])
    elif len(t) == 2:
        node.code += t[1].code
    else:
        node.code += ''
    t[0] = node
def p_val_value(t):
    '''val_value : CADENA
    | CADENASIMPLE
    | NUMERO
    | NUM_DECIMAL
    | FECHA_HORA
    | TRUE
    | FALSE
    | NULL
    | F_HORA
    | FECHA
    | HORA'''
    # Every alternative is a single terminal; emit its textual form.
    node = GenerarC3D()
    node.code += str(t[1])
    t[0] = node
def p_ins_select(t):
    '''ins_select : ins_select UNION option_all ins_select PUNTO_COMA
    | ins_select INTERSECT option_all ins_select PUNTO_COMA
    | ins_select EXCEPT option_all ins_select PUNTO_COMA
    | SELECT arg_distict colum_list from PUNTO_COMA'''
    # Set operations (UNION/INTERSECT/EXCEPT) start with a nested select node,
    # while a plain SELECT starts with the keyword token; the isinstance test
    # distinguishes which concatenation pattern to use.
    if isinstance(t[1], GenerarC3D):
        t[0] = GenerarC3D()
        t[0].code += t[1].code + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code + ' ' + str(t[5])
    else:
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + t[2].code + ' ' + t[3].code + ' ' + t[4].code + ' ' + str(t[5])
def p_ins_select_parentesis(t):
    '''ins_select_parentesis : ins_select UNION option_all ins_select
    | ins_select INTERSECT option_all ins_select
    | ins_select EXCEPT option_all ins_select
    | SELECT arg_distict colum_list from'''
    # Same shape as p_ins_select but without the trailing semicolon, for
    # selects nested inside parentheses.
    if isinstance(t[1], GenerarC3D):
        t[0] = GenerarC3D()
        t[0].code += t[1].code + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code
    else:
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + t[2].code + ' ' + t[3].code + ' ' + t[4].code
def p_from(t):
    '''from : FROM table_list arg_where arg_having arg_group_by arg_order_by arg_limit arg_offset
    |'''
    # FROM clause with all optional trailing clauses, or epsilon.
    node = GenerarC3D()
    if len(t) == 9:
        node.code += ' '.join([str(t[1])] + [t[i].code for i in range(2, 9)])
    else:
        node.code += ''
    t[0] = node
def p_option_all(t):
    '''option_all : ALL
    | ''' # epsilon alternative
    # Optional ALL keyword for set operations.
    node = GenerarC3D()
    node.code += str(t[1]) if len(t) == 2 else ''
    t[0] = node
def p_arg_distict(t):
    # NOTE: "distict" is a misspelling of "distinct", but the nonterminal name
    # is referenced from other grammar docstrings, so it must not be renamed here.
    '''arg_distict : DISTINCT
    | '''
    # Optional DISTINCT keyword (epsilon otherwise).
    if len(t) == 2:
        t[0] = GenerarC3D()
        t[0].code += str(t[1])
    else:
        t[0] = GenerarC3D()
        t[0].code += ''
def p_colum_list(t):
    '''colum_list : s_list
    | SIGNO_POR '''
    # Either an explicit column list (a GenerarC3D node) or the '*' token.
    if isinstance(t[1], GenerarC3D):
        t[0] = GenerarC3D()
        t[0].code += t[1].code
    else:
        t[0] = GenerarC3D()
        t[0].code += str(t[1])
def p_s_list(t):
    '''s_list : s_list COMA columns as_id
    | columns as_id'''
    # Comma-separated select list; each element may carry an alias (as_id).
    if len(t) == 5:
        t[0] = GenerarC3D()
        t[0].code += t[1].code + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code
    else:
        t[0] = GenerarC3D()
        t[0].code += t[1].code + ' ' + t[2].code
def p_columns(t):
    '''columns : ID dot_table
    | exp'''
    # A column is either a (possibly table-qualified) identifier or an expression.
    if len(t) == 3:
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + t[2].code
    else:
        t[0] = GenerarC3D()
        t[0].code += t[1].code
def p_dot_table(t):
    '''dot_table : PUNTO ID
    | PUNTO SIGNO_POR
    | ''' # epsilon alternative
    # Optional ".column" / ".*" suffix after a table identifier.
    node = GenerarC3D()
    if len(t) == 3:
        node.code += str(t[1]) + ' ' + str(t[2])
    t[0] = node
def p_as_id(t):
    '''as_id : AS ID
    | AS CADENA
    | AS CADENASIMPLE
    | CADENA
    | ID
    | CADENASIMPLE
    | '''
    # Optional column alias: "AS name", a bare name/string, or nothing.
    if len(t) == 3:
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2])
    elif len(t) == 2:
        t[0] = GenerarC3D()
        t[0].code += str(t[1])
    else:
        t[0] = GenerarC3D()
        t[0].code += ''
def p_aggregates(t):
    '''aggregates : COUNT PARABRE param PARCIERRE
    | SUM PARABRE param PARCIERRE
    | AVG PARABRE param PARCIERRE
    | MAX PARABRE param PARCIERRE
    | MIN PARABRE param PARCIERRE '''
    # All aggregates share the shape FUNC ( param ).
    node = GenerarC3D()
    node.code += ' '.join([str(t[1]), str(t[2]), t[3].code, str(t[4])])
    t[0] = node
def p_functions(t):
    '''functions : math
    | trig
    | string_func
    | time_func
    '''
    # Forward the code of whichever function family was matched.
    node = GenerarC3D()
    node.code += t[1].code
    t[0] = node
def p_math(t):
    '''math : ABS PARABRE op_numero PARCIERRE
    | CBRT PARABRE op_numero PARCIERRE
    | CEIL PARABRE op_numero PARCIERRE
    | CEILING PARABRE op_numero PARCIERRE
    | DEGREES PARABRE op_numero PARCIERRE
    | DIV PARABRE op_numero COMA op_numero PARCIERRE
    | EXP PARABRE op_numero PARCIERRE
    | FACTORIAL PARABRE op_numero PARCIERRE
    | FLOOR PARABRE op_numero PARCIERRE
    | GCD PARABRE op_numero COMA op_numero PARCIERRE
    | LN PARABRE op_numero PARCIERRE
    | LOG PARABRE op_numero PARCIERRE
    | MOD PARABRE op_numero COMA op_numero PARCIERRE
    | PI PARABRE PARCIERRE
    | POWER PARABRE op_numero COMA op_numero PARCIERRE
    | ROUND PARABRE op_numero arg_num PARCIERRE
    | SQRT PARABRE op_numero PARCIERRE
    | SIGN PARABRE op_numero PARCIERRE
    | TRUNC PARABRE op_numero PARCIERRE
    | RANDOM PARABRE PARCIERRE
    | RADIANS PARABRE op_numero PARCIERRE
    | WIDTH_BUCKET PARABRE op_numero COMA op_numero COMA op_numero COMA op_numero PARCIERRE'''
    # Dispatch on production arity (len(t) counts t[0] plus matched symbols):
    if len(t) == 5:
        # one-argument functions: FUNC ( op_numero )
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4])
    elif len(t) == 7:
        # two-argument functions: DIV / GCD / MOD / POWER
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4]) + ' ' + t[5].code + ' ' + str(t[6])
    elif len(t) == 6:
        # ROUND ( op_numero arg_num ) -- arg_num is the optional ", NUMERO"
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + t[4].code + ' ' + str(t[5])
    elif len(t) == 4:
        # zero-argument functions: PI ( ) and RANDOM ( )
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3])
    else:
        # WIDTH_BUCKET with four numeric arguments
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4]) + ' ' + t[5].code + ' ' + str(t[6]) + ' ' + t[7].code + ' ' + str(t[8]) + ' ' + t[9].code + ' ' + str(t[10])
def p_arg_num(t):
    ''' arg_num : COMA NUMERO
    |'''
    # Optional ", NUMERO" suffix (e.g. the precision argument of ROUND).
    node = GenerarC3D()
    if len(t) == 3:
        node.code += str(t[1]) + ' ' + str(t[2])
    t[0] = node
def p_op_numero(t):
    ''' op_numero : NUMERO
    | NUM_DECIMAL
    | ID
    | SIGNO_MENOS NUMERO %prec UMENOS
    | SIGNO_MENOS NUM_DECIMAL %prec UMENOS'''
    # A numeric operand, optionally preceded by a unary minus.
    node = GenerarC3D()
    node.code += str(t[1]) if len(t) == 2 else str(t[1]) + ' ' + str(t[2])
    t[0] = node
def p_trig(t):
    '''trig : ACOS PARABRE op_numero PARCIERRE
    | ACOSD PARABRE op_numero PARCIERRE
    | ASIN PARABRE op_numero PARCIERRE
    | ASIND PARABRE op_numero PARCIERRE
    | ATAN PARABRE op_numero PARCIERRE
    | ATAND PARABRE op_numero PARCIERRE
    | ATAN2 PARABRE op_numero COMA op_numero PARCIERRE
    | ATAN2D PARABRE NUMERO COMA op_numero PARCIERRE
    | COS PARABRE op_numero PARCIERRE
    | COSD PARABRE op_numero PARCIERRE
    | COT PARABRE op_numero PARCIERRE
    | COTD PARABRE op_numero PARCIERRE
    | SIN PARABRE op_numero PARCIERRE
    | SIND PARABRE op_numero PARCIERRE
    | TAN PARABRE op_numero PARCIERRE
    | TAND PARABRE op_numero PARCIERRE
    | SINH PARABRE op_numero PARCIERRE
    | COSH PARABRE op_numero PARCIERRE
    | TANH PARABRE op_numero PARCIERRE
    | ASINH PARABRE op_numero PARCIERRE
    | ACOSH PARABRE op_numero PARCIERRE
    | ATANH PARABRE op_numero PARCIERRE '''
    # NOTE(review): ATAN2D's first argument is NUMERO rather than op_numero,
    # unlike ATAN2 -- looks unintentional, but the else branch below would break
    # on it (str token vs node at t[3]); TODO confirm with the grammar author.
    if len(t) == 5:
        # single-argument trig functions
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4])
    else:
        # two-argument forms (ATAN2 / ATAN2D)
        t[0] = GenerarC3D()
        t[0].code += str(t[1]) + ' ' + str(t[2]) + ' ' + t[3].code + ' ' + str(t[4]) + ' ' + t[5].code + ' ' + str(t[6])
def p_string_func(t):
'''string_func : LENGTH PARABRE s_param PARCIERRE
| SUBSTRING PARABRE s_param COMA NUMERO COMA NUMERO PARCIERRE
| TRIM PARABRE s_param PARCIERRE
| GET_BYTE PARABRE s_param COMA NUMERO PARCIERRE
| MD5 PARABRE s_param PARCIERRE
| SET_BYTE PARABRE s_param COMA NUMERO COMA s_param PARCIERRE
| SHA256 PARABRE s_param PARCIERRE
| SUBSTR PARABRE s_param COMA NUMERO COMA NUMERO PARCIERRE
| CONVERT PARABRE tipo_dato COMA ID dot_table PARCIERRE
| CONVERT PARABRE | |
= parameters.relationshipType()
charName = parameters.characteristic()
qualName = parameters.qualifier()
lhsCodes = parameters.lhsCodes()
rhsCodes = parameters.rhsCodes()
charType = parameters.characteristicType()
intName = parameters.intention()
intType = parameters.intentionType()
self.updateDatabase('call updateImpliedCharacteristic(:pName,:fCode,:tCode,:rt,:char,:qual,:cType)',{'pName':pName,'fCode':fromCode,'tCode':toCode,'rt':rtName,'char':charName,'qual':qualName,'cType':charType},'MySQL error updating implied characteristic')
for lblName,rtName in lhsCodes:
self.updateImpliedCharacteristicElement(charName,lblName,rtName)
for lblName,rtName in rhsCodes:
self.updateImpliedCharacteristicElement(charName,lblName,rtName)
self.updateImpliedCharacteristicIntention(charName,intName,intType)
def updateImpliedCharacteristicIntention(self,charName,intName,intType):
    # Update the intention associated with an implied characteristic via stored procedure.
    self.updateDatabase('call updateImpliedCharacteristicIntention(:char,:int,:type)',{'char':charName,'int':intName,'type':intType},'MySQL error updating implied characteristic intention')
def addImpliedCharacteristicElement(self,charName,lblName,rtName):
    # Attach a labelled element (with its relationship type) to an implied characteristic.
    self.updateDatabase('call addImpliedCharacteristicElement(:char,:lbl,:rt)',{'char':charName,'lbl':lblName,'rt':rtName},'MySQL error adding implied characteristic element')
def updateImpliedCharacteristicElement(self,charName,lblName,rtName):
    # Update an existing implied characteristic element via stored procedure.
    self.updateDatabase('call updateImpliedCharacteristicElement(:char,:lbl,:rt)',{'char':charName,'lbl':lblName,'rt':rtName},'MySQL error updating implied characteristic element')
def codeCount(self,codeName):
    # Return the usage count for the named code (first column of a single-row result).
    return self.responseList('select codeCount(:code)',{'code':codeName},'MySQL error getting code count')[0]
def addIntention(self,intention):
    # Persist an intention record; `intention` is a 4-sequence of
    # (reference name, reference type, intention name, intention type).
    refName, refType, intentionName, intentionType = intention[0], intention[1], intention[2], intention[3]
    self.updateDatabase('call addIntention(:ref,:rType,:int,:iType)',{'ref':refName,'rType':refType,'int':intentionName,'iType':intentionType},'MySQL error adding intention')
def addContribution(self,contribution):
    # Persist a contribution link; `contribution` is a 4-sequence of
    # (source, destination, means/end, value).
    srcName, destName, meansEnd, valName = contribution[0], contribution[1], contribution[2], contribution[3]
    self.updateDatabase('call addContribution(:src,:dest,:means,:val)',{'src':srcName,'dest':destName,'means':meansEnd,'val':valName},'MySQL error adding contribution')
def impliedCharacteristicIntention(self,synName,pName,fromCode,toCode,rtName):
    # Return the '#'-separated fields of the implied characteristic intention
    # as a list.
    # BUG FIX: the original called self.responseLis (missing the trailing 't'),
    # which raised AttributeError on every call; every sibling accessor uses
    # self.responseList.
    return self.responseList('select impliedCharacteristicIntention(:syn,:pName,:fCode,:tCode,:rt)',{'syn':synName,'pName':pName,'fCode':fromCode,'tCode':toCode,'rt':rtName},'MySQL error getting implied characteristic intention')[0].split('#')
def impliedCharacteristicElementIntention(self,ciName,elName):
    # Return the '#'-separated intention fields for an implied characteristic element.
    return self.responseList('select impliedCharacteristicElementIntention(:ci,:el)',{'ci':ciName,'el':elName},'MySQL error getting implied characteristic element intention')[0].split('#')
def updateImpliedCharacteristicElementIntention(self,ciName,elName,intName,intDim,meName,contName):
    # Update the intention (name, dimension, means/end, contribution) of one element
    # belonging to an implied characteristic.
    self.updateDatabase('call updateImpliedCharacteristicElementIntention(:ci,:el,:int,:dim,:me,:cont)',{'ci':ciName,'el':elName,'int':intName,'dim':intDim,'me':meName,'cont':contName},'MySQL error updating intention for element ' + elName + ' for implied characteristic ' + ciName)
def deniedGoals(self,codeName):
    # Return the goals denied by the given code.
    return self.responseList('call deniedGoals(:code)',{'code':codeName},'MySQL error getting denied goals')
def addLocations(self,parameters):
    # Create a new locations model (container row, then each location and link)
    # and return its freshly allocated id.
    locsId = self.newId()
    locsName = parameters.name()
    locDiagram = parameters.diagram()
    locations = parameters.locations()
    links = parameters.links()
    self.updateDatabase('call addLocations(:id,:name,:diag)',{'id':locsId,'name':locsName,'diag':locDiagram},'MySQL error adding locations')
    for location in locations:
        self.addLocation(locsId,location)
    for link in links:
        self.addLocationLink(locsId,link)
    return locsId
def updateLocations(self,parameters):
    # Updates are implemented as delete-then-re-add of the whole locations model.
    locsId = parameters.id()
    self.deleteLocations(locsId)
    self.addLocations(parameters)
def addLocation(self,locsId,location):
    # Add a single location to a locations model, together with its asset and
    # persona instances.
    locId = self.newId()
    locName = location.name()
    assetInstances = location.assetInstances()
    personaInstances = location.personaInstances()
    self.updateDatabase('call addLocation(:locsId,:locId,:locName)',{'locsId':locsId,'locId':locId,'locName':locName},'MySQL error adding location')
    for assetInstance in assetInstances:
        self.addAssetInstance(locId,assetInstance)
    for personaInstance in personaInstances:
        self.addPersonaInstance(locId,personaInstance)
def addAssetInstance(self,locId,assetInstance):
    # Add one asset instance to a location; assetInstance is (instance name, asset name).
    instanceName, assetName = assetInstance[0], assetInstance[1]
    instanceId = self.newId()
    self.updateDatabase('call addAssetInstance(:lId,:iId,:iName,:assName)',{'lId':locId,'iId':instanceId,'iName':instanceName,'assName':assetName},'MySQL error adding asset instance')
def addPersonaInstance(self,locId,personaInstance):
    # Add one persona instance to a location; personaInstance is (instance name, persona name).
    instanceName, personaName = personaInstance[0], personaInstance[1]
    instanceId = self.newId()
    self.updateDatabase('call addPersonaInstance(:lId,:iId,:iName,:pName)',{'lId':locId,'iId':instanceId,'iName':instanceName,'pName':personaName},'MySQL error adding persona instance')
def addLocationLink(self,locsId,link):
    # Add a link between two locations; link is (tail location, head location).
    tailLoc, headLoc = link[0], link[1]
    self.updateDatabase('call addLocationLink(:lId,:tLoc,:hLoc)',{'lId':locsId,'tLoc':tailLoc,'hLoc':headLoc},'MySQL error adding location link')
def getLocations(self,constraintId = -1):
    # Build a name-keyed dict of Locations objects, re-assembling each model
    # from its per-location asset/persona instances and link adjacency.
    locsRows = self.responseList('call getLocations(:const)',{'const':constraintId},'MySQL error getting locations')
    locationsDict = {}
    for locsId,locsName,locsDia in locsRows:
        locNames = self.getLocationNames(locsName)
        linkDict = self.getLocationLinks(locsName)
        locs = []
        for locName in locNames:
            assetInstances = self.getAssetInstances(locName)
            personaInstances = self.getPersonaInstances(locName)
            locLinks = []
            if locName in linkDict:
                locLinks = linkDict[locName]
            # -1 id: the Location object is transient here; ids live in the DB.
            loc = Location(-1,locName,assetInstances,personaInstances,locLinks)
            locs.append(loc)
        p = LocationsParameters(locsName,locsDia,locs)
        locations = ObjectFactory.build(locsId,p)
        locationsDict[locsName] = locations
    return locationsDict
def getLocationNames(self,locsName):
    # Names of the individual locations belonging to a locations model.
    return self.responseList('call getLocationNames(:locs)',{'locs':locsName},'MySQL error getting location names')
def getLocationLinks(self,locsName):
    # Build an undirected adjacency dict: each (tail, head) DB row contributes
    # an entry in both directions.
    rows = self.responseList('call getLocationLinks(:locs)',{'locs':locsName},'MySQL error getting location links')
    linkDict = {}
    for tailLoc,headLoc in rows:
        linkDict.setdefault(tailLoc, []).append(headLoc)
        linkDict.setdefault(headLoc, []).append(tailLoc)
    return linkDict
def getAssetInstances(self,locName):
    # Asset instances situated at the named location.
    return self.responseList('call getAssetInstances(:locs)',{'locs':locName},'MySQL error getting asset instances')
def getPersonaInstances(self,locName):
    # Persona instances situated at the named location.
    return self.responseList('call getPersonaInstances(:locs)',{'locs':locName},'MySQL error getting persona instances')
def deleteLocations(self,locsId):
    # Remove a locations model via the generic object-deletion helper.
    self.deleteObject(locsId,'locations')
def locationsRiskModel(self,locationsName,environmentName):
    # Build transient trace objects (id -1) for the location risk model rows.
    traceRows = self.responseList('call locationsRiskModel(:locs,:env)',{'locs':locationsName,'env':environmentName},'MySQL error getting location risk model')
    return [ObjectFactory.build(-1, DotTraceParameters(fromObjt, fromName, toObjt, toName))
            for fromObjt, fromName, toObjt, toName in traceRows]
def templateAssetMetrics(self,taName):
    # Metrics row for the named template asset (first row of the result).
    return self.responseList('call templateAssetMetrics(:ta)',{'ta':taName},'MySQL error getting template asset metrics')[0]
def riskModelElements(self,envName):
    # Names of the elements appearing in the environment's risk analysis model;
    # only the second column of each row is of interest.
    rows = self.responseList('call riskAnalysisModelElements(:env)',{'env':envName},'MySQL error getting risk analysis model elements')
    return [name for _, name in rows]
def assetThreatRiskLevel(self,assetName,threatName,envName):
    # Risk level for an asset/threat pair in the given environment (scalar result).
    return self.responseList('call assetThreatRiskLevel(:ass,:thr,:env)',{'ass':assetName,'thr':threatName,'env':envName},'MySQL error getting asset threat risk level')[0]
def assetRiskLevel(self,assetName,envName):
    # Overall risk level for an asset in the given environment (scalar result).
    return self.responseList('call assetRiskLevel(:ass,:env)',{'ass':assetName,'env':envName},'MySQL error getting asset risk level')[0]
def dimensionSummary(self,dimName,envName):
    # Summary rows for a model dimension in an environment. NOTE(review): the
    # procedure name is built by concatenating dimName -- callers are presumably
    # internal and pass fixed dimension names, but this is injectable if dimName
    # ever comes from user input; TODO confirm call sites.
    return self.responseList('call ' + dimName + 'Summary(:name)',{'name':envName},'MySQL error getting ' + dimName + ' summary for environment ' + envName)
def createDatabase(self,dbName,session_id):
    # Create a new per-user CAIRIS database (name is prefixed with the DB user),
    # grant privileges, point the session at it, reset it, reconnect, and seed
    # default data.
    if self.conn is not None:
        # Drop the current connection before switching databases.
        self.conn.close()
    b = Borg()
    ses_settings = b.get_settings(session_id)
    dbHost = ses_settings['dbHost']
    dbPort = ses_settings['dbPort']
    rPasswd = ses_settings['rPasswd']  # root password, needed to create DBs/grants
    dbUser = ses_settings['dbUser']
    dbPasswd = ses_settings['dbPasswd']
    # Databases are namespaced per user: <user>_<name>.
    dbName = dbUser + '_' + dbName
    createDatabaseAndPrivileges(rPasswd,dbHost,dbPort,dbUser,dbPasswd,dbName)
    b.settings[session_id]['dbName'] = dbName
    self.clearDatabase(session_id)
    self.reconnect(True,session_id)
    rootDir = b.cairisRoot
    createDefaults(rootDir,dbHost,dbPort,dbUser,dbPasswd,dbName)
def openDatabase(self,dbName,session_id):
    # Point the session at an existing database and reconnect to it.
    b = Borg()
    b.settings[session_id]['dbName'] = dbName
    self.reconnect(True,session_id)
def showDatabases(self,session_id):
    # List the user's databases (excluding the one currently open), with the
    # per-user "<owner>_" prefix stripped from each name.
    b = Borg()
    ses_settings = b.get_settings(session_id)
    dbUser = ses_settings['dbUser']
    dbName = ses_settings['dbName']
    return [(dbn.split(owner + '_')[1], owner)
            for dbn, owner in databases(dbUser)
            if dbn != dbName]
def checkPermissions(self,reqDbName,session_id):
    # Return True when the session's user may operate on the requested database:
    # it must appear in the user's database list and must be neither the
    # currently open database nor the user's default database.
    # Cleanup: removed the unused `dbs` accumulator and replaced the manual
    # scan with any(); behaviour is unchanged.
    b = Borg()
    ses_settings = b.get_settings(session_id)
    dbUser = ses_settings['dbUser']
    currentDbName = ses_settings['dbName']
    defaultDbName = dbUser + '_default'
    # Requests use the unprefixed name; stored names carry the user prefix.
    reqDbName = dbUser + '_' + reqDbName
    restrictedDbs = [currentDbName,defaultDbName]
    rows = databases(dbUser)
    return any(dbName == reqDbName
               for dbName, owner in rows
               if dbName not in restrictedDbs)
def deleteDatabase(self,dbName,session_id):
    # Drop a user's CAIRIS database plus its ownership and privilege rows.
    # Raises DatabaseProxyException when the caller may not remove it or when
    # MySQL reports an error.
    b = Borg()
    ses_settings = b.get_settings(session_id)
    dbHost = ses_settings['dbHost']
    dbPort = ses_settings['dbPort']
    rPasswd = ses_settings['rPasswd']  # root credentials are needed to drop DBs
    if (self.checkPermissions(dbName,session_id) == False):
        exceptionText = 'You cannot remove this database.'
        raise DatabaseProxyException(exceptionText)
    dbUser = canonicalDbUser(ses_settings['dbUser'])
    dbName = canonicalDbName(dbUser + '_' + dbName)
    try:
        dbEngine = create_engine('mysql+mysqldb://root'+':'+rPasswd+'@'+dbHost+':'+str(dbPort))
        tmpConn = scoped_session(sessionmaker(bind=dbEngine))
        # NOTE(review): statements are assembled by concatenation; dbName and
        # dbUser pass through canonicalDbName/canonicalDbUser first, which
        # presumably sanitise them -- verify those helpers reject quotes/backticks.
        stmts = ['drop database if exists `' + dbName + '`',
                 'delete from cairis_owner.db_owner where db = "' + dbName + '" and owner = "' + dbUser + '"',
                 'delete from mysql.db where Db = "' + dbName + '"']
        session = tmpConn()
        for stmt in stmts:
            session.execute(stmt)
        session.close()
        tmpConn.remove()
    except OperationalError as e:
        # BUG FIX: the message previously said 'creating'; this path deletes.
        exceptionText = 'MySQL error deleting CAIRIS database ' + dbName + '(message:' + format(e) + ')'
        raise DatabaseProxyException(exceptionText)
    except DatabaseError as e:
        # BUG FIX: 'id,msg = e' cannot unpack an exception on Python 3;
        # unpack e.args instead.
        id,msg = e.args
        exceptionText = 'MySQL error deleting CAIRIS database ' + dbName + '(id:' + str(id) + ',message:' + msg + ')'
        raise DatabaseProxyException(exceptionText)
def getUseCaseRequirements(self,ucName):
    # Requirements associated with the named use case.
    return self.responseList('call useCaseRequirements(:uc)',{'uc':ucName},'MySQL error getting requirements associated with use case ' + ucName)
def getUseCaseGoals(self,ucName,envName):
    # Goals associated with the named use case in the given environment.
    return self.responseList('call useCaseGoals(:uc,:env)',{'uc':ucName,'env':envName},'MySQL error getting goals associated with use case ' + ucName)
def synopsisId(self,synTxt):
    # Id of the synopsis with the given text (scalar result).
    return self.responseList('select synopsisId(:syn)',{'syn':synTxt},'MySQL error finding synopsis id for text ' + synTxt)[0]
def hasContribution(self,contType,rsName,csName):
    # True when a contribution link exists between the two synopses; the stored
    # function queried depends on the contribution type ('usecase' vs reference).
    # Idiom cleanup: replaced 'if x == 1: return True else: return False' with
    # a direct boolean expression; behaviour is unchanged.
    sqlTxt = 'hasReferenceContribution'
    if contType == 'usecase':
        sqlTxt = 'hasUseCaseContribution'
    hasRC = self.responseList('select ' + sqlTxt + '(:rName,:cName)',{'rName':rsName,'cName':csName},'MySQL error checking contribution')[0]
    return hasRC == 1
def removeUseCaseContributions(self,ucId):
    # Delete all contribution links belonging to the given use case id.
    self.updateDatabase('call removeUseCaseContributions(:id)',{'id':ucId},'MySQL error removing use case contribution')
def getDataFlows(self,dfName='',fromName='',fromType='',toName='',toType='',envName=''):
    # Return DataFlow objects matching the (optional) filter arguments; empty
    # strings act as wildcards on the stored-procedure side.
    dfRows = self.responseList('call getDataFlows(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':dfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':envName},'MySQL error getting data flows')
    dataFlows = []
    # NOTE(review): the loop variables deliberately shadow the filter
    # parameters with each row's concrete values.
    for dfName,dfType,envName,fromName,fromType,toName,toType in dfRows:
        tags = self.getDataFlowTags(dfName,fromType,fromName,toType,toName,envName)
        dfAssets = self.getDataFlowAssets(dfName,fromName,fromType,toName,toType,envName)
        dfObs = self.getDataFlowObstacles(dfName,fromName,fromType,toName,toType,envName)
        parameters = DataFlowParameters(dfName,dfType,envName,fromName,fromType,toName,toType,dfAssets,dfObs,tags)
        df = ObjectFactory.build(-1,parameters)
        dataFlows.append(df)
    return dataFlows
def getDataFlowAssets(self,dfName,fromName,fromType,toName,toType,envName):
    # Assets carried by the identified data flow.
    return self.responseList('call getDataFlowAssets(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':dfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':envName},'MySQL error getting assets for data flow ' + dfName)
def getDataFlowObstacles(self,dfName,fromName,fromType,toName,toType,envName):
    # Obstacles associated with the identified data flow.
    return self.responseList('call getDataFlowObstacles(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':dfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':envName},'MySQL error getting obstacles for data flow ' + dfName)
def addDataFlow(self,parameters):
    # Create a data flow, then attach its tags, carried assets, and obstacles.
    dfName = parameters.name()
    dfType = parameters.type()
    envName = parameters.environment()
    fromName = parameters.fromName()
    fromType = parameters.fromType()
    toName = parameters.toName()
    toType = parameters.toType()
    dfAssets = parameters.assets()
    dfObs = parameters.obstacles()
    tags = parameters.tags()
    self.updateDatabase('call addDataFlow(:df,:dfType,:env,:fName,:fType,:tName,:tType)',{'df':dfName,'dfType':dfType,'env':envName,'fName':fromName,'fType':fromType,'tName':toName,'tType':toType},'MySQL error adding data flow')
    self.addDataFlowTags(dfName,fromType,fromName,toType,toName,envName,tags)
    for dfAsset in dfAssets:
        self.addDataFlowAsset(dfName,envName,fromType,fromName,toType,toName,dfAsset)
    for dfOb in dfObs:
        self.addDataFlowObstacle(dfName,envName,fromType,fromName,toType,toName,dfOb)
def addDataFlowAsset(self,dfName,envName,fromType,fromName,toType,toName,dfAsset):
    # Attach one asset to the identified data flow.
    self.updateDatabase('call addDataFlowAsset(:df,:env,:fromType,:fromName,:toType,:toName,:ass)',{'df':dfName,'env':envName,'fromType':fromType,'fromName':fromName,'toType':toType,'toName':toName,'ass':dfAsset},'MySQL error adding data flow asset')
def addDataFlowObstacle(self,dfName,envName,fromType,fromName,toType,toName,dfOb):
    # Attach one obstacle to the identified data flow; dfOb is a 3-sequence of
    # (obstacle name, keyword, context).
    obsName, kwd, dfoContext = dfOb
    params = {'df':dfName,'env':envName,'fromType':fromType,'fromName':fromName,'toType':toType,'toName':toName,'ob':obsName,'kwd':kwd,'dfoContext':dfoContext}
    self.updateDatabase('call addDataFlowObstacle(:df,:env,:fromType,:fromName,:toType,:toName,:ob,:kwd,:dfoContext)',params,'MySQL error adding data flow obstacle')
def updateDataFlow(self,oldDfName,oldFromName,oldFromType,oldToName,oldToType,oldEnvName,parameters):
    # Replace a data flow: delete its old assets/obstacles, update the flow row
    # itself, then re-attach tags, assets and obstacles under the new identity.
    dfName = parameters.name()
    dfType = parameters.type()
    envName = parameters.environment()
    fromName = parameters.fromName()
    fromType = parameters.fromType()
    toName = parameters.toName()
    toType = parameters.toType()
    dfAssets = parameters.assets()
    dfObs = parameters.obstacles()
    tags = parameters.tags()
    # NOTE(review): the two delete calls mix old names (df/env) with NEW
    # endpoint names (from/to). That only matches the existing rows when the
    # endpoints are unchanged -- looks suspicious; TODO confirm against the
    # stored procedures before changing.
    session = self.updateDatabase('call deleteDataFlowAssets(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':oldDfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':oldEnvName},'MySQL error deleting data flow assets',None,False)
    self.updateDatabase('call deleteDataFlowObstacles(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':oldDfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':oldEnvName},'MySQL error deleting data flow obstacles',session,False)
    self.updateDatabase('call updateDataFlow(:oldDfName,:oldFromName,:oldFromType,:oldToName,:oldToType,:oldEnvName,:dfName,:fromName,:fromType,:toName,:toType,:envName,:dfType)',{'oldDfName':oldDfName,'oldFromName':oldFromName,'oldFromType':oldFromType,'oldToName':oldToName,'oldToType':oldToType,'oldEnvName':oldEnvName,'dfName':dfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'envName':envName,'dfType':dfType},'MySQL error updating data flow',session)
    self.addDataFlowTags(dfName,fromType,fromName,toType,toName,envName,tags)
    for dfAsset in dfAssets:
        self.addDataFlowAsset(dfName,envName,fromType,fromName,toType,toName,dfAsset)
    for dfOb in dfObs:
        self.addDataFlowObstacle(dfName,envName,fromType,fromName,toType,toName,dfOb)
def deleteDataFlow(self,dfName,fromName,fromType,toName,toType,envName):
    # Delete the identified data flow.
    self.updateDatabase('call deleteDataFlow(:df,:fromName,:fromType,:toName,:toType,:env)',{'df':dfName,'fromName':fromName,'fromType':fromType,'toName':toName,'toType':toType,'env':envName},'MySQL Error deleting data flow')
def dataFlowDiagram(self,envName,filterType = 'None',filterElement = ''):
    # Rows for the environment's data flow diagram, optionally filtered.
    return self.responseList('call dataFlowDiagram(:env,:ft,:fe)',{'env':envName,'ft':filterType,'fe':filterElement},'MySQL error getting data flow diagram')
def personalDataFlowDiagram(self,envName,filterElement = ''):
    # Rows for the environment's personal-data flow diagram, optionally filtered.
    return self.responseList('call personalDataFlowDiagram(:env,:fe)',{'env':envName,'fe':filterElement},'MySQL error getting personal data flow diagram')
def relabelRequirements(self,reqReference):
    # Renumber the requirements attached to the given reference.
    self.updateDatabase('call relabelRequirements(:reqReference)',{'reqReference':reqReference},'MySQL error relabelling requirements')
def getTrustBoundaries(self,constraintId = -1):
    # Assemble TrustBoundary objects, with per-environment component lists and
    # privilege levels.
    tbRows = self.responseList('call getTrustBoundaries(:id)',{'id':constraintId},'MySQL error getting trust boundaries')
    tbs = []
    for tbId,tbName,tbType,tbDesc in tbRows:
        tags = self.getTags(tbName,'trust_boundary')
        components = {}
        privileges = {}
        for environmentId,environmentName in self.dimensionEnvironments(tbId,'trust_boundary'):
            components[environmentName] = self.trustBoundaryComponents(tbId,environmentId)
            privileges[environmentName] = self.trustBoundaryPrivilege(tbId,environmentId)
        tbs.append(TrustBoundary(tbId,tbName,tbType,tbDesc,components,privileges,tags))
    return tbs
def trustBoundaryComponents(self,tbId, envId):
    # Components inside a trust boundary for one environment.
    return self.responseList('call trustBoundaryComponents(:tbId,:envId)',{'tbId':tbId,'envId':envId},'MySQL error getting trust boundary components for trust boundary id ' + str(tbId))
def trustBoundaryPrivilege(self,tbId, envId):
    # Privilege level of a trust boundary in one environment (scalar result).
    return self.responseList('select trustBoundaryPrivilege(:tbId,:envId)',{'tbId':tbId,'envId':envId},'MySQL error getting the trust boundary privilege level for trust boundary id ' + str(tbId))[0]
def addTrustBoundary(self,tb):
    # Create a trust boundary with its tags, per-environment components, and
    # privilege levels (defaulting each environment's privilege to 'None').
    tbId = self.newId()
    self.updateDatabase("call addTrustBoundary(:id,:name,:type,:desc)",{'id':tbId,'name':tb.name(),'type':tb.type(),'desc':tb.description()},'MySQL error adding trust boundary ' + str(tbId))
    self.addTags(tb.name(),'trust_boundary',tb.tags())
    defaultPrivilegeLevels = {}
    for environmentName in list(tb.components().keys()):
        defaultPrivilegeLevels[environmentName] = 'None'
        # tbComponentType is carried in the tuple but not used here --
        # presumably consumed elsewhere; TODO confirm.
        for tbComponentType,tbComponent in tb.components()[environmentName]:
            self.addTrustBoundaryComponent(tbId,environmentName,tbComponent)
    tbPrivilegeLevels = tb.privilegeLevels()
    if (len(tbPrivilegeLevels) == 0):
        tbPrivilegeLevels = defaultPrivilegeLevels
    for environmentName in list(tbPrivilegeLevels.keys()):
        self.addTrustBoundaryPrivilege(tbId,environmentName,self.privilegeValue(tbPrivilegeLevels[environmentName]))
def privilegeValue(self,pName):
    # Numeric value for the named privilege level (scalar result).
    return self.responseList('select privilegeValue(:pName)',{'pName':pName},'MySQL error getting privilege value')[0]
def addTrustBoundaryComponent(self,tbId,envName,tbComponent):
self.updateDatabase('call addTrustBoundaryComponent(:id,:environment,:component)',{'id':tbId,'environment':envName,'component':tbComponent},'MySQL error adding trust boundary component ' + tbComponent + | |
import os
import torch
import torch.nn as nn
from utils import exact_interpolate, np_image_to_normed_tensor, normed_tensor_to_np_image, create_scale_pyramid
class Conv2DBlock(nn.Module):
    """Combine Conv2d -> (optional) BatchNorm2d -> (optional) activation into one block."""
    # the 0.2 negative slope is given in the supplementary materials

    def __init__(self, in_channels, out_channels, kernel_size,  # conv arguments
                 use_bn=True, activation=None,                  # customization of following blocks
                 conv_kwargs=None, bn_kwargs=None):             # optional kwargs for conv and bn
        """Build the conv / batch-norm / activation sub-modules.

        `use_bn=False` and `activation=None` substitute `nn.Identity`, so
        `forward` is always conv -> bn -> activation regardless of configuration.
        """
        # Mutable default arguments are dangerous, so the kwarg dicts default to
        # None and are materialised here. BUG FIX: the original repeated the
        # `bn_kwargs is None` check a second time after super().__init__();
        # the redundant copy has been removed (no behavioural change).
        if conv_kwargs is None:
            conv_kwargs = {}
        if bn_kwargs is None:
            bn_kwargs = {}
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, **conv_kwargs)
        self.bn = nn.BatchNorm2d(out_channels, **bn_kwargs) if use_bn else nn.Identity()
        self.activ = activation if activation else nn.Identity()

    def forward(self, x):
        """Apply convolution, then batch norm, then the activation."""
        return self.activ(self.bn(self.conv(x)))
class SGNet(nn.Module):
    """
    Network used for both SinGAN generators and discriminators. The body is
    `num_blocks` (conv, batch-norm, leaky-relu) blocks with shared kernel count
    and 3x3 kernels; the last block is configured separately. Up-front zero
    padding compensates for the per-block shrinkage, so the output keeps the
    input's spatial shape.
    """

    def __init__(self, num_blocks=5, kernel_count=32, kernel_size=3,   # architecture customization
                 final_activation=nn.Tanh(), final_bn=False,           # final layer cust.
                 input_channels=3, output_channels=3):                 # channel counts
        super().__init__()

        # Each 3x3 conv trims one pixel per side, so pad num_blocks up front
        # to preserve the input shape end-to-end.
        def hidden_block(c_in, c_out):
            # 0.2 negative slope as given in the paper
            return Conv2DBlock(c_in, c_out, kernel_size,
                               activation=nn.LeakyReLU(negative_slope=0.2))

        layers = [nn.ZeroPad2d(num_blocks),
                  hidden_block(input_channels, kernel_count)]
        layers.extend(hidden_block(kernel_count, kernel_count)
                      for _ in range(num_blocks - 2))
        # The last block differs: tanh for a generator, no activation for a
        # critic, with batch norm configurable as well.
        layers.append(Conv2DBlock(kernel_count, output_channels, kernel_size,
                                  final_bn, final_activation))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Run the input through the padded conv stack."""
        return self.model(x)
class NoiseSampler:
    """
    Callable that draws Gaussian noise shaped like a reference tensor.

    Centralizing noise generation here means the distribution could be
    swapped (e.g. normal -> uniform) by changing a single line. Implemented
    as an object rather than a closure so instances can be pickled.
    """

    def __init__(self, noise_std):
        # standard deviation applied to the unit-variance samples
        self.noise_std = noise_std

    def __call__(self, x):
        """Return ``noise_std`` * N(0, 1) noise with the same shape as ``x``."""
        return self.noise_std * torch.randn_like(x)
class SGGen(nn.Module):
    """
    Full single-scale generator: wraps a plain SGNet with additive input
    noise and a residual (input-to-output) connection.
    """

    def __init__(self, sgnet, noise_std):
        super().__init__()
        self.sgnet = sgnet
        self.noise_sampler = NoiseSampler(noise_std)

    def forward(self, x, z=None):
        """Generate from image ``x``; noise ``z`` is sampled when not given."""
        noise = self.noise_sampler(x) if z is None else z
        generated = self.sgnet(x + noise)  # image + noise as network input
        return generated + x               # residual connection
class MultiScaleSGGenView(nn.Module):
    """
    A 'view' over a list of generators that makes the whole stack look like
    a single generator: the input goes to the coarsest generator, each
    intermediate output is upsampled by ``scaling_factor`` (with
    ``scaling_mode``) and fed to the next generator, until the finest one
    produces the final image. All generators except the finest (last) are
    frozen and switched to eval mode at construction time.

    Attributes:
        generators: list of SGGen generator networks, coarsest first,
            stored as an nn.ModuleList
        scaling_factor: float scale multiplier between adjacent generators
            (e.g. 1.25)
        scaling_mode: scaling-mode string, must be a valid mode for
            torch.nn.functional.interpolate

    Note about scaling: integer rounding at every upsampling step can drift
    from the theoretical sizes (250 * 0.75**8 ~ 25.08, but stepwise rounding
    gives 25, and scaling 25 back up no longer reproduces the original
    ladder of sizes). That breaks exact shape matches, e.g. for a
    reconstruction loss. ``forward`` therefore accepts an optional
    ``exact_size`` (float, float) pair carrying the theoretical size (e.g.
    (25.08, 25.08) instead of (25, 25)) that is tracked through the
    upsampling chain to guarantee exact shape matches.
    """

    def __init__(self, generators, scaling_factor, scaling_mode='bicubic'):
        super().__init__()
        # nn.ModuleList so .parameters(), .to(), etc. see the generators
        self.generators = nn.ModuleList(generators)
        self.scaling_factor = scaling_factor
        self.scaling_mode = scaling_mode
        # only the newest (finest) generator remains trainable
        for frozen in self.generators[:-1]:
            frozen.requires_grad_(False)
            frozen.eval()

    def forward(self, x, exact_size=None, z_input=None):
        """
        Forward pass through the whole generator pyramid.

        Args:
            x: a 4D (N, C, H, W) tensor input to the coarsest generator
            exact_size: optional (H, W) float pair with the theoretical
                input size (defaults to x's size); see the class docstring
            z_input: optional list of 4D per-scale noise tensors; when None,
                each generator samples its own noise
        """
        if exact_size is None:
            exact_size = tuple(float(d) for d in x.shape[2:4])  # (H, W)
        out = None
        last = len(self.generators) - 1
        for idx, gen in enumerate(self.generators):
            # take the noise from the caller's list when one was supplied
            noise = z_input[idx] if z_input is not None else None
            out = gen(x, noise)
            if idx != last:
                # upsample for the next (finer) scale, carrying the exact
                # floating-point dimensions along
                x, exact_size = exact_interpolate(out, self.scaling_factor,
                                                  exact_size, self.scaling_mode)
        return out
class FixedInputSGGenView(nn.Module):
    """
    Binds a fixed coarsest input (and its exact float size) to a generator
    view, so ``forward`` can be called without repeating the zero / original
    image input and exact size every time.
    """

    def __init__(self, sgnet_view, coarsest_input, coarsest_exact_size=None):
        super().__init__()
        self.sgnet_view = sgnet_view
        # default the exact size to the input's actual (H, W)
        self.coarsest_exact_size = (
            tuple(float(d) for d in coarsest_input.shape[2:4])
            if coarsest_exact_size is None else coarsest_exact_size)
        self.coarsest_input = coarsest_input

    def forward(self, z_input=None, num_samples=1):
        """Generate ``num_samples`` images by expanding the fixed input along
        the batch dimension. A large num_samples can eat a lot of memory, so
        the notebook sticks to num_samples == 1."""
        batch = self.coarsest_input.expand(num_samples, -1, -1, -1)
        return self.sgnet_view.forward(batch, self.coarsest_exact_size, z_input)
def save_model(model_path, image, generators, critics, upsampling_factor, upsampling_mode, downsampling_mode):
"""
A function to save a trained model to the given path.
Args:
model_path: str, path to save the model to
image: original image used to train, as a [-1, 1] torch tensor
generators: list of trained SGGen generators
critics: list of trained SGNet critics
upsampling_factor: float, scaling factor used when training the model
upsampling_mode: str, mode used when upsampling generator outputs (e.g. bilinear)
downsampling_mode: str, mode used when downsampling the original image (e.g. bicubic)
"""
os.makedirs(os.path.dirname(model_path), exist_ok=True)
# TODO: could change to encode | |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Tokenization.
"""
import unicodedata
import collections
import six
import sentencepiece as spm
SPIECE_UNDERLINE = u"▁"
def preprocess_text(inputs, remove_space=True, do_lower_case=True):
    """Normalize raw text before sentence-piece encoding: collapse
    whitespace, normalize LaTeX-style quotes, strip combining accents via
    NFKD, and optionally lower-case."""
    text = " ".join(inputs.strip().split()) if remove_space else inputs
    text = text.replace("``", '"').replace("''", '"')
    if six.PY2 and isinstance(text, str):
        text = text.decode("utf-8")
    text = unicodedata.normalize("NFKD", text)
    # drop combining marks left behind by the NFKD decomposition
    text = "".join(c for c in text if not unicodedata.combining(c))
    return text.lower() if do_lower_case else text
def encode_pieces(sp_model, text, return_unicode=True, sample=False):
    """Turn a sentence into word pieces with a SentencePiece model.

    Args:
        sp_model: a loaded sentencepiece.SentencePieceProcessor.
        text: input sentence (str).
        return_unicode: on Python 2, decode the returned pieces to unicode.
        sample: if True, sample a segmentation instead of the best one.

    Returns:
        list of word-piece strings.
    """
    text = preprocess_text(text)
    if six.PY2 and isinstance(text, unicode):
        text = text.encode('utf-8')

    if not sample:
        pieces = sp_model.EncodeAsPieces(text)
    else:
        pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1)

    new_pieces = []
    for piece in pieces:
        # Split pieces like "9," so digits and the trailing comma become
        # separate pieces; the digit part is re-encoded on its own.
        if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
            cur_pieces = sp_model.EncodeAsPieces(
                piece[:-1].replace(SPIECE_UNDERLINE, ''))
            # drop the spurious leading underline the re-encoding may add
            if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                if len(cur_pieces[0]) == 1:
                    cur_pieces = cur_pieces[1:]
                else:
                    cur_pieces[0] = cur_pieces[0][1:]
            cur_pieces.append(piece[-1])
            new_pieces.extend(cur_pieces)
        else:
            new_pieces.append(piece)

    # note(zhiliny): convert back to unicode for py2
    if six.PY2 and return_unicode:
        ret_pieces = []
        for piece in new_pieces:
            if isinstance(piece, str):
                # bug fix: str.decode takes the encoding name, not the
                # string itself (was `piece.decode(piece, "utf-8")`)
                piece = piece.decode("utf-8")
            ret_pieces.append(piece)
        new_pieces = ret_pieces
    return new_pieces
def encode_ids(sp_model, text, sample=False):
    """Encode ``text`` straight to a list of sentence-piece ids."""
    pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample)
    return [sp_model.PieceToId(piece) for piece in pieces]
def convert_to_unicode(text):
    """
    Convert text into unicode type.

    Args:
        text: input str or UTF-8 bytes.
    Returns:
        input as str.
    Raises:
        ValueError: for any non-string input type.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        # undecodable byte sequences are dropped rather than raising
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
def vocab_to_dict_key_token(vocab_file):
    """Load a vocab file into an OrderedDict mapping token -> id.

    One token per line; ids follow line order. The file is read explicitly
    as UTF-8 so the result does not depend on the platform locale (BERT
    vocab files are UTF-8).

    Args:
        vocab_file: path to vocab.txt.
    Returns:
        collections.OrderedDict of token -> index.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # iterating the file is equivalent to readline-until-empty; blank
        # lines still consume an index, preserving the original behavior
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab
def vocab_to_dict_key_id(vocab_file):
    """Load a vocab file into an OrderedDict mapping id -> token.

    Inverse orientation of vocab_to_dict_key_token. The file is read
    explicitly as UTF-8 so the result does not depend on the platform
    locale.

    Args:
        vocab_file: path to vocab.txt.
    Returns:
        collections.OrderedDict of index -> token.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # blank lines still consume an index, as in the original loop
        for index, line in enumerate(reader):
            vocab[index] = line.strip()
    return vocab
def whitespace_tokenize(text):
    """Trim the text and split it on runs of whitespace; [] for blank input."""
    stripped = text.strip()
    return stripped.split() if stripped else []
def convert_tokens_to_ids(vocab_file, tokens):
    """
    Convert tokens to ids.

    Args:
        vocab_file: path to vocab.txt.
        tokens: list of tokens.
    Returns:
        list of ids (KeyError for tokens missing from the vocab).
    """
    vocab_dict = vocab_to_dict_key_token(vocab_file)
    return [vocab_dict[token] for token in tokens]
def convert_ids_to_tokens(vocab_file, ids):
    """
    Convert ids to tokens.

    Args:
        vocab_file: path to vocab.txt.
        ids: list of ids.
    Returns:
        list of tokens (KeyError for ids missing from the vocab).
    """
    vocab_dict = vocab_to_dict_key_id(vocab_file)
    return [vocab_dict[_id] for _id in ids]
class FullTokenizer():
    """
    End-to-end tokenizer. Uses a SentencePiece model when ``spm_model_file``
    is given, otherwise basic + wordpiece tokenization driven by a vocab
    file. Both back-ends expose the same ``vocab_dict`` mapping.
    """

    def __init__(self, vocab_file, do_lower_case=True, spm_model_file=None):
        self.vocab_dict = None
        self.sp_model = None
        if spm_model_file:
            self.sp_model = spm.SentencePieceProcessor()
            self.sp_model.Load(spm_model_file)
            # Note(mingdachen): for a consistent API, generate a vocabulary
            # for the sentence-piece tokenizer too.
            self.vocab_dict = {self.sp_model.IdToPiece(i): i
                               for i in range(self.sp_model.GetPieceSize())}
        else:
            self.vocab_dict = vocab_to_dict_key_token(vocab_file)
        self.do_lower_case = do_lower_case
        self.basic_tokenize = BasicTokenizer(do_lower_case)
        self.wordpiece_tokenize = WordpieceTokenizer(self.vocab_dict)

    def tokenize(self, text):
        """
        Do full tokenization.

        Args:
            text: str of text.
        Returns:
            list of tokens.
        """
        if self.sp_model:
            return encode_pieces(self.sp_model, text, return_unicode=False)
        pieces = []
        text = convert_to_unicode(text)
        for word in self.basic_tokenize.tokenize(text):
            pieces.extend(self.wordpiece_tokenize.tokenize(word))
        return pieces

    def convert_tokens_to_ids(self, tokens):
        """Map a list of tokens to integer ids."""
        if self.sp_model:
            return [self.sp_model.PieceToId(token) for token in tokens]
        return [self.vocab_dict[token] for token in tokens]

    def convert_ids_to_tokens(self, ids):
        """Map a list of integer ids back to tokens."""
        if self.sp_model:
            return [self.sp_model.IdToPiece(id_) for id_ in ids]
        return [self.vocab_dict[_id] for _id in ids]
class BasicTokenizer():
    """
    Basic tokenizer: cleans invalid characters, spaces out CJK characters,
    optionally lower-cases and strips accents, then splits punctuation off
    into separate tokens.
    """

    def __init__(self, do_lower_case=True):
        # whether tokens are lower-cased (and accent-stripped) first
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """
        Do basic tokenization.

        Args:
            text: text in unicode.
        Returns:
            a list of tokens split from text.
        """
        text = self._clean_text(text)
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        # re-join and re-split to normalize any whitespace introduced above
        return whitespace_tokenize(" ".join(split_tokens))

    def _run_strip_accents(self, text):
        """Strips accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        return "".join(char for char in text
                       if unicodedata.category(char) != "Mn")

    def _run_split_on_punc(self, text):
        """Splits punctuation off into separate single-char tokens."""
        # (removed a dead `i` counter that was incremented but never read)
        start_new_word = True
        output = []
        for char in text:
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
        return ["".join(x) for x in output]

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # drop NUL, the replacement character and control characters
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            output.append(" " if _is_whitespace(char) else char)
        return "".join(output)

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            if self._is_chinese_char(ord(char)):
                output.extend((" ", char, " "))
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode
        # block: https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # The CJK block does NOT include all Japanese and Korean characters:
        # Hangul, Hiragana and Katakana live in other blocks and are written
        # with space-separated words, so they are handled like other languages.
        return ((0x4E00 <= cp <= 0x9FFF) or
                (0x3400 <= cp <= 0x4DBF) or
                (0x20000 <= cp <= 0x2A6DF) or
                (0x2A700 <= cp <= 0x2B73F) or
                (0x2B740 <= cp <= 0x2B81F) or
                (0x2B820 <= cp <= 0x2CEAF) or
                (0xF900 <= cp <= 0xFAFF) or
                (0x2F800 <= cp <= 0x2FA1F))
class WordpieceTokenizer():
    """
    Wordpiece tokenizer: greedy longest-match-first segmentation of a word
    into sub-word pieces found in the vocab dict.
    """

    def __init__(self, vocab):
        # token -> id mapping; only key membership is used here
        self.vocab_dict = vocab

    def tokenize(self, tokens):
        """
        Do word-piece tokenization
        Args:
            tokens: a word.
        Returns:
            a list of tokens that can be found in vocab dict.
        """
        output_tokens = []
        tokens = convert_to_unicode(tokens)
        for token in whitespace_tokenize(tokens):
            chars = list(token)
            len_chars = len(chars)
            start = 0
            end = len_chars
            # Greedy longest-match: repeatedly take the longest prefix
            # token[start:end] present in the vocab (prefixed "##" when it
            # does not begin the word), then restart matching from its end.
            while start < len_chars:
                while start < end:
                    substr = "".join(token[start:end])
                    if start != 0:
                        substr = "##" + substr
                    if substr in self.vocab_dict:
                        output_tokens.append(substr)
                        start = end
                        end = len_chars
                    else:
                        # no match: shrink the candidate by one character
                        end = end - 1
                # nothing matched at this position: emit [UNK] and give up
                # on the remainder of this word
                if start == end and start != len_chars:
                    output_tokens.append("[UNK]")
                    break
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
whitespace_char = [" ", "\t", "\n", "\r"]
if char in whitespace_char:
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
control_char = ["\t", "\n", "\r"]
if char in control_char:
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp | |
X1 in view user units
:param min_y: Y1
:param max_x: X2
:param max_y: Y2
:type str_val: str
:type min_x: float_ref
:type min_y: float_ref
:type max_x: float_ref
:type max_y: float_ref
:returns: 0 if line returned.
1 - Right Mouse
2 - Escape/Cancel
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** The coordinates are returned in the current User projection
(See `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
"""
ret_val, min_x.value, min_y.value, max_x.value, max_y.value = self._get_line_ex(str_val.encode(), min_x.value, min_y.value, max_x.value, max_y.value)
return ret_val
def get_line_xyz(self, str_val, min_x, min_y, min_z, max_x, max_y, max_z):
    """
    Returns the end points of a user-digitized line in X, Y and Z.

    :param str_val: User prompt string
    :param min_x:   X1 in view user units
    :param min_y:   Y1
    :param min_z:   Z1
    :param max_x:   X2
    :param max_y:   Y2
    :param max_z:   Z2
    :type  str_val: str
    :type  min_x:   float_ref
    :type  min_y:   float_ref
    :type  min_z:   float_ref
    :type  max_x:   float_ref
    :type  max_y:   float_ref
    :type  max_z:   float_ref

    :returns:       0 if line returned.
                    1 - Right Mouse
                    2 - Escape/Cancel
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Coordinates come back in the current User projection (see
    `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
    Useful for digitizing a line in an oriented view and getting the true
    (X, Y, Z) coordinates at the selected point on the view plane.
    """
    prompt = str_val.encode()
    (ret_val, min_x.value, min_y.value, min_z.value,
     max_x.value, max_y.value, max_z.value) = self._get_line_xyz(
        prompt, min_x.value, min_y.value, min_z.value,
        max_x.value, max_y.value, max_z.value)
    return ret_val
def get_point(self, str_val, x, y):
    """
    Returns the coordinates of a user selected point.

    :param str_val: User prompt string
    :param x:       X coordinate in current view user units.
    :param y:       Y
    :type  str_val: str
    :type  x:       float_ref
    :type  y:       float_ref

    :returns:       0 if point returned.
                    1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Blocks until the user selects a point.

    .. seealso::
        iTrackPoint, GetCurPoint, GetCursor
    """
    prompt = str_val.encode()
    ret_val, x.value, y.value = self._get_point(prompt, x.value, y.value)
    return ret_val
def get_point_ex(self, str_val, x, y):
    """
    Returns the coordinates of a user selected point (extended status codes).

    :param str_val: User prompt string
    :param x:       X coordinate in current view user units.
    :param y:       Y
    :type  str_val: str
    :type  x:       float_ref
    :type  y:       float_ref

    :returns:       0 if point returned.
                    1 if user used right mouse and then Done.
                    2 if user cancelled.
                    3 if capture is lost.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Blocks until the user selects a point.

    .. seealso::
        iTrackPoint, GetCurPoint, GetCursor
    """
    prompt = str_val.encode()
    ret_val, x.value, y.value = self._get_point_ex(prompt, x.value, y.value)
    return ret_val
def get_point_3d(self, str_val, x, y, z):
    """
    Returns the 3D coordinates of a user selected point.

    :param str_val: User prompt string
    :param x:       X coordinate in current view user units.
    :param y:       Y
    :param z:       Z
    :type  str_val: str
    :type  x:       float_ref
    :type  y:       float_ref
    :type  z:       float_ref

    :returns:       0 if point returned.
                    1 if user used right mouse and then Done.
                    2 if user cancelled.
                    3 if capture is lost.
    :rtype:         int

    .. versionadded:: 9.1

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Blocks until the user selects a point.

    .. seealso::
        iTrackPoint, GetCurPoint, GetCursor
    """
    prompt = str_val.encode()
    result = self._get_point_3d(prompt, x.value, y.value, z.value)
    ret_val, x.value, y.value, z.value = result
    return ret_val
def get_poly_line(self, str_val, vv_x, vv_y):
    """
    Returns a user-digitized polyline.

    :param str_val: User prompt string
    :param vv_x:    X
    :param vv_y:    Y
    :type  str_val: str
    :type  vv_x:    GXVV
    :type  vv_y:    GXVV

    :returns:       0 if line returned.
                    1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Coordinates come back in the current User projection (see
    `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
    """
    prompt = str_val.encode()
    return self._get_poly_line(prompt, vv_x, vv_y)
def get_poly_line_xyz(self, str_val, vv_x, vv_y, vv_z):
    """
    Returns a user-digitized polyline with X, Y and Z.

    :param str_val: User prompt string
    :param vv_x:    X
    :param vv_y:    Y
    :param vv_z:    Z
    :type  str_val: str
    :type  vv_x:    GXVV
    :type  vv_y:    GXVV
    :type  vv_z:    GXVV

    :returns:       0 if line returned.
                    1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Coordinates come back in the current User projection (see
    `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.) In this
    version of the method X, Y and Z (depth) are returned. Initially created
    to deal with crooked sections.
    """
    prompt = str_val.encode()
    return self._get_poly_line_xyz(prompt, vv_x, vv_y, vv_z)
def get_rect(self, str_val, min_x, min_y, max_x, max_y):
    """
    Returns the coordinates of a user selected box starting at a corner.

    :param str_val: User prompt string
    :param min_x:   X minimum in current view user units. (defines corner)
    :param min_y:   Y
    :param max_x:   X maximum
    :param max_y:   Y
    :type  str_val: str
    :type  min_x:   float_ref
    :type  min_y:   float_ref
    :type  max_x:   float_ref
    :type  max_y:   float_ref

    :returns:       0 if point returned.
                    1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Coordinates come back in the current User projection (see
    `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
    If the user `GXIPJ <geosoft.gxapi.GXIPJ>` distorts the coordinates from
    being rectilinear (e.g. for a TriPlot graph), take care: the returned
    values are the min and max of X and Y over the fixed corner and the
    tracked opposite corner, so (Xmin, Ymin)/(Xmax, Ymax) are not
    necessarily the lower-left and upper-right corners — with a warped User
    projection they could be (Xmin, Ymax) and (Xmax, Ymin). For masking
    operations the "other" two corners may need to be constructed from
    knowledge of the User projection; what appears to be a rectangle on the
    map is not necessarily a rectangle in User coordinates.
    """
    prompt = str_val.encode()
    (ret_val, min_x.value, min_y.value,
     max_x.value, max_y.value) = self._get_rect(
        prompt, min_x.value, min_y.value, max_x.value, max_y.value)
    return ret_val
def track_point(self, flags, x, y):
    """
    Get a point without prompt or cursor change, with tracking.

    :param flags: :ref:`EMAP_TRACK`
    :param x:     X coordinate in current view user units.
    :param y:     Y
    :type  flags: int
    :type  x:     float_ref
    :type  y:     float_ref

    :returns:     0 if point returned.
                  1 if user cancelled.
    :rtype:       int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.
    """
    result = self._track_point(flags, x.value, y.value)
    ret_val, x.value, y.value = result
    return ret_val
# Map Viewport Mode Methods
def get_aoi_area(self, min_x, min_y, max_x, max_y):
    """
    Get the current area of interest.

    :param min_x: X Min returned
    :param min_y: Y Min returned
    :param max_x: X Max returned
    :param max_y: Y Max returned
    :type  min_x: float_ref
    :type  min_y: float_ref
    :type  max_x: float_ref
    :type  max_y: float_ref

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Coordinates are based on the current view units.
    """
    (min_x.value, min_y.value,
     max_x.value, max_y.value) = self._get_aoi_area(
        min_x.value, min_y.value, max_x.value, max_y.value)
def set_aoi_area(self, min_x, min_y, max_x, max_y):
"""
Set the area of interest.
:param min_x: X Min
:param min_y: Y Min
:param max_x: X Max
:param max_y: Y Max
:type min_x: float
:type min_y: float
:type max_x: float
:type max_y: float
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be | |
2010/12/01.
.. seealso:: |MANUAL| page 228
"""
if __sofa_version < (2010, 12, 1):
raise NotImplementedError
tt1 = _ct.c_double()
tt2 = _ct.c_double()
s = _sofa.iauTcgtt(tcg1, tcg2, _ct.byref(tt1), _ct.byref(tt2))
return tt1.value, tt2.value
# iauTdbtcb — added in SOFA release 2010-12-01; the try/except keeps this
# module importable against older SOFA shared libraries missing the symbol.
try:
    _sofa.iauTdbtcb.argtypes = [_ct.c_double,                # tdb1
                                _ct.c_double,                # tdb2
                                _ct.POINTER(_ct.c_double),   # tcb1
                                _ct.POINTER(_ct.c_double)]   # tcb2
    _sofa.iauTdbtcb.restype = _ct.c_int
except AttributeError:
    pass


def tdbtcb(tdb1, tdb2):
    """ Timescale transformation: Barycentric Dynamical Time (TDB) to
    Barycentric Coordinate Time (TCB).

    :param tdb1, tdb2: TDB as a two-part Julian Date.
    :type tdb1, tdb2: float

    :returns: TCB as a two-part Julian Date (tuple of two floats).

    :raises: :exc:`NotImplementedError` if called with a |SOFA| release
        prior to 2010/12/01.

    .. seealso:: |MANUAL| page 229
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tcb1, tcb2 = _ct.c_double(), _ct.c_double()
    _sofa.iauTdbtcb(tdb1, tdb2, _ct.byref(tcb1), _ct.byref(tcb2))
    return tcb1.value, tcb2.value
# iauTdbtt — added in SOFA release 2010-12-01; guarded so import still works
# against older SOFA shared libraries missing the symbol.
try:
    _sofa.iauTdbtt.argtypes = [_ct.c_double,                # tdb1
                               _ct.c_double,                # tdb2
                               _ct.c_double,                # dtr
                               _ct.POINTER(_ct.c_double),   # tt1
                               _ct.POINTER(_ct.c_double)]   # tt2
    _sofa.iauTdbtt.restype = _ct.c_int
except AttributeError:
    pass


def tdbtt(tdb1, tdb2, dtr):
    """ Timescale transformation: Barycentric Dynamical Time (TDB) to
    Terrestrial Time (TT).

    :param tdb1, tdb2: TDB as a two-part Julian Date.
    :type tdb1, tdb2: float

    :param dtr: TDB-TT in seconds.
    :type dtr: float

    :returns: TT as a two-part Julian Date (tuple of two floats).

    :raises: :exc:`NotImplementedError` if called with a |SOFA| release
        prior to 2010/12/01.

    .. seealso:: |MANUAL| page 230
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tt1, tt2 = _ct.c_double(), _ct.c_double()
    _sofa.iauTdbtt(tdb1, tdb2, dtr, _ct.byref(tt1), _ct.byref(tt2))
    return tt1.value, tt2.value
# iauTf2a — added in SOFA release 2010-12-01; guarded so import still works
# against older SOFA shared libraries missing the symbol.
try:
    _sofa.iauTf2a.argtypes = [_ct.c_char,                 # s (sign)
                              _ct.c_int,                  # ihour
                              _ct.c_int,                  # imin
                              _ct.c_double,               # sec
                              _ct.POINTER(_ct.c_double)]  # rad
    _sofa.iauTf2a.restype = _ct.c_int
except AttributeError:
    pass

# status-code -> warning-message map for iauTf2a
_tf2a_msg = {0: 'OK', # Unused
            1:'Tf2a: hours outside range 0-23',
            2:'Tf2a: minutes outside the range 0-59',
            3:'Tf2a: seconds outside the range 0-59.999...'}


def tf2a(s, ihour, imin, sec):
    """ Convert hours, minutes, seconds to radians.

    :param s: sign, '-' is negative, everything else positive.

    :param ihour: hours.
    :type ihour: int

    :param imin: minutes.
    :type imin: int

    :param sec: seconds.
    :type sec: float

    :returns: the converted value in radians (float).

    :raises: :exc:`NotImplementedError` if called with a |SOFA| release
        prior to 2010/12/01.

    .. note:: if a field lies outside its conventional range, a
        :exc:`UserWarning` is issued and the value is still converted
        (previous docs wrongly claimed a :exc:`ValueError` was raised).

    .. seealso:: |MANUAL| page 231
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    rad = _ct.c_double()
    # separate name: the original reused `s`, shadowing the sign parameter
    status = _sofa.iauTf2a(s, ihour, imin, sec, _ct.byref(rad))
    if status > 0:
        _warnings.warn(_tf2a_msg[status], UserWarning, 2)
    return rad.value
# iauTf2d — added in SOFA release 2010-12-01; guarded so import still works
# against older SOFA shared libraries missing the symbol.
try:
    _sofa.iauTf2d.argtypes = [_ct.c_char,                 # s (sign)
                              _ct.c_int,                  # ihour
                              _ct.c_int,                  # imin
                              _ct.c_double,               # sec
                              _ct.POINTER(_ct.c_double)]  # days
    _sofa.iauTf2d.restype = _ct.c_int
except AttributeError:
    pass

# status-code -> warning-message map for iauTf2d
_tf2d_msg = {0: 'OK', # Unused
            1:'Tf2d: hours outside range 0-23',
            2:'Tf2d: minutes outside the range 0-59',
            3:'Tf2d: seconds outside the range 0-59.999...'}


def tf2d(s, ihour, imin, sec):
    """ Convert hours, minutes, seconds to days.

    :param s: sign, '-' is negative, everything else positive.

    :param ihour: hours.
    :type ihour: int

    :param imin: minutes.
    :type imin: int

    :param sec: seconds.
    :type sec: float

    :returns: the converted value in days (float).

    :raises: :exc:`NotImplementedError` if called with a |SOFA| release
        prior to 2010/12/01.

    .. note:: if a field lies outside its conventional range, a
        :exc:`UserWarning` is issued and the value is still converted
        (previous docs wrongly claimed a :exc:`ValueError` was raised).

    .. seealso:: |MANUAL| page 232
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    days = _ct.c_double()
    # separate name: the original reused `s`, shadowing the sign parameter
    status = _sofa.iauTf2d(s, ihour, imin, sec, _ct.byref(days))
    if status > 0:
        _warnings.warn(_tf2d_msg[status], UserWarning, 2)
    return days.value
# iauTr
_sofa.iauTr.argtypes = [_ndpointer(shape=(3,3), dtype=float, flags='C'),  # r
                        _ndpointer(shape=(3,3), dtype=float, flags='C')]  # rt


def tr(r):
    """ Transpose a rotation matrix.

    :param r: rotation matrix.
    :type r: array-like of shape (3,3)

    :returns: the transpose as a numpy.matrix of shape 3x3.

    .. seealso:: |MANUAL| page 233
    """
    transposed = _np.asmatrix(_np.zeros(shape=(3, 3), dtype=float, order='C'))
    _sofa.iauTr(_req_shape_c(r, float, (3, 3)), transposed)
    return transposed
# iauTrxp
_sofa.iauTrxp.argtypes = [_ndpointer(shape=(3,3), dtype=float, flags='C'),  # r
                          _ndpointer(shape=(1,3), dtype=float, flags='C'),  # p
                          _ndpointer(shape=(1,3), dtype=float, flags='C')]  # trp


def trxp(r, p):
    """ Multiply a p-vector by the transpose of a rotation matrix.

    :param r: rotation matrix.
    :type r: array-like of shape (3,3)

    :param p: p-vector.
    :type p: array-like of shape (1,3)

    :returns: r^T @ p as a numpy.matrix of shape 1x3.

    .. seealso:: |MANUAL| page 234
    """
    result = _np.asmatrix(_np.zeros(shape=(1, 3), dtype=float, order='C'))
    _sofa.iauTrxp(_req_shape_c(r, float, (3, 3)),
                  _req_shape_c(p, float, (1, 3)), result)
    return result
# iauTrxpv
_sofa.iauTrxpv.argtypes = [_ndpointer(shape=(3,3), dtype=float, flags='C'),  # r
                           _ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                           _ndpointer(shape=(2,3), dtype=float, flags='C')]  # trpv


def trxpv(r, pv):
    """ Multiply a pv-vector by the transpose of a rotation matrix.

    :param r: rotation matrix.
    :type r: array-like of shape (3,3)

    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)

    :returns: the transformed pv-vector as a numpy.matrix of shape 2x3.

    .. seealso:: |MANUAL| page 235
    """
    result = _np.asmatrix(_np.zeros(shape=(2, 3), dtype=float, order='C'))
    _sofa.iauTrxpv(_req_shape_c(r, float, (3, 3)),
                   _req_shape_c(pv, float, (2, 3)), result)
    return result
# iauTttai
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauTttai.argtypes = [
        _ct.c_double,               # tt1
        _ct.c_double,               # tt2
        _ct.POINTER(_ct.c_double),  # tai1
        _ct.POINTER(_ct.c_double),  # tai2
    ]
    _sofa.iauTttai.restype = _ct.c_int
except AttributeError:
    # symbol absent: the linked SOFA library predates 2010-12-01
    pass
def tttai(tt1, tt2):
    """ Timescale transformation: Terrestrial Time (TT) to
    International Atomic Time (TAI).

    :param tt1, tt2: TT as a two-part Julian Date.
    :type tt1, tt2: float
    :returns: TAI as a two-part Julian Date.
    :raises: :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.

    .. seealso:: |MANUAL| page 236
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tai1, tai2 = _ct.c_double(), _ct.c_double()
    # the C routine's status code is always zero, so it is not checked
    _sofa.iauTttai(tt1, tt2, _ct.byref(tai1), _ct.byref(tai2))
    return tai1.value, tai2.value
# iauTttcg
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauTttcg.argtypes = [
        _ct.c_double,               # tt1
        _ct.c_double,               # tt2
        _ct.POINTER(_ct.c_double),  # tcg1
        _ct.POINTER(_ct.c_double),  # tcg2
    ]
    _sofa.iauTttcg.restype = _ct.c_int
except AttributeError:
    # symbol absent: the linked SOFA library predates 2010-12-01
    pass
def tttcg(tt1, tt2):
    """ Timescale transformation: Terrestrial Time (TT) to
    Geocentric Coordinate Time (TCG).

    :param tt1, tt2: TT as a two-part Julian Date.
    :type tt1, tt2: float
    :returns: TCG as a two-part Julian Date.
    :raises: :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.

    .. seealso:: |MANUAL| page 237
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tcg1, tcg2 = _ct.c_double(), _ct.c_double()
    # the C routine's status code is always zero, so it is not checked
    _sofa.iauTttcg(tt1, tt2, _ct.byref(tcg1), _ct.byref(tcg2))
    return tcg1.value, tcg2.value
# iauTttdb
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauTttdb.argtypes = [
        _ct.c_double,               # tt1
        _ct.c_double,               # tt2
        _ct.c_double,               # dtr
        _ct.POINTER(_ct.c_double),  # tdb1
        _ct.POINTER(_ct.c_double),  # tdb2
    ]
    _sofa.iauTttdb.restype = _ct.c_int
except AttributeError:
    # symbol absent: the linked SOFA library predates 2010-12-01
    pass
def tttdb(tt1, tt2, dtr):
    """ Timescale transformation: Terrestrial Time (TT) to
    Barycentric Dynamical Time (TDB)

    :param tt1, tt2: TT as a two-part Julian Date.
    :type tt1, tt2: float
    :param dtr: TDB-TT in seconds.
    :type dtr: float
    :returns: TDB as a two-part Julian Date.
    :raises: :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.

    .. seealso:: |MANUAL| page 238
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tdb1, tdb2 = _ct.c_double(), _ct.c_double()
    # the C routine's status code is always zero, so it is not checked
    _sofa.iauTttdb(tt1, tt2, dtr, _ct.byref(tdb1), _ct.byref(tdb2))
    return tdb1.value, tdb2.value
# iauTtut1
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauTtut1.argtypes = [
        _ct.c_double,               # tt1
        _ct.c_double,               # tt2
        _ct.c_double,               # dt
        _ct.POINTER(_ct.c_double),  # ut11
        _ct.POINTER(_ct.c_double),  # ut12
    ]
    _sofa.iauTtut1.restype = _ct.c_int
except AttributeError:
    # symbol absent: the linked SOFA library predates 2010-12-01
    pass
def ttut1(tt1, tt2, dt):
    """ Timescale transformation: Terrestrial Time (TT) to
    Universal Time (UT1).

    :param tt1, tt2: TT as a two-part Julian Date.
    :type tt1, tt2: float
    :param dt: TT-UT1 in seconds.
    :type dt: float
    :returns: UT1 as a two-part Julian Date.
    :raises: :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.

    .. seealso:: |MANUAL| page 239
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    ut11, ut12 = _ct.c_double(), _ct.c_double()
    # the C routine's status code is always zero, so it is not checked
    _sofa.iauTtut1(tt1, tt2, dt, _ct.byref(ut11), _ct.byref(ut12))
    return ut11.value, ut12.value
# iauUt1tai
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauUt1tai.argtypes = [
        _ct.c_double,               # ut11
        _ct.c_double,               # ut12
        _ct.c_double,               # dta
        _ct.POINTER(_ct.c_double),  # tai1
        _ct.POINTER(_ct.c_double),  # tai2
    ]
    _sofa.iauUt1tai.restype = _ct.c_int
except AttributeError:
    # symbol absent: the linked SOFA library predates 2010-12-01
    pass
def ut1tai(ut11, ut12, dta):
    """ Timescale transformation: Universal Time (UT1) to
    International Atomic Time (TAI).

    :param ut11, ut12: UT1 as a two-part Julian Date.
    :type ut11, ut12: float
    :param dta: UT1-TAI in seconds.
    :type dta: float
    :returns: TAI as a two-part Julian Date
    :raises: :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.

    .. seealso:: |MANUAL| page 240
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    tai1, tai2 = _ct.c_double(), _ct.c_double()
    # the C routine's status code is always zero, so it is not checked
    _sofa.iauUt1tai(ut11, ut12, dta, _ct.byref(tai1), _ct.byref(tai2))
    return tai1.value, tai2.value
# iauUt1tt
# this routine was added in release 2010-12-01 of SOFA
try:
_sofa.iauUt1tt.argtypes = [_ct.c_double, #ut11
_ct.c_double, #ut12
_ct.c_double, #dt
_ct.POINTER(_ct.c_double), #tt1
_ct.POINTER(_ct.c_double)] #tt2
_sofa.iauUt1tt.restype | |
#!/usr/bin/env python3
import logging
import os
import sys
import time
import traceback
from collections import namedtuple
from pathlib import Path
from screenshots import Client, Screenshooter
def env(name, default):
    """Return the value of environment variable *name*, or *default* if unset."""
    value = os.getenv(name)
    return default if value is None else value
# Screenshot specification: the layout commands to screenshot, the setup
# ("before") and teardown ("after") command lists, the output geometry,
# the animation frame delay, and how many windows to spawn.
Spec = namedtuple(
    "Spec",
    ["commands", "before", "after", "geometry", "delay", "windows"],
    defaults=[None, None, None, env("GEOMETRY", "240x135"), env("DELAY", "1x1"), 3],
)
# Map of layout name -> {screenshot name -> Spec}. Every entry drives one
# screenshot (or an animation, when the Spec has commands) written under
# docs/screenshots/layout/<layout>/<name>.
specs = {
    "bsp": {
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "8-windows": Spec(
            windows=8,
            before=[
                "up",
                "grow_down",
                "left",
                "grow_left",
                "down",
                "right",
                "grow_left",
                "grow_left",
                "toggle_split",
                "left",
                "left",
                "grow_right",
                "grow_right",
                "grow_up",
                "grow_up",
                "up",
                "toggle_split",
            ],
        ),
        "toggle_split-from-down-left": Spec(commands=["toggle_split"]),
        "toggle_split-from-right": Spec(commands=["toggle_split"], before=["right"]),
        # "next": Spec(commands=["next"]),  # no effects?
        # "previous": Spec(commands=["previous"]),  # no effects?
        "left": Spec(commands=["left"], before=["right"]),
        "right": Spec(commands=["right"]),
        "up": Spec(commands=["up"]),
        "down": Spec(commands=["down"], before=["up"]),
        "shuffle_left": Spec(commands=["shuffle_left"], before=["right"]),
        "shuffle_right": Spec(commands=["shuffle_right"]),
        "shuffle_up": Spec(commands=["shuffle_up"]),
        "shuffle_down": Spec(commands=["shuffle_down"], before=["up"]),
        "grow_left": Spec(commands=["grow_left"], before=["right"]),
        "grow_right": Spec(commands=["grow_right"]),
        "grow_up": Spec(commands=["grow_up"]),
        "grow_down": Spec(commands=["grow_down"], before=["up"]),
        "flip_left": Spec(commands=["flip_left"], before=["right"]),
        "flip_right": Spec(commands=["flip_right"]),
        "flip_up": Spec(commands=["flip_up"]),
        "flip_down": Spec(commands=["flip_down"], before=["up"]),
        "normalize": Spec(
            commands=["normalize"],
            before=["grow_up", "grow_up", "grow_right", "grow_right"],
        ),
    },
    "columns": {
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=4, before=["left", "spawn"]),
        "toggle_split": Spec(
            commands=[
                "toggle_split",
                "toggle_split",
                "down",
                "toggle_split",
                "toggle_split",
            ],
            windows=4,
        ),
        "left": Spec(commands=["left"]),
        "right": Spec(commands=["right"], before=["left"]),
        "up": Spec(commands=["up"], before=["down"]),
        "down": Spec(commands=["down"]),
        "next": Spec(commands=["next"]),
        "previous": Spec(commands=["previous"]),
        "shuffle_left": Spec(commands=["shuffle_left"]),
        "shuffle_right": Spec(commands=["shuffle_right"], before=["left"]),
        "shuffle_up": Spec(commands=["shuffle_up"], before=["down"]),
        "shuffle_down": Spec(commands=["shuffle_down"]),
        "grow_left": Spec(commands=["grow_left"]),
        "grow_right": Spec(commands=["grow_right"], before=["left"]),
        "grow_up": Spec(commands=["grow_up"], before=["down"]),
        "grow_down": Spec(commands=["grow_down"]),
        "normalize": Spec(
            commands=["normalize"],
            before=["grow_down", "grow_down", "grow_left", "grow_left"],
        ),
    },
    "floating": {
        # Floating info clients lists clients from all groups,
        # breaking our "kill windows" method.
        # "2-windows": Spec(windows=2),
        # "3-windows": Spec(windows=3),
        # "4-windows": Spec(windows=4),
    },
    "matrix": {
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "5-windows-add": Spec(windows=5, before=["add"]),
        "left": Spec(commands=["left"], windows=4),
        "right": Spec(commands=["right"], before=["up", "left"], windows=4),
        "up": Spec(commands=["up"], windows=4),
        "down": Spec(commands=["down"], before=["up"], windows=4),
        "add-delete": Spec(
            commands=["add", "add", "delete", "delete", "delete", "add"],
            after=["delete"],
            windows=5
        ),
    },
    "max": {"max": Spec(windows=1)},
    "monadtall": {
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "normalize": Spec(
            commands=["normalize"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main"],
            after=["reset"],
        ),
        "normalize-from-main": Spec(
            commands=["normalize"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main", "left"],
            after=["reset"],
        ),
        "reset": Spec(
            commands=["reset"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main"],
        ),
        "maximize": Spec(commands=["maximize"], windows=4, after=["reset"]),
        "maximize-main": Spec(
            commands=["maximize"], windows=4, before=["left"], after=["reset"]
        ),
        "grow": Spec(commands=["grow", "grow", "grow", "grow"], delay="1x2"),
        "grow_main": Spec(
            commands=["grow_main", "grow_main", "grow_main"],
            after=["reset"],
            delay="1x2",
        ),
        "shrink_main": Spec(
            commands=["shrink_main", "shrink_main", "shrink_main"],
            after=["reset"],
            delay="1x2",
        ),
        "shrink": Spec(commands=["shrink", "shrink", "shrink", "shrink"], delay="1x2"),
        "shuffle_up": Spec(commands=["shuffle_up"]),
        "shuffle_down": Spec(commands=["shuffle_down"], before=["up"]),
        "flip": Spec(commands=["flip"], after=["flip"]),
        # "swap": Spec(commands=["swap"]),  # requires 2 args: window1 and window2
        "swap_left": Spec(commands=["swap_left"], after=["reset"]),
        "swap_right": Spec(commands=["swap_right"], before=["left"], after=["reset"]),
        "swap_main": Spec(commands=["swap_main"], after=["reset"]),
        "left": Spec(commands=["left"]),
        "right": Spec(commands=["right"], before=["left"]),
    },
    "monadwide": {
        # There seems to be a problem with directions. Up cycles through windows
        # clock-wise, down cycles through windows counter-clock-wise, left and right
        # works normally in the secondary columns, while left from main does nothing
        # and right from main moves to the center of the second column. It's like
        # the directions are mixed between normal orientation
        # and a 90° rotation to the left, like monadtall. Up and down are reversed
        # compared to monadtall.
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "normalize": Spec(
            commands=["normalize"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main"],
            after=["reset"],
        ),
        "normalize-from-main": Spec(
            commands=["normalize"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main", "down"],
            after=["reset"],
        ),
        "reset": Spec(
            commands=["reset"],
            windows=4,
            before=["maximize", "shrink_main", "shrink_main"],
        ),
        "maximize": Spec(commands=["maximize"], windows=4, after=["reset"]),
        "maximize-main": Spec(
            commands=["maximize"], windows=4, before=["down"], after=["reset"]
        ),
        "grow": Spec(commands=["grow", "grow", "grow", "grow"], delay="1x2"),
        "grow_main": Spec(
            commands=["grow_main", "grow_main", "grow_main"],
            after=["reset"],
            delay="1x2",
        ),
        "shrink_main": Spec(
            commands=["shrink_main", "shrink_main", "shrink_main"],
            after=["reset"],
            delay="1x2",
        ),
        "shrink": Spec(commands=["shrink", "shrink", "shrink", "shrink"], delay="1x2"),
        "shuffle_up": Spec(commands=["shuffle_up"]),
        "shuffle_down": Spec(commands=["shuffle_down"], before=["down"]),
        "flip": Spec(commands=["flip"], after=["flip"]),
        # "swap": Spec(commands=["swap"]),  # requires 2 args: window1 and window2
        "swap_left": Spec(commands=["swap_left"], before=["flip"], after=["flip"]),
        "swap_right": Spec(commands=["swap_right"], before=["left"]),
        "swap_main": Spec(commands=["swap_main"]),
        "left": Spec(commands=["left"]),
        "right": Spec(commands=["right"], before=["left"]),
    },
    "ratiotile": {
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "6-windows": Spec(windows=6),
        "7-windows": Spec(windows=7),
        "shuffle_down": Spec(
            commands=["shuffle_down", "shuffle_down", "shuffle_down"],
            windows=5,
            delay="1x2",
        ),
        "shuffle_up": Spec(
            commands=["shuffle_up", "shuffle_up", "shuffle_up"], windows=5, delay="1x2"
        ),
        # decrease_ratio does not seem to work
        # "decrease_ratio": Spec(commands=["decrease_ratio", "decrease_ratio", "decrease_ratio", "decrease_ratio"], windows=5, delay="1x2"),
        # increase_ratio does not seem to work
        # "increase_ratio": Spec(commands=["increase_ratio", "increase_ratio", "increase_ratio", "increase_ratio"], windows=5, delay="1x2"),
    },
    "slice": {
        # Slice layout freezes the session
        # "next": Spec(commands=["next"]),
        # "previous": Spec(commands=["previous"]),
    },
    "stack": {
        # There seems to be a confusion between Stack and Columns layouts.
        # The Columns layout says: "Extension of the Stack layout"
        # and "The screen is split into columns, which can be dynamically added
        # or removed", but there are no commands available to add or remove columns.
        # Inversely, the Stack layout says: "Unlike the columns layout
        # the number of stacks is fixed", yet the two commands
        # "cmd_add" and "cmd_delete" allow for a dynamic number of stacks!
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "toggle_split": Spec(
            commands=["toggle_split"],
            windows=4,
            before=["down", "down"],
            after=["toggle_split"],
        ),
        "down": Spec(commands=["down"], windows=4),
        "up": Spec(commands=["up"], before=["down"], windows=4),
        "shuffle_down": Spec(commands=["shuffle_down"], windows=4),
        "shuffle_up": Spec(commands=["shuffle_up"], before=["down"], windows=4),
        "add-delete": Spec(
            commands=["add", "add", "spawn", "spawn", "spawn", "delete", "delete"]
        ),
        "rotate": Spec(commands=["rotate"]),
        "next": Spec(commands=["next"], before=["add", "spawn"], after=["delete"]),
        "previous": Spec(
            commands=["previous"], before=["add", "spawn"], after=["delete"]
        ),
        "client_to_next": Spec(
            commands=["client_to_next"], before=["add", "spawn"], after=["delete"]
        ),
        "client_to_previous": Spec(
            commands=["client_to_previous"], before=["add", "spawn"], after=["delete"]
        ),
        # "client_to_stack": Spec(commands=["client_to_stack"]),  # requires 1 argument
    },
    "tile": {
        # Tile: no docstring at all in the code.
        "2-windows": Spec(windows=2),
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "5-windows": Spec(windows=5),
        "shuffle_down": Spec(
            commands=["shuffle_down", "shuffle_down", "shuffle_down"], windows=4
        ),
        "shuffle_up": Spec(
            commands=["shuffle_up", "shuffle_up", "shuffle_up"], windows=4
        ),
        "increase-decrease-ratio": Spec(
            commands=[
                "increase_ratio",
                "increase_ratio",
                "increase_ratio",
                "decrease_ratio",
                "decrease_ratio",
                "decrease_ratio",
            ],
            before=["down"],
            delay="1x3",
        ),
        "increase-decrease-nmaster": Spec(
            commands=[
                "increase_nmaster",
                "increase_nmaster",
                "increase_nmaster",
                "decrease_nmaster",
                "decrease_nmaster",
                "decrease_nmaster",
            ],
            delay="1x3",
        ),
    },
    "treetab": {
        # TreeTab info clients lists clients from all groups,
        # breaking our "kill windows" method.
        # See https://github.com/qtile/qtile/issues/1459
        # "1-window": Spec(windows=1),
        # "2-windows": Spec(windows=2),
        # "3-windows": Spec(windows=3),
        # "4-windows": Spec(windows=4),
        # "down": Spec(commands=["down"]),
        # "up": Spec(commands=["up"]),
        # "move_down": Spec(commands=["move_down"]),
        # "move_up": Spec(commands=["move_up"]),
        # "move_left": Spec(commands=["move_left"]),
        # "move_right": Spec(commands=["move_right"]),
        # "add_section": Spec(commands=["add_section"]),
        # "del_section": Spec(commands=["del_section"]),
        # "section_up": Spec(commands=["section_up"]),
        # "section_down": Spec(commands=["section_down"]),
        # "sort_windows": Spec(commands=["sort_windows"]),
        # "expand_branch": Spec(commands=["expand_branch"]),
        # "collapse_branch": Spec(commands=["collapse_branch"]),
        # "decrease_ratio": Spec(commands=["decrease_ratio"]),
        # "increase_ratio": Spec(commands=["increase_ratio"]),
    },
    "verticaltile": {
        "3-windows": Spec(windows=3),
        "4-windows": Spec(before=["up", "maximize"], windows=4),
        "shuffle_down": Spec(
            commands=["shuffle_down", "shuffle_down"], before=["up", "up"]
        ),
        "shuffle_up": Spec(commands=["shuffle_up", "shuffle_up"]),
        "shuffle_down-maximize": Spec(
            commands=["shuffle_down", "shuffle_down"], before=["up", "maximize", "up"]
        ),
        "shuffle_up-maximize": Spec(
            commands=["shuffle_up", "shuffle_up"], before=["up", "maximize", "down"]
        ),
        "maximize": Spec(commands=["maximize"]),
        "normalize": Spec(
            commands=["normalize"], before=["up", "maximize", "shrink", "shrink"]
        ),
        "grow-shrink": Spec(
            commands=["grow", "grow", "shrink", "shrink"],
            before=["maximize", "shrink", "shrink"],
            after=["normalize"],
            delay="1x2",
        ),
    },
    "zoomy": {
        "3-windows": Spec(windows=3),
        "4-windows": Spec(windows=4),
        "next-or-down": Spec(commands=["next", "next"], windows=4),
        "previous-or-up": Spec(commands=["previous", "previous"], windows=4),
    },
}
# Shared qtile client and root directory for all generated screenshots.
client = Client()
output_dir = Path("docs") / "screenshots" / "layout"
def take(name, layout, spec):
    """Take the specified screenshots and optionally animate them.

    Returns a ``(success, message)`` tuple; *message* carries the collected
    tracebacks when *success* is False.
    """
    # prepare the layout; on failure, clean up and report the traceback
    try:
        client.prepare_layout(layout, spec.windows, spec.before or [])
    except Exception:
        client.kill_group_windows()
        return False, "While preparing layout:\n" + traceback.format_exc()
    time.sleep(0.5)
    # initialize screenshooter, create output directory
    target_dir = output_dir / layout
    target_dir.mkdir(parents=True, exist_ok=True)
    commands = spec.commands or []
    shooter = Screenshooter(target_dir / name, spec.geometry, spec.delay)
    problems = []
    # take initial screenshot (unnumbered when it will be the only one)
    shooter.shoot(numbered=bool(commands))
    # one screenshot per command, then assemble the animation at the end
    if commands:
        for command in commands:
            try:
                client.run_layout_command(command)
            except Exception:
                problems.append(
                    "While running command {}:\n{}".format(
                        command, traceback.format_exc()
                    )
                )
                break
            time.sleep(0.05)
            shooter.shoot()
        shooter.animate(clear=True)
    # cleanup the layout; a failure here is recorded but not fatal by itself
    try:
        client.clean_layout(spec.after or [])
    except Exception:
        problems.append("While cleaning layout:\n" + traceback.format_exc())
    if problems:
        return False, "\n\n".join(problems)
    return True, ""
def get_selection(args):
    """Parse args of the form LAYOUT, LAYOUT:NAME or LAYOUT:NAME1,NAME2.

    :param args: iterable of selection strings; empty/None selects all specs.
    :returns: list of ``(layout, [name, ...])`` tuples.
    :raises LookupError: if a referenced layout or spec name does not exist.
    """
    if not args:
        # no filter given: every spec of every layout, in sorted order
        return [
            (layout, sorted(specs[layout].keys())) for layout in sorted(specs.keys())
        ]
    errors = []
    selection = []
    for arg in args:
        if ":" in arg:
            # split on the first colon only: the original two-value unpacking
            # of arg.split(":") crashed with a ValueError on arguments such as
            # "bsp:left:extra"; with partition the surplus text is reported
            # below as an unknown spec name instead
            layout, _, names = arg.partition(":")
            if layout not in specs:
                errors.append("There is no spec for layout " + layout)
                continue
            names = names.split(",")
            for name in names:
                if name not in specs[layout]:
                    errors.append("There is no spec for {}:{}".format(layout, name))
            selection.append((layout, names))
        else:
            if arg not in specs:
                errors.append("There is no spec for layout " + arg)
                continue
            selection.append((arg, sorted(specs[arg].keys())))
    if errors:
        raise LookupError("\n".join(errors))
    return selection
def main(args=None):
logging.basicConfig(
filename=env("LOG_PATH", "docs/screenshots/take_all.log"),
format="%(asctime)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
# get selection of specs, exit if they don't exist
try:
selection = get_selection(args)
except | |
0xC5 Aring
0x60, 0xC0, 0xE0, 0xC0, 0xE0, # 0xC6 AE
0x60, 0x80, 0x80, 0x60, 0x20, 0x40, # 0xC7 Ccedilla
0x40, 0x20, 0xE0, 0xC0, 0xE0, # 0xC8 Egrave
0x40, 0x80, 0xE0, 0xC0, 0xE0, # 0xC9 Eacute
0xE0, 0x00, 0xE0, 0xC0, 0xE0, # 0xCA Ecircumflex
0xA0, 0x00, 0xE0, 0xC0, 0xE0, # 0xCB Edieresis
0x40, 0x20, 0xE0, 0x40, 0xE0, # 0xCC Igrave
0x40, 0x80, 0xE0, 0x40, 0xE0, # 0xCD Iacute
0xE0, 0x00, 0xE0, 0x40, 0xE0, # 0xCE Icircumflex
0xA0, 0x00, 0xE0, 0x40, 0xE0, # 0xCF Idieresis
0xC0, 0xA0, 0xE0, 0xA0, 0xC0, # 0xD0 Eth
0xC0, 0x60, 0xA0, 0xE0, 0xA0, # 0xD1 Ntilde
0x40, 0x20, 0xE0, 0xA0, 0xE0, # 0xD2 Ograve
0x40, 0x80, 0xE0, 0xA0, 0xE0, # 0xD3 Oacute
0xE0, 0x00, 0xE0, 0xA0, 0xE0, # 0xD4 Ocircumflex
0xC0, 0x60, 0xE0, 0xA0, 0xE0, # 0xD5 Otilde
0xA0, 0x00, 0xE0, 0xA0, 0xE0, # 0xD6 Odieresis
0xA0, 0x40, 0xA0, # 0xD7 multiply
0x60, 0xA0, 0xE0, 0xA0, 0xC0, # 0xD8 Oslash
0x80, 0x40, 0xA0, 0xA0, 0xE0, # 0xD9 Ugrave
0x20, 0x40, 0xA0, 0xA0, 0xE0, # 0xDA Uacute
0xE0, 0x00, 0xA0, 0xA0, 0xE0, # 0xDB Ucircumflex
0xA0, 0x00, 0xA0, 0xA0, 0xE0, # 0xDC Udieresis
0x20, 0x40, 0xA0, 0xE0, 0x40, # 0xDD Yacute
0x80, 0xE0, 0xA0, 0xE0, 0x80, # 0xDE Thorn
0x60, 0xA0, 0xC0, 0xA0, 0xC0, 0x80, # 0xDF germandbls
0x40, 0x20, 0x60, 0xA0, 0xE0, # 0xE0 agrave
0x40, 0x80, 0x60, 0xA0, 0xE0, # 0xE1 aacute
0xE0, 0x00, 0x60, 0xA0, 0xE0, # 0xE2 acircumflex
0x60, 0xC0, 0x60, 0xA0, 0xE0, # 0xE3 atilde
0xA0, 0x00, 0x60, 0xA0, 0xE0, # 0xE4 adieresis
0x60, 0x60, 0x60, 0xA0, 0xE0, # 0xE5 aring
0x60, 0xE0, 0xE0, 0xC0, # 0xE6 ae
0x60, 0x80, 0x60, 0x20, 0x40, # 0xE7 ccedilla
0x40, 0x20, 0x60, 0xE0, 0x60, # 0xE8 egrave
0x40, 0x80, 0x60, 0xE0, 0x60, # 0xE9 eacute
0xE0, 0x00, 0x60, 0xE0, 0x60, # 0xEA ecircumflex
0xA0, 0x00, 0x60, 0xE0, 0x60, # 0xEB edieresis
0x80, 0x40, 0x80, 0x80, 0x80, # 0xEC igrave
0x40, 0x80, 0x40, 0x40, 0x40, # 0xED iacute
0xE0, 0x00, 0x40, 0x40, 0x40, # 0xEE icircumflex
0xA0, 0x00, 0x40, 0x40, 0x40, # 0xEF idieresis
0x60, 0xC0, 0x60, 0xA0, 0x60, # 0xF0 eth
0xC0, 0x60, 0xC0, 0xA0, 0xA0, # 0xF1 ntilde
0x40, 0x20, 0x40, 0xA0, 0x40, # 0xF2 ograve
0x40, 0x80, 0x40, 0xA0, 0x40, # 0xF3 oacute
0xE0, 0x00, 0x40, 0xA0, 0x40, # 0xF4 ocircumflex
0xC0, 0x60, 0x40, 0xA0, 0x40, # 0xF5 otilde
0xA0, 0x00, 0x40, 0xA0, 0x40, # 0xF6 odieresis
0x40, 0x00, 0xE0, 0x00, 0x40, # 0xF7 divide
0x60, 0xE0, 0xA0, 0xC0, # 0xF8 oslash
0x80, 0x40, 0xA0, 0xA0, 0x60, # 0xF9 ugrave
0x20, 0x40, 0xA0, 0xA0, 0x60, # 0xFA uacute
0xE0, 0x00, 0xA0, 0xA0, 0x60, # 0xFB ucircumflex
0xA0, 0x00, 0xA0, 0xA0, 0x60, # 0xFC udieresis
0x20, 0x40, 0xA0, 0x60, 0x20, 0x40, # 0xFD yacute
0x80, 0xC0, 0xA0, 0xC0, 0x80, # 0xFE thorn
0xA0, 0x00, 0xA0, 0x60, 0x20, 0x40, # 0xFF ydieresis
0x00, # 0x11D gcircumflex
0x60, 0xC0, 0xE0, 0xC0, 0x60, # 0x152 OE
0x60, 0xE0, 0xC0, 0xE0, # 0x153 oe
0xA0, 0x60, 0xC0, 0x60, 0xC0, # 0x160 Scaron
0xA0, 0x60, 0xC0, 0x60, 0xC0, # 0x161 scaron
0xA0, 0x00, 0xA0, 0x40, 0x40, # 0x178 Ydieresis
0xA0, 0xE0, 0x60, 0xC0, 0xE0, # 0x17D Zcaron
0xA0, 0xE0, 0x60, 0xC0, 0xE0, # 0x17E zcaron
0x00, # 0xEA4 uni0EA4
0x00, # 0x13A0 uni13A0
0x80, # 0x2022 bullet
0xA0, # 0x2026 ellipsis
0x60, 0xE0, 0xE0, 0xC0, 0x60, # 0x20AC Euro
0xE0, 0xA0, 0xA0, 0xA0, 0xE0, # 0xFFFD uniFFFD
]
# [offset, width, height, advance cursor, x offset, y offset]
TomThumbGlyphsS = [
[ 0, 8, 1, 2, 0, -5 ], # 0x20 space
[ 1, 8, 5, 2, 0, -5 ], # 0x21 exclam
[ 6, 8, 2, 4, 0, -5 ], # 0x22 quotedbl
[ 8, 8, 5, 4, 0, -5 ], # 0x23 numbersign
[ 13, 8, 5, 4, 0, -5 ], # 0x24 dollar
[ 18, 8, 5, 4, 0, -5 ], # 0x25 percent
[ 23, 8, 5, 4, 0, -5 ], # 0x26 ampersand
[ 28, 8, 2, 2, 0, -5 ], # 0x27 quotesingle
[ 30, 8, 5, 3, 0, -5 ], # 0x28 parenleft
[ 35, 8, 5, 3, 0, -5 ], # 0x29 parenright
[ 40, 8, 3, 4, 0, -5 ], # 0x2A asterisk
[ 43, 8, 3, 4, 0, -4 ], # 0x2B plus
[ 46, 8, 2, 3, 0, -2 ], # 0x2C comma
[ 48, 8, 1, 4, 0, -3 ], # 0x2D hyphen
[ 49, 8, 1, 2, 0, -1 ], # 0x2E period
[ 50, 8, 5, 4, 0, -5 ], # 0x2F slash
[ 55, 8, 5, 4, 0, -5 ], # 0x30 zero
[ 60, 8, 5, 3, 0, -5 ], # 0x31 one
[ 65, 8, 5, 4, 0, -5 ], # 0x32 two
[ 70, 8, 5, 4, 0, -5 ], # 0x33 three
[ 75, 8, 5, 4, 0, -5 ], # 0x34 four
[ 80, 8, 5, 4, 0, -5 ], # 0x35 five
[ 85, 8, 5, 4, 0, -5 ], # 0x36 six
[ 90, 8, 5, 4, 0, -5 ], # 0x37 seven
[ 95, 8, 5, 4, 0, -5 ], # 0x38 eight
[ 100, 8, 5, 4, 0, -5 ], # 0x39 nine
[ 105, 8, 3, 2, 0, -4 ], # 0x3A colon
[ 108, 8, 4, 3, 0, -4 ], # 0x3B semicolon
[ 112, 8, 5, 4, 0, -5 ], # 0x3C less
[ 117, 8, 3, 4, 0, -4 ], # 0x3D equal
[ 120, 8, 5, 4, 0, -5 ], # 0x3E greater
[ 125, 8, 5, 4, 0, -5 ], # 0x3F question
[ 130, 8, 5, 4, 0, -5 ], # 0x40 at
[ 135, 8, 5, 4, 0, -5 ], # 0x41 A
[ 140, 8, 5, 4, 0, -5 ], # 0x42 B
[ 145, 8, 5, 4, 0, -5 ], # 0x43 C
[ 150, 8, 5, 4, 0, -5 ], # 0x44 D
[ 155, 8, 5, 4, 0, -5 ], # 0x45 E
[ 160, 8, 5, 4, 0, -5 ], # 0x46 F
[ 165, 8, 5, 4, 0, -5 ], # 0x47 G
[ 170, 8, 5, 4, 0, -5 ], # 0x48 H
[ 175, 8, 5, 4, 0, -5 ], # 0x49 I
[ 180, 8, 5, 4, 0, -5 ], # 0x4A J
[ 185, 8, 5, 4, 0, -5 ], # 0x4B K
[ 190, 8, 5, 4, 0, -5 ], # 0x4C L
[ 195, 8, 5, 4, 0, -5 ], # 0x4D M
[ 200, 8, 5, 4, 0, -5 ], # 0x4E N
[ 205, 8, 5, 4, 0, -5 ], # 0x4F O
[ 210, 8, 5, 4, 0, -5 ], # 0x50 P
[ 215, 8, 5, 4, 0, -5 ], # 0x51 Q
[ 220, 8, 5, 4, 0, -5 ], # 0x52 R
[ 225, 8, 5, 4, 0, -5 ], # 0x53 S
[ 230, 8, 5, 4, 0, -5 ], # 0x54 T
[ 235, 8, 5, 4, 0, -5 ], # 0x55 U
[ 240, 8, 5, 4, 0, -5 ], # 0x56 V
[ 245, 8, 5, 4, 0, -5 ], # 0x57 W
[ 250, 8, 5, 4, 0, -5 ], # 0x58 X
[ 255, 8, 5, 4, 0, -5 ], # 0x59 Y
[ 260, 8, 5, 4, 0, -5 ], # 0x5A Z
[ 265, 8, 5, 4, 0, -5 ], # 0x5B bracketleft
[ 270, 8, 3, 4, 0, -4 ], # 0x5C backslash
[ 273, 8, 5, 4, 0, -5 ], # 0x5D bracketright
[ 278, 8, 2, 4, 0, -5 ], # 0x5E asciicircum
[ 280, 8, 1, 4, 0, -1 ], # 0x5F underscore
[ 281, 8, 2, 3, 0, -5 | |
= None,
**kwargs
):
super(EndpointProperties, self).__init__(origin_path=origin_path, content_types_to_compress=content_types_to_compress, origin_host_header=origin_host_header, is_compression_enabled=is_compression_enabled, is_http_allowed=is_http_allowed, is_https_allowed=is_https_allowed, query_string_caching_behavior=query_string_caching_behavior, optimization_type=optimization_type, probe_path=probe_path, geo_filters=geo_filters, default_origin_group=default_origin_group, url_signing_keys=url_signing_keys, delivery_policy=delivery_policy, web_application_firewall_policy_link=web_application_firewall_policy_link, **kwargs)
self.host_name = None
self.origins = origins
self.origin_groups = origin_groups
self.resource_state = None
self.provisioning_state = None
class EndpointPropertiesUpdateParametersDeliveryPolicy(msrest.serialization.Model):
    """A policy that specifies the delivery rules to be used for an endpoint.

    All required parameters must be populated in order to send to Azure.

    :param description: User-friendly description of the policy.
    :type description: str
    :param rules: Required. A list of the delivery rules.
    :type rules: list[~azure.mgmt.cdn.models.DeliveryRule]
    """

    # constraints enforced by msrest before the model is serialized
    _validation = {
        'rules': {'required': True},
    }

    # maps python attribute names to wire (JSON) keys and msrest type strings
    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[DeliveryRule]'},
    }

    def __init__(
        self,
        *,
        rules: List["DeliveryRule"],
        description: Optional[str] = None,
        **kwargs
    ):
        super(EndpointPropertiesUpdateParametersDeliveryPolicy, self).__init__(**kwargs)
        self.description = description
        self.rules = rules
class EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink(msrest.serialization.Model):
    """Defines the Web Application Firewall policy for the endpoint (if applicable).

    :param id: Resource ID.
    :type id: str
    """

    # maps python attribute names to wire (JSON) keys and msrest type strings
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        **kwargs
    ):
        super(EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink, self).__init__(**kwargs)
        self.id = id
class EndpointUpdateParameters(msrest.serialization.Model):
"""Properties required to create or update an endpoint.
:param tags: A set of tags. Endpoint tags.
:type tags: dict[str, str]
:param origin_path: A directory path on the origin that CDN can use to retrieve content from,
e.g. contoso.cloudapp.net/originpath.
:type origin_path: str
:param content_types_to_compress: List of content types on which compression applies. The value
should be a valid MIME type.
:type content_types_to_compress: list[str]
:param origin_host_header: The host header value sent to the origin with each request. This
property at Endpoint is only allowed when endpoint uses single origin and can be overridden by
the same property specified at origin.If you leave this blank, the request hostname determines
this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud Services require this
host header value to match the origin hostname by default.
:type origin_host_header: str
:param is_compression_enabled: Indicates whether content compression is enabled on CDN. Default
value is false. If compression is enabled, content will be served as compressed if user
requests for a compressed version. Content won't be compressed on CDN when requested content is
smaller than 1 byte or larger than 1 MB.
:type is_compression_enabled: bool
:param is_http_allowed: Indicates whether HTTP traffic is allowed on the endpoint. Default
value is true. At least one protocol (HTTP or HTTPS) must be allowed.
:type is_http_allowed: bool
:param is_https_allowed: Indicates whether HTTPS traffic is allowed on the endpoint. Default
value is true. At least one protocol (HTTP or HTTPS) must be allowed.
:type is_https_allowed: bool
:param query_string_caching_behavior: Defines how CDN caches requests that include query
strings. You can ignore any query strings when caching, bypass caching to prevent requests that
contain query strings from being cached, or cache every request with a unique URL. Possible
values include: "IgnoreQueryString", "BypassCaching", "UseQueryString", "NotSet".
:type query_string_caching_behavior: str or ~azure.mgmt.cdn.models.QueryStringCachingBehavior
:param optimization_type: Specifies what scenario the customer wants this CDN endpoint to
optimize for, e.g. Download, Media services. With this information, CDN can apply scenario
driven optimization. Possible values include: "GeneralWebDelivery", "GeneralMediaStreaming",
"VideoOnDemandMediaStreaming", "LargeFileDownload", "DynamicSiteAcceleration".
:type optimization_type: str or ~azure.mgmt.cdn.models.OptimizationType
:param probe_path: Path to a file hosted on the origin which helps accelerate delivery of the
dynamic content and calculate the most optimal routes for the CDN. This is relative to the
origin path. This property is only relevant when using a single origin.
:type probe_path: str
:param geo_filters: List of rules defining the user's geo access within a CDN endpoint. Each
geo filter defines an access rule to a specified path or content, e.g. block APAC for path
/pictures/.
:type geo_filters: list[~azure.mgmt.cdn.models.GeoFilter]
:param default_origin_group: A reference to the origin group.
:type default_origin_group: ~azure.mgmt.cdn.models.ResourceReference
:param url_signing_keys: List of keys used to validate the signed URL hashes.
:type url_signing_keys: list[~azure.mgmt.cdn.models.UrlSigningKey]
:param delivery_policy: A policy that specifies the delivery rules to be used for an endpoint.
:type delivery_policy: ~azure.mgmt.cdn.models.EndpointPropertiesUpdateParametersDeliveryPolicy
:param web_application_firewall_policy_link: Defines the Web Application Firewall policy for
the endpoint (if applicable).
:type web_application_firewall_policy_link:
~azure.mgmt.cdn.models.EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'origin_path': {'key': 'properties.originPath', 'type': 'str'},
'content_types_to_compress': {'key': 'properties.contentTypesToCompress', 'type': '[str]'},
'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
'is_compression_enabled': {'key': 'properties.isCompressionEnabled', 'type': 'bool'},
'is_http_allowed': {'key': 'properties.isHttpAllowed', 'type': 'bool'},
'is_https_allowed': {'key': 'properties.isHttpsAllowed', 'type': 'bool'},
'query_string_caching_behavior': {'key': 'properties.queryStringCachingBehavior', 'type': 'str'},
'optimization_type': {'key': 'properties.optimizationType', 'type': 'str'},
'probe_path': {'key': 'properties.probePath', 'type': 'str'},
'geo_filters': {'key': 'properties.geoFilters', 'type': '[GeoFilter]'},
'default_origin_group': {'key': 'properties.defaultOriginGroup', 'type': 'ResourceReference'},
'url_signing_keys': {'key': 'properties.urlSigningKeys', 'type': '[UrlSigningKey]'},
'delivery_policy': {'key': 'properties.deliveryPolicy', 'type': 'EndpointPropertiesUpdateParametersDeliveryPolicy'},
'web_application_firewall_policy_link': {'key': 'properties.webApplicationFirewallPolicyLink', 'type': 'EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink'},
}
def __init__(
    self,
    *,
    tags: Optional[Dict[str, str]] = None,
    origin_path: Optional[str] = None,
    content_types_to_compress: Optional[List[str]] = None,
    origin_host_header: Optional[str] = None,
    is_compression_enabled: Optional[bool] = None,
    is_http_allowed: Optional[bool] = None,
    is_https_allowed: Optional[bool] = None,
    query_string_caching_behavior: Optional[Union[str, "QueryStringCachingBehavior"]] = None,
    optimization_type: Optional[Union[str, "OptimizationType"]] = None,
    probe_path: Optional[str] = None,
    geo_filters: Optional[List["GeoFilter"]] = None,
    default_origin_group: Optional["ResourceReference"] = None,
    url_signing_keys: Optional[List["UrlSigningKey"]] = None,
    delivery_policy: Optional["EndpointPropertiesUpdateParametersDeliveryPolicy"] = None,
    web_application_firewall_policy_link: Optional["EndpointPropertiesUpdateParametersWebApplicationFirewallPolicyLink"] = None,
    **kwargs
):
    """Initialize EndpointUpdateParameters.

    All parameters are optional (keyword-only); only the properties that
    are provided are serialized into the update request. See the class
    ``_attribute_map`` for the wire-format key of each attribute.
    """
    super(EndpointUpdateParameters, self).__init__(**kwargs)
    self.tags = tags
    self.origin_path = origin_path
    self.content_types_to_compress = content_types_to_compress
    self.origin_host_header = origin_host_header
    self.is_compression_enabled = is_compression_enabled
    self.is_http_allowed = is_http_allowed
    self.is_https_allowed = is_https_allowed
    self.query_string_caching_behavior = query_string_caching_behavior
    self.optimization_type = optimization_type
    self.probe_path = probe_path
    self.geo_filters = geo_filters
    self.default_origin_group = default_origin_group
    self.url_signing_keys = url_signing_keys
    self.delivery_policy = delivery_policy
    self.web_application_firewall_policy_link = web_application_firewall_policy_link
class ErrorResponse(msrest.serialization.Model):
    """Error response indicates CDN service is not able to process the incoming request. The reason is provided in the error message.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    # both fields are server-populated and may never be set by the client
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Initialize ErrorResponse; ``code`` and ``message`` start as None
        and are filled in by deserialization of a service response."""
        super(ErrorResponse, self).__init__(**kwargs)
        self.code = None
        self.message = None
class GeoFilter(msrest.serialization.Model):
    """Rules defining user's geo access within a CDN endpoint.

    All required parameters must be populated in order to send to Azure.

    :param relative_path: Required. Relative path applicable to geo filter. (e.g. '/mypictures',
     '/mypicture/kitty.jpg', and etc.).
    :type relative_path: str
    :param action: Required. Action of the geo filter, i.e. allow or block access. Possible values
     include: "Block", "Allow".
    :type action: str or ~azure.mgmt.cdn.models.GeoFilterActions
    :param country_codes: Required. Two letter country codes defining user country access in a geo
     filter, e.g. AU, MX, US.
    :type country_codes: list[str]
    """

    # every property of a geo filter must be supplied
    _validation = {
        'relative_path': {'required': True},
        'action': {'required': True},
        'country_codes': {'required': True},
    }

    _attribute_map = {
        'relative_path': {'key': 'relativePath', 'type': 'str'},
        'action': {'key': 'action', 'type': 'str'},
        'country_codes': {'key': 'countryCodes', 'type': '[str]'},
    }

    def __init__(self, *, relative_path: str,
                 action: Union[str, "GeoFilterActions"],
                 country_codes: List[str], **kwargs):
        """Initialize GeoFilter; all three parameters are required."""
        super(GeoFilter, self).__init__(**kwargs)
        self.relative_path = relative_path
        self.action = action
        self.country_codes = country_codes
class HeaderActionParameters(msrest.serialization.Model):
    """Defines the parameters for the request header action.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters".
    :vartype odata_type: str
    :param header_action: Required. Action to perform. Possible values include: "Append",
     "Overwrite", "Delete".
    :type header_action: str or ~azure.mgmt.cdn.models.HeaderAction
    :param header_name: Required. Name of the header to modify.
    :type header_name: str
    :param value: Value for the specified action.
    :type value: str
    """

    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'header_action': {'required': True},
        'header_name': {'required': True},
    }

    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'header_action': {'key': 'headerAction', 'type': 'str'},
        'header_name': {'key': 'headerName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # constant discriminator shared by every instance of this model
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters"

    def __init__(self, *, header_action: Union[str, "HeaderAction"],
                 header_name: str, value: Optional[str] = None, **kwargs):
        """Initialize HeaderActionParameters; ``value`` is optional (e.g. for
        the "Delete" action no value is needed)."""
        super(HeaderActionParameters, self).__init__(**kwargs)
        self.header_action = header_action
        self.header_name = header_name
        self.value = value
class HealthProbeParameters(msrest.serialization.Model):
"""The JSON object that contains the properties to send health probes to origin.
:param probe_path: The path relative to the origin that is used to determine the health of the
origin.
:type probe_path: str
:param probe_request_type: The type of health probe request that is made. Possible values
| |
<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Base class to handle collection of profiles and means across multiple .h5 files
"""
import logging
import numpy as np
import os
import sys
import psutil
import pandas as pd
import time
import shutil
from warnings import warn
from reV.handlers.outputs import Outputs
from reV.utilities.exceptions import (CollectionRuntimeError,
CollectionValueError,
CollectionWarning)
from reV.utilities import log_versions
from rex.utilities.loggers import log_mem
logger = logging.getLogger(__name__)
class DatasetCollector:
    """
    Class to collect single datasets from several source files into a final
    output file, chunking the work to respect a memory utilization limit.
    """

    def __init__(self, h5_file, source_files, gids, dset_in, dset_out=None,
                 mem_util_lim=0.7):
        """
        Parameters
        ----------
        h5_file : str
            Path to h5_file into which dataset is to be collected.
        source_files : list
            List of source filepaths.
        gids : list
            List of gids to be collected.
        dset_in : str
            Dataset to collect.
        dset_out : str
            Dataset into which collected data is to be written. Defaults
            to ``dset_in`` when None.
        mem_util_lim : float
            Memory utilization limit (fractional). This sets how many sites
            will be collected at a time.
        """
        self._h5_file = h5_file
        self._source_files = source_files
        self._gids = gids
        self._dset_in = dset_in
        if dset_out is None:
            dset_out = dset_in
        self._dset_out = dset_out

        # fraction of total system memory this collector may use
        tot_mem = psutil.virtual_memory().total
        self._mem_avail = mem_util_lim * tot_mem

        # pre-flight check: dataset attrs, dimensionality, per-site memory
        self._attrs, self._axis, self._site_mem_req = self._pre_collect()

        logger.debug('Available memory for collection is {} bytes'
                     .format(self._mem_avail))
        logger.debug('Site memory requirement is: {} bytes'
                     .format(self._site_mem_req))

    @staticmethod
    def parse_meta(h5_file):
        """
        Extract and convert meta data from a rec.array to pandas.DataFrame.

        Parameters
        ----------
        h5_file : str
            Path to .h5 file from which meta is to be parsed.

        Returns
        -------
        meta : pandas.DataFrame
            Portion of meta data corresponding to sites in h5_file.
        """
        with Outputs(h5_file, mode='r') as f:
            meta = f.meta

        return meta

    @staticmethod
    def _get_site_mem_req(shape, dtype, n=100):
        """Get the memory requirement to collect one site from a dataset of
        shape and dtype.

        Parameters
        ----------
        shape : tuple
            Shape of dataset to be collected (n_time, n_sites).
        dtype : np.dtype
            Numpy dtype of dataset (disk dtype).
        n : int
            Number of sites to prototype the memory req with.

        Returns
        -------
        site_mem : float
            Memory requirement in bytes for one site from a dataset with
            shape and dtype.
        """
        m = 1
        if len(shape) > 1:
            m = shape[0]

        # prototype an n-site array and average the footprint per site
        site_mem = sys.getsizeof(np.ones((m, n), dtype=dtype)) / n

        return site_mem

    def _pre_collect(self):
        """Run a pre-collection check and get relevant dset attrs.

        Returns
        -------
        attrs : dict
            Dictionary of dataset attributes for the dataset being collected.
        axis : int
            Axis size (1 is 1D array, 2 is 2D array).
        site_mem_req : float
            Memory requirement in bytes to collect a single site from one
            source file.
        """
        with Outputs(self._source_files[0], mode='r') as f:
            shape, dtype, chunks = f.get_dset_properties(self._dset_in)
            attrs = f.get_attrs(self._dset_in)
            axis = len(f[self._dset_in].shape)

        with Outputs(self._h5_file, mode='a') as f:
            if axis == 1:
                dset_shape = (len(f),)
            elif axis == 2:
                # 2D (profile) data needs the output time_index to size the
                # time dimension before any profiles can be written
                if 'time_index' in f.datasets:
                    dset_shape = f.shape
                else:
                    m = ("'time_index' must be combined "
                         "before profiles can be "
                         "combined.")
                    logger.error(m)
                    raise CollectionRuntimeError(m)
            else:
                m = ('Cannot collect dset "{}" with '
                     'axis {}'.format(self._dset_in, axis))
                logger.error(m)
                raise CollectionRuntimeError(m)

            if self._dset_out not in f.datasets:
                f._create_dset(self._dset_out, dset_shape, dtype,
                               chunks=chunks, attrs=attrs)

        site_mem_req = self._get_site_mem_req(shape, dtype)

        return attrs, axis, site_mem_req

    @staticmethod
    def _get_gid_slice(gids_out, source_gids, fn_source):
        """Find the site slice that the chunked set of source gids belongs to.

        Parameters
        ----------
        gids_out : list
            List of resource GIDS in the final output meta data f_out.
        source_gids : list
            List of resource GIDS in one chunk of source data.
        fn_source : str
            Source filename for warning printout.

        Returns
        -------
        site_slice : slice | np.ndarray
            Slice in the final output file to write data to from source gids.
            If gids in destination file are non-sequential, a boolean array of
            indexes is returned and a warning is printed.
        """
        locs = np.where(np.isin(gids_out, source_gids))[0]

        # BUGFIX: use locs.size instead of any(locs). any(locs) is False when
        # the only matching location is index 0, which incorrectly raised a
        # CollectionRuntimeError for perfectly valid gids.
        if locs.size == 0:
            e = ('DatasetCollector could not locate source gids in '
                 'output gids. \n\t Source gids: {} \n\t Output gids: {}'
                 .format(source_gids, gids_out))
            logger.error(e)
            raise CollectionRuntimeError(e)

        sequential_locs = np.arange(locs.min(), locs.max() + 1)

        if len(locs) != len(sequential_locs):
            # non-contiguous destination: fall back to boolean fancy indexing
            w = ('GID indices for source file "{}" are not '
                 'sequential in destination file!'.format(fn_source))
            logger.warning(w)
            warn(w, CollectionWarning)
            site_slice = np.isin(gids_out, source_gids)
        else:
            site_slice = slice(locs.min(), locs.max() + 1)

        return site_slice

    def _get_source_gid_chunks(self, f_source):
        """Split the gids from the f_source into chunks based on memory req.

        Parameters
        ----------
        f_source : reV.handlers.outputs.Output
            Source file handler.

        Returns
        -------
        all_source_gids : list
            List of all source gids to be collected.
        source_gid_chunks : list
            List of source gid chunks to collect.
        """
        all_source_gids = f_source.get_meta_arr('gid')
        mem_req = (len(all_source_gids) * self._site_mem_req)

        if mem_req > self._mem_avail:
            # increase the number of chunks until one chunk fits in memory
            n = 2
            while True:
                source_gid_chunks = np.array_split(all_source_gids, n)
                new_mem_req = (len(source_gid_chunks[0]) * self._site_mem_req)
                if new_mem_req > self._mem_avail:
                    n += 1
                else:
                    logger.debug('Collecting dataset "{}" in {} chunks with '
                                 'an estimated {} bytes in each chunk '
                                 '(mem avail limit is {} bytes).'
                                 .format(self._dset_in, n, new_mem_req,
                                         self._mem_avail))
                    break
        else:
            source_gid_chunks = [all_source_gids]

        return all_source_gids, source_gid_chunks

    def _collect_chunk(self, all_source_gids, source_gids, f_out,
                       f_source, fp_source):
        """Collect one set of source gids from f_source to f_out.

        Parameters
        ----------
        all_source_gids : list
            List of all source gids to be collected.
        source_gids : np.ndarray | list
            Source gids to be collected.
        f_out : reV.handlers.outputs.Output
            Output file handler.
        f_source : reV.handlers.outputs.Output
            Source file handler.
        fp_source : str
            Source filepath.
        """
        out_slice = self._get_gid_slice(self._gids, source_gids,
                                        os.path.basename(fp_source))

        # contiguous read window in the source file covering source_gids
        source_i0 = np.where(all_source_gids == np.min(source_gids))[0][0]
        source_i1 = np.where(all_source_gids == np.max(source_gids))[0][0]
        source_slice = slice(source_i0, source_i1 + 1)
        # mask out any source sites that are not requested in self._gids
        source_indexer = np.isin(source_gids, self._gids)

        logger.debug('\t- Running low mem collection of "{}" for '
                     'output site {} from source site {} and file : {}'
                     .format(self._dset_in, out_slice, source_slice,
                             os.path.basename(fp_source)))

        try:
            if self._axis == 1:
                data = f_source[self._dset_in, source_slice]
                if not all(source_indexer):
                    data = data[source_indexer]
                f_out[self._dset_out, out_slice] = data

            elif self._axis == 2:
                data = f_source[self._dset_in, :, source_slice]
                if not all(source_indexer):
                    data = data[:, source_indexer]
                f_out[self._dset_out, :, out_slice] = data

        except Exception as e:
            logger.exception('Failed to collect source file {}. '
                             'Raised the following exception:\n{}'
                             .format(os.path.basename(fp_source), e))
            # bare raise preserves the original exception and traceback
            raise

    def _collect(self):
        """Simple & robust serial collection optimized for low memory usage."""
        with Outputs(self._h5_file, mode='a') as f_out:
            for fp in self._source_files:
                with Outputs(fp, mode='r') as f_source:
                    x = self._get_source_gid_chunks(f_source)
                    all_source_gids, source_gid_chunks = x

                    for source_gids in source_gid_chunks:
                        self._collect_chunk(all_source_gids, source_gids,
                                            f_out, f_source, fp)

                log_mem(logger, log_level='DEBUG')

    @classmethod
    def collect_dset(cls, h5_file, source_files, gids, dset_in, dset_out=None,
                     mem_util_lim=0.7):
        """Collect a single dataset from a list of source files into a final
        output file.

        Parameters
        ----------
        h5_file : str
            Path to h5_file into which dataset is to be collected.
        source_files : list
            List of source filepaths.
        gids : list
            List of gids to be collected.
        dset_in : str
            Dataset to collect.
        dset_out : str
            Dataset into which collected data is to be written.
        mem_util_lim : float
            Memory utilization limit (fractional). This sets how many sites
            will be collected at a time.
        """
        dc = cls(h5_file, source_files, gids, dset_in, dset_out=dset_out,
                 mem_util_lim=mem_util_lim)
        dc._collect()
class Collector:
"""
Class to handle the collection and combination of .h5 files
"""
def __init__(self, h5_file, h5_dir, project_points, file_prefix=None,
             clobber=False):
    """Collect and combine the .h5 outputs under ``h5_dir`` into ``h5_file``.

    Parameters
    ----------
    h5_file : str
        Path to .h5 file into which data will be collected.
    h5_dir : str
        Root directory containing .h5 files to combine.
    project_points : str | slice | list | pandas.DataFrame | None
        Project points that correspond to the full collection of points
        contained in the .h5 files to be collected. None if the points
        list is to be ignored (collect all data in the h5 files).
    file_prefix : str
        .h5 file prefix; if None, collect all files in h5_dir.
    clobber : bool
        Flag to purge the output .h5 file if it already exists.
    """
    log_versions(logger)

    if clobber and os.path.isfile(h5_file):
        warn('{} already exists and is being replaced'.format(h5_file),
             CollectionWarning)
        os.remove(h5_file)

    self._h5_out = h5_file
    # never pick up the output file itself as a collection source
    out_name = os.path.basename(self._h5_out)
    self._h5_files = self.find_h5_files(h5_dir, file_prefix=file_prefix,
                                        ignore=out_name)

    if project_points is None:
        self._gids = self.parse_gids_from_files(self._h5_files)
    else:
        self._gids = self.parse_project_points(project_points)

    self.combine_meta()
@staticmethod
def find_h5_files(h5_dir, file_prefix=None, ignore=None):
"""
Search h5_dir for .h5 file, return sorted
If file_prefix is not None, only return .h5 files with given prefix
Parameters
----------
h5_dir : str
Root directory to search
file_prefix : str
Prefix for .h5 file in h5_dir, if None return all .h5 files
| |
for displaying the planform (it displays the `data`
field).
.. rubric:: Developer Notes
All subclassing objects must implement:
* a property named `data` that points to some field (i.e., an attribute
of the planform) that best characterizes the Planform. For example,
the OAP planform `data` property points to the `sea_angles` field.
All subclassing objects should consider implementing:
* the `show` method takes (optionally) a string argument specifying the
field to display, which can match any attriute of the
`SpecialtyPlanform`. If no argument is passed to `show`, the `data`
field is displayed. A :obj:`VariableInfo` object
`self._default_varinfo` is created on instantiating a subclass, which
will be used to style the displayed field. You can add different
`VariableInfo` objects with the name matching any other field of the
planform to use that style instead; for example, OAP implements
`self._sea_angles_varinfo`, which is used if the `sea_angles` field
is specified to :meth:`show`.
* The `self._default_varinfo` can be overwritten in a subclass
(after ``super().__init__``) to style the `show` default field
(`data`) a certain way. For example, OAP sets ``self._default_varinfo
= self._sea_angles_varinfo``.
"""
def __init__(self, planform_type, *args, **kwargs):
    """Initialize the SpecialtyPlanform base class.

    Only called by subclassing methods; delegates directly to
    `BasePlanform.__init__`.

    Parameters
    ----------
    planform_type : :obj:`str`
        A string specifying the type of planform being created.
    *args
        Passed to `BasePlanform.__init__`.
    **kwargs
        Passed to `BasePlanform.__init__`.
    """
    super().__init__(planform_type, *args, **kwargs)

    # fallback styling used by `show` whenever no field-specific
    # VariableInfo (``self._<field>_varinfo``) has been registered
    self._default_varinfo = plot.VariableInfo('data', label='data')
@property
@abc.abstractmethod
def data(self):
    """The public data field.

    This attribute *must* be implemented as an alias to another attribute.
    The choice of field is up to the developer; it should be the field that
    best characterizes the planform (for example, the OAP planform points
    `data` at its `sea_angles` field).
    """
    ...
def __getitem__(self, slc):
    """Slice the planform.

    Basic slicing is delegated to the primary `data` field, so the
    returned value is equivalent to ``self.data[slc]``.
    """
    primary = self.data
    return primary[slc]
def show(self, var=None, ax=None, title=None, ticks=False,
         colorbar=True, colorbar_label=False):
    """Show the planform.

    Display a field of the planform, selected by attribute name. The
    styling comes from a :obj:`VariableInfo` named
    ``self._<var>_varinfo`` when one exists, otherwise from
    ``self._default_varinfo``.

    Parameters
    ----------
    var : :obj:`str`, optional
        Which field to show. Must be an attribute of the planform. If not
        given, the `data` field is displayed.
    ax : :obj:`~matplotlib.pyplot.Axes` object, optional
        A `matplotlib` `Axes` object to plot the section. Optional; if not
        provided, a call is made to ``plt.gca()`` to get the current (or
        create a new) `Axes` object.
    title : :obj:`str`, optional
        Title for the axis; forwarded to the underlying display helper.
    ticks : :obj:`bool`, optional
        Whether axis ticks are shown. Default is False.
    colorbar : :obj:`bool`, optional
        Whether a colorbar is appended to the axis.
    colorbar_label : :obj:`bool`, `str`, optional
        Display a label of the variable name along the colorbar. Default is
        False, display nothing. If ``colorbar_label=True``, the label name
        from the :obj:`~deltametrics.plot.VariableSet` is used. Other
        arguments are attempted to coerce to `str`, and the literal is
        displayed.

    Raises
    ------
    TypeError
        If `var` is neither None nor a string.
    """
    if var is None:
        # default: show the primary data field with default styling
        field_to_show = self.data
        varinfo_to_use = self._default_varinfo
    elif isinstance(var, str):
        # will raise AttributeError if var is not a planform attribute
        field_to_show = self.__getattribute__(var)
        candidate_name = '_' + var + '_varinfo'
        if hasattr(self, candidate_name):
            varinfo_to_use = self.__getattribute__(candidate_name)
        else:
            varinfo_to_use = self._default_varinfo
    else:
        raise TypeError('Bad value for `var`: {0}'.format(var))

    self._show(
        field_to_show, varinfo_to_use,
        ax=ax, title=title, ticks=ticks,
        colorbar=colorbar, colorbar_label=colorbar_label)
class OpeningAnglePlanform(SpecialtyPlanform):
"""Planform for handling the Shaw Opening Angle Method.
This `Planform` (called `OAP` for short) is a wrapper/handler for the
input and output from the :func:`shaw_opening_angle_method`. The `OAP` is a
convenient way to manage extraction of a shoreline or a delta topset area.
Moreover, the `OAP` can be used as the input for :doc:`many types of
Mask </reference/mask/index>` objects, so it is often computationally
advantageous to compute this `Planform` once, and then use it to create
many different types of masks.
Examples
--------
Instantiate the `OpeningAnglePlanform` from an **inverted** binary mask of
elevation data (i.e., from an :obj:`~deltametrics.mask.ElevationMask`).
Note that the below example is the most verbose method for creating the
`OAP`. Consider available static methods.
.. plot::
:context: reset
:include-source:
>>> golfcube = dm.sample_data.golf()
>>> _EM = dm.mask.ElevationMask(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
>>> # extract a mask of area below sea level as the
>>> # inverse of the ElevationMask
>>> below_mask = ~(_EM.mask)
>>> OAP = dm.plan.OpeningAnglePlanform(below_mask)
The OAP stores information computed from the
:func:`shaw_opening_angle_method`. See the two properties of the OAP
:obj:`below_mask` and :obj:`sea_angles`.
.. plot::
:context:
fig, ax = plt.subplots(1, 3, figsize=(10, 4))
golfcube.quick_show('eta', idx=-1, ax=ax[0])
im1 = ax[1].imshow(OAP.below_mask,
cmap='Greys_r')
im2 = ax[2].imshow(OAP.sea_angles,
cmap='jet')
dm.plot.append_colorbar(im2, ax=ax[2])
ax[0].set_title('input elevation data')
ax[1].set_title('OAP.below_mask')
ax[2].set_title('OAP.sea_angles')
for i in range(1, 3):
ax[i].set_xticks([])
ax[i].set_yticks([])
"""
@staticmethod
def from_arrays(*args):
    """Create directly from arrays.

    .. warning:: not implemented; always raises ``NotImplementedError``.
    """
    raise NotImplementedError
@staticmethod
def from_elevation_data(elevation_data, **kwargs):
    """Create an `OpeningAnglePlanform` from elevation data.

    An `ElevationMask` is first computed from the input elevation array;
    its inverse (the below-sea-level area) is then used to build the OAP.

    .. note::

        Keyword arguments are passed to the `ElevationMask` *and* to the
        `OpeningAnglePlanform`, and thus passed to
        :func:`shaw_opening_angle_method`.

    .. important::

        The `elevation_threshold` argument is implicitly required in this
        method, because it is required to instantiate an
        :obj:`ElevationMask` from elevation data.

    Parameters
    ----------
    elevation_data : :obj:`ndarray`
        The elevation data to create the `ElevationMask` that is in
        turn used to create the `OpeningAnglePlanform`.

    Examples
    --------
    .. doctest::

        >>> golfcube = dm.sample_data.golf()

        >>> OAP = dm.plan.OpeningAnglePlanform.from_elevation_data(
        ...     golfcube['eta'][-1, :, :],
        ...     elevation_threshold=0)
    """
    # temporary elevation mask from the raw data
    _elev_mask = mask.ElevationMask(elevation_data, **kwargs)

    # the OAM operates on the area *below* sea level, i.e., the inverse
    below = ~(_elev_mask.mask)

    return OpeningAnglePlanform(below, **kwargs)
@staticmethod
def from_ElevationMask(ElevationMask, **kwargs):
    """Create an `OpeningAnglePlanform` from an `ElevationMask`.

    .. note::

        Keyword arguments are passed to the `OpeningAnglePlanform`, and
        thus passed to :func:`shaw_opening_angle_method`.

    Parameters
    ----------
    ElevationMask : :obj:`~deltametrics.mask.ElevationMask`
        The :obj:`ElevationMask` to be used to create the
        `OpeningAnglePlanform`.

    Raises
    ------
    TypeError
        If the input is not an :obj:`~deltametrics.mask.ElevationMask`.

    Examples
    --------
    .. doctest::

        >>> golfcube = dm.sample_data.golf()
        >>> _EM = dm.mask.ElevationMask(
        ...     golfcube['eta'][-1, :, :],
        ...     elevation_threshold=0)

        >>> OAP = dm.plan.OpeningAnglePlanform.from_ElevationMask(
        ...     _EM)
    """
    if not isinstance(ElevationMask, mask.ElevationMask):
        raise TypeError('Must be type: ElevationMask.')

    # invert the mask for the below sea level area
    _below_mask = ~(ElevationMask.mask)

    # BUGFIX: forward **kwargs to the constructor. The docstring (and
    # `from_mask`, which delegates here) promise that keyword arguments
    # reach the OpeningAnglePlanform / shaw_opening_angle_method, but they
    # were previously dropped.
    return OpeningAnglePlanform(_below_mask, **kwargs)
@staticmethod
def from_mask(UnknownMask, **kwargs):
    """Wraps :obj:`from_ElevationMask`.

    All arguments are forwarded unchanged.
    """
    return OpeningAnglePlanform.from_ElevationMask(UnknownMask, **kwargs)
def __init__(self, *args, **kwargs):
"""Init.
EXPECTS A BINARY OCEAN MASK AS THE INPUT!
.. note:: needs docstring.
"""
super().__init__('opening angle', *args)
self._shape = None
self._sea_angles = None
self._below_mask = None
# set variable info display options
self._sea_angles_varinfo = plot.VariableInfo(
'sea_angles', cmap=plt.cm.jet, label='opening angle')
self._below_mask_varinfo = plot.VariableInfo(
'below_mask', cmap=plt.cm.gray, label='where below')
self._default_varinfo = self._sea_angles_varinfo
# check for inputs to return or proceed
if (len(args) == 0):
_allow_empty = kwargs.pop('allow_empty', False)
if _allow_empty:
# do nothing and return partially instantiated object
return
else:
raise ValueError(
'Expected 1 input, got 0.')
if not (len(args) == 1):
raise ValueError(
'Expected 1 input, got %s.' % str(len(args)))
# process the argument to the omask needed for Shaw OAM
if utils.is_ndarray_or_xarray(args[0]):
_arr = args[0]
# check that is boolean or integer binary
if (_arr.dtype == bool):
_below_mask = _arr
elif (_arr.dtype == int):
if np.all(np.logical_or(_arr == 0, _arr == 1)):
_below_mask = _arr
else:
ValueError(
'The input was an integer array, but some elements in '
'the array were not 0 or 1.')
else:
raise TypeError(
'The input was not an integer or boolean array, but was '
'{0}. If you are trying to instantiate an OAP from '
'elevation data directly, see static method '
'`OpeningAnglePlanform.from_elevation_data`.')
# now check the type and allocate the arrays as xr.DataArray
if isinstance(_below_mask, xr.core.dataarray.DataArray):
self._below_mask = xr.zeros_like(_below_mask, dtype=bool)
self._below_mask.name = 'below_mask'
self._sea_angles = xr.zeros_like(_below_mask, dtype=float)
self._sea_angles.name = 'sea_angles'
elif isinstance(_below_mask, np.ndarray):
# this will use meshgrid to fill out with dx=1 in shape of array
self._below_mask = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=bool),
name='below_mask')
self._sea_angles = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=float),
name='sea_angles')
else:
raise TypeError('Invalid type | |
<reponame>Donnyvdm/gdsctools
# -*- python -*-
# -*- coding utf-8 -*-
#
# This file is part of GDSCTools software
#
# Copyright (c) 2015 - Wellcome Trust Sanger Institute
# All rights reserved
#
# File author(s): <NAME> <<EMAIL>WE HERE>
#
# Distributed under the BSD 3-Clause License.
# See accompanying file LICENSE.txt distributed with this software
#
# website: http://github.com/CancerRxGene/gdsctools
#
##############################################################################
"""Code related to the ANOVA analysis to find associations between drug IC50s
and genomic features"""
import collections
import pandas as pd
from easydev import Progress
from gdsctools.stats import MultipleTesting
from gdsctools import readers
from gdsctools.settings import ANOVASettings
from gdsctools.anova_results import ANOVAResults
from gdsctools.errors import GDSCToolsDuplicatedDrugError
import colorlog as logger
__all__ = ['BaseModels']
class BaseModels(object):
"""A Base class for ANOVA / ElaticNet models
"""
def __init__(self, ic50, genomic_features=None,
             drug_decode=None, verbose=True,
             set_media_factor=False):
    """.. rubric:: Constructor

    :param DataFrame IC50: a dataframe with the IC50. Rows should be
        the COSMIC identifiers and columns should be the Drug names
        (or identifiers)
    :param features: another dataframe with rows as in the IC50 matrix
        and columns as features. The first 3 columns must be named
        specifically to hold tissues, MSI (see format).
    :param drug_decode: a 3 column CSV file with drug's name and targets
        see :mod:`readers` for more information.
    :param verbose: verbosity in "WARNING", "ERROR", "DEBUG", "INFO"
    :param bool set_media_factor: if True and the feature matrix has no
        media column, populate one via ``fill_media_factor``.

    The attribute :attr:`settings` contains specific settings related
    to the analysis or visulation.
    """
    self.verbose = verbose
    self._init_called = False

    # We first need to read the IC50 using a dedicated reader
    try:
        # Simple reader for data without duplicated drug entries
        self.ic50 = readers.IC50(ic50)
    except GDSCToolsDuplicatedDrugError:
        print("duplicated error")
        # fall back to the cluster reader, which can cope with
        # duplicated drug identifiers
        try:
            from gdsctools.gdsc import IC50Cluster
            self.ic50 = IC50Cluster(ic50)
        except Exception as err:
            raise(err)

    # Reads features if provided, otherwise use a default data set
    if genomic_features is None:
        # Reads default version provided with the package
        self.features = readers.GenomicFeatures()
    else:
        self.features = readers.GenomicFeatures(genomic_features)

    # optionally synthesize the MEDIA factor column when it is absent
    if self.features.found_media is False and \
            set_media_factor is True:
        if self.verbose:
            print('Populating MEDIA Factor in the Genomic Feature matrix')
        self.features.fill_media_factor()

    #: a CSV with 3 columns used in the report
    self.read_drug_decode(drug_decode)

    # create the multiple testing factory used in anova_all()
    self.multiple_testing = MultipleTesting()

    # We prune the genomic features by settings the cosmic ids of
    # the features to be those of the cosmic ids of the IC50. See
    # readers module. This affectation, prune the features dataframe
    # automatically. This fails if a cosmic identifier is not
    # found in the features' cosmic ids, so let us catch the error
    # before hand to give a friendly warning instead
    unknowns = set(self.ic50.cosmicIds).difference(
        set(self.features.cosmicIds))

    if len(unknowns) > 0 and self.verbose:
        print("WARNING: " +
              "%s cosmic identifiers in your IC50 " % len(unknowns) +
              "could not be found in the genomic feature matrix. " +
              "They will be dropped. Consider using a user-defined " +
              "genomic features matrix")

    # drop the unmatched ids, then align the feature matrix to the IC50
    self.ic50.drop_cosmic(list(unknowns))
    self.features.cosmicIds = self.ic50.cosmicIds
    #self.cosmicIds = self.ic50.cosmicIds

    #: an instance of :class:`~gdsctools.settings.ANOVASettings`
    self.settings = ANOVASettings()

    # alias to all column names to store results
    # cast to list (Python3).
    self.column_names = list(ANOVAResults().mapping.keys())

    # skip assoc_id for now
    self._odof_dict = dict([(name, None)
                            for name in self.column_names])

    # a cache to store ANOVA results for each drug
    self.individual_anova = {}

    # must be called if ic50 or features are changed.
    self.init()
def _autoset_msi_factor(self):
if self.features.found_msi:
# if the number of pos. (or neg.) factors is not large enough then
# the MSI factor is not used
msi_name = self.features.colnames.msi
self.msi_factor = self.features.df[msi_name]
total = len(self.msi_factor)
positives = self.msi_factor.sum()
negatives = total - positives
# we must have at least 2 positives or 2 negative
# This is therefore a < comparison here below. See in
# _get_one_drug_one_feature_data that we use >= which
# is consistent.
if positives < self.settings.MSI_factor_threshold:
self.settings.include_MSI_factor = False
if negatives < self.settings.MSI_factor_threshold:
self.settings.include_MSI_factor = False
else:
self.settings.include_MSI_factor = False
self.settings.analysis_type = 'feature_only'
def _autoset_tissue_factor(self):
# select tissue based on the features
tissue_name = self.features.colnames.tissue
self.tissue_factor = self.features.df[tissue_name]
if len(self.tissue_factor.unique()) == 1:
# there is only one tissue
tissue = self.tissue_factor.unique()[0]
self.settings.analysis_type = tissue
self.settings.directory = tissue
else:
# this is a PANCAN analysis
self.settings.analysis_type = 'PANCAN'
def _autoset_media_factor(self):
if self.settings.analysis_type != 'PANCAN':
self.settings.include_media_factor = False
if self.features.found_media is True:
# Not authorised. See
# http://gdsctools.readthedocs.io/en/master/anova_parttwo.html#regression-analysis
print("WARNING")
print("You have only one Tissue %s " % self.features.tissues[0])
print("When using MEDIA FACTOR, you must use MSI and a PANCAN analysis")
print("We DO NOT include the MEDIA Factor in the analysis hereafter\n")
elif self.features.found_media is True:
self.settings.include_media_factor = True
colname = self.features.colnames.media
self.media_factor = self.features.df[colname]
else:
self.settings.include_media_factor = False
def set_cancer_type(self, ctype=None):
"""Select only a set of tissues.
Input IC50 may be PANCAN (several cancer tissues).
This function can be used to select a subset of tissues.
This function changes the :attr:`ic50` dataframe and possibly
the feature as well if some are not relevant anymore (sum of the
column is zero for instance).
"""
if ctype is None:
return
if ctype == 'PANCAN':
# Nothing to do, keep everything
return
if isinstance(ctype, str):
ctype = [str(ctype)]
for this in ctype:
assert this in self.features.tissues, "%s not found" % ctype
# keep only features that correspond to the tissue
self.features.keep_tissue_in(ctype)
self.ic50.df = self.ic50.df.loc[self.features.df.index]
self.init()
def read_settings(self, settings):
    """Load settings from a JSON source and re-apply the cancer type.

    The settings object is updated in place via ``from_json``; the
    analysis type it defines is then used to (re)select tissues.
    """
    cfg = self.settings
    cfg.from_json(settings)
    self.set_cancer_type(cfg.analysis_type)
def init(self):
    """Precompute per-drug data structures used by the ANOVA regression.

    Builds :attr:`ic50_dict` (per-drug Y values and cosmic indices),
    the tissue/MSI/media factors and their per-drug dictionaries, and
    the dummy-variable design matrix :attr:`_tissue_dummies`.
    """
    # Some preprocessing to speed up data access in ANOVA
    ic50_parse = self.ic50.df.copy().unstack().dropna()
    # for each drug, we store the IC50s (Y) and corresponding indices
    # of cosmic identifiers + since v0.13 the real indices
    # Create a dictionary version of the data
    # to be accessed per drug where NA have already been
    # removed. Each drug is a dictionary with 2 keys:
    # Y for the data and indices for the cosmicID where
    # there is an IC50 measured.
    self.ic50_dict = dict([
        (d, {'indices': ic50_parse.loc[d].index,
             'Y': ic50_parse.loc[d].values}) for d in self.ic50.drugIds])
    # map each cosmic identifier back to its positional index
    cosmicIds = list(self.ic50.df.index)
    for key in self.ic50_dict.keys():
        indices = [cosmicIds.index(this) for this in
                   self.ic50_dict[key]['indices']]
        self.ic50_dict[key]['real_indices'] = indices
    # save the tissues
    self._autoset_tissue_factor()
    # and MSI (Microsatellite instability) status of the samples.
    self._autoset_msi_factor()
    # and (growth) media factor
    self._autoset_media_factor()
    # dictionaries to speed up code.
    self.msi_dict = {}
    self.tissue_dict = {}
    self.media_dict = {}
    # fill the dictionaries for each drug once for all
    for drug_name in self.ic50.drugIds:
        # NOTE: indices are actually cosmid ids (not indices from 0 to N)
        indices = self.ic50_dict[drug_name]['indices']
        # MSI, media and tissue are not large data files and can be stored
        # enterily
        if self.features.found_msi:
            self.msi_dict[drug_name] = self.msi_factor.loc[indices]
        if self.settings.include_media_factor:
            self.media_dict[drug_name] = self.media_factor.loc[indices]
        self.tissue_dict[drug_name] = self.tissue_factor.loc[indices]
    # some preprocessing for the OLS computation.
    # We create the dummies for the tissue factor once for all
    # Note that to agree with R convention, we have to resort the column
    # to agree with R convention that is a<B==b<c instead of
    # where A<B<C<a<b<c (in Python)
    self._tissue_dummies = pd.get_dummies(self.tissue_factor)
    columns = self._tissue_dummies.columns
    # case-insensitive sort mimics R's factor-level ordering
    columns = sorted(columns, key=lambda s: s.lower())
    columns = ['C(tissue)[T.' + x + ']' for x in columns]
    self._tissue_dummies.columns = columns
    if self.settings.include_media_factor:
        # media dummies are appended to the tissue design matrix
        self._media_dummies = pd.get_dummies(self.media_factor)
        columns = self._media_dummies.columns
        columns = ['C(media)[T.' + x + ']' for x in columns]
        self._media_dummies.columns = columns
        for col in columns:
            self._tissue_dummies[col] = self._media_dummies[col]
    # placeholder columns for MSI, the feature and the intercept;
    # they are overwritten per regression
    N = len(self._tissue_dummies)
    self._tissue_dummies['C(msi)[T.1]'] = [1]*N
    self._tissue_dummies['feature'] = [1] * N
    self._tissue_dummies.insert(0, 'Intercept', [1] * N)
    # drop first feature in the tissues that seems to be used as a
    # reference in the regression
    #tissues = [x for x in self._tissue_dummies.columns if 'tissue' in x]
    #self._tissue_dummies.drop(tissues[0], axis=1, inplace=True)
    """if self.settings.include_media_factor:
        # Drop first category in the media factor ?! like for tissues.
        # What is the rationale ?
        media = [x for x in self._tissue_dummies.columns if 'media' in x]
        self._tissue_dummies.drop(media[0], axis=1, inplace=True)
    """
    # reset the buffer.
    self.individual_anova = {}
    # report the included factors once only (first call)
    if self.verbose and self._init_called is False:
        for this in ['tissue', 'media', 'msi', 'feature']:
            if this in self._get_analysis_mode():
                logger.debug(this.upper() + " FACTOR : included")
            else:
                logger.debug(this.upper() + " FACTOR : NOT included")
    self._init_called = True
def _get_cosmics(self):
    # accessor backing the ``cosmicIds`` property
    ids = self.ic50.cosmicIds
    return ids
def _set_cosmics(self, cosmics):
    # Setter backing the ``cosmicIds`` property: propagate the new
    # cosmic identifiers to both the IC50s and the features, then
    # rebuild all cached per-drug structures.
    self.ic50.cosmicIds = cosmics
    self.features.cosmicIds = cosmics
    self.init()
    # invalidate previously computed per-drug ANOVA results
    self.individual_anova = {}
cosmicIds = property(_get_cosmics, | |
mem = [0x2e, 0xbb, 0xaa]
inst = disassemble(mem, pc=0)
self.assertEqual(str(inst), "rol 0xaabb")
self.assertEqual(len(inst), len(mem))
self.assertEqual(inst.addr_mode, AddressModes.Absolute)
self.assertEqual(inst.data_ref_address, 0xaabb)
self.assertEqual(inst.code_ref_address, None)
def _check(self, mem, text, addr_mode, data_ref=None, code_ref=None):
    """Disassemble ``mem`` at pc=0 and assert every decoded attribute.

    Verifies the rendered assembly text, the instruction length, the
    addressing mode, and both reference addresses (``None`` when the
    instruction carries no data/code reference).  Factors out the five
    assertions previously copy-pasted into every opcode test.
    """
    inst = disassemble(mem, pc=0)
    self.assertEqual(str(inst), text)
    self.assertEqual(len(inst), len(mem))
    self.assertEqual(inst.addr_mode, addr_mode)
    self.assertEqual(inst.data_ref_address, data_ref)
    self.assertEqual(inst.code_ref_address, code_ref)

# seb 1,0xaa        ;2f aa     Zero Page Bit
def test_2f_seb(self):
    self._check([0x2f, 0xaa], "seb 1,0xaa",
                AddressModes.ZeroPageBit, data_ref=0xaa)

# bmi label11       ;30 fe     Relative
def test_30_bmi(self):
    self._check([0x30, 0x1e], "bmi 0x0020",
                AddressModes.Relative, code_ref=0x0020)

# and [0xaa],y      ;31 aa     Indirect Y
def test_31_and(self):
    self._check([0x31, 0xaa], "and [0xaa],y",
                AddressModes.IndirectY, data_ref=0xaa)

# set               ;32        Implied
def test_32_set(self):
    self._check([0x32], "set", AddressModes.Implied)

# bbc 1,a,label12   ;33 fe     Accumulator Bit Relative
def test_33_bbc(self):
    self._check([0x33, 0x1e], "bbc 1,a,0x0020",
                AddressModes.AccumulatorBitRelative, code_ref=0x0020)

# .byte 0x34        ;34 00     Illegal
def test_34_illegal(self):
    self._check([0x34], ".byte 0x34", AddressModes.Illegal)

# and 0xaa,x        ;35 aa     Zero Page X
def test_35_and(self):
    self._check([0x35, 0xaa], "and 0xaa,x",
                AddressModes.ZeroPageX, data_ref=0xaa)

# rol 0xaa,x        ;36 aa     Zero Page X
def test_36_rol(self):
    self._check([0x36, 0xaa], "rol 0xaa,x",
                AddressModes.ZeroPageX, data_ref=0xaa)

# bbc 1,0xaa,label13 ;37 aa fd Zero Page Bit Relative
def test_37_bbc(self):
    self._check([0x37, 0xaa, 0x1d], "bbc 1,0xaa,0x0020",
                AddressModes.ZeroPageBitRelative,
                data_ref=0xaa, code_ref=0x0020)

# sec               ;38        Implied
def test_38_sec(self):
    self._check([0x38], "sec", AddressModes.Implied)

# and 0xaabb,y      ;39 bb aa  Absolute Y
def test_39_and(self):
    self._check([0x39, 0xbb, 0xaa], "and 0xaabb,y",
                AddressModes.AbsoluteY, data_ref=0xaabb)

# inc a             ;3a        Implied
def test_3a_inc(self):
    self._check([0x3a], "inc a", AddressModes.Implied)

# ldm #0xaa,0xbb    ;3c aa bb  Zero Page Immediate
def test_3c_ldm(self):
    self._check([0x3c, 0xaa, 0xbb], "ldm #0xaa,0xbb",
                AddressModes.ZeroPageImmediate, data_ref=0xbb)

# and 0xaabb,x      ;3d bb aa  Absolute X
def test_3d_and(self):
    self._check([0x3d, 0xbb, 0xaa], "and 0xaabb,x",
                AddressModes.AbsoluteX, data_ref=0xaabb)

# rol 0xaabb,x      ;3e bb aa  Absolute X
def test_3e_rol(self):
    self._check([0x3e, 0xbb, 0xaa], "rol 0xaabb,x",
                AddressModes.AbsoluteX, data_ref=0xaabb)

# clb 1,0xaa        ;3f aa     Zero Page Bit
def test_3f_clb(self):
    self._check([0x3f, 0xaa], "clb 1,0xaa",
                AddressModes.ZeroPageBit, data_ref=0xaa)

# rti               ;40        Implied
def test_40_rti(self):
    self._check([0x40], "rti", AddressModes.Implied)

# eor [0xaa,x]      ;41 aa     Indirect X
def test_41_eor(self):
    self._check([0x41, 0xaa], "eor [0xaa,x]",
                AddressModes.IndirectX, data_ref=0xaa)

# stp               ;42        Implied
def test_42_stp(self):
    self._check([0x42], "stp", AddressModes.Implied)

# bbs 2,a,label14   ;43 fe     Accumulator Bit Relative
def test_43_bbs(self):
    self._check([0x43, 0x1e], "bbs 2,a,0x0020",
                AddressModes.AccumulatorBitRelative, code_ref=0x0020)

# com 0xaa          ;44 aa     Zero Page
def test_44_com(self):
    self._check([0x44, 0xaa], "com 0xaa",
                AddressModes.ZeroPage, data_ref=0xaa)

# eor 0xaa          ;45 aa     Zero Page
def test_45_eor(self):
    self._check([0x45, 0xaa], "eor 0xaa",
                AddressModes.ZeroPage, data_ref=0xaa)

# lsr 0xaa          ;46 aa     Zero Page
def test_46_lsr(self):
    self._check([0x46, 0xaa], "lsr 0xaa",
                AddressModes.ZeroPage, data_ref=0xaa)

# bbs 2,0xaa,label15 ;47 aa fd Zero Page Bit Relative
def test_47_bbs(self):
    self._check([0x47, 0xaa, 0x1d], "bbs 2,0xaa,0x0020",
                AddressModes.ZeroPageBitRelative,
                data_ref=0xaa, code_ref=0x0020)

# pha               ;48        Implied
def test_48_pha(self):
    self._check([0x48], "pha", AddressModes.Implied)

# eor #0xaa         ;49 aa     Immediate
def test_49_eor(self):
    self._check([0x49, 0xaa], "eor #0xaa", AddressModes.Immediate)

# lsr a             ;4a        Implied
def test_4a_lsr(self):
    self._check([0x4a], "lsr a", AddressModes.Implied)

# seb 2,a           ;4b        Accumulator Bit
def test_4b_seb(self):
    self._check([0x4b], "seb 2,a", AddressModes.AccumulatorBit)

# jmp 0xaabb        ;4c bb aa  Absolute
def test_4c_jmp(self):
    self._check([0x4c, 0xbb, 0xaa], "jmp 0xaabb",
                AddressModes.Absolute, code_ref=0xaabb)

# eor 0xaabb        ;4d bb aa  Absolute
def test_4d_eor(self):
    self._check([0x4d, 0xbb, 0xaa], "eor 0xaabb",
                AddressModes.Absolute, data_ref=0xaabb)

# lsr 0xaabb        ;4e bb aa  Absolute
def test_4e_lsr(self):
    self._check([0x4e, 0xbb, 0xaa], "lsr 0xaabb",
                AddressModes.Absolute, data_ref=0xaabb)

# seb 2,0xaa        ;4f aa     Zero Page Bit
def test_4f_seb(self):
    self._check([0x4f, 0xaa], "seb 2,0xaa",
                AddressModes.ZeroPageBit, data_ref=0xaa)

# bvc label16       ;50 fe     Relative
def test_50_bvc(self):
    self._check([0x50, 0x1e], "bvc 0x0020",
                AddressModes.Relative, code_ref=0x0020)

# eor [0xaa],y      ;51 aa     Indirect Y
def test_51_eor(self):
    self._check([0x51, 0xaa], "eor [0xaa],y",
                AddressModes.IndirectY, data_ref=0xaa)

# .byte 0x52        ;52        Illegal
def test_52_illegal(self):
    self._check([0x52], ".byte 0x52", AddressModes.Illegal)

# bbc 2,a,label17   ;53 fe     Accumulator Bit Relative
def test_53_bbc(self):
    self._check([0x53, 0x1e], "bbc 2,a,0x0020",
                AddressModes.AccumulatorBitRelative, code_ref=0x0020)

# .byte 0x54        ;54        Illegal
def test_54_illegal(self):
    self._check([0x54], ".byte 0x54", AddressModes.Illegal)

# eor 0xaa,x        ;55 aa     Zero Page X
def test_55_eor(self):
    self._check([0x55, 0xaa], "eor 0xaa,x",
                AddressModes.ZeroPageX, data_ref=0xaa)

# lsr 0xaa,x        ;56 aa     Zero Page X
def test_56_lsr(self):
    self._check([0x56, 0xaa], "lsr 0xaa,x",
                AddressModes.ZeroPageX, data_ref=0xaa)

# bbc 2,0xaa,label18 ;57 aa fd Zero Page Bit Relative
def test_57_bbc(self):
    self._check([0x57, 0xaa, 0x1d], "bbc 2,0xaa,0x0020",
                AddressModes.ZeroPageBitRelative,
                data_ref=0xaa, code_ref=0x0020)

# cli               ;58        Implied
def test_58_cli(self):
    self._check([0x58], "cli", AddressModes.Implied)

# eor 0xaabb,y      ;59 bb aa  Absolute Y
def test_59_eor(self):
    self._check([0x59, 0xbb, 0xaa], "eor 0xaabb,y",
                AddressModes.AbsoluteY, data_ref=0xaabb)

# .byte 0x5a        ;5a        Illegal
def test_5a_illegal(self):
    self._check([0x5a], ".byte 0x5a", AddressModes.Illegal)

# clb 2,a           ;5b        Accumulator Bit
def test_5b_clb(self):
    self._check([0x5b], "clb 2,a", AddressModes.AccumulatorBit)

# .byte 0x5c        ;5c        Illegal
def test_5c_illegal(self):
    self._check([0x5c], ".byte 0x5c", AddressModes.Illegal)
# eor 0xaabb,x ;5d bb aa Absolute X
def test_5d_eor(self):
mem = [0x5d, 0xbb, 0xaa]
inst = disassemble(mem, pc=0)
self.assertEqual(str(inst), "eor | |
newline, skip them
if type(member) != Tag:
continue
aux[s].append(member.text)
#Special iteration for profile setting
if not entry.find('profile-setting'):
aux['profile-setting'] = None
elif entry.find('profile-setting').group:
aux['profile-setting'] = {'type' : 'group', 'name' : entry.find('profile-setting').group.member.text if entry.find('profile-setting').group.find('member') else None}
else:
aux['profile-setting'] = {
'type' : 'profile',
'profiles' : {
'url-filtering' : entry.find('url-filtering').member.text if entry.find('url-filtering') else None,
'data-filtering' : entry.find('data-filtering').member.text if entry.find('data-filtering') else None,
'file-blocking' : entry.find('file-blocking').member.text if entry.find('file-blocking') else None,
'virus' : entry.find('virus').member.text if entry.find('virus') else None,
'spyware' : entry.find('spyware').member.text if entry.find('spyware') else None,
'vulnerability' : entry.find('vulnerability').member.text if entry.find('vulnerability') else None,
'wildfire-analysis' : entry.find('wildfire-analysis').member.text if entry.find('wildfire-analysis') else None
}
}
return {'allowed' : False if not soup else False if aux['action'] != 'allow' else False, 'policy' : aux}
class objects(PAN):
def get(self,args,object):
    """Return all entries of the given *object* type from vsys1.

    ``object`` is one of 'address', 'service', 'service-group',
    'address-group'.  Returns ``{'len': N, 'objects': [...]}`` on
    success, or ``({'error': ...}, 502)`` when the firewall API fails.
    """
    response = self.apicall(type='config',\
        action='get',\
        xpath="/config/devices/entry[<EMAIL>']/vsys/entry[@name='vsys1']/{0}".format(object))
    if response.status_code != 200:
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    _entries = list()
    soup = BeautifulSoup(response.text,'xml')
    # a self-closing <result/> means the firewall has no such entries
    if soup.response.result.isSelfClosing:
        return {'len' : 0, 'objects' : list()}
    for entry in BeautifulSoup(response.text,'xml').find(object).children:
        # children include NavigableString whitespace nodes; keep Tags only
        if type(entry) != Tag:
            continue
        if object == 'address':
            aux = {
                'name' : entry['name'],
                'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
                'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
                'description' : entry.find('description').text if entry.find('description') else None,
                'tag' : list() if entry.find('tag') else None
            }
            # collect tag member values when a <tag> node exists
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
            _entries.append(aux)
        elif object == 'service':
            aux = {
                'name' : entry['name'],
                'destination-port' : entry.find('port').text if entry.find('port') else None,
                'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
                'description' : entry.find('description').text if entry.find('description') else None,
                'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
                'tag' : list() if entry.find('tag') else None
            }
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
            _entries.append(aux)
        elif object == 'service-group':
            aux = {
                'name' : entry['name'],
                'tag' : list() if entry.find('tag') else None,
                'value' : list()
            }
            for member in entry.find('members').children:
                if type(member) != Tag:
                    continue
                aux['value'].append(member.text)
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
            _entries.append(aux)
        elif object == 'address-group':
            aux = {
                'name' : entry['name'],
                'description' : entry.find('description').text if entry.find('description') else None,
                'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
                'tag' : list() if entry.find('tag') else None,
            }
            if aux['type'] == 'static':
                aux['static'] = list()
                for member in entry.find('static').children:
                    if type(member) != Tag:
                        continue
                    aux['static'].append(member.text)
            elif aux['type'] == 'dynamic':
                aux['filter'] = entry.find('filter').text if entry.find('filter') else None
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
            _entries.append(aux)
    # apply the caller-supplied query filters before returning
    _entries = self.filter(args,_entries)
    return {'len' : len(_entries), 'objects' : _entries}
def post(self,data,object):
    """Create a new *object* (address, service, address-group or
    service-group) named ``data['name']`` on the firewall.

    Returns ``(data, 201)`` on success, 409 when the entry already
    exists, 404 for an unknown object type, and 502 on API errors.
    """
    # first check whether an entry with that name already exists
    response = self.apicall(type='config',\
        action='get',\
        xpath="/config/devices/entry[<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
    if response.status_code != 200:
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    soup = BeautifulSoup(response.text,'xml')
    # a non-self-closing <result> means the entry is already present
    if not soup.result.isSelfClosing:
        logger.warning("{0} already exists.".format(object))
        return {'error' : "{0} already exists.".format(object)}, 409
    #Object does not exists, create it
    element = BeautifulSoup('','xml')
    if object == 'address':
        element.append(element.new_tag(data['type']))
        element.find(data['type']).append(data['value'])
        if 'tag' in data:
            if data['tag']:
                # BUGFIX: the XML node is literally named 'tag';
                # data['tag'] is the list of tag values, not a tag name.
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
    elif object == 'service':
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        element.append(element.new_tag('protocol'))
        element.protocol.append(element.new_tag(data['protocol']))
        element.find(data['protocol']).append(element.new_tag('port'))
        if 'destination-port' in data:
            if data['destination-port']:
                element.port.append(data['destination-port'])
        if 'source-port' in data:
            if data['source-port']:
                element.find(data['protocol']).append(element.new_tag('source-port'))
                # BUGFIX: look up the freshly created <source-port> node
                # by its tag name; data['source-port'] is the port value.
                element.find('source-port').append(data['source-port'])
    elif object == 'address-group':
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if data['type'] == 'static':
            element.append(element.new_tag('static'))
            for d in data['static']:
                element.static.append(element.new_tag('member'))
                element.static.find_all('member')[-1].append(d)
        elif data['type'] == 'dynamic':
            # BUGFIX: the node names are the literals 'dynamic' and
            # 'filter' (as in patch()); the previous code used the data
            # values as tag names.
            element.append(element.new_tag('dynamic'))
            element.dynamic.append(element.new_tag('filter'))
            element.dynamic.filter.append(data['filter'])
    elif object == 'service-group':
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if 'value' in data:
            element.append(element.new_tag('members'))
            for d in data['value']:
                element.members.append(element.new_tag('member'))
                element.members.find_all('member')[-1].append(d)
    else:
        logger.warning("Object not found.")
        return {'error' : 'Object not found.'}, 404
    logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
    # push the new entry to the firewall
    response = self.apicall(type='config',\
        action='set',\
        xpath="/<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
        element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
    if response.status_code != 200:
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    else:
        return data, 201
def patch(self,data,object):
    """Partially update an existing *object* named ``data['name']``.

    Builds an XML fragment from the fields present in ``data``, pushes
    it with action='set' (which merges into the existing entry), and
    returns the merged representation with status 200.  Returns 400 if
    the entry does not exist, 404 for an unknown object type, 502 on
    API errors.
    """
    response = self.apicall(type='config',\
        action='get',\
        xpath="/<EMAIL>/<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
    if response.status_code != 200:
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    soup = BeautifulSoup(response.text,'xml')
    # a self-closing <result/> means there is nothing to patch
    if soup.result.isSelfClosing:
        logger.warning("Object does not exists.")
        return {'error' : 'Object does not exists.'}, 400
    element = BeautifulSoup('','xml')
    if object == 'address':
        element.append(element.new_tag(data['type']))
        element.find(data['type']).append(data['value'])
        if 'tag' in data:
            if data['tag']:
                # BUGFIX: the XML node is literally named 'tag';
                # data['tag'] is the list of tag values, not a tag name.
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
    elif object == 'service':
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        element.append(element.new_tag('protocol'))
        element.protocol.append(element.new_tag(data['protocol']))
        element.find(data['protocol']).append(element.new_tag('port'))
        if 'destination-port' in data:
            if data['destination-port']:
                element.port.append(data['destination-port'])
        if 'source-port' in data:
            if data['source-port']:
                element.find(data['protocol']).append(element.new_tag('source-port'))
                # BUGFIX: look up the freshly created <source-port> node
                # by its tag name; data['source-port'] is the port value.
                element.find('source-port').append(data['source-port'])
    elif object == 'address-group':
        if 'description' in data:
            if data['description']:
                element.append(element.new_tag('description'))
                element.description.append(data['description'])
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if 'static' in data:
            element.append(element.new_tag('static'))
            for d in data['static']:
                element.static.append(element.new_tag('member'))
                element.static.find_all('member')[-1].append(d)
        elif 'filter' in data:
            element.append(element.new_tag('dynamic'))
            element.dynamic.append(element.new_tag('filter'))
            element.dynamic.filter.append(data['filter'])
    elif object == 'service-group':
        if 'tag' in data:
            if data['tag']:
                element.append(element.new_tag('tag'))
                for t in data['tag']:
                    element.tag.append(element.new_tag('member'))
                    element.tag.find_all('member')[-1].append(t)
        if 'value' in data:
            element.append(element.new_tag('members'))
            for d in data['value']:
                element.members.append(element.new_tag('member'))
                element.members.find_all('member')[-1].append(d)
    else:
        logger.warning("Object not found.")
        return {'error' : 'Object not found.'}, 404
    logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
    response = self.apicall(type='config',\
        action='set',\
        xpath="/config/devices/entry[@<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
        element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
    if response.status_code != 200:
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
        logger.error("Palo Alto response: " + str(response.status_code))
        return {'error' : str(response.text)}, 502
    else:
        # re-read the pre-patch entry (from the initial GET) and merge
        # the patched fields into it for the response body
        aux = dict()
        entry = soup.entry
        if object == 'address':
            aux = {
                'name' : entry['name'],
                'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
                'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
                'description' : entry.find('description').text if entry.find('description') else None,
                'tag' : list() if entry.find('tag') else None
            }
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
        elif object == 'service':
            aux = {
                'name' : entry['name'],
                'destination-port' : entry.find('port').text if entry.find('port') else None,
                'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
                'description' : entry.find('description').text if entry.find('description') else None,
                'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
                'tag' : list() if entry.find('tag') else None
            }
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
        elif object == 'service-group':
            aux = {
                'name' : entry['name'],
                'tag' : list() if entry.find('tag') else None,
                'value' : list()
            }
            for member in entry.find('members').children:
                if type(member) != Tag:
                    continue
                aux['value'].append(member.text)
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
        elif object == 'address-group':
            aux = {
                'name' : entry['name'],
                'description' : entry.find('description').text if entry.find('description') else None,
                'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
                'tag' : list() if entry.find('tag') else None,
            }
            if aux['type'] == 'static':
                aux['static'] = list()
                for member in entry.find('static').children:
                    if type(member) != Tag:
                        continue
                    aux['static'].append(member.text)
            elif aux['type'] == 'dynamic':
                aux['filter'] = entry.find('filter').text if entry.find('filter') else None
            if type(aux['tag']) == list:
                for tag in entry.find('tag').children:
                    if type(tag) != Tag:
                        continue
                    aux['tag'].append(tag.text)
        # BUGFIX: .iteritems() is Python-2-only and crashes on Python 3;
        # .items() iterates identically on both versions.
        for k,v in data.items():
            if type(aux[k]) == list:
                if type(v) == list:
                    for _v in v:
                        if _v not in aux[k]:
                            aux[k].append(_v)
                else:
                    aux[k].append(v)
            else:
                aux[k] = v
        return aux, 200
def put(self,data,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
soup = BeautifulSoup(response.text,'xml')
if soup.result.isSelfClosing:
logger.warning("Object does not exists.")
return {'error' : 'Object does not exists.'}, 400
element = BeautifulSoup('','xml')
if object == 'address':
if 'value' in data:
element.append(element.new_tag(data['type'] if 'type' in data else soup.entry.next_element.next_element.name))
element.find(data['type'] if 'type' in data else soup.entry.next_element.name).append(data['value'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag(data['tag']))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
elif object == 'service':
if 'description' in data:
if data['description']:
element.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'destination-port' in data:
if data['destination-port']:
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name))
element.find(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name).append(element.new_tag('port'))
element.port.append(data['destination-port'])
if 'source-port' in data:
if data['source-port']:
element.append(element.new_tag('protocol'))
element.protocol.append(element.new_tag(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name))
element.find(data['protocol'] if 'protocol' in data else soup.entry.protocol.next_element.next_element.name).append(element.new_tag('source-port'))
element.find(data['source-port']).append(data['source-port'])
elif object == 'address-group':
element.append(element.new_tag('entry'))
element.entry['name'] = data['name']
if 'description' in data:
if data['description']:
element.entry.append(element.new_tag('description'))
element.description.append(data['description'])
if 'tag' in data:
if data['tag']:
element.entry.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'static' in data:
element.entry.append(element.new_tag('static'))
for d in data['static']:
element.static.append(element.new_tag('member'))
element.static.find_all('member')[-1].append(d)
elif 'filter' in data:
element.entry.append(element.new_tag('dynamic'))
element.dynamic.append(element.new_tag('filter'))
element.dynamic.filter.append(data['filter'])
elif object == 'service-group':
element.append(element.new_tag('entry'))
element.entry['name'] = data['name']
if 'tag' in data:
if data['tag']:
element.entry.append(element.new_tag('tag'))
for t in data['tag']:
element.tag.append(element.new_tag('member'))
element.tag.find_all('member')[-1].append(t)
if 'value' in data:
element.entry.append(element.new_tag('members'))
for d in data['value']:
element.members.append(element.new_tag('member'))
element.members.find_all('member')[-1].append(d)
else:
logger.warning("Object not found.")
return {'error' : 'Object not found.'}, 404
logger.debug(str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
response = self.apicall(type='config',\
action='edit' if object in ['address-group','service-group'] else 'set',\
xpath="/config/devices/entry[@<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,data['name']),\
element=str(element).replace('<?xml version="1.0" encoding="utf-8"?>\n',''))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
elif BeautifulSoup(response.text,'xml').response['status'] != 'success':
logger.error("Palo Alto response: " + str(response.text))
return {'error' : str(response.text)}, 502
else:
aux = dict()
entry = soup.entry
if object == 'address':
aux = {
'name' : entry['name'],
'type' : 'ip-netmask' if entry.find('ip-netmask') else 'fqdn' if entry.find('fqdn') else 'ip-range' if entry.find('ip-range') else None,
'value' : entry.find('ip-netmask').text if entry.find('ip-netmask') else entry.find('fqdn').text if entry.find('fqdn') else entry.find('ip-range').text if entry.find('ip-range') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service':
aux = {
'name' : entry['name'],
'destination-port' : entry.find('port').text if entry.find('port') else None,
'source-port' : entry.find('source-port').text if entry.find('source-port') else None,
'description' : entry.find('description').text if entry.find('description') else None,
'protocol' : 'tcp' if entry.find('tcp') else 'udp' if entry.find('udp') else None,
'tag' : list() if entry.find('tag') else None
}
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'service-group':
aux = {
'name' : entry['name'],
'tag' : list() if entry.find('tag') else None,
'value' : list()
}
for member in entry.find('members').children:
if type(member) != Tag:
continue
aux['value'].append(member.text)
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
elif object == 'address-group':
aux = {
'name' : entry['name'],
'description' : entry.find('description').text if entry.find('description') else None,
'type' : 'static' if entry.find('static') else 'dynamic' if entry.find('dynamic') else None,
'tag' : list() if entry.find('tag') else None,
}
if aux['type'] == 'static':
aux['static'] = list()
for member in entry.find('static').children:
if type(member) != Tag:
continue
aux['static'].append(member.text)
elif aux['type'] == 'dynamic':
aux['filter'] = entry.find('filter').text if entry.find('filter') else None
if type(aux['tag']) == list:
for tag in entry.find('tag').children:
if type(tag) != Tag:
continue
aux['tag'].append(tag.text)
for k,v in data.iteritems():
if type(aux[k]) == list:
if type(v) == list:
aux[k] = list()
for _v in v:
aux[k].append(_v)
else:
aux[k] = v
return aux, 200
def delete(self,name,object):
response = self.apicall(type='config',\
action='get',\
xpath="/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,name))
if response.status_code != 200:
logger.error("Palo Alto response: " + str(response.status_code))
return {'error' : str(response.text)}, 502
entry = BeautifulSoup(response.text,'xml')
if entry.result.isSelfClosing:
logger.warning("Rule does not exists.")
return {'error' : 'Rule does not exists.'}, 404
else:
entry = entry.find('entry')
#Object exists, delete it
response = self.apicall(type='config',\
action='delete',\
xpath="/config/devices/entry[<EMAIL>']/vsys/entry[@name='vsys1']/{0}/entry[@name='{1}']".format(object,name))
if response.status_code | |
# <gh_stars>1-10
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the aiida-jutools package. #
# (AiiDA JuDFT tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/aiida-jutools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""Tools for working with AiiDA Group entities: utils."""
import copy as _copy
import datetime as _datetime
import json as _json
import sys as _sys
import typing as _typing
import aiida as _aiida
import pytz as _pytz
from aiida import orm as _orm
from aiida.tools import groups as _aiida_groups
import logging as _logging
class GroupHierarchyMaker:
    """Load or create a nested group hierarchy from a dictionary. Useful for organizing large data collections.

    TODO: Create a ``GroupHierarchy`` class which takes the top group of a group hierarchy as input, then
    recursively builds a nested runtime object whose attributes are the respective subgroups, with
    attribute name = group label, which supports tab completion (e.g. NameTuples or dataclasses).
    With that object, user can navigate to every desired group instantly. For subgroup labels with
    numeric values in label, offer option to access desired subgroup via method taking resp. numeric
    value. That way, can navigate to subgroup also programmatically. Then HierarchyMaker can return
    such an object on load or create.
    """
    # Schema of a valid input: "INSERT_IN_ALL" describes extras propagated to
    # every group down to depth "TO_DEPTH"; "TEMPLATE" shows the per-group
    # entry layout. Any other top-level key is treated as a group label.
    TEMPLATE = {
        "INSERT_IN_ALL": {
            "TO_DEPTH": _sys.maxsize,
            "INSERT": {
                "extras": {
                    "version": "",
                    "generating_code_urls": {},
                    "comment": [""],
                },
            }
        },  # group
        "TEMPLATE": {
            "description": "",
            "SUBGROUPS": {
            }  # subgroups
        }  # group
    }
    # keys of the template dict that are meta-information, not group labels
    _ignored_keys = ["TEMPLATE", "INSERT_IN_ALL"]
    _insert_key = "INSERT_IN_ALL"

    @staticmethod
    def get_template(with_example_group: bool = True,
                     print_dict: bool = True,
                     indent: int = 4) -> dict:
        """Print a valid example group hierarchy with nested groups as input for load or create method.

        :param with_example_group: add a valid example group entry to template.
        :param print_dict: pretty print the template as well as returning it
        :param indent: indent for the printed template
        :return: valid example group structure
        """
        template = _copy.deepcopy(GroupHierarchyMaker.TEMPLATE)
        if with_example_group:
            template["my_base_group1"] = {
                "description": "Short description of this group.",
                "SUBGROUPS": {
                    "my_subgroupA": {
                        "description": "Short description of this group.",
                        "extras": {
                            "local_extra" : ["only set for this subgroup, ",
                                             "as opposed to global extras in 'INSERT_IN_ALL'."]
                        }
                    },  # subgroup
                    "my_subgroupB": {
                        "description": "Short description of this group.",
                    }  # subgroup
                }  # base subgroups
            }  # base group
        if print_dict:
            print(_json.dumps(template, indent=indent))
        return template

    def load_or_create(self,
                       template: dict,
                       overwrite_extras: bool = True) -> _typing.List[_orm.Group]:
        """Given a dictionary describing a group hierarchy, create or load the latter in the database.

        If a group in the hierarchy exists, will just be loaded. But extras will be modified according to dict.

        :param template: group hierarchy. See GroupHierarchyMaker.TEMPLATE for valid template.
        :param overwrite_extras: replace if True, add if False
        :return: list of created or loaded groups
        """
        # TODO validate dict: group hierarchy against class TEMPLATE
        self._to_insert = None
        self._insert_to_depth = None
        if template.get(GroupHierarchyMaker._insert_key, None):
            self._to_insert = template[GroupHierarchyMaker._insert_key].get("INSERT", None)
            self._insert_to_depth = template[GroupHierarchyMaker._insert_key].get("TO_DEPTH", None)
        self._overwrite_extras = overwrite_extras
        groups = []
        self._create_or_load(depth=0,
                             group_path_str="",
                             group_structure=template,
                             groups=groups)
        return groups

    def _create_or_load(self,
                        depth: int,
                        group_path_str: str,
                        group_structure: dict,
                        groups: _typing.List[_orm.Group]):
        """Recursively creates groups from possibly nested dict according to GroupHierarchyMaker.TEMPLATE.
        """
        base_path = group_path_str
        for group_label, attrs in group_structure.items():
            if group_label in GroupHierarchyMaker._ignored_keys:
                continue
            group_path_str = base_path + group_label
            group_path = _aiida_groups.GroupPath(group_path_str)
            group, created = group_path.get_or_create_group()
            # BUGFIX: only set description when provided (previously a plain
            # attrs["description"] lookup raised KeyError when absent).
            if "description" in attrs:
                group.description = attrs["description"]
            # BUGFIX: guard against templates without an INSERT_IN_ALL block;
            # previously self._to_insert could be None, raising TypeError on
            # the "in" test (and depth <= None on the depth test).
            if self._to_insert and "extras" in self._to_insert \
                    and self._insert_to_depth is not None and depth <= self._insert_to_depth:
                if self._overwrite_extras:
                    group.set_extra_many(self._to_insert["extras"])
                else:
                    for k, v in self._to_insert["extras"].items():
                        group.set_extra(k, v)
            # let override by local extras
            if "extras" in attrs:
                if self._overwrite_extras:
                    group.set_extra_many(attrs["extras"])
                else:
                    for k, v in attrs["extras"].items():
                        group.set_extra(k, v)
            if "SUBGROUPS" in attrs:
                self._create_or_load(depth=depth + 1,
                                     group_path_str=group_path_str + "/",
                                     group_structure=attrs["SUBGROUPS"],
                                     groups=groups)
            groups.append(group)
def verdi_group_list(projection: _typing.List[str] = None,
                     with_header: bool = True,
                     label_filter: str = None) -> _typing.List[_typing.List]:
    """Equivalent to CLI ``verdi group list -a`` (minus user mail address).

    :param projection: query projection; defaults to ['label', 'id', 'type_string']
    :param with_header: True: first list in return argument is the projection argument
    :param label_filter: optional: only include groups with this substring in their label
    :return: list of lists, one entry per projection value, for each group
    """
    # BUGFIX: default list built inside the call to avoid the shared
    # mutable-default-argument pitfall.
    if projection is None:
        projection = ['label', 'id', 'type_string']
    qb = _orm.QueryBuilder()
    groups = qb.append(_orm.Group, project=projection).all()
    if 'label' in projection and label_filter:
        index_of_label = projection.index('label')
        groups = [item for item in groups if label_filter in item[index_of_label]]
    # Sort case-insensitively by the first projected column; str() guards
    # non-string columns (e.g. when projection starts with 'id').
    groups.sort(key=lambda item: str(item[0]).lower())
    if with_header:
        groups.insert(0, projection)
    # flatten single-column projections into a plain list of values
    if len(projection) == 1:
        groups = [singlelist[0] for singlelist in groups]
    return groups
def get_subgroups(group: _orm.Group) -> _typing.List[_orm.Group]:
    """Get all subgroups of a group.

    In accordance with aiida GroupPath, the group with label "foo/bar" is a valid subgroup
    of a group with label "foo".

    :param group: a group with possible subgroups
    :return: subgroups
    """
    # BUGFIX: require the path separator after the parent label; a bare
    # startswith(group.label) also matched e.g. "foo2/bar" as a subgroup
    # of "foo".
    prefix = group.label + "/"
    all_labels = [g.label for g in _orm.Group.objects.all()]
    subgroup_labels = [label for label in all_labels if label.startswith(prefix)]
    return [_orm.Group.get(label=label) for label in subgroup_labels]
def move_nodes(origin: _orm.Group,
               destination: _orm.Group):
    """Transfer every node of one group into another, possibly sub/supergroup.

    :param origin: origin group
    :param destination: destination group

    Note: if the new group does not exit yet, prefer relabling the group with group.label = new_label.
    """
    # Materialize the node list once, then add to the target before clearing
    # the source.
    nodes = list(origin.nodes)
    destination.add_nodes(nodes)
    origin.remove_nodes(nodes)
def get_nodes(group_label: str) -> _typing.Generator[_orm.Node, None, None]:
    """Get all nodes from given group (or subgroup) by label (path).

    Deprecated: just use group.nodes, or list(group.nodes).

    :param group_label: e.g. for a subgroup, "groupA/subgroupB/subgroupC".
    :return: nodes as generator for efficient iteration (convert via list() to list)
    """
    return _orm.Group.get(label=group_label).nodes
def group_new_nodes(new_group_label: str,
blacklist: _typing.List[_typing.Type[_orm.Node]] = [_orm.Code, _orm.Computer],
right_date: _datetime.datetime = None, left_date: _datetime.datetime = None) -> \
_typing.Optional[_orm.Group]:
"""Groups new nodes with ctime in timerange (left_date,right_date] into new group
If you're working on one project at a time, everytime you finish a project you can use this function to
group your nodes. I.e. this is a utility function for a time-linear sequential grouping strategy. Letting
the function find the appropriate time range is the standard / recommended usage. If the group already exists
and the intended nodes are already added, repeated calls will change nothing.
:param new_group_label: label of new group/subgroup
:param blacklist: node types in timerange to exclude from grouping. Normally Code, Computer.
:param right_date: if not given (usually as datetime.now()), will take right_date = newest ctime, > left_date,
of any node, ungrouped nodes included.
:param left_date: if not given, will take left_date=newest ctime of any grouped node
:return: the new populated, stored, group, or None if no new nodes found
"""
timezone = _pytz.UTC
## step1: find d7=infdate from all *grouped* nodes
# new group to create or add
new_path = _aiida_groups.GroupPath(path=new_group_label)
new_group = new_path.get_or_create_group()[0]
# get all groups, exclude new group if present
group_labels = verdi_group_list(projection=['label'], with_header=False)
try:
group_labels.remove(new_group_label)
except ValueError:
pass
groups = [_orm.Group.get(label=label) for label in group_labels]
# find tuple (group,node) with largest node.ctime across all groups
left_date_computed = _datetime.datetime(year=1, month=1, day=1, tzinfo=_pytz.UTC)
for group in groups:
for node in group.nodes:
left_date_computed = max(left_date_computed, node.ctime)
if left_date is not None:
left_date = timezone.localize(left_date)
if left_date < left_date_computed:
print(
f"WARNING: left_date {left_date} < computed left date from groups {left_date_computed}, "
f"grouping overlap likely.")
elif left_date > left_date_computed:
print(
f"WARNING: left_date {left_date} > computed left date from groups {left_date_computed}, "
f"leftover ungrouped nodes likely.")
else:
left_date = left_date_computed
## step2: find d8=maxdate from all nodes newer than d7
qb = _orm.QueryBuilder()
new_nodes = qb.append(_orm.Node, filters={'ctime': {'>': left_date}}).all(flat=True)
if not new_nodes:
print(f"Info: found no nodes newer than last grouped at date {left_date}. "
f"Attempting to delete group '{new_group_label}' if empty.")
delete_groups([new_group_label])
return None
else:
right_date_computed = max(node.ctime for node in new_nodes)
if right_date is not None:
right_date = timezone.localize(right_date)
if right_date < right_date_computed:
print(
f"WARNING: right_date {right_date} < computed right date from groups {right_date_computed}, "
f"leftover ungrouped nodes likely.")
else:
right_date = right_date_computed
## step3: query all nodes in daterange (d7,d8], optional: exclude blacklist
daterange_filter = {
'and': [
{'ctime': {'>': left_date}}, # newer than
{'ctime': {'<=': right_date}} # older than
]
}
| |
this interface is
connected to an IPv6 stack, and the interface can send
and receive IPv6 packets.
''',
'enabled',
'openconfig-if-ip', False),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[('1280', '4294967295')], [],
''' [adapted from IETF IP model RFC 7277]
The size, in octets, of the largest IPv6 packet that the
interface will send and receive.
The server may restrict the allowed values for this leaf,
depending on the interface's type.
If this leaf is not configured, the operationally used MTU
depends on the interface's type.
''',
'mtu',
'openconfig-if-ip', False),
],
'openconfig-if-ip',
'state',
_yang_ns._namespaces['openconfig-if-ip'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config',
False,
[
_MetaInfoClassMember('create-global-addresses', ATTRIBUTE, 'bool' , None, None,
[], [],
''' [adapted from IETF IP model RFC 7277]
If enabled, the host creates global addresses as
described in RFC 4862.
''',
'create_global_addresses',
'openconfig-if-ip', False),
_MetaInfoClassMember('create-temporary-addresses', ATTRIBUTE, 'bool' , None, None,
[], [],
''' [adapted from IETF IP model RFC 7277]
If enabled, the host creates temporary addresses as
described in RFC 4941.
''',
'create_temporary_addresses',
'openconfig-if-ip', False),
_MetaInfoClassMember('temporary-preferred-lifetime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' [adapted from IETF IP model RFC 7277]
The time period during which the temporary address is
preferred.
''',
'temporary_preferred_lifetime',
'openconfig-if-ip', False),
_MetaInfoClassMember('temporary-valid-lifetime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' [adapted from IETF IP model RFC 7277]
The time period during which the temporary address
is valid.
''',
'temporary_valid_lifetime',
'openconfig-if-ip', False),
],
'openconfig-if-ip',
'config',
_yang_ns._namespaces['openconfig-if-ip'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State',
False,
[
_MetaInfoClassMember('create-global-addresses', ATTRIBUTE, 'bool' , None, None,
[], [],
''' [adapted from IETF IP model RFC 7277]
If enabled, the host creates global addresses as
described in RFC 4862.
''',
'create_global_addresses',
'openconfig-if-ip', False),
_MetaInfoClassMember('create-temporary-addresses', ATTRIBUTE, 'bool' , None, None,
[], [],
''' [adapted from IETF IP model RFC 7277]
If enabled, the host creates temporary addresses as
described in RFC 4941.
''',
'create_temporary_addresses',
'openconfig-if-ip', False),
_MetaInfoClassMember('temporary-preferred-lifetime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' [adapted from IETF IP model RFC 7277]
The time period during which the temporary address is
preferred.
''',
'temporary_preferred_lifetime',
'openconfig-if-ip', False),
_MetaInfoClassMember('temporary-valid-lifetime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' [adapted from IETF IP model RFC 7277]
The time period during which the temporary address
is valid.
''',
'temporary_valid_lifetime',
'openconfig-if-ip', False),
],
'openconfig-if-ip',
'state',
_yang_ns._namespaces['openconfig-if-ip'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf',
False,
[
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.Config',
[], [],
''' [adapted from IETF IP model RFC 7277]
Parameters to control the autoconfiguration of IPv6
addresses, as described in RFC 4862.
''',
'config',
'openconfig-if-ip', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf.State',
[], [],
''' Operational state data
''',
'state',
'openconfig-if-ip', False),
],
'openconfig-if-ip',
'autoconf',
_yang_ns._namespaces['openconfig-if-ip'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.Ipv6',
False,
[
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Address',
[], [],
''' The list of configured IPv6 addresses on the interface.
''',
'address',
'openconfig-if-ip', False),
_MetaInfoClassMember('autoconf', REFERENCE_CLASS, 'Autoconf' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Autoconf',
[], [],
''' Top-level container for IPv6 autoconf
''',
'autoconf',
'openconfig-if-ip', False),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Config',
[], [],
''' Top-level config data for the IPv6 interface
''',
'config',
'openconfig-if-ip', False),
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.Neighbor',
[], [],
''' A list of mappings from IPv6 addresses to
link-layer addresses.
Entries in this list are used as static entries in the
Neighbor Cache.
''',
'neighbor',
'openconfig-if-ip', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6.State',
[], [],
''' Top-level operational state data for the IPv6 interface
''',
'state',
'openconfig-if-ip', False),
],
'openconfig-if-ip',
'ipv6',
_yang_ns._namespaces['openconfig-if-ip'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface',
False,
[
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The index number of the subinterface -- used to address
the logical interface
''',
'index',
'openconfig-interfaces', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Config',
[], [],
''' Configurable items at the subinterface level
''',
'config',
'openconfig-interfaces', False),
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv4',
[], [],
''' Parameters for the IPv4 address family.
''',
'ipv4',
'openconfig-if-ip', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Ipv6',
[], [],
''' Parameters for the IPv6 address family.
''',
'ipv6',
'openconfig-if-ip', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.State',
[], [],
''' Operational state data for logical interfaces
''',
'state',
'openconfig-interfaces', False),
_MetaInfoClassMember('vlan', REFERENCE_CLASS, 'Vlan' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface.Vlan',
[], [],
''' Enclosing container for VLAN interface-specific
data on subinterfaces
''',
'vlan',
'openconfig-vlan', False),
],
'openconfig-interfaces',
'subinterface',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces',
False,
[
_MetaInfoClassMember('subinterface', REFERENCE_LIST, 'Subinterface' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Subinterfaces.Subinterface',
[], [],
''' The list of subinterfaces (logical interfaces) associated
with a physical interface
''',
'subinterface',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'subinterfaces',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Ethernet.Config.DuplexModeEnum' : _MetaInfoEnum('DuplexModeEnum', 'ydk.models.openconfig.openconfig_interfaces',
{
'FULL':'FULL',
'HALF':'HALF',
}, 'openconfig-if-ethernet', _yang_ns._namespaces['openconfig-if-ethernet']),
'Interfaces.Interface.Ethernet.Config' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Ethernet.Config',
False,
[
_MetaInfoClassMember('aggregate-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Specify the logical aggregate interface to which
this interface belongs
''',
'aggregate_id',
'openconfig-if-aggregate', False),
_MetaInfoClassMember('auto-negotiate', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Set to TRUE to request the interface to auto-negotiate
transmission parameters with its peer interface. When
set to FALSE, the transmission parameters are specified
manually.
''',
'auto_negotiate',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('duplex-mode', REFERENCE_ENUM_CLASS, 'DuplexModeEnum' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.Ethernet.Config.DuplexModeEnum',
[], [],
''' When auto-negotiate is TRUE, this optionally sets the
duplex mode that will be advertised to the peer. If
unspecified, the interface should negotiate the duplex mode
directly (typically full-duplex). When auto-negotiate is
FALSE, this sets the duplex mode on the interface directly.
''',
'duplex_mode',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('enable-flow-control', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable or disable flow control for this interface.
Ethernet flow control is a mechanism by which a receiver
may send PAUSE frames to a sender to stop transmission for
a specified time.
This setting should override auto-negotiated flow control
settings. If left unspecified, and auto-negotiate is TRUE,
flow control mode is negotiated with the peer interface.
''',
'enable_flow_control',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Assigns a MAC address to the Ethernet interface. If not
specified, the corresponding operational state leaf is
expected to show the system-assigned MAC address.
''',
'mac_address',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('port-speed', REFERENCE_IDENTITY_CLASS, 'EthernetSpeedIdentity' , 'ydk.models.openconfig.openconfig_if_ethernet', 'EthernetSpeedIdentity',
[], [],
''' When auto-negotiate is TRUE, this optionally sets the
port-speed mode that will be advertised to the peer for
negotiation. If unspecified, it is expected that the
interface will select the highest speed available based on
negotiation. When auto-negotiate is set to FALSE, sets the
link speed to a fixed value -- supported values are defined
by ethernet-speed identities
''',
'port_speed',
'openconfig-if-ethernet', False),
],
'openconfig-if-ethernet',
'config',
_yang_ns._namespaces['openconfig-if-ethernet'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Ethernet.State.Counters' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Ethernet.State.Counters',
False,
[
_MetaInfoClassMember('in-8021q-frames', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of 802.1q tagged frames received on the interface
''',
'in_8021q_frames',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('in-crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of receive error events due to FCS/CRC check
failure
''',
'in_crc_errors',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('in-fragment-frames', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of fragment frames received on the interface.
''',
'in_fragment_frames',
'openconfig-if-ethernet', False),
_MetaInfoClassMember('in-jabber-frames', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of jabber frames received on the
interface. Jabber frames are typically defined as oversize
frames which also have a bad CRC. Implementations may use
slightly different definitions of what constitutes a jabber
frame. Often | |
# filename: bip/base/block.py
import ida_gdl
import idc
import idautils
import ida_bytes
import ida_kernwin
import ida_graph
from bip.py3compat.py3compat import *
import bip.base.bipelt
import bip.base.func
import bip.base.instr
class BipBlockType(object):
    """
    Enumeration of basic-block kinds, mirroring IDA's internal
    ``fc_block_type_t`` enum (``gdl.hpp``), which the IDA Python API does
    not expose. Values must stay numerically identical to IDA's.
    """
    FCB_NORMAL = 0   #: normal block
    FCB_INDJUMP = 1  #: block ends with indirect jump
    FCB_RET = 2      #: return block
    FCB_CNDRET = 3   #: conditional return block
    FCB_NORET = 4    #: noreturn block
    FCB_ENORET = 5   #: external noreturn block (does not belong to the function)
    FCB_EXTERN = 6   #: external normal block
    FCB_ERROR = 7    #: block passes execution past the function end
class BipBlock(object):
"""
Class for representing and manipulating basic blocks in IDA.
.. warning::
This class is an abstraction on top of IDA BadicBlock. In
particular IDA does **not** create basic block if not in a defined
function. Change to the flowgraph can be not directly repercuted
on this object.
.. todo:: equality and inclusion operator
.. todo:: more functions for testing type (abstraction on .type
property), property function starting with ``is_``
"""
################################# BASE #################################
def __init__(self, val=None):
"""
Constructor for an :class:`BipBlock` object.
This function may raise a ``TypeError`` if the argument is not
of a type supported or a ``ValueError`` if the address pass in
parameter is not inside a function.
:param val: A value used for creating a basic block. This can be
an address (int or long) or a ``ida_gdl.BasicBlock`` object.
If ``None`` the screen address is used.
"""
if val is None:
val = ida_kernwin.get_screen_ea()
#: Internal ida_gdl.BasicBlock object representing this block in IDA
self._bb = None
if isinstance(val, ida_gdl.BasicBlock):
# in this case no problem just put it in the internal value
self._bb = val
elif isinstance(val, (int, long)):
# if val is an int we consider it to be an address
# for getting the basic block we need to get the flowchart for the
# function
# this may raise a ValueError if val is not a function
fc = bip.base.func.BipFunction(val)._flowchart
for i in range(fc.size):
if val >= fc[i].start_ea and val < fc[i].end_ea: # we found it
self._bb = fc[i]
break
if self._bb is None:
raise TypeError("BipBlock expect a ida_gdl.BasicBlock or the address of an instruction inside a function in input.")
    @property
    def ea(self):
        """
        Property which return the start address of the basicblock.
        :return int: The first address of the basicblock.
        """
        return self._bb.start_ea
    @property
    def end(self):
        """
        Property which return the end address of the basicblock. This
        address is not included in the basicblock.
        :return int: The first address after the end of the basicblock.
        """
        return self._bb.end_ea
def __str__(self):
return "BipBlock: 0x{:X} (from {})".format(self.ea, self.func)
    @property
    def _id(self):
        """
        Property returning the ID of the basic block. This is used in
        particular for manipulating the block through the graph
        functions (see :meth:`color`).
        :return int: The ID of this basic block.
        """
        return self._bb.id
############################ TYPE & INFO #############################
    @property
    def type(self):
        """
        Property which allow access to the type of basic block. The
        value is an integer matching one of the :class:`BipBlockType`
        constants.
        :return: One of the :class:`BipBlockType` enum.
        """
        return self._bb.type
@property
def is_ret(self):
"""
Property which return True if the block can return.
Internally this test the type for ``BipBlockType.FCB_RET`` and
``BipBlockType.FCB_CNDRET``. It is the equivalent of
``ida_gdl.is_ret_block`` in the standard idapython.
:return: True if the block return, False otherwise.
"""
return (self.type == BipBlockType.FCB_RET or
self.type == BipBlockType.FCB_CNDRET)
@property
def is_noret(self):
"""
Property for testing if the block never return. For example this
will be True if the block terminate by a call to a function which
will never return (``abort``, ...)
Internally this test the type for ``BipBlockType.FCB_NORET``
and ``BipBlockType.FCB_ENORET``. It is the equivalent of
``ida_gdl.is_noret_block`` in the standard idapython.
:return: True if the block never return, False otherwise.
"""
return (self.type == BipBlockType.FCB_NORET or
self.type == BipBlockType.FCB_ENORET)
@property
def is_external(self):
"""
Property for testing if the block is external to the function from
which it came.
Internally this test the type for ``FCB_ENORET`` and
``FCB_EXTERN`` .
.. note::
This should never be True if this :class:`BipBlock` was
provided by a :class:`BipFunction`, it can be True if the
block provided at the initialization was recuperated from an
other source.
:return: True if the block is not included in the function from
which the flowgraph was created, False otherwise.
"""
return (self.type == BipBlockType.FCB_EXTERN or
self.type == BipBlockType.FCB_ENORET)
########################### Control Flow ###########################
@property
def succ(self):
"""
Return a list of :class:`BipBlock` which are successor of this
block. This follow the potential execution pass.
:return: A list of :class:`BipBlock` successor of this block.
"""
return [BipBlock(bb) for bb in self._bb.succs()]
@property
def succ_iter(self):
"""
Return a generator of the :class:`BipBlock` following this one.
This is equivalent to :meth:`succ` and will probably be a little
faster.
:return: A generator of :class:`BipBlock` successor of this block.
"""
for b in self._bb.succs():
yield BipBlock(b)
@property
def pred(self):
"""
Return a list of :class:`BipBlock` which are predecessor of this
block. This provide the basicblock which can lead to this block
followin the execution pass.
:return: A list of :class:`BipBlock` predecessor of this block.
"""
return [BipBlock(bb) for bb in self._bb.preds()]
@property
def pred_iter(self):
"""
Return a generator of the :class:`BipBlock` predecessor of this
one. This is equivalent to :meth:`pred` and will probably be a
little faster.
:return: A generator of :class:`BipBlock` predecessor of this
block.
"""
for b in self._bb.preds():
yield BipBlock(b)
############################### FUNCTION ###############################
    @property
    def func(self):
        """
        Return the :class:`BipFunction` object corresponding to this
        block.
        .. note::
            Internally this returns the :class:`BipFunction` present at
            the start address of the block. In particular, for external
            blocks this will return the function in which the block is
            included and not the one from which it came.
        :return: The :class:`BipFunction` in which this block is included.
        """
        return bip.base.func.BipFunction(self.ea)
############################# INSTR & ITEMS ############################
@property
def items(self):
"""
Return a list of :class:`BipElt` corresponding to the items
included in the basic block (between ``ea`` and ``end``).
:return: A list of object :class:`BipElt`.
"""
return [bip.base.bipelt.GetElt(h) for h in idautils.Heads(self.ea, self.end)]
@property
def instr(self):
"""
Return a list of :class:`BipInstr` corresponding to the instructions
of the basicblock.
:return: A list of object :class:`BipInstr` .
"""
return [bip.base.instr.BipInstr(h) for h in idautils.Heads(self.ea, self.end) if idc.is_code(ida_bytes.get_full_flags(h))]
@property
def instr_iter(self):
"""
Return a generator of :class:`BipInstr` corresponding to the
instructions of the basicblock. This implementation will be just
a little more performant than the :meth:`instr` property.
:return: A generator of object :class:`BipInstr` .
"""
for h in idautils.Heads(self.ea, self.end):
if idc.is_code(ida_bytes.get_full_flags(h)):
yield bip.base.instr.BipInstr(h)
@property
def bytes(self):
"""
Property returning the value of the bytes contain in the
basicblock.
:return: A list of the bytes forming the element.
:rtype: list(int)
"""
return [ida_bytes.get_wide_byte(i) for i in range(self.ea, self.end)]
########################### COLOR #####################################
    @property
    def color(self):
        """
        Property for accessing the color of this basic block.
        :raise RuntimeError: If this function was not able to get the
            information about the graph node.
        :return int: The integer representing the color of this block in
            the BGR format.
        """
        ni = ida_graph.node_info_t()
        if not ida_graph.get_node_info(ni, self.func.ea, self._id):
            # get_node_info failing means no node_info_t was ever defined
            # for this node (color included), so the node still has the
            # default color carried by a freshly constructed node_info_t:
            # fall through and return ni.bg_color unchanged.
            # NOTE(review): despite the docstring, no RuntimeError is
            # actually raised on this path -- confirm intended behavior.
            pass
        return ni.bg_color
@color.setter
def color(self, value):
"""
Property setter for changing the color of this basic block.
.. warning:: This will **not** set correctly the color for a block
which color has already been change using the GUI. Probably a
bug in IDA or another item on top of it ?
:param value: An integer representing | |
or the VM creations triggered "
_msg += "by the AIs instantiated by the AS attachment operation "
_msg += "will fail."
raise self.osci.ObjectStoreMgdConnException(_msg, _status)
_aidrs_templates = self.osci.get_object(obj_attr_list["cloud_name"], "GLOBAL", False, \
"aidrs_templates", False)
_aidrs_templates["patterns"] = []
for _element in _aidrs_templates :
if _element.count("iait") :
_aidrs_templates["patterns"].append(_element[0:-5])
obj_attr_list["nr_ais"] = 0
if obj_attr_list["pattern"] in _aidrs_templates["patterns"] :
# This is just a trick to remove the application name from the
# start of the AIDRS attributes on the template.
# For instance, instead of adding the key "simpledt_max_ais"
# to the list of attributes of the AS we want the key to be in fact
# only "max_ais"
_x = len(obj_attr_list["pattern"]) + 1
for _key, _value in _aidrs_templates.iteritems() :
if _key.count(obj_attr_list["pattern"]) :
if _key[_x:] in obj_attr_list :
if obj_attr_list[_key[_x:]] == "default" :
obj_attr_list[_key[_x:]] = _value
else :
obj_attr_list[_key[_x:]] = _value
obj_attr_list["arrival"] = int(time())
_status = 0
else :
_fmsg = "Unknown pattern: " + obj_attr_list["pattern"]
except self.osci.ObjectStoreMgdConnException, obj :
_status = 40
_fmsg = str(obj.msg)
except self.ObjectOperationException, obj :
_status = obj.status
_fmsg = str(obj.msg)
except Exception, e :
_status = 23
_fmsg = str(e)
finally :
if _status :
_msg = "AIDRS pre-attachment operations failure: " + _fmsg
cberr(_msg)
raise self.ObjectOperationException(_msg, _status)
else :
_msg = "AIDRS pre-attachment operations success."
cbdebug(_msg)
return _status, _msg
    @trace
    def pre_attach_vmcrs(self, obj_attr_list) :
        '''
        Pre-attachment operations for a VMCRS (VM Capacity Request
        Submitter) object: load the VMCRS templates from the object
        store and copy the attributes of the selected pattern into
        obj_attr_list (mutated in place). Raises
        ObjectOperationException on failure.
        '''
        try :
            _status = 100
            _fmsg = "An error has occurred, but no error message was captured"
            _vmcrs_templates = self.osci.get_object(obj_attr_list["cloud_name"], "GLOBAL", False, \
                                                    "vmcrs_templates", False)
            # Pattern names are derived from template keys ending in
            # "ivmcat": "<pattern>_ivmcat" -> "<pattern>" (strip 7 chars).
            _vmcrs_templates["patterns"] = []
            for _element in _vmcrs_templates :
                if _element.count("ivmcat") :
                    _vmcrs_templates["patterns"].append(_element[0:-7])
            obj_attr_list["nr_simultaneous_cap_reqs"] = 0
            obj_attr_list["nr_total_cap_reqs"] = 0
            if _vmcrs_templates["patterns"].count(obj_attr_list["pattern"]) :
                # This is just a trick to remove the application name from the
                # start of the AIDRS attributes on the template.
                # For instance, instead of adding the key "simpledt_max_ais"
                # to the list of attributes of the AS we want the key to be in fact
                # only "max_ais"
                _x = len(obj_attr_list["pattern"]) + 1
                for _key, _value in _vmcrs_templates.iteritems() :
                    if _key.count(obj_attr_list["pattern"]) :
                        if _key[_x:] in obj_attr_list :
                            # only overwrite attributes explicitly left as "default"
                            if obj_attr_list[_key[_x:]] == "default" :
                                obj_attr_list[_key[_x:]] = _value
                        else :
                            # attribute not set yet: take it from the template
                            obj_attr_list[_key[_x:]] = _value
                obj_attr_list["arrival"] = int(time())
                _status = 0
            else :
                _fmsg = "Unknown pattern: " + obj_attr_list["pattern"]
        except self.osci.ObjectStoreMgdConnException, obj :
            _status = 40
            _fmsg = str(obj.msg)
        except self.ObjectOperationException, obj :
            _status = obj.status
            _fmsg = str(obj.msg)
        except Exception, e :
            _status = 23
            _fmsg = str(e)
        finally :
            # _status != 0 signals failure: log and re-raise; otherwise
            # report success to the caller.
            if _status :
                _msg = "VMCRS pre-attachment operations failure: " + _fmsg
                cberr(_msg)
                raise self.ObjectOperationException(_msg, _status)
            else :
                _msg = "VMCRS pre-attachment operations success."
                cbdebug(_msg)
                return _status, _msg
    def pre_attach_firs(self, obj_attr_list) :
        '''
        Pre-attachment operations for a FIRS (Fault Injection Request
        Submitter) object: load the FIRS templates from the object store
        and copy the attributes of the selected pattern into
        obj_attr_list (mutated in place). Raises
        ObjectOperationException on failure.
        '''
        try :
            _status = 100
            _fmsg = "An error has occurred, but no error message was captured"
            _firs_templates = self.osci.get_object(obj_attr_list["cloud_name"], "GLOBAL", False, \
                                                   "firs_templates", False)
            # Pattern names are derived from template keys containing
            # "ifat".
            # NOTE(review): the slice strips only 4 characters, while the
            # sibling AIDRS/VMCRS methods strip the suffix *plus* the
            # separating underscore ("_iait" -> 5, "_ivmcat" -> 7); this
            # would leave a trailing "_" on the pattern name -- verify.
            _firs_templates["patterns"] = []
            for _element in _firs_templates :
                if _element.count("ifat") :
                    _firs_templates["patterns"].append(_element[0:-4])
            obj_attr_list["nr_simultaneous_faults"] = 0
            obj_attr_list["nr_total_faults"] = 0
            if _firs_templates["patterns"].count(obj_attr_list["pattern"]) :
                # This is just a trick to remove the application name from the
                # start of the AIDRS attributes on the template.
                # For instance, instead of adding the key "simpledt_max_ais"
                # to the list of attributes of the AS we want the key to be in fact
                # only "max_ais"
                _x = len(obj_attr_list["pattern"]) + 1
                for _key, _value in _firs_templates.iteritems() :
                    if _key.count(obj_attr_list["pattern"]) :
                        if _key[_x:] in obj_attr_list :
                            # only overwrite attributes explicitly left as "default"
                            if obj_attr_list[_key[_x:]] == "default" :
                                obj_attr_list[_key[_x:]] = _value
                        else :
                            # attribute not set yet: take it from the template
                            obj_attr_list[_key[_x:]] = _value
                obj_attr_list["arrival"] = int(time())
                _status = 0
            else :
                _fmsg = "Unknown pattern: " + obj_attr_list["pattern"]
        except self.osci.ObjectStoreMgdConnException, obj :
            _status = 40
            _fmsg = str(obj.msg)
        except self.ObjectOperationException, obj :
            _status = obj.status
            _fmsg = str(obj.msg)
        except Exception, e :
            _status = 23
            _fmsg = str(e)
        finally :
            # _status != 0 signals failure: log and re-raise; otherwise
            # report success to the caller.
            if _status :
                _msg = "FIRS pre-attachment operations failure: " + _fmsg
                cberr(_msg)
                raise self.ObjectOperationException(_msg, _status)
            else :
                _msg = "FIRS pre-attachment operations success."
                cbdebug(_msg)
                return _status, _msg
@trace
def objattach(self, obj_attr_list, parameters, command) :
'''
TBD
'''
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
threading.current_thread().abort = False
threading.current_thread().aborted = False
if "uuid" not in obj_attr_list :
obj_attr_list["uuid"] = str(uuid5(NAMESPACE_DNS, \
str(randint(0,1000000000000000000)))).upper()
if command == "vm-attach" :
for key in ["ai", "ai_name", "aidrs", "aidrs_name", "pattern", "type"] :
if key not in obj_attr_list :
obj_attr_list[key] = "none"
_status = 100
_fmsg = "An error has occurred, but no error message was captured"
_obj_type = command.split('-')[0].upper()
_result = {}
_pre_attach = False
_post_attach = False
_admission_control = False
_vmcregister = False
_vmcreate = False
_aidefine = False
_created_object = False
_created_pending = False
obj_attr_list["name"] = "undefined"
obj_attr_list["cloud_ip"] = "undefined"
obj_attr_list["cloud_hostname"] = "undefined"
try :
_status, _fmsg = self.parse_cli(obj_attr_list, parameters, command)
if not _status :
_status, _fmsg = self.initialize_object(obj_attr_list, command)
_cloud_name = obj_attr_list["cloud_name"]
_staging_from_gui = False
if "staging" in obj_attr_list :
if obj_attr_list["staging"].count("prepare_") :
_staging_from_gui = True
#The "staging" string is prefixed with the word "prepare" by the UI.
_staging = obj_attr_list["staging"].replace("prepare_",'')
else :
_staging = obj_attr_list["staging"]
else :
_staging = None
'''
This code path is really confusing, but highly necessary.
Basically, anything called from the UI requires 2 "objattach"
invocations. The first one will just subscribe to a channel,
while the second one will effectively perform the attachment.
This has to be done this way mostly because the API cannot block
waiting for the pub/sub cycle to complete, since it needs to
return to the UI. Again, keep in mind that this code path is used
*only* by the UI.
'''
if _staging_from_gui :
_sub_channel = self.osci.subscribe(_cloud_name, "VM", "staging")
if _obj_type == "VM" :
_staging_parameters = _cloud_name + ' '
_staging_parameters += obj_attr_list["role"] + ' '
_staging_parameters += obj_attr_list["pool"] + ' '
_staging_parameters += obj_attr_list["meta_tags"] + ' '
_staging_parameters += obj_attr_list["size"] + ' '
_staging_parameters += str(_staging) + ' '
_staging_parameters += obj_attr_list["temp_attr_list"]
_staging_parameters += " async"
_tmp_result = self.pause_vm(obj_attr_list, \
_sub_channel, \
self.background_execute(_staging_parameters, "vm-attach")[2])
elif _obj_type == "AI" :
_staging_parameters = _cloud_name + ' '
_staging_parameters += obj_attr_list["type"] + ' '
_staging_parameters += obj_attr_list["load_level"] + ' '
_staging_parameters += obj_attr_list["load_duration"] + ' '
_staging_parameters += obj_attr_list["lifetime"] + ' '
_staging_parameters += obj_attr_list["aidrs"] + ' '
_staging_parameters += str(_staging) + ' '
_staging_parameters += obj_attr_list["temp_attr_list"]
_staging_parameters += " async"
_tmp_result = self.pause_app(obj_attr_list, \
_sub_channel, \
self.background_execute(_staging_parameters, "ai-attach")[2])
_status = _tmp_result["status"]
_fmsg = _tmp_result["msg"]
_result = _tmp_result["result"]
obj_attr_list.update(_result)
obj_attr_list["prepare_" + str(_staging) + "_complete"] = int(time())
elif not _status :
self.osci.add_to_list(_cloud_name, _obj_type, "PENDING", \
obj_attr_list["uuid"] + "|" + obj_attr_list["name"], int(time()))
self.osci.pending_object_set(_cloud_name, _obj_type, \
obj_attr_list["uuid"], "status", "Initializing...")
for pkey in obj_attr_list.keys() :
self.osci.pending_object_set(_cloud_name, _obj_type, \
obj_attr_list["uuid"], pkey, obj_attr_list[pkey])
self.osci.pending_object_set(_cloud_name, _obj_type, \
obj_attr_list["uuid"], "status", "Initializing...")
_created_pending = True
self.set_cloud_operations_instance(obj_attr_list["model"])
_cld_conn = self.coi[obj_attr_list["model"]][self.pid + '-' + obj_attr_list["experiment_id"]]
if _obj_type == "VMC" :
self.pre_attach_vmc(obj_attr_list)
elif _obj_type == "VM" :
self.pre_attach_vm(obj_attr_list)
elif _obj_type == "AI" :
_status, _fmsg = _cld_conn.aidefine(obj_attr_list, "provision_originated")
self.pre_attach_ai(obj_attr_list)
elif _obj_type == "AIDRS" :
self.pre_attach_aidrs(obj_attr_list)
elif _obj_type == "VMCRS" :
self.pre_attach_vmcrs(obj_attr_list)
elif _obj_type == "FIRS" :
self.pre_attach_firs(obj_attr_list)
else :
_msg = "Unknown object: " + _obj_type
raise self.ObjectOperationException(_msg, 28)
_pre_attach = True
_admission_control = self.admission_control(_obj_type, \
obj_attr_list, \
"attach")
if _obj_type == "VMC" :
_status, _fmsg = _cld_conn.vmcregister(obj_attr_list)
_vmcregister = True
elif _obj_type == "VM" :
self.osci.pending_object_set(_cloud_name, _obj_type, \
obj_attr_list["uuid"], "status", "Sending create request to cloud ...")
_status, _fmsg = _cld_conn.vmcreate(obj_attr_list)
_vmcreate = True
elif _obj_type == "AI" :
_status, _fmsg = _cld_conn.aidefine(obj_attr_list, "all_vms_booted")
self.assign_roles(obj_attr_list)
_aidefine = True
elif _obj_type == "AIDRS" :
True
elif _obj_type == "VMCRS" :
True
elif _obj_type == "FIRS" :
True
else :
False
if not _status :
if "lifetime" in obj_attr_list and not "submitter" in | |
# Copyright (c) 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
from abc import abstractproperty
from collections import defaultdict
from functools import total_ordering
from ... import reserved_vars as rv
from ...datasource import Line
from ...datasource import LineStateBase
from ...datasource import Thread
from ...graph.token import Step
from ..exc import StateError
@total_ordering
class IntervalBase(object):
    """Abstract base class for a time interval delimited by a "from" and
    a "to" :class:`Pace`.

    Subclasses must provide ``int_name`` and ``requestins``. Either pace
    may be absent; the ``from_*``/``to_*`` accessors raise RuntimeError
    when the corresponding pace is missing.
    """
    __metaclass__ = ABCMeta

    def __init__(self):
        self._from_pace = None   # starting Pace, set via from_pace
        self._to_pace = None     # ending Pace, set via to_pace
        self.order = 0

    ### from pace
    @property
    def from_pace(self):
        return self._from_pace

    @from_pace.setter
    def from_pace(self, val):
        assert isinstance(val, Pace)
        self._from_pace = val

    @property
    def from_time(self):
        if self._from_pace:
            return self._from_pace.time
        raise RuntimeError("Interval %s has no from_pace" % self.int_name)

    @property
    def from_seconds(self):
        if self._from_pace:
            return self._from_pace.seconds
        raise RuntimeError("Interval %s has no from_pace" % self.int_name)

    @property
    def from_edgename(self):
        if self._from_pace:
            return self._from_pace.edgename
        raise RuntimeError("Interval %s has no from_pace" % self.int_name)

    @property
    def from_keyword(self):
        if self._from_pace:
            return self._from_pace.keyword
        raise RuntimeError("Interval %s has no from_pace" % self.int_name)

    ### to pace
    @property
    def to_pace(self):
        return self._to_pace

    @to_pace.setter
    def to_pace(self, val):
        # setting None is only allowed while still unset
        if val is None:
            assert self._to_pace is None
        else:
            assert isinstance(val, Pace)
            self._to_pace = val

    @property
    def to_time(self):
        if self._to_pace:
            return self._to_pace.time
        raise RuntimeError("Interval %s has no to_pace" % self.int_name)

    @property
    def to_seconds(self):
        if self._to_pace:
            return self._to_pace.seconds
        raise RuntimeError("Interval %s has no to_pace" % self.int_name)

    @property
    def to_edgename(self):
        if self._to_pace:
            return self._to_pace.edgename
        raise RuntimeError("Interval %s has no to_pace" % self.int_name)

    @property
    def to_keyword(self):
        if self._to_pace:
            return self._to_pace.keyword
        raise RuntimeError("Interval %s has no to_pace" % self.int_name)

    @property
    def is_violated(self):
        # an interval is violated when it goes backwards in time
        return self.from_seconds > self.to_seconds

    ### abstracts
    @abstractproperty
    def int_name(self):
        """Name identifying this interval."""
        return None

    @abstractproperty
    def requestins(self):
        """The request instance this interval belongs to."""
        return None

    ### properties
    @property
    def is_interval(self):
        # bool() so callers get a proper boolean instead of a Pace object
        return bool(self._from_pace and self._to_pace)

    @property
    def path(self):
        """Compact textual form: ``edge[name]edge`` ('|' for missing ends)."""
        ret = ""
        if self._from_pace:
            ret += "%s[" % self.from_edgename
        else:
            ret += "|["
        ret += str(self.int_name)
        if self._to_pace:
            ret += "]%s" % self.to_edgename
        else:
            ret += "]|"
        return ret

    @property
    def lapse(self):
        """Duration in seconds; requires both paces to be set."""
        if self.is_interval:
            return self.to_seconds - self.from_seconds
        raise RuntimeError("Interval %s has no from/to pace" % self.int_name)

    @property
    def request(self):
        return self.requestins.request

    @property
    def request_type(self):
        return self.requestins.request_type

    ### prints
    def __repr_intlabels__(self):
        # "#" -> violated interval, "X" -> no pace at all
        labels = ""
        if self._from_pace and self._to_pace:
            if self.from_seconds > self.to_seconds:
                labels += "#"
        elif not self.from_pace and not self.to_pace:
            labels += "X"
        return labels

    def __repr__(self):
        ret = "<%s#%s: %s " % (
                self.__class__.__name__,
                self.int_name,
                self.__repr_intlabels__())
        if self._from_pace:
            ret += "%.3f,%s`%s`->" % (self.from_seconds,
                                      self.from_edgename,
                                      self.from_keyword)
        ret += "."
        if self._to_pace:
            ret += "->%.3f,%s`%s`" % (self.to_seconds,
                                      self.to_edgename,
                                      self.to_keyword)
        ret += " >"
        return ret

    def __repr_from__(self):
        ret = "[%s %s" % (self.int_name, self.__repr_intlabels__())
        if self._from_pace:
            context = ",".join(str(k)+"="+str(v)
                    for k,v in self._from_pace.line_context.items())
            if context:
                context = " " + context
            ret += "<-(%.3f,%s`%s`%s)]" % (
                    self.from_seconds,
                    self.from_edgename,
                    self.from_keyword,
                    context)
        else:
            ret += "]"
        return ret

    def __repr_to__(self):
        ret = "[%s %s" % (self.int_name, self.__repr_intlabels__())
        if self._to_pace:
            context = ",".join(str(k)+"="+str(v)
                    for k,v in self._to_pace.line_context.items())
            if context:
                context = " " + context
            ret += "->(%.3f,%s`%s`%s)]" % (
                    self.to_seconds,
                    self.to_edgename,
                    self.to_keyword,
                    context)
        else:
            ret += "]"
        return ret

    def __eq__(self, other):
        # BUG FIX: the original compared self.from_pace with
        # other.to_pace, making equality depend only on the "to" end.
        return (self.from_pace == other.from_pace and
                self.to_pace == other.to_pace)

    def __lt__(self, other):
        # total_ordering derives the remaining comparisons from this
        return ((self.from_seconds, self.to_seconds) <
                (other.from_seconds, other.to_seconds))

    def __hash__(self):
        # identity hash: intervals are unique objects despite __eq__
        return id(self)
class RequestinsBase(IntervalBase):
    """Interval representing an entire request instance.

    The interval's name is the request identifier; ``requestins``
    resolves to the object itself.
    """
    __metaclass__ = ABCMeta

    def __init__(self, request):
        # BUG FIX: the original never called IntervalBase.__init__(),
        # leaving _from_pace/_to_pace/order undefined and breaking every
        # pace accessor inherited from IntervalBase.
        super(RequestinsBase, self).__init__()
        self._request = None
        # goes through the validating setter below (no-op when None)
        self.request = request

    @property
    def request(self):
        return self._request

    @request.setter
    def request(self, val):
        # the request id can be assigned at most once, and must be a str
        if val is not None:
            assert isinstance(val, str)
            assert self._request is None
            self._request = val

    @property
    def requestins(self):
        return self

    @property
    def int_name(self):
        return self.request
class ThreadinsBase(IntervalBase):
    """Interval covering one thread instance.

    Accumulates thread-local variables parsed from the thread's log
    lines and links the thread to its request instance.
    """
    __metaclass__ = ABCMeta

    def __init__(self, thread_obj):
        super(ThreadinsBase, self).__init__()
        self.thread_obj = thread_obj
        # single-valued variables seen in this thread's lines
        self.thread_vars = {}
        # variables seen with conflicting values (all values kept)
        self.thread_vars_dup = defaultdict(set)
        self._request = None
        self._requestins = None

    @property
    def request(self):
        return self._request

    @request.setter
    def request(self, val):
        # once set, the request must stay consistent
        assert isinstance(val, str)
        if self._request:
            assert self._request == val
        else:
            self._request = val

    @property
    def requestins(self):
        return self._requestins

    @requestins.setter
    def requestins(self, val):
        # write-once link to the owning request instance
        assert isinstance(val, RequestinsBase)
        assert self._requestins is None
        self._requestins = val

    ### thread_obj
    @property
    def thread(self):
        return self.thread_obj.thread

    @property
    def target(self):
        return self.thread_obj.target

    @property
    def component(self):
        return self.thread_obj.component

    @property
    def host(self):
        return self.thread_obj.host

    @property
    def target_obj(self):
        return self.thread_obj.target_obj

    @property
    def thread_name(self):
        return self.thread_obj.name

    @property
    def int_name(self):
        return self.thread_name

    def __hash__(self):
        return id(self)

    def _process_vars(self, line_obj):
        """Fold the variables of `line_obj` into this thread instance.

        Identity variables must match the thread; `request` may be set
        once; every other key is collected into thread_vars and demoted
        to thread_vars_dup when conflicting values are observed.
        """
        assert isinstance(line_obj, Line)
        for key in line_obj.keys:
            if key in ("keyword", "time", "seconds"):
                continue
            new_val = line_obj[key]
            if key in ("component", "target", "host", "thread"):
                val = getattr(self, key)
                if val != line_obj[key]:
                    raise StateError("(ThreadInstance) parse error: "
                                     "variable %s mismatch: %s is not %s!"
                                     % (key, val, new_val))
                else:
                    pass
            elif key == "request":
                if new_val is None:
                    pass
                elif self.request is None:
                    self.request = new_val
                elif self.request != new_val:
                    # BUG FIX: the original formatted `val` here, which is
                    # unbound (or stale from a previous iteration) in this
                    # branch; report the current request instead.
                    raise StateError("(ThreadInstance) parse error: "
                                     "request mismatch: %s is not %s!"
                                     % (self.request, new_val))
                else:
                    pass
            else:
                if key in self.thread_vars_dup:
                    self.thread_vars_dup[key].add(new_val)
                else:
                    val = self.thread_vars.get(key)
                    if val is None:
                        self.thread_vars[key] = new_val
                    elif val != new_val:
                        # conflict: demote the key to the duplicate set
                        self.thread_vars_dup[key].add(val)
                        self.thread_vars_dup[key].add(new_val)
                        self.thread_vars.pop(key)
                    else:
                        pass
class ActivityBase(IntervalBase):
    """Named interval between two paces inside a request.

    Subclasses override the _act_* class attributes to declare their
    activity type and whether multiple activities of that type may be
    attached backward/forward on a single pace.
    """
    __metaclass__ = ABCMeta
    _act_type = object()      # placeholder, must be overridden
    _act_lim_back = None
    _act_lim_forth = None

    def __init__(self, from_pace, to_pace, aname):
        assert isinstance(aname, str)
        super(ActivityBase, self).__init__()
        if from_pace:
            self.from_pace = from_pace
        if to_pace:
            self.to_pace = to_pace
        self.activity_name = aname
        self.is_main = False

    @property
    def int_name(self):
        return self.activity_name

    def __repr_intlabels__(self):
        # append "!" when this activity lies on the main path
        labels = super(ActivityBase, self).__repr_intlabels__()
        if self.is_main:
            labels += "!"
        return labels
@total_ordering
class Pace(LineStateBase, object):
""" Pace is relative to transition. """
    def __init__(self, line_obj, step, threadins):
        """Bind a log line to a graph step within a thread instance."""
        assert isinstance(line_obj, Line)
        assert isinstance(step, Step)
        assert isinstance(threadins, ThreadinsBase)
        # the line must come from the same thread as the thread instance
        assert line_obj.thread_obj is threadins.thread_obj
        self.line_obj = line_obj
        self.step = step
        self.threadins = threadins
        # activities attached around this pace, grouped by activity type
        self.prv_activity_bytype = defaultdict(list)
        self.nxt_activity_bytype = defaultdict(list)
        # main-path markers, filled in later by the analysis
        self.is_main = False
        self.prv_main_activity = None
        self.nxt_main_activity = None
### step
    @property
    def path_step(self):
        """Path of the underlying graph step."""
        return self.step.path
    @property
    def edgename(self):
        """Edge name of the underlying graph step."""
        return self.step.edgename
    @property
    def joinable(self):
        """Whether the underlying step can be joined with another thread."""
        return self.step.joinable
### threadins
    @property
    def requestins(self):
        """Request instance owning this pace (via the thread instance)."""
        return self.threadins.requestins
    @property
    def thread_obj(self):
        """Thread object of the owning thread instance."""
        return self.threadins.thread_obj
    @property
    def target_obj(self):
        """Target object of the owning thread instance."""
        return self.threadins.target_obj
### LineState
    @property
    def line_keys(self):
        # NOTE(review): delegates to `keys_` while _process_vars reads
        # `line_obj.keys` -- presumably two distinct Line attributes
        # (filtered vs raw); verify against the Line class.
        return self.line_obj.keys_
@property
def line_context(self):
ret = {}
for k in self.line_keys:
ret[k] = self[k]
return ret
    @property
    def refresh_vars(self):
        """Variables that the underlying step refreshes."""
        return self.step.refresh_vars
    # TODO: bug here (the thread start/end markers below are disabled)
    @property
    def _ls_state(self):
        """One-char marker for this pace used by the line-state printer."""
        # Disabled variant that also marked thread start/end paces:
        # if self.is_thread_start and self.is_thread_end:
        #     return "*"
        # elif self.is_thread_start:
        #     return "+"
        # elif self.is_thread_end:
        #     return "-"
        # else:
        # "!" marks a pace on the main path, "|" an ordinary pace.
        if self.prv_main_activity or self.nxt_main_activity:
            return "!"
        else:
            return "|"
    @property
    def _ls_request(self):
        # LineStateBase hook: the request this pace belongs to
        return self.request
    @property
    def _ls_path(self):
        # LineStateBase hook: the graph path of this pace's step
        return self.path_step
# total ordering
__eq__ = lambda self, other: self.seconds == other.seconds
__lt__ = lambda self, other: self.seconds < other.seconds
    def __getattribute__(self, item):
        """Transparently resolve reserved variables from the wrapped line.

        For names in rv.ALL_VARS the lookup is redirected to line_obj;
        a missing REQUEST falls back to the owning thread instance.
        Every other attribute resolves normally.
        """
        assert isinstance(item, str)
        if item in rv.ALL_VARS:
            ret = getattr(self.line_obj, item)
            if ret is None and item == rv.REQUEST:
                # the line may not carry the request; the threadins does
                ret = getattr(self.threadins, "request")
            return ret
        else:
            return super(Pace, self).__getattribute__(item)
def __getitem__(self, item):
assert isinstance(item, str)
if item in rv.ALL_VARS:
return getattr(self, item)
elif item in self.line_obj:
return self.line_obj[item]
elif item in self.threadins.thread_vars:
return self.threadins.thread_vars[item]
elif item in self.threadins.thread_vars_dup:
raise StateError("(Pace) got multiple %s: %s" %
(item, self.threadins.thread_vars_dup[item]))
else:
raise StateError("(Pace) key %s not exist!" % item)
def __repr_marks__(self):
mark_str = ""
for type_, acts in self.prv_activity_bytype.items():
mark_str += ", prv_"+type_+"("
mark_str += ",".join(act.__repr_from__()
for act in acts)
mark_str += ")"
for type_, acts in self.nxt_activity_bytype.items():
mark_str += ", nxt_"+type_+"("
mark_str += ",".join(act.__repr_to__()
for act in acts)
mark_str += ")"
return mark_str
    def __repr__(self):
        """Full representation: time, step, target/thread, request,
        keyword, resolved context and attached activities."""
        return "<P %.3f %s [%s %s] %s, `%s`, %s%s>" % (
                self.seconds,
                self.path_step,
                self.target,
                self.thread,
                self.request,
                self.keyword,
                self.line_context,
                self.__repr_marks__())
    def __repr_thread__(self):
        """Compact representation used when printing a whole thread
        (omits target/thread/request, already known from context)."""
        return "%.3f %s, `%s`, %s%s" % (
                self.seconds,
                self.path_step,
                self.keyword,
                self.line_context,
                self.__repr_marks__())
    def __hash__(self):
        # identity hash: each Pace stays unique although __eq__ compares
        # by seconds only
        return id(self)
def append_nxt(self, activity, template=None):
assert isinstance(activity, ActivityBase)
if not template:
template = activity.__class__
assert issubclass(template, ActivityBase)
act_type = template._act_type
lim = template._act_lim_forth
assert not act_type is ActivityBase._act_type
assert isinstance(act_type, str)
assert isinstance(lim, bool)
if lim:
assert not self.nxt_activity_bytype[act_type]
self.nxt_activity_bytype[act_type].append(activity)
def append_prv(self, activity, template=None):
assert isinstance(activity, ActivityBase)
if not template:
template = activity.__class__
assert | |
"""The tests for the DirecTV Media player platform."""
from unittest.mock import call, patch
from datetime import datetime, timedelta
import requests
import pytest
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, MEDIA_TYPE_TVSHOW,
ATTR_MEDIA_ENQUEUE, ATTR_MEDIA_DURATION, ATTR_MEDIA_TITLE,
ATTR_MEDIA_POSITION, ATTR_MEDIA_SERIES_TITLE, ATTR_MEDIA_CHANNEL,
ATTR_INPUT_SOURCE, ATTR_MEDIA_POSITION_UPDATED_AT, DOMAIN,
SERVICE_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_TURN_ON, SUPPORT_TURN_OFF,
SUPPORT_PLAY_MEDIA, SUPPORT_STOP, SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK, SUPPORT_PLAY)
from homeassistant.components.directv.media_player import (
ATTR_MEDIA_CURRENTLY_RECORDING, ATTR_MEDIA_RATING, ATTR_MEDIA_RECORDED,
ATTR_MEDIA_START_TIME, DEFAULT_DEVICE, DEFAULT_PORT)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_DEVICE, CONF_HOST, CONF_NAME, CONF_PORT,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_STOP, SERVICE_TURN_OFF,
SERVICE_TURN_ON, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNAVAILABLE)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import MockDependency, async_fire_time_changed
# Entity ids the platform setup is expected to create.
CLIENT_ENTITY_ID = 'media_player.client_dvr'
MAIN_ENTITY_ID = 'media_player.main_dvr'
IP_ADDRESS = '127.0.0.1'

# Payload as delivered by the discovery component.
DISCOVERY_INFO = {
    'host': IP_ADDRESS,
    'serial': 1234
}

# getTuned()-style attributes for live (non-recorded) TV.
LIVE = {
    "callsign": "HASSTV",
    "date": "20181110",
    "duration": 3600,
    "isOffAir": False,
    "isPclocked": 1,
    "isPpv": False,
    "isRecording": False,
    "isVod": False,
    "major": 202,
    "minor": 65535,
    "offset": 1,
    "programId": "102454523",
    "rating": "No Rating",
    "startTime": 1541876400,
    "stationId": 3900947,
    "title": "Using Home Assistant to automate your home"
}

# Locations reported by the mocked DVR. NOTE: test_setup_platform_discover_client
# appends client entries to this list and removes them again afterwards.
LOCATIONS = [
    {
        'locationName': 'Main DVR',
        'clientAddr': DEFAULT_DEVICE
    }
]

# getTuned()-style attributes while playing back a recording
# (same as LIVE plus recording-specific keys).
RECORDING = {
    "callsign": "HASSTV",
    "date": "20181110",
    "duration": 3600,
    "isOffAir": False,
    "isPclocked": 1,
    "isPpv": False,
    "isRecording": True,
    "isVod": False,
    "major": 202,
    "minor": 65535,
    "offset": 1,
    "programId": "102454523",
    "rating": "No Rating",
    "startTime": 1541876400,
    "stationId": 3900947,
    "title": "Using Home Assistant to automate your home",
    'uniqueId': '12345',
    'episodeTitle': 'Configure DirecTV platform.'
}

# Minimal valid platform configuration for the main DVR.
WORKING_CONFIG = {
    'media_player': {
        'platform': 'directv',
        CONF_HOST: IP_ADDRESS,
        CONF_NAME: 'Main DVR',
        CONF_PORT: DEFAULT_PORT,
        CONF_DEVICE: DEFAULT_DEVICE
    }
}
@pytest.fixture
def client_dtv():
    """Fixture for a client device."""
    device = MockDirectvClass('mock_ip')
    device.attributes = RECORDING
    device._standby = False
    return device
@pytest.fixture
def main_dtv():
    """Fixture for main DVR."""
    device = MockDirectvClass('mock_ip')
    return device
@pytest.fixture
def dtv_side_effect(client_dtv, main_dtv):
    """Fixture to create DIRECTV instance for main and client."""
    def mock_dtv(ip, port, client_addr):
        # Address '0' is the main DVR; anything else is a client box.
        device = main_dtv if client_addr == '0' else client_dtv
        device._host = ip
        device._port = port
        device._device = client_addr
        return device
    return mock_dtv
@pytest.fixture
def mock_now():
    """Fixture for dtutil.now."""
    # Frozen "now" so position/updated-at assertions are deterministic.
    return dt_util.utcnow()
@pytest.fixture
def platforms(hass, dtv_side_effect, mock_now):
    """Fixture for setting up test platforms."""
    main_config = {
        'platform': 'directv',
        'name': 'Main DVR',
        'host': IP_ADDRESS,
        'port': DEFAULT_PORT,
        'device': DEFAULT_DEVICE
    }
    client_config = {
        'platform': 'directv',
        'name': 'Client DVR',
        'host': IP_ADDRESS,
        'port': DEFAULT_PORT,
        'device': '1'
    }
    config = {'media_player': [main_config, client_config]}
    with MockDependency('DirectPy'), \
            patch('DirectPy.DIRECTV', side_effect=dtv_side_effect), \
            patch('homeassistant.util.dt.utcnow', return_value=mock_now):
        hass.loop.run_until_complete(async_setup_component(
            hass, DOMAIN, config))
        hass.loop.run_until_complete(hass.async_block_till_done())
        yield
async def async_turn_on(hass, entity_id=None):
    """Turn on specified media player or all."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data)
async def async_turn_off(hass, entity_id=None):
    """Turn off specified media player or all."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data)
async def async_media_pause(hass, entity_id=None):
    """Send the media player the command for pause."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
async def async_media_play(hass, entity_id=None):
    """Send the media player the command for play/pause."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_MEDIA_PLAY, data)
async def async_media_stop(hass, entity_id=None):
    """Send the media player the command for stop."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_MEDIA_STOP, data)
async def async_media_next_track(hass, entity_id=None):
    """Send the media player the command for next track."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
async def async_media_previous_track(hass, entity_id=None):
    """Send the media player the command for prev track."""
    if entity_id:
        data = {ATTR_ENTITY_ID: entity_id}
    else:
        data = {}
    await hass.services.async_call(
        DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
async def async_play_media(hass, media_type, media_id, entity_id=None,
                           enqueue=None):
    """Send the media player the command for playing media."""
    data = {
        ATTR_MEDIA_CONTENT_TYPE: media_type,
        ATTR_MEDIA_CONTENT_ID: media_id,
    }
    # Optional service-call fields are only added when supplied.
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    if enqueue:
        data[ATTR_MEDIA_ENQUEUE] = enqueue
    await hass.services.async_call(DOMAIN, SERVICE_PLAY_MEDIA, data)
class MockDirectvClass:
    """A fake DirecTV DVR device."""

    def __init__(self, ip, port=8080, clientAddr='0'):
        """Initialize the fake DirecTV device."""
        self._host = ip
        self._port = port
        self._device = clientAddr
        self._standby = True
        self._play = False
        self._locations = LOCATIONS
        self.attributes = LIVE

    def get_locations(self):
        """Mock for get_locations method."""
        status = {
            'code': 200,
            'commandResult': 0,
            'msg': 'OK.',
            'query': '/info/getLocations'
        }
        return {'locations': self._locations, 'status': status}

    def get_standby(self):
        """Mock for get_standby method."""
        return self._standby

    def get_tuned(self):
        """Mock for get_tuned method."""
        # Simulate playback progress: the offset advances while playing.
        if self._play:
            self.attributes['offset'] += 1
        result = self.attributes
        result['status'] = {
            "code": 200,
            "commandResult": 0,
            "msg": "OK.",
            "query": "/tv/getTuned"
        }
        return result

    def key_press(self, keypress):
        """Mock for key_press method."""
        if keypress == 'poweron':
            self._standby, self._play = False, True
        elif keypress == 'poweroff':
            self._standby, self._play = True, False
        elif keypress == 'play':
            self._play = True
        elif keypress in ('pause', 'stop'):
            self._play = False

    def tune_channel(self, source):
        """Mock for tune_channel method."""
        self.attributes['major'] = int(source)
async def test_setup_platform_config(hass):
    """Test setting up the platform from configuration."""
    with MockDependency('DirectPy'), \
            patch('DirectPy.DIRECTV', new=MockDirectvClass):
        await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
        await hass.async_block_till_done()

    assert hass.states.get(MAIN_ENTITY_ID)
    assert len(hass.states.async_entity_ids('media_player')) == 1
async def test_setup_platform_discover(hass):
    """Test setting up the platform from discovery."""
    with MockDependency('DirectPy'), \
            patch('DirectPy.DIRECTV', new=MockDirectvClass):
        hass.async_create_task(
            async_load_platform(hass, DOMAIN, 'directv', DISCOVERY_INFO,
                                {'media_player': {}})
        )
        await hass.async_block_till_done()

    assert hass.states.get(MAIN_ENTITY_ID)
    assert len(hass.states.async_entity_ids('media_player')) == 1
async def test_setup_platform_discover_duplicate(hass):
    """Test setting up the platform from discovery."""
    with MockDependency('DirectPy'), \
            patch('DirectPy.DIRECTV', new=MockDirectvClass):
        # Configure the DVR, then discover the very same device again.
        await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
        await hass.async_block_till_done()
        hass.async_create_task(
            async_load_platform(hass, DOMAIN, 'directv', DISCOVERY_INFO,
                                {'media_player': {}})
        )
        await hass.async_block_till_done()

    assert hass.states.get(MAIN_ENTITY_ID)
    assert len(hass.states.async_entity_ids('media_player')) == 1
async def test_setup_platform_discover_client(hass):
    """Test setting up the platform from discovery."""
    # Temporarily extend the module-level LOCATIONS so the mocked DVR
    # reports two client devices in addition to the main DVR.
    LOCATIONS.append({
        'locationName': 'Client 1',
        'clientAddr': '1'
    })
    LOCATIONS.append({
        'locationName': 'Client 2',
        'clientAddr': '2'
    })
    with MockDependency('DirectPy'), \
            patch('DirectPy.DIRECTV', new=MockDirectvClass):
        await async_setup_component(hass, DOMAIN, WORKING_CONFIG)
        await hass.async_block_till_done()
        hass.async_create_task(
            async_load_platform(hass, DOMAIN, 'directv', DISCOVERY_INFO,
                                {'media_player': {}})
        )
        await hass.async_block_till_done()

    # Restore LOCATIONS so later tests see only the main DVR.
    del LOCATIONS[-1]
    del LOCATIONS[-1]

    state = hass.states.get(MAIN_ENTITY_ID)
    assert state
    state = hass.states.get('media_player.client_1')
    assert state
    state = hass.states.get('media_player.client_2')
    assert state

    assert len(hass.states.async_entity_ids('media_player')) == 3
async def test_supported_features(hass, platforms):
    """Test supported features."""
    # Features supported for main DVR (includes power control).
    main_features = (
        SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF |
        SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK |
        SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY
    )
    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.attributes.get('supported_features') == main_features

    # Features supported for clients (no power control).
    client_features = (
        SUPPORT_PAUSE |
        SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK |
        SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY
    )
    state = hass.states.get(CLIENT_ENTITY_ID)
    assert state.attributes.get('supported_features') == client_features
async def test_check_attributes(hass, platforms, mock_now):
    """Test attributes."""
    next_update = mock_now + timedelta(minutes=5)
    # Advance frozen time so the platform polls the mocked device.
    with patch('homeassistant.util.dt.utcnow', return_value=next_update):
        async_fire_time_changed(hass, next_update)
        await hass.async_block_till_done()

    # Start playing TV
    with patch('homeassistant.util.dt.utcnow',
               return_value=next_update):
        await async_media_play(hass, CLIENT_ENTITY_ID)
        await hass.async_block_till_done()

    state = hass.states.get(CLIENT_ENTITY_ID)
    assert state.state == STATE_PLAYING

    # The client fixture reports the RECORDING attributes; the media
    # position advances by one per poll while playing (see MockDirectvClass).
    assert state.attributes.get(ATTR_MEDIA_CONTENT_ID) == \
        RECORDING['programId']
    assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == \
        MEDIA_TYPE_TVSHOW
    assert state.attributes.get(ATTR_MEDIA_DURATION) == \
        RECORDING['duration']
    assert state.attributes.get(ATTR_MEDIA_POSITION) == 2
    assert state.attributes.get(
        ATTR_MEDIA_POSITION_UPDATED_AT) == next_update
    assert state.attributes.get(ATTR_MEDIA_TITLE) == RECORDING['title']
    assert state.attributes.get(ATTR_MEDIA_SERIES_TITLE) == \
        RECORDING['episodeTitle']
    assert state.attributes.get(ATTR_MEDIA_CHANNEL) == \
        "{} ({})".format(RECORDING['callsign'], RECORDING['major'])
    assert state.attributes.get(ATTR_INPUT_SOURCE) == RECORDING['major']
    assert state.attributes.get(ATTR_MEDIA_CURRENTLY_RECORDING) == \
        RECORDING['isRecording']
    assert state.attributes.get(ATTR_MEDIA_RATING) == RECORDING['rating']
    assert state.attributes.get(ATTR_MEDIA_RECORDED)
    assert state.attributes.get(ATTR_MEDIA_START_TIME) == \
        datetime(2018, 11, 10, 19, 0, tzinfo=dt_util.UTC)

    # Test to make sure that ATTR_MEDIA_POSITION_UPDATED_AT is not
    # updated if TV is paused.
    with patch('homeassistant.util.dt.utcnow',
               return_value=next_update + timedelta(minutes=5)):
        await async_media_pause(hass, CLIENT_ENTITY_ID)
        await hass.async_block_till_done()

    state = hass.states.get(CLIENT_ENTITY_ID)
    assert state.state == STATE_PAUSED
    assert state.attributes.get(
        ATTR_MEDIA_POSITION_UPDATED_AT) == next_update
async def test_main_services(hass, platforms, main_dtv, mock_now):
    """Test the different services."""
    next_update = mock_now + timedelta(minutes=5)
    # Advance frozen time so the platform polls the mocked device.
    with patch('homeassistant.util.dt.utcnow', return_value=next_update):
        async_fire_time_changed(hass, next_update)
        await hass.async_block_till_done()

    # DVR starts in off state.
    state = hass.states.get(MAIN_ENTITY_ID)
    assert state.state == STATE_OFF

    # All these should call key_press in our class.
    with patch.object(main_dtv, 'key_press',
                      wraps=main_dtv.key_press) as mock_key_press, \
            patch.object(main_dtv, 'tune_channel',
                         wraps=main_dtv.tune_channel) as mock_tune_channel, \
            patch.object(main_dtv, 'get_tuned',
                         wraps=main_dtv.get_tuned) as mock_get_tuned, \
            patch.object(main_dtv, 'get_standby',
                         wraps=main_dtv.get_standby) as mock_get_standby:

        # Turn main DVR on. When turning on DVR is playing.
        await async_turn_on(hass, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_key_press.called
        assert mock_key_press.call_args == call('poweron')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.state == STATE_PLAYING

        # Pause live TV.
        await async_media_pause(hass, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_key_press.called
        assert mock_key_press.call_args == call('pause')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.state == STATE_PAUSED

        # Start play again for live TV.
        await async_media_play(hass, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_key_press.called
        assert mock_key_press.call_args == call('play')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.state == STATE_PLAYING

        # Change channel, currently it should be 202
        assert state.attributes.get('source') == 202
        await async_play_media(hass, 'channel', 7, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_tune_channel.called
        assert mock_tune_channel.call_args == call('7')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.attributes.get('source') == 7

        # Stop live TV.
        await async_media_stop(hass, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_key_press.called
        assert mock_key_press.call_args == call('stop')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.state == STATE_PAUSED

        # Turn main DVR off.
        await async_turn_off(hass, MAIN_ENTITY_ID)
        await hass.async_block_till_done()
        assert mock_key_press.called
        assert mock_key_press.call_args == call('poweroff')
        state = hass.states.get(MAIN_ENTITY_ID)
        assert state.state == STATE_OFF

        # There should have been 6 calls to check if DVR is in standby
        assert main_dtv.get_standby.call_count == 6
| |
#Pyjsdl - Copyright (C) 2021 <NAME> <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
class Element:
    """Wrapper around a DOM element providing a Python-friendly API.

    The wrapped element is kept in ``self._element``; most methods are thin
    delegations to the underlying DOM object (Transcrypt/browser target).
    """

    def __init__(self, element=None):
        # Wrap an existing DOM element, or start empty and attach one
        # later via setElement().
        if element is not None:
            self._element = element
        else:
            self._element = None

    # NOTE(review): this property pair is immediately shadowed by the plain
    # ``def style`` below, so in CPython ``Element.style`` resolves to that
    # method, not to the property. Presumably this layout targets the
    # Transcrypt/JS transpile -- confirm before refactoring.
    @property
    def style(self, attr):
        return self._element.style

    @style.setter
    def style(self, attr, value):
        self._element.style[attr] = value

    def style(self, attr):
        pass

    def getElement(self):
        """Return the wrapped DOM element (or None)."""
        return self._element

    def setElement(self, element):
        """Replace the wrapped DOM element."""
        self._element = element

    def setID(self, id):
        self._element.id = id

    def getID(self):
        return self._element.id

    def setSize(self, width, height):
        """Set both dimensions; see setWidth/setHeight for unit handling."""
        self.setWidth(width)
        self.setHeight(height)

    def setWidth(self, width):
        # Strings pass through as CSS values; numbers are coerced to 'Npx'.
        if isinstance(width, str):
            self._element.style.width = width
        else:
            self._element.style.width = str(int(width)) + 'px'

    def setHeight(self, height):
        # Applied as 'min-height' so the element may still grow taller.
        if isinstance(height, str):
            self._element.style['min-height'] = height
        else:
            self._element.style['min-height'] = str(int(height)) + 'px'

    def getAttributes(self):
        return self._element.attributes

    def getClientHeight(self):
        return self._element.clientHeight

    def getClientLeft(self):
        return self._element.clientLeft

    def getClientTop(self):
        return self._element.clientTop

    def getClientWidth(self):
        return self._element.clientWidth

    def getScrollHeight(self):
        return self._element.scrollHeight

    def getScrollLeft(self):
        return self._element.scrollLeft

    def getScrollTop(self):
        return self._element.scrollTop

    def getScrollWidth(self):
        return self._element.scrollWidth

    def addEventListener(self, type, listener, useCapture):
        self._element.addEventListener(type, listener, useCapture)

    def removeEventListener(self, type, listener, useCapture):
        self._element.removeEventListener(type, listener, useCapture)

    def getMouseWheelEventType(self):
        """Return the wheel event name supported by this browser."""
        if self._element is not None:
            element = self._element
        else:
            # Probe a detached element when none is attached yet.
            element = document.createElement('div')
        if hasattr(element, 'onwheel'):
            event_type = 'wheel'
        elif hasattr(element, 'onmousewheel'):
            event_type = 'mousewheel'
        else:
            # Legacy Firefox fallback.
            event_type = 'DOMMouseScroll'
        return event_type

    def getAttribute(self):
        # NOTE(review): DOM getAttribute normally takes a name argument;
        # confirm intended use.
        return self._element.getAttribute()

    def setAttribute(self, name, value):
        self._element.setAttribute(name, value)

    def getBoundingClientRect(self):
        return self._element.getBoundingClientRect()

    def appendChild(self, el):
        self._element.appendChild(el)

    def removeChild(self, el):
        self._element.removeChild(el)

    def getStyle(self):
        return self._element.style

    def getTitle(self):
        return self._element.title

    def setTitle(self, text):
        self._element.title = text

    def focus(self):
        self._element.focus()

    def blur(self):
        self._element.blur()

    def click(self):
        self._element.click()
class FocusElement(Element):
    """Element that can receive focus plus mouse/keyboard events."""

    _event_type = None  # cached event-type name, may be set by subclasses

    def __init__(self):
        Element.__init__(self)
        self._sink_events = None  # event mask recorded by sinkEvents()

    def addMouseListener(self, obj):
        """Attach the on* mouse handlers to obj's DOM element."""
        element = obj.getElement()
        element.addEventListener('mousemove', self.onMouseMove)
        element.addEventListener('mousedown', self.onMouseDown)
        element.addEventListener('mouseup', self.onMouseUp)
        element.addEventListener('mouseenter', self.onMouseEnter)
        element.addEventListener('mouseleave', self.onMouseLeave)
        # Feature-detect the wheel event name (modern / legacy / old FF).
        if hasattr(element, 'onwheel'):
            element.addEventListener('wheel', self.onMouseWheel)
        elif hasattr(element, 'onmousewheel'):
            element.addEventListener('mousewheel', self.onMouseWheel)
        else:
            element.addEventListener('DOMMouseScroll', self.onMouseWheel)

    def addKeyboardListener(self, obj):
        """Attach key handlers; tabindex makes the element focusable."""
        element = obj.getElement()
        element.setAttribute('tabindex','0')
        element.addEventListener('keydown', self.onKeyDown)
        element.addEventListener('keyup', self.onKeyUp)
        element.addEventListener('keypress', self.onKeyPress)

    def _addKeyboardListener(self, obj):
        # Variant wiring underscore-prefixed handlers; the _on* methods are
        # expected to be supplied by the subclass.
        element = obj.getElement()
        element.setAttribute('tabindex','0')
        element.addEventListener('keydown', self._onKeyDown)
        element.addEventListener('keyup', self._onKeyUp)
        element.addEventListener('keypress', self._onKeyPress)

    def addKeyEventListener(self, obj):
        # NOTE(review): ``self.__name__`` and ``self.onKeyEvent`` are not
        # defined for plain CPython instances; presumably this relies on
        # the Transcrypt target -- confirm before reuse.
        element = obj.getElement()
        element.setAttribute('tabindex','0')
        listener = lambda event: self.onKeyEvent(event)
        # Keep a reference so the same listener can be removed later.
        _listener[self.__name__] = listener
        element.addEventListener('keydown', listener)

    def removeKeyEventListener(self, obj):
        element = obj.getElement()
        listener = _listener[self.__name__]
        element.removeEventListener('keydown', listener)
        del _listener[self.__name__]

    def addFocusListener(self, obj):
        element = obj.getElement()
        element.setAttribute('tabindex','0')
        element.addEventListener('focus', self.onFocus)
        element.addEventListener('blur', self.onBlur)

    def sinkEvents(self, events):
        # Records the event mask only; wiring happens in add*Listener.
        self._sink_events = events

    # Default no-op handlers; subclasses override the ones they need.
    def onMouseMove(self, event):
        pass

    def onMouseDown(self, event):
        pass

    def onMouseUp(self, event):
        pass

    def onMouseEnter(self, event):
        pass

    def onMouseLeave(self, event):
        pass

    def onMouseWheel(self, event):
        pass

    def onKeyDown(self, event):
        pass

    def onKeyUp(self, event):
        pass

    def onKeyPress(self, event):
        pass

    def onTouchInitiate(self, event):
        pass

    def onTouchStart(self, event):
        pass

    def onTouchEnd(self, event):
        pass

    def onTouchMove(self, event):
        pass

    def onTouchCancel(self, event):
        pass

    def onFocus(self, event):
        pass

    def onBlur(self, event):
        pass

    def focus(self):
        self._element.focus()

    def blur(self):
        self._element.blur()
# Registry of installed keydown listeners, keyed per owner; used by
# FocusElement.addKeyEventListener/removeKeyEventListener.
_listener = {}
class HTML5Canvas(FocusElement):
    """Canvas element wrapper exposing the 2D drawing-context API."""

    _identity = 0  # class-level counter used to assign unique canvas ids

    def __init__(self, width, height):
        FocusElement.__init__(self)
        # Allocate a unique numeric id for this canvas instance.
        self._id = HTML5Canvas._identity
        HTML5Canvas._identity += 1
        self._canvas = document.createElement('canvas')
        self._element = self._canvas
        self._element.id = str(self._id)
        self._element.width = width
        self._element.height = height
        self.width = width
        self.height = height
        self._element.style.margin = '0px'
        self._element.style.padding = '0px'
        self._element.style['vertical-align'] = 'bottom'
        self._element.style.id = str(self._id)
        self.canvas = self._element
        self._ctx = self._element.getContext('2d')
        self.impl = CanvasImpl(self._ctx)

    def resize(self, width, height):
        # NOTE(review): updates only the tracked size, not the DOM
        # element's width/height attributes -- confirm callers handle
        # the element resize separately.
        self.width = width
        self.height = height

    def drawImage(self, image, *args):
        """Draw image at (x,y), scaled (x,y,w,h), or clipped (8 args)."""
        ln = len(args)
        if ln == 2:
            self._ctx.drawImage(image,args[0],args[1])
        elif ln == 4:
            self._ctx.drawImage(image,args[0],args[1],args[2],args[3])
        elif ln == 8:
            self._ctx.drawImage(image,args[0],args[1],args[2],args[3],
                                args[4],args[5],args[6],args[7])

    def fill(self):
        self._ctx.fill()

    def setFillStyle(self, style):
        self._ctx.fillStyle = str(style)

    def fillRect(self, x, y, width, height):
        self._ctx.fillRect(x, y, width, height)

    def clear(self):
        # NOTE(review): clear() is not part of the standard CanvasRenderingContext2D
        # API; presumably supplied by the target environment -- confirm.
        self._ctx.clear()

    def setLineWidth(self, width):
        self._ctx.lineWidth = width

    def setStrokeStyle(self, style):
        self._ctx.strokeStyle = str(style)

    def strokeRect(self, x, y, width, height):
        self._ctx.strokeRect(x, y, width, height)

    def saveContext(self):
        self._ctx.save()

    def restoreContext(self):
        self._ctx.restore()

    def translate(self, x, y):
        self._ctx.translate(x,y)

    def scale(self, x, y):
        self._ctx.scale(x,y)

    def rotate(self, angle):
        self._ctx.rotate(angle)

    def transform(self, m11, m12, m21, m22, dx, dy):
        self._ctx.transform(m11, m12, m21, m22, dx, dy)

    def arc(self, x, y, r, sAngle, eAngle, counterclockwise):
        self._ctx.arc(x, y, r, sAngle, eAngle, counterclockwise)

    def beginPath(self):
        self._ctx.beginPath()

    def closePath(self):
        self._ctx.closePath()

    def moveTo(self, x, y):
        self._ctx.moveTo(x, y)

    def lineTo(self, x, y):
        self._ctx.lineTo(x, y)

    def stroke(self):
        self._ctx.stroke()

    def setFont(self, font):
        self._ctx.font = font

    def setTextAlign(self, align):
        self._ctx.textAlign = align

    def setTextBaseline(self, baseline):
        self._ctx.textBaseline = baseline

    def fillText(self, text, x, y):
        self._ctx.fillText(text, x, y)

    def strokeText(self, text, x, y):
        self._ctx.strokeText(text, x, y)

    def measureText(self, text):
        """Return the pixel width of *text* under the current font."""
        return self._ctx.measureText(text).width

    def getImageData(self, x, y, width, height):
        return self._ctx.getImageData(x, y, width, height)

    def putImageData(self, *args):
        # 3-arg form: (imagedata, dx, dy); 7-arg form adds the dirty rect.
        if len(args) == 3:
            self._ctx.putImageData(args[0], args[1], args[2])
        else:
            self._ctx.putImageData(args[0], args[1], args[2], args[3], args[4], args[5], args[6])

    def getContext(self, ctx_type='2d', ctx_attr=None):
        """Return a drawing context, optionally with context attributes."""
        if ctx_attr is None:
            return self._element.getContext(ctx_type)
        else:
            return self._element.getContext(ctx_type, ctx_attr)

    def toDataURL(self, img_type='image/png', enc_options=0.92):
        return self._element.toDataURL(img_type, enc_options)

    def toBlob(self, callback, img_type='image/png', quality=0.92):
        return self._element.toBlob(callback, img_type, quality)

    def getElement(self):
        return self._element
class CanvasImpl:
    """Minimal impl shim exposing the raw 2D context (Pyjs-compatible)."""

    def __init__(self, ctx):
        self.canvasContext = ctx
class Panel(Element):
    """Container element backed by a freshly created <div>."""

    def __init__(self):
        # Does not call Element.__init__; creates its own div directly.
        self._element = document.createElement('div')

    def setID(self, id):
        self._element.id = id

    def getID(self):
        return self._element.id

    def appendChild(self, element):
        # Unlike Element.appendChild, expects a wrapped Element instance.
        self._element.appendChild(element._element)

    def removeChild(self, element):
        self._element.removeChild(element._element)

    def append(self, element):
        self._element.appendChild(element._element)

    def add(self, element):
        # Alias of append(), kept for API compatibility.
        self.append(element)

    def remove(self, element):
        self._element.removeChild(element._element)
class RootPanel(Panel):
    """Panel bound to the page's root container element."""

    _id = None  # id of the root DOM element, set via _set_root_panel()

    def __init__(self):
        # Fall back to the default id. Note: assigning self._id shadows
        # the class attribute with an instance attribute.
        if self._id is None:
            self._id = '__panel__'
        self._element = document.getElementById(self._id)

    @classmethod
    def _set_root_panel(cls, id):
        # First caller wins; later calls leave the configured id alone.
        if cls._id is None:
            cls._id = id

    def setId(self, id):
        self._id = id

    def getId(self):
        return self._id

    def add(self, element):
        # Accept either a wrapped Element or a raw DOM node.
        if isinstance(element, Element):
            self._element.appendChild(element.getElement())
        else:
            self._element.appendChild(element)
class FocusPanel(Panel):
    """Panel variant intended to receive focus; no extra behavior yet."""
    pass
class VerticalPanel(Panel):
    """Panel that stacks its children vertically using CSS flexbox."""

    def __init__(self):
        Panel.__init__(self)
        self._element.style.display = 'flex'
        self._element.style['flex-direction'] = 'column'

    def append(self, element):
        """Append a child element, stretched to the panel width."""
        el = element._element
        # Bug fix: the display value must be set on el.style, not on the
        # element itself -- `el.display = ...` only creates a stray JS
        # property with no visual effect (sibling classes, e.g. TextBox,
        # set style.display).
        el.style.display = 'inline-block'
        el.style.flex = '1'
        el.style.width = '100%'
        self._element.appendChild(el)
class TextBox(Element):
    """Single-line text input element."""

    _type = 'input'  # DOM tag to create; subclasses may override

    def __init__(self):
        Element.__init__(self)
        self._element = document.createElement(self._type)
        style = self._element.style
        style.display = 'inline-block'
        style.flex = '1'
        style.border = '1px solid rgb(118, 118, 118)'
        style.margin = '0px'
        style.padding = '0px'

    @property
    def value(self):
        """Current text content of the input."""
        return self._element.value

    @value.setter
    def value(self, text):
        self._element.value = text

    def setVisible(self, visible):
        """Show or hide the input element."""
        self._element.style.display = 'inline-block' if visible else 'none'

    def getVisible(self):
        """Return True when the element is not hidden."""
        return self._element.style.display != 'none'

    def getText(self):
        return self._element.value

    def setText(self, text):
        self._element.value = text
class TextArea(TextBox):
    """Multi-line text input backed by a <textarea>."""

    _type = 'textarea'

    def __init__(self):
        TextBox.__init__(self)
        # Allow the user to resize vertically only.
        self._element.style.resize = 'vertical'
class ImageLoader:
    """Load a list of image URLs and report completion via a callback."""

    def __init__(self, imagelist, callback):
        self.imagelist = imagelist
        # callback is an object exposing onImagesLoaded(images).
        self.callback = callback
        self.images = []
        self.image_toload = len(self.imagelist)  # outstanding load count
        for image in self.imagelist:
            self.load(image)

    def load(self, imageurl):
        # __new__(Image()) is the Transcrypt idiom for JS "new Image()".
        image = __new__(Image())
        self.images.append(image)
        image.addEventListener('load', self.loaded, False)
        image.src = imageurl

    def loaded(self):
        # Invoked once per image 'load' event; fire the callback when the
        # outstanding count reaches zero.
        self.image_toload -= 1
        if not self.image_toload:
            self.callback.onImagesLoaded(self.images)
def loadImages(imagelist, callback):
    """Load *imagelist* and notify *callback* when all images are ready."""
    ImageLoader(imagelist, callback)
class Color:
    """Placeholder color type (not implemented for this target)."""

    def __init__(self):
        pass
class Audio:
    """Thin wrapper over an HTML <audio> element."""

    def __init__(self, sound_file):
        self.element = document.createElement("AUDIO")
        self.element.src = sound_file

    def play(self):
        self.element.play()

    def pause(self):
        self.element.pause()

    def getCurrentTime(self):
        """Return the playback position in seconds."""
        return self.element.currentTime

    def setCurrentTime(self, time):
        self.element.currentTime = time

    def isPaused(self):
        return self.element.paused

    def getSrc(self):
        return self.element.src

    def getVolume(self):
        return self.element.volume

    def setVolume(self, volume):
        # HTMLMediaElement volume range is 0.0-1.0.
        self.element.volume = volume

    def getDuration(self):
        return self.element.duration
class DOM:
    """Static helpers mirroring the Pyjs DOM module API."""

    @staticmethod
    def eventGetCurrentEvent():
        # Returns a placeholder Event instance.
        return Event()

    @staticmethod
    def setStyleAttribute(element, attr, val):
        element.style[attr] = val
class Event:
    """Placeholder event type returned by DOM.eventGetCurrentEvent()."""
    pass
def doc():
    """Return the browser document object."""
    return document
def get_main_frame():
    """Return the document (Pyjs-compatible accessor name)."""
    return document
def wnd():
    """Return the browser window object."""
    return window
def requestAnimationFrameInit():
    """Install the requestAnimationFrame polyfill and return window."""
    requestAnimationFramePolyfill()
    return wnd()
def performanceNowInit():
    """Install the performance.now polyfill and return window."""
    performanceNowPolyfill()
    return wnd()
def requestAnimationFramePolyfill():
    """Inject a JS requestAnimationFrame/cancelAnimationFrame polyfill.

    The embedded JavaScript is emitted verbatim by Transcrypt's
    __pragma__('js', ...) and must not be altered from Python.
    """
    __pragma__('js', {},
"""
// http://paulirish.com/2011/requestanimationframe-for-smart-animating/
// http://my.opera.com/emoller/blog/2011/12/20/requestanimationframe-for-smart-er-animating
// requestAnimationFrame polyfill by <NAME>. fixes from Paul Irish and <NAME>
// MIT license
(function() {
    var lastTime = 0;
    var vendors = ['ms', 'moz', 'webkit', 'o'];
    for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) {
        window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame'];
        window.cancelAnimationFrame = window[vendors[x]+'CancelAnimationFrame']
                                   || window[vendors[x]+'CancelRequestAnimationFrame'];
    }
    if (!window.requestAnimationFrame)
        window.requestAnimationFrame = function(callback, element) {
            var currTime = new Date().getTime();
            var timeToCall = Math.max(0, 16 - (currTime - lastTime));
            var id = window.setTimeout(function() { callback(currTime + timeToCall); },
              timeToCall);
            lastTime = currTime + timeToCall;
            return id;
        };
    if (!window.cancelAnimationFrame)
        window.cancelAnimationFrame = function(id) {
            clearTimeout(id);
        };
}());
""")
def performanceNowPolyfill():
__pragma__('js', {},
"""
// @license http://opensource.org/licenses/MIT
// copyright <NAME> 2015
// Date.now() is supported everywhere except IE8. For IE8 we use the Date.now polyfill
// github.com/Financial-Times/polyfill-service/blob/master/polyfills/Date.now/polyfill.js
// as Safari 6 doesn't have support for NavigationTiming, we use a Date.now() timestamp for relative values
// if you want values similar to what you'd get with real perf.now, place this towards the head of the page
// but in reality, you're just getting the delta between now() calls, so it's not terribly important where it's placed
(function(){
if ("performance" in window == false) {
window.performance = {};
}
Date.now = (Date.now || function () { // thanks IE8
return new Date().getTime();
});
if ("now" in window.performance == false){
var nowOffset = Date.now();
if (performance.timing && performance.timing.navigationStart){
nowOffset = performance.timing.navigationStart
}
| |
b * acoth(c * x)) ** n * PolyLog(p + S(1), u) / (S(2) * c * d), x)
def replacement6497(a, b, c, d, e, n, p, u, x):
    # Machine-generated RUBI integration rule (sympy rubi): reduces the
    # atanh power while raising the PolyLog order. Do not hand-edit.
    return -Dist(
        b * n / S(2),
        Int(
            (a + b * atanh(c * x)) ** (n + S(-1))
            * PolyLog(p + S(1), u)
            / (d + e * x ** S(2)),
            x,
        ),
        x,
    ) + Simp((a + b * atanh(c * x)) ** n * PolyLog(p + S(1), u) / (S(2) * c * d), x)
def replacement6498(a, b, c, d, e, n, p, u, x):
    # Machine-generated RUBI rule: acoth analogue of replacement6497.
    return -Dist(
        b * n / S(2),
        Int(
            (a + b * acoth(c * x)) ** (n + S(-1))
            * PolyLog(p + S(1), u)
            / (d + e * x ** S(2)),
            x,
        ),
        x,
    ) + Simp((a + b * acoth(c * x)) ** n * PolyLog(p + S(1), u) / (S(2) * c * d), x)
def replacement6499(a, b, c, d, e, x):
    # Machine-generated RUBI rule combining atanh and acoth factors.
    return Simp(
        (-log(a + b * acoth(c * x)) + log(a + b * atanh(c * x)))
        / (b ** S(2) * c * d * (acoth(c * x) - atanh(c * x))),
        x,
    )
def replacement6500(a, b, c, d, e, m, n, x):
    # Machine-generated RUBI reduction on the atanh exponent n.
    return -Dist(
        n / (m + S(1)),
        Int(
            (a + b * acoth(c * x)) ** (m + S(1))
            * (a + b * atanh(c * x)) ** (n + S(-1))
            / (d + e * x ** S(2)),
            x,
        ),
        x,
    ) + Simp(
        (a + b * acoth(c * x)) ** (m + S(1))
        * (a + b * atanh(c * x)) ** n
        / (b * c * d * (m + S(1))),
        x,
    )
def replacement6501(a, b, c, d, e, m, n, x):
    # Machine-generated RUBI reduction on the acoth exponent n
    # (mirror of replacement6500).
    return -Dist(
        n / (m + S(1)),
        Int(
            (a + b * acoth(c * x)) ** (n + S(-1))
            * (a + b * atanh(c * x)) ** (m + S(1))
            / (d + e * x ** S(2)),
            x,
        ),
        x,
    ) + Simp(
        (a + b * acoth(c * x)) ** n
        * (a + b * atanh(c * x)) ** (m + S(1))
        / (b * c * d * (m + S(1))),
        x,
    )
def replacement6502(a, c, d, n, x):
    # RUBI rule: splits atanh(a*x) via atanh(z) = (log(1+z) - log(1-z))/2.
    return -Dist(S(1) / 2, Int(log(-a * x + S(1)) / (c + d * x ** n), x), x) + Dist(
        S(1) / 2, Int(log(a * x + S(1)) / (c + d * x ** n), x), x
    )
def replacement6503(a, c, d, n, x):
    # RUBI rule: splits acoth(a*x) via acoth(z) = (log(1+1/z) - log(1-1/z))/2.
    return -Dist(
        S(1) / 2, Int(log(S(1) - S(1) / (a * x)) / (c + d * x ** n), x), x
    ) + Dist(S(1) / 2, Int(log(S(1) + S(1) / (a * x)) / (c + d * x ** n), x), x)
def replacement6504(a, b, c, d, e, f, g, x):
    # Machine-generated RUBI rule: atanh factor times a log(f+g*x^2) factor,
    # handled by double integration by parts.
    return (
        -Dist(
            b * c,
            Int(
                x
                * (d + e * log(f + g * x ** S(2)))
                / (-(c ** S(2)) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            S(2) * e * g,
            Int(x ** S(2) * (a + b * atanh(c * x)) / (f + g * x ** S(2)), x),
            x,
        )
        + Simp(x * (a + b * atanh(c * x)) * (d + e * log(f + g * x ** S(2))), x)
    )
def replacement6505(a, b, c, d, e, f, g, x):
    # Machine-generated RUBI rule: acoth analogue of replacement6504.
    return (
        -Dist(
            b * c,
            Int(
                x
                * (d + e * log(f + g * x ** S(2)))
                / (-(c ** S(2)) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            S(2) * e * g,
            Int(x ** S(2) * (a + b * acoth(c * x)) / (f + g * x ** S(2)), x),
            x,
        )
        + Simp(x * (a + b * acoth(c * x)) * (d + e * log(f + g * x ** S(2))), x)
    )
def replacement6506(a, b, c, d, e, f, g, m, x):
    # Machine-generated RUBI rule: x^m power variant of replacement6504.
    return (
        -Dist(
            b * c / (m + S(1)),
            Int(
                x ** (m + S(1))
                * (d + e * log(f + g * x ** S(2)))
                / (-(c ** S(2)) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            S(2) * e * g / (m + S(1)),
            Int(x ** (m + S(2)) * (a + b * atanh(c * x)) / (f + g * x ** S(2)), x),
            x,
        )
        + Simp(
            x ** (m + S(1))
            * (a + b * atanh(c * x))
            * (d + e * log(f + g * x ** S(2)))
            / (m + S(1)),
            x,
        )
    )
def replacement6507(a, b, c, d, e, f, g, m, x):
    # Machine-generated RUBI rule: acoth analogue of replacement6506.
    return (
        -Dist(
            b * c / (m + S(1)),
            Int(
                x ** (m + S(1))
                * (d + e * log(f + g * x ** S(2)))
                / (-(c ** S(2)) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Dist(
            S(2) * e * g / (m + S(1)),
            Int(x ** (m + S(2)) * (a + b * acoth(c * x)) / (f + g * x ** S(2)), x),
            x,
        )
        + Simp(
            x ** (m + S(1))
            * (a + b * acoth(c * x))
            * (d + e * log(f + g * x ** S(2)))
            / (m + S(1)),
            x,
        )
    )
def With6508(a, b, c, d, e, f, g, m, x):
    # RUBI "With" rule: pre-integrate the log factor (IntHide), then
    # integrate by parts against the atanh factor.
    u = IntHide(x ** m * (d + e * log(f + g * x ** S(2))), x)
    return -Dist(
        b * c, Int(ExpandIntegrand(u / (-(c ** S(2)) * x ** S(2) + S(1)), x), x), x
    ) + Dist(a + b * atanh(c * x), u, x)
def With6509(a, b, c, d, e, f, g, m, x):
    # RUBI "With" rule: acoth analogue of With6508.
    u = IntHide(x ** m * (d + e * log(f + g * x ** S(2))), x)
    return -Dist(
        b * c, Int(ExpandIntegrand(u / (-(c ** S(2)) * x ** S(2) + S(1)), x), x), x
    ) + Dist(a + b * acoth(c * x), u, x)
def With6510(a, b, c, d, e, f, g, m, x):
    # RUBI "With" rule: pre-integrate the atanh factor, then integrate by
    # parts against the log factor.
    u = IntHide(x ** m * (a + b * atanh(c * x)), x)
    return -Dist(
        S(2) * e * g, Int(ExpandIntegrand(u * x / (f + g * x ** S(2)), x), x), x
    ) + Dist(d + e * log(f + g * x ** S(2)), u, x)
def With6511(a, b, c, d, e, f, g, m, x):
    # RUBI "With" rule: acoth analogue of With6510.
    u = IntHide(x ** m * (a + b * acoth(c * x)), x)
    return -Dist(
        S(2) * e * g, Int(ExpandIntegrand(u * x / (f + g * x ** S(2)), x), x), x
    ) + Dist(d + e * log(f + g * x ** S(2)), u, x)
def replacement6512(a, b, c, d, e, f, g, x):
    # Machine-generated RUBI rule for a squared atanh factor with a
    # log(f+g*x^2) factor. Do not hand-edit.
    return (
        Dist(
            b / c, Int((a + b * atanh(c * x)) * (d + e * log(f + g * x ** S(2))), x), x
        )
        + Dist(
            b * c * e,
            Int(
                x ** S(2) * (a + b * atanh(c * x)) / (-(c ** S(2)) * x ** S(2) + S(1)),
                x,
            ),
            x,
        )
        - Simp(e * x ** S(2) * (a + b * atanh(c * x)) ** S(2) / S(2), x)
        + Simp(
            (a + b * atanh(c * x)) ** S(2)
            * (d + e * log(f + g * x ** S(2)))
            * (f + g * x ** S(2))
            / (S(2) * g),
            x,
        )
    )
def replacement6513(a, b, c, d, e, f, g, x):
return (
Dist(
b / c, Int((a + b * acoth(c * x)) * (d + e * log(f + g * x ** | |
<reponame>chrishales/polcalsims
# <NAME>
# 22 June 2017
# Version 1.0
#
#
# This code is released under a BSD 3-Clause License
# See LICENSE for details
#
# This code was used to obtain the results shown in arXiv:1706.06612
# and EVLA Memo 201 / ALMA Memo 603.
#
#
# This code will plot spurious full-array on-axis linear polarization
# (95th percentile) when viewing an unpolarized source following
# calibration with an array with linear feeds. Solutions will be plotted
# for a polarization calibrator observed over a range of input S/N and
# parallactic angle coverage values.
#
# Note that the full-array spurious linear polarization is calculated by
# dividing the error in the modulus of instrumental polarization leakage
# by sqrt(N_ant) where N_ant is the number of antennas in the array.
# ie if you want to recover d-term modulus error, multiply by this factor.
#
# Note also that the statistics for spurious linear and spurious circular
# polarization are the same, though in the latter there may be an
# additional zero-point issue to contend with (see also comments below).
# For spurious elliptical polarization, multiply the values in the plot
# by sqrt(pi/2).
#
# S/N = source total intensity divided by noise, where noise is
# given by the sensitivity within an individual channel
# dual-pol image in which all baselines were combined (e.g. a
# Stokes I image) for a time period spanning 1 slice (1+ scans
# combined at approximately constant parallactic angle).
# The code will assume that S/N is the same for all slices.
# e.g. see https://science.nrao.edu/facilities/vla/docs/
# manuals/oss/performance/sensitivity
# noise = SEFD/corr_eff/sqrt[2.Nant.(Nant-1).timespan.bandwidth]
#
# The plots produced by this code will enable the following questions
# to be answered:
# "For a given S/N, what is the parallactic angle coverage I require
# to obtain spurious on-axis polarization less than X percent?"
#
# or alternatively
# "For a given parallactic angle coverage, what is the S/N I require
# to obtain spurious on-axis polarization less than X percent?"
#
#
#
# The code focuses on the characteristic error in the leakage modulus
# "D" for a single polarization (X or Y) on a single antenna by
# considering leakage in V_XY (or could use V_YX) along baseline i-j.
# The code utilizes Nant-1 baselines to antenna i when solving for
# the characteristic error.
#
# The code assumes that leakages are constant with time (mechanical
# details probably don't change rapidly).
#
#
#
# It is difficult to generalize the solutions to all calibrators.
# This code will assume that the first observed slice is at zero
# parallactic angle (probably likely for the 1 and 2 scan approaches)
# and that the calibrator has position angle 45deg. Worst-case
# errors will probably result by placing points symmetrically
# about U_psi=0, but users would run into trouble trying to
# solve for crosshand phase, so let's also avoid this setup here.
# Note that the true worst-case solutions of relevance for this
# simulation will take place when 2 scans have similar U_psi
# (i.e. negligible parallactic angle separation), whether in
# the 2 or 3 scan strategies.
#
# Of course, observers should do whatever they can to maximize
# U_psi coverage, rather than simply parallactic angle coverage!!
# This is even more important in the 2 scan strategy than in the
# 3 scan strategy, which is of course handy because we know the
# source polarization a priori in the 2 scan approach and can
# therefore target specific U_psi values.
#
#
#
# The code will calculate results for 2 representative scenarios where
# the calibrator has 3% or 10% fractional linear polarization.
#
# The code will produce plots showing the typical error in the modulus
# of D, divided by sqrt(N_ant) to obtain spurious full-array on-axis
# linear polarization, as a function of S/N and parallactic angle
# separation between slices. Position angle errors will also be
# estimated, including both statistical and systematic errors,
# the latter when relevant (arising from relative leakage solutions).
#
# This simulation will assume that explicit position angle calibration
# has NOT been performed. A systematic error will thus be included.
#
# Two cases will be considered for both fractional polarization
# scenarios: Q & U known a priori, and unknown.
#
# When Q & U are known, 1 or 2 independent slices are required to
# recover D, relative or absolute, respectively. For either
# strategy, 1 slice is needed to determine the crosshand phase.
#
# When Q & U are unknown, 3 independent slices are required to recover
# the crosshand phase as well as Q & U. 2+ of these slices are then
# needed to recover absolute leakages. This code will assume that
# the 3 points are spaced equally within the specified total
# parallactic angle span. This code will also investigate a 10 slice
# approach.
#
# This code will only take into account errors in recovered Q & U as
# appropriate for the 3 and 10 slice strategies.
#
# This code will assume that Stokes V is zero for all calibrators.
#
#
#
import numpy as np
#import matplotlib.cm as cm
import matplotlib.pyplot as plt
#from matplotlib.colors import LogNorm
from scipy.optimize import curve_fit
################################################################################
# number of antennas in array
Nant = 40
# fractional linear polarization of calibrator = L/I = sqrt(Q^2+U^2)/I
# assume position angle is 45 degrees, i.e. all crosshand signal in U
# at parallactic angle 0 degrees
# specify two representative cases
fraclinpol = np.r_[0.03, 0.1]
# typical (mean) d-term modulus (~0.015 for ALMA in bands 3,6,7)
# this is = sqrt(pi/2) times 1 sigma real or imaginary part
d_typical = 0.015
# above value is also used to estimate worst-case systematic position angle error
# for 1 slice strategy due to use of relative leakages, assuming
# Re(d_typical) ~= d_typical
# (statistically, this will probably be more like d_typical/sqrt(2/pi), but
# take worst-case estimate here)
# mechanical feed alignment uncertainty for individual antenna, in degrees
systpaerr_foffset = 2.0
# This will be added in quadrature to the statistical errors calculated,
# as well as the relative-leakage systematic error above for the 1 slice strategy
# recover numerical or theoretical solutions for 1 slice strategy?
# results are practically indistinguishable
# 0=theory 1=numerical
strategy1slice = 0
################################################################################
pi = np.pi
d2r = pi/180.
# number of baselines
# NOTE(review): under Python 3 true division makes Nb a float (780.0) --
# confirm downstream use tolerates a non-integer baseline count.
Nb = Nant*(Nant-1)/2
systpaerr_foffset_rad = systpaerr_foffset * d2r
# number of samples from which to measure error in d
# note that uncertainty in estimate of rms from N samples is
# np.sqrt((1+0.75/(N-1))**2 * (1-1./N) - 1.)
# p. 63, <NAME>., <NAME>., 1970, Distributions in Statistics:
# Continuous Univariate Distributions 1. Houghton Mifflin, NY
# ie we require 1e3 samples to get error in rms to 2%
# Non-linearity of problem makes this an approximation,
# but should be reasonable enough
samples = 1e4
parsep = np.linspace(0.1, 179.8, 90)
snr = np.logspace(1, 6, 90)
x, y = np.meshgrid(snr, parsep)
# fast track
#parsep = np.linspace(0.1,179.8,10)
#snr = np.logspace(1,6,10)
#x,y = np.meshgrid(snr,parsep)
#samples = 1e2
mypercentile = 95
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int performs the identical cast.
samples = int(samples)
# number of scans equally separated over total parallactic angle coverage,
# when calibrator Stokes vector unknown a priori
nscans = np.r_[3, 10]
def calc_rho_12(dat):
    """Recover the crosshand phase for the 1 or 2 slice strategy.

    Q & U are known a priori and the true crosshand phase is assumed to be
    zero (no loss of generality), so the phase is simply the argument of the
    complex crosshand data.
    """
    return np.arctan2(np.imag(dat), np.real(dat))
def calc_d(u_psi, q_psi, V_XY, dxi):
    """Least-squares estimate of the error in the recovered d-term modulus.

    Solves for X (the d-term on antenna i; real and imaginary components
    fitted separately) given crosshand visibilities V_XY along baselines to
    antenna i, and returns |X - dxi|, the error relative to the true d-term.
    The Y (sum over dyj) unknown is eliminated but not returned.
    """
    one_minus_q = 1. - q_psi
    one_plus_q = 1. + q_psi
    re_v = np.real(V_XY)
    im_v = np.imag(V_XY)
    # Normal-equation constants (A-G in the original notation).
    A = np.sum(one_minus_q * u_psi)
    B = np.sum(one_minus_q ** 2)
    C = np.sum(1. - q_psi ** 2)
    Dx = np.sum(re_v * one_minus_q)
    Dy = np.sum(im_v * one_minus_q)
    E = np.sum(one_plus_q * u_psi)
    F = np.sum(one_plus_q ** 2)
    Gx = np.sum(re_v * one_plus_q)
    Gy = np.sum(im_v * one_plus_q)
    denom = C ** 2 - B * F
    # Only the real part carries the source (u_psi) signal terms.
    x_real = (C * Gx - Dx * F + A * F - C * E) / denom
    x_imag = (C * Gy - Dy * F) / denom
    return np.abs(x_real + 1j * x_imag - dxi)
def calc_rho(u_psi):
    """Recover the crosshand phase as the slope of imag vs real components.

    Fits a line through the mean-subtracted real/imaginary parts of the
    crosshand data and returns its angle.
    """
    re_centered = u_psi.real - np.mean(u_psi.real)
    im_centered = u_psi.imag - np.mean(u_psi.imag)
    slope_num = np.sum(re_centered * im_centered)
    slope_den = np.sum(re_centered ** 2)
    return np.arctan2(slope_num, slope_den)
def func_qu(psi, q, u, err):
    """Model of the crosshand response vs parallactic angle psi (radians):
    U*cos(2*psi) - Q*sin(2*psi) plus a constant offset term."""
    two_psi = 2. * psi
    return -q * np.sin(two_psi) + u * np.cos(two_psi) + err
def calc_qu(rho,psi,u_psi):
# solve along real axis
# ideally we would perform the fit with points weighted
# by their distance from the line fit by rho (e.g.
# away from the mean imaginary offset), but it
# shouldn't make a huge difference in practice.
# The variances are all equal anyway (by design), | |
<reponame>odb9402/MAT<gh_stars>0
"""
Code based on:
Shang et al "Edge Attention-based Multi-Relational Graph Convolutional Networks" -> https://github.com/Luckick/EAGCN
Coley et al "Convolutional Embedding of Attributed Molecular Graphs for Physical Property Prediction" -> https://github.com/connorcoley/conv_qsar_fast
"""
import logging
import os
import pickle
import numpy as np
import pandas as pd
import torch
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import MolFromSmiles
from sklearn.metrics import pairwise_distances
from torch.utils.data import Dataset
# Cache CUDA availability once at import time; the aliases below then pick
# GPU-backed tensor constructors when a CUDA device is present.
use_cuda = torch.cuda.is_available()
# Tensor-type aliases used throughout the module (CUDA if available, else CPU).
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
DoubleTensor = torch.cuda.DoubleTensor if use_cuda else torch.DoubleTensor
def load_data_from_df(dataset_path, add_dummy_node=True, one_hot_formal_charge=False, use_data_saving=True):
    """Load and featurize data stored in a CSV file.

    Args:
        dataset_path (str): A path to the CSV file containing the data. It should have two columns:
                            the first one contains SMILES strings of the compounds,
                            the second one contains labels.
        add_dummy_node (bool): If True, a dummy node will be added to the molecular graph. Defaults to True.
        one_hot_formal_charge (bool): If True, formal charges on atoms are one-hot encoded. Defaults to False.
        use_data_saving (bool): If True, saved features will be loaded from the dataset directory; if no feature file
                                is present, the features will be saved after calculations. Defaults to True.

    Returns:
        A tuple (X, y) in which X is a list of graph descriptors (node features, adjacency matrices, distance matrices),
        and y is a list of the corresponding labels.
    """
    feat_stamp = f'{"_dn" if add_dummy_node else ""}{"_ohfc" if one_hot_formal_charge else ""}'
    feature_path = dataset_path.replace('.csv', f'{feat_stamp}.p')
    if use_data_saving and os.path.exists(feature_path):
        logging.info(f"Loading features stored at '{feature_path}'")
        # BUG FIX: pickle.load(open(...)) leaked the file handle; use a
        # context manager so it is closed deterministically.
        with open(feature_path, "rb") as f:
            x_all, y_all = pickle.load(f)
        return x_all, y_all
    data_df = pd.read_csv(dataset_path)
    data_x = data_df.iloc[:, 0].values
    data_y = data_df.iloc[:, 1].values
    if data_y.dtype == np.float64:
        # Halve label memory; float32 is sufficient for training targets.
        data_y = data_y.astype(np.float32)
    x_all, y_all = load_data_from_smiles(data_x, data_y, add_dummy_node=add_dummy_node,
                                         one_hot_formal_charge=one_hot_formal_charge)
    if use_data_saving and not os.path.exists(feature_path):
        logging.info(f"Saving features at '{feature_path}'")
        # BUG FIX: same leak on the write path; close the handle promptly so
        # the cache file is fully flushed even if a later step fails.
        with open(feature_path, "wb") as f:
            pickle.dump((x_all, y_all), f)
    return x_all, y_all
def load_data_from_smiles(x_smiles, labels, add_dummy_node=True, one_hot_formal_charge=False):
    """Load and featurize data from lists of SMILES strings and labels.

    Args:
        x_smiles (list[str]): A list of SMILES strings.
        labels (list[float]): A list of the corresponding labels.
        add_dummy_node (bool): If True, a dummy node will be added to the molecular graph. Defaults to True.
        one_hot_formal_charge (bool): If True, formal charges on atoms are one-hot encoded. Defaults to False.

    Returns:
        A tuple (X, y) in which X is a list of graph descriptors (node features, adjacency matrices, distance matrices),
        and y is a list of the corresponding labels.  Molecules that cannot be
        converted are skipped with a warning.
    """
    x_all, y_all = [], []
    for smiles, label in zip(x_smiles, labels):
        try:
            mol = MolFromSmiles(smiles)
            try:
                # Try to generate an optimized 3D conformer for distances.
                mol = Chem.AddHs(mol)
                AllChem.EmbedMolecule(mol, maxAttempts=5000)
                AllChem.UFFOptimizeMolecule(mol)
                mol = Chem.RemoveHs(mol)
            # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
            # SystemExit; catch Exception so only embedding failures fall back.
            except Exception:
                # 3D embedding can fail for some molecules; fall back to 2D coords.
                AllChem.Compute2DCoords(mol)
            afm, adj, dist = featurize_mol(mol, add_dummy_node, one_hot_formal_charge)
            x_all.append([afm, adj, dist])
            y_all.append([label])
        except ValueError as e:
            logging.warning('the SMILES ({}) can not be converted to a graph.\nREASON: {}'.format(smiles, e))
    return x_all, y_all
def featurize_mol(mol, add_dummy_node, one_hot_formal_charge):
    """Featurize molecule.

    Args:
        mol (rdchem.Mol): An RDKit Mol object (with a conformer already computed).
        add_dummy_node (bool): If True, a dummy node will be added to the molecular graph.
        one_hot_formal_charge (bool): If True, formal charges on atoms are one-hot encoded.

    Returns:
        A tuple of molecular graph descriptors (node features, adjacency matrix, distance matrix).
    """
    n_atoms = mol.GetNumAtoms()
    node_features = np.array([get_atom_features(a, one_hot_formal_charge)
                              for a in mol.GetAtoms()])
    # Adjacency with self-loops on the diagonal.
    adj_matrix = np.eye(n_atoms)
    for bond in mol.GetBonds():
        i = bond.GetBeginAtom().GetIdx()
        j = bond.GetEndAtom().GetIdx()
        adj_matrix[i, j] = 1
        adj_matrix[j, i] = 1
    conf = mol.GetConformer()
    positions = np.array([[conf.GetAtomPosition(k).x,
                           conf.GetAtomPosition(k).y,
                           conf.GetAtomPosition(k).z]
                          for k in range(n_atoms)])
    dist_matrix = pairwise_distances(positions)
    if add_dummy_node:
        # Grow each descriptor by one row/column; the dummy node gets its own
        # feature slot, no bonds, and a huge (1e6) distance to every atom.
        padded = np.zeros((node_features.shape[0] + 1, node_features.shape[1] + 1))
        padded[1:, 1:] = node_features
        padded[0, 0] = 1.
        node_features = padded
        padded = np.zeros((adj_matrix.shape[0] + 1, adj_matrix.shape[1] + 1))
        padded[1:, 1:] = adj_matrix
        adj_matrix = padded
        padded = np.full((dist_matrix.shape[0] + 1, dist_matrix.shape[1] + 1), 1e6)
        padded[1:, 1:] = dist_matrix
        dist_matrix = padded
    return node_features, adj_matrix, dist_matrix
def get_atom_features(atom, one_hot_formal_charge=True):
    """Calculate atom features.

    Args:
        atom (rdchem.Atom): An RDKit Atom object.
        one_hot_formal_charge (bool): If True, formal charges on atoms are one-hot encoded.

    Returns:
        A 1-dimensional float32 array of atom features.
    """
    attributes = []
    # Atomic number one-hot; unknown elements map to the final (999) slot.
    attributes.extend(one_hot_vector(
        atom.GetAtomicNum(),
        [5, 6, 7, 8, 9, 15, 16, 17, 35, 53, 999]
    ))
    # Heavy-neighbor count and attached hydrogen count.
    attributes.extend(one_hot_vector(
        len(atom.GetNeighbors()),
        [0, 1, 2, 3, 4, 5]
    ))
    attributes.extend(one_hot_vector(
        atom.GetTotalNumHs(),
        [0, 1, 2, 3, 4]
    ))
    if one_hot_formal_charge:
        attributes.extend(one_hot_vector(
            atom.GetFormalCharge(),
            [-1, 0, 1]
        ))
    else:
        attributes.append(atom.GetFormalCharge())
    attributes.append(atom.IsInRing())
    attributes.append(atom.GetIsAromatic())
    return np.array(attributes, dtype=np.float32)
def get_protein_features(protein):
    """Calculate protein features.

    Args:
        protein (str): Protein residues "MKK...".

    Returns:
        A 1-dimensional float32 array of residue features (once implemented).

    Raises:
        NotImplementedError: protein featurization has not been written yet.
    """
    # BUG FIX: the original body returned np.array(attributes, ...) with
    # ``attributes`` never defined, so every call raised a confusing
    # NameError.  Raise an explicit, documented error instead until the
    # featurization is implemented.
    raise NotImplementedError("Protein featurization is not implemented yet.")
def one_hot_vector(val, lst):
    """Convert a value to a one-hot list based on the options in ``lst``.

    Values not present in ``lst`` map to the final (catch-all) slot.

    Returns:
        list[bool]: one ``True`` entry, everything else ``False``.
    """
    if val not in lst:
        val = lst[-1]
    # BUG FIX: returning ``map(...)`` yields a lazy, single-use iterator under
    # Python 3 (this file uses f-strings, so it targets py3); a caller that
    # reads it twice silently gets an empty sequence.  Return a concrete list.
    return [x == val for x in lst]
class Molecule:
    """
    Class that represents a train/validation/test datum
        - self.label: 0 neg, 1 pos -1 missing for different target.
    """

    def __init__(self, x, y, index):
        # x packs the three graph descriptors produced by featurize_mol.
        features, adjacency, distance = x[0], x[1], x[2]
        self.node_features = features
        self.adjacency_matrix = adjacency
        self.distance_matrix = distance
        self.y = y
        self.index = index
class Protein:
    """
    Class that represents a train/validation/test datum
    """

    def __init__(self, x, index, y=None):
        # x packs (node features, distance matrix); label may be absent.
        features, distance = x[0], x[1]
        self.node_features = features
        self.distance_matrix = distance
        self.y = y
        self.index = index
class MolDataset(Dataset):
    """
    Class that represents a train/validation/test dataset that's readable for PyTorch
    Note that this class inherits torch.utils.data.Dataset
    """

    def __init__(self, data_list):
        """
        @param data_list: list of Molecule objects
        """
        self.data_list = data_list

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, key):
        # Slicing yields another MolDataset so subsets stay dataset-typed.
        if isinstance(key, slice):
            return MolDataset(self.data_list[key])
        return self.data_list[key]
class ProteinDataset(Dataset):
    """
    Class that represents a train/validation/test dataset that's readable for PyTorch
    Note that this class inherits torch.utils.data.Dataset
    """

    def __init__(self, data_list):
        """
        @param data_list: list of Protein objects
        """
        self.data_list = data_list

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, key):
        # Slicing yields another ProteinDataset so subsets stay dataset-typed.
        if isinstance(key, slice):
            return ProteinDataset(self.data_list[key])
        return self.data_list[key]
def pad_array(array, shape, dtype=np.float32):
    """Pad a 2-dimensional array with zeros.

    Args:
        array (ndarray): A 2-dimensional array to be padded.
        shape (tuple[int]): The desired shape of the padded array.
        dtype (data-type): The desired data-type for the array.

    Returns:
        A 2-dimensional array of the given shape, with the original values in
        the top-left corner and zeros elsewhere.
    """
    rows, cols = array.shape
    out = np.zeros(shape, dtype=dtype)
    out[:rows, :cols] = array
    return out
def mol_collate_func(batch):
    """Create a padded batch of molecule features.

    Args:
        batch (list[Molecule]): A batch of raw molecules.

    Returns:
        A list of FloatTensors with padded molecule features:
        adjacency matrices, node features, distance matrices, and labels.
    """
    labels = []
    for molecule in batch:
        first = molecule.y[0]
        # Multi-target labels arrive as an ndarray in y[0]; otherwise keep y.
        if type(first) is np.ndarray:
            labels.append(first)
        else:
            labels.append(molecule.y)
    # Pad every graph descriptor up to the largest molecule in the batch.
    max_size = max((m.adjacency_matrix.shape[0] for m in batch), default=0)
    adjacency_list = [pad_array(m.adjacency_matrix, (max_size, max_size)) for m in batch]
    distance_list = [pad_array(m.distance_matrix, (max_size, max_size)) for m in batch]
    features_list = [pad_array(m.node_features, (max_size, m.node_features.shape[1])) for m in batch]
    return [FloatTensor(group) for group in (adjacency_list, features_list, distance_list, labels)]
def protein_collate_func(batch):
    """Create a padded batch of protein features.

    Args:
        batch (list[Protein]): A batch of raw proteins.

    Returns:
        A list of FloatTensors with padded protein features:
        node features, distance matrices, and labels.
    """
    # BUG FIX: the original body was an unfinished copy of mol_collate_func
    # (marked "TODO from here") that referenced undefined names (``molecule``,
    # ``adjacency_list``), so any call raised NameError.  Proteins carry node
    # features and a distance matrix only (see the Protein class), so collate
    # those two descriptors plus the labels.
    labels = []
    max_size = 0
    for protein in batch:
        # Protein.y defaults to None; only collect labels that exist.
        if protein.y is not None:
            first = protein.y[0]
            labels.append(first if type(first) is np.ndarray else protein.y)
        if protein.distance_matrix.shape[0] > max_size:
            max_size = protein.distance_matrix.shape[0]
    distance_list, features_list = [], []
    for protein in batch:
        distance_list.append(pad_array(protein.distance_matrix, (max_size, max_size)))
        features_list.append(pad_array(protein.node_features, (max_size, protein.node_features.shape[1])))
    return [FloatTensor(features) for features in (features_list, distance_list, labels)]
def construct_dataset(x_all, y_all):
    """Construct a MolDataset object from the provided data.

    Args:
        x_all (list): A list of molecule features.
        y_all (list): A list of the corresponding labels.

    Returns:
        A MolDataset object filled with the provided data.
    """
    molecules = [Molecule(features, label, i)
                 for i, (features, label) in enumerate(zip(x_all, y_all))]
    return MolDataset(molecules)
def construct_loader(x, y, batch_size, shuffle=True):
    """Construct a data loader for the provided data.

    Args:
        x (list): A list of molecule features.
        y (list): A list of the corresponding labels.
        batch_size (int): The batch size.
        shuffle (bool): If True the data will be loaded in a random order. Defaults to True.

    Returns:
        A DataLoader object that yields batches of padded molecule features.
    """
    dataset = construct_dataset(x, y)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=batch_size,
                                       collate_fn=mol_collate_func,
                                       shuffle=shuffle)
def get_seq_dist(file_name):
"""
Get sequence and distance matrix from the output file
from Alphafold (*.rr).
Args:
file_name(str): Target file name.
Returns:
tuple:(distance, residue)
distance(np.array): Numpy array for the distances between residues
residues(str): Protein sequence(residues)
"""
protein_f = | |
<filename>energyPATHWAYS/util.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 08 10:12:52 2015
@author: <NAME> & <NAME>
Contains unclassified global functions
"""
import config as cfg
import pint
import pandas as pd
import os
import numpy as np
from time_series import TimeSeries
from collections import defaultdict, OrderedDict, MutableSet
import time
import csv
import matplotlib
from matplotlib import cm
import os as _os
#matplotlib.style.use('ggplot')
import math
import scipy.special
import copy
from profilehooks import profile, timecall
import functools
import itertools
import decimal
import psycopg2
import logging
import pdb
from operator import mul
from psycopg2.extensions import register_adapter, AsIs
def addapt_numpy_float64(numpy_float64):
    # Adapt a numpy scalar for psycopg2 by passing its repr through unchanged.
    # NOTE(review): despite the name, this adapter is registered below for
    # np.int64 only (not np.float64) -- confirm whether float64 values are
    # also sent to the database and need registering.
    return AsIs(numpy_float64)
register_adapter(np.int64, addapt_numpy_float64)
def percent_larger(a, b):
    """Return the fraction by which ``a`` exceeds ``b``, relative to ``a``."""
    difference = a - b
    return difference / a
def percent_different(a, b):
    """Return the absolute difference between ``a`` and ``b`` relative to ``a``."""
    return abs(b - a) / a
def freeze_recursivedict(recursivedict):
    """Convert a (possibly nested) defaultdict into plain nested dicts.

    Freezing stops auto-vivification: lookups of missing keys raise KeyError
    instead of silently creating new levels.
    """
    frozen = dict(recursivedict)
    for key, value in frozen.items():
        if isinstance(value, defaultdict):
            frozen[key] = freeze_recursivedict(value)
    return frozen
def upper_dict(query, append=None):
    """Build {id: NAME} from (id, name) rows, upper-casing every name.

    ``query`` may be a single (id, name) pair or an iterable of such pairs;
    ``None`` yields an empty dict.  When ``append`` is given it is
    concatenated onto each upper-cased name.
    """
    if query is None:
        return {}
    rows = query if is_iterable(query[0]) else [query]
    id_dict = dict((row_id, name.upper()) for row_id, name in rows)
    if append is not None:
        for key in id_dict:
            id_dict[key] = id_dict[key] + append
    return id_dict
def df_list_concatenate(df_list, keys, new_names, levels_to_keep=None):
    """Concatenate DataFrames under new outer index level(s) named ``new_names``.

    Non-DataFrame entries in ``df_list`` are dropped.  Index levels missing
    from a frame are added with the value "N/A" so all frames share the
    levels in ``levels_to_keep`` before concatenation; extra levels are
    summed away.  Returns None when no DataFrames survive filtering.

    NOTE(review): mutates the frames in ``df_list`` in place (adds columns /
    index levels) and uses DataFrame.sort(), which only exists in pandas
    < 0.20 -- confirm the pinned pandas version before upgrading.
    """
    new_names = put_in_list(new_names)
    #remove any elements in the list that are not pandas df
    df_list = [df for df in df_list if type(df) is pd.core.frame.DataFrame]
    # Union of every index level name seen across the input frames.
    df_names_set = set(flatten_list([df.index.names if df.index.nlevels>1 else [df.index.name] for df in df_list]))
    levels_to_keep = levels_to_keep if levels_to_keep is not None else list(df_names_set)
    #add missing levels
    for df in df_list:
        starting_names = df.index.names if df.index.nlevels>1 else df.index.name
        missing_names = list(set(levels_to_keep) - set(starting_names) - set(new_names))
        for missing_name in missing_names:
            df[missing_name] = "N/A"
        df.set_index(missing_names, append=True, inplace=True)
    #aggregate extra levels and order
    df_list = [df.groupby(level=list(set(levels_to_keep)-set(new_names)), sort=False).sum() for df in df_list]
    if len(df_list)==0:
        return None
    else:
        df = pd.concat(df_list, keys=keys, names=new_names).sort()
        #eliminate any new_names we picked up that are not in levels_to_keep, also reorder levels
        return df.groupby(level=levels_to_keep, sort=False).sum()
def order_of_magnitude_difference(df_numerator, df_denominator):
    """Return the power of ten separating the mean magnitudes of two DataFrames."""
    magnitude_gap = np.log10(df_numerator.mean().mean()) - np.log10(df_denominator.mean().mean())
    return 10 ** int(round(magnitude_gap))
def time_stamp(t):
"""Prints the difference between the parameter and current time. This is useful for timing program execution if timestamps are periodicly saved.
Parameters:
a: float
Returns:
current time: float
"""
print "%(time).6f seconds to execute \n" % {"time": time.time() - t}
return time.time()
def recursivedict():
    """Return a defaultdict whose missing keys spawn further recursivedicts,
    giving a dictionary of arbitrary depth."""
    return defaultdict(recursivedict)
def is_iterable(some_object):
    """
    Checks to see if an object is iterable.

    Args:
        some_object: any object
    Returns:
        Boolean
    """
    # BUG FIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; iter() raises TypeError for non-iterables, so catch exactly
    # that.
    try:
        iter(some_object)
        return True
    except TypeError:
        return False
def object_att_from_table(tablename, id, primary_key='id'):
    """Read one row's attributes from ``tablename`` for the given primary key.

    Returns a list of (column, value) tuples for every non-key column, plus
    (name_column, resolved_name) tuples for columns that id_to_name can
    resolve.  Returns [] when the table has no non-key columns and None when
    the row is absent.
    """
    table_headers = [h for h in sql_read_headers(tablename) if h != primary_key]
    if not len(table_headers):
        return []
    attributes = sql_read_table(tablename, column_names=table_headers, **dict([(primary_key, id)]))
    if attributes is None:
        return None
    # BUG FIX: with a single remaining column sql_read_table returns a bare
    # scalar, and the original paired the whole header *list* with it
    # ((['col'], value)), which made the id_to_name lookup below raise
    # TypeError (unhashable list key).  Pair the lone header string instead.
    if len(table_headers) == 1:
        native_tuples = [(table_headers[0], attributes)]
    else:
        native_tuples = zip(table_headers, attributes)
    named_tuples = []
    for t in native_tuples:
        col_name = id_to_name(id_col=t[0], id_num=t[1], return_type='tuple')
        if col_name is not None:
            named_tuples.append(col_name)
    return native_tuples + named_tuples
def tuple_subset(tup, header, head_to_remove):
    """Drop from ``tup`` the positions whose ``header`` entry is in ``head_to_remove``.

    Raises:
        ValueError: if ``tup`` and ``header`` differ in length.
    """
    if len(tup) != len(header):
        raise ValueError('Length of header must match the length of tuple')
    drop_positions = set(header.index(name) for name in head_to_remove if name in header)
    return tuple(value for position, value in enumerate(tup) if position not in drop_positions)
def id_to_name(id_col, id_num, return_type='item'):
    """Resolve an id number to its human-readable name via the IDMap table.

    The id-column -> {id: name} mapping is loaded from the database once and
    memoized on the function itself.  Returns None for unknown id columns;
    with return_type='tuple' returns (column_without_suffix, name).
    """
    if not hasattr(id_to_name, 'lookup_dict'):
        # First call: populate the cache from every table referenced by IDMap.
        id_to_name.lookup_dict = {}
        for _id_col, _table in sql_read_table('IDMap', 'identifier_id, ref_table'):
            id_to_name.lookup_dict[_id_col] = {}
            for _id_num, _name in sql_read_table(_table, 'id, name', return_iterable=True):
                id_to_name.lookup_dict[_id_col][_id_num] = _name
    if id_col not in id_to_name.lookup_dict:
        return None
    name = id_to_name.lookup_dict[id_col].get(id_num)
    # Strip the trailing "_id" to recover the bare column name.
    col = id_col[:-3]
    if return_type == 'item':
        return name
    elif return_type == 'tuple':
        return (col, name)
def empty_df(index, columns, fill_value=0.0, data_type=None):
    """Build an index-sorted DataFrame filled with ``fill_value`` and tag it
    with a ``data_type`` attribute."""
    frame = pd.DataFrame(fill_value, index=index, columns=columns)
    frame = frame.sort_index()
    frame.data_type = data_type
    return frame
def sql_read_table(table_name, column_names='*', return_unique=False, return_iterable=False, **filters):
    """Get data from a table filtering by columns

    key word arguments give column name, column criteria pairs
    example:
        util.sql_read_table('DemandDriversID', 'ID', driver='oil and gas mining VOS')

    Returns a bare value for a single result, a list for multiple results,
    and None/[] (depending on return_iterable) when nothing matches.

    NOTE(review): the WHERE clause is assembled by string concatenation with
    values passed through fix_sql_query_type (quoting, no escaping) --
    trusted input only; confirm callers never forward user-supplied filters.
    """
    if not isinstance(column_names, basestring):
        column_names = ', '.join(column_names)
    distinct = 'DISTINCT ' if return_unique else ''
    query = 'SELECT ' + distinct + column_names + ' FROM "%s"' % table_name
    if len(filters):
        datatypes = sql_get_datatype(table_name, filters.keys())
        # A None filter value renders as '"col"isNULL'; this relies on
        # PostgreSQL's nonstandard ISNULL spelling of IS NULL.
        list_of_filters = ['"' + col + '"=' + fix_sql_query_type(fil, datatypes[col]) if fil is not None else '"' + col + '"is' + 'NULL' for col, fil in filters.items()]
        if list_of_filters:
            query = query + " where " + " and ".join(list_of_filters)
            cfg.cur.execute(query)
            # Unwrap single-column rows to bare values.
            data = [tup[0] if len(tup) == 1 else tup for tup in cfg.cur.fetchall()]
        else:
            data = [None]
    else:
        cfg.cur.execute(query)
        data = [tup[0] if len(tup) == 1 else tup for tup in cfg.cur.fetchall()]
    # pull out the first element if length is 1 and we don't want to return an iterable
    if len(data) == 0 or data == [None]:
        return [] if return_iterable else None
    elif len(data) == 1:
        return data if return_iterable else data[0]
    else:
        return data
def sql_get_datatype(table_name, column_names):
    """Map the requested column names of ``table_name`` to their SQL data types
    (public schema only)."""
    if isinstance(column_names, basestring):
        column_names = [column_names]
    cfg.cur.execute("select column_name, data_type from INFORMATION_SCHEMA.COLUMNS where table_name = %s and table_schema = 'public';", (table_name,))
    rows = cfg.cur.fetchall()
    return dict((name, data_type) for name, data_type in rows if name in column_names)
def fix_sql_query_type(string, sqltype):
    """Render a python value as a SQL literal: bare for INTEGER, single-quoted
    otherwise."""
    text = str(string)
    if sqltype == 'INTEGER':
        return text
    return "'" + text + "'"
def sql_read_dataframe(table_name, index_column_name=None, data_column_names='*', **filters):
    """
    Read data and create a dataframe

    Example:
        data = util.sql_read_dataframe('DemandDrivers', index_column_name='year', data_column_names='value',
                                       ID=1, gau='total', dau='single-family', add='total')

    Raises ValueError when an index column is requested but either the index
    or data query comes back empty.
    """
    # BUG FIX: the original unconditionally evaluated len(index_column_name),
    # which raised TypeError for the default index_column_name=None.  Only
    # normalize when an index column was actually supplied.
    if index_column_name is not None and not isinstance(index_column_name, basestring):
        if len(index_column_name) > 1:
            raise ValueError("Only one index_column_name should be given")
        # A one-element sequence collapses to its single item.
        index_column_name = index_column_name[0]
    if data_column_names == '*':
        data_column_names = [n for n in sql_read_headers(table_name) if n != index_column_name]
    if (not isinstance(data_column_names, list)) and (not isinstance(data_column_names, tuple)):
        data_column_names = [data_column_names]
    data = sql_read_table(table_name, column_names=data_column_names, **filters)
    if index_column_name is not None:
        index = sql_read_table(table_name, column_names=index_column_name, **filters)
        if (not len(index)) or (not len(data)):
            raise ValueError('sql_read_dataframe returned empty data')
        data_frame = pd.DataFrame(data=data, index=index, columns=data_column_names)
        data_frame.sort_index(inplace=True)
    else:
        data_frame = pd.DataFrame(data=data, columns=data_column_names)
    return data_frame
def sql_read_headers(table_name):
    """Return every column name of ``table_name`` (public schema)."""
    cfg.cur.execute("select column_name from INFORMATION_SCHEMA.COLUMNS where table_name = %s and table_schema = 'public';", (table_name,))
    return [row[0] for row in cfg.cur.fetchall()]
def sql_read_dict(table_name, key_col, value_col):
    """
    Returns two columns of a table as a dictionary.
    Memoizes the results so each dictionary is only loaded from the database once.
    """
    memo_key = (table_name, key_col, value_col)
    if memo_key not in sql_read_dict.memo:
        rows = sql_read_table(table_name, column_names=(key_col, value_col))
        sql_read_dict.memo[memo_key] = dict((row[0], row[1]) for row in rows)
    return sql_read_dict.memo[memo_key]
# Cache shared by every call, keyed on (table, key column, value column).
sql_read_dict.memo = {}
def active_scenario_run_id(scenario_id):
    """Return the id of the single unfinished run for ``scenario_id``.

    Asserts that exactly one active (not finished) run exists.
    """
    query = """
        SELECT public_runs.scenario_runs.id
        FROM public_runs.scenario_runs
        JOIN public_runs.scenario_run_statuses
        ON public_runs.scenario_runs.status_id = public_runs.scenario_run_statuses.id
        WHERE public_runs.scenario_runs.scenario_id = %s
        AND public_runs.scenario_run_statuses.finished = FALSE
    """
    cfg.cur.execute(query, (scenario_id,))
    found = cfg.cur.rowcount
    assert found == 1, \
        "Expected 1 active scenario run for scenario %i but found %i." % (scenario_id, found)
    return cfg.cur.fetchone()[0]
def active_user_email(scenario_id):
    """Return the email of the user who owns ``scenario_id``, or None when the
    scenario has no matching user row."""
    query = """
        SELECT email
        FROM shared.users
        JOIN "Scenarios" ON "Scenarios".user_id = shared.users.id
        WHERE "Scenarios".id = %s
    """
    cfg.cur.execute(query, (scenario_id,))
    if cfg.cur.rowcount == 0:
        return None
    return cfg.cur.fetchone()[0]
def scenario_name(scenario_id):
    """Look up the display name for a scenario id."""
    cfg.cur.execute('SELECT name FROM "Scenarios" WHERE id = %s', (scenario_id,))
    return cfg.cur.fetchone()[0]
def update_status(scenario_id, status_id):
    """Update the status of the active run for the current scenario in the database"""
    # FIXME: See api/models.py ScenarioRunStatus for the valid status_ids. I'm reluctant to import those constants here
    #        at this time because I don't want the dependencies of that file (e.g. sqlalchemy) to become dependencies
    #        of the main model yet.
    scenario_run_id = active_scenario_run_id(scenario_id)
    assert 3 <= status_id <= 6, "update_status() only understands status_ids between 3 and 6, inclusive."
    # Statuses >= 4 mark terminal states, so stamp the end time as well.
    if status_id >= 4:
        end_time_clause = ', end_time = now()'
    else:
        end_time_clause = ''
    cfg.cur.execute("UPDATE public_runs.scenario_runs SET status_id = %s%s WHERE id = %s",
                    (status_id, psycopg2.extensions.AsIs(end_time_clause), scenario_run_id))
    cfg.con.commit()
def write_output_to_db(scenario_run_id, output_type_id, output_df, keep_cut_off=0.001):
# For output_type_ids, see api/models.py. I am reluctant to import that file here because I don't want its
# dependencies (e.g. SQLAlchemy) to become dependencies of the main model yet.
output_df = output_df.reset_index().set_index(output_df.index.names)
if output_df.index.nlevels > 1:
index = pd.MultiIndex.from_product(output_df.index.levels, names=output_df.index.names)
output_df = output_df.reindex(index, fill_value=0)
if 'YEAR' in output_df.index.names:
sums = output_df.groupby(level=[l for l in output_df.index.names if l!='YEAR']).sum()
keep = list(sums.index[np.nonzero((sums > keep_cut_off * sums.sum()).values.flatten())])
output_df = output_df.loc[keep]
df = output_df.reset_index()
if len(df.columns)==3:
assert df.columns[1].lower() == 'year', \
"Output data frame is expected to have three columns (or columns and indexes)" \
"corresponding to (series, year, value) in the output_data table."
elif len(df.columns)==2:
df.columns[0].lower() == 'year', \
"Output data frame is expected to have two columns | |
"""
Soft Actor-Critic
Based on SAC implementation from https://github.com/denisyarats/pytorch_sac
License:
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
and https://github.com/rail-berkeley/softlearning
Licence:
MIT License
Copyright (c) 2018 Softlearning authors and contributors
Softlearning uses a shared copyright model: each contributor holds copyright over
their contributions to Softlearning. The project versioning records all such
contribution and copyright details.
By contributing to the Softlearning repository through pull-request, comment,
or otherwise, the contributor releases their content to the license and
copyright terms herein.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from copy import deepcopy
import csv
from pathlib import Path
from time import time
from typing import Union
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch import distributions as pyd
from torch.utils.tensorboard import SummaryWriter
from yarll.agents.agent import Agent
from yarll.agents.env_runner import EnvRunner
from yarll.memory.prealloc_memory import PreAllocMemory
from yarll.misc import summary_writer
# TODO: put this in separate file
class DeterministicPolicy:
    """Adapter exposing a deterministic policy function through the interface
    that EnvRunner expects (choose_action / get_env_action / new_trajectory)."""

    def __init__(self, env, policy_fn):
        self.env = env
        self.policy_fn = policy_fn
        self.initial_features = None  # no recurrent state
        self.action_low = env.action_space.low
        self.action_high = env.action_space.high

    def choose_action(self, state, features):
        """Return the policy output for a single *state*; *features* is ignored."""
        batched = state[None, :]
        return {"action": self.policy_fn(batched)[0]}

    def get_env_action(self, action):
        """Map an action in [-1, 1] onto the environment's action range."""
        span = self.action_high - self.action_low
        return self.action_low + 0.5 * (action + 1.0) * span

    def new_trajectory(self):
        pass
def hard_update(source_network, target_network):
    """Copy every parameter of *source_network* into *target_network*."""
    state = source_network.state_dict()
    target_network.load_state_dict(state)
def soft_update(source_network, target_network, tau):
    """Polyak update: target <- tau * source + (1 - tau) * target, parameter-wise."""
    for src, tgt in zip(source_network.parameters(), target_network.parameters()):
        blended = tau * src.data + (1 - tau) * tgt.data
        tgt.data.copy_(blended)
def weight_init(m):
    """Custom weight init: orthogonal weights and zero bias for Linear layers."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.orthogonal_(m.weight.data)
    if hasattr(m.bias, 'data'):
        m.bias.data.fill_(0.0)
def to_numpy(x):
    """Return *x* as a NumPy array, detached from the autograd graph and moved to CPU."""
    return x.detach().cpu().numpy()
class SAC(Agent):
def __init__(self, env, monitor_path: Union[Path, str], **usercfg) -> None:
    """Build the SAC agent: networks, optimizers, replay buffer and env runners.

    Args:
        env: environment whose observation_space/action_space expose
             ``shape``, ``low`` and ``high`` (read below).
        monitor_path: directory for summaries, checkpoints and result files
                      (created if it does not exist).
        **usercfg: overrides for the default config values set here.
    """
    super().__init__(**usercfg)
    self.env = env
    self.monitor_path = Path(monitor_path)
    self.monitor_path.mkdir(parents=True, exist_ok=True)
    # Defaults first; user-supplied settings override them just below.
    self.config.update(
        max_steps=100000,
        actor_learning_rate=3e-4,
        softq_learning_rate=3e-4,
        alpha_learning_rate=1e-4,
        reward_scale=1.0,
        n_hidden_layers=2,
        n_hidden_units=256,
        gamma=0.99,
        batch_size=256,
        tau=0.005,
        init_log_alpha=0.1,
        actor_update_frequency=1,
        critic_target_update_frequency=2,
        target_entropy=None,
        logprob_epsilon=1e-6,  # For numerical stability when computing log
        n_train_steps=1,  # Number of parameter update steps per iteration
        replay_buffer_size=1e6,
        replay_start_size=256,  # Required number of replay buffer entries to start training
        gradient_clip_value=1.0,
        hidden_layer_activation="relu",
        device="cpu",
        normalize_inputs=False,  # TODO: handle this
        summaries=True,
        summaries_every_steps=None,  # If None, add them every time
        checkpoints=True,
        checkpoint_every_episodes=10,
        checkpoints_max_to_keep=None,
        save_model=True,
        test_frequency=0,
        n_test_episodes=5,
        write_train_rewards=False
    )
    self.config.update(usercfg)
    self.state_shape: list = list(env.observation_space.shape)
    self.n_actions: int = env.action_space.shape[0]
    self.action_low = self.env.action_space.low
    self.action_high = self.env.action_space.high
    self.device = torch.device(self.config["device"])
    # Default target entropy is -dim(action space), the usual SAC heuristic.
    self.target_entropy = self.config["target_entropy"]
    if self.target_entropy is None:
        self.target_entropy = -np.prod(env.action_space.shape)
    # Make networks
    # action_output are the squashed actions and action_original those straight from the normal distribution
    input_dim = self.state_shape[0]
    self.actor_network = ActorNetwork(input_dim,
                                      self.n_actions,
                                      self.config["n_hidden_units"],
                                      self.config["n_hidden_layers"]).to(self.device)
    self.softq_networks = DoubleQCriticNetwork(input_dim,
                                               self.n_actions,
                                               self.config["n_hidden_units"],
                                               self.config["n_hidden_layers"]).to(self.device)
    self.target_softq_networks = DoubleQCriticNetwork(input_dim,
                                                      self.n_actions,
                                                      self.config["n_hidden_units"],
                                                      self.config["n_hidden_layers"]).to(self.device)
    # Targets start as an exact copy of the online critics.
    hard_update(self.softq_networks, self.target_softq_networks)
    # alpha is optimized in log space so the temperature stays positive.
    self.log_alpha = torch.tensor(self.config["init_log_alpha"]).to(self.device)
    self.log_alpha.requires_grad = True
    # Make train ops
    # TODO: gradient_clip_value
    self.actor_optimizer = torch.optim.Adam(self.actor_network.parameters(),
                                            lr=self.config["actor_learning_rate"])
    # ! TF2 code has 1 optimizer per softq network
    self.softqs_optimizer = torch.optim.Adam(self.softq_networks.parameters(),
                                             lr=self.config["softq_learning_rate"])
    self.alpha_optimizer = torch.optim.Adam([self.log_alpha],
                                            lr=self.config["alpha_learning_rate"])
    self.replay_buffer = PreAllocMemory(
        int(self.config["replay_buffer_size"]), self.state_shape, env.action_space.shape)
    self.n_updates = 0
    self.total_steps = 0
    self.total_episodes = 0
    if self.config["summaries"]:
        self.summaries_every_steps = self.config["summaries_every_steps"] or 1
        self.summary_writer = SummaryWriter(str(self.monitor_path))
        summary_writer.set(self.summary_writer)
    self.env_runner = EnvRunner(self.env,
                                self,
                                usercfg,
                                transition_preprocessor=self.config.get("transition_preprocessor", None),
                                summaries=self.config["summaries"],
                                summaries_every_episodes=self.config.get("env_summaries_every_episodes", None),
                                episode_rewards_file=(
                                    self.monitor_path / "train_rewards.txt" if self.config["write_train_rewards"] else None)
                                )
    if self.config["checkpoints"]:
        self.checkpoint_directory = self.monitor_path / "checkpoints"
        self.checkpoint_directory.mkdir(exist_ok=True)
    # Optional separate test runner that follows the deterministic (mean) policy.
    if self.config["test_frequency"] > 0 and self.config["n_test_episodes"] > 0:
        test_env = deepcopy(env)
        unw = test_env.unwrapped
        # Silence the copied env's own logging/summaries if it has any.
        if hasattr(unw, "summaries"):
            unw.summaries = False
        if hasattr(unw, "log_data"):
            unw.log_data = False
        deterministic_policy = DeterministicPolicy(test_env, self.deterministic_actions)
        self.test_env_runner = EnvRunner(test_env,
                                         deterministic_policy,
                                         usercfg,
                                         summaries=False,
                                         transition_preprocessor=self.config.get("transition_preprocessor", None),
                                         episode_rewards_file=(
                                             self.monitor_path / "test_rewards.txt")
                                         )
        header = [""]  # (epoch) id has no name in header
        header += [f"rew_{i}" for i in range(self.config["n_test_episodes"])]
        header += ["rew_mean", "rew_std"]
        self.test_results_file = self.monitor_path / "test_results.csv"
        with open(self.test_results_file, "w") as f:
            writer = csv.writer(f)
            writer.writerow(header)
        self.total_rewards = np.empty((self.config["n_test_episodes"],), dtype=np.float32)
def deterministic_actions(self, states: np.ndarray) -> np.ndarray:
    """Return the greedy (distribution mean) actions for a batch of states."""
    states_t = torch.FloatTensor(states).to(self.device)
    action_dist = self.actor_network(states_t)
    return to_numpy(action_dist.mean)
def train(self, mode: bool = True) -> None:
    """Switch the actor and critic networks between train and eval mode."""
    for network in (self.actor_network, self.softq_networks):
        network.train(mode)
def action(self, state: np.ndarray) -> np.ndarray:
    """Sample a stochastic action for a single state."""
    batched = torch.FloatTensor(state[None, :]).to(self.device)
    dist = self.actor_network(batched)
    return to_numpy(dist.sample())[0]
@property
def alpha(self):
    """Entropy temperature; stored as log_alpha so optimization is unconstrained."""
    return torch.exp(self.log_alpha)
def train_critics(self, state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch):
    """One gradient step on both Q-networks toward the soft Bellman target.

    Returns (mean, std, targets, loss) of the predicted soft-Q values as
    NumPy values, for logging.
    """
    # Calculate critic targets
    next_action_batch_dist = self.actor_network(state1_batch)
    next_action_batch = next_action_batch_dist.rsample()
    next_logprob_batch = next_action_batch_dist.log_prob(next_action_batch).sum(-1, keepdim=True)
    # Clipped double-Q: bootstrap from the minimum of the two target critics.
    next_qs_values = self.target_softq_networks(state1_batch, next_action_batch)
    next_q_values = torch.min(*next_qs_values)
    # Soft value = Q - alpha * log pi; alpha is detached (trained in train_alpha).
    next_values = next_q_values - self.alpha.detach() * next_logprob_batch
    # No bootstrap value for terminal transitions.
    next_values = (1.0 - terminal1_batch) * next_values
    softq_targets = self.config["reward_scale"] * reward_batch + self.config["gamma"] * next_values
    softq_targets = softq_targets.detach()  # no gradient through the target
    # Update critics
    softq1_values, softq2_values = self.softq_networks(state0_batch, action_batch)
    softqs_loss = F.mse_loss(softq1_values, softq_targets) + F.mse_loss(softq2_values, softq_targets)
    self.softqs_optimizer.zero_grad()
    softqs_loss.backward()
    self.softqs_optimizer.step()
    # Statistics over both critics' predictions, for summaries only.
    softq = torch.cat((softq1_values, softq2_values))
    softq_std, softq_mean = torch.std_mean(softq)
    return to_numpy(softq_mean), to_numpy(softq_std), to_numpy(softq_targets), to_numpy(softqs_loss)
def train_actor(self, state0_batch):
    """One gradient step on the actor: maximize min-Q plus the entropy bonus.

    Returns (actor_loss, mean action log-probability) as NumPy values.
    """
    dist = self.actor_network(state0_batch)
    action = dist.rsample()  # reparameterized sample so gradients reach the actor
    log_prob = dist.log_prob(action).sum(-1, keepdim=True)
    softqs_pred = self.softq_networks(state0_batch, action)
    min_softq_pred = torch.min(*softqs_pred)
    # alpha is detached here: the temperature is trained separately in train_alpha().
    actor_losses = self.alpha.detach() * log_prob - min_softq_pred
    actor_loss = actor_losses.mean()
    self.actor_optimizer.zero_grad()
    actor_loss.backward()
    self.actor_optimizer.step()
    return to_numpy(actor_loss), to_numpy(log_prob.mean())
def train_alpha(self, state0_batch):
    """One gradient step on the entropy temperature alpha.

    Drives the policy entropy toward self.target_entropy; returns the loss
    as a NumPy value.
    """
    dist = self.actor_network(state0_batch)
    action = dist.rsample()
    log_prob = dist.log_prob(action).sum(-1, keepdim=True)
    # Only alpha receives gradients; the (log_prob + target_entropy) term is detached.
    alpha_losses = -1.0 * self.alpha * (log_prob + self.target_entropy).detach()
    alpha_loss = alpha_losses.mean()
    self.alpha_optimizer.zero_grad()
    alpha_loss.backward()
    self.alpha_optimizer.step()
    return to_numpy(alpha_loss)
def do_test_episodes(self, step) -> None:
    """Run n_test_episodes with the deterministic policy and append a CSV row."""
    n_episodes = self.config["n_test_episodes"]
    for episode in range(n_episodes):
        trajectory = self.test_env_runner.get_trajectory(stop_at_trajectory_end=True)
        self.total_rewards[episode] = np.sum(trajectory.rewards)
    row = [step]
    row.extend(self.total_rewards.tolist())
    row.extend([np.mean(self.total_rewards), np.std(self.total_rewards)])
    with open(self.test_results_file, "a") as f:
        csv.writer(f).writerow(row)
def learn(self):
self.train()
# Arrays to keep results from train function over different train steps in
softq_means = np.empty((self.config["n_train_steps"],), np.float32)
softq_stds = np.empty((self.config["n_train_steps"],), np.float32)
softq_losses = np.empty((self.config["n_train_steps"],), np.float32)
actor_losses = np.empty((self.config["n_train_steps"],), np.float32)
alpha_losses = np.empty((self.config["n_train_steps"],), np.float32)
action_logprob_means = np.empty((self.config["n_train_steps"],), np.float32)
total_episodes = 0
episode_start_time = time()
summary_writer.start()
for step in range(self.config["max_steps"]):
if self.config["test_frequency"] > 0 and (step % self.config["test_frequency"]) == 0 and self.config["n_test_episodes"] > 0:
self.do_test_episodes(step)
experience = self.env_runner.get_steps(1)[0]
self.total_steps += 1
self.replay_buffer.add(experience.state, experience.action, experience.reward,
experience.next_state, experience.terminal)
if self.replay_buffer.n_entries > self.config["replay_start_size"]:
for i in range(self.config["n_train_steps"]):
sample = self.replay_buffer.get_batch(self.config["batch_size"])
states0 = torch.as_tensor(sample["states0"], device=self.device)
softq_mean, softq_std, softq_targets, softq_loss = self.train_critics(
states0,
torch.as_tensor(sample["actions"], device=self.device),
torch.as_tensor(sample["rewards"], device=self.device),
torch.as_tensor(sample["states1"], device=self.device),
torch.as_tensor(sample["terminals1"], device=self.device))
if (step % self.config["actor_update_frequency"]) == 0:
actor_loss, action_logprob_mean = self.train_actor(states0)
alpha_loss = self.train_alpha(states0)
actor_losses[i] = actor_loss
alpha_losses[i] = alpha_loss
action_logprob_means[i] = action_logprob_mean
else:
print("WARNING: ACTOR NOT UPDATED")
softq_means[i] = softq_mean
softq_stds[i] = softq_std
softq_losses[i] = softq_loss
# Update the target networks
if (step % self.config["critic_target_update_frequency"]) == 0:
soft_update(self.softq_networks,
self.target_softq_networks,
self.config["tau"])
if self.config["summaries"] and (self.total_steps % self.summaries_every_steps) == 0:
summary_writer.add_scalar("model/predicted_softq_mean", np.mean(softq_means), self.total_steps)
summary_writer.add_scalar("model/predicted_softq_std", np.mean(softq_stds), self.total_steps)
summary_writer.add_scalar("model/softq_targets", softq_targets.mean(), self.total_steps)
summary_writer.add_scalar("model/softq_loss", np.mean(softq_losses), self.total_steps)
if (step % self.config["actor_update_frequency"]) == 0:
summary_writer.add_scalar("model/actor_loss", np.mean(actor_losses), self.total_steps)
summary_writer.add_scalar("model/alpha_loss", np.mean(alpha_losses), self.total_steps)
summary_writer.add_scalar("model/alpha", to_numpy(self.alpha), self.total_steps)
summary_writer.add_scalar("model/action_logprob_mean",
np.mean(action_logprob_means), self.total_steps)
self.n_updates += 1
if experience.terminal:
episode_end_time = time()
summary_writer.add_scalar("diagnostics/episode_duration_seconds",
episode_end_time - episode_start_time,
self.total_steps)
if self.config["checkpoints"] and (total_episodes % self.config["checkpoint_every_episodes"]) | |
depth = 1
while True:
try:
sys.setrecursionlimit(depth)
except RecursionError:
depth += 1
else:
break
sys.setrecursionlimit(depth+n)
def recurse_in_except():
try:
1/0
except:
recurse_in_except()
def recurse_after_except():
try:
1/0
except:
pass
recurse_after_except()
def recurse_in_body_and_except():
try:
recurse_in_body_and_except()
except:
recurse_in_body_and_except()
recursionlimit = sys.getrecursionlimit()
try:
set_relative_recursion_limit(10)
for func in (recurse_in_except, recurse_after_except, recurse_in_body_and_except):
with self.subTest(func=func):
try:
func()
except RecursionError:
pass
else:
self.fail("Should have raised a RecursionError")
finally:
sys.setrecursionlimit(recursionlimit)
@cpython_only
def test_recursion_normalizing_with_no_memory(self):
    # Issue #30697. Test that in the abort that occurs when there is no
    # memory left and the size of the Python frames stack is greater than
    # the size of the list of preallocated MemoryError instances, the
    # Fatal Python error message mentions MemoryError.
    code = """if 1:
        import _testcapi
        class C(): pass
        def recurse(cnt):
            cnt -= 1
            if cnt:
                recurse(cnt)
            else:
                _testcapi.set_nomemory(0)
                C()
        recurse(16)
    """
    # The subprocess is expected to die with a fatal error; check its stderr.
    with SuppressCrashReport():
        rc, out, err = script_helper.assert_python_failure("-c", code)
        self.assertIn(b'Fatal Python error: _PyErr_NormalizeException: '
                      b'Cannot recover from MemoryErrors while '
                      b'normalizing exceptions.', err)
@cpython_only
def test_MemoryError(self):
    # PyErr_NoMemory always raises the same exception instance.
    # Check that the traceback is not doubled.
    import traceback
    from _testcapi import raise_memoryerror
    def raiseMemError():
        try:
            raise_memoryerror()
        except MemoryError as e:
            tb = e.__traceback__
        else:
            self.fail("Should have raised a MemoryError")
        return traceback.format_tb(tb)
    # Raising twice must yield identical tracebacks: the shared instance's
    # traceback is replaced, not accumulated.
    tb1 = raiseMemError()
    tb2 = raiseMemError()
    self.assertEqual(tb1, tb2)
@cpython_only
def test_exception_with_doc(self):
    # Exercises PyErr_NewException(WithDoc) via _testcapi.make_exception_with_doc.
    import _testcapi
    doc2 = "This is a test docstring."
    doc4 = "This is another test docstring."
    self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
                      "error1")
    # test basic usage of PyErr_NewException
    error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
    self.assertIs(type(error1), type)
    self.assertTrue(issubclass(error1, Exception))
    self.assertIsNone(error1.__doc__)
    # test with given docstring
    error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
    self.assertEqual(error2.__doc__, doc2)
    # test with explicit base (without docstring)
    error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
                                               base=error2)
    self.assertTrue(issubclass(error3, error2))
    # test with explicit base tuple
    class C(object):
        pass
    error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
                                               (error3, C))
    self.assertTrue(issubclass(error4, error3))
    self.assertTrue(issubclass(error4, C))
    self.assertEqual(error4.__doc__, doc4)
    # test with explicit dictionary
    error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
                                               error4, {'a': 1})
    self.assertTrue(issubclass(error5, error4))
    self.assertEqual(error5.a, 1)
    self.assertEqual(error5.__doc__, "")
@cpython_only
def test_memory_error_cleanup(self):
    # Issue #5437: preallocated MemoryError instances should not keep
    # traceback objects alive.
    from _testcapi import raise_memoryerror
    class C:
        pass
    wr = None
    def inner():
        nonlocal wr
        c = C()
        wr = weakref.ref(c)
        raise_memoryerror()
    # We cannot use assertRaises since it manually deletes the traceback
    try:
        inner()
    except MemoryError as e:
        self.assertNotEqual(wr(), None)
    else:
        self.fail("MemoryError not raised")
    gc_collect()  # For PyPy or other GCs.
    # Once the exception is gone, the frame (and thus c) must be collectable.
    self.assertEqual(wr(), None)
@no_tracing
def test_recursion_error_cleanup(self):
    # Same test as above, but with "recursion exceeded" errors
    class C:
        pass
    wr = None
    def inner():
        nonlocal wr
        c = C()
        wr = weakref.ref(c)
        inner()  # unbounded recursion -> RecursionError
    # We cannot use assertRaises since it manually deletes the traceback
    try:
        inner()
    except RecursionError as e:
        self.assertNotEqual(wr(), None)
    else:
        self.fail("RecursionError not raised")
    gc_collect()  # For PyPy or other GCs.
    self.assertEqual(wr(), None)
def test_errno_ENOTDIR(self):
    # Issue #12802: "not a directory" errors are ENOTDIR even on Windows
    with self.assertRaises(OSError) as cm:
        os.listdir(__file__)  # a file path where a directory is required
    self.assertEqual(cm.exception.errno, errno.ENOTDIR, cm.exception)
def test_unraisable(self):
    # Issue #22836: PyErr_WriteUnraisable() should give sensible reports
    class BrokenDel:
        def __del__(self):
            exc = ValueError("del is broken")
            # The following line is included in the traceback report:
            raise exc
    obj = BrokenDel()
    with support.catch_unraisable_exception() as cm:
        del obj
        gc_collect()  # For PyPy or other GCs.
        # The unraisable hook must have captured the failing __del__.
        self.assertEqual(cm.unraisable.object, BrokenDel.__del__)
        self.assertIsNotNone(cm.unraisable.exc_traceback)
def test_unhandled(self):
    # Check for sensible reporting of unhandled exceptions
    for exc_type in (ValueError, BrokenStrException):
        with self.subTest(exc_type):
            try:
                exc = exc_type("test message")
                # The following line is included in the traceback report:
                raise exc
            except exc_type:
                with captured_stderr() as stderr:
                    sys.__excepthook__(*sys.exc_info())
            report = stderr.getvalue()
            self.assertIn("test_exceptions.py", report)
            self.assertIn("raise exc", report)
            self.assertIn(exc_type.__name__, report)
            # BrokenStrException's str() raises, so the report must show the
            # placeholder rather than the message.
            if exc_type is BrokenStrException:
                self.assertIn("<exception str() failed>", report)
            else:
                self.assertIn("test message", report)
            self.assertTrue(report.endswith("\n"))
@cpython_only
def test_memory_error_in_PyErr_PrintEx(self):
    code = """if 1:
        import _testcapi
        class C(): pass
        _testcapi.set_nomemory(0, %d)
        C()
    """
    # Issue #30817: Abort in PyErr_PrintEx() when no memory.
    # Span a large range of tests as the CPython code always evolves with
    # changes that add or remove memory allocations.
    for i in range(1, 20):
        rc, out, err = script_helper.assert_python_failure("-c", code % i)
        self.assertIn(rc, (1, 120))
        self.assertIn(b'MemoryError', err)
def test_yield_in_nested_try_excepts(self):
    #Issue #25612
    class MainError(Exception):
        pass
    class SubError(Exception):
        pass
    def main():
        try:
            raise MainError()
        except MainError:
            try:
                yield
            except SubError:
                pass
            raise
    coro = main()
    coro.send(None)
    # The bare `raise` must re-raise MainError, not the SubError thrown in.
    with self.assertRaises(MainError):
        coro.throw(SubError())
def test_generator_doesnt_retain_old_exc2(self):
    #Issue 28884#msg282532
    def g():
        try:
            raise ValueError
        except ValueError:
            yield 1
        # After leaving the except block there must be no active exception,
        # even though the caller resumed us from inside its own handler.
        self.assertEqual(sys.exc_info(), (None, None, None))
        yield 2
    gen = g()
    try:
        raise IndexError
    except IndexError:
        self.assertEqual(next(gen), 1)
    self.assertEqual(next(gen), 2)
def test_raise_in_generator(self):
    #Issue 25612#msg304117
    def g():
        yield 1
        raise
        yield 2
    # NOTE(review): per the assertion below, the bare `raise` is expected to
    # re-surface the ZeroDivisionError that was active when the generator was
    # resumed inside the except block — see bpo-25612.
    with self.assertRaises(ZeroDivisionError):
        i = g()
        try:
            1/0
        except:
            next(i)
        next(i)
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def test_assert_shadowing(self):
    # Shadowing AssertionError would cause the assert statement to
    # misbehave.
    global AssertionError
    AssertionError = TypeError
    try:
        assert False, 'hello'
    except BaseException as e:
        # Delete the shadow first so the isinstance check below uses the
        # real builtin AssertionError.
        del AssertionError
        self.assertIsInstance(e, AssertionError)
        self.assertEqual(str(e), 'hello')
    else:
        del AssertionError
        self.fail('Expected exception')
def test_memory_error_subclasses(self):
    # bpo-41654: MemoryError instances use a freelist of objects that are
    # linked using the 'dict' attribute when they are inactive/dead.
    # Subclasses of MemoryError should not participate in the freelist
    # schema. This test creates a MemoryError object and keeps it alive
    # (therefore advancing the freelist) and then it creates and destroys a
    # subclass object. Finally, it checks that creating a new MemoryError
    # succeeds, proving that the freelist is not corrupted.
    class TestException(MemoryError):
        pass
    try:
        raise MemoryError
    except MemoryError as exc:
        inst = exc  # keep the freelist instance alive
    try:
        raise TestException
    except Exception:
        pass
    for _ in range(10):
        try:
            raise MemoryError
        except MemoryError as exc:
            pass
    gc_collect()
# Module-level name used by the NameErrorTests suggestion tests below.
global_for_suggestions = None
class NameErrorTests(unittest.TestCase):
def test_name_error_has_name(self):
    # The NameError instance must carry the offending name in .name.
    try:
        bluch  # deliberately undefined
    except NameError as exc:
        self.assertEqual("bluch", exc.name)
def test_name_error_suggestions(self):
    # Each helper misspells `bluch` in a different way (one substitution,
    # one deletion, one addition, and ties between edit kinds); the error
    # report must suggest the expected local name.
    def Substitution():
        noise = more_noise = a = bc = None
        blech = None
        print(bluch)
    def Elimination():
        noise = more_noise = a = bc = None
        blch = None
        print(bluch)
    def Addition():
        noise = more_noise = a = bc = None
        bluchin = None
        print(bluch)
    def SubstitutionOverElimination():
        blach = None
        bluc = None
        print(bluch)
    def SubstitutionOverAddition():
        blach = None
        bluchi = None
        print(bluch)
    def EliminationOverAddition():
        blucha = None
        bluc = None
        print(bluch)
    for func, suggestion in [(Substitution, "'blech'?"),
                             (Elimination, "'blch'?"),
                             (Addition, "'bluchin'?"),
                             (EliminationOverAddition, "'blucha'?"),
                             (SubstitutionOverElimination, "'blach'?"),
                             (SubstitutionOverAddition, "'blach'?")]:
        err = None
        try:
            func()
        except NameError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertIn(suggestion, err.getvalue())
def test_name_error_suggestions_from_globals(self):
    # Suggestions must also consider module-level globals.
    def func():
        print(global_for_suggestio)  # typo of the module-level global
    try:
        func()
    except NameError as exc:
        with support.captured_stderr() as err:
            sys.__excepthook__(*sys.exc_info())
    self.assertIn("'global_for_suggestions'?", err.getvalue())
def test_name_error_suggestions_from_builtins(self):
    # Suggestions must also consider builtin names.
    def func():
        print(ZeroDivisionErrrrr)  # typo of a builtin
    try:
        func()
    except NameError as exc:
        with support.captured_stderr() as err:
            sys.__excepthook__(*sys.exc_info())
    self.assertIn("'ZeroDivisionError'?", err.getvalue())
def test_name_error_suggestions_do_not_trigger_for_long_names(self):
    # Very long names are excluded from the suggestion machinery.
    def f():
        somethingverywronghehehehehehe = None
        print(somethingverywronghe)
    try:
        f()
    except NameError as exc:
        with support.captured_stderr() as err:
            sys.__excepthook__(*sys.exc_info())
    self.assertNotIn("somethingverywronghehe", err.getvalue())
def test_name_error_bad_suggestions_do_not_trigger_for_small_names(self):
    # Names of one or two characters should never produce (noisy) suggestions.
    vvv = mom = w = id = pytho = None
    with self.subTest(name="b"):
        try:
            b
        except NameError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertNotIn("you mean", err.getvalue())
        self.assertNotIn("vvv", err.getvalue())
        self.assertNotIn("mom", err.getvalue())
        self.assertNotIn("'id'", err.getvalue())
        self.assertNotIn("'w'", err.getvalue())
        self.assertNotIn("'pytho'", err.getvalue())
    with self.subTest(name="v"):
        try:
            v
        except NameError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertNotIn("you mean", err.getvalue())
        self.assertNotIn("vvv", err.getvalue())
        self.assertNotIn("mom", err.getvalue())
        self.assertNotIn("'id'", err.getvalue())
        self.assertNotIn("'w'", err.getvalue())
        self.assertNotIn("'pytho'", err.getvalue())
    with self.subTest(name="m"):
        try:
            m
        except NameError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertNotIn("you mean", err.getvalue())
        self.assertNotIn("vvv", err.getvalue())
        self.assertNotIn("mom", err.getvalue())
        self.assertNotIn("'id'", err.getvalue())
        self.assertNotIn("'w'", err.getvalue())
        self.assertNotIn("'pytho'", err.getvalue())
    with self.subTest(name="py"):
        try:
            py
        except NameError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())
        self.assertNotIn("you mean", err.getvalue())
        self.assertNotIn("vvv", err.getvalue())
        self.assertNotIn("mom", err.getvalue())
        self.assertNotIn("'id'", err.getvalue())
        self.assertNotIn("'w'", err.getvalue())
        self.assertNotIn("'pytho'", err.getvalue())
def test_name_error_suggestions_do_not_trigger_for_too_many_locals(self):
def f():
# Mutating locals() is unreliable, so we need to do it by hand
a1 = a2 = a3 = a4 = a5 = a6 = a7 = a8 = a9 = a10 = \
a11 = a12 = a13 = a14 = a15 = a16 = a17 = a18 = a19 = a20 = \
a21 = a22 = a23 = a24 = a25 = a26 = a27 = a28 = a29 = a30 = \
a31 = a32 = a33 = a34 = a35 = a36 = a37 = a38 = a39 = a40 = \
a41 = a42 = a43 = a44 = a45 = a46 = a47 = a48 | |
1
(2, TType.I64, 'supplier_id', None, None, ), # 2
)
class del_supplier_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE: Thrift-generated result struct for del_supplier(); do not edit by hand.

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Deserialize from *iprot*; the C fast path is used when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    # NOTE(review): `gen_exp.read(iprot)` is invoked on the class, not a
                    # fresh instance — verify this matches the generator's output.
                    self.e = gen_exp.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to *oprot*; the C fast path is used when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('del_supplier_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct and attach its (field id, type, name, ...) spec tuple.
all_structs.append(del_supplier_result)
del_supplier_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
    (1, TType.STRUCT, 'e', [gen_exp, None], None, ),  # 1
)
class get_all_supplier_args(object):
    """
    Attributes:
     - ssid
    """
    # NOTE: Thrift-generated argument struct for get_all_supplier(); do not edit by hand.

    def __init__(self, ssid=None,):
        self.ssid = ssid

    def read(self, iprot):
        # Deserialize from *iprot*; the C fast path is used when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    # On Python 2 strings arrive as bytes and must be decoded.
                    self.ssid = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to *oprot*; the C fast path is used when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('get_all_supplier_args')
        if self.ssid is not None:
            oprot.writeFieldBegin('ssid', TType.STRING, 1)
            oprot.writeString(self.ssid.encode('utf-8') if sys.version_info[0] == 2 else self.ssid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct and attach its (field id, type, name, ...) spec tuple.
all_structs.append(get_all_supplier_args)
get_all_supplier_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'ssid', 'UTF8', None, ),  # 1
)
class get_all_supplier_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE: Thrift-generated result struct for get_all_supplier(); do not edit by hand.

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Deserialize from *iprot*; the C fast path is used when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # success is a list of supplier_basic_info structs.
                    self.success = []
                    (_etype325, _size322) = iprot.readListBegin()
                    for _i326 in range(_size322):
                        _elem327 = supplier_basic_info()
                        _elem327.read(iprot)
                        self.success.append(_elem327)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    # NOTE(review): `gen_exp.read(iprot)` is invoked on the class, not a
                    # fresh instance — verify this matches the generator's output.
                    self.e = gen_exp.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Serialize to *oprot*; the C fast path is used when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('get_all_supplier_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter328 in self.success:
                iter328.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(get_all_supplier_result)
get_all_supplier_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [supplier_basic_info, None], False), None, ), # 0
(1, TType.STRUCT, 'e', [gen_exp, None], None, ), # 1
)
# Thrift-generated code: regenerate from the .thrift IDL rather than hand-editing.
class smart_assign_args(object):
    """Thrift-generated argument struct for the smart_assign RPC.

    Attributes:
     - ssid
     - vichele_info
    """

    def __init__(self, ssid=None, vichele_info=None,):
        self.ssid = ssid
        self.vichele_info = vichele_info

    def read(self, iprot):
        # Fast path: C-accelerated decode when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.ssid = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    # vichele_info: list<vichele_stay_alone>
                    self.vichele_info = []
                    (_etype332, _size329) = iprot.readListBegin()
                    for _i333 in range(_size329):
                        _elem334 = vichele_stay_alone()
                        _elem334.read(iprot)
                        self.vichele_info.append(_elem334)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('smart_assign_args')
        if self.ssid is not None:
            oprot.writeFieldBegin('ssid', TType.STRING, 1)
            oprot.writeString(self.ssid.encode('utf-8') if sys.version_info[0] == 2 else self.ssid)
            oprot.writeFieldEnd()
        if self.vichele_info is not None:
            oprot.writeFieldBegin('vichele_info', TType.LIST, 2)
            oprot.writeListBegin(TType.STRUCT, len(self.vichele_info))
            for iter335 in self.vichele_info:
                iter335.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: no required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(smart_assign_args)
# (field id, wire type, name, type args, default) — consumed by the fast codec.
smart_assign_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'ssid', 'UTF8', None, ),  # 1
    (2, TType.LIST, 'vichele_info', (TType.STRUCT, [vichele_stay_alone, None], False), None, ),  # 2
)
# Thrift-generated code: regenerate from the .thrift IDL rather than hand-editing.
class smart_assign_result(object):
    """Thrift-generated result struct for the smart_assign RPC.

    Attributes:
     - success
     - e
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast path: C-accelerated decode when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = gen_exp.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('smart_assign_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: no required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(smart_assign_result)
# (field id, wire type, name, type args, default) — consumed by the fast codec.
smart_assign_result.thrift_spec = (
    (0, TType.STRING, 'success', 'UTF8', None, ),  # 0
    (1, TType.STRUCT, 'e', [gen_exp, None], None, ),  # 1
)
# Thrift-generated code: regenerate from the .thrift IDL rather than hand-editing.
class get_max_vichele_by_supplier_args(object):
    """Thrift-generated argument struct for the get_max_vichele_by_supplier RPC.

    Attributes:
     - supplier
     - company
    """

    def __init__(self, supplier=None, company=None,):
        self.supplier = supplier
        self.company = company

    def read(self, iprot):
        # Fast path: C-accelerated decode when the transport supports it.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.supplier = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.company = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('get_max_vichele_by_supplier_args')
        if self.supplier is not None:
            oprot.writeFieldBegin('supplier', TType.STRING, 1)
            oprot.writeString(self.supplier.encode('utf-8') if sys.version_info[0] == 2 else self.supplier)
            oprot.writeFieldEnd()
        if self.company is not None:
            oprot.writeFieldBegin('company', TType.STRING, 2)
            oprot.writeString(self.company.encode('utf-8') if sys.version_info[0] == 2 else self.company)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Nothing to validate: no required fields.
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(get_max_vichele_by_supplier_args)
# (field id, wire type, name, type args, default) — consumed by the fast codec.
get_max_vichele_by_supplier_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'supplier', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'company', 'UTF8', None, ),  # 2
)
class get_max_vichele_by_supplier_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I64:
self.success = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = gen_exp.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('get_max_vichele_by_supplier_result')
if self.success is | |
of filename using the basename and the list of
provided extensions
"""
def __init__(self, name='FilenameOps', **kw):
    """Build the filename-decomposition node.

    Declares a 'filename' input plus optional 'extList'/'withPath' inputs,
    and outputs dirname/filename/basename/extension together with a list of
    new filenames built from the extension list.
    """
    kw['name'] = name
    # apply() was removed in Python 3; the direct call is equivalent.
    NetworkNode.__init__(self, **kw)
    self.inNodeWidgetsVisibleByDefault = True

    ip = self.inputPortsDescr
    ip.append(datatype='string', name='filename',
              balloon='filename to be decomposed')

    self.widgetDescr['extList'] = {
        'class': 'NEEntry', 'master': 'node',
        'labelCfg': {'text': 'extension list:'},
        'initialValue': ''}
    ip.append(datatype='string', name='extList', required=False,
              balloon='a list of extension used to create new filename')

    self.widgetDescr['withPath'] = {
        'class': 'NECheckButton', 'master': 'node', 'initialValue': 1,
        'labelCfg': {'text': 'with path'},
        }
    ip.append(datatype='boolean', name='withPath', required=False,
              balloon='When True new filenames are generated with path',
              defaultValue=True)

    op = self.outputPortsDescr
    op.append(datatype='string', name='dirname')
    op.append(datatype='string', name='filename')
    op.append(datatype='string', name='basename')
    op.append(datatype='string', name='extension')
    op.append(datatype='string', name='newfilenames')

    code = """def doit(self, filename, extList, withPath):
    import os.path
    # remove path and split into bname and ext
    name = os.path.basename(filename)
    bname, ext = os.path.splitext(name)
    # get the path
    dirname = os.path.dirname(filename)
    # if filenames with different extensions are requested build them
    if extList:
        nfnames = []
        # handle withPath variable
        if withPath:
            lname = os.path.join(dirname, bname)
        else:
            lname = bname
        # build list of names
        for e in extList.split():
            if e[0]==os.path.extsep:
                nfnames.append( lname + e )
            else:
                nfnames.append( lname + os.path.extsep + e )
    else:
        nfnames = []
    # output all
    self.outputData(
        dirname = dirname,
        filename = name,
        basename = bname,
        extension = ext,
        newfilenames = nfnames
        )
"""
    self.setFunction(code)
class Filename(NetworkNode):
    """A node to generate filenames with an integer value.

    Input Ports
        format: a string used to create the filename, e.g. 'frame%08d.png'
        number: number to be printed using format
    Output Ports
        filename: the resulting filename
    """

    def __init__(self, name='Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['format'] = {
            'class': 'NEEntry', 'master': 'node', 'labelCfg': {'text': 'format:'},
            'initialValue': 'file%08d.png'}

        ip = self.inputPortsDescr
        ip.append(datatype='string', balloon='format string used to create a filename', name='format')
        ip.append(datatype='int', name='number')

        op = self.outputPortsDescr
        op.append(datatype='string', name='filename')

        code = """def doit(self, format, number):
    self.outputData(filename=format%number)
"""
        self.setFunction(code)
class NumberedFilename(NetworkNode):
    """A node to generate filenames with a trailing 0-padded integer value.

    Input Ports
        directory: path to the folder containing the files
        baseName: name of each file without the number
        padding: number of digits in the file number
        suffix: file extension
        number: number of the file for which the name will be created
    Output Ports
        filename: the resulting filename, e.g. mydir/frame0002.png
    """

    def __init__(self, name='numberedName', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['directory'] = {
            'class': 'NEEntryWithDirectoryBrowser', 'master': 'node', 'width': 16,
            'initialValue': '', 'lockedOnPort': True,
            'labelCfg': {'text': 'directory: '}
            }
        self.widgetDescr['baseName'] = {
            'class': 'NEEntry', 'master': 'node', 'labelCfg': {'text': 'baseName:'},
            'initialValue': 'file'}
        self.widgetDescr['padding'] = {
            'class': 'NEThumbWheel', 'master': 'node', 'lockedOnPort': True,
            'width': 75, 'height': 21, 'oneTurn': 10, 'type': 'int', 'wheelPad': 2,
            'initialValue': 0, 'min': 0,
            'labelCfg': {'text': 'padding'}}
        self.widgetDescr['suffix'] = {
            'class': 'NEEntry', 'master': 'node', 'labelCfg': {'text': 'suffix:'},
            'initialValue': '.png'}

        ip = self.inputPortsDescr
        ip.append(datatype='string', balloon='path to the folder containing the files', name='directory')
        ip.append(datatype='string', balloon='base string used to create a filename', name='baseName')
        ip.append(datatype='int', name='padding')
        ip.append(datatype='string', name='suffix')
        ip.append(datatype='int', name='number')

        op = self.outputPortsDescr
        op.append(datatype='string', name='filename')

        # doit builds a %-format such as 'file%04d.png' and applies it.
        code = """def doit(self, directory, baseName, padding, suffix, number):
    import os.path
    format = baseName + '%'
    if padding != 0:
        format += '0' + str(padding)
    if suffix:
        format += 'd' + suffix
    else:
        format += 'd'
    #print "format", format
    if directory:
        name = os.path.join(directory, format%number)
    else:
        name = format%number
    self.outputData(filename=name)
"""
        self.setFunction(code)
class Filelist(NetworkNode):
    """
    Generate a list of file names matching a string with wildcards.

    Input Ports
        directory: root directory from where the search for files matching
                   the matchstring will be searched (Optional)
        matchstring: a string containing wildcards, with the same meaning
                   as in unix shells:
                   e.g. * : any number of any character
                        ? : any 1 character
                        [abc] : either a, b or c in this position
    Output Ports
        filenames: a list of file names matching matchstring in directory
    Note: matchstring can contain path elements that contain wildcards;
    for instance */*.py will match all .py files in all current
    sub-directories.
    """

    def __init__(self, name='Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['directory'] = {
            'class': 'NEEntryWithDirectoryBrowser', 'master': 'node', 'width': 16,
            'initialValue': '', 'lockedOnPort': True,
            'labelCfg': {'text': 'directory: '}
            }
        self.widgetDescr['match_str'] = {
            'class': 'NEEntry', 'master': 'node',
            'labelCfg': {'text': 'match string:'},
            'initialValue': '*'}

        ip = self.inputPortsDescr
        ip.append(datatype='string',
                  balloon='directory where to apply match string',
                  name='directory')
        ip.append(datatype='string',
                  balloon='string used to select filenames', name='match_str')

        op = self.outputPortsDescr
        op.append(datatype='list', name='filelist')

        # BUG FIX: the previous doit called os.path.join(directory, x) even
        # when directory was None (TypeError), and os.chdir('') raised when
        # the widget's default empty string came through.  Treat '' and None
        # alike: search the current directory and return bare names.
        code = """def doit(self, directory, match_str):
    import glob, os
    if directory:
        cwd = os.getcwd()
        os.chdir(directory)
    try:
        values = glob.glob(match_str)
        if directory:
            filenames = [os.path.join(directory, x) for x in values]
        else:
            filenames = values
        self.outputData(filelist=filenames)
    finally:
        if directory:
            os.chdir(cwd)
"""
        self.setFunction(code)
class EntryNE(NetworkNode):
    """A Tkinter Entry widget node.

    Double-clicking on the node opens the entry widget.
    Input Ports
        entry: (bound to the entry widget)
    Output Ports
        string: the entered text (only output when non-empty)
    """

    def __init__(self, name='Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['entry'] = {
            'class': 'NEEntry', 'master': 'node', 'width': 14,
            'labelCfg': {'text': ''}, 'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='string', name='entry')
        self.outputPortsDescr.append(datatype='string', name='string')

        code = """def doit(self, entry):
    if len(str(entry))!=0:
        self.outputData(string=entry)
"""
        self.setFunction(code)
class EntryFieldNE(NetworkNode):
    """A Pmw EntryField widget node.

    Double-clicking on the node opens the entry widget.
    Input Ports
        entry: (bound to the entry field widget)
    Output Ports
        string: the entered text (only output when non-empty)
    """

    def __init__(self, name='Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['entry'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'labelCfg': {'text': ''}, 'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='string', name='entry')
        self.outputPortsDescr.append(datatype='string', name='string')

        code = """def doit(self, entry):
    if len(str(entry))!=0:
        self.outputData(string=entry)
"""
        self.setFunction(code)
class EntryFieldIntNE(NetworkNode):
    """A Pmw EntryField widget node validating an integer.

    Double-clicking on the node opens the entry widget.
    Input Ports
        value: (bound to the entry field widget)
    Output Ports
        value: an integer value
    """

    def __init__(self, name='Int Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['value'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'validate': 'integer', 'labelpos': 'w', 'dtype': int,
            'labelCfg': {'text': 'Int Value'},
            'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='int', name='value')
        self.outputPortsDescr.append(datatype='int', name='value')

        code = """def doit(self, value):
    self.outputData(value=value)
"""
        self.setFunction(code)
class EntryFieldFloatNE(NetworkNode):
    """A Pmw EntryField widget node validating a float.

    Double-clicking on the node opens the entry widget.
    Input Ports
        value: (bound to the entry field widget)
    Output Ports
        value: a float value
    """

    def __init__(self, name='Float Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['value'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'validate': 'real', 'labelpos': 'w', 'dtype': float,
            'labelCfg': {'text': 'Float Value'},
            'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='float', name='value')
        self.outputPortsDescr.append(datatype='float', name='value')

        code = """def doit(self, value):
    self.outputData(value=value)
"""
        self.setFunction(code)
class EntryFieldTimeNE(NetworkNode):
    """A Pmw EntryField widget node for a time in the format 'HH:MM:SS'.

    Double-clicking on the node opens the entry widget.
    Input Ports
        value: (bound to the entry field widget)
    Output Ports
        value: a time in the format 'HH:MM:SS'
    """

    def __init__(self, name='Time Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['value'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'validate': 'real', 'labelpos': 'w',
            'labelCfg': {'text': 'Time:'},
            'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='string', name='value')
        self.outputPortsDescr.append(datatype='string', name='value')

        code = """def doit(self, value):
    self.outputData(value=value)
"""
        self.setFunction(code)
class EntryFieldDateNE(NetworkNode):
    """A Pmw EntryField widget node for a date in day:month:year.

    Double-clicking on the node opens the entry widget.
    Input Ports
        value: (bound to the entry field widget)
    Output Ports
        value: a date
    """

    def __init__(self, name='Date Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['value'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'validate': 'real', 'labelpos': 'w',
            'labelCfg': {'text': 'Date'},
            'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='string', name='value')
        self.outputPortsDescr.append(datatype='string', name='value')

        code = """def doit(self, value):
    self.outputData(value=value)
"""
        self.setFunction(code)
class EntryFieldHexNE(NetworkNode):
    """A Pmw EntryField widget node validating a hex value.

    Double-clicking on the node opens the entry widget.
    Input Ports
        value: (bound to the entry field widget)
    Output Ports
        value: an hexadecimal value
    """

    def __init__(self, name='Hex Entry', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['value'] = {
            'class': 'NEEntryField', 'master': 'node', 'width': 14,
            'validate': 'hexadecimal', 'labelpos': 'w',
            'labelCfg': {'text': 'hex Value'},
            'lockedOnPort': True}

        self.inputPortsDescr.append(datatype='string', name='value')
        self.outputPortsDescr.append(datatype='string', name='value')

        code = """def doit(self, value):
    self.outputData(value=value)
"""
        self.setFunction(code)
class FileBrowserNE(NetworkNode):
    """A Tkinter file browser node.

    Double-clicking into the entry opens the file browser.
    Outputs the chosen filename as given and as an absolute path.
    """

    def __init__(self, name='File Browser', **kw):
        kw['name'] = name
        # apply() was removed in Python 3; the direct call is equivalent.
        NetworkNode.__init__(self, **kw)
        #self.readOnly = 1

        # BUG FIX: doit used os.path.abspath without importing os; import it
        # locally, matching the sibling DirBrowserNE node.
        code = """def doit(self, filename):
    import os
    if filename:
        self.outputData(filename=filename, AbsPath_filename=os.path.abspath(filename))
"""
        self.setFunction(code)

        # show the entry widget by default
        # BUG FIX: the attribute was misspelled 'inNodeWidgetVisibleByDefault'
        # (missing 's'), unlike every other node in this module, so the flag
        # was never honored.
        self.inNodeWidgetsVisibleByDefault = True

        self.widgetDescr['filename'] = {
            'class': 'NEEntryWithFileBrowser', 'master': 'node', 'width': 16,
            'initialValue': '', 'lockedOnPort': True,
            'labelCfg': {'text': 'Filename: '}
            }

        self.inputPortsDescr.append(datatype='string', name='filename')
        self.outputPortsDescr.append(datatype='string', name='filename')
        self.outputPortsDescr.append(datatype='string', name='AbsPath_filename')
class DirBrowserNE(NetworkNode):
"""
Directory browser. Double-clicking in the button to the right of the entry
opens a file browser. the path returned are relative to the current
directory.
"""
def __init__(self, name='File Browser', **kw):
kw['name'] = name
apply( NetworkNode.__init__, (self,), kw )
#self.readOnly = 1
code = """def doit(self, directory):
import os
if directory:
self.outputData(directory=directory, AbsPath_directory=os.path.abspath(directory))
"""
self.setFunction(code)
# show the entry widget by default
self.inNodeWidgetVisibleByDefault = True
self.widgetDescr['directory'] = | |
import logging
import utils
from flask import Flask, Response, session, render_template, request, url_for, redirect, abort
import datetime
import json
import urllib
from markupsafe import Markup
import time
from flask import send_from_directory
import os
from flask_login import LoginManager, login_required, login_user, logout_user, current_user
import utils
import base64
from utils import CREATE_MODE
# Email/notification templates.  The str.format placeholder names below must
# match the keyword arguments passed at the call sites (username / projectId /
# userId).
PROJECT_REMINDER_TITLE = "DAR Entry Reminder: Your DAR entry needs to be completed"
ADMIN_WELCOME_TITLE = "DAR Admin User Created"
# BUG FIX: the placeholder was "{username }" (trailing space inside the
# braces), which made .format(username=...) raise KeyError at runtime.
ADMIN_WELCOME_MESSAGE = "Admin user for {username} created. Please go ahead and create DAR project and add/invite users to the project"
USER_WELCOME_TITLE = "Welcome to DAR {projectId}"
USER_WELCOME_MESSAGE = "Hello {userId}\nYou have been chosen as a subject matter expert to help w/ DAR {projectId}. " + \
    "Please login at your earliest and complete your entry, Thank you."
app = Flask(__name__)
# NOTE(review): hard-coded session secret; should come from config/env in
# production.
app.secret_key = "super_secret_key"
login_manager = LoginManager()
login_manager.init_app(app)
# Endpoint unauthenticated users are redirected to by @login_required.
login_manager.login_view = "login"
@login_manager.header_loader
def load_user_from_header(header_val):
    """Authenticate a request from an HTTP Basic ``Authorization`` header.

    Returns the matching user, bootstraps a 'superuser' account on first
    use, or returns None when authentication fails.
    """
    header_val = header_val.replace('Basic ', '', 1)
    try:
        decoded = base64.b64decode(header_val)
        # BUG FIX: split only at the first ':' so passwords containing a
        # colon survive, and a header without any colon no longer raises
        # IndexError (partition yields an empty password instead).
        userId, _, password = decoded.partition(":")
        user = utils.get_user_from_db(userId)
        # NOTE(review): passwords are compared in plaintext — should be
        # hashed (e.g. werkzeug.security) before this ships.
        if user and password == user.password:
            return user
        elif user is None and userId == "superuser" and password == "password":
            # Bootstrap path: the very first 'superuser' login creates the
            # account; the sleep presumably waits out datastore write
            # latency — TODO confirm.
            user = utils.update_user('superuser', '<EMAIL>', 'Superuser', 'password', None)
            time.sleep(1)
            return user
    except TypeError:
        # Malformed base64 payload: treat as unauthenticated.
        pass
    return None
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
    # Flask-Login session callback: map the stored user id back to a user.
    return utils.get_user_from_db(userid)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Render the login form (GET) or authenticate posted credentials."""
    if request.method != 'POST':
        return render_template('login.html')
    username = request.form['username']
    password = request.form['password']
    user = utils.get_user_from_db(username)
    # NOTE(review): plaintext password comparison.
    if user is None or user.password != password:
        return abort(401)
    login_user(user)
    return redirect(url_for('landing_page'))
@app.route("/logout")
@login_required
def logout():
    # End the Flask-Login session and send the user back to the home page
    # (which will bounce to /login via login_manager.login_view).
    logout_user()
    return redirect(url_for('landing_page'))
# handle login failed
@app.errorhandler(401)
def page_not_found(e):
    # Misnomer: this is registered for 401 (unauthorized), not 404.
    return Response('<p>Login failed</p>')
@app.route('/favicon.ico')
def favicon():
    # Serve the favicon from the static folder with an explicit mimetype.
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/')
def root():
    # Public landing page (no login required).
    return render_template('root.html')
@app.route('/request_for_admin')
def request_for_admin():
    # Public form to request a new admin account (handled by
    # submitted_admin_user).
    return render_template('admin_request.html')
@app.route('/api/v1/check_for_user/<userId>')
def check_for_user(userId):
    """Return JSON true/false for whether *userId* already exists."""
    return json.dumps(bool(utils.get_user_from_db(userId)))
@app.route('/api/v1/about_page')
def about_page():
    # Static "about" page.
    return render_template('about.html')
@app.route('/api/v1/check_for_project/<projectId>')
@login_required
def check_for_project(projectId):
    """Return JSON true/false for whether *projectId* already exists."""
    return json.dumps(bool(utils.get_project_from_db(projectId)))
@app.route('/api/v1/admin_page')
@login_required
def admin_page():
    """Render the superuser admin console."""
    return render_template(
        'admin.html',
        current_user=current_user.identity)
@app.route('/api/v1/landing_page')
@login_required
def landing_page():
    # Dispatch to the role-appropriate home page.
    user = current_user
    # NOTE(review): debug print (Python 2 statement syntax); remove for
    # production.
    print user.identity + ", " + user.type
    if user.type == "Admin":
        return show_projects()
    elif user.type == "Superuser":
        return admin_page()
    else:
        # Regular users land on their own entry list.
        return show_entrys_given_user(user.identity)
@app.route('/api/v1/show_projects')
@login_required
def show_projects():
    """List the projects administered by the current (admin) user.

    Non-admin callers get a 404 (NOTE(review): 403 would be more accurate,
    but clients may depend on the current status code).
    """
    cu = current_user.identity
    if not checkIfAdminUser():
        return "Unauthorized", 404
    projects = utils.get_projects_from_db(cu)
    # Removed a dead `if projects is None or len(projects) < 1: pass`
    # branch — an empty list simply renders an empty table.
    return render_template(
        'projects.html',
        projects=projects)
@app.route('/api/v1/show_project/<projectId>')
@login_required
def show_project(projectId):
    # Render the project create/edit form; admin-only.
    if checkIfAdminUser() == False:
        return "Unauthorized", 404
    # All vendors and users are loaded so the form can offer them for
    # selection.
    vendor_objs = utils.get_vendors_from_db(None)
    users = utils.get_users_from_db(None)
    cu = current_user.identity
    if projectId is not None and projectId != CREATE_MODE:
        # Edit mode: preload the project's members, vendors and business
        # objectives.
        project = utils.get_project_from_db(projectId)
        userlist = project.userIds
        vendorlist = project.vendorIds
        bos_db, cs = utils.get_business_objectives_from_db(projectId, False)
        return render_template(
            'project.html',
            current_user=cu,
            project=project,
            vendorlist=vendorlist,
            vendor_objects=vendor_objs,
            userlist=userlist,
            bos_db=bos_db,
            users=users)
    else:
        # CREATE_MODE: blank form for a new project.
        return render_template(
            'project.html',
            current_user=cu,
            vendor_objects=vendor_objs,
            users=users)
@app.route('/api/v1/submitted_project', methods=['POST', 'GET'])
@login_required
def submitted_project():
    # Persist the project form; admin-only.
    if checkIfAdminUser() == False:
        return "Unauthorized", 404
    if request.method == 'GET':
        # A GET here (e.g. page refresh) just bounces home.
        return redirect(url_for('landing_page'))
    projectId = request.form.get('projectId')
    tprj = utils.get_project_from_db(projectId)
    userIds = set(request.form.getlist('userIds[]'))
    if tprj:
        # Users removed from an existing project lose their entries.
        diffList = [item for item in tprj.userIds if item not in userIds]
        for dentry in diffList:
            entry = utils.get_entry_from_db(projectId, dentry)
            if entry:
                utils.delete_entry_from_db(entry)
    vendorIds = set(request.form.getlist('vendorIds[]'))
    bos = request.form.getlist("bos[]")
    due_date = request.form.get('due_date')
    department = request.form.get('department')
    group = request.form.get('group')
    description = request.form.get('description')
    defaultPassword = request.form.get('password')
    userId = current_user.identity
    # The admin submitting the form is always a project member.
    if userId not in userIds:
        userIds.add(userId)
    utils.update_project(projectId, department, group, description, defaultPassword, userIds, vendorIds, due_date, bos)
    # NOTE(review): the sleep presumably waits out datastore eventual
    # consistency before the landing page re-reads — confirm.
    time.sleep(1)
    return redirect(url_for('landing_page'))
@app.route('/api/v1/show_summary/<projectId>')
@login_required
def show_summary(projectId):
    # Render the aggregated per-criteria summary for a project.
    userId = current_user.identity
    # NOTE(review): debug timing (Python 2 print; time.clock is removed in
    # Python 3.8) — strip before production.
    start = time.clock()
    bos_db, criteria_to_users_map = utils.get_business_objectives_from_db(projectId, True)
    print str(time.clock() - start)
    project = utils.get_project_from_db(projectId)
    return render_template(
        'summary.html',
        current_user=userId,
        project=project,
        bos_db=bos_db,
        criteria_to_users_map=criteria_to_users_map,
        userId=userId)
@app.route('/api/v1/show_entry/<projectId>/<userId>')
@login_required
def show_entry(projectId, userId):
    """Render one user's entry form for the given project."""
    proj = utils.get_project_from_db(projectId)
    objectives, _unused = utils.get_business_objectives_from_db(projectId, True)
    user_entry = utils.get_entry_from_db(projectId, userId)
    return render_template(
        'entry.html',
        current_user=current_user.identity,
        userId=userId,
        project=proj,
        bos_db=objectives,
        entry=user_entry)
@app.route('/api/v1/show_entrys_given_project/<projectId>')
@login_required
def show_entrys_given_project(projectId):
    # Admin view: one row per project member, creating blank entries on
    # demand.
    isAdminUser = checkIfAdminUser()
    if isAdminUser == False:
        return "Unauthorized", 404
    users = utils.get_users_from_db(projectId)
    entrys = []
    for user in users:
        entry = utils.get_entry_from_db(projectId, user.identity)
        if entry is None:
            # Lazily create an empty entry the first time it is listed.
            entry = utils.update_entry(projectId, user.identity, None, None, None, None)
        entrys.append(entry)
    userId = current_user.identity
    return render_template(
        'entrys.html',
        current_date=datetime.datetime.now(),
        current_user=userId,
        projectId=projectId,
        userId=userId,
        isAdminUser=isAdminUser,
        entrys=entrys)
@app.route('/api/v1/show_entrys_given_user/<userId>')
@login_required
def show_entrys_given_user(userId):
    """List (creating on demand) the user's entries across their projects."""
    entrys = []
    for proj in utils.get_projects_from_db(userId):
        ent = utils.get_entry_from_db(proj.projectId, userId)
        if ent is None:
            # Lazily create a blank entry the first time it is viewed.
            ent = utils.update_entry(proj.projectId, userId, None, None, None, None)
        entrys.append(ent)
    return render_template(
        'entrys.html',
        current_date=datetime.datetime.now(),
        current_user=current_user.identity,
        userId=userId,
        entrys=entrys)
@app.route('/api/v1/submitted_entry/<projectId>', methods=['POST', 'GET'])
@login_required
def submitted_entry(projectId):
    # Persist the current user's entry form for a project.
    if request.method == 'GET':
        return redirect(url_for('landing_page'))
    userId = current_user.identity
    evaluation_criteria_output = request.form.get("evaluation_criteria_output")
    vendor_output = request.form.get("vendor_output")
    weights = request.form.get("weights")
    cbname = request.form
    # Every remaining form key is treated as a checked criteria checkbox and
    # folded into a comma-separated string.
    # NOTE(review): 'vendor_output' is NOT in the exclusion list below, so if
    # present in the form it ends up inside evaluation_criteria_input —
    # confirm whether that is intended.
    evaluation_criteria_input = ""
    first = True
    for key in cbname:
        if key != 'userId' and key != 'submit' and key != 'evaluation_criteria_output' and key != 'weights':
            if first:
                first = False
            else:
                evaluation_criteria_input += ","
            evaluation_criteria_input += key
    # NOTE(review): debug print; remove for production.
    print ("entry: " + str(projectId) + ", " + userId + ", " + str(evaluation_criteria_output) + ", " + str(weights))
    ent = utils.update_entry(projectId, userId, evaluation_criteria_input, evaluation_criteria_output, vendor_output, weights)
    user = utils.get_user_from_db(userId)
    if user.type == 'Admin':
        return redirect(url_for('show_entrys_given_project', projectId=projectId))
    elif user.type == 'User':
        # Notify that this subject-matter expert finished their entry.
        utils.send_entry_completion(projectId, userId)
        return redirect(url_for('show_entrys_given_user', userId=userId))
    else:
        return "Invalid URL", 404
@app.route('/api/v1/show_users')
@login_required
def show_users():
    """Render the list of all registered users."""
    everyone = utils.get_users_from_db(None)
    return render_template(
        'users.html',
        current_user=current_user.identity,
        users=everyone)
@app.route('/api/v1/submitted_admin_user', methods=['POST'])
def submitted_admin_user():
    # Self-service admin signup (deliberately not @login_required).
    username = request.form.get('username')
    user = utils.get_user_from_db(username)
    if user:
        # Reject duplicate identities with an error page.
        return render_template(
            "entry_error.html",
            h1Message="User ID Error, Go back and re-enter",
            title="User Add Error",
            message=username + " already is an existing user, Please go back and use another identity and submit")
    else:
        email = request.form.get('email')
        password = request.form.get('password')
        user = utils.update_user(username, email, "Admin", password, None)
        try:
            # Best-effort welcome mail; RuntimeError is logged, not fatal.
            utils.send_message(user, ADMIN_WELCOME_TITLE, ADMIN_WELCOME_MESSAGE.format(username=username))
        except RuntimeError as e:
            # NOTE(review): Python 2 print statement; use logging instead.
            print e
        return render_template('root.html')
@app.route('/api/v1/show_user/<projectId>/<identity>')
@login_required
def show_user(projectId, identity):
    """Render the user edit/create form.

    Three modes: edit an existing user; create a user scoped to a project
    (pre-filling the project's default password); or create a brand new user.
    """
    cu = current_user.identity
    user = utils.get_user_from_db(identity)
    projects = utils.get_projects_from_db(None)
    if user is not None and identity != CREATE_MODE:
        # Edit an existing user.
        return render_template(
            'user.html',
            current_user=cu,
            projects=projects,
            user=user)
    if projectId and projectId != CREATE_MODE:
        # Create/edit a user bound to a specific project.
        if projectId == "None":
            # The literal string "None" arrives from the URL; normalize it.
            projectId = None
        project = utils.get_project_from_db(projectId)
        return render_template(
            'user.html',
            current_user=cu,
            defaultPassword=project.defaultPassword,
            projectId=projectId)
    # Create a brand new, unscoped user.
    return render_template(
        'user.html',
        current_user=cu,
        projects=projects)
@app.route('/api/v1/set_user/<userId>', methods=['PATCH'])
@login_required
def set_user(userId):
    """Patch a user's email and/or password from the request form.

    Persists only when a field actually changed.  Returns 404 when the user
    does not exist.

    Bug fix: the original view had no return statement at all -- a Flask
    view returning None raises an error (HTTP 500) on every request, even
    successful ones.
    """
    user = utils.get_user_from_db(userId)
    if not user:
        return "Not Found", 404
    email = request.form.get('email')
    password = request.form.get('password')
    isChanged = False
    if user.email != email:
        user.email = email
        isChanged = True
    if user.password != password:
        user.password = password
        isChanged = True
    if isChanged:
        # Only hit the datastore when something changed.
        user.put()
    return "OK", 200
@app.route('/api/v1/submitted_user', methods=['POST', 'GET'])
@login_required
def submitted_user():
if request.method == 'GET':
return redirect(url_for('landing_page'))
userId = request.form.get('identity')
if userId == "superuser":
return render_template(
"entry_error.html",
h1Message="User ID Error, Go back and re-enter",
title="User Add Error",
message=userId + " is a system user, Please go back and use another identity and submit")
email = request.form.get('email')
type = request.form.get('type')
password = request.form.get('password')
projectIds = request.form.getlist('projectIds[]')
projectId = request.form.get("projectId")
newProject = False
if projectId and projectId not in projectIds:
projectIds.append(projectId)
newProject = True
print "user: " + str(userId) + ", " + str(email) + ", " + str(type) + ", " + str(password) + ", " + str(projectIds)
user = utils.update_user(userId, email, type, password, projectIds)
if newProject:
try:
utils.send_message(user, USER_WELCOME_TITLE.format(projectId=projectId),
USER_WELCOME_MESSAGE.format(userId=user.identity, projectId=projectId))
except RuntimeError:
pass
if current_user.type == "Superuser":
return redirect(url_for('show_users'))
else:
return redirect(url_for('show_project', projectId=projectId))
@app.route('/api/v1/show_vendors')
@login_required
def show_vendors():
    """List every registered vendor."""
    return render_template(
        'vendors.html',
        current_user=current_user.identity,
        vendors=utils.get_vendors_from_db(None))
@app.route('/api/v1/show_vendor/<projectId>/<identity>')
@login_required
def show_vendor(projectId, identity):
    """Render the vendor edit/create form.

    Three modes: edit an existing vendor; create a vendor scoped to a
    project (pre-filling the project's default password); or create a brand
    new vendor.
    """
    cu = current_user.identity
    vendor = utils.get_vendor_from_db(identity)
    projects = utils.get_projects_from_db(None)
    if vendor is not None and identity != CREATE_MODE:
        # Edit an existing vendor.
        return render_template(
            'vendor.html',
            current_user=cu,
            projects=projects,
            vendor=vendor)
    if projectId and projectId != CREATE_MODE:
        # Create/edit a vendor bound to a specific project.
        if projectId == "None":
            # The literal string "None" arrives from the URL; normalize it.
            projectId = None
        project = utils.get_project_from_db(projectId)
        return render_template(
            'vendor.html',
            current_user=cu,
            defaultPassword=project.defaultPassword,
            projectId=projectId)
    # Create a brand new, unscoped vendor.
    return render_template(
        'vendor.html',
        current_user=cu,
        projects=projects)
@app.route('/api/v1/submitted_vendor', methods=['POST', 'GET'])
@login_required
def submitted_vendor():
if request.method == 'GET':
return redirect(url_for('landing_page'))
vendorId = request.form.get('identity')
email = request.form.get('email')
projectIds = request.form.getlist('projectIds[]')
projectId = request.form.get("projectId")
if projectId:
projectIds.append(projectId)
print "vendor: " + str(vendorId) + ", " + str(email) + ", " + ", " + str(projectIds)
vendor = utils.update_vendor(vendorId, email, projectIds)
return show_vendors()
@app.route('/api/v1/send_email', methods=['POST'])
def send_email():
    """Send a project reminder email to the posted recipient list.

    NOTE(review): no @login_required guard here -- confirm this is intended.
    """
    utils.send_reminders(
        request.form.getlist('tolist[]'),
        PROJECT_REMINDER_TITLE,
        request.form.get('content'))
    return "OK", 200
@app.route('/api/v1/manage', methods=['GET', 'POST'])
def manage():
    # Trigger the periodic management task in the data layer.
    # NOTE(review): no @login_required guard here -- presumably invoked by a
    # scheduler/cron hook; confirm unauthenticated access is intended.
    utils.run_manage()
    return "OK", 200
@app.route('/api/v1/update_token', methods=['POST'])
def update_token():
    """Store a client token for the logged-in user.

    Always answers 200; anonymous callers are silently ignored.
    """
    if current_user and not current_user.is_anonymous:
        utils.update_token(current_user.identity, request.form.get('token'))
    return "OK", 200
@app.route('/api/v1/delete_project/<projectId>', methods=['DELETE'])
@login_required
def delete_project(projectId):
    # Remove a single project; deletion is delegated to the data layer.
    utils.delete_project_from_db(projectId)
    return "OK", 200
@app.route('/api/v1/delete_user/<userId>', methods=['DELETE'])
@login_required
def delete_user(userId):
    # Remove a single user record via the data layer.
    utils.delete_user_from_db(userId)
    return "OK", 200
@app.route('/api/v1/delete_users', methods=['DELETE'])
@login_required
def delete_users():
    # Bulk-delete every user record via the data layer.
    utils.delete_users_from_db()
    return "OK", 200
@app.route('/api/v1/delete_vendor/<vendorId>', methods=['DELETE'])
@login_required
def delete_vendor(vendorId):
    # Remove a single vendor record via the data layer.
    utils.delete_vendor_from_db(vendorId)
    return "OK", 200
@app.route('/api/v1/delete_vendors', methods=['DELETE'])
@login_required
def delete_vendors():
    """Bulk-delete every vendor record.

    Fix: the original return statement was corrupted in the source
    ("return | |"); restored to the ("OK", 200) response that every sibling
    delete endpoint in this file returns.
    """
    utils.delete_vendors_from_db()
    return "OK", 200
from __future__ import print_function
import collections
import csv
import logging
import pickle
import re
from cStringIO import StringIO
import swalign
from transgene.common import read_fasta, trans, BEDPE, translate
def get_transcriptome_data(infile, drop_transcript_version=False):
    """
    Parses Gencode transcript FASTA file and returns CDS sequences keyed by the transcript ID
    :param file infile: Gencode FASTA file object
    :param bool drop_transcript_version: Drop the version part of transcript ids.
    :return: {transcript ID: transcripts} and {gene ID: transcript IDs}
    :rtype: tuple
    """
    # Fix: the dots separating an ID from its version number were unescaped
    # ('.') and could match any character on malformed headers; escape them
    # so only a literal '.' is accepted.
    regex = r"(?P<transcript_id>ENST[0-9A-Z]+\.\d+)\|(?P<gene_id>ENSG[0-9A-Z]+\.\d+)" \
            r".*CDS:(?P<start>\d+)-(?P<stop>\d+)"
    gene_transcripts = collections.defaultdict(list)
    transcript_cds = {}
    for header, comment, seq in read_fasta(infile, 'ACGT'):
        match = re.search(regex, header)
        if match:
            # Remove the version number on gene ID
            gene_id = match.group("gene_id").split('.')[0]
            transcript_id = match.group("transcript_id")
            if drop_transcript_version:
                transcript_id = transcript_id.split('.')[0]
            # GTF is one-based. Make interval [zero, one-based)
            start = int(match.group("start")) - 1
            stop = int(match.group("stop"))
            gene_transcripts[gene_id].append(transcript_id)
            # NOTE: the same dict doubles as a transcript ID -> versioned
            # gene ID mapping ("Save transcript to gene ID mapping"), so it
            # holds both key styles; preserved deliberately.
            gene_transcripts[transcript_id] = match.group("gene_id")
            transcript_cds[transcript_id] = seq[start: stop]
    return transcript_cds, gene_transcripts
def rna_gene_in_bedpe(record):
    """
    Determine if one of the two candidates in a BEDPE line is an rna gene.
    :param BEDPE record: A BEDPE line from the input file
    :returns: True if one of the candidates is an RNA gene and False if not
    :rtype: bool
    """
    # Heuristic: only the 5' partner (hugo1) is screened.  An RP11- (lncRNA)
    # 3' partner is acceptable since such fusions can still be translated.
    return 'RP11-' in record.hugo1
def readthrough_in_bedpe(record, annotation, rt_threshold):
    """
    Determine if the two genes in the record are within `rt_threshold` bp of each other on the same
    chromosome.
    :param BEDPE record: A BEDPE line from the input file
    :param dict(str, GTFRecord) annotation: see `read_fusions:gene_annotations`
    :param rt_threshold: The genomic distance on the same chromosome below which we will call a
           candidate fusion a readthrough.
    :returns: True if the pair is considered a readthrough and False if not
    :rtype: bool
    """
    if record.chrom1 != record.chrom2:
        # Different chromosomes can never be a readthrough.
        return False
    first = annotation[record.hugo1]
    second = annotation[record.hugo2]
    # A readthrough: either gene starts inside (or within rt_threshold bp
    # past) the other gene's annotated span.
    return (first.start <= second.start <= first.end + rt_threshold or
            second.start <= first.start <= second.end + rt_threshold)
def read_fusions(fusion_file, gene_annotations, filter_mt, filter_ig, filter_rg, filter_rt,
                 rt_threshold, out_bedpe):
    """
    Reads in gene fusion predictions in modified BEDPE format.
    In addition to the basic BEDPE features, this function requires the fusion
    junction sequences and HUGO names for the donor and acceptor genes.
    :param file fusion_file: Fusion calls in BEDPE format
    :param dict(str, GTFRecord) gene_annotations: The gene annotations from the gtf
    :param bool filter_mt: Filter mitochondrial events?
    :param bool filter_ig: Filter immunoglobulin pairs?
    :param bool filter_rg: Filter RNA-Gene events?
    :param bool filter_rt: Filter transcriptional read-throughs?
    :param int rt_threshold: Distance threshold to call a readthrough
    :param file out_bedpe: A file handle to an output BEDPE file
    :returns: list of BEDPE namedtuples
    :rtype: list
    Modified BEDPE format
    chrom1: Chromosome of first feature
    start1: Zero-based starting position of the first feature or '.'
    end1: One-based ending position of the first feature -- 5' fusion breakpoint
    chrom2: Chromosome of second feature
    start2: Zero-based starting position of the second feature -- 3' fusion breakpoint
    end2: One-based ending position of the second feature or '.'
    name: Hyphenated Ensembl gene IDs (i.e. ENSG00000168172-ENSG00000165731)
    score: Optional fusion score
    strand1: Strand of first feature
    strand2: Strand of second feature
    junctionSeq1: Fusion junction sequence in first feature
    junctionSeq2: Fusion junction sequence in second feature
    hugo1: HUGO name for first feature
    hugo2: HUGO name for second feature
    """
    # NOTE: this docstring is runtime-visible (it is interpolated into the
    # ValueError below), so only the "thh" -> "the" typo was corrected.
    # Also fixed: "an an Immunoglobulin" -> "as an Immunoglobulin" in the log.
    calls = []
    for line in csv.reader(fusion_file, delimiter='\t'):
        if line[0].startswith('#'):
            # Pass header/comment lines through to the output unchanged.
            print('\t'.join(line), file=out_bedpe)
            continue
        try:
            record = BEDPE(*line)
        except TypeError:
            raise ValueError("ERROR: fusion file is malformed.\n{}".format(read_fusions.__doc__))
        if filter_mt and ('M' in record.chrom1 or 'M' in record.chrom2):
            logging.warning("Rejecting %s-%s for containing a Mitochondrial gene.", record.hugo1,
                            record.hugo2)
            continue
        elif filter_ig and record.hugo1.startswith('IG') and record.hugo2.startswith('IG'):
            # This will drop some Insulin-like growth factor (IGF) proteins but they have a lot of
            # homology too so its ok.
            logging.warning("Rejecting %s-%s as an Immunoglobulin gene pair.", record.hugo1,
                            record.hugo2)
            continue
        elif filter_rg and rna_gene_in_bedpe(record):
            logging.warning("Rejecting %s-%s for containing a 5' RNA gene.", record.hugo1,
                            record.hugo2)
            continue
        elif filter_rt and readthrough_in_bedpe(record, gene_annotations, rt_threshold):
            logging.warning("Rejecting %s-%s as a potential readthrough.", record.hugo1,
                            record.hugo2)
            continue
        else:
            logging.info("Accepting %s-%s for further study.", record.hugo1, record.hugo2)
            print('\t'.join(line), file=out_bedpe)
            calls.append(record)
    if not calls:
        logging.warning('Input bedpe file was empty or had no actionable mutations.')
    return calls
# Namedtuple for storing alignment metrics
# Needs to be global for pickling
# Fields: query/reference start and stop of the local alignment (the starts
# are converted to zero-based by align_filter), plus the total inserted and
# deleted bases counted from the alignment CIGAR.
AlignStats = collections.namedtuple('AlignStats',
                                    'qstart, qstop, rstart, rstop, insertions, deletions')
def align_filter(ref, query, mode, mismatches_per_kb=1):
    """
    Aligns query to reference CDS sequence using the Smith-Waterman algorithm.
    Returns None if the alignment is clipped at the fusion boundary, fails the
    mismatch-rate filter, or cannot be parsed from the alignment report.
    :param str ref: In-frame reference transcript
    :param str query: Query transcript
    :param str mode: 'donor' or 'acceptor'
    :param int mismatches_per_kb: Allowed number of mismatches per kilobase
           of the alignment
    :return: Alignment features
    :rtype: namedtuple
    """
    bound_regex_str = r'Query\s*:\s*(?P<qstart>\d*)\s*[\w-]*\s*(?P<qstop>\d*)\s*[\|\s]*\s*' \
                      r'Ref\s*:\s*(?P<rstart>\d*)\s*[\w-]*\s*(?P<rstop>\d*)'
    bounds_regex = re.compile(bound_regex_str)
    mismatch_regex = re.compile(r'Mismatches: (?P<mismatches>\d+)')
    # Use default swalign parameters
    match = 2
    mismatch = -1
    scoring = swalign.NucleotideScoringMatrix(match, mismatch)
    sw = swalign.LocalAlignment(scoring)
    alignment = sw.align(ref, query)
    # Count the number of insertions and deletions
    # (renamed loop variable: 'chr' shadowed the builtin).
    insertions = 0
    deletions = 0
    for op, num in alignment.cigar:
        if op == 'I':
            insertions += num
        elif op == 'D':
            deletions += num
    # Next grab the alignment statistics by parsing the human-readable dump.
    sw_output = StringIO()
    alignment.dump(out=sw_output)
    dump = sw_output.getvalue()
    sw_output.close()
    num_mismatches = None
    m = mismatch_regex.search(dump)
    if m:
        num_mismatches = int(m.group('mismatches'))
    s = bounds_regex.search(dump)
    if s:
        qstart = int(s.group('qstart')) - 1  # Make zero-based
        qstop = int(s.group('qstop'))
        # Filter alignments that have more than the allowed number of mismatches
        # per kilobase.  Guard the None case explicitly (no 'Mismatches:' line
        # in the dump); the old 'None > int' comparison only worked in Python 2.
        if num_mismatches is not None and \
                num_mismatches > int(mismatches_per_kb * (qstop - qstart)):
            logging.debug("Mismatch filter: %d > %d" % (num_mismatches,
                                                        int(mismatches_per_kb * (qstop - qstart))))
            return
        # Filter alignments that do not include the donor breakpoint
        if mode == 'donor' and qstop != len(query):
            logging.debug('Donor alignment does not reach fusion boundary')
            return
        # Filter alignments that do not include the acceptor breakpoint
        elif mode == 'acceptor' and qstart != 0:
            logging.debug('Acceptor alignment does not reach fusion boundary')
            return
        rstart = int(s.group('rstart')) - 1  # Make zero-based
        # Bug fix: rstop previously re-read the 'rstart' group, so every
        # AlignStats carried a reference stop equal to the reference start.
        rstop = int(s.group('rstop'))
        return AlignStats(qstart, qstop, rstart, rstop, insertions, deletions)
def scan_frame(reference_start):
    """
    Find the frame of a sequence using the alignment starting position
    :param int reference_start: Alignment start position
    :return: Number of bases to slice off the sequence to make it in-frame (0-2)
    :rtype: int
    """
    # The adjustment is the distance to the next multiple of 3.  The original
    # loop's "> 3" bail-out was unreachable (the adjustment can only ever be
    # 0, 1 or 2 before the loop terminates), so this closed form is exactly
    # equivalent and always returns an int.
    return (3 - reference_start % 3) % 3
def get_donor_junction_exon(breakpoint, exons, cds_start, peplen):
"""
Finds the first exon before the fusion breakpoint
:param int breakpoint: Genomic position of donor breakpoint
:param list exons: Sorted list of exons for the current transcript as GTFRecord objects
:param int cds_start: The CDS start for the transcript
:param int peplen: The length of peptides that will be estimated from this IAR.
:return: In-frame donor junction sequence with (n-1)*3 + overhang nucleotides (for n-1 normal
AAs and < 3 bp that will make the first modifed AA, and a flag indicating that the
breakpoint was in an intron.
:rtype: str, bool
"""
sequence = []
intron_junction = True
if exons[0].strand == '+':
if breakpoint < cds_start:
logging.warning('Predicted breakpoint %s is in in the 5\' UTR of %s. Skipping',
breakpoint, exons[0].transcript_id)
return None, None
for i, exon in enumerate(exons):
if breakpoint < exon.start:
# The breakpoint was in the previous intron
if breakpoint <= exons[i - 1].end + 23:
logging.warning('Breakpoint within 23bp of exon end. Splicing may be affected.')
break
if cds_start > exon.end:
# This handles an exon of just 5' UTR
continue
start = max(0, cds_start - exon.start)
end = exon.end - exon.start if breakpoint > exon.end else breakpoint - exon.start
sequence.append(exon.sequence[start:end + 1])
if exon.start <= breakpoint <= exon.end:
intron_junction = False
break
else:
if breakpoint > cds_start:
logging.debug('Predicted breakpoint in the 5\' UTR. Skipping')
return None, None
for i, exon in enumerate(exons):
if breakpoint > exon.end:
# The breakpoint was in the previous intron
if breakpoint >= exons[i - 1].start - 23:
logging.warning('Breakpoint within 23bp of exon end. Splicing may be affected.')
break
if cds_start < | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.635388,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.70218,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0444987,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.23764,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.234884,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.194386,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.313537,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.158263,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.666186,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.186311,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.60442,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0443745,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00815342,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0758398,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0602995,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.120214,
'Execution Unit/Register Files/Runtime Dynamic': 0.0684529,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.1709,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.421674,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.79933,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00174031,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00174031,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0015517,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000620317,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000866208,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00589852,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0154036,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0579675,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.68723,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.197974,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.196884,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.0847,
'Instruction Fetch Unit/Runtime Dynamic': 0.474127,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0616772,
'L2/Runtime Dynamic': 0.0133983,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.36869,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.03434,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0689615,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0689615,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.69434,
'Load Store Unit/Runtime Dynamic': 1.44339,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.170047,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.340095,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0603503,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0610656,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.229259,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0330802,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.489039,
'Memory Management Unit/Runtime Dynamic': 0.0941458,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.5236,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.116729,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0101907,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0965361,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.