hexsha
stringlengths 40
40
| size
int64 4
996k
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
996k
| avg_line_length
float64 1.33
58.2k
| max_line_length
int64 2
323k
| alphanum_fraction
float64 0
0.97
| content_no_comment
stringlengths 0
946k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
790d63f065ecd4e3b6431e8fcb5ae11dec6027e2
| 2,530
|
py
|
Python
|
livelossplot/keras_plot.py
|
kkanska/livelossplot
|
20715b3c16656e22a1371e0b9bb45ae4a7718a71
|
[
"MIT"
] | 1
|
2019-03-07T12:49:58.000Z
|
2019-03-07T12:49:58.000Z
|
livelossplot/keras_plot.py
|
kkanska/livelossplot
|
20715b3c16656e22a1371e0b9bb45ae4a7718a71
|
[
"MIT"
] | null | null | null |
livelossplot/keras_plot.py
|
kkanska/livelossplot
|
20715b3c16656e22a1371e0b9bb45ae4a7718a71
|
[
"MIT"
] | null | null | null |
from __future__ import division
from keras.callbacks import Callback
from .generic_plot import PlotLosses
# Mapping from Keras metric/loss identifiers to human-readable titles used
# as plot labels. Identifiers absent from this dict fall back to their raw
# name (see the .get(loss_name, loss_name) lookups below).
metric2printable = {
    "acc": "Accuracy",
    "mean_squared_error": "Mean squared error",
    "mean_absolute_error": "Mean absolute error",
    "mean_absolute_percentage_error": "Mean absolute percentage error",
    # etc
    "categorical_crossentropy": "Log-loss",
    "sparse_categorical_crossentropy": "Log-loss",
    "binary_crossentropy": "Log-loss",
    "kullback_leibler_divergence": "Log-loss"
}
def loss2name(loss):
    """Return the canonical name of a Keras loss.

    Accepts either a loss callable (its ``__name__`` is returned) or a
    string identifier (returned unchanged).
    """
    if callable(loss):
        return loss.__name__
    return loss
class PlotLossesKeras(Callback):
    """Keras callback that draws a live plot of training metrics each epoch.

    All keyword arguments are forwarded to ``PlotLosses`` from
    ``generic_plot``.
    """

    def __init__(self, **kwargs):
        super(PlotLossesKeras, self).__init__()
        self.liveplot = PlotLosses(**kwargs)

    def on_train_begin(self, logs=None):
        """Configure tracked metrics and printable titles from the model.

        Fix: the original used a mutable default argument (``logs={}``);
        ``None`` is the safe sentinel.
        """
        # Track every metric except the validation copies ('val_*') —
        # the plotter pairs them up itself.
        self.liveplot.set_metrics([
            metric for metric in self.params['metrics']
            if not metric.startswith('val_')
        ])
        # Slightly convoluted due to model.compile(loss=...) accepting a
        # single loss, a list, or a dict; vide
        # https://github.com/keras-team/keras/blob/master/keras/engine/training.py
        if isinstance(self.model.loss, list):
            losses = self.model.loss
        elif isinstance(self.model.loss, dict):
            losses = list(self.model.loss.values())
        else:
            # by far the most common scenario
            losses = [self.model.loss]

        metric2printable_updated = metric2printable.copy()
        loss_name = loss2name(losses[0])
        metric2printable_updated['loss'] =\
            "{} (cost function)".format(metric2printable_updated.get(loss_name, loss_name))

        if len(losses) > 1:
            # Multi-output model with one loss per output: title each
            # '<output>_loss' entry with its own loss name.
            for output_name, loss in zip(self.model.output_names, losses):
                loss_name = loss2name(loss)
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
        else:
            # Single shared loss: reuse its name for every output.
            for output_name in self.model.output_names:
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)

        self.liveplot.metric2title = metric2printable_updated
        self.liveplot.set_max_epoch(self.params['epochs'])

    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's metrics and redraw the live plot."""
        # Copy so the plotter can never mutate the caller's logs dict;
        # tolerate logs=None (mutable-default fix).
        self.liveplot.update(dict(logs or {}))
        self.liveplot.draw()
| 36.142857
| 101
| 0.641107
|
from __future__ import division
from keras.callbacks import Callback
from .generic_plot import PlotLosses
# Mapping from Keras metric/loss identifiers to human-readable plot titles.
# Unknown identifiers fall back to their raw name at lookup time.
metric2printable = {
    "acc": "Accuracy",
    "mean_squared_error": "Mean squared error",
    "mean_absolute_error": "Mean absolute error",
    "mean_absolute_percentage_error": "Mean absolute percentage error",
    "categorical_crossentropy": "Log-loss",
    "sparse_categorical_crossentropy": "Log-loss",
    "binary_crossentropy": "Log-loss",
    "kullback_leibler_divergence": "Log-loss"
}
def loss2name(loss):
    """Return the loss name: ``__name__`` if passed as a callable,
    otherwise the string identifier itself."""
    if hasattr(loss, '__call__'):
        return loss.__name__
    else:
        return loss
class PlotLossesKeras(Callback):
    """Keras callback that draws a live plot of training metrics each epoch.

    All keyword arguments are forwarded to ``PlotLosses``.
    """
    def __init__(self, **kwargs):
        super(PlotLossesKeras, self).__init__()
        self.liveplot = PlotLosses(**kwargs)
    def on_train_begin(self, logs={}):
        """Configure tracked metrics and printable titles from the model."""
        # Track every metric except the validation copies ('val_*').
        self.liveplot.set_metrics([
            metric for metric in self.params['metrics']
            if not metric.startswith('val_')
        ])
        # model.compile(loss=...) may hold a single loss, a list, or a dict.
        if isinstance(self.model.loss, list):
            losses = self.model.loss
        elif isinstance(self.model.loss, dict):
            losses = list(self.model.loss.values())
        else:
            # Single loss: by far the most common scenario.
            losses = [self.model.loss]
        metric2printable_updated = metric2printable.copy()
        loss_name = loss2name(losses[0])
        metric2printable_updated['loss'] =\
            "{} (cost function)".format(metric2printable_updated.get(loss_name, loss_name))
        if len(losses) > 1:
            # Multi-output model: title each '<output>_loss' with its own loss.
            for output_name, loss in zip(self.model.output_names, losses):
                loss_name = loss2name(loss)
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
        else:
            # Single shared loss reused across all outputs.
            for output_name in self.model.output_names:
                metric2printable_updated['{}_loss'.format(output_name)] =\
                    "{} ({})".format(metric2printable_updated.get(loss_name, loss_name), output_name)
        self.liveplot.metric2title = metric2printable_updated
        self.liveplot.set_max_epoch(self.params['epochs'])
    def on_epoch_end(self, epoch, logs={}):
        """Record this epoch's metrics and redraw the live plot."""
        self.liveplot.update(logs.copy())
        self.liveplot.draw()
| true
| true
|
790d64784359740d132bbba12433a5a99da3ca03
| 77,727
|
py
|
Python
|
bert4keras/models.py
|
CurisZhou/bert4keras
|
216f408b0501a1e6e6903c7a6271213d88f7725c
|
[
"Apache-2.0"
] | null | null | null |
bert4keras/models.py
|
CurisZhou/bert4keras
|
216f408b0501a1e6e6903c7a6271213d88f7725c
|
[
"Apache-2.0"
] | null | null | null |
bert4keras/models.py
|
CurisZhou/bert4keras
|
216f408b0501a1e6e6903c7a6271213d88f7725c
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
# 主要模型
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string
from keras.models import Model
import json
class Transformer(object):
    """Base class for Transformer models.

    Subclasses implement get_inputs / apply_embeddings / apply_main_layers /
    apply_final_layers; build() wires them into a Keras Model.
    """
    def __init__(
        self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden (encoding) dimension
        num_hidden_layers,  # total number of Transformer layers
        num_attention_heads,  # number of attention heads
        intermediate_size,  # hidden dimension of the FeedForward block
        hidden_act,  # activation of the FeedForward hidden layer
        dropout_rate=None,  # dropout rate
        embedding_size=None,  # explicit embedding size, if any
        attention_head_size=None,  # head_size of V in attention
        attention_key_size=None,  # head_size of Q and K in attention
        sequence_length=None,  # fixed sequence length, if any
        keep_tokens=None,  # list of token IDs to keep
        compound_tokens=None,  # extended (compound-token) embeddings
        residual_attention_scores=False,  # add residual to attention scores
        layers=None,  # externally supplied Keras layers
        prefix=None,  # prefix for layer names
        name=None,  # model name
        **kwargs
    ):
        # keep_tokens replaces the vocabulary; compound_tokens extends it.
        if keep_tokens is not None:
            vocab_size = len(keep_tokens)
        if compound_tokens is not None:
            vocab_size += len(compound_tokens)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = attention_head_size or hidden_size // num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.compound_tokens = compound_tokens
        self.attention_bias = None
        self.position_bias = None
        self.attention_scores = None
        self.residual_attention_scores = residual_attention_scores
        self.layers = {} if layers is None else layers
        self.prefix = prefix or ''
        self.name = name
        self.built = False
    def build(
        self,
        attention_caches=None,
        layer_norm_cond=None,
        layer_norm_cond_hidden_size=None,
        layer_norm_cond_hidden_act=None,
        additional_input_layers=None,
        **kwargs
    ):
        """Build the model (idempotent: no-op when already built).

        attention_caches: dict of cached K/V sequences for attention, of the
            form {attention layer name: [K cache, V cache]};
        layer_norm_* arguments: used for Conditional Layer Normalization,
            i.e. a conditional BERT whose condition is a fixed-length vector.
        """
        if self.built:
            return None
        # Input
        inputs = self.get_inputs()
        self.set_inputs(inputs, additional_input_layers)
        # Other
        self.attention_caches = attention_caches or {}
        self.layer_norm_conds = [
            layer_norm_cond,
            layer_norm_cond_hidden_size,
            layer_norm_cond_hidden_act or 'linear',
        ]
        # Call
        outputs = self.call(inputs)
        self.set_outputs(outputs)
        # Model
        self.model = Model(self.inputs, self.outputs, name=self.name)
        self.built = True
    def call(self, inputs):
        """Define the model's forward pass:
        embeddings -> num_hidden_layers main blocks -> final layers."""
        # Embedding
        outputs = self.apply_embeddings(inputs)
        # Main
        for i in range(self.num_hidden_layers):
            outputs = self.apply_main_layers(outputs, i)
        # Final
        outputs = self.apply_final_layers(outputs)
        return outputs
    def prefixed(self, name):
        """Prepend the configured prefix to a layer name
        (returns None when name is None)."""
        if name is not None:
            return self.prefix + name
    def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
        """Apply a layer via this helper so same-named layers are reused.

        inputs: output of the previous layer;
        layer: the layer class to apply;
        arguments: parameters passed to layer.call;
        kwargs: parameters passed to the layer constructor.
        When inputs is None the (possibly new) layer object is returned
        instead of being called.
        """
        # Zero dropout rate: skip the Dropout layer entirely.
        if layer is Dropout and self.dropout_rate == 0:
            return inputs
        if layer is MultiHeadAttention and self.residual_attention_scores:
            kwargs['return_attention_scores'] = True
        arguments = arguments or {}
        name = self.prefixed(kwargs.get('name'))
        kwargs['name'] = name
        if name not in self.layers:
            layer = layer(**kwargs)
            name = layer.name
            self.layers[name] = layer
        if inputs is None:
            return self.layers[name]
        else:
            if isinstance(self.layers[name], MultiHeadAttention):
                if name in self.attention_caches:
                    # A cache was supplied: concatenate it onto Key / Value.
                    k_cache, v_cache = self.attention_caches[name]
                    k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'
                    k = Concatenate1D(name=k_name)([k_cache, inputs[1]])
                    v = Concatenate1D(name=v_name)([v_cache, inputs[2]])
                    inputs = inputs[:1] + [k, v] + inputs[3:]
                if self.residual_attention_scores:
                    # With residual attention scores, each attention matrix
                    # gets the previous layer's matrix added, following the
                    # RealFormer design (https://arxiv.org/abs/2012.11747).
                    # The implementation is still fairly rough and may lack
                    # generality.
                    if self.attention_scores is not None:
                        if arguments.get('a_bias'):
                            a_bias = Add(name=name + '-Attention-Bias'
                                         )([inputs[3], self.attention_scores])
                        else:
                            a_bias = self.attention_scores
                        inputs = inputs[:3] + [a_bias] + inputs[4:]
                        arguments['a_bias'] = True
                    o, a = self.layers[name](inputs, **arguments)
                    self.attention_scores = a
                    return o
            return self.layers[name](inputs, **arguments)
    def get_inputs(self):
        raise NotImplementedError
    def apply_embeddings(self, inputs):
        raise NotImplementedError
    def apply_main_layers(self, inputs, index):
        raise NotImplementedError
    def apply_final_layers(self, inputs):
        raise NotImplementedError
    def compute_attention_bias(self, inputs=None):
        """Define the per-layer attention bias (mask)."""
        return self.attention_bias
    def compute_position_bias(self, inputs=None):
        """Define the per-layer position bias
        (generally used by relative position encodings)."""
        return self.position_bias
    def set_inputs(self, inputs, additional_input_layers=None):
        """Set the ``input`` and ``inputs`` attributes (Keras-style:
        ``input`` is the bare tensor when there is exactly one)."""
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        inputs = inputs[:]
        if additional_input_layers is not None:
            if not isinstance(additional_input_layers, list):
                additional_input_layers = [additional_input_layers]
            inputs.extend(additional_input_layers)
        self.inputs = inputs
        if len(inputs) > 1:
            self.input = inputs
        else:
            self.input = inputs[0]
    def set_outputs(self, outputs):
        """Set the ``output`` and ``outputs`` attributes (same convention
        as set_inputs)."""
        if not isinstance(outputs, list):
            outputs = [outputs]
        outputs = outputs[:]
        self.outputs = outputs
        if len(outputs) > 1:
            self.output = outputs
        else:
            self.output = outputs[0]
    @property
    def initializer(self):
        """Default weight initializer: truncated normal, stddev 0.02."""
        return keras.initializers.TruncatedNormal(stddev=0.02)
    def simplify(self, inputs):
        """Filter None out of the list; unwrap a single remaining element."""
        inputs = [i for i in inputs if i is not None]
        if len(inputs) == 1:
            inputs = inputs[0]
        return inputs
    def load_embeddings(self, embeddings):
        """Post-process embedding weights: select keep_tokens rows and append
        averaged rows for compound_tokens."""
        if self.keep_tokens is not None:
            embeddings = embeddings[self.keep_tokens]
        if self.compound_tokens is not None:
            ext_embeddings = []
            for item in self.compound_tokens:
                # A bare list means uniform weights over its token ids.
                if isinstance(item, list):
                    item = (item, [1] * len(item))
                ext_embeddings.append(
                    np.average(embeddings[item[0]], 0, item[1])
                )
            embeddings = np.concatenate([embeddings, ext_embeddings], 0)
        return embeddings
    def load_variable(self, checkpoint, name):
        """Load a single variable, from a dict or a TF checkpoint."""
        if isinstance(checkpoint, dict):
            return checkpoint[name]
        else:
            return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value, dtype=None):
        """Create a variable; returns (variable, value) so the value can be
        assigned after graph construction."""
        dtype = dtype or K.floatx()
        return K.variable(
            self.initializer(value.shape, dtype), dtype, name=name
        ), value
    def variable_mapping(self):
        """Build the mapping from Keras layer names to checkpoint variable
        names (empty in the base class)."""
        return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Load weights from a checkpoint according to the mapping."""
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                # If key_size != head_size, project the Q/K weights (and
                # their biases, when present) into the right shape via a
                # random orthogonal matrix. (Original inline note below.)
                """如果key_size不等于head_size,则可以通过
                正交矩阵将相应的权重投影到合适的shape。
                """
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.attention_scale:
                    # Compensate the attention scaling factor.
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):
        """Save weights in TF checkpoint format according to the mapping."""
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        # NOTE(review): uses TF1-style Graph/Session/Saver APIs.
        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    variable, value = self.create_variable(name, value, dtype)
                    all_variables.append(variable)
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
class LM_Mask(object):
    """Mixin providing a lower-triangular attention mask (for language
    models): each position may only attend to itself and earlier positions.
    """
    def compute_attention_bias(self, inputs=None):
        """Derive the causal mask by comparing position indices."""
        if self.attention_bias is not None:
            return self.attention_bias

        def causal_mask(x):
            # Large negative bias on positions above the diagonal.
            n = K.shape(x)[1]
            positions = K.arange(0, n)
            visible = K.cast(
                positions[None, :] <= positions[:, None], K.floatx()
            )
            return -(1 - visible[None, None]) * 1e12

        self.attention_bias = self.apply(
            inputs=self.inputs[0],
            layer=Lambda,
            function=causal_mask,
            name='Attention-LM-Mask'
        )
        return self.attention_bias
class UniLM_Mask(object):
    """Mixin providing the UniLM attention mask (for Seq2Seq models).
    The source/target split is encoded by segment_ids.
    UniLM: https://arxiv.org/abs/1905.03197
    """
    def compute_attention_bias(self, inputs=None):
        """Derive the mask by comparing cumulative segment indices."""
        if self.attention_bias is not None:
            return self.attention_bias

        def seq2seq_mask(segments):
            # Position j is visible from i iff cumsum(j) <= cumsum(i).
            boundary = K.cumsum(segments, axis=1)
            visible = K.cast(
                boundary[:, None, :] <= boundary[:, :, None], K.floatx()
            )
            return -(1 - visible[:, None]) * 1e12

        self.attention_bias = self.apply(
            inputs=self.inputs[1],
            layer=Lambda,
            function=seq2seq_mask,
            name='Attention-UniLM-Mask'
        )
        return self.attention_bias
class BERT(Transformer):
    """Build a BERT model."""
    def __init__(
        self,
        max_position,  # maximum sequence length
        segment_vocab_size=2,  # number of segment ids
        with_pool=False,  # include the pooler part
        with_nsp=False,  # include the NSP (next sentence prediction) part
        with_mlm=False,  # include the MLM (masked language model) part
        hierarchical_position=None,  # hierarchically decomposed position embeddings
        custom_position_ids=False,  # accept user-supplied position ids
        shared_segment_embeddings=False,  # if True, segments share the token embedding
        **kwargs  # remaining arguments
    ):
        super(BERT, self).__init__(**kwargs)
        self.max_position = max_position
        self.segment_vocab_size = segment_vocab_size
        self.with_pool = with_pool
        self.with_nsp = with_nsp
        self.with_mlm = with_mlm
        self.hierarchical_position = hierarchical_position
        self.custom_position_ids = custom_position_ids
        self.shared_segment_embeddings = shared_segment_embeddings
        # NSP consumes the pooled CLS vector, so it implies the pooler.
        if self.with_nsp and not self.with_pool:
            self.with_pool = True
    def get_inputs(self):
        """BERT's inputs are token_ids and segment_ids.
        (Position ids may also be passed in for special needs.)
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        inputs = [x_in]
        if self.segment_vocab_size > 0:
            s_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Segment'
            )
            inputs.append(s_in)
        if self.custom_position_ids:
            p_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Position'
            )
            inputs.append(p_in)
        return inputs
    def apply_embeddings(self, inputs):
        """BERT's embedding is the sum of token, position and segment
        embeddings (LayerNorm + Dropout + optional projection on top)."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        # Optional Conditional-LayerNorm condition.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            # Optionally reuse the token embedding matrix for segments.
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        # Project when embedding_size differs from hidden_size (ALBERT-style).
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """BERT's main body is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Decide the outputs based on the remaining flags
        (with_pool / with_nsp / with_mlm)."""
        x = inputs
        z = self.layer_norm_conds[0]
        outputs = [x]
        if self.with_pool:
            # Pooler part (extract the CLS vector).
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],
                name='Pooler'
            )
            # with_pool may be True (tanh) or a custom activation name.
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                # Next Sentence Prediction part.
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            # Masked Language Model part.
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            # Reuse the token embedding matrix as the output projection.
            x = self.apply(
                inputs=x,
                layer=Embedding,
                arguments={'mode': 'dense'},
                name='Embedding-Token'
            )
            x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
            # with_mlm may be True (softmax) or a custom activation name.
            mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
            x = self.apply(
                inputs=x,
                layer=Activation,
                activation=mlm_activation,
                name='MLM-Activation'
            )
            outputs.append(x)
        # Only the sequence output -> return it bare; one extra head ->
        # return that head; several heads -> return them all.
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
    def load_variable(self, checkpoint, name):
        """Load a single variable, applying BERT-specific fixups."""
        variable = super(BERT, self).load_variable(checkpoint, name)
        if name in [
            'bert/embeddings/word_embeddings',
            'cls/predictions/output_bias',
        ]:
            # Vocabulary-indexed weights honour keep/compound tokens.
            return self.load_embeddings(variable)
        elif name == 'cls/seq_relationship/output_weights':
            # Checkpoint stores the NSP weights transposed.
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Create a variable in TensorFlow (transposing NSP weights back)."""
        if name == 'cls/seq_relationship/output_weights':
            value = value.T
        return super(BERT, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Map layer names to the official BERT checkpoint variable names."""
        mapping = {
            'Embedding-Token': ['bert/embeddings/word_embeddings'],
            'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
            'Embedding-Position': ['bert/embeddings/position_embeddings'],
            'Embedding-Norm': [
                'bert/embeddings/LayerNorm/beta',
                'bert/embeddings/LayerNorm/gamma',
            ],
            'Embedding-Mapping': [
                'bert/encoder/embedding_hidden_mapping_in/kernel',
                'bert/encoder/embedding_hidden_mapping_in/bias',
            ],
            'Pooler-Dense': [
                'bert/pooler/dense/kernel',
                'bert/pooler/dense/bias',
            ],
            'NSP-Proba': [
                'cls/seq_relationship/output_weights',
                'cls/seq_relationship/output_bias',
            ],
            'MLM-Dense': [
                'cls/predictions/transform/dense/kernel',
                'cls/predictions/transform/dense/bias',
            ],
            'MLM-Norm': [
                'cls/predictions/transform/LayerNorm/beta',
                'cls/predictions/transform/LayerNorm/gamma',
            ],
            'MLM-Bias': ['cls/predictions/output_bias'],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'bert/encoder/layer_%d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention/self/query/kernel',
                    prefix + 'attention/self/query/bias',
                    prefix + 'attention/self/key/kernel',
                    prefix + 'attention/self/key/bias',
                    prefix + 'attention/self/value/kernel',
                    prefix + 'attention/self/value/bias',
                    prefix + 'attention/output/dense/kernel',
                    prefix + 'attention/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'attention/output/LayerNorm/beta',
                    prefix + 'attention/output/LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/dense/kernel',
                    prefix + 'intermediate/dense/bias',
                    prefix + 'output/dense/kernel',
                    prefix + 'output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'output/LayerNorm/beta',
                    prefix + 'output/LayerNorm/gamma',
                ],
            })
        return mapping
class ALBERT(BERT):
    """Build an ALBERT model (all Transformer layers share one set of
    weights — note the layer names below carry no layer index)."""
    def apply_main_layers(self, inputs, index):
        """ALBERT's main body is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        # Unindexed names: every call reuses the same shared layers.
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_bias(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Map layer names to the official ALBERT checkpoint variable
        names (a single shared transformer group)."""
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
    """Release ALBERT's weight-sharing constraint and use it like BERT:
    every layer is mapped to the same (shared) checkpoint variables but
    built as an independent Keras layer."""
    def variable_mapping(self):
        """Map per-layer names to the official ALBERT checkpoint variable
        names (each layer reads the single shared group)."""
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        for i in range(self.num_hidden_layers):
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention_1/self/query/kernel',
                    prefix + 'attention_1/self/query/bias',
                    prefix + 'attention_1/self/key/kernel',
                    prefix + 'attention_1/self/key/bias',
                    prefix + 'attention_1/self/value/kernel',
                    prefix + 'attention_1/self/value/bias',
                    prefix + 'attention_1/output/dense/kernel',
                    prefix + 'attention_1/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'LayerNorm/beta',
                    prefix + 'LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'ffn_1/intermediate/dense/kernel',
                    prefix + 'ffn_1/intermediate/dense/bias',
                    prefix + 'ffn_1/intermediate/output/dense/kernel',
                    prefix + 'ffn_1/intermediate/output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'LayerNorm_1/beta',
                    prefix + 'LayerNorm_1/gamma',
                ],
            })
        return mapping
class NEZHA(BERT):
"""华为推出的NAZHA模型
链接:https://arxiv.org/abs/1909.00204
"""
def apply_embeddings(self, inputs):
"""NEZHA的embedding是token、segment两者embedding之和
"""
inputs = inputs[:]
x = inputs.pop(0)
if self.segment_vocab_size > 0:
s = inputs.pop(0)
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
if self.segment_vocab_size > 0:
if self.shared_segment_embeddings:
name = 'Embedding-Token'
else:
name = 'Embedding-Segment'
s = self.apply(
inputs=s,
layer=Embedding,
input_dim=2,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
name=name
)
x = self.apply(
inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""NEZHA的主体是基于Self-Attention的模块
顺序:Att --> Add --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_bias(index)
position_bias = self.compute_position_bias(x)
# Self Attention
xi, x = x, [x, x, x, position_bias]
arguments = {'a_bias': None, 'p_bias': 'typical_relative'}
if attention_mask is not None:
arguments['a_bias'] = True
x.insert(3, attention_mask)
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
out_dim=self.hidden_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
return x
    def compute_position_bias(self, inputs=None):
        """Classic relative position encoding, built once and cached.

        input_dim = 2 * 64 + 1 covers relative distances in [-64, 64];
        the embedding is sinusoidal-initialized and frozen.
        """
        if self.position_bias is None:

            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer='Sinusoidal',
                name='Embedding-Relative-Position',
                trainable=False
            )

        return self.position_bias
class ELECTRA(BERT):
    """Google's ELECTRA model.
    Link: https://arxiv.org/abs/2003.10555
    """
    @insert_arguments(with_discriminator=False)
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,  # maximum sequence length
        **kwargs  # remaining arguments, forwarded to BERT
    ):
        super(ELECTRA, self).__init__(max_position, **kwargs)

    def apply_final_layers(self, inputs):
        """Optionally append the discriminator (replaced-token) head."""
        x = inputs

        if self.with_discriminator:
            # True selects the default sigmoid; any other truthy value is
            # used directly as the final activation.
            if self.with_discriminator is True:
                final_activation = 'sigmoid'
            else:
                final_activation = self.with_discriminator
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='Discriminator-Dense'
            )
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=1,
                activation=final_activation,
                kernel_initializer=self.initializer,
                name='Discriminator-Prediction'
            )

        return x

    def load_variable(self, checkpoint, name):
        """Load a single variable, remapping word embeddings if needed."""
        variable = super(ELECTRA, self).load_variable(checkpoint, name)
        if name == 'electra/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable

    def variable_mapping(self):
        # Reuse BERT's mapping, rename the scope bert/ -> electra/, and
        # add ELECTRA-specific projection / discriminator weights.
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        mapping['Discriminator-Dense'] = [
            'discriminator_predictions/dense/kernel',
            'discriminator_predictions/dense/bias',
        ]
        mapping['Discriminator-Prediction'] = [
            'discriminator_predictions/dense_1/kernel',
            'discriminator_predictions/dense_1/bias',
        ]
        return mapping
class GPT(LM_Mask, BERT):
    """OpenAI GPT model.
    Link: https://github.com/openai/finetune-transformer-lm
    """
    @insert_arguments(final_activation='softmax')
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(self, **kwargs):
        super(GPT, self).__init__(**kwargs)

    def apply_embeddings(self, inputs):
        """GPT embeddings = sum of token, position and segment embeddings.

        The main difference from BERT is that the sum is NOT followed by
        a LayerNormalization layer.
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            # Caller supplies its own position ids as an extra input.
            p = inputs.pop(0)
        else:
            p = None

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            # Optionally share the token embedding table for segments.
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            # Project factorized embeddings up to the hidden width.
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )

        return x

    def apply_final_layers(self, inputs):
        """Output head: tied-embedding LM logits plus final activation."""
        x = inputs

        # Language Model head: reuse the token embedding in 'dense' mode
        # (weight tying), then apply the configured activation.
        x = self.apply(
            inputs=x,
            layer=Embedding,
            arguments={'mode': 'dense'},
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Activation,
            activation=self.final_activation,
            name='LM-Activation'
        )

        return x

    def load_variable(self, checkpoint, name):
        """Load a single variable, remapping word embeddings if needed."""
        variable = super(GPT, self).load_variable(checkpoint, name)
        if name == 'gpt/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable

    def variable_mapping(self):
        """Map layer names to the TF GPT checkpoint weight names."""
        mapping = super(GPT, self).variable_mapping()
        mapping = {
            k: [
                i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
                for i in v
            ]
            for k, v in mapping.items()
        }
        return mapping
class GPT2(GPT):
    """OpenAI GPT2 model.
    Link: https://github.com/openai/gpt-2
    """
    def get_inputs(self):
        """GPT2 takes only token_ids as input."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """GPT2 embeddings = sum of token and position embeddings."""
        x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """Build GPT2's index-th transformer block.

        Order: LN --> Att --> Add --> LN --> FFN --> Add (pre-norm);
        LayerNormalization uses epsilon=1e-5 to match the TF weights.
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask],
            layer=MultiHeadAttention,
            arguments={'a_bias': True},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return x

    def apply_final_layers(self, inputs):
        """Final LN + dropout, then GPT's tied-embedding LM head."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Output-Dropout'
        )
        x = super(GPT2, self).apply_final_layers(x)

        return x

    def variable_mapping(self):
        """Map layer names to the TF GPT2 checkpoint weight names."""
        mapping = super(GPT2, self).variable_mapping()
        # GPT2 is pre-norm, so per-block norms live under input/ rather
        # than output/; the final norm is added separately below.
        mapping = {
            k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]
            for k, v in mapping.items()
        }
        mapping['Output-Norm'] = [
            'gpt/output/LayerNorm/beta',
            'gpt/output/LayerNorm/gamma',
        ]

        return mapping
class GPT2_ML(GPT):
    """GPT2_ML model.
    Link: https://github.com/imcaspar/gpt2-ml
    Note: although it calls itself GPT2, its architecture is in fact
    closer to GPT; the name presumably refers to its released checkpoint
    reaching GPT2's 1.5B parameter scale.
    """
    def get_inputs(self):
        """GPT2_ML takes only token_ids as input."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """GPT2_ML embeddings = token + position embeddings, then LN."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """Build GPT2_ML's index-th transformer block.

        Order (as built below): Att --> Add --> LN --> FFN --> Add --> LN,
        with both norms keyed under the feed-forward name (Norm-0/Norm-1).
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)

        # Self Attention
        xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}

        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-0' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-1' % feed_forward_name
        )

        return x

    def load_variable(self, checkpoint, name):
        """Load a single variable, remapping word embeddings if needed."""
        variable = super(GPT2_ML, self).load_variable(checkpoint, name)
        if name == 'newslm/embeddings/word_embed':
            return self.load_embeddings(variable)
        else:
            return variable

    def variable_mapping(self):
        """Map layer names to the official GPT2_ML checkpoint weights."""
        mapping = {
            'Embedding-Token': ['newslm/embeddings/word_embed'],
            'Embedding-Position': ['newslm/embeddings/pos_embed'],
            'Embedding-Norm': [
                'newslm/embeddings/LayerNorm_embed_norm/beta',
                'newslm/embeddings/LayerNorm_embed_norm/gamma',
            ],
        }

        for i in range(self.num_hidden_layers):
            prefix = 'newslm/layer%02d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'query_layer/kernel',
                    prefix + 'query_layer/bias',
                    prefix + 'key_layer/kernel',
                    prefix + 'key_layer/bias',
                    prefix + 'value_layer/kernel',
                    prefix + 'value_layer/bias',
                    prefix + 'context_projection_layer/kernel',
                    prefix + 'context_projection_layer/bias',
                ],
                'Transformer-%d-FeedForward-Norm-0' % i: [
                    prefix + 'LayerNorm_mlp_ln0/beta',
                    prefix + 'LayerNorm_mlp_ln0/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/kernel',
                    prefix + 'intermediate/bias',
                    prefix + 'output/kernel',
                    prefix + 'output/bias',
                ],
                'Transformer-%d-FeedForward-Norm-1' % i: [
                    prefix + 'LayerNorm_mlp_ln1/beta',
                    prefix + 'LayerNorm_mlp_ln1/gamma',
                ],
            })

        return mapping
class T5_Base(Transformer):
    """Google's T5 model (base class).
    Note there are two releases: the original structure is referred to as
    t5.1.0, and a later upgraded structure as t5.1.1 (the multilingual T5
    also uses t5.1.1); the two differ slightly.
    t5.1.0: https://github.com/google-research/text-to-text-transfer-transformer
    t5.1.1: https://github.com/google-research/text-to-text-transfer-transformer/blob/master/released_checkpoints.md#t511
    multilingual-t5: https://github.com/google-research/multilingual-t5
    """
    @insert_arguments(version='t5.1.0')
    def __init__(self, **kwargs):
        super(T5_Base, self).__init__(**kwargs)

    def load_variable(self, checkpoint, name):
        """Load a single variable, with T5-specific fix-ups."""
        variable = super(T5_Base, self).load_variable(checkpoint, name)
        if name == 'shared/embedding':
            return self.load_embeddings(variable)
        elif name == 'decoder/logits/kernel':
            # Kernel is (hidden, vocab); transpose so load_embeddings can
            # filter rows, then transpose back.
            return self.load_embeddings(variable.T).T
        elif 'relative_attention_bias' in name:
            return variable.T
        else:
            return variable

    def create_variable(self, name, value, dtype=None):
        """Create a tensorflow variable (undo the transpose from loading)."""
        if 'relative_attention_bias' in name:
            value = value.T
        return super(T5_Base, self).create_variable(name, value, dtype)

    def variable_mapping(self):
        """Map layer names to the official T5 checkpoint weight names."""
        mapping = {
            'Embedding-Token': ['shared/embedding'],
            'Encoder-Embedding-Relative-Position': [
                'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
            ],
            'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
            'Decoder-Embedding-Relative-Position': [
                'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
            ],
            'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
        }

        for i in range(self.num_hidden_layers):
            # Encoder blocks
            prefix = 'encoder/block_%03d/' % i
            mapping.update({
                'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Encoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_001/DenseReluDense/wi/kernel',
                    prefix + 'layer_001/DenseReluDense/wo/kernel',
                ],
                'Encoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
            })
            # Decoder blocks
            prefix = 'decoder/block_%03d/' % i
            mapping.update({
                'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
                    prefix + 'layer_001/EncDecAttention/q',
                    prefix + 'layer_001/EncDecAttention/k',
                    prefix + 'layer_001/EncDecAttention/v',
                    prefix + 'layer_001/EncDecAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_002/DenseReluDense/wi/kernel',
                    prefix + 'layer_002/DenseReluDense/wo/kernel',
                ],
                'Decoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_002/layer_norm/scale',
                ],
            })

        if self.version == 't5.1.1':
            # t5.1.1 renames layer_norm -> rms_norm, has a separate LM
            # head kernel, and splits the FFN input kernel into wi_0/wi_1.
            mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']
            mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']
            mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']
            mapping = {
                k: [i.replace('layer_norm', 'rms_norm') for i in v]
                for k, v in mapping.items()
            }
            for i in range(self.num_hidden_layers):
                for layer in [
                    'Encoder-Transformer-%d-FeedForward' % i,
                    'Decoder-Transformer-%d-FeedForward' % i
                ]:
                    # '.../wi/kernel' -> '.../wi_0/kernel', '.../wi_1/kernel'
                    mapping[layer] = [
                        mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],
                        mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],
                        mapping[layer][1]
                    ]

        return mapping
class T5_Encoder(T5_Base):
    """Google's T5 model (Encoder half)."""
    def get_inputs(self):
        """The T5 encoder's only input is token_ids."""
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Encoder-Input-Token'
        )
        return x_in

    def apply_embeddings(self, inputs):
        """T5 uses only a token embedding; the relative position
        embedding is prepared separately for the attention layers.
        """
        x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Encoder-Embedding-Mapping'
            )

        return x

    def apply_main_layers(self, inputs, index):
        """Build the index-th encoder block.

        Order: LN --> Att --> Add --> LN --> FFN --> Add (pre-norm);
        LN is center-less (RMS-style) with epsilon=1e-6, and attention
        is bias-free and unscaled, matching the T5 weights.
        """
        x = inputs
        z = self.layer_norm_conds[0]

        attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, position_bias],
            layer=MultiHeadAttention,
            arguments={'p_bias': 't5_relative'},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return x

    def apply_final_layers(self, inputs):
        """Final encoder norm and dropout."""
        x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Encoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Output-Dropout'
        )

        return x

    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding (bidirectional), cached."""
        if self.position_bias is None:

            x = inputs
            p = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=True,
                embeddings_initializer=self.initializer,
                name='Encoder-Embedding-Relative-Position'
            )
            self.position_bias = p

        return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
    """Google's T5 model (Decoder half)."""
    def __init__(self, with_lm=True, **kwargs):
        super(T5_Decoder, self).__init__(**kwargs)
        # with_lm: True for the default softmax LM head, or any activation
        # name/callable to use instead; falsy disables the head.
        self.with_lm = with_lm

    def get_inputs(self):
        """The T5 decoder takes the encoder context plus token_ids."""
        c_in = self.apply(
            layer=Input,
            shape=(self.sequence_length, self.hidden_size),
            name='Input-Context'
        )
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Decoder-Input-Token'
        )
        return [c_in, x_in]

    def apply_embeddings(self, inputs):
        """T5 uses only a token embedding; the relative position
        embedding is prepared separately for the attention layers.
        """
        c, x = inputs

        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Decoder-Embedding-Mapping'
            )

        return [c, x]

    def apply_main_layers(self, inputs, index):
        """Build the index-th decoder block (self- + cross-attention).

        Order: LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN -->
        FFN --> Add (pre-norm, center-less LN, bias-free unscaled
        attention, as in the encoder).
        """
        c, x = inputs
        z = self.layer_norm_conds[0]

        self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
        cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
        feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # position_bias[0] is for self-attention, [1] for cross-attention.
        position_bias = self.compute_position_bias([x, c])

        # Self Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % self_attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask, position_bias[0]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': True,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=self_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % self_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
        )

        # Cross Attention
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % cross_attention_name
        )
        x = self.apply(
            inputs=[x, c, c, position_bias[1]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': None,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=cross_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % cross_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
        )

        # Feed Forward
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )

        return [c, x]

    def apply_final_layers(self, inputs):
        """Final norm, dropout, 1/sqrt(hidden) scaling and the LM head."""
        c, x = inputs
        z = self.layer_norm_conds[0]

        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Decoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Output-Dropout'
        )
        # T5 rescales decoder outputs before the (tied) logits layer.
        x = self.apply(
            inputs=x,
            layer=Lambda,
            function=lambda x: x / np.sqrt(self.hidden_size),
            mask=lambda i, m: m,
            name='Decoder-Output-Scale'
        )

        if self.with_lm:
            # Token-probability head
            if self.embedding_size != self.hidden_size:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.embedding_size,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-Mapping'
                )
            lm_activation = 'softmax' if self.with_lm is True else self.with_lm
            if self.version == 't5.1.0':
                # t5.1.0 ties logits to the token embedding.
                x = self.apply(
                    inputs=x,
                    layer=Embedding,
                    arguments={'mode': 'dense'},
                    name='Embedding-Token'
                )
                # NOTE: 'Dencoder' typo below is part of the public layer
                # name; fixing it would rename the layer.
                x = self.apply(
                    inputs=x,
                    layer=Activation,
                    activation=lm_activation,
                    name='Dencoder-Output-LM-Activation'
                )
            else:
                # t5.1.1 uses an untied, bias-free logits projection.
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.vocab_size,
                    activation=lm_activation,
                    use_bias=False,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-LM'
                )

        return x

    def compute_attention_bias(self, inputs=None):
        """Adjust the LM mask's sequence length: measure it against the
        decoder token input (self.inputs[1]) instead of self.inputs[0].
        """
        old_inputs = self.inputs[:]
        self.inputs = [old_inputs[1]]
        mask = super(T5_Decoder, self).compute_attention_bias(inputs)
        self.inputs = old_inputs
        return mask

    def compute_position_bias(self, inputs=None):
        """T5 relative position encoding (unidirectional), cached.

        Both apply() calls use the same layer name, so the embedding
        weights are shared; p1 biases self-attention (x vs x) and p2
        biases cross-attention (x vs encoder context c).
        """
        if self.position_bias is None:

            x, c = inputs
            p1 = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            p2 = self.apply(
                inputs=[x, c],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            self.position_bias = (p1, p2)

        return self.position_bias
class T5(T5_Base):
    """Google's T5 model (full Encoder-Decoder)."""
    def __init__(self, **kwargs):
        super(T5, self).__init__(**kwargs)
        # Share the layer cache so encoder and decoder reuse layers with
        # identical names (e.g. 'Embedding-Token').
        kwargs['layers'] = self.layers
        e_name, d_name = 'Encoder', 'Decoder'
        if 'name' in kwargs:
            e_name = '%s_%s' % (kwargs['name'], e_name)
            d_name = '%s_%s' % (kwargs['name'], d_name)
            del kwargs['name']  # avoid passing 'name' twice below
        self._encoder = T5_Encoder(name=e_name, **kwargs)
        self._decoder = T5_Decoder(name=d_name, **kwargs)

    def build(self, **kwargs):
        """Build Encoder and Decoder, then wire them into one model."""
        self._encoder.build(**kwargs)
        self._decoder.build(**kwargs)
        self.encoder = self._encoder.model
        self.decoder = self._decoder.model
        # Combined inputs: encoder tokens + decoder tokens (the decoder's
        # first input, the context, is fed from the encoder's outputs).
        self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
        self.outputs = self.decoder(
            self.encoder.outputs + self.decoder.inputs[1:]
        )
        self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
    """Add a lower-triangular attention mask (for language-model use)."""
    class LanguageModel(LM_Mask, BaseModel):
        """Derived model with a lower-triangular attention mask."""
        def __init__(self, *args, **kwargs):
            super(LanguageModel, self).__init__(*args, **kwargs)
            # Force the MLM head on so token probabilities are produced.
            self.with_mlm = self.with_mlm or True

    return LanguageModel
def extend_with_unified_language_model(BaseModel):
    """Add UniLM's attention mask (for Seq2Seq use)."""
    class UnifiedLanguageModel(UniLM_Mask, BaseModel):
        """Derived model with UniLM's attention mask.
        UniLM: https://arxiv.org/abs/1905.03197
        """
        def __init__(self, *args, **kwargs):
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
            # Force the MLM head on so token probabilities are produced.
            self.with_mlm = self.with_mlm or True

    return UnifiedLanguageModel
def build_transformer_model(
    config_path=None,
    checkpoint_path=None,
    model='bert',
    application='encoder',
    return_keras_model=True,
    **kwargs
):
    """Build a transformer model from a config file, optionally loading
    checkpoint weights.

    :param config_path: path to a JSON config file (or None).
    :param checkpoint_path: TF checkpoint to load weights from (or None).
    :param model: model name string (key of ``models`` below) or a
        Transformer subclass passed directly.
    :param application: 'encoder', 'lm' or 'unilm'.
    :param return_keras_model: if True return the Keras model, otherwise
        the wrapping Transformer instance.
    """
    configs = {}
    if config_path is not None:
        # Close the config file deterministically (original leaked it).
        with open(config_path) as f:
            configs.update(json.load(f))
    configs.update(kwargs)
    if 'max_position' not in configs:
        configs['max_position'] = configs.get('max_position_embeddings', 512)
    if 'dropout_rate' not in configs:
        configs['dropout_rate'] = configs.get('hidden_dropout_prob')
    if 'segment_vocab_size' not in configs:
        configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)

    models = {
        'bert': BERT,
        'albert': ALBERT,
        'albert_unshared': ALBERT_Unshared,
        'roberta': BERT,
        'nezha': NEZHA,
        'electra': ELECTRA,
        'gpt': GPT,
        'gpt2': GPT2,
        'gpt2_ml': GPT2_ML,
        't5': T5,
        't5_encoder': T5_Encoder,
        't5_decoder': T5_Decoder,
        't5.1.0': T5,
        't5.1.0_encoder': T5_Encoder,
        't5.1.0_decoder': T5_Decoder,
        't5.1.1': T5,
        't5.1.1_encoder': T5_Encoder,
        't5.1.1_decoder': T5_Decoder,
    }

    if is_string(model):
        model = model.lower()
        MODEL = models[model]
        # BUGFIX: only strings have .startswith; the original called it
        # unconditionally and crashed when a model class was passed.
        if model.startswith('t5.1.1'):
            configs['version'] = 't5.1.1'
    else:
        # A custom Transformer subclass was passed directly.
        MODEL = model

    application = application.lower()
    if application in ['lm', 'unilm'] and model in ['electra', 't5']:
        raise ValueError(
            '"%s" model can not be used as "%s" application.\n' %
            (model, application)
        )

    if application == 'lm':
        MODEL = extend_with_language_model(MODEL)
    elif application == 'unilm':
        MODEL = extend_with_unified_language_model(MODEL)

    transformer = MODEL(**configs)
    # Build the Keras model via Transformer.build().
    transformer.build(**configs)

    if checkpoint_path is not None:
        transformer.load_weights_from_checkpoint(checkpoint_path)

    if return_keras_model:
        return transformer.model
    else:
        return transformer
| 33.316331
| 121
| 0.536853
|
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string
from keras.models import Model
import json
class Transformer(object):
    def __init__(
        self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden width of the transformer
        num_hidden_layers,  # number of transformer blocks
        num_attention_heads,  # number of attention heads
        intermediate_size,  # feed-forward inner width
        hidden_act,  # feed-forward activation
        dropout_rate=None,  # dropout probability (None -> 0)
        embedding_size=None,  # embedding width (None -> hidden_size)
        attention_head_size=None,  # per-head width (None -> hidden/heads)
        attention_key_size=None,  # query/key width (None -> head size)
        sequence_length=None,  # fixed sequence length (None -> variable)
        keep_tokens=None,  # token ids to keep from the embedding table
        compound_tokens=None,  # extra tokens built by averaging existing ones
        residual_attention_scores=False,  # carry attention scores across layers
        layers=None,  # externally shared layer cache {name: layer}
        prefix=None,  # prefix prepended to every layer name
        name=None,  # Keras model name
        **kwargs
    ):
        """Initialize the transformer's hyper-parameters and caches."""
        if keep_tokens is not None:
            vocab_size = len(keep_tokens)
        if compound_tokens is not None:
            vocab_size += len(compound_tokens)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = attention_head_size or hidden_size // num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.compound_tokens = compound_tokens
        # Lazily-built caches for attention/position biases and scores.
        self.attention_bias = None
        self.position_bias = None
        self.attention_scores = None
        self.residual_attention_scores = residual_attention_scores
        self.layers = {} if layers is None else layers
        self.prefix = prefix or ''
        self.name = name
        self.built = False
    def build(
        self,
        attention_caches=None,  # {attention layer name: (k_cache, v_cache)}
        layer_norm_cond=None,  # condition tensor for conditional LN
        layer_norm_cond_hidden_size=None,  # hidden units of the condition MLP
        layer_norm_cond_hidden_act=None,  # activation of the condition MLP
        additional_input_layers=None,  # extra Input layers to append
        **kwargs
    ):
        """Assemble the Keras model. Idempotent: a second call is a no-op."""
        if self.built:
            return None
        # Inputs
        inputs = self.get_inputs()
        self.set_inputs(inputs, additional_input_layers)
        self.attention_caches = attention_caches or {}
        # [condition tensor, hidden units, hidden activation] consumed by
        # conditional LayerNormalization layers.
        self.layer_norm_conds = [
            layer_norm_cond,
            layer_norm_cond_hidden_size,
            layer_norm_cond_hidden_act or 'linear',
        ]
        # Outputs
        outputs = self.call(inputs)
        self.set_outputs(outputs)
        # Model
        self.model = Model(self.inputs, self.outputs, name=self.name)
        self.built = True
def call(self, inputs):
outputs = self.apply_embeddings(inputs)
for i in range(self.num_hidden_layers):
outputs = self.apply_main_layers(outputs, i)
outputs = self.apply_final_layers(outputs)
return outputs
def prefixed(self, name):
if name is not None:
return self.prefix + name
    def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
        """Create (or fetch from the cache) a layer and optionally call it.

        ``kwargs`` configure layer construction; ``arguments`` are passed
        at call time. If ``inputs`` is None the layer object itself is
        returned instead of being applied. Layers are cached by prefixed
        name, which is what enables cross-model weight sharing.
        """
        # Dropout with rate 0 is an identity: skip creating the layer.
        if layer is Dropout and self.dropout_rate == 0:
            return inputs

        if layer is MultiHeadAttention and self.residual_attention_scores:
            kwargs['return_attention_scores'] = True

        arguments = arguments or {}
        name = self.prefixed(kwargs.get('name'))
        kwargs['name'] = name
        if name not in self.layers:
            layer = layer(**kwargs)
            # The layer may auto-generate a name when None was given.
            name = layer.name
            self.layers[name] = layer

        if inputs is None:
            return self.layers[name]
        else:
            if isinstance(self.layers[name], MultiHeadAttention):
                if name in self.attention_caches:
                    # Prepend cached key/value sequences (used for
                    # incremental decoding).
                    k_cache, v_cache = self.attention_caches[name]
                    k_name, v_name = name + '-Cached-Key', name + '-Cached-Value'
                    k = Concatenate1D(name=k_name)([k_cache, inputs[1]])
                    v = Concatenate1D(name=v_name)([v_cache, inputs[2]])
                    inputs = inputs[:1] + [k, v] + inputs[3:]
                if self.residual_attention_scores:
                    # Add the previous layer's attention scores as an
                    # extra attention bias, and remember this layer's
                    # scores for the next one.
                    if self.attention_scores is not None:
                        if arguments.get('a_bias'):
                            a_bias = Add(name=name + '-Attention-Bias'
                                        )([inputs[3], self.attention_scores])
                        else:
                            a_bias = self.attention_scores
                        inputs = inputs[:3] + [a_bias] + inputs[4:]
                        arguments['a_bias'] = True
                    o, a = self.layers[name](inputs, **arguments)
                    self.attention_scores = a
                    return o
            return self.layers[name](inputs, **arguments)
    def get_inputs(self):
        """Build and return the model's Input layer(s); subclass responsibility."""
        raise NotImplementedError
    def apply_embeddings(self, inputs):
        """Map raw inputs to embedded tensors; subclass responsibility."""
        raise NotImplementedError
    def apply_main_layers(self, inputs, index):
        """Apply the ``index``-th main transformer block; subclass responsibility."""
        raise NotImplementedError
    def apply_final_layers(self, inputs):
        """Apply the output heads / final transforms; subclass responsibility."""
        raise NotImplementedError
def compute_attention_bias(self, inputs=None):
return self.attention_bias
def compute_position_bias(self, inputs=None):
return self.position_bias
def set_inputs(self, inputs, additional_input_layers=None):
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
inputs = [inputs]
inputs = inputs[:]
if additional_input_layers is not None:
if not isinstance(additional_input_layers, list):
additional_input_layers = [additional_input_layers]
inputs.extend(additional_input_layers)
self.inputs = inputs
if len(inputs) > 1:
self.input = inputs
else:
self.input = inputs[0]
def set_outputs(self, outputs):
if not isinstance(outputs, list):
outputs = [outputs]
outputs = outputs[:]
self.outputs = outputs
if len(outputs) > 1:
self.output = outputs
else:
self.output = outputs[0]
    @property
    def initializer(self):
        """Default kernel initializer: truncated normal, stddev 0.02."""
        return keras.initializers.TruncatedNormal(stddev=0.02)
def simplify(self, inputs):
inputs = [i for i in inputs if i is not None]
if len(inputs) == 1:
inputs = inputs[0]
return inputs
def load_embeddings(self, embeddings):
if self.keep_tokens is not None:
embeddings = embeddings[self.keep_tokens]
if self.compound_tokens is not None:
ext_embeddings = []
for item in self.compound_tokens:
if isinstance(item, list):
item = (item, [1] * len(item))
ext_embeddings.append(
np.average(embeddings[item[0]], 0, item[1])
)
embeddings = np.concatenate([embeddings, ext_embeddings], 0)
return embeddings
def load_variable(self, checkpoint, name):
if isinstance(checkpoint, dict):
return checkpoint[name]
else:
return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value, dtype=None):
        """Create a freshly initialized backend variable named *name* and
        return ``(variable, value)`` — the value to be assigned separately."""
        dtype = dtype or K.floatx()
        return K.variable(
            self.initializer(value.shape, dtype), dtype, name=name
        ), value
    def variable_mapping(self):
        """Map layer names -> checkpoint variable names; subclasses override."""
        return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Copy checkpoint variables into the model's layers.

        ``mapping`` is ``{layer_name: [variable_names]}`` (defaults to
        ``self.variable_mapping()``); entries whose layer is not present in
        ``self.layers`` are silently skipped.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                # When the model's attention key size differs from the
                # checkpoint's head size, project the first `count` weights
                # (presumably the Q/K kernels and, with use_bias, their
                # biases — order defined by MultiHeadAttention; verify)
                # through a random semi-orthogonal matrix W so their last
                # dimension becomes heads * key_size.
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.attention_scale:
                    # Rescale to compensate for the attention score scaling.
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None, dtype=None):
        """Write the model's weights out as a TF1-style checkpoint at *filename*.

        Inverse of ``load_weights_from_checkpoint``; builds a throwaway
        graph/session so the live Keras graph is not disturbed.
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    # create_variable may transform the value (e.g. transposes).
                    variable, value = self.create_variable(name, value, dtype)
                    all_variables.append(variable)
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
class LM_Mask(object):
    """Mixin: lower-triangular attention bias for left-to-right language modeling."""
    def compute_attention_bias(self, inputs=None):
        """Build (once) a (1, 1, seq, seq) additive bias: position i may only
        attend to positions j <= i; future positions get a large negative bias."""
        if self.attention_bias is None:
            def lm_mask(s):
                seq_len = K.shape(s)[1]
                idxs = K.arange(0, seq_len)
                mask = idxs[None, :] <= idxs[:, None]  # True where j <= i
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[None, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[0],
                layer=Lambda,
                function=lm_mask,
                name='Attention-LM-Mask'
            )
        return self.attention_bias
class UniLM_Mask(object):
    """Mixin: UniLM-style seq2seq attention bias derived from the segment input."""
    def compute_attention_bias(self, inputs=None):
        """Build (once) an additive bias from the cumulative sum of segment ids
        (``self.inputs[1]``): a position may attend to any position whose
        cumulative segment id does not exceed its own."""
        if self.attention_bias is None:
            def unilm_mask(s):
                idxs = K.cumsum(s, axis=1)
                mask = idxs[:, None, :] <= idxs[:, :, None]
                mask = K.cast(mask, K.floatx())
                return -(1 - mask[:, None]) * 1e12
            self.attention_bias = self.apply(
                inputs=self.inputs[1],
                layer=Lambda,
                function=unilm_mask,
                name='Attention-UniLM-Mask'
            )
        return self.attention_bias
class BERT(Transformer):
    """BERT encoder built on :class:`Transformer`.

    Adds token/segment/position embeddings, post-LayerNorm transformer
    blocks, optional pooler / NSP / MLM heads, and the variable mapping
    onto Google-style BERT checkpoints.
    """
    def __init__(
        self,
        max_position,
        segment_vocab_size=2,
        with_pool=False,
        with_nsp=False,
        with_mlm=False,
        hierarchical_position=None,
        custom_position_ids=False,
        shared_segment_embeddings=False,
        **kwargs
    ):
        """
        Args:
            max_position: size of the absolute position embedding table.
            segment_vocab_size: number of segment ids; 0 removes the
                segment input entirely.
            with_pool: add the first-token pooler head; a string value is
                used as its activation ('tanh' when simply True).
            with_nsp: add the 2-way NSP softmax head (forces with_pool on).
            with_mlm: add the masked-LM head; a string value is used as its
                output activation ('softmax' when simply True).
            hierarchical_position: forwarded to PositionEmbedding's
                ``hierarchical`` option.
            custom_position_ids: take position ids as an extra model input.
            shared_segment_embeddings: reuse the token embedding table for
                segment ids.
        """
        super(BERT, self).__init__(**kwargs)
        self.max_position = max_position
        self.segment_vocab_size = segment_vocab_size
        self.with_pool = with_pool
        self.with_nsp = with_nsp
        self.with_mlm = with_mlm
        self.hierarchical_position = hierarchical_position
        self.custom_position_ids = custom_position_ids
        self.shared_segment_embeddings = shared_segment_embeddings
        if self.with_nsp and not self.with_pool:
            # The NSP head consumes the pooled first-token vector.
            self.with_pool = True
    def get_inputs(self):
        """Token-id input plus optional segment-id and position-id inputs."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        inputs = [x_in]
        if self.segment_vocab_size > 0:
            s_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Segment'
            )
            inputs.append(s_in)
        if self.custom_position_ids:
            p_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Position'
            )
            inputs.append(p_in)
        return inputs
    def apply_embeddings(self, inputs):
        """Token + segment + position embeddings, LayerNorm, dropout, and an
        optional projection when embedding_size != hidden_size."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        # z: optional conditioning input for conditional LayerNorm.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                # Reuse the token table for segment ids.
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """One post-LayerNorm transformer block: self-attention then FFN,
        each wrapped with dropout, residual Add, and LayerNorm."""
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Optional heads: pooler (+ NSP) over the first token, and MLM over
        all tokens (tied to the token embedding via 'dense' mode).

        Returns the hidden states when no head is enabled, the single head's
        output when one is enabled, or a list of head outputs otherwise.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        outputs = [x]
        if self.with_pool:
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],  # first-token ([CLS]) vector
                name='Pooler'
            )
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            # Reuse the token embedding matrix as the output projection.
            x = self.apply(
                inputs=x,
                layer=Embedding,
                arguments={'mode': 'dense'},
                name='Embedding-Token'
            )
            x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
            mlm_activation = 'softmax' if self.with_mlm is True else self.with_mlm
            x = self.apply(
                inputs=x,
                layer=Activation,
                activation=mlm_activation,
                name='MLM-Activation'
            )
            outputs.append(x)
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
    def load_variable(self, checkpoint, name):
        """Load one checkpoint variable, filtering embeddings through
        load_embeddings and transposing the NSP output weights."""
        variable = super(BERT, self).load_variable(checkpoint, name)
        if name in [
            'bert/embeddings/word_embeddings',
            'cls/predictions/output_bias',
        ]:
            return self.load_embeddings(variable)
        elif name == 'cls/seq_relationship/output_weights':
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Inverse of load_variable's NSP transpose when saving."""
        if name == 'cls/seq_relationship/output_weights':
            value = value.T
        return super(BERT, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Layer name -> BERT checkpoint variable names."""
        mapping = {
            'Embedding-Token': ['bert/embeddings/word_embeddings'],
            'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
            'Embedding-Position': ['bert/embeddings/position_embeddings'],
            'Embedding-Norm': [
                'bert/embeddings/LayerNorm/beta',
                'bert/embeddings/LayerNorm/gamma',
            ],
            'Embedding-Mapping': [
                'bert/encoder/embedding_hidden_mapping_in/kernel',
                'bert/encoder/embedding_hidden_mapping_in/bias',
            ],
            'Pooler-Dense': [
                'bert/pooler/dense/kernel',
                'bert/pooler/dense/bias',
            ],
            'NSP-Proba': [
                'cls/seq_relationship/output_weights',
                'cls/seq_relationship/output_bias',
            ],
            'MLM-Dense': [
                'cls/predictions/transform/dense/kernel',
                'cls/predictions/transform/dense/bias',
            ],
            'MLM-Norm': [
                'cls/predictions/transform/LayerNorm/beta',
                'cls/predictions/transform/LayerNorm/gamma',
            ],
            'MLM-Bias': ['cls/predictions/output_bias'],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'bert/encoder/layer_%d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention/self/query/kernel',
                    prefix + 'attention/self/query/bias',
                    prefix + 'attention/self/key/kernel',
                    prefix + 'attention/self/key/bias',
                    prefix + 'attention/self/value/kernel',
                    prefix + 'attention/self/value/bias',
                    prefix + 'attention/output/dense/kernel',
                    prefix + 'attention/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'attention/output/LayerNorm/beta',
                    prefix + 'attention/output/LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/dense/kernel',
                    prefix + 'intermediate/dense/bias',
                    prefix + 'output/dense/kernel',
                    prefix + 'output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'output/LayerNorm/beta',
                    prefix + 'output/LayerNorm/gamma',
                ],
            })
        return mapping
class ALBERT(BERT):
    """ALBERT: BERT with cross-layer parameter sharing — every block reuses
    the same (index-free) layer names, so one set of weights serves all layers."""
    def apply_main_layers(self, inputs, index):
        """Same post-LayerNorm block as BERT, but layer names carry no index,
        so the cached layers in ``apply`` are shared across all blocks."""
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_bias(index)
        xi, x, arguments = x, [x, x, x], {'a_bias': None}
        if attention_mask is not None:
            arguments['a_bias'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Remap the shared block's layer names onto ALBERT's single
        group_0/inner_group_0 checkpoint variables."""
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
    """BERT-shaped (unshared) model loaded from an ALBERT checkpoint: every
    per-index layer maps to the same shared checkpoint variables."""
    def variable_mapping(self):
        """Point each layer index at the single shared ALBERT block's weights
        (intentional: the checkpoint stores only one block)."""
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        for i in range(self.num_hidden_layers):
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention_1/self/query/kernel',
                    prefix + 'attention_1/self/query/bias',
                    prefix + 'attention_1/self/key/kernel',
                    prefix + 'attention_1/self/key/bias',
                    prefix + 'attention_1/self/value/kernel',
                    prefix + 'attention_1/self/value/bias',
                    prefix + 'attention_1/output/dense/kernel',
                    prefix + 'attention_1/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'LayerNorm/beta',
                    prefix + 'LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'ffn_1/intermediate/dense/kernel',
                    prefix + 'ffn_1/intermediate/dense/bias',
                    prefix + 'ffn_1/intermediate/output/dense/kernel',
                    prefix + 'ffn_1/intermediate/output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'LayerNorm_1/beta',
                    prefix + 'LayerNorm_1/gamma',
                ],
            })
        return mapping
class NEZHA(BERT):
    """BERT variant using relative position encodings ('typical_relative')
    instead of absolute position embeddings."""
    def apply_embeddings(self, inputs):
        """Token (+ segment) embeddings, LayerNorm, dropout, optional mapping.
        No absolute PositionEmbedding — position enters via attention bias."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                # NOTE(review): hard-coded 2 here, whereas BERT uses
                # self.segment_vocab_size — verify this is intentional.
                input_dim=2,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """BERT-style post-LayerNorm block, with a relative position bias
        ('typical_relative') fed into the attention layer."""
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)
        xi, x = x, [x, x, x, position_bias]
        arguments = {'a_bias': None, 'p_bias': 'typical_relative'}
        if attention_mask is not None:
            arguments['a_bias'] = True
            # Attention mask goes before the position bias in the input list.
            x.insert(3, attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Build (once) a frozen sinusoidal relative-position embedding shared
        by all layers.  input_dim = 2 * 64 + 1 — presumably relative distances
        clipped to [-64, 64]; verify against RelativePositionEmbedding."""
        if self.position_bias is None:
            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer='Sinusoidal',
                name='Embedding-Relative-Position',
                trainable=False
            )
        return self.position_bias
class ELECTRA(BERT):
    """ELECTRA: BERT-style encoder with an optional per-token discriminator head."""
    @insert_arguments(with_discriminator=False)
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,
        **kwargs
    ):
        """``with_discriminator`` (injected): False, True ('sigmoid'), or an
        activation name for the discriminator's output."""
        super(ELECTRA, self).__init__(max_position, **kwargs)
    def apply_final_layers(self, inputs):
        """Optionally append the discriminator head: a Dense projection plus a
        per-token scalar prediction."""
        x = inputs
        if self.with_discriminator:
            if self.with_discriminator is True:
                final_activation = 'sigmoid'
            else:
                final_activation = self.with_discriminator
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='Discriminator-Dense'
            )
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=1,
                activation=final_activation,
                kernel_initializer=self.initializer,
                name='Discriminator-Prediction'
            )
        return x
    def load_variable(self, checkpoint, name):
        """Filter the word embeddings through load_embeddings."""
        variable = super(ELECTRA, self).load_variable(checkpoint, name)
        if name == 'electra/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """BERT mapping rewritten onto electra/* names, plus the
        embedding projection and discriminator variables."""
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        mapping['Discriminator-Dense'] = [
            'discriminator_predictions/dense/kernel',
            'discriminator_predictions/dense/bias',
        ]
        mapping['Discriminator-Prediction'] = [
            'discriminator_predictions/dense_1/kernel',
            'discriminator_predictions/dense_1/bias',
        ]
        return mapping
class GPT(LM_Mask, BERT):
    """GPT: BERT's embedding/block machinery plus the left-to-right LM mask
    (via LM_Mask) and an output head tied to the token embedding."""
    @insert_arguments(final_activation='softmax')
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(self, **kwargs):
        """``final_activation`` (injected): activation of the LM output head."""
        super(GPT, self).__init__(**kwargs)
    def apply_embeddings(self, inputs):
        """Token (+ segment) (+ position) embeddings and dropout.  Unlike
        BERT, no LayerNorm is applied after the embedding sum."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_final_layers(self, inputs):
        """LM head: project back through the token embedding ('dense' mode)
        and apply ``final_activation``."""
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            arguments={'mode': 'dense'},
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Activation,
            activation=self.final_activation,
            name='LM-Activation'
        )
        return x
    def load_variable(self, checkpoint, name):
        """Filter the word embeddings through load_embeddings."""
        variable = super(GPT, self).load_variable(checkpoint, name)
        if name == 'gpt/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """BERT mapping rewritten onto gpt/.../transformer names."""
        mapping = super(GPT, self).variable_mapping()
        mapping = {
            k: [
                i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
                for i in v
            ]
            for k, v in mapping.items()
        }
        return mapping
class GPT2(GPT):
    """GPT-2: token-only input and pre-LayerNorm blocks (norm before each
    sublayer, epsilon 1e-5), with a final output norm before the LM head."""
    def get_inputs(self):
        """Single token-id input; no segment/position inputs."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """Token + position embeddings and an optional hidden-size mapping;
        no LayerNorm or dropout at the embedding stage."""
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """Pre-LayerNorm block: Norm -> attention (with LM mask) -> dropout ->
        residual, then Norm -> FFN -> dropout -> residual."""
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        x = self.apply(
            inputs=[x, x, x, attention_mask],
            layer=MultiHeadAttention,
            arguments={'a_bias': True},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Final LayerNorm + dropout, then GPT's tied-embedding LM head."""
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Output-Dropout'
        )
        x = super(GPT2, self).apply_final_layers(x)
        return x
    def variable_mapping(self):
        """GPT mapping with the norms moved to pre-norm ('input/LayerNorm')
        positions, plus the final Output-Norm variables."""
        mapping = super(GPT2, self).variable_mapping()
        mapping = {
            k: [i.replace('output/LayerNorm', 'input/LayerNorm') for i in v]
            for k, v in mapping.items()
        }
        mapping['Output-Norm'] = [
            'gpt/output/LayerNorm/beta',
            'gpt/output/LayerNorm/gamma',
        ]
        return mapping
class GPT2_ML(GPT):
    """GPT2-ML: token-only input, LayerNorm after the embeddings, and blocks
    with post-attention residual but two norms around the FFN (Norm-0/Norm-1)."""
    def get_inputs(self):
        """Single token-id input; no segment/position inputs."""
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """Token + position embeddings, LayerNorm (eps 1e-5), optional mapping."""
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            hierarchical=self.hierarchical_position,
            embeddings_initializer=self.initializer,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """Attention (LM-masked) + residual, then Norm-0 -> FFN -> dropout ->
        residual -> Norm-1; no norm before the attention sublayer."""
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        xi, x, arguments = x, [x, x, x, attention_mask], {'a_bias': True}
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-0' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            epsilon=1e-5,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm-1' % feed_forward_name
        )
        return x
    def load_variable(self, checkpoint, name):
        """Filter the word embeddings through load_embeddings."""
        variable = super(GPT2_ML, self).load_variable(checkpoint, name)
        if name == 'newslm/embeddings/word_embed':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Layer name -> GPT2-ML ('newslm') checkpoint variable names."""
        mapping = {
            'Embedding-Token': ['newslm/embeddings/word_embed'],
            'Embedding-Position': ['newslm/embeddings/pos_embed'],
            'Embedding-Norm': [
                'newslm/embeddings/LayerNorm_embed_norm/beta',
                'newslm/embeddings/LayerNorm_embed_norm/gamma',
            ],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'newslm/layer%02d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'query_layer/kernel',
                    prefix + 'query_layer/bias',
                    prefix + 'key_layer/kernel',
                    prefix + 'key_layer/bias',
                    prefix + 'value_layer/kernel',
                    prefix + 'value_layer/bias',
                    prefix + 'context_projection_layer/kernel',
                    prefix + 'context_projection_layer/bias',
                ],
                'Transformer-%d-FeedForward-Norm-0' % i: [
                    prefix + 'LayerNorm_mlp_ln0/beta',
                    prefix + 'LayerNorm_mlp_ln0/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/kernel',
                    prefix + 'intermediate/bias',
                    prefix + 'output/kernel',
                    prefix + 'output/bias',
                ],
                'Transformer-%d-FeedForward-Norm-1' % i: [
                    prefix + 'LayerNorm_mlp_ln1/beta',
                    prefix + 'LayerNorm_mlp_ln1/gamma',
                ],
            })
        return mapping
class T5_Base(Transformer):
    """Base class shared by the T5 encoder and decoder models.

    Handles checkpoint <-> layer weight-name translation for two
    checkpoint layouts, selected by ``version``: 't5.1.0' (default)
    and 't5.1.1'.
    """
    @insert_arguments(version='t5.1.0')
    def __init__(self, **kwargs):
        super(T5_Base, self).__init__(**kwargs)
    def load_variable(self, checkpoint, name):
        """Load one checkpoint tensor, fixing layout differences."""
        variable = super(T5_Base, self).load_variable(checkpoint, name)
        if name == 'shared/embedding':
            # Shared token embedding goes through the embedding hook.
            return self.load_embeddings(variable)
        elif name == 'decoder/logits/kernel':
            # Untied LM head kernel: apply the embedding hook on the
            # transposed kernel, then transpose back.
            return self.load_embeddings(variable.T).T
        elif 'relative_attention_bias' in name:
            # The relative-position bias is stored transposed relative to
            # the layout used by this implementation.
            return variable.T
        else:
            return variable
    def create_variable(self, name, value, dtype=None):
        """Inverse of load_variable when writing a checkpoint: re-transpose
        the relative-position bias."""
        if 'relative_attention_bias' in name:
            value = value.T
        return super(T5_Base, self).create_variable(name, value, dtype)
    def variable_mapping(self):
        """Return {Keras layer name: [checkpoint weight names]}."""
        mapping = {
            'Embedding-Token': ['shared/embedding'],
            'Encoder-Embedding-Relative-Position': [
                'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
            ],
            'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
            'Decoder-Embedding-Relative-Position': [
                'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
            ],
            'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'encoder/block_%03d/' % i
            # Encoder block i: layer_000 = self-attention, layer_001 = FFN.
            mapping.update({
                'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Encoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_001/DenseReluDense/wi/kernel',
                    prefix + 'layer_001/DenseReluDense/wo/kernel',
                ],
                'Encoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
            })
            prefix = 'decoder/block_%03d/' % i
            # Decoder block i: layer_000 = self-attention,
            # layer_001 = cross-attention, layer_002 = FFN.
            mapping.update({
                'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'layer_000/SelfAttention/q',
                    prefix + 'layer_000/SelfAttention/k',
                    prefix + 'layer_000/SelfAttention/v',
                    prefix + 'layer_000/SelfAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'layer_000/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
                    prefix + 'layer_001/EncDecAttention/q',
                    prefix + 'layer_001/EncDecAttention/k',
                    prefix + 'layer_001/EncDecAttention/v',
                    prefix + 'layer_001/EncDecAttention/o',
                ],
                'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
                    prefix + 'layer_001/layer_norm/scale',
                ],
                'Decoder-Transformer-%d-FeedForward' % i: [
                    prefix + 'layer_002/DenseReluDense/wi/kernel',
                    prefix + 'layer_002/DenseReluDense/wo/kernel',
                ],
                'Decoder-Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'layer_002/layer_norm/scale',
                ],
            })
        if self.version == 't5.1.1':
            # t5.1.1 checkpoints rename layer_norm -> rms_norm and use a
            # separate (untied) LM head.
            mapping['Encoder-Output-Norm'] = ['encoder/rms_norm/scale']
            mapping['Decoder-Output-Norm'] = ['decoder/rms_norm/scale']
            mapping['Decoder-Output-LM'] = ['decoder/logits/kernel']
            mapping = {
                k: [i.replace('layer_norm', 'rms_norm') for i in v]
                for k, v in mapping.items()
            }
            for i in range(self.num_hidden_layers):
                for layer in [
                    'Encoder-Transformer-%d-FeedForward' % i,
                    'Decoder-Transformer-%d-FeedForward' % i
                ]:
                    # t5.1.1 splits the FFN input projection into two
                    # kernels: '.../wi/kernel' becomes '.../wi_0/kernel'
                    # and '.../wi_1/kernel' ('/kernel' is the trailing
                    # 7 characters being sliced around).
                    mapping[layer] = [
                        mapping[layer][0][:-7] + '_0' + mapping[layer][0][-7:],
                        mapping[layer][0][:-7] + '_1' + mapping[layer][0][-7:],
                        mapping[layer][1]
                    ]
        return mapping
class T5_Encoder(T5_Base):
    """Encoder half of T5: pre-norm transformer blocks with a shared
    T5-style relative-position bias and unscaled, bias-free attention."""
    def get_inputs(self):
        """Single input: the token-id sequence."""
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Encoder-Input-Token'
        )
        return x_in
    def apply_embeddings(self, inputs):
        """Token embedding -> dropout -> optional projection to hidden size."""
        x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Embedding-Dropout'
        )
        # Project only when the embedding size differs from the model width.
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Encoder-Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """One encoder block (pre-norm ordering):
        LN -> self-attention -> dropout -> +residual,
        then LN -> feed-forward -> dropout -> +residual."""
        x = inputs
        # z: optional condition tensor for conditional layer normalization.
        z = self.layer_norm_conds[0]
        attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        position_bias = self.compute_position_bias(x)
        xi = x  # residual branch
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # T5 attention: relative-position bias, no projection biases and
        # no 1/sqrt(d) attention scaling.
        x = self.apply(
            inputs=[x, x, x, position_bias],
            layer=MultiHeadAttention,
            arguments={'p_bias': 't5_relative'},
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Final layer normalization + dropout on the last block's output."""
        x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Encoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Encoder-Output-Dropout'
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Create once and cache the bidirectional relative-position bias
        shared by all encoder layers."""
        if self.position_bias is None:
            x = inputs
            p = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=True,
                embeddings_initializer=self.initializer,
                name='Encoder-Embedding-Relative-Position'
            )
            self.position_bias = p
        return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
    """Decoder half of T5: causal self-attention plus cross-attention
    over the encoder output, with an optional language-model head.

    ``with_lm``: True -> softmax LM head; a string -> used as the output
    activation; falsy -> no LM head (raw hidden states are returned).
    """
    def __init__(self, with_lm=True, **kwargs):
        super(T5_Decoder, self).__init__(**kwargs)
        self.with_lm = with_lm
    def get_inputs(self):
        """Two inputs: encoder output (context) and decoder token ids."""
        c_in = self.apply(
            layer=Input,
            shape=(self.sequence_length, self.hidden_size),
            name='Input-Context'
        )
        x_in = self.apply(
            layer=Input,
            shape=(self.sequence_length,),
            name='Decoder-Input-Token'
        )
        return [c_in, x_in]
    def apply_embeddings(self, inputs):
        """Embed decoder tokens; the context tensor passes through untouched."""
        c, x = inputs
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Decoder-Embedding-Mapping'
            )
        return [c, x]
    def apply_main_layers(self, inputs, index):
        """One decoder block (pre-norm ordering):
        LN -> causal self-attention -> +residual,
        LN -> cross-attention over context -> +residual,
        LN -> feed-forward -> +residual."""
        c, x = inputs
        z = self.layer_norm_conds[0]
        self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
        cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
        feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_bias(index)
        # position_bias[0]: self-attention bias (x vs x);
        # position_bias[1]: cross-attention bias (x vs c).
        position_bias = self.compute_position_bias([x, c])
        xi = x  # residual branch
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % self_attention_name
        )
        # Self-attention with both the causal mask (a_bias) and the T5
        # relative-position bias; no projection biases, no scaling.
        x = self.apply(
            inputs=[x, x, x, attention_mask, position_bias[0]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': True,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=self_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % self_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
        )
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % cross_attention_name
        )
        # Cross-attention: queries from the decoder stream, keys/values
        # from the encoder context; no causal mask here (a_bias=None).
        x = self.apply(
            inputs=[x, c, c, position_bias[1]],
            layer=MultiHeadAttention,
            arguments={
                'a_bias': None,
                'p_bias': 't5_relative'
            },
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            out_dim=self.hidden_size,
            key_size=self.attention_key_size,
            use_bias=False,
            attention_scale=False,
            kernel_initializer=self.initializer,
            name=cross_attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % cross_attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
        )
        xi = x
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            use_bias=False,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        return [c, x]
    def apply_final_layers(self, inputs):
        """Final LN, dropout, a 1/sqrt(hidden_size) rescale, and the
        optional LM head (tied to the token embedding for t5.1.0, a
        separate Dense for later versions)."""
        c, x = inputs
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            center=False,
            epsilon=1e-6,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Decoder-Output-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Decoder-Output-Dropout'
        )
        # Rescale logits input by 1/sqrt(hidden_size); the mask is
        # propagated unchanged.
        x = self.apply(
            inputs=x,
            layer=Lambda,
            function=lambda x: x / np.sqrt(self.hidden_size),
            mask=lambda i, m: m,
            name='Decoder-Output-Scale'
        )
        if self.with_lm:
            # Map back to embedding size first if the widths differ.
            if self.embedding_size != self.hidden_size:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.embedding_size,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-Mapping'
                )
            lm_activation = 'softmax' if self.with_lm is True else self.with_lm
            if self.version == 't5.1.0':
                # t5.1.0 ties the LM head to the token embedding matrix.
                x = self.apply(
                    inputs=x,
                    layer=Embedding,
                    arguments={'mode': 'dense'},
                    name='Embedding-Token'
                )
                # NOTE(review): 'Dencoder' is a typo, kept as-is -- layer
                # names are load-bearing for weight mapping and existing
                # saved models; renaming would break them.
                x = self.apply(
                    inputs=x,
                    layer=Activation,
                    activation=lm_activation,
                    name='Dencoder-Output-LM-Activation'
                )
            else:
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=self.vocab_size,
                    activation=lm_activation,
                    use_bias=False,
                    kernel_initializer=self.initializer,
                    name='Decoder-Output-LM'
                )
        return x
    def compute_attention_bias(self, inputs=None):
        """Causal LM mask for decoder self-attention.

        LM_Mask presumably derives the mask from self.inputs[0]; the
        context input is temporarily hidden so the token input (index 1)
        is used instead -- TODO confirm against LM_Mask.
        """
        old_inputs = self.inputs[:]
        self.inputs = [old_inputs[1]]
        mask = super(T5_Decoder, self).compute_attention_bias(inputs)
        self.inputs = old_inputs
        return mask
    def compute_position_bias(self, inputs=None):
        """Create once and cache two unidirectional relative-position
        biases: self-attention (x vs x) and cross-attention (x vs c)."""
        if self.position_bias is None:
            x, c = inputs
            p1 = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            # NOTE(review): same layer name as p1, so apply() appears to
            # reuse one embedding layer for both biases -- confirm against
            # Transformer.apply's layer caching.
            p2 = self.apply(
                inputs=[x, c],
                layer=RelativePositionEmbeddingT5,
                input_dim=32,
                output_dim=self.num_attention_heads,
                bidirectional=False,
                embeddings_initializer=self.initializer,
                name='Decoder-Embedding-Relative-Position'
            )
            self.position_bias = (p1, p2)
        return self.position_bias
class T5(T5_Base):
    """Full encoder-decoder T5: builds a T5_Encoder and a T5_Decoder and
    chains them into one Keras model."""
    def __init__(self, **kwargs):
        super(T5, self).__init__(**kwargs)
        # Pass this instance's layer registry to both halves -- presumably
        # the layer cache, so identically named layers (e.g.
        # 'Embedding-Token') are shared; TODO confirm in Transformer.
        kwargs['layers'] = self.layers
        e_name, d_name = 'Encoder', 'Decoder'
        if 'name' in kwargs:
            # Prefix the sub-model names with the user-supplied name and
            # drop 'name' so the two sub-models do not collide.
            e_name = '%s_%s' % (kwargs['name'], e_name)
            d_name = '%s_%s' % (kwargs['name'], d_name)
            del kwargs['name']
        self._encoder = T5_Encoder(name=e_name, **kwargs)
        self._decoder = T5_Decoder(name=d_name, **kwargs)
    def build(self, **kwargs):
        """Build both halves, then wire encoder output into the decoder."""
        self._encoder.build(**kwargs)
        self._decoder.build(**kwargs)
        self.encoder = self._encoder.model
        self.decoder = self._decoder.model
        # External inputs: encoder tokens + decoder tokens; the decoder's
        # first input (the context) is fed internally from the encoder.
        self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
        self.outputs = self.decoder(
            self.encoder.outputs + self.decoder.inputs[1:]
        )
        self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
    """Return a subclass of *BaseModel* with a causal (left-to-right)
    language-model attention mask mixed in."""
    class LanguageModel(LM_Mask, BaseModel):
        """*BaseModel* specialized for autoregressive language modeling."""
        def __init__(self, *args, **kwargs):
            super(LanguageModel, self).__init__(*args, **kwargs)
            # Ensure an LM output head is attached even when the caller
            # did not request one explicitly.
            if not self.with_mlm:
                self.with_mlm = True
    return LanguageModel
def extend_with_unified_language_model(BaseModel):
    """Return a subclass of *BaseModel* with the UniLM-style
    segment-aware attention mask mixed in."""
    class UnifiedLanguageModel(UniLM_Mask, BaseModel):
        """*BaseModel* specialized for UniLM-style seq2seq training."""
        def __init__(self, *args, **kwargs):
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
            # Ensure an LM output head is attached even when the caller
            # did not request one explicitly.
            if not self.with_mlm:
                self.with_mlm = True
    return UnifiedLanguageModel
def build_transformer_model(
    config_path=None,
    checkpoint_path=None,
    model='bert',
    application='encoder',
    return_keras_model=True,
    **kwargs
):
    """Build a transformer model from a JSON config and optional checkpoint.

    Args:
        config_path: path to a JSON config file; its values are merged
            with **kwargs (kwargs win).
        checkpoint_path: optional TF checkpoint to load weights from.
        model: a model-name string (key of ``models`` below) or a
            Transformer subclass.
        application: 'encoder', 'lm' or 'unilm'.
        return_keras_model: if True return the underlying Keras model,
            otherwise the Transformer wrapper.

    Raises:
        ValueError: for 'lm'/'unilm' applications on models that do not
            support them ('electra', 't5').
    """
    configs = {}
    if config_path is not None:
        # FIX: use a context manager so the config file handle is closed
        # (the original json.load(open(...)) leaked it).
        with open(config_path) as config_file:
            configs.update(json.load(config_file))
    configs.update(kwargs)
    # Translate BERT-style config keys into this library's names.
    if 'max_position' not in configs:
        configs['max_position'] = configs.get('max_position_embeddings', 512)
    if 'dropout_rate' not in configs:
        configs['dropout_rate'] = configs.get('hidden_dropout_prob')
    if 'segment_vocab_size' not in configs:
        configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)
    models = {
        'bert': BERT,
        'albert': ALBERT,
        'albert_unshared': ALBERT_Unshared,
        'roberta': BERT,
        'nezha': NEZHA,
        'electra': ELECTRA,
        'gpt': GPT,
        'gpt2': GPT2,
        'gpt2_ml': GPT2_ML,
        't5': T5,
        't5_encoder': T5_Encoder,
        't5_decoder': T5_Decoder,
        't5.1.0': T5,
        't5.1.0_encoder': T5_Encoder,
        't5.1.0_decoder': T5_Decoder,
        't5.1.1': T5,
        't5.1.1_encoder': T5_Encoder,
        't5.1.1_decoder': T5_Decoder,
    }
    if is_string(model):
        model = model.lower()
        MODEL = models[model]
    else:
        MODEL = model
    application = application.lower()
    if application in ['lm', 'unilm'] and model in ['electra', 't5']:
        raise ValueError(
            '"%s" model can not be used as "%s" application.\n' %
            (model, application)
        )
    if application == 'lm':
        MODEL = extend_with_language_model(MODEL)
    elif application == 'unilm':
        MODEL = extend_with_unified_language_model(MODEL)
    # BUG FIX: guard with is_string -- when a class is passed as `model`,
    # calling model.startswith() raised AttributeError.
    if is_string(model) and model.startswith('t5.1.1'):
        configs['version'] = 't5.1.1'
    transformer = MODEL(**configs)
    transformer.build(**configs)
    if checkpoint_path is not None:
        transformer.load_weights_from_checkpoint(checkpoint_path)
    if return_keras_model:
        return transformer.model
    else:
        return transformer
| true
| true
|
790d665955845251d27d1b13cc3d9cea5240ebcc
| 1,065
|
py
|
Python
|
manage.py
|
Tianny/incepiton_mysql
|
8ef86d19f26e22a39b4fac99ea0b4286c3226b6f
|
[
"MIT"
] | 74
|
2018-01-04T09:36:32.000Z
|
2018-09-06T07:13:57.000Z
|
manage.py
|
Tianny/incepiton-mysql
|
8ef86d19f26e22a39b4fac99ea0b4286c3226b6f
|
[
"MIT"
] | 1
|
2018-02-24T09:00:15.000Z
|
2018-04-20T02:08:52.000Z
|
manage.py
|
Tianny/incepiton_mysql
|
8ef86d19f26e22a39b4fac99ea0b4286c3226b6f
|
[
"MIT"
] | 23
|
2018-01-13T05:26:22.000Z
|
2018-07-05T13:34:07.000Z
|
import os
from werkzeug.security import generate_password_hash
from flask_script import Manager, Shell, Command, Option
from flask_migrate import Migrate, MigrateCommand
from app import db
from app import create_app
from app.models import User
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
class CreateUser(Command):
    """Flask-Script command that creates a User from the command line.

    Usage: ``python manage.py create_user -n NAME -p PASSWORD -e EMAIL``.
    """
    option_list = (
        Option('--name', '-n', dest='name'),
        Option('--password', '-p', dest='password'),
        Option('--email', '-e', dest='email')
    )
    def run(self, name, password, email):
        """Persist a new user; the password is stored as a salted hash,
        never in plain text."""
        user = User()
        user.name = name
        user.hash_pass = generate_password_hash(password)
        user.email = email
        db.session.add(user)
        db.session.commit()
def make_shell_context():
    """Names pre-loaded into the interactive ``manage.py shell`` session."""
    return {'app': app, 'db': db, 'User': User}
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('create_user', CreateUser())
if __name__ == '__main__':
manager.run()
| 24.767442
| 68
| 0.684507
|
import os
from werkzeug.security import generate_password_hash
from flask_script import Manager, Shell, Command, Option
from flask_migrate import Migrate, MigrateCommand
from app import db
from app import create_app
from app.models import User
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
class CreateUser(Command):
option_list = (
Option('--name', '-n', dest='name'),
Option('--password', '-p', dest='password'),
Option('--email', '-e', dest='email')
)
def run(self, name, password, email):
user = User()
user.name = name
user.hash_pass = generate_password_hash(password)
user.email = email
db.session.add(user)
db.session.commit()
def make_shell_context():
return dict(app=app, db=db, User=User)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('create_user', CreateUser())
if __name__ == '__main__':
manager.run()
| true
| true
|
790d674cff7ae664cf013b310e5bf3172325ca2f
| 562
|
py
|
Python
|
src/SRM-684/istr.py
|
mikefeneley/topcoder
|
175a7a05367c0458a900a3fea16af68ae5ee53ec
|
[
"MIT"
] | null | null | null |
src/SRM-684/istr.py
|
mikefeneley/topcoder
|
175a7a05367c0458a900a3fea16af68ae5ee53ec
|
[
"MIT"
] | null | null | null |
src/SRM-684/istr.py
|
mikefeneley/topcoder
|
175a7a05367c0458a900a3fea16af68ae5ee53ec
|
[
"MIT"
] | null | null | null |
import operator
class Istr:
    """TopCoder SRM 684 'Istr' solver."""
    def count(self, s, k):
        """Remove ``k`` characters from ``s`` (each time taking one from a
        currently most frequent letter) and return the sum of squared
        remaining letter frequencies.

        Assumes k <= len(s) (with k > 0 and an empty string, max() over an
        empty dict raises -- same as the original).
        """
        letters = {}
        for letter in s:
            letters[letter] = letters.get(letter, 0) + 1
        for _ in range(0, k):
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; items() is the equivalent view.
            index = max(letters.items(), key=operator.itemgetter(1))[0]
            letters[index] -= 1
        # Score = sum of squared counts of the surviving letters.
        return sum(count * count for count in letters.values())
| 21.615385
| 77
| 0.455516
|
import operator
class Istr:
    """TopCoder SRM 684 'Istr' solver."""
    def count(self, s, k):
        """Remove ``k`` characters from ``s`` (each time taking one from a
        currently most frequent letter) and return the sum of squared
        remaining letter frequencies.

        Assumes k <= len(s) (with k > 0 and an empty string, max() over an
        empty dict raises -- same as the original).
        """
        letters = {}
        for letter in s:
            letters[letter] = letters.get(letter, 0) + 1
        for _ in range(0, k):
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; items() is the equivalent view.
            index = max(letters.items(), key=operator.itemgetter(1))[0]
            letters[index] -= 1
        # Score = sum of squared counts of the surviving letters.
        return sum(count * count for count in letters.values())
| true
| true
|
790d675788b5a41ef28bcd938287045fb6fc3696
| 219
|
py
|
Python
|
src/day_16/src/models/__init__.py
|
chenyuanqi/python-training
|
3bbc2a45304de57cbc23b46da1ba7d38c8fad6e0
|
[
"Apache-2.0"
] | null | null | null |
src/day_16/src/models/__init__.py
|
chenyuanqi/python-training
|
3bbc2a45304de57cbc23b46da1ba7d38c8fad6e0
|
[
"Apache-2.0"
] | null | null | null |
src/day_16/src/models/__init__.py
|
chenyuanqi/python-training
|
3bbc2a45304de57cbc23b46da1ba7d38c8fad6e0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# Created by vikey on 2018/2/17
from __future__ import print_function
from __future__ import unicode_literals
def main():
pass
if __name__ == "__main__":
main()
| 14.6
| 39
| 0.707763
|
from __future__ import print_function
from __future__ import unicode_literals
def main():
pass
if __name__ == "__main__":
main()
| true
| true
|
790d682d59733918b6d1a5934d0417de1016946a
| 2,061
|
py
|
Python
|
simple-telnet-deception.py
|
raresteak/simple-telnet-deception
|
f7d1271ccaf01d5d6b206e88a52402f0240323b3
|
[
"BSD-2-Clause"
] | null | null | null |
simple-telnet-deception.py
|
raresteak/simple-telnet-deception
|
f7d1271ccaf01d5d6b206e88a52402f0240323b3
|
[
"BSD-2-Clause"
] | 2
|
2021-10-05T15:59:10.000Z
|
2021-10-05T16:31:38.000Z
|
simple-telnet-deception.py
|
raresteak/simple-telnet-deception
|
f7d1271ccaf01d5d6b206e88a52402f0240323b3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# purpose: Mimics a simple telnet daemon login prompts and records output
# starts a tcp listener on port and address with variables defined below
# author: Raresteak
# date: 6 October 2021
# version: 3
import datetime
import json
import socket

# Fake telnet login listener: presents a banner, collects a username and
# password attempt, and appends one JSON object per connection to FILE.
HOST = '127.0.0.1'
PORT = 2323
FILE = "stn-results.json"
fh = open(FILE, "a")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((HOST, PORT))
    s.listen()
    while True:
        conn, addr = s.accept()
        with conn:
            timeNow = datetime.datetime.now()
            conn.send(b'Warning: Telnet is not a secure protocol, and it is recommended to use Stelnet.\n\nLogin authentication\n\n\nUsername: ')
            # BUG FIX: initialize both fields up front -- if the client
            # disconnects before answering, the original referenced an
            # unassigned `password` (NameError) when building the record.
            username = ""
            password = ""
            while True:
                data = conn.recv(1024)
                if not data:
                    # Client closed the connection before sending a name.
                    break
                try:
                    username = data.decode("utf-8").rstrip()
                except UnicodeDecodeError:
                    username = "cancelledInput"
                conn.send(b'Password: ')
                while True:
                    data = conn.recv(1024)
                    if not data:
                        break
                    try:
                        password = data.decode("utf-8").rstrip()
                    except UnicodeDecodeError:
                        password = "cancelledInput"
                    # Erase the last echoed character client-side.
                    conn.sendall(b'\b \b')
                    break
                # One credential pair per connection.
                break
            # BUG FIX: build the log line with json.dumps so quotes or
            # backslashes typed by the (untrusted) client cannot corrupt
            # the JSON; the original concatenated raw input into a string.
            output = json.dumps({
                'time': timeNow.strftime('%Y-%m-%dT%H:%M:%S'),
                'src.ip': addr[0],
                'username': username,
                'password': password,
            })
            print(output)
            fh.write(output + "\n")
            # Flush so records survive -- the daemon loop never exits, so
            # the file would otherwise only be flushed on process death.
            fh.flush()
| 37.472727
| 145
| 0.447841
|
import datetime
import socket
HOST = '127.0.0.1'
PORT = 2323
FILE = "stn-results.json"
fh = open(FILE, "a")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen()
while True:
conn, addr = s.accept()
with conn:
timeNow = datetime.datetime.now()
conn.send(b'Warning: Telnet is not a secure protocol, and it is recommended to use Stelnet.\n\nLogin authentication\n\n\nUsername: ')
username = ""
while True:
data = conn.recv(1024)
if not data:
break
else:
try:
username = data.decode("utf-8").rstrip()
except UnicodeDecodeError:
username = "cancelledInput"
conn.send(b'Password: ')
password = ""
while True:
data = conn.recv(1024)
if not data:
break
else:
try:
password = data.decode("utf-8").rstrip()
except UnicodeDecodeError:
password = "cancelledInput"
conn.sendall(b'\b \b')
break
break
output = str("{ \"time\": \""
+ timeNow.strftime('%Y-%m-%dT%H:%M:%S')
+ "\", \"src.ip\": \"" + addr[0]
+ "\", \"username\": \"" + username
+ "\", \"password\": \"" + password + "\" }")
print(output)
fh.write(output + "\n")
| true
| true
|
790d685e82700fc2d434189758494db076f50329
| 6,133
|
py
|
Python
|
predict/ensemble.py
|
DataArk/CHIP2021-Task1-Top1
|
e352198d96d31c60541e4a271f20cc23b3ab6b92
|
[
"Apache-2.0"
] | 15
|
2021-12-18T06:08:55.000Z
|
2022-03-30T00:41:45.000Z
|
predict/ensemble.py
|
confstantine/nlp-task
|
cb152e885bc6f6f1243a12ad90b1c715eb548736
|
[
"Apache-2.0"
] | 1
|
2021-12-20T05:57:37.000Z
|
2021-12-20T13:43:07.000Z
|
predict/ensemble.py
|
DataArk/CHIP2021-Task1-Top1
|
e352198d96d31c60541e4a271f20cc23b3ab6b92
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:49:35.000Z
|
2021-12-27T04:49:35.000Z
|
import codecs
import json
from tqdm import tqdm
import copy


def _load_jsonl(path):
    """Read one JSON-encoded dialogue per line from *path*."""
    # BUG FIX: the original called f.readlines(f) -- passing the file
    # object as the size *hint* is a Python-2 remnant and raises
    # TypeError on Python 3.  readlines() takes no argument here.
    with codecs.open(path, mode='r', encoding='utf8') as f:
        return [json.loads(line) for line in tqdm(f.readlines())]


def _ensemble_attr(base, a2, a3, a4, a5, a6):
    """Cascade vote for one entity's 'attr' field.

    ``base`` and ``a3`` come from macbert2-f, ``a2`` from
    dialog_chinese-macbert, ``a4`` from macbert2-f-f, ``a5`` from mcbert,
    ``a6`` from medbert.  Exactly one branch of the original elif chain
    applies; if its inner condition fails, ``base`` is kept unchanged.
    """
    if base == '阳性' and base != a3:
        return a3
    elif base == '阴性' and base != a3:
        return a3
    elif base != a2:
        if a2 == '不标注':
            return a2
        elif base == '阳性' and a2 == '其他':
            return a2
    elif base != a4:
        if base == '阴性' and a4 == '不标注':
            return a4
    elif base != a5:
        if base == '阴性' and a5 == '不标注':
            return a5
    elif base != a6:
        if base == '阳性' and a6 == '其他':
            return a6
    return base


# Load every model's predictions (same order as the original script).
submit_result2 = _load_jsonl('dialog_chinese-macbert.txt')
submit_result4 = _load_jsonl('macbert2-f-f.txt')
submit_result3 = _load_jsonl('macbert2-f.txt')
submit_result5 = _load_jsonl('mcbert.txt')
submit_result6 = _load_jsonl('medbert.txt')
submit_result = []
# Base predictions are macbert2-f; override each entity's attr in place.
for d_idx, dialogue_ in enumerate(_load_jsonl('macbert2-f.txt')):
    for c_idx, content_ in enumerate(dialogue_['dialog_info']):
        for n_idx, term_ in enumerate(content_['ner']):
            term_['attr'] = _ensemble_attr(
                term_['attr'],
                submit_result2[d_idx]['dialog_info'][c_idx]['ner'][n_idx]['attr'],
                submit_result3[d_idx]['dialog_info'][c_idx]['ner'][n_idx]['attr'],
                submit_result4[d_idx]['dialog_info'][c_idx]['ner'][n_idx]['attr'],
                submit_result5[d_idx]['dialog_info'][c_idx]['ner'][n_idx]['attr'],
                submit_result6[d_idx]['dialog_info'][c_idx]['ner'][n_idx]['attr'],
            )
    submit_result.append(dialogue_)
with open('./result.txt', 'w', encoding='utf-8') as output_data:
    for json_content in submit_result:
        output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
| 51.537815
| 268
| 0.612098
|
import codecs
import json
from tqdm import tqdm
import copy
submit_result2 = []
with codecs.open('dialog_chinese-macbert.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result2.append(dialogue_)
submit_result4 = []
with codecs.open('macbert2-f-f.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result4.append(dialogue_)
submit_result3 = []
with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result3.append(dialogue_)
submit_result5 = []
with codecs.open('mcbert.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result5.append(dialogue_)
submit_result6 = []
with codecs.open('medbert.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result6.append(dialogue_)
submit_result = []
with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f:
reader = f.readlines(f)
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _ner_idx, term_ in enumerate(terms_):
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性':
if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性':
if submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性':
if submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性':
if submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
submit_result.append(dialogue_)
with open('./result.txt', 'w', encoding='utf-8') as output_data:
for json_content in submit_result:
output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
| true
| true
|
790d687e1a550df847e9c24f9d009229bbdf2d1b
| 108
|
py
|
Python
|
tests/test_apps/helloworld/hello.py
|
BlueMoon55/flask_test
|
eb32cf47b336dcf633cf4d85ed03478c003a74d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_apps/helloworld/hello.py
|
BlueMoon55/flask_test
|
eb32cf47b336dcf633cf4d85ed03478c003a74d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_apps/helloworld/hello.py
|
BlueMoon55/flask_test
|
eb32cf47b336dcf633cf4d85ed03478c003a74d7
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
| 15.428571
| 26
| 0.62963
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
| true
| true
|
790d6881b84d575211ea1bf0221cc97754b74dc7
| 2,532
|
py
|
Python
|
examples/pybullet/examples/vrEvent.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 27
|
2018-05-21T14:28:10.000Z
|
2021-12-31T03:12:35.000Z
|
examples/pybullet/examples/vrEvent.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 2
|
2018-02-02T21:26:09.000Z
|
2018-02-06T19:05:24.000Z
|
examples/pybullet/examples/vrEvent.py
|
frk2/bullet3
|
225d823e4dc3f952c6c39920c3f87390383e0602
|
[
"Zlib"
] | 13
|
2019-11-08T12:48:44.000Z
|
2022-01-04T04:13:33.000Z
|
# See pybullet quickstart guide here:
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
# Create a Tiltbrush-like app, drawing lines using any controller
# Line width can be changed
import pybullet as p
CONTROLLER_ID = 0
POSITION=1
ORIENTATION=2
NUM_MOVE_EVENTS=5
BUTTONS=6
ANALOG_AXIS=8
#assume that the VR physics server is already started before
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c<0):
p.connect(p.GUI)
p.setInternalSimFlags(0)#don't load default robot assets etc
p.resetSimulation()
p.loadURDF("plane.urdf")
prevPosition=[[0,0,0]]*p.VR_MAX_CONTROLLERS
colors=[0.,0.5,0.5]*p.VR_MAX_CONTROLLERS
widths = [3]*p.VR_MAX_CONTROLLERS
#use a few default colors
colors[0] = [0,0,0]
colors[1] = [0.5,0,0]
colors[2] = [0,0.5,0]
colors[3] = [0,0,0.5]
colors[4] = [0.5,0.5,0.]
colors[5] = [.5,.5,.5]
controllerId = -1
pt=[0,0,0]
print("waiting for VR controller trigger")
while (controllerId<0):
events = p.getVREvents()
for e in (events):
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
if (e[BUTTONS][32]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
print("Using controllerId="+str(controllerId))
while True:
events = p.getVREvents(allAnalogAxes=1)
for e in (events):
if (e[CONTROLLER_ID]==controllerId ):
for a in range(10):
print("analog axis"+str(a)+"="+str(e[8][a]))
if (e[BUTTONS][33]&p.VR_BUTTON_WAS_TRIGGERED):
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
if (e[BUTTONS][32]&p.VR_BUTTON_WAS_TRIGGERED):
widths[e[CONTROLLER_ID]]=widths[e[0]]+1
if (widths[e[CONTROLLER_ID]]>20):
widths[e[CONTROLLER_ID]] = 1
if (e[BUTTONS][1]&p.VR_BUTTON_WAS_TRIGGERED):
p.resetSimulation()
#p.setGravity(0,0,-10)
p.removeAllUserDebugItems()
p.loadURDF("plane.urdf")
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
pt = prevPosition[e[CONTROLLER_ID]]
#print(prevPosition[e[0]])
print("e[POSITION]")
print(e[POSITION])
print("pt")
print(pt)
diff = [pt[0]-e[POSITION][0],pt[1]-e[POSITION][1],pt[2]-e[POSITION][2]]
lenSqr = diff[0]*diff[0]+diff[1]*diff[1]+diff[2]*diff[2]
ptDistThreshold = 0.01
if (lenSqr>(ptDistThreshold*ptDistThreshold)):
p.addUserDebugLine(e[POSITION],prevPosition[e[CONTROLLER_ID]],colors[e[CONTROLLER_ID]],widths[e[CONTROLLER_ID]])
#p.loadURDF("cube_small.urdf",e[1])
colors[e[CONTROLLER_ID]] = [1-colors[e[CONTROLLER_ID]][0],1-colors[e[CONTROLLER_ID]][1],1-colors[e[CONTROLLER_ID]][2]]
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
| 30.142857
| 122
| 0.699052
|
import pybullet as p
CONTROLLER_ID = 0
POSITION=1
ORIENTATION=2
NUM_MOVE_EVENTS=5
BUTTONS=6
ANALOG_AXIS=8
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c<0):
p.connect(p.GUI)
p.setInternalSimFlags(0)
p.resetSimulation()
p.loadURDF("plane.urdf")
prevPosition=[[0,0,0]]*p.VR_MAX_CONTROLLERS
colors=[0.,0.5,0.5]*p.VR_MAX_CONTROLLERS
widths = [3]*p.VR_MAX_CONTROLLERS
#use a few default colors
colors[0] = [0,0,0]
colors[1] = [0.5,0,0]
colors[2] = [0,0.5,0]
colors[3] = [0,0,0.5]
colors[4] = [0.5,0.5,0.]
colors[5] = [.5,.5,.5]
controllerId = -1
pt=[0,0,0]
print("waiting for VR controller trigger")
while (controllerId<0):
events = p.getVREvents()
for e in (events):
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
if (e[BUTTONS][32]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
print("Using controllerId="+str(controllerId))
while True:
events = p.getVREvents(allAnalogAxes=1)
for e in (events):
if (e[CONTROLLER_ID]==controllerId ):
for a in range(10):
print("analog axis"+str(a)+"="+str(e[8][a]))
if (e[BUTTONS][33]&p.VR_BUTTON_WAS_TRIGGERED):
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
if (e[BUTTONS][32]&p.VR_BUTTON_WAS_TRIGGERED):
widths[e[CONTROLLER_ID]]=widths[e[0]]+1
if (widths[e[CONTROLLER_ID]]>20):
widths[e[CONTROLLER_ID]] = 1
if (e[BUTTONS][1]&p.VR_BUTTON_WAS_TRIGGERED):
p.resetSimulation()
#p.setGravity(0,0,-10)
p.removeAllUserDebugItems()
p.loadURDF("plane.urdf")
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
pt = prevPosition[e[CONTROLLER_ID]]
#print(prevPosition[e[0]])
print("e[POSITION]")
print(e[POSITION])
print("pt")
print(pt)
diff = [pt[0]-e[POSITION][0],pt[1]-e[POSITION][1],pt[2]-e[POSITION][2]]
lenSqr = diff[0]*diff[0]+diff[1]*diff[1]+diff[2]*diff[2]
ptDistThreshold = 0.01
if (lenSqr>(ptDistThreshold*ptDistThreshold)):
p.addUserDebugLine(e[POSITION],prevPosition[e[CONTROLLER_ID]],colors[e[CONTROLLER_ID]],widths[e[CONTROLLER_ID]])
#p.loadURDF("cube_small.urdf",e[1])
colors[e[CONTROLLER_ID]] = [1-colors[e[CONTROLLER_ID]][0],1-colors[e[CONTROLLER_ID]][1],1-colors[e[CONTROLLER_ID]][2]]
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
| true
| true
|
790d6934b720f8d7e4a229513ef60df1a9ed7fd8
| 6,669
|
py
|
Python
|
dino/main.py
|
bartlomiej-kedziora/games
|
00ff00566bd7c0cd444161f3edf6cd7e1f4abb62
|
[
"MIT"
] | null | null | null |
dino/main.py
|
bartlomiej-kedziora/games
|
00ff00566bd7c0cd444161f3edf6cd7e1f4abb62
|
[
"MIT"
] | null | null | null |
dino/main.py
|
bartlomiej-kedziora/games
|
00ff00566bd7c0cd444161f3edf6cd7e1f4abb62
|
[
"MIT"
] | null | null | null |
import pgzero
import pgzrun
import random
from pgzero.actor import Actor
__all__ = ["pgzrun", "pgzero"]
from pgzero.clock import clock
from pgzero.keyboard import keyboard
from pgzero.loaders import sounds
clouds = [Actor('cloud1', (200, 200)),
Actor('cloud2', (400, 300)),
Actor('cloud3', (600, 200)),
Actor('cloud1', (800, 300))]
obstacles = [Actor('cactus', (random.randint(900, 1000), 495)),
Actor('cactus', (random.randint(1200, 1500), 495)),
Actor('cactus', (random.randint(1500, 2000), 495))]
player = Actor('p3_stand', (100, 484))
# 0 - game not started
# 1 - game just stared
# 2 - finished
game = 0
# frame that is currently running
frame = 0
# player movement speed and direction
jump = 0
# 0 - jump is available
# 1 - jump is forbidden
jump_blocked = 0
cloud_speed = 2
game_time = 0
# cactus movement speed
game_speed = 8
# 0 - game running
# 1 - game blocked
jump_unblocked = 0
def draw():
global game
screen.clear()
screen.fill('#cff4f7')
for i in range((screen.width // 70) + 1):
screen.blit('grass', (i * 70, screen.height - 70))
for cloud in clouds:
cloud.draw()
for obstacle in obstacles:
obstacle.draw()
screen.draw.text(
align_text_time(game_time),
midright=(screen.width - 50, 50),
fontname="roboto_mono_bold",
color="orange",
fontsize=45
)
player.draw()
if game == 0:
screen.draw.text(
"Wcisnij spacje",
center=(screen.width / 2, screen.height / 2),
color="orange",
fontsize=60
)
if game == 2:
screen.draw.text(
"Koniec gry",
center=(screen.width / 2, screen.height / 2),
color="red",
fontsize=60
)
screen.draw.text(
"Wcisnij spacje aby zagrac jeszcze raz",
center=(screen.width / 2, screen.height - 200),
color="red",
fontsize=30
)
def update():
global game
global jump
global jump_blocked
global jump_unblocked
if keyboard.SPACE and jump_unblocked == 0:
if game == 0 or game == 2:
jump_blocked = 1
clock.schedule_unique(unblock_jump, 0.3)
reset()
game = 1
if jump_blocked == 0:
jump = -18
jump_blocked = 1
sounds.jingles_jump.play()
animation()
jump_fall()
move_cloud()
move_obstacle()
check_collision()
# change difficulty level, increase game and clouds speed
def change_difficulty_level():
global game_speed
global cloud_speed
if game_speed < 16:
game_speed += 1
cloud_speed += 1
# reset global variables
def reset():
global frame
global game
global jump
global jump_blocked
global cloud_speed
global game_speed
global game_time
if game == 2:
frame = 0
game = 0
jump = 0
jump_blocked = 1
cloud_speed = 2
game_speed = 8
game_time = 0
player.pos = (100, 484)
clouds[0].pos = (200, 200)
clouds[1].pos = (400, 300)
clouds[2].pos = (600, 200)
clouds[3].pos = (800, 300)
obstacles[0].pos = (random.randint(900, 1000), 495)
obstacles[1].pos = (random.randint(1200, 1500), 495)
obstacles[2].pos = (random.randint(1500, 2000), 495)
clock.unschedule(change_difficulty_level)
# change difficulty level every 20s
clock.schedule_interval(change_difficulty_level, 20)
def unblock_game():
global jump_unblocked
jump_unblocked = 0
# check collision with cactus
def check_collision():
global game
global jump_unblocked
if game == 1:
for i in obstacles:
if player.collidepoint(i.x, i.y):
game = 2
sounds.jingles_end.play()
jump_unblocked = 1
# unblock game in 2 sec
clock.schedule_unique(unblock_game, 2.0)
def move_obstacle():
global game_speed
global game
if game == 1:
for i in range(len(obstacles)):
# decrease x for all obstacles about speed value
obstacles[i].x -= game_speed
# if obstacles is out of screen get random position
if obstacles[i].x + 35 < 0:
obstacles[i].x = random.randint(900, 1500)
# if obstacles have the same position as other or is too close, move it about 400
for j in range(0, len(obstacles)):
if j != i and abs(obstacles[i].x - obstacles[j].x < 300):
obstacles[i].x += 400
# triggered every 0.1s increasing game time about 1s
def measure_time():
global game_time
global game
if game == 0:
game_time = 0
elif game == 1:
game_time +=1
def align_text_time(time):
text = "0" * (5 - len(str(time)))
text += str(time)
return text
def move_cloud():
global cloud_speed
global game
if game == 1:
# move clouds x pos about cloud speed
for cloud in clouds:
cloud.x -= cloud_speed
# if cloud out of screen move it to right side
if cloud.x + 64 < 0:
cloud.x = screen.width + 32
def unblock_jump():
global jump_blocked
jump_blocked = 0
def jump_fall():
global jump
global frame
if jump != 0:
# block animation
frame = 0
player.y += jump
# if player on the ground unblock
if player.y >= 484:
unblock_jump()
jump = 0
# if player jumped start falling
if player.y <= 250:
jump *= (-1)
# player animation
def animation():
global frame
if game == 1:
if frame == 0:
player.image = 'p3_walk01'
if frame == 1:
player.image = 'p3_walk02'
if frame == 2:
player.image = 'p3_walk03'
if frame == 3:
player.image = 'p3_walk04'
if frame == 4:
player.image = 'p3_walk05'
if frame == 5:
player.image = 'p3_walk06'
if frame == 6:
player.image = 'p3_walk07'
if frame == 7:
player.image = 'p3_walk08'
if frame == 8:
player.image = 'p3_walk09'
if frame == 9:
player.image = 'p3_walk10'
if frame == 10:
player.image = 'p3_walk11'
frame += 1
# result is 0 or less than 11
frame %= 11
clock.schedule_interval(measure_time, 0.1)
clock.schedule_interval(change_difficulty_level, 20)
pgzrun.go()
| 24.791822
| 97
| 0.562453
|
import pgzero
import pgzrun
import random
from pgzero.actor import Actor
__all__ = ["pgzrun", "pgzero"]
from pgzero.clock import clock
from pgzero.keyboard import keyboard
from pgzero.loaders import sounds
clouds = [Actor('cloud1', (200, 200)),
Actor('cloud2', (400, 300)),
Actor('cloud3', (600, 200)),
Actor('cloud1', (800, 300))]
obstacles = [Actor('cactus', (random.randint(900, 1000), 495)),
Actor('cactus', (random.randint(1200, 1500), 495)),
Actor('cactus', (random.randint(1500, 2000), 495))]
player = Actor('p3_stand', (100, 484))
game = 0
frame = 0
jump = 0
jump_blocked = 0
cloud_speed = 2
game_time = 0
game_speed = 8
jump_unblocked = 0
def draw():
global game
screen.clear()
screen.fill('#cff4f7')
for i in range((screen.width // 70) + 1):
screen.blit('grass', (i * 70, screen.height - 70))
for cloud in clouds:
cloud.draw()
for obstacle in obstacles:
obstacle.draw()
screen.draw.text(
align_text_time(game_time),
midright=(screen.width - 50, 50),
fontname="roboto_mono_bold",
color="orange",
fontsize=45
)
player.draw()
if game == 0:
screen.draw.text(
"Wcisnij spacje",
center=(screen.width / 2, screen.height / 2),
color="orange",
fontsize=60
)
if game == 2:
screen.draw.text(
"Koniec gry",
center=(screen.width / 2, screen.height / 2),
color="red",
fontsize=60
)
screen.draw.text(
"Wcisnij spacje aby zagrac jeszcze raz",
center=(screen.width / 2, screen.height - 200),
color="red",
fontsize=30
)
def update():
global game
global jump
global jump_blocked
global jump_unblocked
if keyboard.SPACE and jump_unblocked == 0:
if game == 0 or game == 2:
jump_blocked = 1
clock.schedule_unique(unblock_jump, 0.3)
reset()
game = 1
if jump_blocked == 0:
jump = -18
jump_blocked = 1
sounds.jingles_jump.play()
animation()
jump_fall()
move_cloud()
move_obstacle()
check_collision()
def change_difficulty_level():
global game_speed
global cloud_speed
if game_speed < 16:
game_speed += 1
cloud_speed += 1
def reset():
global frame
global game
global jump
global jump_blocked
global cloud_speed
global game_speed
global game_time
if game == 2:
frame = 0
game = 0
jump = 0
jump_blocked = 1
cloud_speed = 2
game_speed = 8
game_time = 0
player.pos = (100, 484)
clouds[0].pos = (200, 200)
clouds[1].pos = (400, 300)
clouds[2].pos = (600, 200)
clouds[3].pos = (800, 300)
obstacles[0].pos = (random.randint(900, 1000), 495)
obstacles[1].pos = (random.randint(1200, 1500), 495)
obstacles[2].pos = (random.randint(1500, 2000), 495)
clock.unschedule(change_difficulty_level)
clock.schedule_interval(change_difficulty_level, 20)
def unblock_game():
global jump_unblocked
jump_unblocked = 0
def check_collision():
global game
global jump_unblocked
if game == 1:
for i in obstacles:
if player.collidepoint(i.x, i.y):
game = 2
sounds.jingles_end.play()
jump_unblocked = 1
clock.schedule_unique(unblock_game, 2.0)
def move_obstacle():
global game_speed
global game
if game == 1:
for i in range(len(obstacles)):
obstacles[i].x -= game_speed
if obstacles[i].x + 35 < 0:
obstacles[i].x = random.randint(900, 1500)
for j in range(0, len(obstacles)):
if j != i and abs(obstacles[i].x - obstacles[j].x < 300):
obstacles[i].x += 400
def measure_time():
global game_time
global game
if game == 0:
game_time = 0
elif game == 1:
game_time +=1
def align_text_time(time):
text = "0" * (5 - len(str(time)))
text += str(time)
return text
def move_cloud():
global cloud_speed
global game
if game == 1:
for cloud in clouds:
cloud.x -= cloud_speed
if cloud.x + 64 < 0:
cloud.x = screen.width + 32
def unblock_jump():
global jump_blocked
jump_blocked = 0
def jump_fall():
global jump
global frame
if jump != 0:
frame = 0
player.y += jump
if player.y >= 484:
unblock_jump()
jump = 0
if player.y <= 250:
jump *= (-1)
def animation():
global frame
if game == 1:
if frame == 0:
player.image = 'p3_walk01'
if frame == 1:
player.image = 'p3_walk02'
if frame == 2:
player.image = 'p3_walk03'
if frame == 3:
player.image = 'p3_walk04'
if frame == 4:
player.image = 'p3_walk05'
if frame == 5:
player.image = 'p3_walk06'
if frame == 6:
player.image = 'p3_walk07'
if frame == 7:
player.image = 'p3_walk08'
if frame == 8:
player.image = 'p3_walk09'
if frame == 9:
player.image = 'p3_walk10'
if frame == 10:
player.image = 'p3_walk11'
frame += 1
frame %= 11
clock.schedule_interval(measure_time, 0.1)
clock.schedule_interval(change_difficulty_level, 20)
pgzrun.go()
| true
| true
|
790d69a0863f817c9600d9648b29c56aefa86acd
| 2,180
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/release/v4_1/models/release_task_attachment.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/release/v4_1/models/release_task_attachment.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/release/v4_1/models/release_task_attachment.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ReleaseTaskAttachment(Model):
"""ReleaseTaskAttachment.
:param _links:
:type _links: :class:`ReferenceLinks <release.v4_1.models.ReferenceLinks>`
:param created_on:
:type created_on: datetime
:param modified_by:
:type modified_by: :class:`IdentityRef <release.v4_1.models.IdentityRef>`
:param modified_on:
:type modified_on: datetime
:param name:
:type name: str
:param record_id:
:type record_id: str
:param timeline_id:
:type timeline_id: str
:param type:
:type type: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'record_id': {'key': 'recordId', 'type': 'str'},
'timeline_id': {'key': 'timelineId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, _links=None, created_on=None, modified_by=None, modified_on=None, name=None, record_id=None, timeline_id=None, type=None):
super(ReleaseTaskAttachment, self).__init__()
self._links = _links
self.created_on = created_on
self.modified_by = modified_by
self.modified_on = modified_on
self.name = name
self.record_id = record_id
self.timeline_id = timeline_id
self.type = type
| 40.37037
| 146
| 0.544495
|
from msrest.serialization import Model
class ReleaseTaskAttachment(Model):
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
'modified_by': {'key': 'modifiedBy', 'type': 'IdentityRef'},
'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'record_id': {'key': 'recordId', 'type': 'str'},
'timeline_id': {'key': 'timelineId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'}
}
def __init__(self, _links=None, created_on=None, modified_by=None, modified_on=None, name=None, record_id=None, timeline_id=None, type=None):
super(ReleaseTaskAttachment, self).__init__()
self._links = _links
self.created_on = created_on
self.modified_by = modified_by
self.modified_on = modified_on
self.name = name
self.record_id = record_id
self.timeline_id = timeline_id
self.type = type
| true
| true
|
790d69c2fe14e452fd81adf6722dffb4d6179f1f
| 41
|
py
|
Python
|
recover_unseeded.py
|
maziara/deluge-feed-innoreader
|
874ae84d5f75569a6749e44f8c525e484aa801b7
|
[
"MIT"
] | 8
|
2016-07-31T01:58:00.000Z
|
2020-09-30T01:18:34.000Z
|
recover_unseeded.py
|
maziara/deluge-feed-innoreader
|
874ae84d5f75569a6749e44f8c525e484aa801b7
|
[
"MIT"
] | null | null | null |
recover_unseeded.py
|
maziara/deluge-feed-innoreader
|
874ae84d5f75569a6749e44f8c525e484aa801b7
|
[
"MIT"
] | null | null | null |
import main
main.recover_unseeded_items()
| 20.5
| 29
| 0.878049
|
import main
main.recover_unseeded_items()
| true
| true
|
790d6a1d89941bbe74cb2ee771b7995accfaed15
| 6,752
|
py
|
Python
|
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
zidarsk8/ggrc-core
|
2509c989eddf434249d3bef50c21e08dbf56c1a4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-01-03T02:49:23.000Z
|
2018-01-03T02:49:23.000Z
|
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
zidarsk8/ggrc-core
|
2509c989eddf434249d3bef50c21e08dbf56c1a4
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2015-04-25T13:15:15.000Z
|
2019-03-21T22:38:01.000Z
|
src/ggrc_workflows/migrations/versions/20150707143127_44047daa31a9_add_non_adjusted_next_cycle_start_date.py
|
zidarsk8/ggrc-core
|
2509c989eddf434249d3bef50c21e08dbf56c1a4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add non-adjusted next cycle start date
Revision ID: 44047daa31a9
Revises: 1431e7094e26
Create Date: 2015-07-07 14:31:27.780564
"""
# revision identifiers, used by Alembic.
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from datetime import date
from ggrc.app import app
from ggrc import settings, db
import ggrc_workflows.models as models
from ggrc_workflows import adjust_next_cycle_start_date
from ggrc_workflows.services.workflow_cycle_calculator import \
get_cycle_calculator
def upgrade():
op.add_column('workflows',
sa.Column('non_adjusted_next_cycle_start_date',
sa.Date(), nullable=True))
# If somebody deleted all the tasks we must clear the next cycle start
# date
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date < date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
app.logger.warning(
"Removing NCSD from expired WF {} because no tasks are "
"set up. Current NCSD: {}".format(
workflow.id,
workflow.next_cycle_start_date
))
workflow.next_cycle_start_date = None
db.session.add(workflow)
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.non_adjusted_next_cycle_start_date == None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date >= date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
# We must skip tasks that don't have start days and end days defined
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
append_msg = ""
if workflow.next_cycle_start_date:
workflow.next_cycle_start_date = None
append_msg += (" Removing existing next cycle start date "
"because none are configured.")
db.session.add(workflow)
app.logger.warning(
"Skipping active WF {0} because no tasks "
"are set up.{1}".format(
workflow.id,
append_msg
))
continue
pre_compute_ncsd = workflow.next_cycle_start_date
last_cycle_start_date = None
if workflow.cycles:
last_cycle_start_date = max([c.start_date for c in workflow.cycles])
if last_cycle_start_date:
base_date = last_cycle_start_date
else:
base_date = base_date.today()
base_date = max(base_date, workflow.next_cycle_start_date)
calculator = get_cycle_calculator(workflow, base_date=base_date)
if workflow.frequency in {"weekly", "monthly"}:
nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_month = None
else:
nancsd_month, nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date)
if last_cycle_start_date:
while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
else:
base_date = base_date - calculator.time_delta
while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
workflow.non_adjusted_next_cycle_start_date = nancsd_date
workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
post_compute_ncsd = workflow.next_cycle_start_date
start_dates = ["{}/{}".format(
task.relative_start_month,
task.relative_start_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
end_dates = ["{}/{}".format(
task.relative_end_month,
task.relative_end_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
if pre_compute_ncsd != post_compute_ncsd:
app.logger.warning(
"Adjusted NCSD for workflow {}. "
"Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
"tasks start: {}, tasks end: {},".format(
workflow.id,
workflow.frequency[:2],
pre_compute_ncsd,
last_cycle_start_date,
post_compute_ncsd,
workflow.non_adjusted_next_cycle_start_date,
start_dates,
end_dates))
db.session.add(workflow)
# Save
db.session.commit()
def downgrade():
op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| 38.146893
| 80
| 0.598045
|
revision = '44047daa31a9'
down_revision = '4840f4760f4b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from datetime import date
from ggrc.app import app
from ggrc import settings, db
import ggrc_workflows.models as models
from ggrc_workflows import adjust_next_cycle_start_date
from ggrc_workflows.services.workflow_cycle_calculator import \
get_cycle_calculator
def upgrade():
op.add_column('workflows',
sa.Column('non_adjusted_next_cycle_start_date',
sa.Date(), nullable=True))
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date < date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
app.logger.warning(
"Removing NCSD from expired WF {} because no tasks are "
"set up. Current NCSD: {}".format(
workflow.id,
workflow.next_cycle_start_date
))
workflow.next_cycle_start_date = None
db.session.add(workflow)
workflows = db.session.query(models.Workflow) \
.filter(
models.Workflow.next_cycle_start_date != None,
models.Workflow.non_adjusted_next_cycle_start_date == None,
models.Workflow.recurrences == True,
models.Workflow.status == 'Active',
models.Workflow.next_cycle_start_date >= date.today()
).all()
for workflow in workflows:
tasks_start_days = [task.relative_start_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
tasks_end_days = [task.relative_end_day
for tg in workflow.task_groups
for task in tg.task_group_tasks]
if ((not all(tasks_start_days) and not all(tasks_end_days)) or
(not tasks_start_days and not tasks_end_days)):
append_msg = ""
if workflow.next_cycle_start_date:
workflow.next_cycle_start_date = None
append_msg += (" Removing existing next cycle start date "
"because none are configured.")
db.session.add(workflow)
app.logger.warning(
"Skipping active WF {0} because no tasks "
"are set up.{1}".format(
workflow.id,
append_msg
))
continue
pre_compute_ncsd = workflow.next_cycle_start_date
last_cycle_start_date = None
if workflow.cycles:
last_cycle_start_date = max([c.start_date for c in workflow.cycles])
if last_cycle_start_date:
base_date = last_cycle_start_date
else:
base_date = base_date.today()
base_date = max(base_date, workflow.next_cycle_start_date)
calculator = get_cycle_calculator(workflow, base_date=base_date)
if workflow.frequency in {"weekly", "monthly"}:
nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_month = None
else:
nancsd_month, nancsd_day = min(
v['relative_start'] for v in calculator.reified_tasks.values())
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date)
if last_cycle_start_date:
while calculator.adjust_date(nancsd_date) <= last_cycle_start_date:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
else:
base_date = base_date - calculator.time_delta
while calculator.adjust_date(nancsd_date) <= pre_compute_ncsd:
base_date = base_date + calculator.time_delta
nancsd_date = calculator.relative_day_to_date(
relative_day=nancsd_day,
relative_month=nancsd_month,
base_date=base_date
)
workflow.non_adjusted_next_cycle_start_date = nancsd_date
workflow.next_cycle_start_date = calculator.adjust_date(nancsd_date)
post_compute_ncsd = workflow.next_cycle_start_date
start_dates = ["{}/{}".format(
task.relative_start_month,
task.relative_start_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
end_dates = ["{}/{}".format(
task.relative_end_month,
task.relative_end_day) for tg in workflow.task_groups
for task in tg.task_group_tasks]
if pre_compute_ncsd != post_compute_ncsd:
app.logger.warning(
"Adjusted NCSD for workflow {}. "
"Freq: {}, PRE: {}, Last cycle: {}, POST: {}, NON: {},"
"tasks start: {}, tasks end: {},".format(
workflow.id,
workflow.frequency[:2],
pre_compute_ncsd,
last_cycle_start_date,
post_compute_ncsd,
workflow.non_adjusted_next_cycle_start_date,
start_dates,
end_dates))
db.session.add(workflow)
# Save
db.session.commit()
def downgrade():
op.drop_column('workflows', 'non_adjusted_next_cycle_start_date')
| true
| true
|
790d6a227b95a008a2221d0e4dbd56cad8afaad3
| 152,075
|
py
|
Python
|
kapua-client/python-client/swagger_client/api/devices_api.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | null | null | null |
kapua-client/python-client/swagger_client/api/devices_api.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | null | null | null |
kapua-client/python-client/swagger_client/api/devices_api.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DevicesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def device_asset_filtered_get(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_filtered_get(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The filter of the request
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_asset_filtered_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_asset_filtered_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_asset_filtered_get_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_filtered_get_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The filter of the request
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_asset_filtered_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_asset_filtered_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_asset_filtered_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/assets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceAssets', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_asset_get(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_get(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_asset_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_asset_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_asset_get_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_get_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_asset_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_asset_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_asset_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/assets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceAssets', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_asset_read(self, scope_id, device_id, **kwargs): # noqa: E501
"""Reads asset channel values # noqa: E501
Returns the value read from the asset channel # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_read(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The filter of the read request
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_asset_read_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_asset_read_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_asset_read_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Reads asset channel values # noqa: E501
Returns the value read from the asset channel # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_read_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The filter of the read request
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_asset_read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_asset_read`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_asset_read`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/assets/_read', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceAssets', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_asset_write(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_write(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The values to write to the asset channels
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_asset_write_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_asset_write_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_asset_write_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of assets # noqa: E501
Returns the list of all the Assets installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_asset_write_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:param DeviceAssets body: The values to write to the asset channels
:return: DeviceAssets
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_asset_write" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_asset_write`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_asset_write`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/assets/_write', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceAssets', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_bundle_get(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of bundles # noqa: E501
Returns the list of all the Bundles installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_get(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceBundles
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_bundle_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_bundle_get_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of bundles # noqa: E501
Returns the list of all the Bundles installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_get_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceBundles
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_bundle_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/bundles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceBundles', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_bundle_start(self, scope_id, device_id, bundle_id, **kwargs): # noqa: E501
"""Start a bundle # noqa: E501
Starts the specified bundle # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_start(scope_id, device_id, bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param str bundle_id: the ID of the bundle to start (required)
:param int timeout: The timeout of the operation in milliseconds
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_start_with_http_info(scope_id, device_id, bundle_id, **kwargs) # noqa: E501
else:
(data) = self.device_bundle_start_with_http_info(scope_id, device_id, bundle_id, **kwargs) # noqa: E501
return data
def device_bundle_start_with_http_info(self, scope_id, device_id, bundle_id, **kwargs): # noqa: E501
"""Start a bundle # noqa: E501
Starts the specified bundle # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_start_with_http_info(scope_id, device_id, bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param str bundle_id: the ID of the bundle to start (required)
:param int timeout: The timeout of the operation in milliseconds
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'bundle_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_bundle_start" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_start`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_start`") # noqa: E501
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params or
params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `device_bundle_start`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_start', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_bundle_stop(self, scope_id, device_id, bundle_id, **kwargs): # noqa: E501
"""Stop a bundle # noqa: E501
Stops the specified bundle # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_stop(scope_id, device_id, bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param str bundle_id: the ID of the bundle to stop (required)
:param int timeout: The timeout of the operation in milliseconds
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_stop_with_http_info(scope_id, device_id, bundle_id, **kwargs) # noqa: E501
else:
(data) = self.device_bundle_stop_with_http_info(scope_id, device_id, bundle_id, **kwargs) # noqa: E501
return data
def device_bundle_stop_with_http_info(self, scope_id, device_id, bundle_id, **kwargs): # noqa: E501
"""Stop a bundle # noqa: E501
Stops the specified bundle # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_bundle_stop_with_http_info(scope_id, device_id, bundle_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device. (required)
:param str device_id: The id of the device (required)
:param str bundle_id: the ID of the bundle to stop (required)
:param int timeout: The timeout of the operation in milliseconds
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'bundle_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_bundle_stop" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_stop`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_stop`") # noqa: E501
# verify the required parameter 'bundle_id' is set
if ('bundle_id' not in params or
params['bundle_id'] is None):
raise ValueError("Missing the required parameter `bundle_id` when calling `device_bundle_stop`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'bundle_id' in params:
path_params['bundleId'] = params['bundle_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_stop', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_command_execute(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Executes a command # noqa: E501
Executes a remote command on a device and return the command output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_command_execute(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device (required)
:param str device_id: The id of the device (required)
:param DeviceCommandInput body: The input command (required)
:param int timeout: The timeout of the command execution
:return: DeviceCommandOutput
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_command_execute_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_command_execute_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_command_execute_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Executes a command # noqa: E501
Executes a remote command on a device and return the command output. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_command_execute_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the device (required)
:param str device_id: The id of the device (required)
:param DeviceCommandInput body: The input command (required)
:param int timeout: The timeout of the command execution
:return: DeviceCommandOutput
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_command_execute" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_command_execute`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_command_execute`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_command_execute`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/commands/_execute', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceCommandOutput', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_configuration_component_get(self, scope_id, device_id, component_id, **kwargs): # noqa: E501
"""Gets the configuration of a component on a device # noqa: E501
Returns the configuration of a device or the configuration of the OSGi component identified with specified PID (service's persistent identity). In the OSGi framework, the service's persistent identity is defined as the name attribute of the Component Descriptor XML file; at runtime, the same value is also available in the component.name and in the service.pid attributes of the Component Configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_component_get(scope_id, device_id, component_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param str component_id: An optional id of the component to get the configuration for (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_component_get_with_http_info(scope_id, device_id, component_id, **kwargs) # noqa: E501
else:
(data) = self.device_configuration_component_get_with_http_info(scope_id, device_id, component_id, **kwargs) # noqa: E501
return data
def device_configuration_component_get_with_http_info(self, scope_id, device_id, component_id, **kwargs): # noqa: E501
"""Gets the configuration of a component on a device # noqa: E501
Returns the configuration of a device or the configuration of the OSGi component identified with specified PID (service's persistent identity). In the OSGi framework, the service's persistent identity is defined as the name attribute of the Component Descriptor XML file; at runtime, the same value is also available in the component.name and in the service.pid attributes of the Component Configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_component_get_with_http_info(scope_id, device_id, component_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param str component_id: An optional id of the component to get the configuration for (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'component_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_configuration_component_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_component_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_component_get`") # noqa: E501
# verify the required parameter 'component_id' is set
if ('component_id' not in params or
params['component_id'] is None):
raise ValueError("Missing the required parameter `component_id` when calling `device_configuration_component_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'component_id' in params:
path_params['componentId'] = params['component_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/configurations/{componentId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_configuration_component_update(self, scope_id, device_id, component_id, body, **kwargs): # noqa: E501
"""Updates the configuration of a component on a device # noqa: E501
Updates a device component configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_component_update(scope_id, device_id, component_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param str component_id: The component id to update (required)
:param DeviceComponentConfiguration body: The component configuration to send to the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_component_update_with_http_info(scope_id, device_id, component_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_configuration_component_update_with_http_info(scope_id, device_id, component_id, body, **kwargs) # noqa: E501
return data
def device_configuration_component_update_with_http_info(self, scope_id, device_id, component_id, body, **kwargs): # noqa: E501
"""Updates the configuration of a component on a device # noqa: E501
Updates a device component configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_component_update_with_http_info(scope_id, device_id, component_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param str component_id: The component id to update (required)
:param DeviceComponentConfiguration body: The component configuration to send to the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'component_id', 'body', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_configuration_component_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_component_update`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_component_update`") # noqa: E501
# verify the required parameter 'component_id' is set
if ('component_id' not in params or
params['component_id'] is None):
raise ValueError("Missing the required parameter `component_id` when calling `device_configuration_component_update`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_configuration_component_update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'component_id' in params:
path_params['componentId'] = params['component_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/configurations/{componentId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_configuration_get(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets the device configurations # noqa: E501
Returns the current configuration of a device # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_get(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_configuration_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_configuration_get_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets the device configurations # noqa: E501
Returns the current configuration of a device # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_get_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_configuration_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/configurations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_configuration_update(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Updates a device configuration # noqa: E501
Updates a device configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_update(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param DeviceConfiguration body: The configuration to send to the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_update_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_configuration_update_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_configuration_update_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Updates a device configuration # noqa: E501
Updates a device configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_configuration_update_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device. (required)
:param str device_id: The id of the device (required)
:param DeviceConfiguration body: The configuration to send to the device (required)
:param int timeout: The timeout of the operation in milliseconds
:return: DeviceConfiguration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_configuration_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_update`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_update`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_configuration_update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/configurations', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_count(self, scope_id, body, **kwargs): # noqa: E501
"""Counts the Devices # noqa: E501
Counts the Devices with the given DeviceQuery parameter returning the number of matching Devices # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_count(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to count results (required)
:param DeviceQuery body: The DeviceQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_count_with_http_info(scope_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_count_with_http_info(scope_id, body, **kwargs) # noqa: E501
return data
def device_count_with_http_info(self, scope_id, body, **kwargs): # noqa: E501
"""Counts the Devices # noqa: E501
Counts the Devices with the given DeviceQuery parameter returning the number of matching Devices # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_count_with_http_info(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to count results (required)
:param DeviceQuery body: The DeviceQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_count" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_count`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_count`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/_count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CountResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_create(self, scope_id, body, **kwargs): # noqa: E501
"""Create an Device # noqa: E501
Creates a new Device based on the information provided in DeviceCreator parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_create(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to create the Device. (required)
:param DeviceCreator body: Provides the information for the new Device to be created (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_create_with_http_info(scope_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_create_with_http_info(scope_id, body, **kwargs) # noqa: E501
return data
def device_create_with_http_info(self, scope_id, body, **kwargs): # noqa: E501
"""Create an Device # noqa: E501
Creates a new Device based on the information provided in DeviceCreator parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_create_with_http_info(scope_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to create the Device. (required)
:param DeviceCreator body: Provides the information for the new Device to be created (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_create`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_create`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Device', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_delete(self, scope_id, device_id, **kwargs): # noqa: E501
"""Delete a Device # noqa: E501
Deletes the Device specified by the \"deviceId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_delete(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device to delete. (required)
:param str device_id: The id of the Device to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_delete_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_delete_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_delete_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Delete a Device # noqa: E501
Deletes the Device specified by the \"deviceId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_delete_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device to delete. (required)
:param str device_id: The id of the Device to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_delete`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_event_count(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Counts the DeviceEvents # noqa: E501
Counts the DeviceEvents with the given DeviceEventQuery parameter returning the number of matching DeviceEvents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_count(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to count results. (required)
:param str device_id: The id of the Device in which to count results (required)
:param DeviceEventQuery body: The DeviceEventQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_count_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_event_count_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_event_count_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Counts the DeviceEvents # noqa: E501
Counts the DeviceEvents with the given DeviceEventQuery parameter returning the number of matching DeviceEvents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_count_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to count results. (required)
:param str device_id: The id of the Device in which to count results (required)
:param DeviceEventQuery body: The DeviceEventQuery to use to filter count results (required)
:return: CountResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_event_count" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_event_count`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_event_count`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_event_count`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/events/_count', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CountResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_event_delete(self, scope_id, device_id, device_event_id, **kwargs): # noqa: E501
"""Delete a DeviceEvent # noqa: E501
Deletes the DeviceEvent specified by the \"deviceEventId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_delete(scope_id, device_id, device_event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: (required)
:param str device_id: The id of the Device in which to delete the event. (required)
:param str device_event_id: The id of the DeviceEvent to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_delete_with_http_info(scope_id, device_id, device_event_id, **kwargs) # noqa: E501
else:
(data) = self.device_event_delete_with_http_info(scope_id, device_id, device_event_id, **kwargs) # noqa: E501
return data
def device_event_delete_with_http_info(self, scope_id, device_id, device_event_id, **kwargs): # noqa: E501
"""Delete a DeviceEvent # noqa: E501
Deletes the DeviceEvent specified by the \"deviceEventId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_delete_with_http_info(scope_id, device_id, device_event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: (required)
:param str device_id: The id of the Device in which to delete the event. (required)
:param str device_event_id: The id of the DeviceEvent to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'device_event_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_event_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_event_delete`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_event_delete`") # noqa: E501
# verify the required parameter 'device_event_id' is set
if ('device_event_id' not in params or
params['device_event_id'] is None):
raise ValueError("Missing the required parameter `device_event_id` when calling `device_event_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'device_event_id' in params:
path_params['deviceEventId'] = params['device_event_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/events/{deviceEventId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_event_find(self, scope_id, device_id, device_event_id, **kwargs): # noqa: E501
"""Get an DeviceEvent # noqa: E501
Returns the DeviceEvent specified by the \"deviceEventId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_find(scope_id, device_id, device_event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested DeviceEvent. (required)
:param str device_id: The id of the requested Device (required)
:param str device_event_id: The id of the requested DeviceEvent (required)
:return: DeviceEvent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_find_with_http_info(scope_id, device_id, device_event_id, **kwargs) # noqa: E501
else:
(data) = self.device_event_find_with_http_info(scope_id, device_id, device_event_id, **kwargs) # noqa: E501
return data
def device_event_find_with_http_info(self, scope_id, device_id, device_event_id, **kwargs): # noqa: E501
"""Get an DeviceEvent # noqa: E501
Returns the DeviceEvent specified by the \"deviceEventId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_find_with_http_info(scope_id, device_id, device_event_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested DeviceEvent. (required)
:param str device_id: The id of the requested Device (required)
:param str device_event_id: The id of the requested DeviceEvent (required)
:return: DeviceEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'device_event_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_event_find" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_event_find`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_event_find`") # noqa: E501
# verify the required parameter 'device_event_id' is set
if ('device_event_id' not in params or
params['device_event_id'] is None):
raise ValueError("Missing the required parameter `device_event_id` when calling `device_event_find`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'device_event_id' in params:
path_params['deviceEventId'] = params['device_event_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/events/{deviceEventId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceEvent', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_event_query(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Queries the DeviceEvents # noqa: E501
Queries the DeviceEvents with the given DeviceEvents parameter returning all matching DeviceEvents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_query(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param str device_id: The id of the Device in which to search results (required)
:param DeviceEventQuery body: The DeviceEventQuery to use to filter results. (required)
:return: DeviceEventListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_query_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_event_query_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_event_query_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Queries the DeviceEvents # noqa: E501
Queries the DeviceEvents with the given DeviceEvents parameter returning all matching DeviceEvents # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_query_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param str device_id: The id of the Device in which to search results (required)
:param DeviceEventQuery body: The DeviceEventQuery to use to filter results. (required)
:return: DeviceEventListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_event_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_event_query`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_event_query`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_event_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/events/_query', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceEventListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_event_simple_query(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets the DeviceEvent list in the scope # noqa: E501
Returns the list of all the deviceEvents associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_simple_query(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param str device_id: The client id to filter results. (required)
:param str resource: The resource of the DeviceEvent in which to search results
:param int offset: The result set offset.
:param int limit: The result set limit.
:return: DeviceEventListResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_simple_query_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_event_simple_query_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_event_simple_query_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets the DeviceEvent list in the scope # noqa: E501
Returns the list of all the deviceEvents associated to the current selected scope. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_event_simple_query_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId in which to search results. (required)
:param str device_id: The client id to filter results. (required)
:param str resource: The resource of the DeviceEvent in which to search results
:param int offset: The result set offset.
:param int limit: The result set limit.
:return: DeviceEventListResult
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'resource', 'offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_event_simple_query" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_event_simple_query`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_event_simple_query`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'resource' in params:
query_params.append(('resource', params['resource'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/events', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceEventListResult', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_find(self, scope_id, device_id, **kwargs): # noqa: E501
"""Get a Device # noqa: E501
Returns the Device specified by the \"deviceId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_find(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Device (required)
:param str device_id: The id of the requested Device (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_find_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_find_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_find_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Get a Device # noqa: E501
Returns the Device specified by the \"deviceId\" path parameter. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_find_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the requested Device (required)
:param str device_id: The id of the requested Device (required)
:return: Device
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_find" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_find`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_find`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Device', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_package_download(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Installs a package # noqa: E501
Installs a package into the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_download(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param DevicePackageDownloadRequest body: Mandatory object with all the informations needed to download and install a package (required)
:param int timeout: The timeout of the operation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_package_download_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_package_download_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_package_download_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Installs a package # noqa: E501
Installs a package into the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_download_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param DevicePackageDownloadRequest body: Mandatory object with all the informations needed to download and install a package (required)
:param int timeout: The timeout of the operation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_package_download" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_package_download`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_package_download`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_package_download`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/packages/_download', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_package_get(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of packages # noqa: E501
Returns the list of all the packages installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_get(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation
:return: DevicePackages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_package_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
else:
(data) = self.device_package_get_with_http_info(scope_id, device_id, **kwargs) # noqa: E501
return data
def device_package_get_with_http_info(self, scope_id, device_id, **kwargs): # noqa: E501
"""Gets a list of packages # noqa: E501
Returns the list of all the packages installed on the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_get_with_http_info(scope_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param int timeout: The timeout of the operation
:return: DevicePackages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_package_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_package_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_package_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/packages', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DevicePackages', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_package_uninstall(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Uninstalls a package # noqa: E501
Uninstalls a package into the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_uninstall(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param DevicePackageUninstallRequest body: Mandatory object with all the informations needed to uninstall a package (required)
:param int timeout: The timeout of the operation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_package_uninstall_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
else:
(data) = self.device_package_uninstall_with_http_info(scope_id, device_id, body, **kwargs) # noqa: E501
return data
def device_package_uninstall_with_http_info(self, scope_id, device_id, body, **kwargs): # noqa: E501
"""Uninstalls a package # noqa: E501
Uninstalls a package into the device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.device_package_uninstall_with_http_info(scope_id, device_id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str scope_id: The ScopeId of the Device (required)
:param str device_id: The id of the device (required)
:param DevicePackageUninstallRequest body: Mandatory object with all the informations needed to uninstall a package (required)
:param int timeout: The timeout of the operation
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scope_id', 'device_id', 'body', 'timeout'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method device_package_uninstall" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scope_id' is set
if ('scope_id' not in params or
params['scope_id'] is None):
raise ValueError("Missing the required parameter `scope_id` when calling `device_package_uninstall`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `device_package_uninstall`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `device_package_uninstall`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope_id' in params:
path_params['scopeId'] = params['scope_id'] # noqa: E501
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
query_params = []
if 'timeout' in params:
query_params.append(('timeout', params['timeout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/xml', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = ['kapuaAccessToken'] # noqa: E501
return self.api_client.call_api(
'/{scopeId}/devices/{deviceId}/packages/_uninstall', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def device_query(self, scope_id, body, **kwargs):  # noqa: E501
    """Queries the Devices.

    Runs the given DeviceQuery and returns every matching Device.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId in which to search results. (required)
    :param DeviceQuery body: The DeviceQuery to use to filter results. (required)
    :return: DeviceListResult (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # The delegate already returns either the data (sync) or the
    # request thread (async), so a single call covers both modes.
    return self.device_query_with_http_info(scope_id, body, **kwargs)  # noqa: E501
def device_query_with_http_info(self, scope_id, body, **kwargs):  # noqa: E501
    """Queries the Devices (full HTTP info variant).

    Runs the given DeviceQuery and returns every matching Device.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId in which to search results. (required)
    :param DeviceQuery body: The DeviceQuery to use to filter results. (required)
    :return: DeviceListResult
    """
    # Keyword names accepted in addition to the positional parameters.
    allowed = ['scope_id', 'body', 'async_req', '_return_http_data_only',
               '_preload_content', '_request_timeout']
    params = {'scope_id': scope_id, 'body': body}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_query" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_query`" % name)
    path_params = {'scopeId': params['scope_id']}
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/_query', 'POST',
        path_params,
        [],
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='DeviceListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_request_send(self, scope_id, device_id, body, **kwargs):  # noqa: E501
    """Sends a request message to a device.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param JsonGenericRequestMessage body: The input request (required)
    :param int timeout: The timeout of the request execution
    :return: JsonGenericResponseMessage (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_request_send_with_http_info(scope_id, device_id, body, **kwargs)  # noqa: E501
def device_request_send_with_http_info(self, scope_id, device_id, body, **kwargs):  # noqa: E501
    """Sends a request message to a device (full HTTP info variant).

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param JsonGenericRequestMessage body: The input request (required)
    :param int timeout: The timeout of the request execution
    :return: JsonGenericResponseMessage
    """
    allowed = ['scope_id', 'device_id', 'body', 'timeout', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id, 'body': body}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_request_send" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_request_send`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/requests', 'POST',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='JsonGenericResponseMessage',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_simple_query(self, scope_id, **kwargs):  # noqa: E501
    """Gets the Device list in the scope.

    Lists every device associated to the currently selected scope.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId in which to search results. (required)
    :param str tag_id: The tag id to filter results.
    :param str client_id: The client id to filter results.
    :param str status: The connection status to filter results.
    :param list[str] fetch_attributes: Additional attributes to be returned.
        Allowed values: connection, lastEvent
    :param int offset: The result set offset.
    :param int limit: The result set limit.
    :return: DeviceListResult (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_simple_query_with_http_info(scope_id, **kwargs)  # noqa: E501
def device_simple_query_with_http_info(self, scope_id, **kwargs):  # noqa: E501
    """Gets the Device list in the scope (full HTTP info variant).

    Lists every device associated to the currently selected scope.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId in which to search results. (required)
    :param str tag_id: The tag id to filter results.
    :param str client_id: The client id to filter results.
    :param str status: The connection status to filter results.
    :param list[str] fetch_attributes: Additional attributes to be returned.
        Allowed values: connection, lastEvent
    :param int offset: The result set offset.
    :param int limit: The result set limit.
    :return: DeviceListResult
    """
    allowed = ['scope_id', 'tag_id', 'client_id', 'status',
               'fetch_attributes', 'offset', 'limit', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_simple_query" % key
            )
        params[key] = val
    if params.get('scope_id') is None:
        raise ValueError(
            "Missing the required parameter `scope_id` when calling "
            "`device_simple_query`")
    path_params = {'scopeId': params['scope_id']}
    # Map python argument names onto their wire names, preserving the
    # original ordering of the query string parameters.
    query_params = []
    for arg, wire in (('tag_id', 'tagId'),
                      ('client_id', 'clientId'),
                      ('status', 'status'),
                      ('fetch_attributes', 'fetchAttributes'),
                      ('offset', 'offset'),
                      ('limit', 'limit')):
        if arg in params:
            query_params.append((wire, params[arg]))
    collection_formats = {}
    if 'fetch_attributes' in params:
        # fetchAttributes is a repeated ("multi") query parameter.
        collection_formats['fetchAttributes'] = 'multi'
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def device_snapshot_get(self, scope_id, device_id, **kwargs):  # noqa: E501
    """Gets a list of snapshots.

    Returns every Snapshot available on the device.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :return: DeviceSnapshots (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_snapshot_get_with_http_info(scope_id, device_id, **kwargs)  # noqa: E501
def device_snapshot_get_with_http_info(self, scope_id, device_id, **kwargs):  # noqa: E501
    """Gets a list of snapshots (full HTTP info variant).

    Returns every Snapshot available on the device.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :return: DeviceSnapshots
    """
    allowed = ['scope_id', 'device_id', 'timeout', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_snapshot_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_snapshot_get`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/snapshots', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceSnapshots',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_snapshot_rollback(self, scope_id, device_id, snapshot_id, **kwargs):  # noqa: E501
    """Rolls a device back to a snapshot.

    Updates the device configuration by rolling back to the given
    snapshot ID. Synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param str snapshot_id: the ID of the snapshot to rollback to (required)
    :param int timeout: The timeout of the operation
    :return: None (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_snapshot_rollback_with_http_info(scope_id, device_id, snapshot_id, **kwargs)  # noqa: E501
def device_snapshot_rollback_with_http_info(self, scope_id, device_id, snapshot_id, **kwargs):  # noqa: E501
    """Rolls a device back to a snapshot (full HTTP info variant).

    Updates the device configuration by rolling back to the given
    snapshot ID. Synchronous by default; pass ``async_req=True`` for the
    request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param str snapshot_id: the ID of the snapshot to rollback to (required)
    :param int timeout: The timeout of the operation
    :return: None
    """
    allowed = ['scope_id', 'device_id', 'snapshot_id', 'timeout',
               'async_req', '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id,
              'snapshot_id': snapshot_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_snapshot_rollback" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id', 'snapshot_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_snapshot_rollback`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
        'snapshotId': params['snapshot_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/snapshots/{snapshotId}/_rollback', 'POST',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_update(self, scope_id, device_id, body, **kwargs):  # noqa: E501
    """Updates a Device.

    Updates the Device with the attributes provided in the Device
    parameter. Synchronous by default; pass ``async_req=True`` to get
    the request thread back instead of the data.

    :param str scope_id: The ScopeId of the requested Device. (required)
    :param str device_id: The id of the requested Device (required)
    :param Device body: The modified Device whose attributed need to be updated (required)
    :return: Device (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_update_with_http_info(scope_id, device_id, body, **kwargs)  # noqa: E501
def device_update_with_http_info(self, scope_id, device_id, body, **kwargs):  # noqa: E501
    """Updates a Device (full HTTP info variant).

    Updates the Device with the attributes provided in the Device
    parameter. Synchronous by default; pass ``async_req=True`` for the
    request thread.

    :param str scope_id: The ScopeId of the requested Device. (required)
    :param str device_id: The id of the requested Device (required)
    :param Device body: The modified Device whose attributed need to be updated (required)
    :return: Device
    """
    allowed = ['scope_id', 'device_id', 'body', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id, 'body': body}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_update" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_update`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}', 'PUT',
        path_params,
        [],
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='Device',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 44.859882
| 426
| 0.624324
|
from __future__ import absolute_import
import re
import six
from swagger_client.api_client import ApiClient
class DevicesApi(object):
def __init__(self, api_client=None):
    """Bind the API to an ApiClient; build a default one when omitted."""
    self.api_client = ApiClient() if api_client is None else api_client
def device_asset_filtered_get(self, scope_id, device_id, **kwargs):
    """Gets the assets of a device, filtered by an optional ``body``.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :param body: Optional asset filter payload
    :return: DeviceAssets (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_asset_filtered_get_with_http_info(scope_id, device_id, **kwargs)
def device_asset_filtered_get_with_http_info(self, scope_id, device_id, **kwargs):
    """Gets the assets of a device, filtered (full HTTP info variant).

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :param body: Optional asset filter payload
    :return: DeviceAssets
    """
    allowed = ['scope_id', 'device_id', 'timeout', 'body', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_asset_filtered_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_asset_filtered_get`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/assets', 'POST',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='DeviceAssets',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_asset_get(self, scope_id, device_id, **kwargs):
    """Gets the assets of a device.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :return: DeviceAssets (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_asset_get_with_http_info(scope_id, device_id, **kwargs)
def device_asset_get_with_http_info(self, scope_id, device_id, **kwargs):
    """Gets the assets of a device (full HTTP info variant).

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :return: DeviceAssets
    """
    allowed = ['scope_id', 'device_id', 'timeout', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_asset_get" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_asset_get`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/assets', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceAssets',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_asset_read(self, scope_id, device_id, **kwargs):
    """Reads asset channel values from a device.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :param body: Optional asset read request payload
    :return: DeviceAssets (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_asset_read_with_http_info(scope_id, device_id, **kwargs)
def device_asset_read_with_http_info(self, scope_id, device_id, **kwargs):
    """Reads asset channel values from a device (full HTTP info variant).

    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :param body: Optional asset read request payload
    :return: DeviceAssets
    """
    allowed = ['scope_id', 'device_id', 'timeout', 'body', 'async_req',
               '_return_http_data_only', '_preload_content',
               '_request_timeout']
    params = {'scope_id': scope_id, 'device_id': device_id}
    for key, val in six.iteritems(kwargs):
        if key not in allowed:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_asset_read" % key
            )
        params[key] = val
    # Required parameters must be present and non-None.
    for name in ('scope_id', 'device_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling "
                "`device_asset_read`" % name)
    path_params = {
        'scopeId': params['scope_id'],
        'deviceId': params['device_id'],
    }
    query_params = []
    if 'timeout' in params:
        query_params.append(('timeout', params['timeout']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/assets/_read', 'POST',
        path_params,
        query_params,
        header_params,
        body=params.get('body'),
        post_params=[],
        files={},
        response_type='DeviceAssets',
        auth_settings=['kapuaAccessToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def device_asset_write(self, scope_id, device_id, **kwargs):
    """Writes asset channel values to a device.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the data.

    :param str scope_id: The ScopeId of the device (required)
    :param str device_id: The id of the device (required)
    :param int timeout: The timeout of the operation
    :param body: Optional asset write request payload
    :return: DeviceAssets (or the request thread when async_req=True)
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths are both handled by the delegate.
    return self.device_asset_write_with_http_info(scope_id, device_id, **kwargs)
    def device_asset_write_with_http_info(self, scope_id, device_id, **kwargs):
        """POST ``/{scopeId}/devices/{deviceId}/assets/_write``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :param body: optional request body.
        :return: ``DeviceAssets`` (or full HTTP response info, depending on
            the ``_return_http_data_only`` / ``_preload_content`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'timeout', 'body']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above.  NOTE: anything bound before this line becomes
        # a key of `params`, so statement order matters here.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_asset_write" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_asset_write`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_asset_write`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/assets/_write', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceAssets',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_bundle_get(self, scope_id, device_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_get_with_http_info(scope_id, device_id, **kwargs)
else:
(data) = self.device_bundle_get_with_http_info(scope_id, device_id, **kwargs)
return data
    def device_bundle_get_with_http_info(self, scope_id, device_id, **kwargs):
        """GET ``/{scopeId}/devices/{deviceId}/bundles``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceBundles`` (or full HTTP response info, depending on
            the ``_return_http_data_only`` / ``_preload_content`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_bundle_get" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_get`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_get`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/bundles', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceBundles',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_bundle_start(self, scope_id, device_id, bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_start_with_http_info(scope_id, device_id, bundle_id, **kwargs)
else:
(data) = self.device_bundle_start_with_http_info(scope_id, device_id, bundle_id, **kwargs)
return data
    def device_bundle_start_with_http_info(self, scope_id, device_id, bundle_id, **kwargs):
        """POST ``/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_start``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param bundle_id: required; fills the ``bundleId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: None (no response body is deserialized).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'bundle_id', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_bundle_start" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_start`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_start`")
        if ('bundle_id' not in params or
                params['bundle_id'] is None):
            raise ValueError("Missing the required parameter `bundle_id` when calling `device_bundle_start`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']
        if 'bundle_id' in params:
            path_params['bundleId'] = params['bundle_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_start', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_bundle_stop(self, scope_id, device_id, bundle_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_bundle_stop_with_http_info(scope_id, device_id, bundle_id, **kwargs)
else:
(data) = self.device_bundle_stop_with_http_info(scope_id, device_id, bundle_id, **kwargs)
return data
    def device_bundle_stop_with_http_info(self, scope_id, device_id, bundle_id, **kwargs):
        """POST ``/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_stop``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param bundle_id: required; fills the ``bundleId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: None (no response body is deserialized).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'bundle_id', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_bundle_stop" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_bundle_stop`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_bundle_stop`")
        if ('bundle_id' not in params or
                params['bundle_id'] is None):
            raise ValueError("Missing the required parameter `bundle_id` when calling `device_bundle_stop`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']
        if 'bundle_id' in params:
            path_params['bundleId'] = params['bundle_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/bundles/{bundleId}/_stop', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_command_execute(self, scope_id, device_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_command_execute_with_http_info(scope_id, device_id, body, **kwargs)
else:
(data) = self.device_command_execute_with_http_info(scope_id, device_id, body, **kwargs)
return data
    def device_command_execute_with_http_info(self, scope_id, device_id, body, **kwargs):
        """POST ``/{scopeId}/devices/{deviceId}/commands/_execute``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param body: required request body (the command to execute).
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceCommandOutput`` (or full HTTP response info,
            depending on the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'body', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_command_execute" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_command_execute`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_command_execute`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_command_execute`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/commands/_execute', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceCommandOutput',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_configuration_component_get(self, scope_id, device_id, component_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_component_get_with_http_info(scope_id, device_id, component_id, **kwargs)
else:
(data) = self.device_configuration_component_get_with_http_info(scope_id, device_id, component_id, **kwargs)
return data
    def device_configuration_component_get_with_http_info(self, scope_id, device_id, component_id, **kwargs):
        """GET ``/{scopeId}/devices/{deviceId}/configurations/{componentId}``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param component_id: required; fills the ``componentId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceConfiguration`` (or full HTTP response info,
            depending on the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'component_id', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_configuration_component_get" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_component_get`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_component_get`")
        if ('component_id' not in params or
                params['component_id'] is None):
            raise ValueError("Missing the required parameter `component_id` when calling `device_configuration_component_get`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']
        if 'component_id' in params:
            path_params['componentId'] = params['component_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/configurations/{componentId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceConfiguration',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_configuration_component_update(self, scope_id, device_id, component_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_component_update_with_http_info(scope_id, device_id, component_id, body, **kwargs)
else:
(data) = self.device_configuration_component_update_with_http_info(scope_id, device_id, component_id, body, **kwargs)
return data
    def device_configuration_component_update_with_http_info(self, scope_id, device_id, component_id, body, **kwargs):
        """PUT ``/{scopeId}/devices/{deviceId}/configurations/{componentId}``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param component_id: required; fills the ``componentId`` path segment.
        :param body: required request body (the configuration to apply).
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceConfiguration`` (or full HTTP response info,
            depending on the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'component_id', 'body', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_configuration_component_update" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_component_update`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_component_update`")
        if ('component_id' not in params or
                params['component_id'] is None):
            raise ValueError("Missing the required parameter `component_id` when calling `device_configuration_component_update`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_configuration_component_update`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']
        if 'component_id' in params:
            path_params['componentId'] = params['component_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/configurations/{componentId}', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceConfiguration',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_configuration_get(self, scope_id, device_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_get_with_http_info(scope_id, device_id, **kwargs)
else:
(data) = self.device_configuration_get_with_http_info(scope_id, device_id, **kwargs)
return data
    def device_configuration_get_with_http_info(self, scope_id, device_id, **kwargs):
        """GET ``/{scopeId}/devices/{deviceId}/configurations``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceConfiguration`` (or full HTTP response info,
            depending on the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_configuration_get" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_get`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_get`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/configurations', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceConfiguration',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_configuration_update(self, scope_id, device_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_configuration_update_with_http_info(scope_id, device_id, body, **kwargs)
else:
(data) = self.device_configuration_update_with_http_info(scope_id, device_id, body, **kwargs)
return data
    def device_configuration_update_with_http_info(self, scope_id, device_id, body, **kwargs):
        """PUT ``/{scopeId}/devices/{deviceId}/configurations``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param body: required request body (the configuration to apply).
        :param timeout: optional; sent as the ``timeout`` query parameter.
        :return: ``DeviceConfiguration`` (or full HTTP response info,
            depending on the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'body', 'timeout']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_configuration_update" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_configuration_update`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_configuration_update`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_configuration_update`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []
        if 'timeout' in params:
            query_params.append(('timeout', params['timeout']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json', 'application/xml'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/configurations', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeviceConfiguration',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_count(self, scope_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_count_with_http_info(scope_id, body, **kwargs)
else:
(data) = self.device_count_with_http_info(scope_id, body, **kwargs)
return data
    def device_count_with_http_info(self, scope_id, body, **kwargs):
        """POST ``/{scopeId}/devices/_count``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param body: required request body (the query to count against).
        :return: ``CountResult`` (or full HTTP response info, depending on
            the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'body']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_count" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_count`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_count`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/_count', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CountResult',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_create(self, scope_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_create_with_http_info(scope_id, body, **kwargs)
else:
(data) = self.device_create_with_http_info(scope_id, body, **kwargs)
return data
    def device_create_with_http_info(self, scope_id, body, **kwargs):
        """POST ``/{scopeId}/devices``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param body: required request body (the device to create).
        :return: ``Device`` (or full HTTP response info, depending on the
            ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'body']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_create" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_create`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_create`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Device',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_delete(self, scope_id, device_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_delete_with_http_info(scope_id, device_id, **kwargs)
else:
(data) = self.device_delete_with_http_info(scope_id, device_id, **kwargs)
return data
    def device_delete_with_http_info(self, scope_id, device_id, **kwargs):
        """DELETE ``/{scopeId}/devices/{deviceId}``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :return: None (no response body is deserialized).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_delete" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_delete`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_delete`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_event_count(self, scope_id, device_id, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_count_with_http_info(scope_id, device_id, body, **kwargs)
else:
(data) = self.device_event_count_with_http_info(scope_id, device_id, body, **kwargs)
return data
    def device_event_count_with_http_info(self, scope_id, device_id, body, **kwargs):
        """POST ``/{scopeId}/devices/{deviceId}/events/_count``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param body: required request body (the query to count against).
        :return: ``CountResult`` (or full HTTP response info, depending on
            the ``_return_http_data_only`` kwargs).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'body']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_event_count" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_event_count`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_event_count`")
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `device_event_count`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/xml', 'application/json'])

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/events/_count', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CountResult',
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_event_delete(self, scope_id, device_id, device_event_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.device_event_delete_with_http_info(scope_id, device_id, device_event_id, **kwargs)
else:
(data) = self.device_event_delete_with_http_info(scope_id, device_id, device_event_id, **kwargs)
return data
    def device_event_delete_with_http_info(self, scope_id, device_id, device_event_id, **kwargs):
        """DELETE ``/{scopeId}/devices/{deviceId}/events/{deviceEventId}``.

        :param scope_id: required; fills the ``scopeId`` path segment.
        :param device_id: required; fills the ``deviceId`` path segment.
        :param device_event_id: required; fills the ``deviceEventId`` path segment.
        :return: None (no response body is deserialized).
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or ``None``.
        """
        all_params = ['scope_id', 'device_id', 'device_event_id']
        # Internal kwargs accepted by every generated endpoint method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Snapshot the locals, then validate caller kwargs against the
        # whitelist above (statement order matters for locals()).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method device_event_delete" % key
                )
            params[key] = val
        del params['kwargs']
        if ('scope_id' not in params or
                params['scope_id'] is None):
            raise ValueError("Missing the required parameter `scope_id` when calling `device_event_delete`")
        if ('device_id' not in params or
                params['device_id'] is None):
            raise ValueError("Missing the required parameter `device_id` when calling `device_event_delete`")
        if ('device_event_id' not in params or
                params['device_event_id'] is None):
            raise ValueError("Missing the required parameter `device_event_id` when calling `device_event_delete`")

        collection_formats = {}

        # Assemble path, query, header, form and body parameters.
        path_params = {}
        if 'scope_id' in params:
            path_params['scopeId'] = params['scope_id']
        if 'device_id' in params:
            path_params['deviceId'] = params['device_id']
        if 'device_event_id' in params:
            path_params['deviceEventId'] = params['device_event_id']

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        auth_settings = ['kapuaAccessToken']

        return self.api_client.call_api(
            '/{scopeId}/devices/{deviceId}/events/{deviceEventId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def device_event_find(self, scope_id, device_id, device_event_id, **kwargs):
    """Find a DeviceEvent by its identifier.

    Forces ``_return_http_data_only`` so only the payload is returned;
    with ``async_req=True`` the delegate hands back its request thread,
    which is propagated unchanged (the sync and async branches of the
    generated code were identical).
    """
    kwargs['_return_http_data_only'] = True
    return self.device_event_find_with_http_info(scope_id, device_id, device_event_id, **kwargs)
def device_event_find_with_http_info(self, scope_id, device_id, device_event_id, **kwargs):
    """Fetch one DeviceEvent.

    Issues ``GET /{scopeId}/devices/{deviceId}/events/{deviceEventId}`` and
    deserializes the response as ``DeviceEvent``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'device_event_id', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_event_find" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_event_find`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_event_find`")
    if device_event_id is None:
        raise ValueError("Missing the required parameter `device_event_id` when calling `device_event_find`")
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/events/{deviceEventId}', 'GET',
        {'scopeId': scope_id, 'deviceId': device_id, 'deviceEventId': device_event_id},
        [],   # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceEvent',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_event_query(self, scope_id, device_id, body, **kwargs):
    """Query the events of a device with a DeviceEventQuery body.

    Forces ``_return_http_data_only``; both the async and sync paths of
    the generated code returned the delegate's result unchanged, so a
    single return suffices.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_event_query_with_http_info(scope_id, device_id, body, **kwargs)
def device_event_query_with_http_info(self, scope_id, device_id, body, **kwargs):
    """Run a DeviceEvent query.

    Issues ``POST /{scopeId}/devices/{deviceId}/events/_query`` with the
    query object as the request body; the response is deserialized as
    ``DeviceEventListResult``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'body', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_event_query" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_event_query`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_event_query`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_event_query`")
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/events/_query', 'POST',
        {'scopeId': scope_id, 'deviceId': device_id},
        [],   # no query parameters
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='DeviceEventListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_event_simple_query(self, scope_id, device_id, **kwargs):
    """List a device's events using simple query-string filters.

    Forces ``_return_http_data_only``; the async/sync branches of the
    generated code were identical, so the delegate's result is returned
    directly.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_event_simple_query_with_http_info(scope_id, device_id, **kwargs)
def device_event_simple_query_with_http_info(self, scope_id, device_id, **kwargs):
    """List a device's events.

    Issues ``GET /{scopeId}/devices/{deviceId}/events`` with optional
    ``resource``/``offset``/``limit`` query parameters; the response is
    deserialized as ``DeviceEventListResult``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'resource', 'offset', 'limit',
                  'async_req', '_return_http_data_only', '_preload_content',
                  '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_event_simple_query" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_event_simple_query`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_event_simple_query`")
    # Only the filters actually supplied become query parameters, in API order.
    query_params = [(name, kwargs[name])
                    for name in ('resource', 'offset', 'limit') if name in kwargs]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/events', 'GET',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceEventListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_find(self, scope_id, device_id, **kwargs):
    """Find a Device by its identifier.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_find_with_http_info(scope_id, device_id, **kwargs)
def device_find_with_http_info(self, scope_id, device_id, **kwargs):
    """Fetch one Device.

    Issues ``GET /{scopeId}/devices/{deviceId}`` and deserializes the
    response as ``Device``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_find" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_find`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_find`")
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}', 'GET',
        {'scopeId': scope_id, 'deviceId': device_id},
        [],   # no query parameters
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Device',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_package_download(self, scope_id, device_id, body, **kwargs):
    """Ask a device to download a deployment package.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_package_download_with_http_info(scope_id, device_id, body, **kwargs)
def device_package_download_with_http_info(self, scope_id, device_id, body, **kwargs):
    """Trigger a package download on a device.

    Issues ``POST /{scopeId}/devices/{deviceId}/packages/_download`` with
    the download request as body and an optional ``timeout`` query
    parameter; no response body is deserialized.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'body', 'timeout', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_package_download" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_package_download`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_package_download`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_package_download`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/packages/_download', 'POST',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_package_get(self, scope_id, device_id, **kwargs):
    """List the deployment packages installed on a device.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_package_get_with_http_info(scope_id, device_id, **kwargs)
def device_package_get_with_http_info(self, scope_id, device_id, **kwargs):
    """List installed packages.

    Issues ``GET /{scopeId}/devices/{deviceId}/packages`` with an optional
    ``timeout`` query parameter; the response is deserialized as
    ``DevicePackages``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'timeout', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_package_get" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_package_get`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_package_get`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/packages', 'GET',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DevicePackages',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_package_uninstall(self, scope_id, device_id, body, **kwargs):
    """Ask a device to uninstall a deployment package.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_package_uninstall_with_http_info(scope_id, device_id, body, **kwargs)
def device_package_uninstall_with_http_info(self, scope_id, device_id, body, **kwargs):
    """Trigger a package uninstall on a device.

    Issues ``POST /{scopeId}/devices/{deviceId}/packages/_uninstall`` with
    the uninstall request as body and an optional ``timeout`` query
    parameter; no response body is deserialized.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'body', 'timeout', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_package_uninstall" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_package_uninstall`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_package_uninstall`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_package_uninstall`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/packages/_uninstall', 'POST',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_query(self, scope_id, body, **kwargs):
    """Query the devices of a scope with a DeviceQuery body.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_query_with_http_info(scope_id, body, **kwargs)
def device_query_with_http_info(self, scope_id, body, **kwargs):
    """Run a Device query.

    Issues ``POST /{scopeId}/devices/_query`` with the query object as the
    request body; the response is deserialized as ``DeviceListResult``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'body', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_query" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_query`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_query`")
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/_query', 'POST',
        {'scopeId': scope_id},
        [],   # no query parameters
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='DeviceListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_request_send(self, scope_id, device_id, body, **kwargs):
    """Send a generic request message to a device.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_request_send_with_http_info(scope_id, device_id, body, **kwargs)
def device_request_send_with_http_info(self, scope_id, device_id, body, **kwargs):
    """Send a generic JSON request to a device.

    Issues ``POST /{scopeId}/devices/{deviceId}/requests`` with the
    request message as body and an optional ``timeout`` query parameter;
    the response is deserialized as ``JsonGenericResponseMessage``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'body', 'timeout', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_request_send" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_request_send`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_request_send`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_request_send`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    # This endpoint speaks JSON only, in both directions.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/requests', 'POST',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='JsonGenericResponseMessage',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_simple_query(self, scope_id, **kwargs):
    """List the devices of a scope using simple query-string filters.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_simple_query_with_http_info(scope_id, **kwargs)
def device_simple_query_with_http_info(self, scope_id, **kwargs):
    """List devices in a scope.

    Issues ``GET /{scopeId}/devices`` with optional filters
    (``tag_id``, ``client_id``, ``status``, ``fetch_attributes``,
    ``offset``, ``limit``); the response is deserialized as
    ``DeviceListResult``. ``fetch_attributes`` is a multi-valued query
    parameter.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when ``scope_id`` is ``None``
    """
    recognized = ('scope_id', 'tag_id', 'client_id', 'status',
                  'fetch_attributes', 'offset', 'limit', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_simple_query" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_simple_query`")
    collection_formats = {}
    query_params = []
    # Map snake_case kwargs to the API's camelCase query names, in order.
    for api_name, py_name in (('tagId', 'tag_id'), ('clientId', 'client_id'),
                              ('status', 'status'),
                              ('fetchAttributes', 'fetch_attributes'),
                              ('offset', 'offset'), ('limit', 'limit')):
        if py_name in kwargs:
            query_params.append((api_name, kwargs[py_name]))
            if api_name == 'fetchAttributes':
                # Repeated ?fetchAttributes=... entries, one per value.
                collection_formats['fetchAttributes'] = 'multi'
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices', 'GET',
        {'scopeId': scope_id},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceListResult',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
def device_snapshot_get(self, scope_id, device_id, **kwargs):
    """List the configuration snapshots of a device.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_snapshot_get_with_http_info(scope_id, device_id, **kwargs)
def device_snapshot_get_with_http_info(self, scope_id, device_id, **kwargs):
    """List a device's snapshots.

    Issues ``GET /{scopeId}/devices/{deviceId}/snapshots`` with an
    optional ``timeout`` query parameter; the response is deserialized as
    ``DeviceSnapshots``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'timeout', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_snapshot_get" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_snapshot_get`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_snapshot_get`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/xml']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/snapshots', 'GET',
        {'scopeId': scope_id, 'deviceId': device_id},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='DeviceSnapshots',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_snapshot_rollback(self, scope_id, device_id, snapshot_id, **kwargs):
    """Roll a device back to a configuration snapshot.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_snapshot_rollback_with_http_info(scope_id, device_id, snapshot_id, **kwargs)
def device_snapshot_rollback_with_http_info(self, scope_id, device_id, snapshot_id, **kwargs):
    """Roll a device back to a snapshot.

    Issues
    ``POST /{scopeId}/devices/{deviceId}/snapshots/{snapshotId}/_rollback``
    with an optional ``timeout`` query parameter; no response body is
    deserialized.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'snapshot_id', 'timeout',
                  'async_req', '_return_http_data_only', '_preload_content',
                  '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_snapshot_rollback" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_snapshot_rollback`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_snapshot_rollback`")
    if snapshot_id is None:
        raise ValueError("Missing the required parameter `snapshot_id` when calling `device_snapshot_rollback`")
    query_params = [('timeout', kwargs['timeout'])] if 'timeout' in kwargs else []
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}/snapshots/{snapshotId}/_rollback', 'POST',
        {'scopeId': scope_id, 'deviceId': device_id, 'snapshotId': snapshot_id},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def device_update(self, scope_id, device_id, body, **kwargs):
    """Update a Device with the supplied representation.

    Forces ``_return_http_data_only``; both generated branches returned
    the delegate's result unchanged.
    """
    kwargs['_return_http_data_only'] = True
    return self.device_update_with_http_info(scope_id, device_id, body, **kwargs)
def device_update_with_http_info(self, scope_id, device_id, body, **kwargs):
    """Update a Device.

    Issues ``PUT /{scopeId}/devices/{deviceId}`` with the new device
    representation as body; the response is deserialized as ``Device``.

    :raises TypeError: on an unrecognized keyword argument
    :raises ValueError: when a required parameter is ``None``
    """
    recognized = ('scope_id', 'device_id', 'body', 'async_req',
                  '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method device_update" % key
            )
    if scope_id is None:
        raise ValueError("Missing the required parameter `scope_id` when calling `device_update`")
    if device_id is None:
        raise ValueError("Missing the required parameter `device_id` when calling `device_update`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `device_update`")
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/xml', 'application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/xml', 'application/json']),
    }
    return self.api_client.call_api(
        '/{scopeId}/devices/{deviceId}', 'PUT',
        {'scopeId': scope_id, 'deviceId': device_id},
        [],   # no query parameters
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='Device',
        auth_settings=['kapuaAccessToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| true
| true
|
790d6a3efac390378876a5be53e7f10b1530fb5c
| 28,191
|
py
|
Python
|
tests/migrations/test_writer.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 5
|
2019-10-17T21:29:53.000Z
|
2021-06-23T16:27:02.000Z
|
tests/migrations/test_writer.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 10
|
2016-05-19T21:54:42.000Z
|
2019-08-09T15:59:50.000Z
|
tests/migrations/test_writer.py
|
robgolding/django
|
1d0bab0bfd77edcf1228d45bf654457a8ff1890d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 11
|
2019-09-14T20:57:30.000Z
|
2022-01-19T17:59:26.000Z
|
import datetime
import decimal
import enum
import functools
import math
import os
import re
import uuid
from unittest import mock
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import SettingsReference, settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.serializer import BaseSerializer
from django.db.migrations.writer import MigrationWriter, OperationWriter
from django.test import SimpleTestCase
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import get_default_timezone, get_fixed_timezone, utc
from django.utils.translation import gettext_lazy as _
from .models import FoodManager, FoodQuerySet
class Money(decimal.Decimal):
    """A Decimal subclass that can deconstruct itself for migration writing."""

    def deconstruct(self):
        """Return the ``(path, args, kwargs)`` triple the serializer expects."""
        cls = type(self)
        dotted_path = '%s.%s' % (cls.__module__, cls.__name__)
        return (dotted_path, [str(self)], {})
class TestModel1:
    """Fixture exercising serialization of a FileField whose ``upload_to``
    is a reference to a method defined on the same class."""
    def upload_to(self):
        # Dynamic upload-path callable; serializers must reference it by name.
        return '/somewhere/dynamic/'
    thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
    """An operation with no arguments serializes to an empty call."""
    op = custom_migration_operations.operations.TestOperation()
    serialized, imports = OperationWriter(op, indentation=0).serialize()
    self.assertEqual(imports, {'import custom_migration_operations.operations'})
    expected = (
        'custom_migration_operations.operations.TestOperation(\n'
        '),'
    )
    self.assertEqual(serialized, expected)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal('1.3'))
self.assertSerializedResultEqual(
decimal.Decimal('1.3'),
("Decimal('1.3')", {'from decimal import Decimal'})
)
self.assertSerializedEqual(Money('1.3'))
self.assertSerializedResultEqual(
Money('1.3'),
("migrations.test_writer.Money('1.3')", {'import migrations.test_writer'})
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r'^foo$')
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
class TextEnum(enum.Enum):
A = 'a-value'
B = 'value-b'
class BinaryEnum(enum.Enum):
A = b'a-value'
B = b'value-b'
class IntEnum(enum.IntEnum):
A = 1
B = 2
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum('a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
BinaryEnum.A,
("migrations.test_writer.BinaryEnum(b'a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum(2)", {'import migrations.test_writer'})
)
field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum('a-value')), "
"('value-b', migrations.test_writer.TextEnum('value-b'))], "
"default=migrations.test_writer.TextEnum('value-b'))"
)
field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum(b'a-value')), "
"(b'value-b', migrations.test_writer.BinaryEnum(b'value-b'))], "
"default=migrations.test_writer.BinaryEnum(b'value-b'))"
)
field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum(1)), "
"(2, migrations.test_writer.IntEnum(2))], "
"default=migrations.test_writer.IntEnum(1))"
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')
uuid_b = uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {'import uuid'})
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {'import uuid'})
)
field = models.UUIDField(choices=((uuid_a, 'UUID A'), (uuid_b, 'UUID B')), default=uuid_a)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))"
)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=get_fixed_timezone(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
def test_serialize_iterators(self):
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$')
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$'))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.S)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag(16))")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_range(self):
string, imports = MigrationWriter.serialize(range(1, 5))
self.assertEqual(string, 'range(1, 5)')
self.assertEqual(imports, set())
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, 'range')
self.assertEqual(imports, set())
def test_serialize_unbound_method_reference(self):
"""An unbound method used within a class body can be serialized."""
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""A reference in a local scope can't be serialized."""
class TestModel2:
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaisesMessage(ValueError, 'Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_set(self):
self.assertSerializedEqual(set())
self.assertSerializedResultEqual(set(), ('set()', set()))
self.assertSerializedEqual({'a'})
self.assertSerializedResultEqual({'a'}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_type_none(self):
self.assertSerializedEqual(type(None))
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(
name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(__file__))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type("Migration", (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": []
})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
with mock.patch('django.db.migrations.writer.now', lambda: dt):
for include_header in (True, False):
with self.subTest(include_header=include_header):
writer = MigrationWriter(migration, include_header)
output = writer.as_string()
self.assertEqual(
include_header,
output.startswith(
"# Generated by Django %s on 2015-07-31 04:40\n\n" % get_version()
)
)
if not include_header:
# Make sure the output starts with something that's not
# a comment or indentation or blank line
self.assertRegex(output.splitlines(keepends=True)[0], r"^[^#\s]+")
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AlterModelOptions(
name='model',
options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructibleInstances:
def deconstruct(self):
return ('DeconstructibleInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructibleInstances)")
def test_register_serializer(self):
class ComplexSerializer(BaseSerializer):
def serialize(self):
return 'complex(%r)' % self.value, {}
MigrationWriter.register_serializer(complex, ComplexSerializer)
self.assertSerializedEqual(complex(1, 2))
MigrationWriter.unregister_serializer(complex)
with self.assertRaisesMessage(ValueError, 'Cannot serialize: (1+2j)'):
self.assertSerializedEqual(complex(1, 2))
def test_register_non_serializer(self):
with self.assertRaisesMessage(ValueError, "'TestModel1' must inherit from 'BaseSerializer'."):
MigrationWriter.register_serializer(complex, TestModel1)
| 41.888559
| 118
| 0.631975
|
import datetime
import decimal
import enum
import functools
import math
import os
import re
import uuid
from unittest import mock
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import SettingsReference, settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.serializer import BaseSerializer
from django.db.migrations.writer import MigrationWriter, OperationWriter
from django.test import SimpleTestCase
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import get_default_timezone, get_fixed_timezone, utc
from django.utils.translation import gettext_lazy as _
from .models import FoodManager, FoodQuerySet
class Money(decimal.Decimal):
def deconstruct(self):
return (
'%s.%s' % (self.__class__.__module__, self.__class__.__name__),
[str(self)],
{}
)
class TestModel1:
def upload_to(self):
return '/somewhere/dynamic/'
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal('1.3'))
self.assertSerializedResultEqual(
decimal.Decimal('1.3'),
("Decimal('1.3')", {'from decimal import Decimal'})
)
self.assertSerializedEqual(Money('1.3'))
self.assertSerializedResultEqual(
Money('1.3'),
("migrations.test_writer.Money('1.3')", {'import migrations.test_writer'})
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r'^foo$')
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
class TextEnum(enum.Enum):
A = 'a-value'
B = 'value-b'
class BinaryEnum(enum.Enum):
A = b'a-value'
B = b'value-b'
class IntEnum(enum.IntEnum):
A = 1
B = 2
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum('a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
BinaryEnum.A,
("migrations.test_writer.BinaryEnum(b'a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum(2)", {'import migrations.test_writer'})
)
field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum('a-value')), "
"('value-b', migrations.test_writer.TextEnum('value-b'))], "
"default=migrations.test_writer.TextEnum('value-b'))"
)
field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum(b'a-value')), "
"(b'value-b', migrations.test_writer.BinaryEnum(b'value-b'))], "
"default=migrations.test_writer.BinaryEnum(b'value-b'))"
)
field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum(1)), "
"(2, migrations.test_writer.IntEnum(2))], "
"default=migrations.test_writer.IntEnum(1))"
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')
uuid_b = uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {'import uuid'})
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {'import uuid'})
)
field = models.UUIDField(choices=((uuid_a, 'UUID A'), (uuid_b, 'UUID B')), default=uuid_a)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))"
)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
    def test_serialize_datetime(self):
        """datetime/date/time values and callables serialize, naive and aware."""
        self.assertSerializedEqual(datetime.datetime.utcnow())
        self.assertSerializedEqual(datetime.datetime.utcnow)
        self.assertSerializedEqual(datetime.datetime.today())
        self.assertSerializedEqual(datetime.datetime.today)
        self.assertSerializedEqual(datetime.date.today())
        self.assertSerializedEqual(datetime.date.today)
        self.assertSerializedEqual(datetime.datetime.now().time())
        # Timezone-aware values must survive serialization as well.
        self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
        self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=get_fixed_timezone(180)))
        self.assertSerializedResultEqual(
            datetime.datetime(2014, 1, 1, 1, 1),
            ("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
        )
        # An aware value additionally requires the `utc` import in the output.
        self.assertSerializedResultEqual(
            datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
            (
                "datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
                {'import datetime', 'from django.utils.timezone import utc'},
            )
        )
    def test_serialize_fields(self):
        """Model fields serialize to their constructor call plus the models import."""
        self.assertSerializedFieldEqual(models.CharField(max_length=255))
        self.assertSerializedResultEqual(
            models.CharField(max_length=255),
            ("models.CharField(max_length=255)", {"from django.db import models"})
        )
        # Keyword arguments are emitted in sorted order (blank before null).
        self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
        self.assertSerializedResultEqual(
            models.TextField(null=True, blank=True),
            ("models.TextField(blank=True, null=True)", {'from django.db import models'})
        )
    def test_serialize_settings(self):
        """SettingsReference serializes to settings.<NAME>, not the literal value."""
        self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
        self.assertSerializedResultEqual(
            SettingsReference("someapp.model", "AUTH_USER_MODEL"),
            ("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
        )
    def test_serialize_iterators(self):
        """Generators are consumed and serialized as a materialized tuple."""
        self.assertSerializedResultEqual(
            ((x, x * x) for x in range(3)),
            ("((0, 0), (1, 1), (2, 4))", set())
        )
    def test_serialize_compiled_regex(self):
        """A compiled re.Pattern round-trips through serialization."""
        regex = re.compile(r'^\w+$')
        self.assertSerializedEqual(regex)
    def test_serialize_class_based_validators(self):
        """Deconstructible validators serialize to their import path + args."""
        # `validator` is a deconstructible instance, so it serializes to a call.
        validator = RegexValidator(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
        self.serialize_round_trip(validator)
        # A compiled regex argument is rendered as re.compile(...) with
        # backslashes doubled for the string literal.
        validator = RegexValidator(regex=re.compile(r'^\w+$'))
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))")
        self.serialize_round_trip(validator)
        # Regex flags are rendered via their enum value (re.S == 16).
        validator = RegexValidator(r'^[0-9]+$', flags=re.S)
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag(16))")
        self.serialize_round_trip(validator)
        validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
        self.serialize_round_trip(validator)
        validator = EmailValidator(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
        self.serialize_round_trip(validator)
        # A custom deconstruct path overrides the default module path.
        validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
        string = MigrationWriter.serialize(validator)[0]
        self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
        # Bogus paths fail loudly: missing module, then missing attribute.
        validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
        with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
            MigrationWriter.serialize(validator)
        validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
        with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
            MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
    def test_serialize_range(self):
        """range objects serialize to a range(...) call with no extra imports."""
        string, imports = MigrationWriter.serialize(range(1, 5))
        self.assertEqual(string, 'range(1, 5)')
        self.assertEqual(imports, set())
    def test_serialize_builtins(self):
        """Builtin callables serialize by bare name, with no imports needed."""
        string, imports = MigrationWriter.serialize(range)
        self.assertEqual(string, 'range')
        self.assertEqual(imports, set())
    def test_serialize_unbound_method_reference(self):
        """An unbound method defined at module scope round-trips."""
        self.serialize_round_trip(TestModel1.thing)
    def test_serialize_local_function_reference(self):
        """A function defined inside a local class scope cannot be serialized."""
        class TestModel2:
            def upload_to(self):
                return "somewhere dynamic"
            thing = models.FileField(upload_to=upload_to)
        # upload_to is not importable from module level, so serialization fails.
        with self.assertRaisesMessage(ValueError, 'Could not find function upload_to in migrations.test_writer'):
            self.serialize_round_trip(TestModel2.thing)
    def test_serialize_managers(self):
        """Default, as_manager()-derived, and custom managers all serialize."""
        self.assertSerializedEqual(models.Manager())
        # A queryset-derived manager serializes as QuerySet.as_manager().
        self.assertSerializedResultEqual(
            FoodQuerySet.as_manager(),
            ('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
        )
        # Custom managers keep positional and keyword constructor arguments.
        self.assertSerializedEqual(FoodManager('a', 'b'))
        self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
    def test_serialize_set(self):
        """Sets round-trip; the empty set renders as set(), not {} (a dict)."""
        self.assertSerializedEqual(set())
        self.assertSerializedResultEqual(set(), ('set()', set()))
        self.assertSerializedEqual({'a'})
        self.assertSerializedResultEqual({'a'}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
    def test_serialize_type_none(self):
        """NoneType itself (not None) is serializable."""
        self.assertSerializedEqual(type(None))
    def test_simple_migration(self):
        """A migration with several operation styles renders to executable code."""
        fields = {
            'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
            'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
        }
        options = {
            'verbose_name': 'My model',
            'verbose_name_plural': 'My models',
        }
        # Mix positional and keyword operation arguments to exercise both
        # rendering paths of the writer.
        migration = type("Migration", (migrations.Migration,), {
            "operations": [
                migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
                migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
                migrations.CreateModel(
                    name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
                ),
                migrations.DeleteModel("MyModel"),
                migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
            ],
            "dependencies": [("testapp", "some_other_one")],
        })
        writer = MigrationWriter(migration)
        output = writer.as_string()
        # The emitted source must exec cleanly and define a Migration class.
        result = self.safe_exec(output)
        self.assertIn("Migration", result)
    def test_migration_path(self):
        """writer.path resolves to <app>/migrations/0001_initial.py for each app layout."""
        test_apps = [
            'migrations.migrations_test_apps.normal',
            'migrations.migrations_test_apps.with_package_model',
            'migrations.migrations_test_apps.without_init_file',
        ]
        base_dir = os.path.dirname(os.path.dirname(__file__))
        for app in test_apps:
            with self.modify_settings(INSTALLED_APPS={'append': app}):
                migration = migrations.Migration('0001_initial', app.split('.')[-1])
                expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
                writer = MigrationWriter(migration)
                self.assertEqual(writer.path, expected_path)
    def test_custom_operation(self):
        """Third-party operations are imported and referenced by module path."""
        migration = type("Migration", (migrations.Migration,), {
            "operations": [
                custom_migration_operations.operations.TestOperation(),
                custom_migration_operations.operations.CreateModel(),
                migrations.CreateModel("MyModel", (), {}, (models.Model,)),
                custom_migration_operations.more_operations.TestOperation()
            ],
            "dependencies": []
        })
        writer = MigrationWriter(migration)
        output = writer.as_string()
        result = self.safe_exec(output)
        self.assertIn("custom_migration_operations", result)
        # Same class name from two submodules must stay distinct after exec.
        self.assertNotEqual(
            result['custom_migration_operations'].operations.TestOperation,
            result['custom_migration_operations'].more_operations.TestOperation
        )
    def test_sorted_imports(self):
        """Generated imports are emitted in sorted order (stdlib, django, utils)."""
        migration = type("Migration", (migrations.Migration,), {
            "operations": [
                migrations.AddField("mymodel", "myfield", models.DateTimeField(
                    default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
                )),
            ]
        })
        writer = MigrationWriter(migration)
        output = writer.as_string()
        self.assertIn(
            "import datetime\n"
            "from django.db import migrations, models\n"
            "from django.utils.timezone import utc\n",
            output
        )
    def test_migration_file_header_comments(self):
        """The 'Generated by Django' header appears iff include_header is True."""
        migration = type("Migration", (migrations.Migration,), {
            "operations": []
        })
        dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
        # Freeze the writer's clock so the header timestamp is deterministic.
        with mock.patch('django.db.migrations.writer.now', lambda: dt):
            for include_header in (True, False):
                with self.subTest(include_header=include_header):
                    writer = MigrationWriter(migration, include_header)
                    output = writer.as_string()
                    self.assertEqual(
                        include_header,
                        output.startswith(
                            "# Generated by Django %s on 2015-07-31 04:40\n\n" % get_version()
                        )
                    )
                    if not include_header:
                        # Without a header the file must not begin with a
                        # comment, indentation, or a blank line.
                        self.assertRegex(output.splitlines(keepends=True)[0], r"^[^#\s]+")
    def test_models_import_omitted(self):
        """`models` isn't imported when no operation in the migration needs it."""
        migration = type("Migration", (migrations.Migration,), {
            "operations": [
                migrations.AlterModelOptions(
                    name='model',
                    options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
                ),
            ]
        })
        writer = MigrationWriter(migration)
        output = writer.as_string()
        # Only `migrations` is imported — note the absence of `, models`.
        self.assertIn("from django.db import migrations\n", output)
    def test_deconstruct_class_arguments(self):
        """A class (not instance) used as a field default serializes by reference."""
        # Yes, it doesn't make sense to use a class as a default for a
        # CharField; the point is that a deconstructible *class* must still
        # serialize to its import path rather than an instantiation.
        class DeconstructibleInstances:
            def deconstruct(self):
                return ('DeconstructibleInstances', [], {})
        string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]
        self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructibleInstances)")
    def test_register_serializer(self):
        """register_serializer adds support for a type; unregistering removes it."""
        class ComplexSerializer(BaseSerializer):
            def serialize(self):
                return 'complex(%r)' % self.value, {}
        MigrationWriter.register_serializer(complex, ComplexSerializer)
        self.assertSerializedEqual(complex(1, 2))
        MigrationWriter.unregister_serializer(complex)
        # After unregistering, complex values are unserializable again.
        with self.assertRaisesMessage(ValueError, 'Cannot serialize: (1+2j)'):
            self.assertSerializedEqual(complex(1, 2))
    def test_register_non_serializer(self):
        """Registering a class that isn't a BaseSerializer subclass is rejected."""
        with self.assertRaisesMessage(ValueError, "'TestModel1' must inherit from 'BaseSerializer'."):
            MigrationWriter.register_serializer(complex, TestModel1)
| true
| true
|
790d6b4ad1b4eae37dac46c169794c84eb65e406
| 6,486
|
py
|
Python
|
tests/openapi/test_validation.py
|
dreuse/kinto
|
533037ad421b63419f9883653a428683c67d43b8
|
[
"Apache-2.0"
] | null | null | null |
tests/openapi/test_validation.py
|
dreuse/kinto
|
533037ad421b63419f9883653a428683c67d43b8
|
[
"Apache-2.0"
] | null | null | null |
tests/openapi/test_validation.py
|
dreuse/kinto
|
533037ad421b63419f9883653a428683c67d43b8
|
[
"Apache-2.0"
] | null | null | null |
from bravado_core.request import IncomingRequest, unmarshal_request
from bravado_core.swagger20_validator import ValidationError
from .support import OpenAPITest
class OpenAPIRequestsValidationTest(OpenAPITest):
    """Reject malformed paths, bodies, queries, and headers.

    Every test builds a deliberately invalid ``IncomingRequest`` and checks
    that ``unmarshal_request`` raises ``ValidationError`` for the targeted
    OpenAPI operation.
    """

    def setUp(self):
        super().setUp()
        # Start from a structurally empty request; each test then fills in
        # only the part (path / query / headers / JSON body) it wants to break.
        self.request = IncomingRequest()
        self.request.path = {}
        self.request.headers = {}
        self.request.query = {}
        self.request._json = {}
        self.request.json = lambda: self.request._json

    def _assert_invalid(self, operation):
        # Shared assertion: unmarshalling the current request against
        # `operation` must fail schema validation.
        self.assertRaises(ValidationError, unmarshal_request, self.request, operation)

    def test_validate_bucket_path(self):
        self._assert_invalid(self.resources["Buckets"].get_bucket)

    def test_validate_groups_path(self):
        self._assert_invalid(self.resources["Groups"].get_groups)

    def test_validate_group_path(self):
        # Each path dict is missing at least one required parameter.
        for path in ({}, {"bucket_id": "b1"}, {"id": "g1"}):
            self.request.path = path
            self._assert_invalid(self.resources["Groups"].get_group)

    def test_validate_collections_path(self):
        self._assert_invalid(self.resources["Collections"].get_collections)

    def test_validate_collection_path(self):
        for path in ({}, {"bucket_id": "b1"}, {"id": "c1"}):
            self.request.path = path
            self._assert_invalid(self.resources["Collections"].get_collection)

    def test_validate_records_path(self):
        for path in ({}, {"bucket_id": "b1"}, {"collection_id": "c1"}):
            self.request.path = path
            self._assert_invalid(self.resources["Records"].get_records)

    def test_validate_record_path(self):
        for path in ({}, {"bucket_id": "b1", "collection_id": "c1"}, {"id": "r1"}):
            self.request.path = path
            self._assert_invalid(self.resources["Records"].get_record)

    def test_validate_data(self):
        # "data" must be an object, not a bare string.
        for body in ({"data": "aaa"},):
            self.request._json = body
            self._assert_invalid(self.resources["Buckets"].create_bucket)

    def test_validate_permissions(self):
        bad_permissions = (
            {"permissions": "aaa"},                  # not an object
            {"permissions": {"read": "aaa"}},        # principals not a list
            {"permissions": {"read": [111]}},        # principal not a string
        )
        for body in bad_permissions:
            self.request._json = body
            self._assert_invalid(self.resources["Buckets"].create_bucket)

    def test_validate_queries(self):
        bad_queries = ({"_since": "aaa"}, {"_before": "aaa"}, {"_limit": "aaa"}, {"_token": {}})
        for query in bad_queries:
            self.request.query = query
            self._assert_invalid(self.resources["Buckets"].get_buckets)

    def test_validate_headers(self):
        # ETag headers must be quoted strings; bare integers are invalid.
        for headers in ({"If-None-Match": "123"}, {"If-Match": "123"}):
            self.request.headers = headers
            self._assert_invalid(self.resources["Buckets"].get_buckets)

    def test_validate_batch_requests_method(self):
        self.request._json = {"requests": [{"method": "AAA", "path": "/buckets/b1"}]}
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_requests_path(self):
        self.request._json = {"requests": [{"method": "GET", "path": 123}]}
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_requests_body(self):
        self.request._json = {"requests": [{"method": "GET", "path": "/buckets/b1", "body": []}]}
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_requests_header(self):
        self.request._json = {
            "requests": [{"method": "GET", "path": "/buckets/b1", "body": {}, "headers": []}]
        }
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_defaults(self):
        self.request._json = {
            "defaults": [],
            "requests": [{"method": "GET", "path": "/buckets/b1"}],
        }
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_defaults_method(self):
        self.request._json = {"defaults": {"method": "AAA"}, "requests": [{"path": "/buckets/b1"}]}
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_defaults_body(self):
        self.request._json = {
            "defaults": {"body": []},
            "requests": [{"method": "PUT", "path": "/buckets/b1"}],
        }
        self._assert_invalid(self.resources["Batch"].batch)

    def test_validate_batch_defaults_headers(self):
        self.request._json = {
            "defaults": {"headers": []},
            "requests": [{"method": "GET", "path": "/buckets/b1"}],
        }
        self._assert_invalid(self.resources["Batch"].batch)
| 34.870968
| 99
| 0.557046
|
from bravado_core.request import IncomingRequest, unmarshal_request
from bravado_core.swagger20_validator import ValidationError
from .support import OpenAPITest
class OpenAPIRequestsValidationTest(OpenAPITest):
def setUp(self):
super().setUp()
self.request = IncomingRequest()
self.request.path = {}
self.request.headers = {}
self.request.query = {}
self.request._json = {}
self.request.json = lambda: self.request._json
def test_validate_bucket_path(self):
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Buckets"].get_bucket
)
def test_validate_groups_path(self):
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Groups"].get_groups
)
def test_validate_group_path(self):
paths = [{}, {"bucket_id": "b1"}, {"id": "g1"}]
for path in paths:
self.request.path = path
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Groups"].get_group,
)
def test_validate_collections_path(self):
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Collections"].get_collections,
)
def test_validate_collection_path(self):
paths = [{}, {"bucket_id": "b1"}, {"id": "c1"}]
for path in paths:
self.request.path = path
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Collections"].get_collection,
)
def test_validate_records_path(self):
paths = [{}, {"bucket_id": "b1"}, {"collection_id": "c1"}]
for path in paths:
self.request.path = path
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Records"].get_records,
)
def test_validate_record_path(self):
paths = [{}, {"bucket_id": "b1", "collection_id": "c1"}, {"id": "r1"}]
for path in paths:
self.request.path = path
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Records"].get_record,
)
def test_validate_data(self):
bodies = [{"data": "aaa"}]
for body in bodies:
self.request._json = body
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Buckets"].create_bucket,
)
def test_validate_permissions(self):
bodies = [
{"permissions": "aaa"},
{"permissions": {"read": "aaa"}},
{"permissions": {"read": [111]}},
]
for body in bodies:
self.request._json = body
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Buckets"].create_bucket,
)
def test_validate_queries(self):
queries = [{"_since": "aaa"}, {"_before": "aaa"}, {"_limit": "aaa"}, {"_token": {}}]
for query in queries:
self.request.query = query
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Buckets"].get_buckets,
)
def test_validate_headers(self):
headers = [{"If-None-Match": "123"}, {"If-Match": "123"}]
for head in headers:
self.request.headers = head
self.assertRaises(
ValidationError,
unmarshal_request,
self.request,
self.resources["Buckets"].get_buckets,
)
def test_validate_batch_requests_method(self):
self.request._json = {"requests": [{"method": "AAA", "path": "/buckets/b1"}]}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_requests_path(self):
self.request._json = {"requests": [{"method": "GET", "path": 123}]}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_requests_body(self):
self.request._json = {"requests": [{"method": "GET", "path": "/buckets/b1", "body": []}]}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_requests_header(self):
self.request._json = {
"requests": [{"method": "GET", "path": "/buckets/b1", "body": {}, "headers": []}]
}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_defaults(self):
self.request._json = {
"defaults": [],
"requests": [{"method": "GET", "path": "/buckets/b1"}],
}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_defaults_method(self):
self.request._json = {"defaults": {"method": "AAA"}, "requests": [{"path": "/buckets/b1"}]}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_defaults_body(self):
self.request._json = {
"defaults": {"body": []},
"requests": [{"method": "PUT", "path": "/buckets/b1"}],
}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
def test_validate_batch_defaults_headers(self):
self.request._json = {
"defaults": {"headers": []},
"requests": [{"method": "GET", "path": "/buckets/b1"}],
}
self.assertRaises(
ValidationError, unmarshal_request, self.request, self.resources["Batch"].batch
)
| true
| true
|
790d6bc82351cc457b84198eafb931c2ef75e9b8
| 17,632
|
py
|
Python
|
tests/bugs/core_5275_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_5275_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_5275_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_5275
# title: CORE-5275: Expression index may become inconsistent if CREATE INDEX was interrupted after b-tree creation but before commiting
# decription:
# This test (and CORE- ticket) has been created after wrong initial implementation of test for CORE-1746.
# Scenario:
# 1. ISQL_1 is launched as child async. process, inserts 1000 rows and then falls in pause (delay) ~10 seconds;
# 2. ISQL_2 is launched as child async. process in Tx = WAIT, tries to create index on the table which is handled
# by ISQL_1 and immediatelly falls in pause because of waiting for table lock.
# 3. ISQL_3 is launched in SYNC mode and does 'DELETE FROM MON$ATTACHMENTS' thus forcing other attachments to be
# closed with raising 00803/connection shutdown.
# 4. Repeat step 1. On WI-T4.0.0.258 this step lead to:
# "invalid SEND request (167), file: JrdStatement.cpp line: 325", 100% reproducilbe.
#
# Checked on WI-V2.5.6.27017 (SC), WI-V3.0.1.32539 (SS/SC/CS), WI-T4.0.0.262 (SS) - works fine.
#
# Beside above mentioned steps, we also:
# 1) compare content of old/new firebird.log (difference): it should NOT contain line "consistency check";
# 2) run database online validation: it should NOT report any error in the database.
#
# :::::::::::::::::::::::::::::::::::::::: NB ::::::::::::::::::::::::::::::::::::
# 18.08.2020. FB 4.x has incompatible behaviour with all previous versions since build 4.0.0.2131 (06-aug-2020):
# statement 'alter sequence <seq_name> restart with 0' changes rdb$generators.rdb$initial_value to -1 thus next call
# gen_id(<seq_name>,1) will return 0 (ZERO!) rather than 1.
# See also CORE-6084 and its fix: https://github.com/FirebirdSQL/firebird/commit/23dc0c6297825b2e9006f4d5a2c488702091033d
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# This is considered as *expected* and is noted in doc/README.incompatibilities.3to4.txt
#
# Because of this, it was decided to replace 'alter sequence restart...' with subtraction of two gen values:
# c = gen_id(<g>, -gen_id(<g>, 0)) -- see procedure sp_restart_sequences.
#
# Checked on:
# 4.0.0.2164 SS: 15.932s.
# 4.0.0.2119 SS: 16.141s.
# 4.0.0.2164 CS: 17.549s.
# 3.0.7.33356 SS: 17.446s.
# 3.0.7.33356 CS: 18.321s.
# 2.5.9.27150 SC: 13.768s.
#
# tracker_id: CORE-5275
# min_versions: ['2.5.6']
# versions: 2.5.6
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.6
# resources: None
# Output filters: collapse volatile parts of the logs (timestamps, blob ids,
# relation numbers, admin-kill notices) so expected/actual output can match
# across runs and server versions.
substitutions_1 = [('0: CREATE INDEX LOG: RDB_EXPR_BLOB.*', '0: CREATE INDEX LOG: RDB_EXPR_BLOB'), ('BULK_INSERT_START.*', 'BULK_INSERT_START'), ('.*KILLED BY DATABASE ADMINISTRATOR.*', ''), ('BULK_INSERT_FINISH.*', 'BULK_INSERT_FINISH'), ('CREATE_INDX_START.*', 'CREATE_INDX_START'), ('AFTER LINE.*', 'AFTER LINE'), ('RECORDS AFFECTED:.*', 'RECORDS AFFECTED:'), ('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('RELATION [0-9]{3,4}', 'RELATION')]
# No schema objects are created up front; the (legacy) test body builds its own.
init_script_1 = """"""
# Dialect-3 test database fixture, initialized with the (empty) script above.
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import time
# import difflib
# import subprocess
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_file=db_conn.database_name
# engine =str(db_conn.engine_version)
#
# db_conn.close()
#
# #--------------------------------------------
#
# def flush_and_close(file_handle):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# def svc_get_fb_log( engine, f_fb_log ):
#
# import subprocess
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
#           # Example (Windows, FB 2.5):
#           #   C:\MIX\firebird\oldfb25\bin\fbsvcmgr localhost:service_mgr -user sysdba -password masterkey action_get_ib_log
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
# ],
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
#
# return
#
# sql_ddl='''
# create or alter procedure sp_ins(n int) as begin end;
#
# recreate table test(x int unique using index test_x, s varchar(10) default 'qwerty' );
#
# set term ^;
# execute block as
# begin
# execute statement 'drop sequence g';
# when any do begin end
# end
# ^
# set term ;^
# commit;
# create sequence g;
# commit;
#
# set term ^;
# create or alter procedure sp_ins(n int) as
# begin
# while (n>0) do
# begin
# insert into test( x ) values( gen_id(g,1) );
# n = n - 1;
# end
# end
# ^
# set term ;^
# commit;
# '''
# runProgram('isql',[dsn],sql_ddl)
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
# #########################################################
#
# rows_to_add=1000
#
# sql_bulk_insert=''' set bail on;
# set list on;
#
# -- DISABLED 19.08.2020: alter sequence g restart with 0;
#
# delete from test;
# commit;
# set transaction lock timeout 10; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY, see below auton Tx start.
#
# select current_timestamp as bulk_insert_start from rdb$database;
# set term ^;
# execute block as
# declare i int;
# begin
# i = gen_id(g, -gen_id(g, 0)); -- restart sequence, since 19.08.2020
# execute procedure sp_ins( %(rows_to_add)s );
# begin
# -- #########################################################
# -- ####################### D E L A Y #####################
# -- #########################################################
# in autonomous transaction do
# insert into test( x ) values( %(rows_to_add)s ); -- this will cause delay because of duplicate in index
# when any do
# begin
# i = gen_id(g,1);
# end
# end
# end
# ^
# set term ;^
# commit;
# select current_timestamp as bulk_insert_finish from rdb$database;
# '''
#
# sql_create_indx=''' set bail on;
# set list on;
# set blob all;
# select
# iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + %(rows_to_add)s,
# 'OK, IS RUNNING',
# iif( gen_id(g,0) <=0,
# 'WRONG: not yet started, current gen_id='||gen_id(g,0),
# 'WRONG: already finished, rows_to_add='||%(rows_to_add)s ||', current gen_id='||gen_id(g,0)
# )
# ) as inserts_state,
# current_timestamp as create_indx_start
# from rdb$database;
# set autoddl off;
# commit;
#
# set echo on;
# set transaction %(tx_decl)s;
#
# create index test_%(idx_name)s on test computed by( %(idx_expr)s );
# set echo off;
# commit;
#
# select
# iif( gen_id(g,0) >= 1 + %(rows_to_add)s,
# 'OK, FINISHED',
# 'SOMETHING WRONG: current gen_id=' || gen_id(g,0)||', rows_to_add='||%(rows_to_add)s
# ) as inserts_state
# from rdb$database;
#
# set count on;
# select
# rdb$index_name
# ,coalesce(rdb$unique_flag,0) as rdb$unique_flag
# ,coalesce(rdb$index_inactive,0) as rdb$index_inactive
# ,rdb$expression_source as rdb_expr_blob
# from rdb$indices ri
# where ri.rdb$index_name = upper( 'test_%(idx_name)s' )
# ;
# set count off;
# set echo on;
# set plan on;
# select 1 from test where %(idx_expr)s > '' rows 0;
# set plan off;
# set echo off;
# commit;
# drop index test_%(idx_name)s;
# commit;
# '''
#
# sql_kill_att=''' set count on;
# set list on;
# commit;
# delete from mon$attachments where mon$attachment_id<>current_connection;
# '''
#
# f_kill_att_sql = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.sql' ), 'w')
# f_kill_att_sql.write( sql_kill_att )
# flush_and_close( f_kill_att_sql )
#
# tx_param=['WAIT','WAIT']
#
# for i in range(len(tx_param)):
#
# f_bulk_insert_sql = open( os.path.join(context['temp_directory'],'tmp_5275_ins.sql'), 'w')
# f_bulk_insert_sql.write(sql_bulk_insert % locals() )
# flush_and_close( f_bulk_insert_sql )
#
# tx_decl=tx_param[i]
# idx_name=tx_decl.replace(' ','_')
# idx_expr="'"+idx_name+"'|| s"
#
# f_create_indx_sql = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.sql' % str(i) ), 'w')
# f_create_indx_sql.write( sql_create_indx % locals() )
# flush_and_close( f_create_indx_sql )
#
# f_bulk_insert_log = open( os.path.join(context['temp_directory'],'tmp_5275_ins_%s.log' % str(i) ), 'w')
# f_create_indx_log = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.log' % str(i) ), 'w')
#
# p_bulk_insert=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_bulk_insert_sql.name ],
# stdout = f_bulk_insert_log,
# stderr = subprocess.STDOUT
# )
#
# # 3.0 Classic: seems that it requires at least 2 seconds for ISQL be loaded into memory.
# time.sleep(2)
#
# p_create_indx=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_create_indx_sql.name ],
# stdout = f_create_indx_log,
# stderr = subprocess.STDOUT
# )
# time.sleep(2)
#
# f_kill_att_log = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.log' ), 'w')
#
# subprocess.call( [context['isql_path'], dsn, "-q", "-i", f_kill_att_sql.name ],
# stdout = f_kill_att_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_kill_att_log )
#
# # 11.05.2017, FB 4.0 only!
# # Following messages can appear after 'connection shutdown'
# # (letter from dimitr, 08-may-2017 20:41):
# # isc_att_shut_killed: Killed by database administrator
# # isc_att_shut_idle: Idle timeout expired
# # isc_att_shut_db_down: Database is shutdown
# # isc_att_shut_engine: Engine is shutdown
#
# # do NOT remove this delay, otherwise ISQL logs in 2.5.x will contain NO text with error message
# # STATEMENT FAILED, SQLSTATE = 08003 / CONNECTION SHUTDOWN:
# time.sleep(1)
#
# p_create_indx.terminate()
# p_bulk_insert.terminate()
#
# flush_and_close( f_bulk_insert_log )
# flush_and_close( f_create_indx_log )
#
# with open( f_bulk_insert_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': BULK INSERTS LOG: '+line.strip().upper() )
#
# with open( f_create_indx_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': CREATE INDEX LOG: '+line.strip().upper() )
#
# with open( f_kill_att_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': KILL ATTACH LOG: '+line.upper() )
#
# # cleanup (nitermediate):
# #########
# time.sleep(1)
# cleanup( (f_bulk_insert_sql, f_create_indx_sql, f_bulk_insert_log, f_create_indx_log, f_kill_att_log) )
#
# # ------------------------------------------------------------------------------------------
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
# # Now we can compare two versions of firebird.log and check their difference.
# #################################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5275_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# # This should be empty:
# #######################
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# # internal Firebird consistency check (invalid SEND request (167), file: JrdStatement.cpp line: 325)
# if 'consistency check' in line:
# print('NEW IN FIREBIRD.LOG: '+line.upper())
#
#
# #--------------------------------------------------------------------------------------------
#
# f_validate_log=open( os.path.join(context['temp_directory'],'tmp_5275_validate.log'), "w")
# f_validate_err=open( os.path.join(context['temp_directory'],'tmp_5275_validate.err'), "w")
#
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate",
# "dbname", "$(DATABASE_LOCATION)bugs.core_5275.fdb"
# ],
# stdout=f_validate_log,
# stderr=f_validate_err)
# flush_and_close( f_validate_log )
# flush_and_close( f_validate_err )
#
# with open( f_validate_log.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDOUT: '+line.upper() )
#
# with open( f_validate_err.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDERR: '+line.upper() )
#
# # cleanup
# #########
# time.sleep(1)
# cleanup( (f_validate_log, f_validate_err, f_kill_att_sql, f_fblog_before, f_fblog_after, f_diff_txt) )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
0: BULK INSERTS LOG: BULK_INSERT_START
0: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
0: BULK INSERTS LOG: CONNECTION SHUTDOWN
0: BULK INSERTS LOG: AFTER LINE
0: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
0: CREATE INDEX LOG: CREATE_INDX_START
0: CREATE INDEX LOG: SET TRANSACTION WAIT;
0: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
0: CREATE INDEX LOG: SET ECHO OFF;
0: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
0: CREATE INDEX LOG: CONNECTION SHUTDOWN
0: CREATE INDEX LOG: AFTER LINE
0: KILL ATTACH LOG: RECORDS AFFECTED:
1: BULK INSERTS LOG: BULK_INSERT_START
1: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
1: BULK INSERTS LOG: CONNECTION SHUTDOWN
1: BULK INSERTS LOG: AFTER LINE
1: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
1: CREATE INDEX LOG: CREATE_INDX_START
1: CREATE INDEX LOG: SET TRANSACTION WAIT;
1: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
1: CREATE INDEX LOG: SET ECHO OFF;
1: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
1: CREATE INDEX LOG: CONNECTION SHUTDOWN
1: CREATE INDEX LOG: AFTER LINE
1: KILL ATTACH LOG: RECORDS AFFECTED:
VALIDATION STDOUT: 20:05:26.86 VALIDATION STARTED
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST)
VALIDATION STDOUT: 20:05:26.86 PROCESS POINTER PAGE 0 OF 1
VALIDATION STDOUT: 20:05:26.86 INDEX 1 (TEST_X)
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST) IS OK
VALIDATION STDOUT: 20:05:26.86 VALIDATION FINISHED
"""
@pytest.mark.version('>=2.5.6')
@pytest.mark.xfail
def test_1(db_1):
    """Placeholder: the original fbtest scenario has not been ported yet,
    so the test fails explicitly (and is marked xfail) until implemented."""
    pytest.fail("Test not IMPLEMENTED")
| 39.711712
| 452
| 0.556148
|
import pytest
from firebird.qa import db_factory, isql_act, Action
# Output filters: mask volatile fragments of the isql/validation output
# (timestamps, relation ids, shutdown reasons, trailing text after known
# markers) so comparison against expected_stdout_1 stays stable.
substitutions_1 = [('0: CREATE INDEX LOG: RDB_EXPR_BLOB.*', '0: CREATE INDEX LOG: RDB_EXPR_BLOB'), ('BULK_INSERT_START.*', 'BULK_INSERT_START'), ('.*KILLED BY DATABASE ADMINISTRATOR.*', ''), ('BULK_INSERT_FINISH.*', 'BULK_INSERT_FINISH'), ('CREATE_INDX_START.*', 'CREATE_INDX_START'), ('AFTER LINE.*', 'AFTER LINE'), ('RECORDS AFFECTED:.*', 'RECORDS AFFECTED:'), ('[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]', ''), ('RELATION [0-9]{3,4}', 'RELATION')]
# No up-front schema: the (commented-out) test script creates its own objects.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# def svc_get_fb_log( engine, f_fb_log ):
#
# import subprocess
#
# if engine.startswith('2.5'):
# get_firebird_log_key='action_get_ib_log'
# else:
# get_firebird_log_key='action_get_fb_log'
#
# # C:\\MIX
# irebird\\oldfb251in
# bsvcmgr localhost:service_mgr -user sysdba -password masterkey action_get_ib_log
# subprocess.call([ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# get_firebird_log_key
# ],
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
#
# return
#
# sql_ddl='''
# create or alter procedure sp_ins(n int) as begin end;
#
# recreate table test(x int unique using index test_x, s varchar(10) default 'qwerty' );
#
# set term ^;
# execute block as
# begin
# execute statement 'drop sequence g';
# when any do begin end
# end
# ^
# set term ;^
# commit;
# create sequence g;
# commit;
#
# set term ^;
# create or alter procedure sp_ins(n int) as
# begin
# while (n>0) do
# begin
# insert into test( x ) values( gen_id(g,1) );
# n = n - 1;
# end
# end
# ^
# set term ;^
# commit;
# '''
# runProgram('isql',[dsn],sql_ddl)
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_before.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_before )
# flush_and_close( f_fblog_before )
#
# #########################################################
#
# rows_to_add=1000
#
# sql_bulk_insert=''' set bail on;
# set list on;
#
# -- DISABLED 19.08.2020: alter sequence g restart with 0;
#
# delete from test;
# commit;
# set transaction lock timeout 10; -- THIS LOCK TIMEOUT SERVES ONLY FOR DELAY, see below auton Tx start.
#
# select current_timestamp as bulk_insert_start from rdb$database;
# set term ^;
# execute block as
# declare i int;
# begin
# i = gen_id(g, -gen_id(g, 0)); -- restart sequence, since 19.08.2020
# execute procedure sp_ins( %(rows_to_add)s );
# begin
# -- #########################################################
# -- ####################### D E L A Y #####################
# -- #########################################################
# in autonomous transaction do
# insert into test( x ) values( %(rows_to_add)s ); -- this will cause delay because of duplicate in index
# when any do
# begin
# i = gen_id(g,1);
# end
# end
# end
# ^
# set term ;^
# commit;
# select current_timestamp as bulk_insert_finish from rdb$database;
# '''
#
# sql_create_indx=''' set bail on;
# set list on;
# set blob all;
# select
# iif( gen_id(g,0) > 0 and gen_id(g,0) < 1 + %(rows_to_add)s,
# 'OK, IS RUNNING',
# iif( gen_id(g,0) <=0,
# 'WRONG: not yet started, current gen_id='||gen_id(g,0),
# 'WRONG: already finished, rows_to_add='||%(rows_to_add)s ||', current gen_id='||gen_id(g,0)
# )
# ) as inserts_state,
# current_timestamp as create_indx_start
# from rdb$database;
# set autoddl off;
# commit;
#
# set echo on;
# set transaction %(tx_decl)s;
#
# create index test_%(idx_name)s on test computed by( %(idx_expr)s );
# set echo off;
# commit;
#
# select
# iif( gen_id(g,0) >= 1 + %(rows_to_add)s,
# 'OK, FINISHED',
# 'SOMETHING WRONG: current gen_id=' || gen_id(g,0)||', rows_to_add='||%(rows_to_add)s
# ) as inserts_state
# from rdb$database;
#
# set count on;
# select
# rdb$index_name
# ,coalesce(rdb$unique_flag,0) as rdb$unique_flag
# ,coalesce(rdb$index_inactive,0) as rdb$index_inactive
# ,rdb$expression_source as rdb_expr_blob
# from rdb$indices ri
# where ri.rdb$index_name = upper( 'test_%(idx_name)s' )
# ;
# set count off;
# set echo on;
# set plan on;
# select 1 from test where %(idx_expr)s > '' rows 0;
# set plan off;
# set echo off;
# commit;
# drop index test_%(idx_name)s;
# commit;
# '''
#
# sql_kill_att=''' set count on;
# set list on;
# commit;
# delete from mon$attachments where mon$attachment_id<>current_connection;
# '''
#
# f_kill_att_sql = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.sql' ), 'w')
# f_kill_att_sql.write( sql_kill_att )
# flush_and_close( f_kill_att_sql )
#
# tx_param=['WAIT','WAIT']
#
# for i in range(len(tx_param)):
#
# f_bulk_insert_sql = open( os.path.join(context['temp_directory'],'tmp_5275_ins.sql'), 'w')
# f_bulk_insert_sql.write(sql_bulk_insert % locals() )
# flush_and_close( f_bulk_insert_sql )
#
# tx_decl=tx_param[i]
# idx_name=tx_decl.replace(' ','_')
# idx_expr="'"+idx_name+"'|| s"
#
# f_create_indx_sql = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.sql' % str(i) ), 'w')
# f_create_indx_sql.write( sql_create_indx % locals() )
# flush_and_close( f_create_indx_sql )
#
# f_bulk_insert_log = open( os.path.join(context['temp_directory'],'tmp_5275_ins_%s.log' % str(i) ), 'w')
# f_create_indx_log = open( os.path.join(context['temp_directory'],'tmp_5275_idx_%s.log' % str(i) ), 'w')
#
# p_bulk_insert=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_bulk_insert_sql.name ],
# stdout = f_bulk_insert_log,
# stderr = subprocess.STDOUT
# )
#
# # 3.0 Classic: seems that it requires at least 2 seconds for ISQL be loaded into memory.
# time.sleep(2)
#
# p_create_indx=subprocess.Popen( [context['isql_path'], dsn, "-q", "-i", f_create_indx_sql.name ],
# stdout = f_create_indx_log,
# stderr = subprocess.STDOUT
# )
# time.sleep(2)
#
# f_kill_att_log = open( os.path.join(context['temp_directory'],'tmp_5275_kill_att.log' ), 'w')
#
# subprocess.call( [context['isql_path'], dsn, "-q", "-i", f_kill_att_sql.name ],
# stdout = f_kill_att_log,
# stderr = subprocess.STDOUT
# )
# flush_and_close( f_kill_att_log )
#
# # 11.05.2017, FB 4.0 only!
# # Following messages can appear after 'connection shutdown'
# # (letter from dimitr, 08-may-2017 20:41):
# # isc_att_shut_killed: Killed by database administrator
# # isc_att_shut_idle: Idle timeout expired
# # isc_att_shut_db_down: Database is shutdown
# # isc_att_shut_engine: Engine is shutdown
#
# # do NOT remove this delay, otherwise ISQL logs in 2.5.x will contain NO text with error message
# # STATEMENT FAILED, SQLSTATE = 08003 / CONNECTION SHUTDOWN:
# time.sleep(1)
#
# p_create_indx.terminate()
# p_bulk_insert.terminate()
#
# flush_and_close( f_bulk_insert_log )
# flush_and_close( f_create_indx_log )
#
# with open( f_bulk_insert_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': BULK INSERTS LOG: '+line.strip().upper() )
#
# with open( f_create_indx_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': CREATE INDEX LOG: '+line.strip().upper() )
#
# with open( f_kill_att_log.name,'r') as f:
# for line in f:
# if line.split():
# print( str(i)+': KILL ATTACH LOG: '+line.upper() )
#
#     # cleanup (intermediate):
# #########
# time.sleep(1)
# cleanup( (f_bulk_insert_sql, f_create_indx_sql, f_bulk_insert_log, f_create_indx_log, f_kill_att_log) )
#
# # ------------------------------------------------------------------------------------------
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5275_fblog_after.txt'), 'w')
# svc_get_fb_log( engine, f_fblog_after )
# flush_and_close( f_fblog_after )
#
# # Now we can compare two versions of firebird.log and check their difference.
# #################################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5275_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# # This should be empty:
# #######################
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# # internal Firebird consistency check (invalid SEND request (167), file: JrdStatement.cpp line: 325)
# if 'consistency check' in line:
# print('NEW IN FIREBIRD.LOG: '+line.upper())
#
#
# #--------------------------------------------------------------------------------------------
#
# f_validate_log=open( os.path.join(context['temp_directory'],'tmp_5275_validate.log'), "w")
# f_validate_err=open( os.path.join(context['temp_directory'],'tmp_5275_validate.err'), "w")
#
# subprocess.call([context['fbsvcmgr_path'],"localhost:service_mgr",
# "action_validate",
# "dbname", "$(DATABASE_LOCATION)bugs.core_5275.fdb"
# ],
# stdout=f_validate_log,
# stderr=f_validate_err)
# flush_and_close( f_validate_log )
# flush_and_close( f_validate_err )
#
# with open( f_validate_log.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDOUT: '+line.upper() )
#
# with open( f_validate_err.name,'r') as f:
# for line in f:
# if line.split():
# print( 'VALIDATION STDERR: '+line.upper() )
#
# # cleanup
# #########
# time.sleep(1)
# cleanup( (f_validate_log, f_validate_err, f_kill_att_sql, f_fblog_before, f_fblog_after, f_diff_txt) )
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
0: BULK INSERTS LOG: BULK_INSERT_START
0: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
0: BULK INSERTS LOG: CONNECTION SHUTDOWN
0: BULK INSERTS LOG: AFTER LINE
0: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
0: CREATE INDEX LOG: CREATE_INDX_START
0: CREATE INDEX LOG: SET TRANSACTION WAIT;
0: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
0: CREATE INDEX LOG: SET ECHO OFF;
0: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
0: CREATE INDEX LOG: CONNECTION SHUTDOWN
0: CREATE INDEX LOG: AFTER LINE
0: KILL ATTACH LOG: RECORDS AFFECTED:
1: BULK INSERTS LOG: BULK_INSERT_START
1: BULK INSERTS LOG: STATEMENT FAILED, SQLSTATE = 08003
1: BULK INSERTS LOG: CONNECTION SHUTDOWN
1: BULK INSERTS LOG: AFTER LINE
1: CREATE INDEX LOG: INSERTS_STATE OK, IS RUNNING
1: CREATE INDEX LOG: CREATE_INDX_START
1: CREATE INDEX LOG: SET TRANSACTION WAIT;
1: CREATE INDEX LOG: CREATE INDEX TEST_WAIT ON TEST COMPUTED BY( 'WAIT'|| S );
1: CREATE INDEX LOG: SET ECHO OFF;
1: CREATE INDEX LOG: STATEMENT FAILED, SQLSTATE = 08003
1: CREATE INDEX LOG: CONNECTION SHUTDOWN
1: CREATE INDEX LOG: AFTER LINE
1: KILL ATTACH LOG: RECORDS AFFECTED:
VALIDATION STDOUT: 20:05:26.86 VALIDATION STARTED
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST)
VALIDATION STDOUT: 20:05:26.86 PROCESS POINTER PAGE 0 OF 1
VALIDATION STDOUT: 20:05:26.86 INDEX 1 (TEST_X)
VALIDATION STDOUT: 20:05:26.86 RELATION 128 (TEST) IS OK
VALIDATION STDOUT: 20:05:26.86 VALIDATION FINISHED
"""
@pytest.mark.version('>=2.5.6')
@pytest.mark.xfail
def test_1(db_1):
    """Placeholder: the original fbtest scenario has not been ported yet,
    so the test fails explicitly (and is marked xfail) until implemented."""
    pytest.fail("Test not IMPLEMENTED")
| true
| true
|
790d6c64277cf767cc545c71a99683f5f5160fa9
| 962
|
py
|
Python
|
saas/backend/debug/urls.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 7
|
2021-08-13T03:48:16.000Z
|
2021-12-20T15:31:38.000Z
|
saas/backend/debug/urls.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 456
|
2021-08-16T02:13:57.000Z
|
2022-03-30T10:02:49.000Z
|
saas/backend/debug/urls.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 17
|
2021-08-10T04:08:46.000Z
|
2022-03-14T14:24:36.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.urls import path
from . import views
# Route table for the debug app: list view at the collection root and a
# detail (retrieve) view keyed by the debug record id.
urlpatterns = [
    path("", views.DebugViewSet.as_view({"get": "list"}), name="debug.list_debug"),
    path("<str:id>/", views.DebugViewSet.as_view({"get": "retrieve"}), name="debug.detail"),
]
| 50.631579
| 115
| 0.754678
|
from django.urls import path
from . import views
# Route table for the debug app: list view at the collection root and a
# detail (retrieve) view keyed by the debug record id.
urlpatterns = [
    path("", views.DebugViewSet.as_view({"get": "list"}), name="debug.list_debug"),
    path("<str:id>/", views.DebugViewSet.as_view({"get": "retrieve"}), name="debug.detail"),
]
| true
| true
|
790d6cb3a155ec439cad0318ff3fa9dce5e228ef
| 267
|
py
|
Python
|
pylint_django_translations/plugin.py
|
troyjfarrell/pylint_django_translations
|
b6c5349379024cdc5445499229bc31330591049a
|
[
"BSD-3-Clause"
] | null | null | null |
pylint_django_translations/plugin.py
|
troyjfarrell/pylint_django_translations
|
b6c5349379024cdc5445499229bc31330591049a
|
[
"BSD-3-Clause"
] | null | null | null |
pylint_django_translations/plugin.py
|
troyjfarrell/pylint_django_translations
|
b6c5349379024cdc5445499229bc31330591049a
|
[
"BSD-3-Clause"
] | null | null | null |
"Plugin registration"
from pylint.lint import PyLinter
from .checkers import register_checkers
from .suppression import suppress_warnings
def register(linter: PyLinter) -> None:
    """Entry point invoked by pylint to activate this plugin.

    Wires the custom checkers into *linter*, then installs the
    warning suppressions on top of them.
    """
    for activate in (register_checkers, suppress_warnings):
        activate(linter)
| 22.25
| 42
| 0.790262
|
from pylint.lint import PyLinter
from .checkers import register_checkers
from .suppression import suppress_warnings
def register(linter: PyLinter) -> None:
    """Pylint plugin entry point: attach checkers, then suppressions."""
    setup_steps = (register_checkers, suppress_warnings)
    for step in setup_steps:
        step(linter)
| true
| true
|
790d6e47e1a19163d57d299b3382141bc440e8c5
| 11,598
|
py
|
Python
|
Cinder/Ocata/extend/fc_zone_helper.py
|
doubletao318/New
|
1be04d22592af4150a58129e4169d2ea1df25379
|
[
"Apache-2.0"
] | 14
|
2019-05-25T01:55:50.000Z
|
2021-02-23T06:54:06.000Z
|
Cinder/Ocata/extend/fc_zone_helper.py
|
doubletao318/New
|
1be04d22592af4150a58129e4169d2ea1df25379
|
[
"Apache-2.0"
] | 4
|
2019-12-31T08:46:30.000Z
|
2021-10-30T09:27:58.000Z
|
Cinder/Ocata/extend/fc_zone_helper.py
|
doubletao318/New
|
1be04d22592af4150a58129e4169d2ea1df25379
|
[
"Apache-2.0"
] | 17
|
2019-07-31T03:13:07.000Z
|
2022-02-21T08:09:15.000Z
|
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LE
from cinder.i18n import _LI
from cinder.volume import configuration as config
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
controller_list = ['A', 'B', 'C', 'D']
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management')
]
class FCZoneHelper(object):
    """FC zone helper for Huawei driver.

    Combines the array's FC ports (queried through ``client``) with the
    fabric topology and active zone sets (queried through the zone
    manager ``zm``) to build initiator/target maps for FC zoning.
    """

    def __init__(self, zm, client):
        # zm: cinder FC zone manager; client: Huawei array client used to
        # query FC ports and register initiators on the array.
        self.zm = zm
        self.client = client

    def _check_fc_port_and_init(self, wwns, hostid, fabric_map, nsinfos):
        """Check FC port on array and wwn on host is connected to switch.

        If no FC port on array is connected to switch or no ini on host is
        connected to switch, raise a error.
        """
        if not fabric_map:
            msg = _('No FC port on array is connected to switch.')
            LOG.error(msg)
            raise exception.CinderException(msg)
        no_wwn_connected_to_switch = True
        for wwn in wwns:
            formatted_initiator = fczm_utils.get_formatted_wwn(wwn)
            for fabric in fabric_map:
                nsinfo = nsinfos[fabric]
                if formatted_initiator in nsinfo:
                    # Initiator is logged in to this fabric: remember that
                    # and make sure the array has it registered.
                    no_wwn_connected_to_switch = False
                    self.client.ensure_fc_initiator_added(wwn, hostid)
                    break
        if no_wwn_connected_to_switch:
            msg = _('No wwn on host is connected to switch.')
            LOG.error(msg)
            raise exception.CinderException(msg)

    def build_ini_tgt_map(self, wwns, host_id, port_list, is_add):
        """Return (tgt_port_wwns, init_targ_map) for the given initiators.

        Fetches name-server info and the active zone set once per fabric,
        validates switch connectivity, then delegates to
        _build_ini_tgt_map.
        """
        fabric_map = self.zm.get_san_context(port_list)
        nsinfos = {}
        cfgmap_from_fabrics = {}
        for fabric in fabric_map:
            nsinfos[fabric] = self._get_nameserver_info(fabric)
            cfgmap_from_fabrics[fabric] = self._get_active_zone_set(fabric)
        self._check_fc_port_and_init(wwns, host_id, fabric_map, nsinfos)
        return self._build_ini_tgt_map(wwns, is_add, nsinfos,
                                       cfgmap_from_fabrics)

    def _build_ini_tgt_map(self, wwns, need_add_con, nsinfos,
                           cfgmap_from_fabrics):
        """Pick one target port per controller for every initiator wwn.

        Ports already zoned with the initiator go straight into the
        returned mapping; ports that still need zoning are collected per
        wwn and (when need_add_con is set) passed to zm.add_connection.
        """
        tgt_port_wwns = []
        init_targ_map_total = {}
        # One fabric map per controller that actually has FC ports.
        fabric_maps = {}
        for contr in controller_list:
            port_list_from_contr = self.client.get_fc_ports_from_contr(contr)
            if port_list_from_contr:
                fabric_maps[contr] = self.zm.get_san_context(
                    port_list_from_contr)
        for wwn in wwns:
            init_targ_map = {}
            tmp_port_list = []
            tgt_port_for_map = []
            tmp_flag = False
            need_new_zone = False
            for contr in fabric_maps:
                (fc_port_for_zone, tmp_flag) = \
                    self._get_one_fc_port_for_zone(wwn, contr, nsinfos,
                                                   cfgmap_from_fabrics,
                                                   fabric_maps)
                if tmp_flag:
                    # No existing zone joins this port with the initiator,
                    # so a new zone must be created.
                    need_new_zone = True
                if fc_port_for_zone:
                    tgt_port_wwns.append(fc_port_for_zone)
                    if not tmp_flag:
                        # Already zoned with the initiator.
                        tgt_port_for_map.append(fc_port_for_zone)
                    if tmp_flag:
                        # Needs zoning first.
                        tmp_port_list.append(fc_port_for_zone)
                init_targ_map[wwn] = tmp_port_list
            LOG.debug("tmp_port_list: %s" % tmp_port_list)
            init_targ_map_total[wwn] = tgt_port_for_map
            if need_new_zone and need_add_con:
                LOG.debug("Got init_targ_map to create zone: %s"
                          % init_targ_map)
                self.zm.add_connection(init_targ_map)
        tgt_port_wwns = list(set(tgt_port_wwns))
        return (tgt_port_wwns, init_targ_map_total)

    def _get_fabric_vendor(self):
        """Return the fabric vendor name parsed from the configured
        zone_driver dotted path (e.g. 'brocade' or 'cisco')."""
        zone_config = config.Configuration(zone_manager_opts,
                                           'fc-zone-manager')
        fabric_driver = zone_config.zone_driver
        LOG.debug('Using fabric driver: %s' % fabric_driver)
        driver_vendor = None
        try:
            # cinder.zonemanager.drivers.<vendor>.<module>.<Class>
            driver_vendor = fabric_driver.split('.')[3]
        except Exception:
            msg = _('Get fabric driver vendor error.')
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return driver_vendor

    def _get_nameserver_info(self, fabric):
        """Dispatch name-server retrieval to the vendor-specific helper."""
        driver_vendor = self._get_fabric_vendor()
        if driver_vendor == 'brocade':
            nsinfo = self._get_brcd_nsinfo(fabric)
        elif driver_vendor == 'cisco':
            nsinfo = self._get_cisco_nsinfo(fabric)
        else:
            # Wrapped in _() for i18n consistency with the other messages.
            msg = _('Unsupported fabric, vendor name: %s.') % driver_vendor
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return nsinfo

    def _get_cisco_config(self, fabric):
        """Read the Cisco connection settings for *fabric* from the zone
        manager's fabric configuration."""
        fabric_ip = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_address')
        fabric_user = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_user')
        fabric_pwd = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_password')
        fabric_port = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_port')
        zoning_vsan = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_zoning_vsan')
        return (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)

    def _get_brcd_nsinfo(self, fabric):
        """Fetch name-server info from a Brocade fabric via its CLI client."""
        conn = self.zm.driver._get_cli_client(fabric)
        try:
            nsinfo = conn.get_nameserver_info()
            LOG.debug("name server info from fabric: %s", nsinfo)
            conn.cleanup()
        except exception.BrocadeZoningCliException:
            if not conn.is_supported_firmware():
                msg = _("Unsupported firmware on switch %s. Make sure "
                        "switch is running firmware v6.4 or higher."
                        ) % conn.switch_ip
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting name server info."))
        except Exception:
            msg = _("Failed to get name server info.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        return nsinfo

    def _get_cisco_nsinfo(self, fabric):
        """Fetch name-server info from a Cisco fabric via the configured
        southbound connector."""
        (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = (
            self._get_cisco_config(fabric))
        try:
            conn = importutils.import_object(
                self.zm.driver.configuration.cisco_sb_connector,
                ipaddress=fabric_ip,
                username=fabric_user,
                password=fabric_pwd, port=fabric_port,
                vsan=zoning_vsan)
            nsinfo = conn.get_nameserver_info()
            LOG.debug("name server info from fabric: %s",
                      nsinfo)
            conn.cleanup()
        except exception.CiscoZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting show fcns database "
                                  "info."))
        except Exception:
            # Wrapped in _() for i18n consistency with the other messages.
            msg = _("Failed to get show fcns database info.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        return nsinfo

    def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos,
                                  cfgmap_from_fabrics, fabric_maps):
        """Get on FC port per one controller.

        task flow:
        1. Get all the FC port from the array.
        2. Filter out ports belonged to the specific controller
        and the status is connected.
        3. Filter out ports connected to the fabric configured in cinder.conf.
        4. Get active zones set from switch.
        5. Find a port according to three cases.

        Returns (port, needs_new_zone): needs_new_zone is False when the
        port is already zoned with the initiator, True otherwise.
        """
        LOG.info(_LI("Get in function _get_one_fc_port_for_zone. "
                     "Initiator: %s"), initiator)
        formatted_initiator = fczm_utils.get_formatted_wwn(initiator)
        fabric_map = fabric_maps[contr]
        if not fabric_map:
            return (None, False)
        port_zone_number_map = {}
        for fabric in fabric_map:
            LOG.info(_LI("Dealing with fabric: %s"), fabric)
            nsinfo = nsinfos[fabric]
            if formatted_initiator not in nsinfo:
                continue
            final_port_list_per_fabric = fabric_map[fabric]
            cfgmap_from_fabric = cfgmap_from_fabrics[fabric]
            zones_members = cfgmap_from_fabric['zones'].values()
            for port in final_port_list_per_fabric:
                port_zone_number_map[port] = 0
                formatted_port = fczm_utils.get_formatted_wwn(port)
                for zones_member in zones_members:
                    if formatted_port in zones_member:
                        # For the second case use.
                        if formatted_initiator in zones_member:
                            # First case: found a port in the same
                            # zone with the given initiator.
                            return (port, False)
                        # For the third case use.
                        port_zone_number_map[port] += 1
        # Second case: no candidate port at all.
        if not port_zone_number_map:
            return (None, False)
        # Third case: find a port referenced in fewest zones.
        temp_list = sorted(port_zone_number_map.items(), key=lambda d: d[1])
        return (temp_list[0][0], True)

    def _get_active_zone_set(self, fabric):
        """Return the active zone set of *fabric*, dispatching on vendor."""
        driver_vendor = self._get_fabric_vendor()
        if driver_vendor == 'brocade':
            conn = self.zm.driver._get_cli_client(fabric)
            cfgmap_from_fabric = self.zm.driver._get_active_zone_set(conn)
            conn.cleanup()
        elif driver_vendor == 'cisco':
            (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = (
                self._get_cisco_config(fabric))
            cfgmap_from_fabric = self.zm.driver.get_active_zone_set(
                fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        else:
            # Wrapped in _() for i18n consistency with the other messages.
            msg = _('Unsupported fabric, vendor name: %s.') % driver_vendor
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return cfgmap_from_fabric
| 41.274021
| 79
| 0.604846
|
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.i18n import _LE
from cinder.i18n import _LI
from cinder.volume import configuration as config
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
controller_list = ['A', 'B', 'C', 'D']
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management')
]
class FCZoneHelper(object):
    """FC zone helper: builds initiator/target maps for FC zoning from the
    array's FC ports (via client) and fabric data (via zone manager zm)."""
    def __init__(self, zm, client):
        # zm: cinder FC zone manager; client: array client for FC ports.
        self.zm = zm
        self.client = client
    def _check_fc_port_and_init(self, wwns, hostid, fabric_map, nsinfos):
        """Raise CinderException unless at least one array port and one
        host wwn are connected to a switch; register found initiators."""
        if not fabric_map:
            msg = _('No FC port on array is connected to switch.')
            LOG.error(msg)
            raise exception.CinderException(msg)
        no_wwn_connected_to_switch = True
        for wwn in wwns:
            formatted_initiator = fczm_utils.get_formatted_wwn(wwn)
            for fabric in fabric_map:
                nsinfo = nsinfos[fabric]
                if formatted_initiator in nsinfo:
                    # Initiator is logged in to this fabric: register it.
                    no_wwn_connected_to_switch = False
                    self.client.ensure_fc_initiator_added(wwn, hostid)
                    break
        if no_wwn_connected_to_switch:
            msg = _('No wwn on host is connected to switch.')
            LOG.error(msg)
            raise exception.CinderException(msg)
    def build_ini_tgt_map(self, wwns, host_id, port_list, is_add):
        """Return (tgt_port_wwns, init_targ_map) for the given initiators."""
        fabric_map = self.zm.get_san_context(port_list)
        nsinfos = {}
        cfgmap_from_fabrics = {}
        for fabric in fabric_map:
            nsinfos[fabric] = self._get_nameserver_info(fabric)
            cfgmap_from_fabric = self._get_active_zone_set(fabric)
            cfgmap_from_fabrics[fabric] = cfgmap_from_fabric
        self._check_fc_port_and_init(wwns, host_id, fabric_map, nsinfos)
        return self._build_ini_tgt_map(wwns, is_add, nsinfos,
                                       cfgmap_from_fabrics)
    def _build_ini_tgt_map(self, wwns, need_add_con, nsinfos,
                           cfgmap_from_fabrics):
        """Pick one target port per controller for each initiator; zone
        not-yet-zoned ports via zm.add_connection when need_add_con."""
        tgt_port_wwns = []
        init_targ_map_total = {}
        # One fabric map per controller that actually has FC ports.
        fabric_maps = {}
        for contr in controller_list:
            port_list_from_contr = self.client.get_fc_ports_from_contr(contr)
            if port_list_from_contr:
                fabric_map = self.zm.get_san_context(port_list_from_contr)
                fabric_maps[contr] = fabric_map
        for wwn in wwns:
            init_targ_map = {}
            tmp_port_list = []
            tgt_port_for_map = []
            tmp_flag = False
            need_new_zone = False
            for contr in fabric_maps:
                # tmp_flag is True when the port is not yet zoned with wwn.
                (fc_port_for_zone, tmp_flag) = \
                    self._get_one_fc_port_for_zone(wwn, contr, nsinfos,
                                                   cfgmap_from_fabrics,
                                                   fabric_maps)
                if tmp_flag:
                    need_new_zone = True
                if fc_port_for_zone:
                    tgt_port_wwns.append(fc_port_for_zone)
                    if not tmp_flag:
                        # Already zoned with the initiator.
                        tgt_port_for_map.append(fc_port_for_zone)
                    if tmp_flag:
                        # Needs a new zone first.
                        tmp_port_list.append(fc_port_for_zone)
                init_targ_map[wwn] = tmp_port_list
            LOG.debug("tmp_port_list: %s" % tmp_port_list)
            init_targ_map_total[wwn] = tgt_port_for_map
            if need_new_zone and need_add_con:
                LOG.debug("Got init_targ_map to create zone: %s"
                          % init_targ_map)
                self.zm.add_connection(init_targ_map)
        tgt_port_wwns = list(set(tgt_port_wwns))
        return (tgt_port_wwns, init_targ_map_total)
    def _get_fabric_vendor(self):
        """Return vendor name parsed from the configured zone_driver path."""
        zone_config = config.Configuration(zone_manager_opts,
                                           'fc-zone-manager')
        fabric_driver = zone_config.zone_driver
        LOG.debug('Using fabric driver: %s' % fabric_driver)
        driver_vendor = None
        try:
            # cinder.zonemanager.drivers.<vendor>.<module>.<Class>
            driver_vendor = fabric_driver.split('.')[3]
        except Exception:
            msg = _('Get fabric driver vendor error.')
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return driver_vendor
    def _get_nameserver_info(self, fabric):
        """Dispatch name-server retrieval to the vendor-specific helper."""
        driver_vendor = self._get_fabric_vendor()
        if driver_vendor == 'brocade':
            nsinfo = self._get_brcd_nsinfo(fabric)
        elif driver_vendor == 'cisco':
            nsinfo = self._get_cisco_nsinfo(fabric)
        else:
            msg = ('Unsupported fabric, vendor name: %s.' % driver_vendor)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return nsinfo
    def _get_cisco_config(self, fabric):
        """Read Cisco connection settings for *fabric* from zm's config."""
        fabric_ip = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_address')
        fabric_user = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_user')
        fabric_pwd = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_password')
        fabric_port = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_fc_fabric_port')
        zoning_vsan = self.zm.driver.fabric_configs[fabric].safe_get(
            'cisco_zoning_vsan')
        return (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
    def _get_brcd_nsinfo(self, fabric):
        """Fetch name-server info from a Brocade fabric via its CLI client."""
        conn = self.zm.driver._get_cli_client(fabric)
        try:
            nsinfo = conn.get_nameserver_info()
            LOG.debug("name server info from fabric: %s", nsinfo)
            conn.cleanup()
        except exception.BrocadeZoningCliException:
            if not conn.is_supported_firmware():
                msg = _("Unsupported firmware on switch %s. Make sure "
                        "switch is running firmware v6.4 or higher."
                        ) % conn.switch_ip
                LOG.error(msg)
                raise exception.FCZoneDriverException(msg)
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting name server info."))
        except Exception:
            msg = _("Failed to get name server info.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        return nsinfo
    def _get_cisco_nsinfo(self, fabric):
        """Fetch name-server info from a Cisco fabric via the configured
        southbound connector."""
        (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = (
            self._get_cisco_config(fabric))
        try:
            conn = importutils.import_object(
                self.zm.driver.configuration.cisco_sb_connector,
                ipaddress=fabric_ip,
                username=fabric_user,
                password=fabric_pwd, port=fabric_port,
                vsan=zoning_vsan)
            nsinfo = conn.get_nameserver_info()
            LOG.debug("name server info from fabric: %s",
                      nsinfo)
            conn.cleanup()
        except exception.CiscoZoningCliException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting show fcns database "
                                  "info."))
        except Exception:
            msg = ("Failed to get show fcns database info.")
            LOG.exception(msg)
            raise exception.FCZoneDriverException(msg)
        return nsinfo
    def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos,
                                  cfgmap_from_fabrics, fabric_maps):
        """Return (port, needs_new_zone) for one controller: a port already
        zoned with the initiator, else the port in fewest zones."""
        LOG.info(_LI("Get in function _get_one_fc_port_for_zone. "
                     "Initiator: %s"), initiator)
        formatted_initiator = fczm_utils.get_formatted_wwn(initiator)
        fabric_map = fabric_maps[contr]
        if not fabric_map:
            return (None, False)
        port_zone_number_map = {}
        for fabric in fabric_map:
            LOG.info(_LI("Dealing with fabric: %s"), fabric)
            nsinfo = nsinfos[fabric]
            if formatted_initiator not in nsinfo:
                continue
            final_port_list_per_fabric = fabric_map[fabric]
            cfgmap_from_fabric = cfgmap_from_fabrics[fabric]
            zones_members = cfgmap_from_fabric['zones'].values()
            for port in final_port_list_per_fabric:
                port_zone_number_map[port] = 0
                formatted_port = fczm_utils.get_formatted_wwn(port)
                for zones_member in zones_members:
                    if formatted_port in zones_member:
                        if formatted_initiator in zones_member:
                            # Port already zoned with the initiator.
                            return (port, False)
                        port_zone_number_map[port] += 1
        if port_zone_number_map == {}:
            return (None, False)
        temp_list = []
        # Fall back to the port referenced in the fewest zones.
        temp_list = sorted(port_zone_number_map.items(), key=lambda d: d[1])
        return (temp_list[0][0], True)
    def _get_active_zone_set(self, fabric):
        """Return the active zone set of *fabric*, dispatching on vendor."""
        driver_vendor = self._get_fabric_vendor()
        if driver_vendor == 'brocade':
            conn = self.zm.driver._get_cli_client(fabric)
            cfgmap_from_fabric = self.zm.driver._get_active_zone_set(conn)
            conn.cleanup()
        elif driver_vendor == 'cisco':
            (fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) = (
                self._get_cisco_config(fabric))
            cfgmap_from_fabric = self.zm.driver.get_active_zone_set(
                fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
        else:
            msg = ('Unsupported fabric, vendor name: %s.' % driver_vendor)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return cfgmap_from_fabric
| true
| true
|
790d6e48c3ce711d691ffe339851840bd6867634
| 5,282
|
py
|
Python
|
modules/transformer.py
|
riokt/video-paragraph
|
2da3298819e73809af495457db2cf1dfffad712f
|
[
"MIT"
] | null | null | null |
modules/transformer.py
|
riokt/video-paragraph
|
2da3298819e73809af495457db2cf1dfffad712f
|
[
"MIT"
] | null | null | null |
modules/transformer.py
|
riokt/video-paragraph
|
2da3298819e73809af495457db2cf1dfffad712f
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import framework.configbase
import math
import time
import numpy as np
from modules.transformer_encoder import Encoder
from modules.transformer_decoder import Decoder
# Linearly increasing weights 1/20, 2/20, ..., 20/20 used to weight the most
# recent (up to) 20 decoder steps when blending attention/query histories.
decay1 = [(i+1)*20**(-1) for i in range(20)]
# Linearly decaying initial "add state" 49/50, 48/50, ..., 0 for the first 50
# encoder positions; positions beyond 50 are padded with 0 at the use sites.
decay2 = [1-(i+1)*50**(-1) for i in range(50)]
class TransformerConfig(framework.configbase.ModuleConfig):
  """Configuration container for the Transformer captioning model.

  Every option is exposed as an instance attribute with the default value
  given below; framework machinery may overwrite them after construction.
  """
  def __init__(self):
    super(TransformerConfig, self).__init__()
    # Declare all tunable options with their defaults in one place.
    defaults = {
      'vocab': 0,
      'max_words_in_sent': 150,
      'ft_dim': 4096,
      'd_model': 512,
      'enc_n_layers': 3,
      'dec_n_layers': 3,
      'heads': 8,
      'dropout': 0.1,
      'keyframes': False,
      'rl': False,
      'document_freq': None,
    }
    for option_name, default_value in defaults.items():
      setattr(self, option_name, default_value)
class Transformer(nn.Module):
  """Video-to-paragraph transformer with a dynamically updated memory bank.

  The encoder output is down-weighted by a per-position "add state" (decay2)
  and stored as a memory bank; after each decoded token, `update_memory`
  removes attended content (for diversity) and re-adds encoder content (for
  coherence) based on learned gates.
  """
  def __init__(self, config):
    super(Transformer, self).__init__()
    self.config = config
    self.encoder = Encoder(self.config.ft_dim, self.config.d_model, self.config.enc_n_layers, self.config.heads, self.config.dropout, self.config.keyframes, act=True)
    self.decoder = Decoder(self.config.vocab, self.config.d_model, self.config.dec_n_layers, self.config.heads, self.config.dropout, act=True)
    self.dropout = nn.Dropout(self.config.dropout)
    # Output projection; its weight is tied to the decoder embedding below.
    self.logit = nn.Linear(self.config.d_model, self.config.vocab)
    self.logit.weight = self.decoder.embed.embed.weight
    # Gates and projections used by update_memory().
    self.remove_gate = nn.Linear(self.config.d_model, 1)
    self.add_gate = nn.Linear(self.config.d_model, 1)
    self.q_linear = nn.Linear(self.config.d_model, self.config.d_model, bias=False)
    self.next_attn = nn.Linear(2*self.config.d_model, 1)
    self.init_weights()
  def init_weights(self,):
    """Xavier-initialize all parameters with more than one dimension."""
    for p in self.parameters():
      if p.dim() > 1:
        nn.init.xavier_uniform_(p)
  def forward(self, src, trg, src_mask, trg_mask):
    """Teacher-forced decoding pass.

    Returns (vocab logits per step, org_key, select) where the latter two
    come straight from the encoder.
    """
    e_outputs, org_key, select = self.encoder(src, src_mask)
    # Initial add_state: decay2 weights for the first 50 positions, 0 after.
    # assumes e_outputs is (batch, src_len, d_model) — TODO confirm
    add_state = torch.tensor(decay2[:e_outputs.size(1)]+[0]*max(0,e_outputs.size(1)-50)).cuda().unsqueeze(0).unsqueeze(-1)
    memory_bank = e_outputs * add_state
    d_output, attn_weights = [], []
    # Decode one step at a time so the memory bank can evolve between steps.
    for i in range(1, trg.size(1)+1):
      word, attn, _ = self.decoder(trg[:,i-1].unsqueeze(1), memory_bank, src_mask, trg_mask[:,i-1,i-1].unsqueeze(1), step=i-1)
      d_output.append(word[:,-1])
      # Average attention over heads for the latest position.
      attn_weights.append(attn[:,:,-1].mean(dim=1))
      # Only the last (up to) 20 steps feed the memory update.
      memory_bank, add_state = self.update_memory(memory_bank, add_state, e_outputs, attn_weights[-20:], d_output[-20:])
    output = self.logit(torch.cat([_.unsqueeze(1) for _ in d_output], 1))
    return output, org_key, select
  def update_memory(self, memory_bank, add_state, e_outputs, attn, query_s):
    """Remove attended content and re-add encoder content in the memory bank.

    *attn* and *query_s* are lists of the most recent (<=20) per-step
    attention vectors and decoder states; both are blended with softmaxed
    decay1 weights favoring recent steps.
    """
    remove_prob = torch.sigmoid(self.remove_gate(query_s[-1])).unsqueeze(-1)
    add_prob = torch.sigmoid(self.add_gate(query_s[-1])).unsqueeze(-1)
    temp = torch.softmax(torch.tensor(decay1[20-len(attn):]).cuda(), dim=-1)
    attn = sum([attn[i]*temp[i] for i in range(len(attn))]).unsqueeze(-1)
    # remove for diversity
    query_s = sum([query_s[i]*temp[i] for i in range(len(query_s))])
    sim = torch.sigmoid(torch.matmul(memory_bank, self.q_linear(query_s).unsqueeze(-1)))
    memory_bank = memory_bank * (1 - remove_prob * attn * sim)
    # add for coherence
    last_ctx = (e_outputs * attn).sum(dim=1, keepdim=True)
    next_attn = torch.sigmoid(self.next_attn(torch.cat([e_outputs,last_ctx.expand_as(e_outputs)], dim=-1)))
    memory_bank = memory_bank + e_outputs * (1-add_state) * (add_prob*next_attn)
    # add_state tracks how much of each encoder position has been re-added.
    add_state = add_state + (1-add_state) * (add_prob*next_attn)
    return memory_bank, add_state
  def sample(self, src, src_mask, decoding='greedy'):
    """Autoregressive sampling (greedy or multinomial), max 60 tokens.

    Returns (token ids, per-step log-probs, stacked attention weights).
    NOTE(review): token ids 2/3 are assumed to be BOS/EOS — confirm against
    the vocabulary; eos_tok is defined but generation never early-stops.
    """
    init_tok = 2
    eos_tok = 3
    if self.config.keyframes:
      e_outputs, src_mask = self.encoder.get_keyframes(src, src_mask)
    else:
      e_outputs, _, _ = self.encoder(src, src_mask)
    add_state = torch.tensor(decay2[:e_outputs.size(1)]+[0]*max(0,e_outputs.size(1)-50)).cuda().unsqueeze(0).unsqueeze(-1)
    memory_bank = e_outputs * add_state
    outputs = torch.ones(src.size(0), 1).fill_(init_tok).long().cuda()
    seqLogprobs = torch.zeros(src.size(0), 60).cuda()
    attn_weights, d_output = [], []
    for i in range(1, 60):
      trg_mask = self.nopeak_mask(i)
      word, attn, _ = self.decoder(outputs[:,-1].unsqueeze(1), memory_bank, src_mask, trg_mask[:,-1,-1].unsqueeze(1), step=i-1)
      attn_weights.append(attn[:,:,-1].mean(dim=1))
      d_output.append(word[:,-1])
      out = self.logit(word)
      logprobs = F.log_softmax(out[:,-1], dim=-1)
      if decoding == 'greedy':
        _, next_word = torch.max(logprobs, dim=1)
        next_word = next_word.unsqueeze(-1)
      else:
        # Multinomial sampling from the softmax distribution.
        probs = torch.exp(logprobs.data).cpu()
        next_word = torch.multinomial(probs, 1).cuda()
      seqLogprobs[:,i] = logprobs.gather(1, next_word).view(-1)
      outputs = torch.cat([outputs, next_word], dim=1)
      memory_bank, add_state = self.update_memory(memory_bank, add_state, e_outputs, attn_weights[-20:], d_output[-20:])
    attn_weights = torch.cat([_.unsqueeze(1) for _ in attn_weights], dim=1)
    return outputs, seqLogprobs, attn_weights
  def nopeak_mask(self, size):
    """Build a (1, size, size) lower-triangular mask blocking future tokens."""
    np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    np_mask = Variable(torch.from_numpy(np_mask) == 0).cuda()
    return np_mask
| 44.762712
| 166
| 0.683453
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import framework.configbase
import math
import time
import numpy as np
from modules.transformer_encoder import Encoder
from modules.transformer_decoder import Decoder
decay1 = [(i+1)*20**(-1) for i in range(20)]
decay2 = [1-(i+1)*50**(-1) for i in range(50)]
class TransformerConfig(framework.configbase.ModuleConfig):
def __init__(self):
super(TransformerConfig, self).__init__()
self.vocab = 0
self.max_words_in_sent = 150
self.ft_dim = 4096
self.d_model = 512
self.enc_n_layers = 3
self.dec_n_layers = 3
self.heads = 8
self.dropout = 0.1
self.keyframes = False
self.rl = False
self.document_freq = None
class Transformer(nn.Module):
def __init__(self, config):
super(Transformer, self).__init__()
self.config = config
self.encoder = Encoder(self.config.ft_dim, self.config.d_model, self.config.enc_n_layers, self.config.heads, self.config.dropout, self.config.keyframes, act=True)
self.decoder = Decoder(self.config.vocab, self.config.d_model, self.config.dec_n_layers, self.config.heads, self.config.dropout, act=True)
self.dropout = nn.Dropout(self.config.dropout)
self.logit = nn.Linear(self.config.d_model, self.config.vocab)
self.logit.weight = self.decoder.embed.embed.weight
self.remove_gate = nn.Linear(self.config.d_model, 1)
self.add_gate = nn.Linear(self.config.d_model, 1)
self.q_linear = nn.Linear(self.config.d_model, self.config.d_model, bias=False)
self.next_attn = nn.Linear(2*self.config.d_model, 1)
self.init_weights()
def init_weights(self,):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, trg, src_mask, trg_mask):
e_outputs, org_key, select = self.encoder(src, src_mask)
add_state = torch.tensor(decay2[:e_outputs.size(1)]+[0]*max(0,e_outputs.size(1)-50)).cuda().unsqueeze(0).unsqueeze(-1)
memory_bank = e_outputs * add_state
d_output, attn_weights = [], []
for i in range(1, trg.size(1)+1):
word, attn, _ = self.decoder(trg[:,i-1].unsqueeze(1), memory_bank, src_mask, trg_mask[:,i-1,i-1].unsqueeze(1), step=i-1)
d_output.append(word[:,-1])
attn_weights.append(attn[:,:,-1].mean(dim=1))
memory_bank, add_state = self.update_memory(memory_bank, add_state, e_outputs, attn_weights[-20:], d_output[-20:])
output = self.logit(torch.cat([_.unsqueeze(1) for _ in d_output], 1))
return output, org_key, select
def update_memory(self, memory_bank, add_state, e_outputs, attn, query_s):
remove_prob = torch.sigmoid(self.remove_gate(query_s[-1])).unsqueeze(-1)
add_prob = torch.sigmoid(self.add_gate(query_s[-1])).unsqueeze(-1)
temp = torch.softmax(torch.tensor(decay1[20-len(attn):]).cuda(), dim=-1)
attn = sum([attn[i]*temp[i] for i in range(len(attn))]).unsqueeze(-1)
query_s = sum([query_s[i]*temp[i] for i in range(len(query_s))])
sim = torch.sigmoid(torch.matmul(memory_bank, self.q_linear(query_s).unsqueeze(-1)))
memory_bank = memory_bank * (1 - remove_prob * attn * sim)
last_ctx = (e_outputs * attn).sum(dim=1, keepdim=True)
next_attn = torch.sigmoid(self.next_attn(torch.cat([e_outputs,last_ctx.expand_as(e_outputs)], dim=-1)))
memory_bank = memory_bank + e_outputs * (1-add_state) * (add_prob*next_attn)
add_state = add_state + (1-add_state) * (add_prob*next_attn)
return memory_bank, add_state
def sample(self, src, src_mask, decoding='greedy'):
init_tok = 2
eos_tok = 3
if self.config.keyframes:
e_outputs, src_mask = self.encoder.get_keyframes(src, src_mask)
else:
e_outputs, _, _ = self.encoder(src, src_mask)
add_state = torch.tensor(decay2[:e_outputs.size(1)]+[0]*max(0,e_outputs.size(1)-50)).cuda().unsqueeze(0).unsqueeze(-1)
memory_bank = e_outputs * add_state
outputs = torch.ones(src.size(0), 1).fill_(init_tok).long().cuda()
seqLogprobs = torch.zeros(src.size(0), 60).cuda()
attn_weights, d_output = [], []
for i in range(1, 60):
trg_mask = self.nopeak_mask(i)
word, attn, _ = self.decoder(outputs[:,-1].unsqueeze(1), memory_bank, src_mask, trg_mask[:,-1,-1].unsqueeze(1), step=i-1)
attn_weights.append(attn[:,:,-1].mean(dim=1))
d_output.append(word[:,-1])
out = self.logit(word)
logprobs = F.log_softmax(out[:,-1], dim=-1)
if decoding == 'greedy':
_, next_word = torch.max(logprobs, dim=1)
next_word = next_word.unsqueeze(-1)
else:
probs = torch.exp(logprobs.data).cpu()
next_word = torch.multinomial(probs, 1).cuda()
seqLogprobs[:,i] = logprobs.gather(1, next_word).view(-1)
outputs = torch.cat([outputs, next_word], dim=1)
memory_bank, add_state = self.update_memory(memory_bank, add_state, e_outputs, attn_weights[-20:], d_output[-20:])
attn_weights = torch.cat([_.unsqueeze(1) for _ in attn_weights], dim=1)
return outputs, seqLogprobs, attn_weights
def nopeak_mask(self, size):
np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
np_mask = Variable(torch.from_numpy(np_mask) == 0).cuda()
return np_mask
| true
| true
|
790d6ea3e263f841adc2a3adb570394e159cd2d3
| 157
|
py
|
Python
|
alarmexception.py
|
Megha-Bose/Brick-Breaker-Game
|
b543ec8277193dcca0ec15afab4a4775744b9587
|
[
"MIT"
] | 1
|
2021-04-08T04:15:36.000Z
|
2021-04-08T04:15:36.000Z
|
alarmexception.py
|
Megha-Bose/Brick-Breaker-Game
|
b543ec8277193dcca0ec15afab4a4775744b9587
|
[
"MIT"
] | null | null | null |
alarmexception.py
|
Megha-Bose/Brick-Breaker-Game
|
b543ec8277193dcca0ec15afab4a4775744b9587
|
[
"MIT"
] | null | null | null |
''' Taking characters from terminal without pressing enter for movements '''
from __future__ import print_function
class AlarmException(Exception):
    """Raised when the input-polling alarm fires before a key is pressed."""
| 31.4
| 76
| 0.802548
|
from __future__ import print_function
class AlarmException(Exception):
pass
| true
| true
|
790d6edd3e6ec87c0fb40dea98f50155369e3bae
| 56,304
|
py
|
Python
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | 2
|
2021-04-26T13:37:48.000Z
|
2021-04-26T13:37:49.000Z
|
datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py
|
vbarbaresi/integrations-core
|
ab26ab1cd6c28a97c1ad1177093a93659658c7aa
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import division
import copy
from fnmatch import translate
from math import isinf, isnan
from os.path import isfile
from re import compile
import requests
from prometheus_client.samples import Sample
from six import PY3, iteritems, string_types
from ...config import is_affirmative
from ...errors import CheckException
from ...utils.common import to_native_string
from ...utils.http import RequestsWrapper
from .. import AgentCheck
from ..libs.prometheus import text_fd_to_metric_families
if PY3:
long = int
class OpenMetricsScraperMixin(object):
# pylint: disable=E1101
# This class is not supposed to be used by itself, it provides scraping behavior but
# need to be within a check in the end
REQUESTS_CHUNK_SIZE = 1024 * 10 # use 10kb as chunk size when using the Stream feature in requests.get
# indexes in the sample tuple of core.Metric
SAMPLE_NAME = 0
SAMPLE_LABELS = 1
SAMPLE_VALUE = 2
MICROS_IN_S = 1000000
MINUS_INF = float("-inf")
TELEMETRY_GAUGE_MESSAGE_SIZE = "payload.size"
TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT = "metrics.blacklist.count"
TELEMETRY_COUNTER_METRICS_INPUT_COUNT = "metrics.input.count"
TELEMETRY_COUNTER_METRICS_IGNORE_COUNT = "metrics.ignored.count"
TELEMETRY_COUNTER_METRICS_PROCESS_COUNT = "metrics.processed.count"
METRIC_TYPES = ['counter', 'gauge', 'summary', 'histogram']
KUBERNETES_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    def __init__(self, *args, **kwargs):
        """Forward all constructor arguments up the MRO unchanged."""
        # Initialize AgentCheck's base class
        super(OpenMetricsScraperMixin, self).__init__(*args, **kwargs)
def create_scraper_configuration(self, instance=None):
"""
Creates a scraper configuration.
If instance does not specify a value for a configuration option, the value will default to the `init_config`.
Otherwise, the `default_instance` value will be used.
A default mixin configuration will be returned if there is no instance.
"""
if 'openmetrics_endpoint' in instance:
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
# We can choose to create a default mixin configuration for an empty instance
if instance is None:
instance = {}
# Supports new configuration options
config = copy.deepcopy(instance)
# Set the endpoint
endpoint = instance.get('prometheus_url')
if instance and endpoint is None:
raise CheckException("You have to define a prometheus_url for each prometheus instance")
config['prometheus_url'] = endpoint
# `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the
# child check class.
namespace = instance.get('namespace')
# Check if we have a namespace
if instance and namespace is None:
if self.default_namespace is None:
raise CheckException("You have to define a namespace for each prometheus check")
namespace = self.default_namespace
config['namespace'] = namespace
# Retrieve potential default instance settings for the namespace
default_instance = self.default_instances.get(namespace, {})
# `metrics_mapper` is a dictionary where the keys are the metrics to capture
# and the values are the corresponding metrics names to have in datadog.
# Note: it is empty in the parent class but will need to be
# overloaded/hardcoded in the final check not to be counted as custom metric.
# Metrics are preprocessed if no mapping
metrics_mapper = {}
# We merge list and dictionaries from optional defaults & instance settings
metrics = default_instance.get('metrics', []) + instance.get('metrics', [])
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
# `_wildcards_re` is a Pattern object used to match metric wildcards
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if "*" in metric:
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
# `prometheus_metrics_prefix` allows to specify a prefix that all
# prometheus metrics should have. This can be used when the prometheus
# endpoint we are scrapping allows to add a custom prefix to it's
# metrics.
config['prometheus_metrics_prefix'] = instance.get(
'prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', '')
)
# `label_joins` holds the configuration for extracting 1:1 labels from
# a target metric to all metric matching the label, example:
# self.label_joins = {
# 'kube_pod_info': {
# 'labels_to_match': ['pod'],
# 'labels_to_get': ['node', 'host_ip']
# }
# }
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
# `_label_mapping` holds the additionals label info to add for a specific
# label value, example:
# self._label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': {
# "node": "yolo",
# "host_ip": "yey"
# }
# }
# }
config['_label_mapping'] = {}
# `_active_label_mapping` holds a dictionary of label values found during the run
# to cleanup the label_mapping of unused values, example:
# self._active_label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': True
# }
# }
config['_active_label_mapping'] = {}
# `_watched_labels` holds the sets of labels to watch for enrichment
config['_watched_labels'] = {}
config['_dry_run'] = True
# Some metrics are ignored because they are duplicates or introduce a
# very high cardinality. Metrics included in this list will be silently
# skipped without a 'Unable to handle metric' debug line in the logs
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
# `_ignored_re` is a Pattern object used to match ignored metric patterns
config['_ignored_re'] = None
ignored_patterns = set()
# Separate ignored metric names and ignored patterns in different sets for faster lookup later
for metric in config['ignore_metrics']:
if '*' in metric:
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
# Ignore metrics based on label keys or specific label values
config['ignore_metrics_by_labels'] = instance.get(
'ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {})
)
# If you want to send the buckets as tagged values when dealing with histograms,
# set send_histograms_buckets to True, set to False otherwise.
config['send_histograms_buckets'] = is_affirmative(
instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True))
)
# If you want the bucket to be non cumulative and to come with upper/lower bound tags
# set non_cumulative_buckets to True, enabled when distribution metrics are enabled.
config['non_cumulative_buckets'] = is_affirmative(
instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False))
)
# Send histograms as datadog distribution metrics
config['send_distribution_buckets'] = is_affirmative(
instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False))
)
# Non cumulative buckets are mandatory for distribution metrics
if config['send_distribution_buckets'] is True:
config['non_cumulative_buckets'] = True
# If you want to send `counter` metrics as monotonic counts, set this value to True.
# Set to False if you want to instead send those metrics as `gauge`.
config['send_monotonic_counter'] = is_affirmative(
instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True))
)
# If you want `counter` metrics to be submitted as both gauges and monotonic counts. Set this value to True.
config['send_monotonic_with_gauge'] = is_affirmative(
instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False))
)
config['send_distribution_counts_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_counts_as_monotonic',
default_instance.get('send_distribution_counts_as_monotonic', False),
)
)
config['send_distribution_sums_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_sums_as_monotonic',
default_instance.get('send_distribution_sums_as_monotonic', False),
)
)
# If the `labels_mapper` dictionary is provided, the metrics labels names
# in the `labels_mapper` will use the corresponding value as tag name
# when sending the gauges.
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
# Rename bucket "le" label to "upper_bound"
config['labels_mapper']['le'] = 'upper_bound'
# `exclude_labels` is an array of labels names to exclude. Those labels
# will just not be added as tags when submitting the metric.
config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', [])
# `type_overrides` is a dictionary where the keys are prometheus metric names
# and the values are a metric type (name as string) to use instead of the one
# listed in the payload. It can be used to force a type on untyped metrics.
# Note: it is empty in the parent class but will need to be
# overloaded/hardcoded in the final check not to be counted as custom metric.
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
# `_type_override_patterns` is a dictionary where we store Pattern objects
# that match metric names as keys, and their corresponding metric type overrrides as values.
config['_type_override_patterns'] = {}
with_wildcards = set()
for metric, type in iteritems(config['type_overrides']):
if '*' in metric:
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
# cleanup metric names with wildcards from the 'type_overrides' dict
for metric in with_wildcards:
del config['type_overrides'][metric]
# Some metrics are retrieved from differents hosts and often
# a label can hold this information, this transfers it to the hostname
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
# In combination to label_as_hostname, allows to add a common suffix to the hostnames
# submitted. This can be used for instance to discriminate hosts between clusters.
config['label_to_hostname_suffix'] = instance.get(
'label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None)
)
# Add a 'health' service check for the prometheus endpoint
config['health_service_check'] = is_affirmative(
instance.get('health_service_check', default_instance.get('health_service_check', True))
)
# Can either be only the path to the certificate and thus you should specify the private key
# or it can be the path to a file containing both the certificate & the private key
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
# Needed if the certificate does not include the private key
#
# /!\ The private key to your local certificate must be unencrypted.
# Currently, Requests does not support using encrypted keys.
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
# The path to the trusted CA used for generating custom certificates
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
# Whether or not to validate SSL certificates
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
# Extra http headers to be sent when polling endpoint
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
# Timeout used during the network request
config['prometheus_timeout'] = instance.get(
'prometheus_timeout', default_instance.get('prometheus_timeout', 10)
)
# Authentication used when polling endpoint
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
# Custom tags that will be sent with each metric
config['custom_tags'] = instance.get('tags', [])
# Additional tags to be sent with each metric
config['_metric_tags'] = []
# List of strings to filter the input text payload on. If any line contains
# one of these strings, it will be filtered out before being parsed.
# INTERNAL FEATURE, might be removed in future versions
config['_text_filter_blacklist'] = []
# Whether or not to use the service account bearer token for authentication
# if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token
# as a default path to get the token.
config['bearer_token_auth'] = is_affirmative(
instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False))
)
# Can be used to get a service account bearer token from files
# other than /var/run/secrets/kubernetes.io/serviceaccount/token
# 'bearer_token_auth' should be enabled.
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
# The service account bearer token to be used for authentication
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
# The metric name services use to indicate build information
config['metadata_metric_name'] = instance.get(
'metadata_metric_name', default_instance.get('metadata_metric_name')
)
# Map of metadata key names to label names
config['metadata_label_map'] = instance.get(
'metadata_label_map', default_instance.get('metadata_label_map', {})
)
config['_default_metric_transformers'] = {}
if config['metadata_metric_name'] and config['metadata_label_map']:
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
# Whether or not to enable flushing of the first value of monotonic counts
config['_successfully_executed'] = False
return config
    def get_http_handler(self, scraper_config):
        """
        Get http handler for a specific scraper config.
        The http handler is cached using `prometheus_url` as key.
        """
        prometheus_url = scraper_config['prometheus_url']
        # Return the cached handler if one was already built for this URL.
        if prometheus_url in self._http_handlers:
            return self._http_handlers[prometheus_url]

        # TODO: Deprecate this behavior in Agent 8
        if scraper_config['ssl_ca_cert'] is False:
            scraper_config['ssl_verify'] = False

        # TODO: Deprecate this behavior in Agent 8
        if scraper_config['ssl_verify'] is False:
            scraper_config.setdefault('tls_ignore_warning', True)

        # Build and cache the handler in one statement.
        http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(
            scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log
        )

        headers = http_handler.options['headers']

        bearer_token = scraper_config['_bearer_token']
        if bearer_token is not None:
            headers['Authorization'] = 'Bearer {}'.format(bearer_token)

        # TODO: Determine if we really need this
        headers.setdefault('accept-encoding', 'gzip')

        # Explicitly set the content type we accept
        headers.setdefault('accept', 'text/plain')

        return http_handler
    def reset_http_config(self):
        """
        You may need to use this when configuration is determined dynamically during every
        check run, such as when polling an external resource like the Kubelet.
        """
        # Drop every cached handler; get_http_handler() will rebuild them
        # lazily from the current scraper configuration.
        self._http_handlers.clear()
    def parse_metric_family(self, response, scraper_config):
        """
        Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.
        The text format uses iter_lines() generator.

        Yields metric families with type overrides applied and the configured
        prefix stripped from their names; families whose (possibly overridden)
        type is not in METRIC_TYPES are skipped.
        """
        if response.encoding is None:
            response.encoding = 'utf-8'
        input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
        # Optionally pre-filter raw lines before the Prometheus parser sees them.
        if scraper_config['_text_filter_blacklist']:
            input_gen = self._text_filter_input(input_gen, scraper_config)

        for metric in text_fd_to_metric_families(input_gen):
            self._send_telemetry_counter(
                self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config
            )
            # Exact-name overrides win; otherwise try the wildcard patterns.
            type_override = scraper_config['type_overrides'].get(metric.name)
            if type_override:
                metric.type = type_override
            elif scraper_config['_type_override_patterns']:
                for pattern, new_type in iteritems(scraper_config['_type_override_patterns']):
                    if pattern.search(metric.name):
                        metric.type = new_type
                        break
            if metric.type not in self.METRIC_TYPES:
                continue
            metric.name = self._remove_metric_prefix(metric.name, scraper_config)
            yield metric
    def _text_filter_input(self, input_gen, scraper_config):
        """
        Filters out the text input line by line to avoid parsing and processing
        metrics we know we don't want to process. This only works on `text/plain`
        payloads, and is an INTERNAL FEATURE implemented for the kubelet check
        :param input_gen: line generator
        :output: generator of filtered lines
        """
        for line in input_gen:
            for item in scraper_config['_text_filter_blacklist']:
                if item in line:
                    self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
                    # Drop the line and move on to the next input line.
                    break
            else:
                # No blacklist matches, passing the line through
                yield line
def _remove_metric_prefix(self, metric, scraper_config):
prometheus_metrics_prefix = scraper_config['prometheus_metrics_prefix']
return metric[len(prometheus_metrics_prefix) :] if metric.startswith(prometheus_metrics_prefix) else metric
    def scrape_metrics(self, scraper_config):
        """
        Poll the data from Prometheus and return the metrics as a generator.

        On the first run with `label_joins` configured, builds the
        `_watched_labels` lookup structures and keeps `_dry_run` True so that
        the first pass can collect label values; afterwards the dry-run flag
        is cleared and stale `_label_mapping` entries are garbage collected.
        """
        response = self.poll(scraper_config)
        if scraper_config['telemetry']:
            # Prefer the declared content length; fall back to the body size.
            if 'content-length' in response.headers:
                content_len = int(response.headers['content-length'])
            else:
                content_len = len(response.content)
            self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
        try:
            # no dry run if no label joins
            if not scraper_config['label_joins']:
                scraper_config['_dry_run'] = False
            elif not scraper_config['_watched_labels']:
                # First run with label joins: build the watch structures once.
                watched = scraper_config['_watched_labels']
                watched['sets'] = {}
                watched['keys'] = {}
                watched['singles'] = set()
                for key, val in iteritems(scraper_config['label_joins']):
                    labels = []
                    if 'labels_to_match' in val:
                        labels = val['labels_to_match']
                    elif 'label_to_match' in val:
                        self.log.warning("`label_to_match` is being deprecated, please use `labels_to_match`")
                        if isinstance(val['label_to_match'], list):
                            labels = val['label_to_match']
                        else:
                            labels = [val['label_to_match']]

                    if labels:
                        s = frozenset(labels)
                        watched['sets'][key] = s
                        watched['keys'][key] = ','.join(s)
                        if len(labels) == 1:
                            watched['singles'].add(labels[0])

            for metric in self.parse_metric_family(response, scraper_config):
                yield metric

            # Set dry run off
            scraper_config['_dry_run'] = False
            # Garbage collect unused mapping and reset active labels
            for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
                for key in list(mapping):
                    if (
                        metric in scraper_config['_active_label_mapping']
                        and key not in scraper_config['_active_label_mapping'][metric]
                    ):
                        del scraper_config['_label_mapping'][metric][key]
            scraper_config['_active_label_mapping'] = {}
        finally:
            # Always release the HTTP connection, even if the consumer stops
            # iterating early or parsing raises.
            response.close()
    def process(self, scraper_config, metric_transformers=None):
        """
        Polls the data from Prometheus and submits them as Datadog metrics.
        `endpoint` is the metrics endpoint to use to poll metrics from Prometheus

        Note that if the instance has a `tags` attribute, it will be pushed
        automatically as additional custom tags and added to the metrics

        :param metric_transformers: optional mapping of metric name to a
            callable that overrides the default handling for that metric;
            merged on top of the config's `_default_metric_transformers`.
        """
        transformers = scraper_config['_default_metric_transformers'].copy()
        if metric_transformers:
            transformers.update(metric_transformers)

        for metric in self.scrape_metrics(scraper_config):
            self.process_metric(metric, scraper_config, metric_transformers=transformers)

        # Mark the config as having completed at least one successful run.
        scraper_config['_successfully_executed'] = True
    def transform_metadata(self, metric, scraper_config):
        """Submit agent metadata extracted from a metric's labels.

        Reads the labels of the metric's first sample and, for every
        `{metadata_name: label_name}` entry in
        `scraper_config['metadata_label_map']`, reports the label's value via
        `set_metadata` when the label is present.
        """
        labels = metric.samples[0][self.SAMPLE_LABELS]
        for metadata_name, label_name in iteritems(scraper_config['metadata_label_map']):
            if label_name in labels:
                self.set_metadata(metadata_name, labels[label_name])
def _metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return metric_name
return '{}.{}'.format(namespace, metric_name)
def _telemetry_metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return '{}.{}'.format('telemetry', metric_name)
return '{}.{}.{}'.format(namespace, 'telemetry', metric_name)
def _send_telemetry_gauge(self, metric_name, val, scraper_config):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
self.gauge(metric_name_with_namespace, val, tags=tags)
def _send_telemetry_counter(self, metric_name, val, scraper_config, extra_tags=None):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
if extra_tags:
tags.extend(extra_tags)
self.count(metric_name_with_namespace, val, tags=tags)
    def _store_labels(self, metric, scraper_config):
        """Cache label values from a 'label join' source metric.

        For metrics listed in `scraper_config['label_joins']`, record the
        labels named in `labels_to_get` (or all labels for the '*' wildcard),
        keyed by the values of the watched matching labels, so that
        `_join_labels` can later enrich other metrics sharing those labels.
        """
        # If targeted metric, store labels
        if metric.name not in scraper_config['label_joins']:
            return
        watched = scraper_config['_watched_labels']
        matching_labels = watched['sets'][metric.name]
        mapping_key = watched['keys'][metric.name]
        labels_to_get = scraper_config['label_joins'][metric.name]['labels_to_get']
        get_all = '*' in labels_to_get
        match_all = mapping_key == '*'
        for sample in metric.samples:
            # metadata-only metrics that are used for label joins are always equal to 1
            # this is required for metrics where all combinations of a state are sent
            # but only the active one is set to 1 (others are set to 0)
            # example: kube_pod_status_phase in kube-state-metrics
            if sample[self.SAMPLE_VALUE] != 1:
                continue
            sample_labels = sample[self.SAMPLE_LABELS]
            sample_labels_keys = sample_labels.keys()
            if match_all or matching_labels.issubset(sample_labels_keys):
                label_dict = dict()
                if get_all:
                    # Wildcard: keep every label except the ones used as the join key.
                    for label_name, label_value in iteritems(sample_labels):
                        if label_name in matching_labels:
                            continue
                        label_dict[label_name] = label_value
                else:
                    for label_name in labels_to_get:
                        if label_name in sample_labels:
                            label_dict[label_name] = sample_labels[label_name]
                if match_all:
                    mapping_value = '*'
                else:
                    # Composite key: comma-joined values of the matching labels.
                    mapping_value = ','.join([sample_labels[l] for l in matching_labels])
                scraper_config['_label_mapping'].setdefault(mapping_key, {}).setdefault(mapping_value, {}).update(
                    label_dict
                )
    def _join_labels(self, metric, scraper_config):
        """Enrich `metric`'s samples with labels cached by `_store_labels`.

        Tries, in order: the '*' wildcard mapping, single watched labels, then
        tuples of watched labels. Every hit is also recorded in
        `_active_label_mapping` so that unused cache entries can be garbage
        collected after the scrape.
        """
        # Filter metric to see if we can enrich with joined labels
        if not scraper_config['label_joins']:
            return
        label_mapping = scraper_config['_label_mapping']
        active_label_mapping = scraper_config['_active_label_mapping']
        watched = scraper_config['_watched_labels']
        sets = watched['sets']
        keys = watched['keys']
        singles = watched['singles']
        for sample in metric.samples:
            sample_labels = sample[self.SAMPLE_LABELS]
            sample_labels_keys = sample_labels.keys()
            # Match with wildcard label
            # Label names are [a-zA-Z0-9_]*, so no risk of collision
            if '*' in singles:
                active_label_mapping.setdefault('*', {})['*'] = True
                if '*' in label_mapping and '*' in label_mapping['*']:
                    sample_labels.update(label_mapping['*']['*'])
            # Match with single labels
            matching_single_labels = singles.intersection(sample_labels_keys)
            for label in matching_single_labels:
                mapping_key = label
                mapping_value = sample_labels[label]
                active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
                if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
                    sample_labels.update(label_mapping[mapping_key][mapping_value])
            # Match with tuples of labels
            for key, mapping_key in iteritems(keys):
                # Skip joins already satisfied by the single-label pass above.
                if mapping_key in matching_single_labels:
                    continue
                matching_labels = sets[key]
                if matching_labels.issubset(sample_labels_keys):
                    matching_values = [sample_labels[l] for l in matching_labels]
                    mapping_value = ','.join(matching_values)
                    active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
                    if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
                        sample_labels.update(label_mapping[mapping_key][mapping_value])
    def _ignore_metrics_by_label(self, scraper_config, metric_name, sample):
        """Return True when `sample` must be skipped per `ignore_metrics_by_labels`.

        The config maps a label name to a list of values to drop; a '*' entry
        drops every sample carrying that label regardless of value. An empty
        values list matches nothing and is only logged.
        """
        ignore_metrics_by_label = scraper_config['ignore_metrics_by_labels']
        sample_labels = sample[self.SAMPLE_LABELS]
        for label_key, label_values in ignore_metrics_by_label.items():
            if not label_values:
                self.log.debug(
                    "Skipping filter label `%s` with an empty values list, did you mean to use '*' wildcard?", label_key
                )
            elif '*' in label_values:
                # Wildcard '*' means all metrics with label_key will be ignored
                self.log.debug("Detected wildcard for label `%s`", label_key)
                if label_key in sample_labels.keys():
                    self.log.debug("Skipping metric `%s` due to label key matching: %s", metric_name, label_key)
                    return True
            else:
                for val in label_values:
                    if label_key in sample_labels and sample_labels[label_key] == val:
                        self.log.debug(
                            "Skipping metric `%s` due to label `%s` value matching: %s", metric_name, label_key, val
                        )
                        return True
        return False
    def process_metric(self, metric, scraper_config, metric_transformers=None):
        """
        Handle a Prometheus metric according to the following flow:
        - search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping
        - call check method with the same name as the metric
        - log info if none of the above worked

        `metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`

        NOTE(review): the wildcard-transformer loop below iterates
        `metric_transformers` without a None guard; callers (e.g. `process`)
        always pass a dict — confirm before invoking directly with None.
        """
        # If targeted metric, store labels
        self._store_labels(metric, scraper_config)
        if scraper_config['ignore_metrics']:
            if metric.name in scraper_config['_ignored_metrics']:
                self._send_telemetry_counter(
                    self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
                )
                return  # Ignore the metric
            if scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name):
                # Metric must be ignored; cache the name for a faster exact match next time
                scraper_config['_ignored_metrics'].add(metric.name)
                self._send_telemetry_counter(
                    self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
                )
                return  # Ignore the metric
        self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
        if self._filter_metric(metric, scraper_config):
            return  # Ignore the metric
        # Filter metric to see if we can enrich with joined labels
        self._join_labels(metric, scraper_config)
        # During the first (dry run) pass only label-join state is collected.
        if scraper_config['_dry_run']:
            return
        try:
            self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
        except KeyError:
            if metric_transformers is not None and metric.name in metric_transformers:
                try:
                    # Get the transformer function for this specific metric
                    transformer = metric_transformers[metric.name]
                    transformer(metric, scraper_config)
                except Exception as err:
                    self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
                return
            # check for wildcards in transformers
            for transformer_name, transformer in iteritems(metric_transformers):
                if transformer_name.endswith('*') and metric.name.startswith(transformer_name[:-1]):
                    transformer(metric, scraper_config, transformer_name)
            # try matching wildcards
            if scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name):
                self.submit_openmetric(metric.name, metric, scraper_config)
                return
            self.log.debug(
                'Skipping metric `%s` as it is not defined in the metrics mapper, '
                'has no transformer function, nor does it match any wildcards.',
                metric.name,
            )
    def poll(self, scraper_config, headers=None):
        """
        Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the
        response isn't valid - see `response.raise_for_status()`

        The caller needs to close the requests.Response.

        Custom headers can be added to the default headers.

        When `health_service_check` is enabled, a `<namespace>.prometheus.health`
        service check is emitted (OK on success, CRITICAL on IOError/HTTP error).
        """
        endpoint = scraper_config.get('prometheus_url')
        # Should we send a service check for when we make a request
        health_service_check = scraper_config['health_service_check']
        service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
        service_check_tags = ['endpoint:{}'.format(endpoint)]
        service_check_tags.extend(scraper_config['custom_tags'])
        try:
            response = self.send_request(endpoint, scraper_config, headers)
        except requests.exceptions.SSLError:
            self.log.error("Invalid SSL settings for requesting %s endpoint", endpoint)
            raise
        except IOError:
            if health_service_check:
                self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
            raise
        try:
            response.raise_for_status()
            if health_service_check:
                self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
            return response
        except requests.HTTPError:
            # On HTTP errors we own the response, so close it before re-raising.
            response.close()
            if health_service_check:
                self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
            raise
def send_request(self, endpoint, scraper_config, headers=None):
kwargs = {}
if headers:
kwargs['headers'] = headers
http_handler = self.get_http_handler(scraper_config)
return http_handler.get(endpoint, stream=True, **kwargs)
def get_hostname_for_sample(self, sample, scraper_config):
"""
Expose the label_to_hostname mapping logic to custom handler methods
"""
return self._get_hostname(None, sample, scraper_config)
    def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
        """
        For each sample in the metric, report it as a gauge with all labels as tags
        except if a labels `dict` is passed, in which case keys are label names we'll extract
        and corresponding values are tag names we'll use (eg: {'node': 'node'}).

        Histograms generate a set of values instead of a unique metric.
        `send_histograms_buckets` is used to specify if you want to
        send the buckets as tagged values when dealing with histograms.

        `custom_tags` is an array of `tag:value` that will be added to the
        metric when sending the gauge to Datadog.

        NOTE(review): `hostname` is not forwarded to the histogram/summary
        helpers below even though they accept it — confirm whether intentional.
        """
        if metric.type in ["gauge", "counter", "rate"]:
            metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
            for sample in metric.samples:
                if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
                    continue

                val = sample[self.SAMPLE_VALUE]
                if not self._is_value_valid(val):
                    self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                    continue
                custom_hostname = self._get_hostname(hostname, sample, scraper_config)
                # Determine the tags to send
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
                if metric.type == "counter" and scraper_config['send_monotonic_counter']:
                    self.monotonic_count(
                        metric_name_with_namespace,
                        val,
                        tags=tags,
                        hostname=custom_hostname,
                        flush_first_value=scraper_config['_successfully_executed'],
                    )
                elif metric.type == "rate":
                    self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
                else:
                    self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)

                    # Metric is a "counter" but legacy behavior has "send_as_monotonic" defaulted to False
                    # Submit metric as monotonic_count with appended name
                    if metric.type == "counter" and scraper_config['send_monotonic_with_gauge']:
                        self.monotonic_count(
                            metric_name_with_namespace + '.total',
                            val,
                            tags=tags,
                            hostname=custom_hostname,
                            flush_first_value=scraper_config['_successfully_executed'],
                        )
        elif metric.type == "histogram":
            self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
        elif metric.type == "summary":
            self._submit_gauges_from_summary(metric_name, metric, scraper_config)
        else:
            self.log.error("Metric type %s unsupported for metric %s.", metric.type, metric_name)
def _get_hostname(self, hostname, sample, scraper_config):
"""
If hostname is None, look at label_to_hostname setting
"""
if (
hostname is None
and scraper_config['label_to_hostname'] is not None
and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])
):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if suffix is not None:
hostname += suffix
return hostname
    def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
        """
        Extracts metrics from a prometheus summary metric and sends them as gauges:
        `<name>.sum` and `<name>.count` (gauge or monotonic count depending on config)
        and `<name>.quantile` gauges tagged with a normalized `quantile` label.
        """
        for sample in metric.samples:
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                continue
            if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
                continue
            custom_hostname = self._get_hostname(hostname, sample, scraper_config)
            if sample[self.SAMPLE_NAME].endswith("_sum"):
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
                self._submit_distribution_count(
                    scraper_config['send_distribution_sums_as_monotonic'],
                    scraper_config['send_monotonic_with_gauge'],
                    "{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
            elif sample[self.SAMPLE_NAME].endswith("_count"):
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
                self._submit_distribution_count(
                    scraper_config['send_distribution_counts_as_monotonic'],
                    scraper_config['send_monotonic_with_gauge'],
                    "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
            else:
                try:
                    quantile = sample[self.SAMPLE_LABELS]["quantile"]
                except KeyError:
                    # TODO: In the Prometheus spec the 'quantile' label is optional, but it's not clear yet
                    # what we should do in this case. Let's skip for now and submit the rest of metrics.
                    message = (
                        '"quantile" label not present in metric %r. '
                        'Quantile-less summary metrics are not currently supported. Skipping...'
                    )
                    self.log.debug(message, metric_name)
                    continue

                # Normalize the quantile label (e.g. "0.50" -> "0.5") for stable tag values.
                sample[self.SAMPLE_LABELS]["quantile"] = str(float(quantile))
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
                self.gauge(
                    "{}.quantile".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                )
    def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
        """
        Extracts metrics from a prometheus histogram and sends them as gauges:
        `<name>.sum`, `<name>.count`, and (optionally) per-bucket counts or
        Datadog distribution buckets depending on the scraper configuration.

        NOTE(review): the `_metric_tags` calls below pass `hostname` (the
        caller's value) positionally, while the summary path passes
        `custom_hostname` — confirm whether this asymmetry is intentional.
        """
        if scraper_config['non_cumulative_buckets']:
            self._decumulate_histogram_buckets(metric)
        for sample in metric.samples:
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                continue
            if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
                continue
            custom_hostname = self._get_hostname(hostname, sample, scraper_config)
            if sample[self.SAMPLE_NAME].endswith("_sum") and not scraper_config['send_distribution_buckets']:
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
                self._submit_distribution_count(
                    scraper_config['send_distribution_sums_as_monotonic'],
                    scraper_config['send_monotonic_with_gauge'],
                    "{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
            elif sample[self.SAMPLE_NAME].endswith("_count") and not scraper_config['send_distribution_buckets']:
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
                if scraper_config['send_histograms_buckets']:
                    tags.append("upper_bound:none")
                self._submit_distribution_count(
                    scraper_config['send_distribution_counts_as_monotonic'],
                    scraper_config['send_monotonic_with_gauge'],
                    "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
            elif scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket"):
                if scraper_config['send_distribution_buckets']:
                    self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
                # The cumulative "+Inf" bucket duplicates `_count`, so it is only
                # submitted when buckets have been decumulated.
                elif "Inf" not in sample[self.SAMPLE_LABELS]["le"] or scraper_config['non_cumulative_buckets']:
                    sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
                    tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
                    self._submit_distribution_count(
                        scraper_config['send_distribution_counts_as_monotonic'],
                        scraper_config['send_monotonic_with_gauge'],
                        "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                        val,
                        tags=tags,
                        hostname=custom_hostname,
                        flush_first_value=scraper_config['_successfully_executed'],
                    )
def _compute_bucket_hash(self, tags):
# we need the unique context for all the buckets
# hence we remove the "le" tag
return hash(frozenset(sorted((k, v) for k, v in iteritems(tags) if k != 'le')))
    def _decumulate_histogram_buckets(self, metric):
        """
        Decumulate buckets in a given histogram metric and adds the lower_bound label (le being upper_bound)
        """
        # Pass 1: collect the cumulative value of every bucket, grouped by
        # histogram context (label set minus 'le') and keyed by upper bound.
        bucket_values_by_context_upper_bound = {}
        for sample in metric.samples:
            if sample[self.SAMPLE_NAME].endswith("_bucket"):
                context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
                if context_key not in bucket_values_by_context_upper_bound:
                    bucket_values_by_context_upper_bound[context_key] = {}
                bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]["le"])] = sample[
                    self.SAMPLE_VALUE
                ]
        sorted_buckets_by_context = {}
        for context in bucket_values_by_context_upper_bound:
            sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
        # Pass 2: turn cumulative values into per-bucket deltas.
        # Tuples (lower_bound, upper_bound, value)
        bucket_tuples_by_context_upper_bound = {}
        for context in sorted_buckets_by_context:
            for i, upper_b in enumerate(sorted_buckets_by_context[context]):
                if i == 0:
                    if context not in bucket_tuples_by_context_upper_bound:
                        bucket_tuples_by_context_upper_bound[context] = {}
                    if upper_b > 0:
                        # positive buckets start at zero
                        bucket_tuples_by_context_upper_bound[context][upper_b] = (
                            0,
                            upper_b,
                            bucket_values_by_context_upper_bound[context][upper_b],
                        )
                    else:
                        # negative buckets start at -inf
                        bucket_tuples_by_context_upper_bound[context][upper_b] = (
                            self.MINUS_INF,
                            upper_b,
                            bucket_values_by_context_upper_bound[context][upper_b],
                        )
                    continue
                tmp = (
                    bucket_values_by_context_upper_bound[context][upper_b]
                    - bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][i - 1]]
                )
                bucket_tuples_by_context_upper_bound[context][upper_b] = (
                    sorted_buckets_by_context[context][i - 1],
                    upper_b,
                    tmp,
                )
        # Pass 3: modify original metric to inject lower_bound & modified value
        for i, sample in enumerate(metric.samples):
            if not sample[self.SAMPLE_NAME].endswith("_bucket"):
                continue
            context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
            matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][
                float(sample[self.SAMPLE_LABELS]["le"])
            ]
            # Replacing the sample tuple
            sample[self.SAMPLE_LABELS]["lower_bound"] = str(matching_bucket_tuple[0])
            metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
    def _submit_sample_histogram_buckets(self, metric_name, sample, scraper_config, hostname=None):
        """Submit one decumulated bucket sample as a Datadog distribution bucket.

        Requires both `lower_bound` and `le` labels (added by
        `_decumulate_histogram_buckets`); malformed or degenerate buckets are
        logged and skipped.
        """
        if "lower_bound" not in sample[self.SAMPLE_LABELS] or "le" not in sample[self.SAMPLE_LABELS]:
            self.log.warning(
                "Metric: %s was not containing required bucket boundaries labels: %s",
                metric_name,
                sample[self.SAMPLE_LABELS],
            )
            return
        # Normalize both bounds to canonical float strings before comparing.
        sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
        sample[self.SAMPLE_LABELS]["lower_bound"] = str(float(sample[self.SAMPLE_LABELS]["lower_bound"]))
        if sample[self.SAMPLE_LABELS]["le"] == sample[self.SAMPLE_LABELS]["lower_bound"]:
            # this can happen for -inf/-inf bucket that we don't want to send (always 0)
            self.log.warning(
                "Metric: %s has bucket boundaries equal, skipping: %s", metric_name, sample[self.SAMPLE_LABELS]
            )
            return
        tags = self._metric_tags(metric_name, sample[self.SAMPLE_VALUE], sample, scraper_config, hostname)
        self.submit_histogram_bucket(
            self._metric_name_with_namespace(metric_name, scraper_config),
            sample[self.SAMPLE_VALUE],
            float(sample[self.SAMPLE_LABELS]["lower_bound"]),
            float(sample[self.SAMPLE_LABELS]["le"]),
            True,
            hostname,
            tags,
        )
def _submit_distribution_count(
self,
monotonic,
send_monotonic_with_gauge,
metric_name,
value,
tags=None,
hostname=None,
flush_first_value=False,
):
if monotonic:
self.monotonic_count(metric_name, value, tags=tags, hostname=hostname, flush_first_value=flush_first_value)
else:
self.gauge(metric_name, value, tags=tags, hostname=hostname)
if send_monotonic_with_gauge:
self.monotonic_count(
metric_name + ".total", value, tags=tags, hostname=hostname, flush_first_value=flush_first_value
)
    def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
        """Build the tag list for one sample.

        Order: custom tags, scraper-level metric tags, then one `name:value`
        tag per sample label, honoring `labels_mapper` renames and skipping
        `exclude_labels`. The result is passed through
        `_finalize_tags_to_submit` for any check-specific post-processing.
        """
        custom_tags = scraper_config['custom_tags']
        _tags = list(custom_tags)
        _tags.extend(scraper_config['_metric_tags'])
        for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
            if label_name not in scraper_config['exclude_labels']:
                tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
                _tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value)))
        return self._finalize_tags_to_submit(
            _tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
        )
def _is_value_valid(self, val):
return not (isnan(val) or isinf(val))
    def _get_bearer_token(self, bearer_token_auth, bearer_token_path):
        """Read and return the bearer token, or None when auth is disabled.

        Resolution order: explicit `bearer_token_path` (must exist), then the
        default Kubernetes service-account token path. Raises IOError when no
        readable token file can be found, and re-raises any read failure.
        """
        if bearer_token_auth is False:
            return None

        path = None
        if bearer_token_path is not None:
            if isfile(bearer_token_path):
                path = bearer_token_path
            else:
                self.log.error("File not found: %s", bearer_token_path)
        elif isfile(self.KUBERNETES_TOKEN_PATH):
            path = self.KUBERNETES_TOKEN_PATH

        if path is None:
            self.log.error("Cannot get bearer token from bearer_token_path or auto discovery")
            raise IOError("Cannot get bearer token from bearer_token_path or auto discovery")

        try:
            with open(path, 'r') as f:
                # Strip the trailing newline most token files carry.
                return f.read().rstrip()
        except Exception as err:
            self.log.error("Cannot get bearer token from path: %s - error: %s", path, err)
            raise
    def _histogram_convert_values(self, metric_name, converter):
        """Return a transformer that rescales a histogram's values with `converter`.

        The `_sum` sample value and every finite `le` bucket bound are
        converted (counts are unitless and untouched), then the metric is
        re-submitted through `submit_openmetric` under `metric_name`.
        """

        def _convert(metric, scraper_config=None):
            for index, sample in enumerate(metric.samples):
                val = sample[self.SAMPLE_VALUE]
                if not self._is_value_valid(val):
                    self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                    continue
                if sample[self.SAMPLE_NAME].endswith("_sum"):
                    lst = list(sample)
                    lst[self.SAMPLE_VALUE] = converter(val)
                    metric.samples[index] = tuple(lst)
                elif sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"]:
                    sample[self.SAMPLE_LABELS]["le"] = str(converter(float(sample[self.SAMPLE_LABELS]["le"])))
            self.submit_openmetric(metric_name, metric, scraper_config)

        return _convert
    def _histogram_from_microseconds_to_seconds(self, metric_name):
        """Return a transformer that rescales a histogram from microseconds to seconds."""
        return self._histogram_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
    def _histogram_from_seconds_to_microseconds(self, metric_name):
        """Return a transformer that rescales a histogram from seconds to microseconds."""
        return self._histogram_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
def _summary_convert_values(self, metric_name, converter):
def _convert(metric, scraper_config=None):
for index, sample in enumerate(metric.samples):
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if sample[self.SAMPLE_NAME].endswith("_count"):
continue
else:
lst = list(sample)
lst[self.SAMPLE_VALUE] = converter(val)
metric.samples[index] = tuple(lst)
self.submit_openmetric(metric_name, metric, scraper_config)
return _convert
    def _summary_from_microseconds_to_seconds(self, metric_name):
        """Return a transformer that rescales a summary from microseconds to seconds."""
        return self._summary_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
    def _summary_from_seconds_to_microseconds(self, metric_name):
        """Return a transformer that rescales a summary from seconds to microseconds."""
        return self._summary_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
| 47.554054
| 120
| 0.627327
|
from __future__ import division
import copy
from fnmatch import translate
from math import isinf, isnan
from os.path import isfile
from re import compile
import requests
from prometheus_client.samples import Sample
from six import PY3, iteritems, string_types
from ...config import is_affirmative
from ...errors import CheckException
from ...utils.common import to_native_string
from ...utils.http import RequestsWrapper
from .. import AgentCheck
from ..libs.prometheus import text_fd_to_metric_families
# Under Python 3 the `long` builtin is gone; alias it so PY2-era code keeps working.
if PY3:
    long = int
class OpenMetricsScraperMixin(object):
    """Mixin that scrapes a Prometheus/OpenMetrics endpoint and submits the
    parsed samples as Datadog metrics.

    All per-endpoint settings live in a "scraper config" dict built by
    `create_scraper_configuration` and passed explicitly to every method.
    """

    # Chunk size (bytes) used when streaming the scrape response.
    REQUESTS_CHUNK_SIZE = 1024 * 10
    # Indexes into prometheus_client Sample tuples: (name, labels, value).
    SAMPLE_NAME = 0
    SAMPLE_LABELS = 1
    SAMPLE_VALUE = 2
    MICROS_IN_S = 1000000
    MINUS_INF = float("-inf")
    # Names of the self-telemetry metrics emitted when `telemetry` is enabled.
    TELEMETRY_GAUGE_MESSAGE_SIZE = "payload.size"
    TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT = "metrics.blacklist.count"
    TELEMETRY_COUNTER_METRICS_INPUT_COUNT = "metrics.input.count"
    TELEMETRY_COUNTER_METRICS_IGNORE_COUNT = "metrics.ignored.count"
    TELEMETRY_COUNTER_METRICS_PROCESS_COUNT = "metrics.processed.count"
    # Only these OpenMetrics types are processed; others are skipped at parse time.
    METRIC_TYPES = ['counter', 'gauge', 'summary', 'histogram']
    # Default in-cluster service-account token location used for bearer auth.
    KUBERNETES_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the next class in the MRO (cooperative mixin)."""
        super(OpenMetricsScraperMixin, self).__init__(*args, **kwargs)
def create_scraper_configuration(self, instance=None):
if 'openmetrics_endpoint' in instance:
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
# We can choose to create a default mixin configuration for an empty instance
if instance is None:
instance = {}
# Supports new configuration options
config = copy.deepcopy(instance)
# Set the endpoint
endpoint = instance.get('prometheus_url')
if instance and endpoint is None:
raise CheckException("You have to define a prometheus_url for each prometheus instance")
config['prometheus_url'] = endpoint
# `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the
# child check class.
namespace = instance.get('namespace')
# Check if we have a namespace
if instance and namespace is None:
if self.default_namespace is None:
raise CheckException("You have to define a namespace for each prometheus check")
namespace = self.default_namespace
config['namespace'] = namespace
# Retrieve potential default instance settings for the namespace
default_instance = self.default_instances.get(namespace, {})
# `metrics_mapper` is a dictionary where the keys are the metrics to capture
# and the values are the corresponding metrics names to have in datadog.
# Note: it is empty in the parent class but will need to be
# overloaded/hardcoded in the final check not to be counted as custom metric.
# Metrics are preprocessed if no mapping
metrics_mapper = {}
# We merge list and dictionaries from optional defaults & instance settings
metrics = default_instance.get('metrics', []) + instance.get('metrics', [])
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
# `_wildcards_re` is a Pattern object used to match metric wildcards
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if "*" in metric:
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
# `prometheus_metrics_prefix` allows to specify a prefix that all
# prometheus metrics should have. This can be used when the prometheus
# endpoint we are scrapping allows to add a custom prefix to it's
config['prometheus_metrics_prefix'] = instance.get(
'prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', '')
)
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
config['_label_mapping'] = {}
config['_active_label_mapping'] = {}
config['_watched_labels'] = {}
config['_dry_run'] = True
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
config['_ignored_re'] = None
ignored_patterns = set()
for metric in config['ignore_metrics']:
if '*' in metric:
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
config['ignore_metrics_by_labels'] = instance.get(
'ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {})
)
config['send_histograms_buckets'] = is_affirmative(
instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True))
)
config['non_cumulative_buckets'] = is_affirmative(
instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False))
)
config['send_distribution_buckets'] = is_affirmative(
instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False))
)
if config['send_distribution_buckets'] is True:
config['non_cumulative_buckets'] = True
config['send_monotonic_counter'] = is_affirmative(
instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True))
)
config['send_monotonic_with_gauge'] = is_affirmative(
instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False))
)
config['send_distribution_counts_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_counts_as_monotonic',
default_instance.get('send_distribution_counts_as_monotonic', False),
)
)
config['send_distribution_sums_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_sums_as_monotonic',
default_instance.get('send_distribution_sums_as_monotonic', False),
)
)
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
config['labels_mapper']['le'] = 'upper_bound'
config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', [])
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
config['_type_override_patterns'] = {}
with_wildcards = set()
for metric, type in iteritems(config['type_overrides']):
if '*' in metric:
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
for metric in with_wildcards:
del config['type_overrides'][metric]
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
config['label_to_hostname_suffix'] = instance.get(
'label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None)
)
config['health_service_check'] = is_affirmative(
instance.get('health_service_check', default_instance.get('health_service_check', True))
)
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
config['prometheus_timeout'] = instance.get(
'prometheus_timeout', default_instance.get('prometheus_timeout', 10)
)
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
config['custom_tags'] = instance.get('tags', [])
config['_metric_tags'] = []
config['_text_filter_blacklist'] = []
config['bearer_token_auth'] = is_affirmative(
instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False))
)
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
config['metadata_metric_name'] = instance.get(
'metadata_metric_name', default_instance.get('metadata_metric_name')
)
config['metadata_label_map'] = instance.get(
'metadata_label_map', default_instance.get('metadata_label_map', {})
)
config['_default_metric_transformers'] = {}
if config['metadata_metric_name'] and config['metadata_label_map']:
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
config['_successfully_executed'] = False
return config
def get_http_handler(self, scraper_config):
    """Return the RequestsWrapper used to reach the Prometheus endpoint.

    Wrappers are cached per `prometheus_url`. Creating one also maps the legacy
    ssl_* options onto the wrapper's TLS settings and installs default headers
    (bearer token auth, gzip, text/plain accept).
    """
    prometheus_url = scraper_config['prometheus_url']
    if prometheus_url in self._http_handlers:
        return self._http_handlers[prometheus_url]
    # Legacy TLS options: ssl_ca_cert=False means "don't verify"; when
    # verification is off, also silence the insecure-request warning by default.
    if scraper_config['ssl_ca_cert'] is False:
        scraper_config['ssl_verify'] = False
    if scraper_config['ssl_verify'] is False:
        scraper_config.setdefault('tls_ignore_warning', True)
    http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(
        scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log
    )
    headers = http_handler.options['headers']
    bearer_token = scraper_config['_bearer_token']
    if bearer_token is not None:
        headers['Authorization'] = 'Bearer {}'.format(bearer_token)
    # Defaults only — setdefault keeps any user-provided header values.
    headers.setdefault('accept-encoding', 'gzip')
    headers.setdefault('accept', 'text/plain')
    return http_handler
def reset_http_config(self):
    """Drop every cached HTTP handler so the next scrape rebuilds them.

    Clears the mapping in place, so any outside reference to it stays valid.
    """
    self._http_handlers.clear()
def parse_metric_family(self, response, scraper_config):
    """Yield metric-family objects parsed from the HTTP response body.

    Applies, in order: the text blacklist filter, input telemetry counting,
    per-metric type overrides (exact name first, then the first matching
    pattern), a filter on unsupported metric types, and prefix stripping.
    """
    # Default to utf-8 when the server did not declare an encoding.
    if response.encoding is None:
        response.encoding = 'utf-8'
    input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
    if scraper_config['_text_filter_blacklist']:
        input_gen = self._text_filter_input(input_gen, scraper_config)
    for metric in text_fd_to_metric_families(input_gen):
        self._send_telemetry_counter(
            self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config
        )
        # An exact-name override wins over pattern overrides.
        type_override = scraper_config['type_overrides'].get(metric.name)
        if type_override:
            metric.type = type_override
        elif scraper_config['_type_override_patterns']:
            for pattern, new_type in iteritems(scraper_config['_type_override_patterns']):
                if pattern.search(metric.name):
                    metric.type = new_type
                    break
        if metric.type not in self.METRIC_TYPES:
            continue
        metric.name = self._remove_metric_prefix(metric.name, scraper_config)
        yield metric
def _text_filter_input(self, input_gen, scraper_config):
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if item in line:
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
yield line
def _remove_metric_prefix(self, metric, scraper_config):
prometheus_metrics_prefix = scraper_config['prometheus_metrics_prefix']
return metric[len(prometheus_metrics_prefix) :] if metric.startswith(prometheus_metrics_prefix) else metric
def scrape_metrics(self, scraper_config):
    """Poll the endpoint and yield parsed metric families.

    On the first scrape with `label_joins` configured, builds the
    `_watched_labels` lookup structures ('sets'/'keys' per join source,
    'singles' for one-label fast paths); `_dry_run` stays True during that
    first pass so only label values are recorded. After a full pass,
    `_label_mapping` entries not re-observed (per `_active_label_mapping`)
    are pruned. The response is always closed via the finally clause.
    """
    response = self.poll(scraper_config)
    if scraper_config['telemetry']:
        # Payload-size telemetry: prefer the header, fall back to the body length.
        if 'content-length' in response.headers:
            content_len = int(response.headers['content-length'])
        else:
            content_len = len(response.content)
        self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
    try:
        if not scraper_config['label_joins']:
            scraper_config['_dry_run'] = False
        elif not scraper_config['_watched_labels']:
            # First scrape with label joins: precompute the matching structures.
            watched = scraper_config['_watched_labels']
            watched['sets'] = {}
            watched['keys'] = {}
            watched['singles'] = set()
            for key, val in iteritems(scraper_config['label_joins']):
                labels = []
                if 'labels_to_match' in val:
                    labels = val['labels_to_match']
                elif 'label_to_match' in val:
                    self.log.warning("`label_to_match` is being deprecated, please use `labels_to_match`")
                    if isinstance(val['label_to_match'], list):
                        labels = val['label_to_match']
                    else:
                        labels = [val['label_to_match']]
                if labels:
                    s = frozenset(labels)
                    watched['sets'][key] = s
                    watched['keys'][key] = ','.join(s)
                    if len(labels) == 1:
                        watched['singles'].add(labels[0])
        for metric in self.parse_metric_family(response, scraper_config):
            yield metric
        # The dry run (if any) is over once a full pass has completed.
        scraper_config['_dry_run'] = False
        # Prune stored join labels that were not marked active during this scrape.
        for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
            for key in list(mapping):
                if (
                    metric in scraper_config['_active_label_mapping']
                    and key not in scraper_config['_active_label_mapping'][metric]
                ):
                    del scraper_config['_label_mapping'][metric][key]
        scraper_config['_active_label_mapping'] = {}
    finally:
        response.close()
def process(self, scraper_config, metric_transformers=None):
    """Scrape the endpoint once and submit every metric it returned.

    `metric_transformers` (metric name -> callable) is layered on top of the
    config's default transformers before being handed to process_metric.
    """
    merged_transformers = dict(scraper_config['_default_metric_transformers'])
    if metric_transformers:
        merged_transformers.update(metric_transformers)
    for metric in self.scrape_metrics(scraper_config):
        self.process_metric(metric, scraper_config, metric_transformers=merged_transformers)
    # Remember a successful full run so monotonic counts may flush first values.
    scraper_config['_successfully_executed'] = True
def transform_metadata(self, metric, scraper_config):
    """Submit agent metadata read from the labels of a metadata metric.

    `metadata_label_map` maps a metadata name to the label (on the metric's
    first sample) whose value should be reported for it.
    """
    # Only the first sample's labels are inspected.
    labels = metric.samples[0][self.SAMPLE_LABELS]
    for metadata_name, label_name in iteritems(scraper_config['metadata_label_map']):
        if label_name in labels:
            self.set_metadata(metadata_name, labels[label_name])
def _metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return metric_name
return '{}.{}'.format(namespace, metric_name)
def _telemetry_metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return '{}.{}'.format('telemetry', metric_name)
return '{}.{}.{}'.format(namespace, 'telemetry', metric_name)
def _send_telemetry_gauge(self, metric_name, val, scraper_config):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
self.gauge(metric_name_with_namespace, val, tags=tags)
def _send_telemetry_counter(self, metric_name, val, scraper_config, extra_tags=None):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
if extra_tags:
tags.extend(extra_tags)
self.count(metric_name_with_namespace, val, tags=tags)
def _store_labels(self, metric, scraper_config):
    """Record label values from a join-source metric into `_label_mapping`.

    For each sample whose labels cover the configured match labels, stores the
    labels listed in `labels_to_get` (all of them for '*') keyed by the join
    key and the joined match-label values, so _join_labels can later copy them
    onto other metrics.
    """
    if metric.name not in scraper_config['label_joins']:
        return
    watched = scraper_config['_watched_labels']
    matching_labels = watched['sets'][metric.name]
    mapping_key = watched['keys'][metric.name]
    labels_to_get = scraper_config['label_joins'][metric.name]['labels_to_get']
    get_all = '*' in labels_to_get
    match_all = mapping_key == '*'
    for sample in metric.samples:
        # Samples with value != 1 are skipped — join sources are expected to be
        # constant-1 "info"-style metrics (TODO confirm against the configs used).
        if sample[self.SAMPLE_VALUE] != 1:
            continue
        sample_labels = sample[self.SAMPLE_LABELS]
        sample_labels_keys = sample_labels.keys()
        if match_all or matching_labels.issubset(sample_labels_keys):
            label_dict = dict()
            if get_all:
                # Store every label except the match labels themselves.
                for label_name, label_value in iteritems(sample_labels):
                    if label_name in matching_labels:
                        continue
                    label_dict[label_name] = label_value
            else:
                for label_name in labels_to_get:
                    if label_name in sample_labels:
                        label_dict[label_name] = sample_labels[label_name]
            if match_all:
                mapping_value = '*'
            else:
                mapping_value = ','.join([sample_labels[l] for l in matching_labels])
            scraper_config['_label_mapping'].setdefault(mapping_key, {}).setdefault(mapping_value, {}).update(
                label_dict
            )
def _join_labels(self, metric, scraper_config):
    """Merge stored join labels into this metric's samples (mutates the samples).

    For every sample, looks up `_label_mapping` entries whose match labels the
    sample covers — wildcard '*', then the single-label fast path, then
    multi-label sets — and updates the sample's labels in place. Every mapping
    that is looked up is marked in `_active_label_mapping` so scrape_metrics
    can prune stale entries afterwards.
    """
    if not scraper_config['label_joins']:
        return
    label_mapping = scraper_config['_label_mapping']
    active_label_mapping = scraper_config['_active_label_mapping']
    watched = scraper_config['_watched_labels']
    sets = watched['sets']
    keys = watched['keys']
    singles = watched['singles']
    for sample in metric.samples:
        sample_labels = sample[self.SAMPLE_LABELS]
        sample_labels_keys = sample_labels.keys()
        # Wildcard join: applies to every sample.
        if '*' in singles:
            active_label_mapping.setdefault('*', {})['*'] = True
            if '*' in label_mapping and '*' in label_mapping['*']:
                sample_labels.update(label_mapping['*']['*'])
        # Fast path for joins keyed on a single label.
        matching_single_labels = singles.intersection(sample_labels_keys)
        for label in matching_single_labels:
            mapping_key = label
            mapping_value = sample_labels[label]
            active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
            if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
                sample_labels.update(label_mapping[mapping_key][mapping_value])
        # General case: joins matching on a set of labels.
        for key, mapping_key in iteritems(keys):
            if mapping_key in matching_single_labels:
                continue  # already handled by the single-label fast path
            matching_labels = sets[key]
            if matching_labels.issubset(sample_labels_keys):
                matching_values = [sample_labels[l] for l in matching_labels]
                mapping_value = ','.join(matching_values)
                active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
                if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
                    sample_labels.update(label_mapping[mapping_key][mapping_value])
def _ignore_metrics_by_label(self, scraper_config, metric_name, sample):
ignore_metrics_by_label = scraper_config['ignore_metrics_by_labels']
sample_labels = sample[self.SAMPLE_LABELS]
for label_key, label_values in ignore_metrics_by_label.items():
if not label_values:
self.log.debug(
"Skipping filter label `%s` with an empty values list, did you mean to use '*' wildcard?", label_key
)
elif '*' in label_values:
self.log.debug("Detected wildcard for label `%s`", label_key)
if label_key in sample_labels.keys():
self.log.debug("Skipping metric `%s` due to label key matching: %s", metric_name, label_key)
return True
else:
for val in label_values:
if label_key in sample_labels and sample_labels[label_key] == val:
self.log.debug(
"Skipping metric `%s` due to label `%s` value matching: %s", metric_name, label_key, val
)
return True
return False
def process_metric(self, metric, scraper_config, metric_transformers=None):
    """Dispatch one parsed metric family: store join labels, filter, submit.

    Submission resolution order: explicit `metrics_mapper` entry, a matching
    transformer (exact name first, then `prefix*` wildcard transformers), then
    the compiled metric wildcards; anything left over is logged and dropped.
    """
    # Always record join-source labels, even for metrics that end up ignored.
    self._store_labels(metric, scraper_config)
    if scraper_config['ignore_metrics']:
        if metric.name in scraper_config['_ignored_metrics']:
            self._send_telemetry_counter(
                self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
            )
            return
        if scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name):
            # Cache the name so future scrapes skip the regex search.
            scraper_config['_ignored_metrics'].add(metric.name)
            self._send_telemetry_counter(
                self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
            )
            return
    self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
    if self._filter_metric(metric, scraper_config):
        return
    self._join_labels(metric, scraper_config)
    # During the label-join dry run nothing is submitted.
    if scraper_config['_dry_run']:
        return
    try:
        self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
    except KeyError:
        # Not in the mapper: fall back to transformers, then wildcards.
        if metric_transformers is not None and metric.name in metric_transformers:
            try:
                transformer = metric_transformers[metric.name]
                transformer(metric, scraper_config)
            except Exception as err:
                self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
            return
        for transformer_name, transformer in iteritems(metric_transformers):
            if transformer_name.endswith('*') and metric.name.startswith(transformer_name[:-1]):
                transformer(metric, scraper_config, transformer_name)
        if scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name):
            self.submit_openmetric(metric.name, metric, scraper_config)
            return
        self.log.debug(
            'Skipping metric `%s` as it is not defined in the metrics mapper, '
            'has no transformer function, nor does it match any wildcards.',
            metric.name,
        )
def poll(self, scraper_config, headers=None):
    """GET the Prometheus endpoint and return the raw HTTP response.

    When `health_service_check` is enabled, submits the
    `<namespace>.prometheus.health` service check (OK on 2xx, CRITICAL on
    connection or HTTP errors). SSL, IO, and HTTP errors are re-raised; the
    response is closed before re-raising an HTTP error.
    """
    endpoint = scraper_config.get('prometheus_url')
    health_service_check = scraper_config['health_service_check']
    service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
    service_check_tags = ['endpoint:{}'.format(endpoint)]
    service_check_tags.extend(scraper_config['custom_tags'])
    try:
        response = self.send_request(endpoint, scraper_config, headers)
    except requests.exceptions.SSLError:
        self.log.error("Invalid SSL settings for requesting %s endpoint", endpoint)
        raise
    except IOError:
        if health_service_check:
            self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
        raise
    try:
        response.raise_for_status()
        if health_service_check:
            self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
        return response
    except requests.HTTPError:
        # Don't leak the streamed connection on a failed scrape.
        response.close()
        if health_service_check:
            self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
        raise
def send_request(self, endpoint, scraper_config, headers=None):
    """GET `endpoint` through the cached HTTP handler, streaming the response body."""
    request_kwargs = {'headers': headers} if headers else {}
    handler = self.get_http_handler(scraper_config)
    return handler.get(endpoint, stream=True, **request_kwargs)
def get_hostname_for_sample(self, sample, scraper_config):
    """Resolve the submission hostname for a sample via the `label_to_hostname` config.

    Thin public wrapper over _get_hostname with no explicit hostname override.
    """
    return self._get_hostname(None, sample, scraper_config)
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
    """Submit one metric family to the aggregator under its mapped name.

    gauge/counter/rate samples are submitted per-sample (counters as monotonic
    counts and/or gauges depending on config); histogram and summary families
    are expanded by the dedicated helpers. Unknown types are logged as errors.
    """
    if metric.type in ["gauge", "counter", "rate"]:
        metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
        for sample in metric.samples:
            if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
                continue
            val = sample[self.SAMPLE_VALUE]
            # NaN / +-Inf samples are not submittable.
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                continue
            custom_hostname = self._get_hostname(hostname, sample, scraper_config)
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
            if metric.type == "counter" and scraper_config['send_monotonic_counter']:
                self.monotonic_count(
                    metric_name_with_namespace,
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
            elif metric.type == "rate":
                self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
            else:
                self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
                # Optionally keep a monotonic count alongside the counter-as-gauge.
                if metric.type == "counter" and scraper_config['send_monotonic_with_gauge']:
                    self.monotonic_count(
                        metric_name_with_namespace + '.total',
                        val,
                        tags=tags,
                        hostname=custom_hostname,
                        flush_first_value=scraper_config['_successfully_executed'],
                    )
    elif metric.type == "histogram":
        self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
    elif metric.type == "summary":
        self._submit_gauges_from_summary(metric_name, metric, scraper_config)
    else:
        self.log.error("Metric type %s unsupported for metric %s.", metric.type, metric_name)
def _get_hostname(self, hostname, sample, scraper_config):
if (
hostname is None
and scraper_config['label_to_hostname'] is not None
and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])
):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if suffix is not None:
hostname += suffix
return hostname
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
    """Expand a summary family into `.sum`, `.count`, and `.quantile` series.

    Sum and count go through _submit_distribution_count (monotonic and/or
    gauge depending on config); quantile samples become gauges tagged with a
    normalized `quantile` label.
    """
    for sample in metric.samples:
        val = sample[self.SAMPLE_VALUE]
        if not self._is_value_valid(val):
            self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
            continue
        if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
            continue
        custom_hostname = self._get_hostname(hostname, sample, scraper_config)
        if sample[self.SAMPLE_NAME].endswith("_sum"):
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
            self._submit_distribution_count(
                scraper_config['send_distribution_sums_as_monotonic'],
                scraper_config['send_monotonic_with_gauge'],
                "{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                val,
                tags=tags,
                hostname=custom_hostname,
                flush_first_value=scraper_config['_successfully_executed'],
            )
        elif sample[self.SAMPLE_NAME].endswith("_count"):
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
            self._submit_distribution_count(
                scraper_config['send_distribution_counts_as_monotonic'],
                scraper_config['send_monotonic_with_gauge'],
                "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                val,
                tags=tags,
                hostname=custom_hostname,
                flush_first_value=scraper_config['_successfully_executed'],
            )
        else:
            try:
                quantile = sample[self.SAMPLE_LABELS]["quantile"]
            except KeyError:
                # The 'quantile' label can be absent and it is not clear yet
                # what we should do in this case. Let's skip for now and submit the rest of metrics.
                message = (
                    '"quantile" label not present in metric %r. '
                    'Quantile-less summary metrics are not currently supported. Skipping...'
                )
                self.log.debug(message, metric_name)
                continue
            # Normalize the quantile (e.g. '0.990000' -> '0.99') so tags are stable.
            sample[self.SAMPLE_LABELS]["quantile"] = str(float(quantile))
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
            self.gauge(
                "{}.quantile".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                val,
                tags=tags,
                hostname=custom_hostname,
            )
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
    """Expand a histogram family into `.sum`, `.count`, and bucket series.

    `non_cumulative_buckets` decumulates the bucket values first. Buckets are
    submitted either as native agent histogram buckets
    (`send_distribution_buckets`) or, with `send_histograms_buckets`, as
    per-bucket `.count` series carrying their bound labels.
    """
    if scraper_config['non_cumulative_buckets']:
        self._decumulate_histogram_buckets(metric)
    for sample in metric.samples:
        val = sample[self.SAMPLE_VALUE]
        if not self._is_value_valid(val):
            self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
            continue
        if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
            continue
        custom_hostname = self._get_hostname(hostname, sample, scraper_config)
        if sample[self.SAMPLE_NAME].endswith("_sum") and not scraper_config['send_distribution_buckets']:
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
            self._submit_distribution_count(
                scraper_config['send_distribution_sums_as_monotonic'],
                scraper_config['send_monotonic_with_gauge'],
                "{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                val,
                tags=tags,
                hostname=custom_hostname,
                flush_first_value=scraper_config['_successfully_executed'],
            )
        elif sample[self.SAMPLE_NAME].endswith("_count") and not scraper_config['send_distribution_buckets']:
            tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
            # Distinguish the overall count from per-bucket counts.
            if scraper_config['send_histograms_buckets']:
                tags.append("upper_bound:none")
            self._submit_distribution_count(
                scraper_config['send_distribution_counts_as_monotonic'],
                scraper_config['send_monotonic_with_gauge'],
                "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                val,
                tags=tags,
                hostname=custom_hostname,
                flush_first_value=scraper_config['_successfully_executed'],
            )
        elif scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket"):
            if scraper_config['send_distribution_buckets']:
                self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
            elif "Inf" not in sample[self.SAMPLE_LABELS]["le"] or scraper_config['non_cumulative_buckets']:
                # Normalize 'le' to a float string; cumulative +Inf buckets are skipped.
                sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
                tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
                self._submit_distribution_count(
                    scraper_config['send_distribution_counts_as_monotonic'],
                    scraper_config['send_monotonic_with_gauge'],
                    "{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
                    val,
                    tags=tags,
                    hostname=custom_hostname,
                    flush_first_value=scraper_config['_successfully_executed'],
                )
def _compute_bucket_hash(self, tags):
    """Hash the sample labels minus 'le', identifying one histogram context."""
    non_le_pairs = sorted((k, v) for k, v in iteritems(tags) if k != 'le')
    return hash(frozenset(non_le_pairs))
def _decumulate_histogram_buckets(self, metric):
    """Rewrite cumulative histogram `_bucket` samples into per-bucket values (in place).

    Pass 1 collects each bucket's cumulative value keyed by label context and
    upper bound; pass 2 sorts the bounds per context and computes
    (lower bound, upper bound, delta) tuples; pass 3 replaces each `_bucket`
    sample with its delta value and adds a `lower_bound` label.
    """
    # Pass 1: cumulative value per (context, upper bound).
    bucket_values_by_context_upper_bound = {}
    for sample in metric.samples:
        if sample[self.SAMPLE_NAME].endswith("_bucket"):
            context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
            if context_key not in bucket_values_by_context_upper_bound:
                bucket_values_by_context_upper_bound[context_key] = {}
            bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]["le"])] = sample[
                self.SAMPLE_VALUE
            ]
    sorted_buckets_by_context = {}
    for context in bucket_values_by_context_upper_bound:
        sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
    # Pass 2: (lower bound, upper bound, delta) per (context, upper bound).
    bucket_tuples_by_context_upper_bound = {}
    for context in sorted_buckets_by_context:
        for i, upper_b in enumerate(sorted_buckets_by_context[context]):
            if i == 0:
                if context not in bucket_tuples_by_context_upper_bound:
                    bucket_tuples_by_context_upper_bound[context] = {}
                # The first bucket keeps its cumulative value; its lower bound is
                # 0 for positive bounds, -inf otherwise.
                if upper_b > 0:
                    bucket_tuples_by_context_upper_bound[context][upper_b] = (
                        0,
                        upper_b,
                        bucket_values_by_context_upper_bound[context][upper_b],
                    )
                else:
                    bucket_tuples_by_context_upper_bound[context][upper_b] = (
                        self.MINUS_INF,
                        upper_b,
                        bucket_values_by_context_upper_bound[context][upper_b],
                    )
                continue
            # Delta against the previous (smaller) bound in the same context.
            tmp = (
                bucket_values_by_context_upper_bound[context][upper_b]
                - bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][i - 1]]
            )
            bucket_tuples_by_context_upper_bound[context][upper_b] = (
                sorted_buckets_by_context[context][i - 1],
                upper_b,
                tmp,
            )
    # Pass 3: rewrite the samples in place with the decumulated values.
    for i, sample in enumerate(metric.samples):
        if not sample[self.SAMPLE_NAME].endswith("_bucket"):
            continue
        context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
        matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][
            float(sample[self.SAMPLE_LABELS]["le"])
        ]
        sample[self.SAMPLE_LABELS]["lower_bound"] = str(matching_bucket_tuple[0])
        metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
def _submit_sample_histogram_buckets(self, metric_name, sample, scraper_config, hostname=None):
    """Submit one `_bucket` sample as a native agent histogram bucket.

    Requires both the `le` and `lower_bound` labels (the latter is added by
    decumulation); bounds are normalized to float strings and degenerate
    equal-bound buckets are skipped with a warning.
    """
    if "lower_bound" not in sample[self.SAMPLE_LABELS] or "le" not in sample[self.SAMPLE_LABELS]:
        self.log.warning(
            "Metric: %s was not containing required bucket boundaries labels: %s",
            metric_name,
            sample[self.SAMPLE_LABELS],
        )
        return
    # Normalize both bounds so '0.500000' and '0.5' compare equal.
    sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
    sample[self.SAMPLE_LABELS]["lower_bound"] = str(float(sample[self.SAMPLE_LABELS]["lower_bound"]))
    if sample[self.SAMPLE_LABELS]["le"] == sample[self.SAMPLE_LABELS]["lower_bound"]:
        # Skip degenerate buckets whose bounds coincide.
        self.log.warning(
            "Metric: %s has bucket boundaries equal, skipping: %s", metric_name, sample[self.SAMPLE_LABELS]
        )
        return
    tags = self._metric_tags(metric_name, sample[self.SAMPLE_VALUE], sample, scraper_config, hostname)
    self.submit_histogram_bucket(
        self._metric_name_with_namespace(metric_name, scraper_config),
        sample[self.SAMPLE_VALUE],
        float(sample[self.SAMPLE_LABELS]["lower_bound"]),
        float(sample[self.SAMPLE_LABELS]["le"]),
        True,
        hostname,
        tags,
    )
def _submit_distribution_count(
self,
monotonic,
send_monotonic_with_gauge,
metric_name,
value,
tags=None,
hostname=None,
flush_first_value=False,
):
if monotonic:
self.monotonic_count(metric_name, value, tags=tags, hostname=hostname, flush_first_value=flush_first_value)
else:
self.gauge(metric_name, value, tags=tags, hostname=hostname)
if send_monotonic_with_gauge:
self.monotonic_count(
metric_name + ".total", value, tags=tags, hostname=hostname, flush_first_value=flush_first_value
)
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
    """Assemble the tag list for one sample and run it through tag finalization.

    Tags = custom tags + static per-check tags + one `name:value` tag per
    sample label, skipping `exclude_labels` and renaming via `labels_mapper`.
    """
    custom_tags = scraper_config['custom_tags']
    tags = list(custom_tags)
    tags.extend(scraper_config['_metric_tags'])
    excluded = scraper_config['exclude_labels']
    mapper = scraper_config['labels_mapper']
    for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
        if label_name in excluded:
            continue
        tag_name = mapper.get(label_name, label_name)
        tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value)))
    return self._finalize_tags_to_submit(
        tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
    )
def _is_value_valid(self, val):
return not (isnan(val) or isinf(val))
def _get_bearer_token(self, bearer_token_auth, bearer_token_path):
if bearer_token_auth is False:
return None
path = None
if bearer_token_path is not None:
if isfile(bearer_token_path):
path = bearer_token_path
else:
self.log.error("File not found: %s", bearer_token_path)
elif isfile(self.KUBERNETES_TOKEN_PATH):
path = self.KUBERNETES_TOKEN_PATH
if path is None:
self.log.error("Cannot get bearer token from bearer_token_path or auto discovery")
raise IOError("Cannot get bearer token from bearer_token_path or auto discovery")
try:
with open(path, 'r') as f:
return f.read().rstrip()
except Exception as err:
self.log.error("Cannot get bearer token from path: %s - error: %s", path, err)
raise
def _histogram_convert_values(self, metric_name, converter):
    """Return a transformer that applies `converter` to a histogram's values.

    The returned function converts `_sum` sample values and finite `le` bucket
    bounds in place (bounds containing "Inf" are left untouched), then submits
    the metric normally under `metric_name`.
    """
    def _convert(metric, scraper_config=None):
        for index, sample in enumerate(metric.samples):
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                continue
            if sample[self.SAMPLE_NAME].endswith("_sum"):
                # Samples are immutable tuples: rebuild with the converted value.
                lst = list(sample)
                lst[self.SAMPLE_VALUE] = converter(val)
                metric.samples[index] = tuple(lst)
            elif sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"]:
                sample[self.SAMPLE_LABELS]["le"] = str(converter(float(sample[self.SAMPLE_LABELS]["le"])))
        self.submit_openmetric(metric_name, metric, scraper_config)
    return _convert
def _histogram_from_microseconds_to_seconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _histogram_from_seconds_to_microseconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
def _summary_convert_values(self, metric_name, converter):
    """Return a transformer that applies `converter` to a summary's values.

    The returned function converts every sample value except `_count` samples
    in place, then submits the metric normally under `metric_name`.
    """
    def _convert(metric, scraper_config=None):
        for index, sample in enumerate(metric.samples):
            val = sample[self.SAMPLE_VALUE]
            if not self._is_value_valid(val):
                self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
                continue
            if sample[self.SAMPLE_NAME].endswith("_count"):
                # Counts are unitless; only sums and quantiles are converted.
                continue
            else:
                # Samples are immutable tuples: rebuild with the converted value.
                lst = list(sample)
                lst[self.SAMPLE_VALUE] = converter(val)
                metric.samples[index] = tuple(lst)
        self.submit_openmetric(metric_name, metric, scraper_config)
    return _convert
def _summary_from_microseconds_to_seconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _summary_from_seconds_to_microseconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
| true
| true
|
790d6ee6eaa7f2a7b19763772641feb2cf553339
| 10,276
|
py
|
Python
|
test/units/formats/office/test_xlxtr.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | 1
|
2022-02-13T20:57:15.000Z
|
2022-02-13T20:57:15.000Z
|
test/units/formats/office/test_xlxtr.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
test/units/formats/office/test_xlxtr.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import lzma
from ... import TestUnitBase
from refinery.units.formats.office.xlxtr import _ref2rc, _rc2ref
class TestCellIndexConverter(TestUnitBase):
def test_concistency(self):
for row in range(1, 12):
for col in range(1, 12):
ref = _rc2ref(row, col)
r, c = _ref2rc(ref)
self.assertEqual((r, c), (row, col), F'({row},{col}) -> {ref} -> ({r}, {c}) != ({row},{col})')
class TestExcelExtractor(TestUnitBase):
def test_regular_xlsx(self):
data = self.TEST_XLSX
unit = self.load()
self.assertEqual(unit(data), B'Binary\nRefinery.\nBinary Refinery.')
xl1 = self.load('A1', 'R33', squeeze=True)(data)
xl2 = self.load('2#E10')(data)
xl3 = self.load('Refinery#E10')(data)
self.assertEqual(xl2, xl3)
self.assertEqual(xl1, b'BinaryRefinery.')
self.assertEqual(xl2, b'Binary Refinery.')
TEST_XLSX = lzma.decompress(base64.b85decode(
'{Wp48S^xk9=GL@E0stWa8~^|S5YJf5;3PvDAzc6{61-q2m(dT*lz$@h&uisO-M2S>G=qQEROhS?T`LVCl<0*Kr;j=qGZrTMa1_{74oZ0B;H_q6z{0fO2`#4p'
'Z(%@Rrb2l^+DIK4qbHHF_tmNDpz&Y$NlI-C6c(59S<hkLEM^A)s!{gk@qKO#f!<CU&7G31h2%4o%gM*%hC-@#t>rmqA<7aPOjP!YEkx*jkYln_Gs2{7ZcSSp'
'k%^+f{8_0fK#=AnGd4nKnS~b32=88*Gzk18vHibqY6IP;P8rsEd*hi%t(hYl<vzGV#mly+rRuPU?H$RjiOhkC&_Y^=3@n*lF-L-p{&*dA>A$-1cYhlULYXE~'
'9lRf#_`OFa&uH^H|E#>F1+<slwderZG)kz>f=O+S%CnbmT=-*EXvyp=?C!#p@e|yqJFol$s>T6*DyGIxp^}#q4f#_*{FEDNWty4CtIr9?l}dTd2ZvRe4c(lw'
'DABO4`<xHUA!rFO$CY0pMP$7Ch|~lYzBzW26csva+1m`if>ts<6(kc$R^2wfYI_u<Q|ve2LG39foqnwf%7wRQd2S-u4FHQJN@YT;52pT!6{VrFCidv$Fyf;}'
'rH559u)j4P7JILO$#(5+ZYcGMZALFyO?bVadG%NCWt)~F^p=Pm29lCFbYt)Fedzu<1zSy|M+}&@hOGrpf$f_=Y#DSA@|#f687|=g$UxDWWJKOTp)mW6TzZ=^'
'p2l)f#+eE2G<HArbYwZE!pb>bRES(cfK<g8_b)!Kft2?rXK}=vK3~G(CX^_QX)BQi&gU31F}4c4VcB7TrBk^r&0ca1okiuv1q4^388j~{y%RNKdMWD;q7$3l'
'#C;mMydS27!Koh*Bsd(dJ8m~*nz#&cRltJuz`RD02l;!L145|lg~%t7)#pZ6bT%^@aB5v|Mx2gU?|0@qMh{gR9r!(5QDnF8uc&l@Th{F@viY>d61j#TIyb8X'
'61@K*a|ghIpbVLNf7H)(W5>emQ41R#dw<#Af~ZpQO|)JqOd_Vj*kk+pzMMj@w+^G{FQH|dL4#ia(qX?XVK!~^yYHeq(&}Ngxfz31xqCY)rD*@_3Pyn>pc~Wn'
'MYDkF4kdF2tAi&B|JQ~s4)B9`NTUl4qos<(L1M+~{2d!BjkqBUb0%v1*kgIrF+ptfh}s0W$bSkIfJEba^sYW_lhRuUo-$5(Fftuy6p{|&N2JPAGBvqFg`%Q)'
'1cB<NMLt8qVvugS&hO*6_B9Kg?C_=TOZyGd>o8}DAXwo}7%+6|%=!Q&@h){<N`TgzUUJ67cJdcdXo;y#hyb@#8t&HY8P=kV)6}2jZhORE^Qab?zfQf7B_xQV'
'RK!+xABFg{33KMQ{4`>l&=iyiPUfI)c<LSMZ$G<RZa2rC=p3JGN`2;6a?#<4(EV$(=VK)cnGq^2NNZgPm;XW_n&r%)Tv0l1<R+xEEgpr*wA|*#_J_;WjMhx*'
'2_V1cq6SWKO|ImPFM#_s4uUlRF5$o<bxhE8EI!Cp;wWYl$Rwb5FtH|uR2(*WCRKe{RcePa){nOIYL{IHzSvbnG=TE4j4@A1=U$eDy?6P-nQ|;;P(T(jnSv=m'
'A&Rh1<Lz=W1J+!8u%iw8-_zZAtJcr2%@WV=+r{F4QyRi-NYdmBUk!FaGe5&&sf5vL_S1fe>CT`VFqQJ@BYH?72AFt;%Y}5m9zy2-<(iY_-&tjDSa4w0OtaO1'
'8tKtv_^&+^2ur(e<A~BD=}W({XC6cTLgOQNXL9dl25Uj~y?U_xM??>jmwHU+ICMbW#mHy;%;FmR7XxDT&|UA)JmOx6IY-%2Nzf6u%Ak^&L#DrA=cJ-qL+2V4'
'QaEix%b9zxe1xNE5#G23ON{#;_>8Kk9uORLt@ysrPLTL;n@tE%n;XrSU|Lbfw)ow=_ou8?#%|lEmF1WDbL}FKuGMr+{x400xau(;+mVCbvi;c!7;xGT@yFdV'
'O%KZ3Zd7>8k{6`<kvAq=;*cc=8so}&t<|n@0JZ0ilyz;t_j^nrUr_nSS-~|bLvwY%)Eezn(t5`=4(yJ3=C)R^NZ7aBvqw##zY<>uu=C59T>6kOvA{kgk@|v`'
's>pkG(&hxNnj-cSvL;G~#$Ew`FZiF$IM+7ut?;osAW_o%bvrhoYq6nZm9@=HAw>h4Pp#i=u)I}zReJI81}J1NlhYYmCJI!K?zcp6@Y#8Z3MQwQRUxzknnlp5'
'Rl_cFj`Wt<CU*@+s1`HvyHy~l=e_`sA<(R)nIRh{g7LFc>#eyLlRNK~<0x(GE1^FLwOTD6)j;!)u7?|Ed8uB8efa1bHZN)eQzTas@ce)BAOmvmldGs|(&vx<'
'5<<8Fy}}2W=u;!65A`@sm;bxZvSJ7?a@dwF?Hm9qA<e_Li%pFt+<IhChQmdjO{g%kg(jDtI-dwJFT9Gy@;{Nj;_p=$7QGZ6J(<db_mP^Z0@hL`fMm~^emi-<'
'#U}<C;1S7UX&q{)L&*;Bb4F4&hy!RF0|TGtm9!CB-zUI~7+XmC5f#gR?25`_79+(~-tv8S?S4f!r4*c$F!XRrO<4{vh^|w`l%t?0J>547bF1x6nFKL1FZME8'
'x>xF18ESM1s;wm*-x&m$NDpw?@x=<tlcE)STJnr9{NuK;#i6_2MYCPl%4Zq^9*$^R372ua6jwv>oH^mR0ioqk%%)Awns;#lrjXkIhYB_Vt*Pr*oTgse6Uazr'
'd)yUnaZ|Z`9?Q6aTHa2@m4`pd_?E;;Re)&<*otbim^DZ!V{~?+t%H;U2&V8O9CkMdW*tOzBErCD-E}{=Nl%~-`;W#E5$bMF8A-TOVDt09^K)tTG2cvWxLh%9'
'cuC?O7rL(QbGlAASV!M6dTB)pfy|#N5k4(Mdd*7+Mb<Fc^fR3BfFeEzF^|<<jpBXBM&T8{-77eX)1)UjzwbB1E&LZ4khDM^66En##rJ{5FB;62)1u0P(WW!?'
'lQ>ewk;iuv3T5ya!?u25bnj7}T|JgGJ#9v?s8&4#t^H+#psB8+5X2Nb(T)9WO*Vt|gLB|i#r-n1JMfe$j%Ph5SXMv_Tanlh$I>cVX}KMHqanK)`S{y}?Q*p%'
'q?-9=^4NCH4UFSGW?!(CtBYJuyypt+p0$nV^cK}KotkY2nSQndYOQFUvFVS3FW3?x>5yfLCco*5cW<@V1M^*WZG|(A0JM*3=9Sna%;2QH>md}mDc9$Mt3&b<'
'9G4eqoW1wvVYXkau#+Amms%7l0aoEO^`4|P4TnM0ZoXb_xoe`WfYVjGR)VLd+Q_@wE=eFJLr%5%w|=*hWf977@eZKekfJ3;&92d7q=M_xzybcYrXD3rWUx7T'
'YtP}VErR+Qx_;gt-vsQ=`UR=~2p9|w1mvGLTHTzpFy}ehnsV!-@9w;Br-4Iy$oZ!4*Ll%|=GkY0?kD^ebMpDWalI!>y!qU=-PH<$+%SHQox|bdqM~E30Lu?y'
'n3PZbZ?~4RkXMF4T;wYcr7pG)Y;}^m^8PA7N*9B(6278}V(4CuTj{g8cnHCBjFEVl$#zR(-FckDWBH2kXxgM8VN!zSNkFRsiLX1J0e7IR-ok22b<Fh{0Zygn'
'a->J1Tx<^V>tdmaeJ-AACUvHtR6ZqlAQc@|nfUvSjY9l8N}O1iL6tlkQNk$0EBJwV(D`Rl=MKmb{EZ(M+d9%;77%vNLbvj%X;Q>8k8h<6zf-kMENA;DDq9?9'
'-c<)(XUOK-37=JI@*2_!1<`E;#sXJ^h*;4qBLW;_Mqdg3;l@sO8%u?U%P9drSYd47l>^xT9m~sM>V(|XYphyEM=oa(c$R$_SoS+4>&;O_fr;olaT?C<i;vRU'
'>Z8O<b2dxzIAJbmw!O!q;jOe}<&^u*MaLUU@LxD!+r5~a9H*A^$_=p#3ZXmDXf(Ty2c+E9sKficRn4c|8+AF4uuF9VhW4%}>6syvgejhm`t$tpvg6Jz^Mj8-'
'eJGh$HQ4_nYI6{Gq5tdgPaPK)6ehDCQ26}`@0(w}Y^jsD<S<4|2sfQd4)8g&VMHyPnhehJDk?3y@tj=^?fTchQ<Z_k7Q{seld!f7y2Ywsq3-BjBL~RJ=5!>)'
'HrxQ9A#UUcI9OGd_dxu$A@8Czd8m&#<QJ`NMc2=__EFY=>wz*j8D_g9qx!^5p-44yDeVK-*Gq+h`Egsr8Zykb6#8*Md3@|MQtCqirE)!j#`xE3#3A;CNhhW6'
'@xeBsNwb7OLeXHM-mx$+KjZN~rhI!XRzXlWcBNb!0{QQkA>m)Ta~ke$Z)|_T1I7V2h|AKhLTNs87A1I@LGcUyR57K}(+;tyyC8y-FEcM0@?iXGNBemODlLlH'
'Mr&W(;)1Rbej$uqHn(yDH1F0kV@~eFf?-tYTXATJy75xajc$TygYO-K*F4I#iR*jVbT#0Sdc1yVJ~!nF1^f>mIxj#WHstZO4$~XMjt_&5m)E?ylIEe-l>(D!'
'Mw7{vPF6HG$F-4mG8(?dUrM(jcMhCc>w~{Ex93TcYS@D19c^KVJU^TjPDbY1#=Uo*b{(Gv7n|GEQI?et?&_b)@xjCL01(3wMnc**8<dg)VsKfN?;QKq*-WZ)'
'q@;?J7@QA^o5@YrEzLRbqL85Xn}ts4#pD44_rq|5fCqw#p~C9+4;y)=Dp3c|*;ZXTMF8FuRosDAR|5(w(ZGuW>E%_fgyG!r7?lqe3%xP?6V05D$y(VTsvUOT'
'RQ^YFF+kR~czqgECf1UH;jIk8r2hg10EZw_%qg1HJbY!EE)z8(=N8PB9wvri&LcM3CAHa~Zirs4h%N)MGC{rV+dfuhiX)QYc8+1rR=`2V+zGHRbmllDEAxHp'
'5<BjB;1rT_p7x*Z(v-bV+>i}0tw8REAnOZTGG7W$nnx$)6{BQ+R|g58X;%wAPn`#jR3qZx53X`$$S}|bEg91k*?nTro+A~2&E&c8bAL%TiOH-=B0Dj={_BRs'
'zN_c*A9%woCER;T-@U)QT6Y*KB@#oPZMMU^)_cLl=aG57!=?!dINhxjR`Ad2cib22ZA>g)GQ}!oy<&=n)X-%0d%FsL#aNFDW*P*JZ{;gPC=bY4!wS)S?l&6g'
'P6jM($%?=15;a!OkD@n`fxgQD^$w&KfMrNsA$(M<5bG)@`poZAgOs7zR6<b(_4gthE?vWQx9oH$gktbx6#eVoF&Xe5SGj?`c4Ao`3W{RMIdubs0e`X_6hiFK'
'>wynbkbfB+=3+_Q?eSa6QO0d~q7yubxNApHZEG1Hp||VtF*`Epn)YU>IO$zG_leh1K>qkB&wVr6gi`E{(q4nMnP9&;s(RCZ@vfO7zGg>mK5c_Y1Sg6{rCRjF'
'>nlWlf=PT6<0yV|00WvnG1-5Un}Qq#53Bat2Q+!&tPTzivUE>N5ydL&9B19kAevrDy(wr<id^TwwLC1O<k;_iWc3Al{%JZBDtYK^2QRE%g{XBQK>RO)dC9ur'
'@dAER%=sun5g7ZDw^S%4sIPS^s2JBddi`&zG>k9cE<1bsW}oa3e?YeDQ&KX<O;c9qMe=CF{Aa$9kInQ9TT5DSP>=GYt(Gg*5b{QCyON-vRaXXK>xC<i&$tt2'
'8|53#7Dg@~Q`bM<Zrh)ti1;$!Az6zi<f(9>`#JA?QiV1cR(HH_v>Ov#2ANK_#yB+M?#;Nxp?jzw_nBF?R|2yAURu=_MNoe$F@vzOw_rP{es)Mih4nvYQqY%f'
'>%2udb2Id;8z%n8M|N}@WUOK6lk%1+62-uL>X?x0^(=9Y%o;c`$8#a?kCmpiihl|Q+S^8)dNsvuEqmd)J<2`*U_(F9{q6Sj<v84blBU_=ikeoN_)5W<J!VAw'
'Sv$Ibl}+*I)>Qi(5y*2+-JLaxaUo`dNhioHs31)Ge(_tp+tQA>$|Gm~rxc`xRrz3dgbl<pfRlVz)6nvzGF>2$pK9lNm6NZx+A;1hh!Y^wq`e~y=n}-<6<e4<'
'`*ul_NDY@>-g1WZ7hMwR?&tw9dyu+yY)xfY(Dxz$RK4(dU`!)mqVpN&qWD~f^V+}I=fWT<KC$YC_833-rC&s|-&P@2ne_N3A*te>b=X=Eek@lN46s;fVDhJ3'
'^0`2@#<^2lA)H$6PSfhS?T3)G^?2IsKn{*Dcx9GZ>V1;)^ERS%jQdawwN@DFWmV_f?Max{4e??;&;7K<!{h(WPyGD{+@L*u(wzmx;X?xF{eUiKds($%ES*Ym'
'@~7q)`@30*YJ|TX!8tw+6+2AlC{V-75h&3sf|h$oyap$59-bkLE$lBVKy1<dt?%3gfzvf!xPmrvI?%b7BV-?+9fzF(^>Rh&lc!B=7&O#O89HJ}8FtKJ8;*`G'
'#oG&ackie*nZ<;gf4|RfL;3yJAyqllmLUY|?+yJh`Mg~?S^7{RY=Fzu=lz$Qg`QXCXTenb*>MO)qZKpGp?w@Wfo$u4oGUgZBL8~f!=1#)#f($a&NhjkJ@-g*'
'+|f`#ugApNgEbuU`g6DMU9FM%e5J^mP;<ieN^1hy#Qk2#I>7|+b#|2|XaIX$?zVFH1@WR&)QzgwuL-#U&fG=uM=T9yeNcpwB+pV^h(zB$ZU5<M5gGqvOeN#N'
'yVgbJ5<P11H}3-iK3WH)3&P%7HtVj_bQtmFcv{$s2yL*)Ii>v+ikrq*68vX=BgM4X#SvNA<ltrz-GE}KFtMrB(_&Z~V@}q;HCn15$x(Pijd=!-;U6Z~PoF&^'
'0bkjt88le{rYSw?&3;UjOaX^gf3jGo@-xA5b()&3rH;aQgcyLDn(s~vim6}iRS{UhiHDj6J>u2XPyEZpPa~5t#8t}Zs&SnD&E{>^&$saZ?Mq`u7T-2s^Y-Ng'
'5)+D+M@{nPIEmmA7yZb?N<>N0X_d)2EVrU~e?CqMCxLH~R^AVFzT{4dEXfA5k3DvQzw3Hs$VEW)xg^+5DPt<^7U9(JiWKa~nq2hxULBb*a&Y))x)#rQM8Z`j'
'5Mmpf+M1+Y+jwRI6l<q@v9rV32JHH@XZtkinW?VkC)c278{WH8UyCuUxSAM<df#~$a<VV$$*tKVxAvl{Ax%2MO(8?<9gzDAuo(}9Y#e<svKuK1bD~XdngQEg'
'L7nRHl|{{+DiK><=XU8^(;|agSqoyRyOB8*W9)6x|2vRE#7gKSkO^)4rPK~0v0)}fs&ZswK%<HY$uk`?OTLu>pD>T&rdIcf)>1>~PWqh-w8JOS&+-VlyMsOK'
'<$oB)VeHgqUh%v_4krv{i6I|6O&`lof_mK2O+00|a#}BwG(2&@xM;48<nSGP3J~DqIBzs?Qy5Q-@Kyh&!Fl6@HL+`8)!~4;G)Oa=ex1SlfM$5+Zs#1`37rEW'
'!z>3k19J|3fFOu?xIDa~SwA9%l3cKzCXIk>O75p|Bg)~|2;&k|mVGr+)MWRWCz;vY*&2yR97bK*S$>Ualdz*yplSY%%`-Yj!e!v%y*ROG3UlCsxgRcY70fqQ'
'I+EX+tv<@*&DUsq7bbCHUntXdFs5vDGP@MDqpto`ZT!$seb}vPzciItw_Z$+jnO(0Q5Ge{`CApXVtSioC!~KF;1mjl7zHO2z0YfqFLzph`avY-bbj?E;T^30'
'M0>~Bqjf8;WegI*+rs3kK<7hqTBy|v&jIUCfY+C*1mZJJbyU4ZEF!~_=0L~)Q#G|Ii{z+<I;P6A(s1E(@9b>FumB7oBm>X(NL!?}$KeF2{j3Ul)B_f84h4M5'
'r9)#GV2+28fa6fK6R4CHHh)K#0Fad@oZV4_Gua#}uAjxJ*>@g%+T|%ID!}k^BS2Je^`Ky*>aIoivXvF>-dgPgyt#In;bPorwRyWMLjuMWcW+c-9boYE)8iS>'
'q!Em8IIPsA1Y|^Xc@jro(IP
y0;na@27uAd3Z1T<ga0jjkKx&+RWCtm!fw0>lEr)3m(rj-=U)Zw-<dl;K4GSxkTx(VhK(SI=UN7dA?Lv=#D>Qsd{nfTXm'
'pxA`o(dC=F2E!ILT@*bC=AU*b$fz9Y`RM+&%tUiKh(1zr0b-tBkC^=#vjh`Aw~`^(Z}03wRH!x87TD<`J_|NamNx>q96dEcpLR`+0~*>P<hAWD^Q;hQo+5F<'
'jkThMTR3~)t79?MN$7I(KMPx$mkUjhroGlDzyqi{sBeG_$w)uw3xyWMeG8?|PVNM@^!iEg8ZFVzg+!q|&_T%AV79u_NzR%3;O-V&1mRqcD2rPxeHk7RDVwj+'
'TW~`L2g!$~bL55kst*mQ@YGUoVM@Q%(QGB!3x%5Ts?P*J5jLjM`8si3@#uU;K+U@o3R88*v$BeZFy>Z6<)6zkIfDg$P{F3Tl%R;1Iy!4f7pFwT{pda1v(L5Y'
'UAt4vr3g<_cO7kXPR6q&HzDpZU9JzHml~E4e~KjPSIg1zc8JX3ffWqT3X9rhxdhiZcI14+hrSC3geN)~9kc)SH6NaPEv7|+!C8lhOJHLhpn<#SnL<zbQ`F1d'
'F7z+X3NUnd;Cc@zZzz1@J)*=%vm5Kr|KqESpnKN`SrPmK$ZOI60Z#t#%ak|7wNPLIs_$bSRqYTpZCMnKd^q}R>)k?yVOgo)24Y*7v8)rsT^@GGq}6!!?oE!^'
'd+U-g60>iG7RE;8d~$5Nais62-MIq@rRX&o)QtxeW#N_%7vMGGro#IN7SIar0k*UrI@bNMf~JE^W&+Qnet4Kt7e#+qzFUEV{w~l8@%_@&J<W=gc7p!^u7cs7'
'000006<H{x300yM00F%;#7F=D8E*!*vBYQl0ssI200dcD'
))
| 96.943396
| 130
| 0.672635
|
import base64
import lzma
from ... import TestUnitBase
from refinery.units.formats.office.xlxtr import _ref2rc, _rc2ref
class TestCellIndexConverter(TestUnitBase):
    """Round-trip consistency checks for the ``_rc2ref`` / ``_ref2rc`` helpers."""

    def test_concistency(self):
        # Exercise an 11x11 grid of (row, col) pairs: converting a pair to an
        # Excel-style cell reference and back must reproduce the pair exactly.
        # NOTE(review): method name keeps its historical spelling ("concistency")
        # because test runners discover tests by name.
        pairs = ((row, col) for row in range(1, 12) for col in range(1, 12))
        for row, col in pairs:
            ref = _rc2ref(row, col)
            r, c = _ref2rc(ref)
            self.assertEqual((r, c), (row, col), F'({row},{col}) -> {ref} -> ({r}, {c}) != ({row},{col})')
class TestExcelExtractor(TestUnitBase):
    """End-to-end tests for the xlxtr unit against a small embedded workbook."""

    def test_regular_xlsx(self):
        # self.TEST_XLSX (defined below) is a compressed sample workbook with
        # three populated cells spread over two sheets.
        document = self.TEST_XLSX
        # With no cell selection, every populated cell is emitted.
        extract_all = self.load()
        self.assertEqual(extract_all(document), B'Binary\nRefinery.\nBinary Refinery.')
        # Squeeze two individually addressed cells into a single output chunk.
        squeezed = self.load('A1', 'R33', squeeze=True)(document)
        # Address the same cell by sheet index and by sheet name; both forms
        # must yield identical results.
        by_sheet_index = self.load('2#E10')(document)
        by_sheet_name = self.load('Refinery#E10')(document)
        self.assertEqual(by_sheet_index, by_sheet_name)
        self.assertEqual(squeezed, b'BinaryRefinery.')
        self.assertEqual(by_sheet_index, b'Binary Refinery.')
TEST_XLSX = lzma.decompress(base64.b85decode(
'{Wp48S^xk9=GL@E0stWa8~^|S5YJf5;3PvDAzc6{61-q2m(dT*lz$@h&uisO-M2S>G=qQEROhS?T`LVCl<0*Kr;j=qGZrTMa1_{74oZ0B;H_q6z{0fO2`#4p'
'Z(%@Rrb2l^+DIK4qbHHF_tmNDpz&Y$NlI-C6c(59S<hkLEM^A)s!{gk@qKO#f!<CU&7G31h2%4o%gM*%hC-@#t>rmqA<7aPOjP!YEkx*jkYln_Gs2{7ZcSSp'
'k%^+f{8_0fK#=AnGd4nKnS~b32=88*Gzk18vHibqY6IP;P8rsEd*hi%t(hYl<vzGV#mly+rRuPU?H$RjiOhkC&_Y^=3@n*lF-L-p{&*dA>A$-1cYhlULYXE~'
'9lRf#_`OFa&uH^H|E#>F1+<slwderZG)kz>f=O+S%CnbmT=-*EXvyp=?C!#p@e|yqJFol$s>T6*DyGIxp^}#q4f#_*{FEDNWty4CtIr9?l}dTd2ZvRe4c(lw'
'DABO4`<xHUA!rFO$CY0pMP$7Ch|~lYzBzW26csva+1m`if>ts<6(kc$R^2wfYI_u<Q|ve2LG39foqnwf%7wRQd2S-u4FHQJN@YT;52pT!6{VrFCidv$Fyf;}'
'rH559u)j4P7JILO$#(5+ZYcGMZALFyO?bVadG%NCWt)~F^p=Pm29lCFbYt)Fedzu<1zSy|M+}&@hOGrpf$f_=Y#DSA@|#f687|=g$UxDWWJKOTp)mW6TzZ=^'
'p2l)f#+eE2G<HArbYwZE!pb>bRES(cfK<g8_b)!Kft2?rXK}=vK3~G(CX^_QX)BQi&gU31F}4c4VcB7TrBk^r&0ca1okiuv1q4^388j~{y%RNKdMWD;q7$3l'
'#C;mMydS27!Koh*Bsd(dJ8m~*nz#&cRltJuz`RD02l;!L145|lg~%t7)#pZ6bT%^@aB5v|Mx2gU?|0@qMh{gR9r!(5QDnF8uc&l@Th{F@viY>d61j#TIyb8X'
'61@K*a|ghIpbVLNf7H)(W5>emQ41R#dw<#Af~ZpQO|)JqOd_Vj*kk+pzMMj@w+^G{FQH|dL4#ia(qX?XVK!~^yYHeq(&}Ngxfz31xqCY)rD*@_3Pyn>pc~Wn'
'MYDkF4kdF2tAi&B|JQ~s4)B9`NTUl4qos<(L1M+~{2d!BjkqBUb0%v1*kgIrF+ptfh}s0W$bSkIfJEba^sYW_lhRuUo-$5(Fftuy6p{|&N2JPAGBvqFg`%Q)'
'1cB<NMLt8qVvugS&hO*6_B9Kg?C_=TOZyGd>o8}DAXwo}7%+6|%=!Q&@h){<N`TgzUUJ67cJdcdXo;y#hyb@#8t&HY8P=kV)6}2jZhORE^Qab?zfQf7B_xQV'
'RK!+xABFg{33KMQ{4`>l&=iyiPUfI)c<LSMZ$G<RZa2rC=p3JGN`2;6a?#<4(EV$(=VK)cnGq^2NNZgPm;XW_n&r%)Tv0l1<R+xEEgpr*wA|*#_J_;WjMhx*'
'2_V1cq6SWKO|ImPFM#_s4uUlRF5$o<bxhE8EI!Cp;wWYl$Rwb5FtH|uR2(*WCRKe{RcePa){nOIYL{IHzSvbnG=TE4j4@A1=U$eDy?6P-nQ|;;P(T(jnSv=m'
'A&Rh1<Lz=W1J+!8u%iw8-_zZAtJcr2%@WV=+r{F4QyRi-NYdmBUk!FaGe5&&sf5vL_S1fe>CT`VFqQJ@BYH?72AFt;%Y}5m9zy2-<(iY_-&tjDSa4w0OtaO1'
'8tKtv_^&+^2ur(e<A~BD=}W({XC6cTLgOQNXL9dl25Uj~y?U_xM??>jmwHU+ICMbW#mHy;%;FmR7XxDT&|UA)JmOx6IY-%2Nzf6u%Ak^&L#DrA=cJ-qL+2V4'
'QaEix%b9zxe1xNE5#G23ON{#;_>8Kk9uORLt@ysrPLTL;n@tE%n;XrSU|Lbfw)ow=_ou8?#%|lEmF1WDbL}FKuGMr+{x400xau(;+mVCbvi;c!7;xGT@yFdV'
'O%KZ3Zd7>8k{6`<kvAq=;*cc=8so}&t<|n@0JZ0ilyz;t_j^nrUr_nSS-~|bLvwY%)Eezn(t5`=4(yJ3=C)R^NZ7aBvqw##zY<>uu=C59T>6kOvA{kgk@|v`'
's>pkG(&hxNnj-cSvL;G~#$Ew`FZiF$IM+7ut?;osAW_o%bvrhoYq6nZm9@=HAw>h4Pp#i=u)I}zReJI81}J1NlhYYmCJI!K?zcp6@Y#8Z3MQwQRUxzknnlp5'
'Rl_cFj`Wt<CU*@+s1`HvyHy~l=e_`sA<(R)nIRh{g7LFc>#eyLlRNK~<0x(GE1^FLwOTD6)j;!)u7?|Ed8uB8efa1bHZN)eQzTas@ce)BAOmvmldGs|(&vx<'
'5<<8Fy}}2W=u;!65A`@sm;bxZvSJ7?a@dwF?Hm9qA<e_Li%pFt+<IhChQmdjO{g%kg(jDtI-dwJFT9Gy@;{Nj;_p=$7QGZ6J(<db_mP^Z0@hL`fMm~^emi-<'
'#U}<C;1S7UX&q{)L&*;Bb4F4&hy!RF0|TGtm9!CB-zUI~7+XmC5f#gR?25`_79+(~-tv8S?S4f!r4*c$F!XRrO<4{vh^|w`l%t?0J>547bF1x6nFKL1FZME8'
'x>xF18ESM1s;wm*-x&m$NDpw?@x=<tlcE)STJnr9{NuK;#i6_2MYCPl%4Zq^9*$^R372ua6jwv>oH^mR0ioqk%%)Awns;#lrjXkIhYB_Vt*Pr*oTgse6Uazr'
'd)yUnaZ|Z`9?Q6aTHa2@m4`pd_?E;;Re)&<*otbim^DZ!V{~?+t%H;U2&V8O9CkMdW*tOzBErCD-E}{=Nl%~-`;W#E5$bMF8A-TOVDt09^K)tTG2cvWxLh%9'
'cuC?O7rL(QbGlAASV!M6dTB)pfy|#N5k4(Mdd*7+Mb<Fc^fR3BfFeEzF^|<<jpBXBM&T8{-77eX)1)UjzwbB1E&LZ4khDM^66En##rJ{5FB;62)1u0P(WW!?'
'lQ>ewk;iuv3T5ya!?u25bnj7}T|JgGJ#9v?s8&4#t^H+#psB8+5X2Nb(T)9WO*Vt|gLB|i#r-n1JMfe$j%Ph5SXMv_Tanlh$I>cVX}KMHqanK)`S{y}?Q*p%'
'q?-9=^4NCH4UFSGW?!(CtBYJuyypt+p0$nV^cK}KotkY2nSQndYOQFUvFVS3FW3?x>5yfLCco*5cW<@V1M^*WZG|(A0JM*3=9Sna%;2QH>md}mDc9$Mt3&b<'
'9G4eqoW1wvVYXkau#+Amms%7l0aoEO^`4|P4TnM0ZoXb_xoe`WfYVjGR)VLd+Q_@wE=eFJLr%5%w|=*hWf977@eZKekfJ3;&92d7q=M_xzybcYrXD3rWUx7T'
'YtP}VErR+Qx_;gt-vsQ=`UR=~2p9|w1mvGLTHTzpFy}ehnsV!-@9w;Br-4Iy$oZ!4*Ll%|=GkY0?kD^ebMpDWalI!>y!qU=-PH<$+%SHQox|bdqM~E30Lu?y'
'n3PZbZ?~4RkXMF4T;wYcr7pG)Y;}^m^8PA7N*9B(6278}V(4CuTj{g8cnHCBjFEVl$#zR(-FckDWBH2kXxgM8VN!zSNkFRsiLX1J0e7IR-ok22b<Fh{0Zygn'
'a->J1Tx<^V>tdmaeJ-AACUvHtR6ZqlAQc@|nfUvSjY9l8N}O1iL6tlkQNk$0EBJwV(D`Rl=MKmb{EZ(M+d9%;77%vNLbvj%X;Q>8k8h<6zf-kMENA;DDq9?9'
'-c<)(XUOK-37=JI@*2_!1<`E;#sXJ^h*;4qBLW;_Mqdg3;l@sO8%u?U%P9drSYd47l>^xT9m~sM>V(|XYphyEM=oa(c$R$_SoS+4>&;O_fr;olaT?C<i;vRU'
'>Z8O<b2dxzIAJbmw!O!q;jOe}<&^u*MaLUU@LxD!+r5~a9H*A^$_=p#3ZXmDXf(Ty2c+E9sKficRn4c|8+AF4uuF9VhW4%}>6syvgejhm`t$tpvg6Jz^Mj8-'
'eJGh$HQ4_nYI6{Gq5tdgPaPK)6ehDCQ26}`@0(w}Y^jsD<S<4|2sfQd4)8g&VMHyPnhehJDk?3y@tj=^?fTchQ<Z_k7Q{seld!f7y2Ywsq3-BjBL~RJ=5!>)'
'HrxQ9A#UUcI9OGd_dxu$A@8Czd8m&#<QJ`NMc2=__EFY=>wz*j8D_g9qx!^5p-44yDeVK-*Gq+h`Egsr8Zykb6#8*Md3@|MQtCqirE)!j#`xE3#3A;CNhhW6'
'@xeBsNwb7OLeXHM-mx$+KjZN~rhI!XRzXlWcBNb!0{QQkA>m)Ta~ke$Z)|_T1I7V2h|AKhLTNs87A1I@LGcUyR57K}(+;tyyC8y-FEcM0@?iXGNBemODlLlH'
'Mr&W(;)1Rbej$uqHn(yDH1F0kV@~eFf?-tYTXATJy75xajc$TygYO-K*F4I#iR*jVbT#0Sdc1yVJ~!nF1^f>mIxj#WHstZO4$~XMjt_&5m)E?ylIEe-l>(D!'
'Mw7{vPF6HG$F-4mG8(?dUrM(jcMhCc>w~{Ex93TcYS@D19c^KVJU^TjPDbY1#=Uo*b{(Gv7n|GEQI?et?&_b)@xjCL01(3wMnc**8<dg)VsKfN?;QKq*-WZ)'
'q@;?J7@QA^o5@YrEzLRbqL85Xn}ts4#pD44_rq|5fCqw#p~C9+4;y)=Dp3c|*;ZXTMF8FuRosDAR|5(w(ZGuW>E%_fgyG!r7?lqe3%xP?6V05D$y(VTsvUOT'
'RQ^YFF+kR~czqgECf1UH;jIk8r2hg10EZw_%qg1HJbY!EE)z8(=N8PB9wvri&LcM3CAHa~Zirs4h%N)MGC{rV+dfuhiX)QYc8+1rR=`2V+zGHRbmllDEAxHp'
'5<BjB;1rT_p7x*Z(v-bV+>i}0tw8REAnOZTGG7W$nnx$)6{BQ+R|g58X;%wAPn`#jR3qZx53X`$$S}|bEg91k*?nTro+A~2&E&c8bAL%TiOH-=B0Dj={_BRs'
'zN_c*A9%woCER;T-@U)QT6Y*KB@#oPZMMU^)_cLl=aG57!=?!dINhxjR`Ad2cib22ZA>g)GQ}!oy<&=n)X-%0d%FsL#aNFDW*P*JZ{;gPC=bY4!wS)S?l&6g'
'P6jM($%?=15;a!OkD@n`fxgQD^$w&KfMrNsA$(M<5bG)@`poZAgOs7zR6<b(_4gthE?vWQx9oH$gktbx6#eVoF&Xe5SGj?`c4Ao`3W{RMIdubs0e`X_6hiFK'
'>wynbkbfB+=3+_Q?eSa6QO0d~q7yubxNApHZEG1Hp||VtF*`Epn)YU>IO$zG_leh1K>qkB&wVr6gi`E{(q4nMnP9&;s(RCZ@vfO7zGg>mK5c_Y1Sg6{rCRjF'
'>nlWlf=PT6<0yV|00WvnG1-5Un}Qq#53Bat2Q+!&tPTzivUE>N5ydL&9B19kAevrDy(wr<id^TwwLC1O<k;_iWc3Al{%JZBDtYK^2QRE%g{XBQK>RO)dC9ur'
'@dAER%=sun5g7ZDw^S%4sIPS^s2JBddi`&zG>k9cE<1bsW}oa3e?YeDQ&KX<O;c9qMe=CF{Aa$9kInQ9TT5DSP>=GYt(Gg*5b{QCyON-vRaXXK>xC<i&$tt2'
'8|53#7Dg@~Q`bM<Zrh)ti1;$!Az6zi<f(9>`#JA?QiV1cR(HH_v>Ov#2ANK_#yB+M?#;Nxp?jzw_nBF?R|2yAURu=_MNoe$F@vzOw_rP{es)Mih4nvYQqY%f'
'>%2udb2Id;8z%n8M|N}@WUOK6lk%1+62-uL>X?x0^(=9Y%o;c`$8#a?kCmpiihl|Q+S^8)dNsvuEqmd)J<2`*U_(F9{q6Sj<v84blBU_=ikeoN_)5W<J!VAw'
'Sv$Ibl}+*I)>Qi(5y*2+-JLaxaUo`dNhioHs31)Ge(_tp+tQA>$|Gm~rxc`xRrz3dgbl<pfRlVz)6nvzGF>2$pK9lNm6NZx+A;1hh!Y^wq`e~y=n}-<6<e4<'
'`*ul_NDY@>-g1WZ7hMwR?&tw9dyu+yY)xfY(Dxz$RK4(dU`!)mqVpN&qWD~f^V+}I=fWT<KC$YC_833-rC&s|-&P@2ne_N3A*te>b=X=Eek@lN46s;fVDhJ3'
'^0`2@#<^2lA)H$6PSfhS?T3)G^?2IsKn{*Dcx9GZ>V1;)^ERS%jQdawwN@DFWmV_f?Max{4e??;&;7K<!{h(WPyGD{+@L*u(wzmx;X?xF{eUiKds($%ES*Ym'
'@~7q)`@30*YJ|TX!8tw+6+2AlC{V-75h&3sf|h$oyap$59-bkLE$lBVKy1<dt?%3gfzvf!xPmrvI?%b7BV-?+9fzF(^>Rh&lc!B=7&O#O89HJ}8FtKJ8;*`G'
'#oG&ackie*nZ<;gf4|RfL;3yJAyqllmLUY|?+yJh`Mg~?S^7{RY=Fzu=lz$Qg`QXCXTenb*>MO)qZKpGp?w@Wfo$u4oGUgZBL8~f!=1#)#f($a&NhjkJ@-g*'
'+|f`#ugApNgEbuU`g6DMU9FM%e5J^mP;<ieN^1hy#Qk2#I>7|+b#|2|XaIX$?zVFH1@WR&)QzgwuL-#U&fG=uM=T9yeNcpwB+pV^h(zB$ZU5<M5gGqvOeN#N'
'yVgbJ5<P11H}3-iK3WH)3&P%7HtVj_bQtmFcv{$s2yL*)Ii>v+ikrq*68vX=BgM4X#SvNA<ltrz-GE}KFtMrB(_&Z~V@}q;HCn15$x(Pijd=!-;U6Z~PoF&^'
'0bkjt88le{rYSw?&3;UjOaX^gf3jGo@-xA5b()&3rH;aQgcyLDn(s~vim6}iRS{UhiHDj6J>u2XPyEZpPa~5t#8t}Zs&SnD&E{>^&$saZ?Mq`u7T-2s^Y-Ng'
'5)+D+M@{nPIEmmA7yZb?N<>N0X_d)2EVrU~e?CqMCxLH~R^AVFzT{4dEXfA5k3DvQzw3Hs$VEW)xg^+5DPt<^7U9(JiWKa~nq2hxULBb*a&Y))x)#rQM8Z`j'
'5Mmpf+M1+Y+jwRI6l<q@v9rV32JHH@XZtkinW?VkC)c278{WH8UyCuUxSAM<df#~$a<VV$$*tKVxAvl{Ax%2MO(8?<9gzDAuo(}9Y#e<svKuK1bD~XdngQEg'
'L7nRHl|{{+DiK><=XU8^(;|agSqoyRyOB8*W9)6x|2vRE#7gKSkO^)4rPK~0v0)}fs&ZswK%<HY$uk`?OTLu>pD>T&rdIcf)>1>~PWqh-w8JOS&+-VlyMsOK'
'<$oB)VeHgqUh%v_4krv{i6I|6O&`lof_mK2O+00|a#}BwG(2&@xM;48<nSGP3J~DqIBzs?Qy5Q-@Kyh&!Fl6@HL+`8)!~4;G)Oa=ex1SlfM$5+Zs#1`37rEW'
'!z>3k19J|3fFOu?xIDa~SwA9%l3cKzCXIk>O75p|Bg)~|2;&k|mVGr+)MWRWCz;vY*&2yR97bK*S$>Ualdz*yplSY%%`-Yj!e!v%y*ROG3UlCsxgRcY70fqQ'
'I+EX+tv<@*&DUsq7bbCHUntXdFs5vDGP@MDqpto`ZT!$seb}vPzciItw_Z$+jnO(0Q5Ge{`CApXVtSioC!~KF;1mjl7zHO2z0YfqFLzph`avY-bbj?E;T^30'
'M0>~Bqjf8;WegI*+rs3kK<7hqTBy|v&jIUCfY+C*1mZJJbyU4ZEF!~_=0L~)Q#G|Ii{z+<I;P6A(s1E(@9b>FumB7oBm>X(NL!?}$KeF2{j3Ul)B_f84h4M5'
'r9)#GV2+28fa6fK6R4CHHh)K#0Fad@oZV4_Gua#}uAjxJ*>@g%+T|%ID!}k^BS2Je^`Ky*>aIoivXvF>-dgPgyt#In;bPorwRyWMLjuMWcW+c-9boYE)8iS>'
'q!Em8IIPsA1Y|^Xc@jro(IP
y0;na@27uAd3Z1T<ga0jjkKx&+RWCtm!fw0>lEr)3m(rj-=U)Zw-<dl;K4GSxkTx(VhK(SI=UN7dA?Lv=#D>Qsd{nfTXm'
'pxA`o(dC=F2E!ILT@*bC=AU*b$fz9Y`RM+&%tUiKh(1zr0b-tBkC^=#vjh`Aw~`^(Z}03wRH!x87TD<`J_|NamNx>q96dEcpLR`+0~*>P<hAWD^Q;hQo+5F<'
'jkThMTR3~)t79?MN$7I(KMPx$mkUjhroGlDzyqi{sBeG_$w)uw3xyWMeG8?|PVNM@^!iEg8ZFVzg+!q|&_T%AV79u_NzR%3;O-V&1mRqcD2rPxeHk7RDVwj+'
'TW~`L2g!$~bL55kst*mQ@YGUoVM@Q%(QGB!3x%5Ts?P*J5jLjM`8si3@#uU;K+U@o3R88*v$BeZFy>Z6<)6zkIfDg$P{F3Tl%R;1Iy!4f7pFwT{pda1v(L5Y'
'UAt4vr3g<_cO7kXPR6q&HzDpZU9JzHml~E4e~KjPSIg1zc8JX3ffWqT3X9rhxdhiZcI14+hrSC3geN)~9kc)SH6NaPEv7|+!C8lhOJHLhpn<#SnL<zbQ`F1d'
'F7z+X3NUnd;Cc@zZzz1@J)*=%vm5Kr|KqESpnKN`SrPmK$ZOI60Z#t#%ak|7wNPLIs_$bSRqYTpZCMnKd^q}R>)k?yVOgo)24Y*7v8)rsT^@GGq}6!!?oE!^'
'd+U-g60>iG7RE;8d~$5Nais62-MIq@rRX&o)QtxeW#N_%7vMGGro#IN7SIar0k*UrI@bNMf~JE^W&+Qnet4Kt7e#+qzFUEV{w~l8@%_@&J<W=gc7p!^u7cs7'
'000006<H{x300yM00F%;#7F=D8E*!*vBYQl0ssI200dcD'
))
| true
| true
|
790d70b112d173624d86dc5ffce0b1d185b8a479
| 14,962
|
py
|
Python
|
examples/eg1/eg1.py
|
SagarRoy1996/TabularDataExtraction
|
59b05dde00272e7f04f56b89bd2139e3a4e252e5
|
[
"Apache-2.0"
] | null | null | null |
examples/eg1/eg1.py
|
SagarRoy1996/TabularDataExtraction
|
59b05dde00272e7f04f56b89bd2139e3a4e252e5
|
[
"Apache-2.0"
] | null | null | null |
examples/eg1/eg1.py
|
SagarRoy1996/TabularDataExtraction
|
59b05dde00272e7f04f56b89bd2139e3a4e252e5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re
from math import radians, degrees
import numpy as np
import pandas as pd
import cv2
from pdftabextract import imgproc
from pdftabextract.geom import pt
from pdftabextract.common import read_xml, parse_pages, save_page_grids
from pdftabextract.textboxes import rotate_textboxes, sorted_by_attr
from pdftabextract.clustering import (find_clusters_1d_break_dist,
calc_cluster_centers_1d,
zip_clusters_and_values)
from pdftabextract.splitpages import split_page_texts, create_split_pages_dict_structure
from pdftabextract.extract import make_grid_from_positions, fit_texts_into_grid, datatable_to_dataframe
#%% Some constants

# Input/output locations. (Commented-out alternatives pointing at 'ip/', 'op/'
# and 'output.xml' were leftovers from earlier experiments and were removed as
# dead configuration.)
DATAPATH = 'data/'                  # directory with the input XML and page scans
OUTPUTPATH = 'generated_output/'    # all generated artifacts are written here
INPUT_XML = 'schoollist_1.pdf.xml'  # XML produced by pdftohtml for the source PDF

MIN_ROW_HEIGHT = 260  # minimum height of a row in pixels, measured in the scanned pages
MIN_COL_WIDTH = 194   # very important. the minimum width of a column in pixels, measured in the scanned pages
#%% Some helper functions

def save_image_w_lines(iproc_obj, imgfilebasename, orig_img_as_background, file_suffix_prefix=''):
    """Render the lines detected by *iproc_obj* and write the result as a PNG.

    The output name is derived from *imgfilebasename*; the suffix records
    whether the original scan was used as the image background
    ('lines-orig' vs. 'lines'), optionally prefixed by *file_suffix_prefix*.
    """
    suffix = file_suffix_prefix + ('lines-orig' if orig_img_as_background else 'lines')
    rendered = iproc_obj.draw_lines(orig_img_as_background=orig_img_as_background)
    target_file = os.path.join(OUTPUTPATH, '%s-%s.png' % (imgfilebasename, suffix))
    print("> saving image with detected lines to '%s'" % target_file)
    cv2.imwrite(target_file, rendered)
#%% Read the XML

# Load the XML that was generated with pdftohtml
xmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))

# parse it and generate a dict of pages
pages = parse_pages(xmlroot, require_image=True)

#%% Split the scanned double pages so that we can later process the lists page-by-page

split_texts_and_images = []   # list of tuples with (double page, split text boxes, split images)

for p_num, p in pages.items():
    # get the image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(DATAPATH, p['image'])

    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))

    # create an image processing object with the scanned page
    iproc_obj = imgproc.ImageProc(imgfile)

    # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    image_scaling = (page_scaling_x,   # scaling in X-direction
                     page_scaling_y)   # scaling in Y-direction
    # NOTE(review): image_scaling is not read below in this pass — presumably
    # kept for symmetry with the second pass (pages_image_scaling); confirm
    # before removing.

    # detect the lines in the double pages
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=350)
    print("> found %d lines" % len(lines_hough))

    save_image_w_lines(iproc_obj, imgfilebasename, True, 'bothpages-')

    # find the vertical line that separates both sides
    sep_line_img_x = iproc_obj.find_pages_separator_line(dist_thresh=MIN_COL_WIDTH/2)
    sep_line_page_x = sep_line_img_x / page_scaling_x
    print("> found pages separator line at %f (image space position) / %f (page space position)"
          % (sep_line_img_x, sep_line_page_x))

    # split the scanned double page at the separator line
    split_images = iproc_obj.split_image(sep_line_img_x)

    # split the textboxes at the separator line
    split_texts = split_page_texts(p, sep_line_page_x)

    split_texts_and_images.append((p, split_texts, split_images))

# generate a new XML and "pages" dict structure from the split pages
split_pages_xmlfile = os.path.join(OUTPUTPATH, INPUT_XML[:INPUT_XML.rindex('.')] + '.split.xml')
print("> saving split pages XML to '%s'" % split_pages_xmlfile)
split_tree, split_root, split_pages = create_split_pages_dict_structure(split_texts_and_images,
                                                                        save_to_output_path=split_pages_xmlfile)

# we don't need the original double pages any more, we'll work with 'split_pages'
del pages
#%% Detect clusters of horizontal lines using the image processing module and rotate back or deskew pages

hori_lines_clusters = {}
pages_image_scaling = {}   # scaling of the scanned page image in relation to the OCR page dimensions for each page

for p_num, p in split_pages.items():
    # get the image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(OUTPUTPATH, p['image'])

    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))

    # create an image processing object with the scanned page
    iproc_obj = imgproc.ImageProc(imgfile)

    # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    pages_image_scaling[p_num] = (page_scaling_x,   # scaling in X-direction
                                  page_scaling_y)   # scaling in Y-direction

    # detect the lines; the Hough vote threshold scales with the image width here
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=round(0.2 * iproc_obj.img_w))
    print("> found %d lines" % len(lines_hough))

    save_image_w_lines(iproc_obj, imgfilebasename, True)
    save_image_w_lines(iproc_obj, imgfilebasename, False)

    # find rotation or skew
    # the parameters are:
    # 1. the minimum threshold in radians for a rotation to be counted as such
    # 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew)
    # 3. an optional threshold to filter out "stray" lines whose angle is too far apart from the median angle of
    #    all other lines that go in the same direction (no effect here)
    rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5),   # uses "lines_hough"
                                                                            radians(1),
                                                                            omit_on_rot_thresh=radians(0.5))

    # rotate back text boxes
    # since often no vertical lines can be detected and hence it cannot be determined if the page is rotated or skewed,
    # we assume that it's always rotated
    if rot_or_skew_type is not None:
        print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
        rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
        # rotate back detected lines
        lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)

    save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', True)
    save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', False)

    # cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function
    # (break on distance MIN_ROW_HEIGHT/2)
    # additionally, remove all cluster sections that are considered empty
    # a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes
    # per cluster section
    hori_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_HORIZONTAL, find_clusters_1d_break_dist,
                                            remove_empty_cluster_sections_use_texts=p['texts'],   # use this page's textboxes
                                            remove_empty_cluster_sections_n_texts_ratio=0.1,      # 10% rule
                                            remove_empty_cluster_sections_scaling=page_scaling_y, # the positions are in "scanned image space" -> we scale them to "text box space"
                                            dist_thresh=MIN_ROW_HEIGHT/2)
    print("> found %d clusters" % len(hori_clusters))

    if len(hori_clusters) > 0:
        # draw the clusters
        img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_HORIZONTAL, hori_clusters)
        save_img_file = os.path.join(OUTPUTPATH, '%s-hori-clusters.png' % imgfilebasename)
        print("> saving image with detected horizontal clusters to '%s'" % save_img_file)
        cv2.imwrite(save_img_file, img_w_clusters)

        hori_lines_clusters[p_num] = hori_clusters
    else:
        print("> no horizontal line clusters found")

# save split and repaired XML (i.e. XML with deskewed textbox positions)
output_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]
repaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.split.repaired.xml')
print("saving split and repaired XML file to '%s'..." % repaired_xmlfile)
split_tree.write(repaired_xmlfile)
#%% Determine the rows and columns of the tables

pttrn_schoolnum = re.compile(r'^\d{6}$')  # a valid school number indicates a table row
page_grids = {}

print("detecting rows and columns...")
for p_num, p in split_pages.items():
    scaling_x, scaling_y = pages_image_scaling[p_num]

    # try to find out the table rows in this page using the horizontal lines that were detected before
    hori_lines = list(np.array(calc_cluster_centers_1d(hori_lines_clusters[p_num])) / scaling_y)
    hori_lines.append(p['height'])   # last line: page bottom

    prev_line_y = 0
    row_texts = []
    row_positions = []
    in_table = False   # is True when the current segment is a real table row (not a table header or surrounding text)
    for line_y in hori_lines:
        # get all texts in this row
        segment_texts = [t for t in p['texts'] if prev_line_y < t['bottom'] <= line_y]
        if not segment_texts: continue   # skip empty rows

        # try to find the start and the end of the table
        for t in segment_texts:
            t_val = t['value'].strip()
            if pttrn_schoolnum.search(t_val):   # if this matches, we found the start of the table
                if not in_table:
                    in_table = True
                    row_positions.append(prev_line_y)
                break
        else:
            if in_table:   # we found the end of the table
                in_table = False

        if in_table:   # this is a table row, so add the texts and row positions to the respective lists
            row_texts.append(segment_texts)
            row_positions.append(line_y)
        prev_line_y = line_y

    # try to find out the table columns in this page using the distribution of x-coordinates of the left position of
    # each text box in all rows
    text_xs = []
    for texts in row_texts:
        text_xs.extend([t['left'] for t in texts])
    text_xs = np.array(text_xs)

    # make clusters of x positions
    text_xs_clusters = find_clusters_1d_break_dist(text_xs, dist_thresh=MIN_COL_WIDTH/2/scaling_x)
    text_xs_clusters_w_values = zip_clusters_and_values(text_xs_clusters, text_xs)
    col_positions = calc_cluster_centers_1d(text_xs_clusters_w_values)

    # remove falsely identified columns (i.e. merge columns with only a few text boxes)
    filtered_col_positions = []
    n_rows = len(row_positions)
    n_cols = len(col_positions)
    if n_cols > 1 and n_rows > 1:
        top_y = row_positions[0]
        bottom_y = row_positions[-1]

        # append the rightmost text's right border as the last column border
        rightmost_pos = sorted_by_attr(p['texts'], 'right')[-1]['right']
        col_positions.append(rightmost_pos)

        # merge columns with few text boxes
        texts_in_table = [t for t in p['texts'] if top_y < t['top'] + t['height']/2 <= bottom_y]
        prev_col_x = col_positions[0]
        for col_x in col_positions[1:]:
            col_texts = [t for t in texts_in_table if prev_col_x < t['left'] + t['width']/2 <= col_x]
            if len(col_texts) >= n_rows:   # there should be at least one text box per row
                filtered_col_positions.append(prev_col_x)
            # (removed a dead "last_col_x = col_x" bookkeeping assignment that was never read)
            prev_col_x = col_x

        # manually add border for the last column because it has very few or no text boxes
        filtered_col_positions.append(filtered_col_positions[-1] + (rightmost_pos - filtered_col_positions[-1]) / 2)
        filtered_col_positions.append(rightmost_pos)

    # create the grid
    if filtered_col_positions:
        grid = make_grid_from_positions(filtered_col_positions, row_positions)
        n_rows = len(grid)
        n_cols = len(grid[0])
        print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
        page_grids[p_num] = grid
    else:   # this happens for the first page as there's no table on that
        print("> page %d: no table found" % p_num)

# save the page grids
# After you created the page grids, you should then check that they're correct using pdf2xml-viewer's
# loadGridFile() function
page_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids(page_grids, page_grids_file)
#%% Create data frames (requires pandas library)

# For sake of simplicity, we will just fit the text boxes into the grid, merge the texts in their cells (splitting text
# boxes to separate lines if necessary) and output the result. Normally, you would do some more parsing here, e.g.
# extracting the address components from the second column.

page_dfs = []   # one DataFrame per page; concatenated once at the end

print("fitting text boxes into page grids and generating final output...")
for p_num, p in split_pages.items():
    if p_num not in page_grids: continue   # happens when no table was detected

    print("> page %d" % p_num)
    datatable, unmatched_texts = fit_texts_into_grid(p['texts'], page_grids[p_num], return_unmatched_texts=True)

    df = datatable_to_dataframe(datatable, split_texts_in_lines=True)
    df['from_page'] = p_num
    page_dfs.append(df)

# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; collecting
# the per-page frames and concatenating once is both compatible and avoids the
# quadratic copying of repeated appends.
full_df = pd.concat(page_dfs, ignore_index=True) if page_dfs else pd.DataFrame()

print("extracted %d rows from %d pages" % (len(full_df), len(split_pages)))

csv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.csv')
print("saving extracted data to '%s'" % csv_output_file)
full_df.to_csv(csv_output_file, index=False)

excel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
full_df.to_excel(excel_output_file, index=False)
| 46.610592
| 180
| 0.679054
|
import os
import re
from math import radians, degrees
import numpy as np
import pandas as pd
import cv2
from pdftabextract import imgproc
from pdftabextract.geom import pt
from pdftabextract.common import read_xml, parse_pages, save_page_grids
from pdftabextract.textboxes import rotate_textboxes, sorted_by_attr
from pdftabextract.clustering import (find_clusters_1d_break_dist,
calc_cluster_centers_1d,
zip_clusters_and_values)
from pdftabextract.splitpages import split_page_texts, create_split_pages_dict_structure
from pdftabextract.extract import make_grid_from_positions, fit_texts_into_grid, datatable_to_dataframe
# Input/output locations and table geometry thresholds.
DATAPATH = 'data/'                  # directory with the input XML and page scans
OUTPUTPATH = 'generated_output/'    # all generated artifacts are written here
INPUT_XML = 'schoollist_1.pdf.xml'  # XML produced by pdftohtml for the source PDF
MIN_ROW_HEIGHT = 260  # minimum height of a table row in pixels, measured in the scanned pages
MIN_COL_WIDTH = 194   # minimum width of a column in pixels, measured in the scanned pages
def save_image_w_lines(iproc_obj, imgfilebasename, orig_img_as_background, file_suffix_prefix=''):
    """Render the lines detected by *iproc_obj* and write the result as a PNG.

    The file name records (via the 'lines-orig'/'lines' suffix) whether the
    original scan was drawn as the image background, optionally prefixed by
    *file_suffix_prefix*.
    """
    file_suffix = 'lines-orig' if orig_img_as_background else 'lines'
    img_lines = iproc_obj.draw_lines(orig_img_as_background=orig_img_as_background)
    img_lines_file = os.path.join(OUTPUTPATH, '%s-%s.png' % (imgfilebasename, file_suffix_prefix + file_suffix))
    print("> saving image with detected lines to '%s'" % img_lines_file)
    cv2.imwrite(img_lines_file, img_lines)
# Load the XML that was generated with pdftohtml and build a dict of pages.
xmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))
pages = parse_pages(xmlroot, require_image=True)

# Split each scanned double page so the lists can later be processed page-by-page.
split_texts_and_images = []  # tuples of (double page, split text boxes, split images)
for p_num, p in pages.items():
    # image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(DATAPATH, p['image'])
    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
    iproc_obj = imgproc.ImageProc(imgfile)
    # scaling of the image file in relation to the text box coordinate system
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    image_scaling = (page_scaling_x,  # scaling in X-direction
                     page_scaling_y)  # scaling in Y-direction
    # NOTE(review): image_scaling is not read below in this pass; presumably a
    # leftover mirroring pages_image_scaling in the second pass — confirm.
    # detect the lines in the double page
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=350)
    print("> found %d lines" % len(lines_hough))
    save_image_w_lines(iproc_obj, imgfilebasename, True, 'bothpages-')
    # the vertical line that separates both halves of the double page
    sep_line_img_x = iproc_obj.find_pages_separator_line(dist_thresh=MIN_COL_WIDTH/2)
    sep_line_page_x = sep_line_img_x / page_scaling_x
    print("> found pages separator line at %f (image space position) / %f (page space position)"
          % (sep_line_img_x, sep_line_page_x))
    # split the image and the text boxes at the separator line
    split_images = iproc_obj.split_image(sep_line_img_x)
    split_texts = split_page_texts(p, sep_line_page_x)
    split_texts_and_images.append((p, split_texts, split_images))

# generate a new XML and "pages" dict structure from the split pages
split_pages_xmlfile = os.path.join(OUTPUTPATH, INPUT_XML[:INPUT_XML.rindex('.')] + '.split.xml')
print("> saving split pages XML to '%s'" % split_pages_xmlfile)
split_tree, split_root, split_pages = create_split_pages_dict_structure(split_texts_and_images,
                                                                        save_to_output_path=split_pages_xmlfile)
# the original double pages are no longer needed; work with 'split_pages'
del pages
# Detect clusters of horizontal lines per page and rotate back / deskew pages.
hori_lines_clusters = {}
pages_image_scaling = {}  # per page: scaling of the scanned image vs. the OCR page dimensions
for p_num, p in split_pages.items():
    # image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(OUTPUTPATH, p['image'])
    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
    iproc_obj = imgproc.ImageProc(imgfile)
    # scaling of the image file in relation to the text box coordinate system
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    pages_image_scaling[p_num] = (page_scaling_x,  # scaling in X-direction
                                  page_scaling_y)  # scaling in Y-direction
    # detect the lines; the Hough vote threshold scales with the image width
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=round(0.2 * iproc_obj.img_w))
    print("> found %d lines" % len(lines_hough))
    save_image_w_lines(iproc_obj, imgfilebasename, True)
    save_image_w_lines(iproc_obj, imgfilebasename, False)
    # find page rotation or skew; thresholds are in radians
    rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5),
                                                                            radians(1),
                                                                            omit_on_rot_thresh=radians(0.5))
    if rot_or_skew_type is not None:
        # rotate the text boxes back by the detected angle
        print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
        rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
        # rotate back detected lines
        lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)

    save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', True)
    save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', False)

    # cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function
    # (break on distance MIN_ROW_HEIGHT/2)
    # additionally, remove all cluster sections that are considered empty
    # a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes
    # per cluster section
    hori_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_HORIZONTAL, find_clusters_1d_break_dist,
                                            remove_empty_cluster_sections_use_texts=p['texts'],  # use this page's textboxes
                                            remove_empty_cluster_sections_n_texts_ratio=0.1,     # 10% rule
                                            remove_empty_cluster_sections_scaling=page_scaling_y,
                                            dist_thresh=MIN_ROW_HEIGHT/2)
    print("> found %d clusters" % len(hori_clusters))
    if len(hori_clusters) > 0:
        # draw the clusters for visual inspection
        img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_HORIZONTAL, hori_clusters)
        save_img_file = os.path.join(OUTPUTPATH, '%s-hori-clusters.png' % imgfilebasename)
        print("> saving image with detected horizontal clusters to '%s'" % save_img_file)
        cv2.imwrite(save_img_file, img_w_clusters)
        hori_lines_clusters[p_num] = hori_clusters
    else:
        print("> no horizontal line clusters found")

# save the split and repaired XML (i.e. XML with deskewed textbox positions)
output_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]
repaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.split.repaired.xml')
print("saving split and repaired XML file to '%s'..." % repaired_xmlfile)
split_tree.write(repaired_xmlfile)
# Determine the table rows and columns on each page.
pttrn_schoolnum = re.compile(r'^\d{6}$')  # a valid school number indicates a table row
page_grids = {}
print("detecting rows and columns...")
for p_num, p in split_pages.items():
    scaling_x, scaling_y = pages_image_scaling[p_num]
    # table rows come from the horizontal line clusters detected before,
    # converted back to page (text box) space
    hori_lines = list(np.array(calc_cluster_centers_1d(hori_lines_clusters[p_num])) / scaling_y)
    hori_lines.append(p['height'])  # last line: page bottom
    prev_line_y = 0
    row_texts = []
    row_positions = []
    in_table = False  # True while the current segment is a real table row
    for line_y in hori_lines:
        # all text boxes whose bottom edge falls into this segment
        segment_texts = [t for t in p['texts'] if prev_line_y < t['bottom'] <= line_y]
        if not segment_texts: continue  # skip empty rows
        # a school number in the segment marks the start of the table
        for t in segment_texts:
            t_val = t['value'].strip()
            if pttrn_schoolnum.search(t_val):
                if not in_table:
                    in_table = True
                    row_positions.append(prev_line_y)
                break
        else:
            if in_table:  # no school number any more -> the table ended
                in_table = False
        if in_table:  # real table row: record its texts and position
            row_texts.append(segment_texts)
            row_positions.append(line_y)
        prev_line_y = line_y
    # columns come from clustering the left x coordinates of all row text boxes
    text_xs = []
    for texts in row_texts:
        text_xs.extend([t['left'] for t in texts])
    text_xs = np.array(text_xs)
    text_xs_clusters = find_clusters_1d_break_dist(text_xs, dist_thresh=MIN_COL_WIDTH/2/scaling_x)
    text_xs_clusters_w_values = zip_clusters_and_values(text_xs_clusters, text_xs)
    col_positions = calc_cluster_centers_1d(text_xs_clusters_w_values)
    # remove falsely identified columns (i.e. merge columns with only a few text boxes)
    filtered_col_positions = []
    n_rows = len(row_positions)
    n_cols = len(col_positions)
    if n_cols > 1 and n_rows > 1:
        top_y = row_positions[0]
        bottom_y = row_positions[-1]
        # append the rightmost text's right border as the last column border
        rightmost_pos = sorted_by_attr(p['texts'], 'right')[-1]['right']
        col_positions.append(rightmost_pos)
        # merge columns with few text boxes
        texts_in_table = [t for t in p['texts'] if top_y < t['top'] + t['height']/2 <= bottom_y]
        prev_col_x = col_positions[0]
        for col_x in col_positions[1:]:
            col_texts = [t for t in texts_in_table if prev_col_x < t['left'] + t['width']/2 <= col_x]
            if len(col_texts) >= n_rows:  # there should be at least one text box per row
                filtered_col_positions.append(prev_col_x)
                last_col_x = col_x  # NOTE(review): never read afterwards — looks like dead bookkeeping; confirm before removing
            prev_col_x = col_x
        # manually add border for the last column because it has very few or no text boxes
        filtered_col_positions.append(filtered_col_positions[-1] + (rightmost_pos - filtered_col_positions[-1]) / 2)
        filtered_col_positions.append(rightmost_pos)
    # create the grid
    if filtered_col_positions:
        grid = make_grid_from_positions(filtered_col_positions, row_positions)
        n_rows = len(grid)
        n_cols = len(grid[0])
        print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
        page_grids[p_num] = grid
    else:  # this happens for the first page as there's no table on that
        print("> page %d: no table found" % p_num)
# Save the page grids; inspect them with pdf2xml-viewer's loadGridFile().
page_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids(page_grids, page_grids_file)

# Fit the text boxes into the page grids and produce the final tabular output.
page_dfs = []  # one DataFrame per page; concatenated once at the end
print("fitting text boxes into page grids and generating final output...")
for p_num, p in split_pages.items():
    if p_num not in page_grids: continue  # happens when no table was detected
    print("> page %d" % p_num)
    datatable, unmatched_texts = fit_texts_into_grid(p['texts'], page_grids[p_num], return_unmatched_texts=True)
    df = datatable_to_dataframe(datatable, split_texts_in_lines=True)
    df['from_page'] = p_num
    page_dfs.append(df)

# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0; collecting
# the per-page frames and concatenating once is both compatible and avoids the
# quadratic copying of repeated appends.
full_df = pd.concat(page_dfs, ignore_index=True) if page_dfs else pd.DataFrame()
print("extracted %d rows from %d pages" % (len(full_df), len(split_pages)))
csv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.csv')
print("saving extracted data to '%s'" % csv_output_file)
full_df.to_csv(csv_output_file, index=False)
excel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
full_df.to_excel(excel_output_file, index=False)
| true
| true
|
790d70c7303ba08660f1fc2fc19df3a6b93b2447
| 4,492
|
py
|
Python
|
manga_py/base_classes/base.py
|
theincognito-inc/manga-dl
|
899905bafb6c6891815b58cce41eaff32a682570
|
[
"MIT"
] | 1
|
2020-11-19T00:40:49.000Z
|
2020-11-19T00:40:49.000Z
|
manga_py/base_classes/base.py
|
eduhoribe/manga-py
|
fe7eb2e08532b3c75b4f7ac8cc4132f0e7a65eb4
|
[
"MIT"
] | null | null | null |
manga_py/base_classes/base.py
|
eduhoribe/manga-py
|
fe7eb2e08532b3c75b4f7ac8cc4132f0e7a65eb4
|
[
"MIT"
] | null | null | null |
from logging import warning
from os import path
from typing import Optional, List
from lxml.html import HtmlElement
from manga_py.http import Http
from .params import ProviderParams
class Base(ProviderParams):
    """Common base for manga provider implementations.

    Keeps per-download state in ``_storage``/``_params`` and lazily builds
    the shared ``Http`` client used for all network access.
    """
    # re-assigned per instance in __init__ / CLI parsing
    _storage = None        # dict: cookies, chapters, current positions, proxies
    _params = None         # dict: user-facing options (destination, url, ...)
    _image_params = None   # dict: image post-processing options (crop, auto_crop)
    _http_kwargs = None    # extra kwargs forwarded to the Http client
    __http = None          # cached Http instance created by http()
    __arguments = None     # key=value CLI arguments parsed by _fill_arguments()
    chapter_id = 0
    quiet = False
    original_url = None

    def __init__(self):
        # mutable defaults are created here (not as class attributes)
        # so instances do not share the dicts
        self._storage = {
            'cookies': {},
            'main_content': None,
            'chapters': [],
            'current_chapter': 0,
            'current_file': 0,
            'proxies': {},
            'domain_uri': None,
        }
        self._params = {
            'destination': 'Manga',
            'cf-protect': False,
        }
        self._image_params = {
            'crop': (0, 0, 0, 0),
            # 'crop': (left, upper, right, lower)
            'auto_crop': False,
            # 'auto_crop': True,
        }
        self._http_kwargs = {}

    def _archive_type(self) -> str:
        """Return the archive extension: 'cbz' when the cbz option is set, else 'zip'."""
        # NOTE(review): assumes _params['cbz'] is always present — set by the
        # CLI layer, not by __init__; verify against the argument parser
        arc_type = 'zip'
        if self._params['cbz']:
            arc_type = 'cbz'
        return arc_type

    def get_url(self):
        """Return the target URL passed in the parameters."""
        return self._params['url']

    def _build_http_params(self, params):
        """Fill *params* with Http-client defaults from the current state.

        Uses ``setdefault`` throughout, so any value already present in
        *params* wins over the stored default.
        """
        if params is None:
            params = {}
        params.setdefault('allow_webp', not self._params.get('disallow_webp', None))
        params.setdefault('referer', self._storage.get('referer', self.domain))
        params.setdefault('user_agent', self._get_user_agent())
        params.setdefault('proxies', self._storage.get('proxies', None))
        params.setdefault('cookies', self._storage.get('cookies', None))
        params.setdefault('kwargs', self._http_kwargs)
        return params

    def http(self, new=False, params=None) -> Http:
        """Return the shared Http client, or a fresh one when *new* is true."""
        http_params = self._build_http_params(params)
        if new:
            # independent client; not cached
            http = Http(**http_params)
            return http
        elif not self.__http:
            # lazily create and cache the shared client
            self.__http = Http(**http_params)
        return self.__http

    def http_get(self, url: str, headers: dict = None, cookies: dict = None):
        """GET *url* through the shared Http client."""
        return self.http().get(url=url, headers=headers, cookies=cookies)

    def http_post(self, url: str, headers: dict = None, cookies: dict = None, data=()):
        """POST *data* to *url* through the shared Http client."""
        return self.http().post(url=url, headers=headers, cookies=cookies, data=data)

    def _get_user_agent(self):
        # when cloudflare scraping is enabled, use the UA recorded in storage
        # (presumably the one that solved the challenge — TODO confirm);
        # otherwise the UA from the parameters
        ua_storage = self._storage.get('user_agent', None)
        ua_params = self._params.get('user_agent', None)
        if self._params.get('cf_scrape', False):
            return ua_storage
        return ua_params

    @classmethod
    def __normalize_chapters(cls, n, element):
        # n is a URL normalizer; accept lxml anchors, plain strings,
        # or pass anything else through unchanged
        if isinstance(element, HtmlElement):
            return n(element.get('href'))
        if isinstance(element, str):
            return n(element)
        return element

    def _prepare_chapters(self, chapters):
        """Normalize the chapter list to absolute URLs; warn when empty."""
        n = self.http().normalize_uri
        items = []
        if chapters and len(chapters):
            for i in chapters:
                url = self.__normalize_chapters(n, i)
                items.append(url)
        else:
            warning('Chapters list empty. Check %s' % self.get_url())
        return items

    def get_current_file(self):
        """Return the file entry at the current download position."""
        return self._storage['files'][self._storage['current_file']]

    def book_meta(self) -> dict:
        """Provider hook: metadata for the whole book (empty by default)."""
        return {}

    def _image_name(self, idx, filename):
        # build '001_name.ext', or just '001.ext' when rename_pages is set
        if idx is None:
            idx = self._storage['current_file']
        fn, extension = path.splitext(filename)
        _path = '{:0>3}_{}'.format(idx, fn)
        if self._params['rename_pages']:
            _path = '{:0>3}'.format(idx)
        return _path + extension

    def chapter_for_json(self) -> str:
        """Provider hook: the chapter value written to the info json."""
        return self.chapter

    def put_info_json(self, meta):
        # provider hook; no-op by default
        # manga_name, url, directory
        pass

    def _fill_arguments(self, arguments: List[str]):
        """Parse 'key=value' CLI arguments, keeping only the known keys."""
        know_args = [
            'login',
            'password',
            'language',
            'translator',
        ]
        if self.__arguments is None:
            self.__arguments = {}
        for arg in arguments:
            key, value = arg.split('=', 1)  # type: str, str
            if key in know_args:
                self.__arguments[key] = value

    def arg(self, key: str) -> Optional[str]:
        """Return a parsed CLI argument value, or None when absent."""
        if self.__arguments is None:
            return None
        return self.__arguments.get(key)

    def allow_auto_change_url(self):
        """Provider hook: whether the provider may rewrite the input URL."""
        return True
| 29.748344
| 87
| 0.573241
|
from logging import warning
from os import path
from typing import Optional, List
from lxml.html import HtmlElement
from manga_py.http import Http
from .params import ProviderParams
class Base(ProviderParams):
_storage = None
_params = None
_image_params = None
_http_kwargs = None
__http = None
__arguments = None
chapter_id = 0
quiet = False
original_url = None
def __init__(self):
self._storage = {
'cookies': {},
'main_content': None,
'chapters': [],
'current_chapter': 0,
'current_file': 0,
'proxies': {},
'domain_uri': None,
}
self._params = {
'destination': 'Manga',
'cf-protect': False,
}
self._image_params = {
'crop': (0, 0, 0, 0),
'auto_crop': False,
}
self._http_kwargs = {}
def _archive_type(self) -> str:
arc_type = 'zip'
if self._params['cbz']:
arc_type = 'cbz'
return arc_type
def get_url(self):
return self._params['url']
def _build_http_params(self, params):
if params is None:
params = {}
params.setdefault('allow_webp', not self._params.get('disallow_webp', None))
params.setdefault('referer', self._storage.get('referer', self.domain))
params.setdefault('user_agent', self._get_user_agent())
params.setdefault('proxies', self._storage.get('proxies', None))
params.setdefault('cookies', self._storage.get('cookies', None))
params.setdefault('kwargs', self._http_kwargs)
return params
def http(self, new=False, params=None) -> Http:
http_params = self._build_http_params(params)
if new:
http = Http(**http_params)
return http
elif not self.__http:
self.__http = Http(**http_params)
return self.__http
def http_get(self, url: str, headers: dict = None, cookies: dict = None):
return self.http().get(url=url, headers=headers, cookies=cookies)
def http_post(self, url: str, headers: dict = None, cookies: dict = None, data=()):
return self.http().post(url=url, headers=headers, cookies=cookies, data=data)
def _get_user_agent(self):
ua_storage = self._storage.get('user_agent', None)
ua_params = self._params.get('user_agent', None)
if self._params.get('cf_scrape', False):
return ua_storage
return ua_params
@classmethod
def __normalize_chapters(cls, n, element):
if isinstance(element, HtmlElement):
return n(element.get('href'))
if isinstance(element, str):
return n(element)
return element
def _prepare_chapters(self, chapters):
n = self.http().normalize_uri
items = []
if chapters and len(chapters):
for i in chapters:
url = self.__normalize_chapters(n, i)
items.append(url)
else:
warning('Chapters list empty. Check %s' % self.get_url())
return items
def get_current_file(self):
return self._storage['files'][self._storage['current_file']]
def book_meta(self) -> dict:
return {}
def _image_name(self, idx, filename):
if idx is None:
idx = self._storage['current_file']
fn, extension = path.splitext(filename)
_path = '{:0>3}_{}'.format(idx, fn)
if self._params['rename_pages']:
_path = '{:0>3}'.format(idx)
return _path + extension
def chapter_for_json(self) -> str:
return self.chapter
def put_info_json(self, meta):
pass
def _fill_arguments(self, arguments: List[str]):
know_args = [
'login',
'password',
'language',
'translator',
]
if self.__arguments is None:
self.__arguments = {}
for arg in arguments:
key, value = arg.split('=', 1)
if key in know_args:
self.__arguments[key] = value
def arg(self, key: str) -> Optional[str]:
if self.__arguments is None:
return None
return self.__arguments.get(key)
def allow_auto_change_url(self):
return True
| true
| true
|
790d70e3cbbdaaa46d1decb3dcc65fb133d8e02c
| 18,752
|
py
|
Python
|
cmdb/views_ajax.py
|
bopopescu/dbsupport
|
9b0f767cebc338fe22f5f3435a8d261101ea35dd
|
[
"Apache-2.0"
] | 2
|
2019-04-20T06:10:49.000Z
|
2020-06-11T08:11:46.000Z
|
cmdb/views_ajax.py
|
bopopescu/dbsupport
|
9b0f767cebc338fe22f5f3435a8d261101ea35dd
|
[
"Apache-2.0"
] | null | null | null |
cmdb/views_ajax.py
|
bopopescu/dbsupport
|
9b0f767cebc338fe22f5f3435a8d261101ea35dd
|
[
"Apache-2.0"
] | 1
|
2020-07-22T02:57:46.000Z
|
2020-07-22T02:57:46.000Z
|
# -*- coding: UTF-8 -*-
import datetime
import json
from django.contrib.auth.hashers import check_password, make_password
from django.core import serializers
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from cmdb.models import host, hostUser, dbGroup, dbInstance
from utils.jsonExt import DateEncoder
from utils.logUtil import getLogger
# from cmdb.models import dbCluster
logger = getLogger()
@csrf_exempt
def addChangeHostInfo(request):
    '''
    Create a new host record or update an existing one.

    An empty/missing ``host_id`` in the POST data means "insert";
    otherwise the row with that id is updated.  Responds with JSON:
    ``status`` 1 on success, 2 on failure.
    '''
    v_hostId = request.POST.get('host_id')
    v_businessName = request.POST.get('business_name')
    v_serviceEnv = request.POST.get('service_env')
    v_hostName = request.POST.get('host_name')
    v_intranetIpAddr = request.POST.get('intranet_ipaddr')
    v_publicIpAddr = request.POST.get('public_ipaddr')
    v_sshPort = request.POST.get('ssh_port')
    v_hostType = request.POST.get('host_type')
    v_hostRole = request.POST.get('host_role')
    v_hostDesc = request.POST.get('host_desc')
    print(v_hostId, v_businessName, v_serviceEnv, v_hostName, v_intranetIpAddr, v_publicIpAddr, v_sshPort, v_hostType, v_hostRole, v_hostDesc)
    if v_hostId == '' or v_hostId is None:
        # insert a new host row
        try:
            hostObj = host(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
            hostObj.save()
            result = {'status':1, 'msg':'保存成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            result = {'status':2, 'msg':'保存失败!'+str(e), 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # bulk-update the existing row via the queryset (no model save() call)
        try:
            hostObj = host.objects.filter(id=v_hostId)
            hostObj.update(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
            # masterConfigObj.save()
            result = {'status':1, 'msg':'修改成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            result = {'status':2, 'msg':'修改失败!'+str(e), 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getHostDetailInfo(request):
    """Return the detail of a single host as JSON (status 1 ok / 2 error)."""
    host_pk = request.POST['hostId']
    try:
        host_json = host.objects.get(id=host_pk).toJSON()
    except Exception as e:
        print(e)
        payload = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
    else:
        payload = {'status':1, 'msg':'请求成功', 'obj':host_json}
        print(payload)
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def delHost(request):
    """Delete one host row by id; JSON status 1 ok, 2 error, 3 nothing selected."""
    host_pk = request.POST['hostId']
    if host_pk == "" or host_pk is None:
        payload = {'status':3, 'msg':'未选中任何记录!', 'data':''}
        return HttpResponse(json.dumps(payload), content_type='application/json')
    try:
        outcome = host.objects.filter(id=host_pk).delete()
        print(outcome)
        payload = {'status':1, 'msg':'删除成功!', 'data':outcome}
    except Exception as e:
        print(e)
        payload = {'status':2, 'msg':'删除失败!'+str(e), 'data':''}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def addChangeHostUserInfo(request):
    '''
    Create a new host user or update an existing one.

    An empty/missing ``host_user_id`` means "insert" (attached to the host
    given by ``host_id``); otherwise the row with that id is updated.
    Responds with JSON: ``status`` 1 on success, 2 on failure.
    '''
    v_hostUserId = request.POST.get('host_user_id')
    v_hostId = request.POST.get('host_id')
    v_hostUser = request.POST.get('host_user')
    v_hostPasswd = request.POST.get('host_passwd')
    v_userDesc = request.POST.get('user_desc')
    # NOTE(review): this prints host_passwd in clear text to stdout —
    # consider removing or masking
    print(v_hostUserId, v_hostId, v_hostUser, v_hostPasswd, v_userDesc)
    if v_hostUserId == '' or v_hostUserId is None:
        # insert, linked to the parent host row
        try:
            hostObj = host.objects.get(id=v_hostId)
            hostUserObj = hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc, host=hostObj)
            hostUserObj.save()
            result = {'status':1, 'msg':'保存成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status':2, 'msg':'保存失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # bulk-update the existing row via the queryset
        try:
            hostUserObj = hostUser.objects.filter(id=v_hostUserId)
            hostUserObj.update(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
            # masterConfigObj.save()
            result = {'status':1, 'msg':'修改成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status':2, 'msg':'修改失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getHostUserDetailInfo(request):
    """Serialize one hostUser row to JSON (status 1 ok / 2 error)."""
    user_pk = request.POST['hostUserId'].strip()
    try:
        rows = hostUser.objects.filter(id=user_pk)
        serialized = serializers.serialize("json", rows, use_natural_foreign_keys=True)
    except Exception as e:
        print(e)
        payload = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
    else:
        payload = {'status':1, 'msg':'请求成功', 'hostUserJson':serialized}
        print(payload)
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def delHostUser(request):
    """Delete one hostUser row by id; JSON status 1 ok, 2 error, 3 nothing selected."""
    user_pk = request.POST['hostUserId']
    if user_pk == "" or user_pk is None:
        payload = {'status':3, 'msg':'未选中任何记录!', 'data':''}
        return HttpResponse(json.dumps(payload), content_type='application/json')
    try:
        outcome = hostUser.objects.filter(id=user_pk).delete()
        print(outcome)
        logger.error(outcome)
        payload = {'status':1, 'msg':'删除成功!', 'data':outcome}
    except Exception as e:
        print(e)
        logger.error(e)
        payload = {'status':2, 'msg':'删除失败!', 'data':''}
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def addChangeDbGroupInfo(request):
    '''
    Create a new database group or update an existing one.

    An empty/missing ``group_id`` means "insert"; otherwise the row with
    that id is updated.  Responds with JSON: ``status`` 1 on success,
    2 on failure.
    '''
    v_groupId = request.POST.get('group_id')
    v_businessName = request.POST.get('business_name')
    v_groupName = request.POST.get('group_name')
    v_groupStatus = request.POST.get('group_status')
    v_groupDesc = request.POST.get('group_desc')
    v_groupEnv = request.POST.get('group_env')
    print(v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
    # bug fix: logging uses %-style lazy formatting — the original passed the
    # values as bare extra args with no placeholders, which made logging raise
    # a "not all arguments converted" formatting error on every call
    logger.info("保存或修改数据库组信息,接收前端参数: %s %s %s %s %s %s",
                v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
    if v_groupId == '' or v_groupId is None:
        # insert a new group row
        try:
            dbGroupObj = dbGroup(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
            dbGroupObj.save()
            result = {'status':1, 'msg':'保存成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status':2, 'msg':'保存失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # bulk-update the existing row via the queryset
        try:
            dbGroupObj = dbGroup.objects.filter(id=v_groupId)
            dbGroupObj.update(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
            result = {'status':1, 'msg':'修改成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status':2, 'msg':'修改失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
# @csrf_exempt
# def getDbClusterDetailInfo(request):
# clusterId = request.POST['clusterId']
#
# try:
# dbClusterObj = dbCluster.objects.get(id=clusterId)
# dbClusterJson = dbClusterObj.toJSON()
#
# result = {'status':1, 'msg':'请求成功', 'obj':dbClusterJson}
# print(result)
# return HttpResponse(json.dumps(result), content_type='application/json')
# except Exception as e:
# print(e)
# result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getDbGroupDetailInfo(request):
    """Return the detail of one dbGroup row as JSON (status 1 ok / 2 error)."""
    group_pk = request.POST['groupId']
    try:
        group_json = dbGroup.objects.get(id=group_pk).toJSON()
    except Exception as e:
        print(e)
        payload = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
    else:
        payload = {'status':1, 'msg':'请求成功', 'obj':group_json}
        print(payload)
    return HttpResponse(json.dumps(payload), content_type='application/json')
@csrf_exempt
def addChangeDbInstanceInfo(request):
    '''
    Create a new database instance or update an existing one.

    An empty/missing ``instance_id`` means "insert" (linked to the group
    given by ``group_id`` and the host given by ``host_id``); otherwise the
    row with that id is updated.  Responds with JSON: ``status`` 1 on
    success, 2 on failure.
    '''
    v_instanceId = request.POST.get('instance_id')
    v_groupId = request.POST.get('group_id')
    v_host_id = request.POST.get('host_id')
    v_instanceName = request.POST.get('instance_env')
    v_instanceType = request.POST.get('instance_type')
    v_portNum = request.POST.get('port_num')
    v_instanceRole = request.POST.get('instance_role')
    v_instanceStatus = request.POST.get('instance_status')
    v_instanceDesc = request.POST.get('instance_desc')
    print(v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
    # bug fix: logging uses %-style lazy formatting — the original passed the
    # values as bare extra args with no placeholders, which made logging raise
    # a "not all arguments converted" formatting error on every call
    logger.info("保存或修改数据库实例信息,接收前端参数: %s %s %s %s %s %s %s %s %s",
                v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType,
                v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
    if v_instanceId == '' or v_instanceId is None:
        # insert a new instance row, resolving its FK rows first
        try:
            dbGroupObj = dbGroup.objects.get(id=v_groupId)
            hostObj = host.objects.get(id=v_host_id)
            print(hostObj)
            dbInstanceObj = dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
            dbInstanceObj.save()
            result = {'status':1, 'msg':'保存成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            print(e)
            logger.error(str(e))
            result = {'status':2, 'msg':'保存失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        # bulk-update the existing row via the queryset
        try:
            dbGroupObj = dbGroup.objects.get(id=v_groupId)
            hostObj = host.objects.get(id=v_host_id)
            dbInstanceObj = dbInstance.objects.filter(id=v_instanceId)
            dbInstanceObj.update(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
            result = {'status':1, 'msg':'修改成功!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(str(e))
            result = {'status':2, 'msg':'修改失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getDbInstanceDetailInfo(request):
    """Serialize one dbInstance row (FKs by natural key) to JSON."""
    instance_pk = request.POST['instanceId'].strip()
    try:
        rows = dbInstance.objects.filter(id=instance_pk)
        serialized = serializers.serialize("json", rows, use_natural_foreign_keys=True)
    except Exception as e:
        print(e)
        payload = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
    else:
        payload = {'status':1, 'msg':'请求成功', 'dbInstanceJson':serialized}
        print(payload)
    return HttpResponse(json.dumps(payload), content_type='application/json')
# conn = connection.cursor()
# try:
# conn.execute('SELECT cdi.*, ch.host_name, ch.intranet_ip_addr, cdg.group_name FROM cmdb_db_instance cdi inner join cmdb_host ch on cdi.host = ch.id inner join cmdb_db_group cdg on cdi.db_group = cdg.id WHERE cdi.id = %s', [instanceId])
# dbInstanceInfo = conn.fetchall()
# print(dbInstanceInfo)
# dbInstanceJson = serializers.serialize("json", dbInstanceInfo)
# result = {'status':1, 'msg':'请求成功', 'dbInstanceInfo':dbInstanceInfo}
# print(result)
# return HttpResponse(json.dumps(result, cls=DateEncoder), content_type='application/json')
# except Exception as e:
# print(e)
# result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
# finally:
# conn.close()
# try:
# dbInstanceInfo = dbInstance.objects.raw('SELECT * FROM cmdb_db_instance WHERE id = %d', [instanceId])
# dbInstanceJson = serializers.serialize("json", dbInstanceInfo)
#
# print(dbInstanceJson[0].fields.host)
# print(type(dbInstanceJson[0].fields.host))
#
# hostInfo = host.objects.raw('SELECT * FROM cmdb_host WHERE id = %d', [int(dbInstanceJson[0].fields.host)])
# hostJson = serializers.serialize("json", hostInfo)
# print(hostJson)
#
# result = {'status':1, 'msg':'请求成功', 'dbInstanceJson':dbInstanceJson}
# print(result)
# return HttpResponse(json.dumps(result), content_type='application/json')
# except Exception as e:
# print(e)
# result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def delDbInstance(request):
    """Delete one dbInstance row by id; JSON status 1 ok, 2 error, 3 nothing selected."""
    instance_pk = request.POST['instanceId']
    if instance_pk == "" or instance_pk is None:
        payload = {'status':3, 'msg':'未选中任何记录!', 'data':''}
        return HttpResponse(json.dumps(payload), content_type='application/json')
    try:
        outcome = dbInstance.objects.filter(id=instance_pk).delete()
        print(outcome)
        logger.error(outcome)
        payload = {'status':1, 'msg':'删除成功!', 'data':outcome}
    except Exception as e:
        print(e)
        logger.error(e)
        payload = {'status':2, 'msg':'删除失败!', 'data':''}
    return HttpResponse(json.dumps(payload), content_type='application/json')
# @csrf_exempt
# def addChangeDbClusterInfo(request):
# '''
# 新增集群信息
# 修改集群信息
# '''
# v_clusterId = request.POST.get('cluster_id')
# v_clusterName = request.POST.get('cluster_name')
# v_clusterStatus = request.POST.get('cluster_status')
# v_clusterDesc = request.POST.get('cluster_desc')
#
# print("begin add Cluster: ", v_clusterId, v_clusterName, v_clusterStatus, v_clusterDesc)
#
# if v_clusterId == '' or v_clusterId is None:
# # 新增
# try:
# dbClusterObj = dbCluster(clusterName=v_clusterName, clusterStatus=v_clusterStatus, clusterDesc=v_clusterDesc)
# dbClusterObj.save()
# result = {'status':1, 'msg':'保存成功!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
# except Exception as e:
# logger.error(str(e))
# result = {'status':2, 'msg':'保存失败!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
# else:
# # 修改
# try:
# dbClusterObj = dbCluster.objects.filter(id=v_clusterId)
# dbClusterObj.update(clusterName=v_clusterName, clusterStatus=v_clusterStatus, clusterDesc=v_clusterDesc)
# # masterConfigObj.save()
# result = {'status':1, 'msg':'修改成功!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
# except Exception as e:
# logger.error(str(e))
# result = {'status':2, 'msg':'修改失败!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
#
# @csrf_exempt
# def delDbCluster(request):
# v_clusterId = request.POST['cluster_id']
#
# if v_clusterId == "" or v_clusterId is None:
# result = {'status':3, 'msg':'未选中任何记录!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
# else:
# try:
# delResult = dbCluster.objects.filter(id=v_clusterId).delete()
# print(delResult)
# logger.info(delResult)
# result = {'status':1, 'msg':'删除成功!', 'data':delResult}
# return HttpResponse(json.dumps(result), content_type='application/json')
# except Exception as e:
# print(e)
# logger.error(e)
# result = {'status':2, 'msg':'删除失败!', 'data':''}
# return HttpResponse(json.dumps(result), content_type='application/json')
| 44.330969
| 245
| 0.624307
|
import datetime
import json
from django.contrib.auth.hashers import check_password, make_password
from django.core import serializers
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from cmdb.models import host, hostUser, dbGroup, dbInstance
from utils.jsonExt import DateEncoder
from utils.logUtil import getLogger
logger = getLogger()
@csrf_exempt
def addChangeHostInfo(request):
v_hostId = request.POST.get('host_id')
v_businessName = request.POST.get('business_name')
v_serviceEnv = request.POST.get('service_env')
v_hostName = request.POST.get('host_name')
v_intranetIpAddr = request.POST.get('intranet_ipaddr')
v_publicIpAddr = request.POST.get('public_ipaddr')
v_sshPort = request.POST.get('ssh_port')
v_hostType = request.POST.get('host_type')
v_hostRole = request.POST.get('host_role')
v_hostDesc = request.POST.get('host_desc')
print(v_hostId, v_businessName, v_serviceEnv, v_hostName, v_intranetIpAddr, v_publicIpAddr, v_sshPort, v_hostType, v_hostRole, v_hostDesc)
if v_hostId == '' or v_hostId is None:
try:
hostObj = host(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
hostObj.save()
result = {'status':1, 'msg':'保存成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status':2, 'msg':'保存失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostObj = host.objects.filter(id=v_hostId)
hostObj.update(businessName=v_businessName, serviceEnv=v_serviceEnv, hostName=v_hostName, intranetIpAddr=v_intranetIpAddr, publicIpAddr=v_publicIpAddr, sshPort=v_sshPort, hostType=v_hostType, hostRole=v_hostRole, hostDesc=v_hostDesc)
result = {'status':1, 'msg':'修改成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
result = {'status':2, 'msg':'修改失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getHostDetailInfo(request):
hostId = request.POST['hostId']
try:
hostObj = host.objects.get(id=hostId)
hostJson = hostObj.toJSON()
result = {'status':1, 'msg':'请求成功', 'obj':hostJson}
print(result)
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def delHost(request):
hostId = request.POST['hostId']
if hostId == "" or hostId is None:
result = {'status':3, 'msg':'未选中任何记录!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
delResult = host.objects.filter(id=hostId).delete()
print(delResult)
result = {'status':1, 'msg':'删除成功!', 'data':delResult}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
result = {'status':2, 'msg':'删除失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def addChangeHostUserInfo(request):
v_hostUserId = request.POST.get('host_user_id')
v_hostId = request.POST.get('host_id')
v_hostUser = request.POST.get('host_user')
v_hostPasswd = request.POST.get('host_passwd')
v_userDesc = request.POST.get('user_desc')
print(v_hostUserId, v_hostId, v_hostUser, v_hostPasswd, v_userDesc)
if v_hostUserId == '' or v_hostUserId is None:
try:
hostObj = host.objects.get(id=v_hostId)
hostUserObj = hostUser(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc, host=hostObj)
hostUserObj.save()
result = {'status':1, 'msg':'保存成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status':2, 'msg':'保存失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
hostUserObj = hostUser.objects.filter(id=v_hostUserId)
hostUserObj.update(hostUser=v_hostUser, hostPasswd=v_hostPasswd, userDesc=v_userDesc)
result = {'status':1, 'msg':'修改成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status':2, 'msg':'修改失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getHostUserDetailInfo(request):
hostUserId = request.POST['hostUserId'].strip()
try:
hostUserInfo = hostUser.objects.filter(id=hostUserId)
hostUserJson = serializers.serialize("json", hostUserInfo, use_natural_foreign_keys=True)
result = {'status':1, 'msg':'请求成功', 'hostUserJson':hostUserJson}
print(result)
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def delHostUser(request):
hostUserId = request.POST['hostUserId']
if hostUserId == "" or hostUserId is None:
result = {'status':3, 'msg':'未选中任何记录!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
delResult = hostUser.objects.filter(id=hostUserId).delete()
print(delResult)
logger.error(delResult)
result = {'status':1, 'msg':'删除成功!', 'data':delResult}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
logger.error(e)
result = {'status':2, 'msg':'删除失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def addChangeDbGroupInfo(request):
v_groupId = request.POST.get('group_id')
v_businessName = request.POST.get('business_name')
v_groupName = request.POST.get('group_name')
v_groupStatus = request.POST.get('group_status')
v_groupDesc = request.POST.get('group_desc')
v_groupEnv = request.POST.get('group_env')
print(v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
logger.info("保存或修改数据库组信息,接收前端参数:", v_groupId, v_businessName, v_groupName, v_groupEnv, v_groupStatus, v_groupDesc)
if v_groupId == '' or v_groupId is None:
try:
dbGroupObj = dbGroup(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
dbGroupObj.save()
result = {'status':1, 'msg':'保存成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status':2, 'msg':'保存失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.filter(id=v_groupId)
dbGroupObj.update(businessName=v_businessName, groupName=v_groupName, groupEnv=v_groupEnv, groupStatus=v_groupStatus, groupDesc=v_groupDesc)
result = {'status':1, 'msg':'修改成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status':2, 'msg':'修改失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getDbGroupDetailInfo(request):
groupId = request.POST['groupId']
try:
dbGroupObj = dbGroup.objects.get(id=groupId)
dbGroupJson = dbGroupObj.toJSON()
result = {'status':1, 'msg':'请求成功', 'obj':dbGroupJson}
print(result)
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def addChangeDbInstanceInfo(request):
v_instanceId = request.POST.get('instance_id')
v_groupId = request.POST.get('group_id')
v_host_id = request.POST.get('host_id')
v_instanceName = request.POST.get('instance_env')
v_instanceType = request.POST.get('instance_type')
v_portNum = request.POST.get('port_num')
v_instanceRole = request.POST.get('instance_role')
v_instanceStatus = request.POST.get('instance_status')
v_instanceDesc = request.POST.get('instance_desc')
print(v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
logger.info("保存或修改数据库实例信息,接收前端参数:", v_instanceId, v_groupId, v_host_id, v_instanceName, v_instanceType, v_portNum, v_instanceRole, v_instanceStatus, v_instanceDesc)
if v_instanceId == '' or v_instanceId is None:
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
print(hostObj)
dbInstanceObj = dbInstance(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
dbInstanceObj.save()
result = {'status':1, 'msg':'保存成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
print(e)
logger.error(str(e))
result = {'status':2, 'msg':'保存失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
else:
try:
dbGroupObj = dbGroup.objects.get(id=v_groupId)
hostObj = host.objects.get(id=v_host_id)
dbInstanceObj = dbInstance.objects.filter(id=v_instanceId)
dbInstanceObj.update(groupName=dbGroupObj, host=hostObj, instanceName=v_instanceName, instanceType=v_instanceType, portNum=v_portNum, instanceRole=v_instanceRole, instanceStatus=v_instanceStatus, instanceDesc=v_instanceDesc)
result = {'status':1, 'msg':'修改成功!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
except Exception as e:
logger.error(str(e))
result = {'status':2, 'msg':'修改失败!', 'data':''}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getDbInstanceDetailInfo(request):
    """Return the detail record of one database instance as JSON.

    Expects POST parameter ``instanceId``. Responds with status 1 and the
    serialized instance on success, status 2 plus the error text on failure.
    """
    instanceId = request.POST['instanceId'].strip()
    try:
        dbInstanceInfo = dbInstance.objects.filter(id=instanceId)
        # use_natural_foreign_keys renders FK targets via their natural keys
        # instead of raw ids, matching what the front end displays.
        dbInstanceJson = serializers.serialize("json", dbInstanceInfo, use_natural_foreign_keys=True)
        result = {'status':1, 'msg':'请求成功', 'dbInstanceJson':dbInstanceJson}
        return HttpResponse(json.dumps(result), content_type='application/json')
    except Exception as e:
        # Log the failure instead of printing to stdout, consistent with the
        # sibling views in this module (debug print() calls removed).
        logger.error(str(e))
        result = {'status':2, 'msg':'请求失败!'+str(e), 'data':''}
        return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def delDbInstance(request):
    """Delete one database-instance record selected by POST ``instanceId``.

    Status codes in the JSON response: 3 when no record was selected,
    1 on success (with the delete() result as data), 2 on failure.
    """
    instanceId = request.POST['instanceId']
    if instanceId == "" or instanceId is None:
        result = {'status':3, 'msg':'未选中任何记录!', 'data':''}
        return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        try:
            delResult = dbInstance.objects.filter(id=instanceId).delete()
            # A successful delete is informational — the original logged it
            # through logger.error, polluting the error log.
            logger.info(delResult)
            result = {'status':1, 'msg':'删除成功!', 'data':delResult}
            return HttpResponse(json.dumps(result), content_type='application/json')
        except Exception as e:
            logger.error(e)
            result = {'status':2, 'msg':'删除失败!', 'data':''}
            return HttpResponse(json.dumps(result), content_type='application/json')
# 新增集群信息
# 修改集群信息
# '''
| true
| true
|
790d7258611c71f85ea414a0d78f516bb3b1cbb3
| 219
|
py
|
Python
|
blink_001.py
|
luisC62/RPi_Pico_Examples
|
d2fb34e6ec0835d9265b3bd750add9e2da3eabf7
|
[
"MIT"
] | null | null | null |
blink_001.py
|
luisC62/RPi_Pico_Examples
|
d2fb34e6ec0835d9265b3bd750add9e2da3eabf7
|
[
"MIT"
] | null | null | null |
blink_001.py
|
luisC62/RPi_Pico_Examples
|
d2fb34e6ec0835d9265b3bd750add9e2da3eabf7
|
[
"MIT"
] | null | null | null |
from machine import Pin
import utime

# Blink an external LED on GP28 together with the Pico's onboard LED
# (GP25). The two start in opposite states and are toggled together
# every half second, so they alternate.
external_led = Pin(28, Pin.OUT)
board_led = Pin(25, Pin.OUT)

# Opposite initial levels: external off, onboard on.
external_led.low()
board_led.high()

while True:
    for pin in (external_led, board_led):
        pin.toggle()
    print("Toggle")
    utime.sleep(0.5)
| 18.25
| 30
| 0.666667
|
from machine import Pin
import utime
led = Pin(28, Pin.OUT)
onboard_led = Pin(25, Pin.OUT)
led.low()
onboard_led.high()
while True:
led.toggle()
onboard_led.toggle()
print("Toggle")
utime.sleep(0.5)
| true
| true
|
790d728eeac14afc437d0301467e95f1c6a85fee
| 978
|
py
|
Python
|
SRC/December-Batch/02_class/01_list.py
|
archeranimesh/fantastic-waffle
|
74274be44a469dac765379624c489cd5952e9b7c
|
[
"MIT"
] | null | null | null |
SRC/December-Batch/02_class/01_list.py
|
archeranimesh/fantastic-waffle
|
74274be44a469dac765379624c489cd5952e9b7c
|
[
"MIT"
] | null | null | null |
SRC/December-Batch/02_class/01_list.py
|
archeranimesh/fantastic-waffle
|
74274be44a469dac765379624c489cd5952e9b7c
|
[
"MIT"
] | null | null | null |
# Demo of basic list operations (append / insert / remove / del) plus
# four different ways of walking a list together with its indices.
a = []

# append() adds elements at the end.
a.append(2)
a.append(3)
print(a)

# insert() places a value at a given position ...
a.insert(0, 5)
a.insert(10, 5)
print(a)
# ... and an out-of-range position simply clamps to the end.
a.insert(100, 6)
print(a)

# remove() drops the first occurrence of a value; del removes by index.
a.remove(5)
print(a, len(a))
del a[0]
print(a, len(a))

# Negative indices count from the end of the list.
print(a[-1])

print(len(a))
separator = "-" * 30

# Forward walk: enumerate pairs each index with its value.
for idx, value in enumerate(a):
    print("(", idx, ", ", value, ")")
print(separator)

# The same walk, spelled with an explicit start/stop/step.
for idx in range(0, len(a), 1):
    print("(", idx, ", ", a[idx], ")")
print(separator)

# Reverse walk over the indices.
for idx in reversed(range(len(a))):
    print("(", idx, ", ", a[idx], ")")
print(separator)

# Every other index.
for idx in range(0, len(a), 2):
    print("(", idx, ", ", a[idx], ")")
print(separator)
| 22.227273
| 66
| 0.604294
|
a = []
a.append(2)
a.append(3)
print(a)
a.insert(0, 5)
a.insert(10, 5)
print(a)
a.insert(100, 6)
print(a)
a.remove(5)
print(a, len(a))
del a[0]
print(a, len(a))
print(a[-1])
print(len(a))
for item in range(len(a)):
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(0, len(a), 1):
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(len(a) - 1, -1, -1):
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(0, len(a), 2):
print("(", item, ", ", a[item], ")")
print("-" * 30)
| true
| true
|
790d742ca9e0602fc2e720daaa6e3b8267c06812
| 2,395
|
py
|
Python
|
data/p4VQE/R1/benchmark/startQiskit_Class82.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class82.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class82.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the p=1 QAOA-style benchmark circuit on ``n`` qubits.

    Relies on module-level globals set by ``__main__`` before the call:
    ``E`` (weighted edge list), ``V`` (vertex array) and the angles
    ``gamma`` / ``beta``.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)

    # Uniform superposition on every qubit. The original hard-coded
    # qubits 0..3 despite taking n as a parameter; this generalizes to
    # any n and is gate-for-gate identical for the n=4 call below.
    for q in range(n):
        prog.h(input_qubit[q])

    # Cost layer: one controlled-phase per edge plus single-qubit phases.
    # NOTE(review): cp indexes with k-1 / l-1 while p uses k / l — this
    # looks inconsistent, but it is preserved exactly to keep the
    # generated benchmark circuit unchanged.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)

    # Mixer layer over all vertices.
    prog.rx(2 * beta, range(len(V)))

    # The following pairs cancel out; kept deliberately to preserve the
    # benchmark (mutation-style gate insertions, see the number= tags).
    prog.swap(input_qubit[1],input_qubit[0]) # number=5
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.y(input_qubit[3]) # number=7
    prog.y(input_qubit[3]) # number=8
    # circuit end
    return prog
if __name__ == '__main__':
    # Problem instance: 4 vertices, 5 unit-weight edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    # networkx graph of the instance (built here but not used below).
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    # Grid-search the (gamma, beta) angle pair over [0, pi) x [0, pi).
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    # NOTE(review): F1 appears to be a closed-form p=1 QAOA objective for
    # this benchmark family — taken as given, not re-derived here.
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    # Take the first grid point maximizing F1 and map the indices back to
    # angles via the step size.
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    prog = make_circuit(4)
    sample_shot =5200
    writefile = open("../data/startQiskit_Class82.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Exact statevector simulation of the circuit transpiled for the
    # FakeYorktown device layout.
    backend = BasicAer.get_backend('statevector_simulator')
    circuit1 = transpile(prog, FakeYorktown())
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    # Dump counts, transpiled depth and the circuit text to the CSV file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 27.215909
| 118
| 0.634238
|
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0])
prog.swap(input_qubit[1],input_qubit[0])
prog.y(input_qubit[3])
prog.y(input_qubit[3])
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_Class82.csv", "w")
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| true
| true
|
790d74b84c68d02413bfbc62e01e8661e782f03d
| 3,650
|
py
|
Python
|
support_files/scraping/entries/proj_2062/proj_2062/middlewares.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_2062/proj_2062/middlewares.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
support_files/scraping/entries/proj_2062/proj_2062/middlewares.py
|
miccaldas/new_rss
|
9580887ac44b5c3e4c4ed5045478f2c7fef36afe
|
[
"MIT"
] | null | null | null |
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class Proj2062SpiderMiddleware:
    """Spider middleware for the proj_2062 crawler.

    Every hook is a pass-through, exactly as in the Scrapy project
    template: responses, items and start requests flow through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe it to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None tells Scrapy to keep processing this response.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward everything the spider produced, untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling; defer to Scrapy's defaults.
        pass

    def process_start_requests(self, start_requests, spider):
        # Pass the start requests through as-is (no response attached yet).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class Proj2062DownloaderMiddleware:
    """Downloader middleware for the proj_2062 crawler.

    Pure template behaviour: every request and response is handed on
    without modification.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe it to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None tells Scrapy to continue processing this request normally.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery logic; let other middlewares / defaults handle it.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 35.096154
| 78
| 0.674521
|
from scrapy import signals
from itemadapter import is_item, ItemAdapter
class Proj2062SpiderMiddleware:
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class Proj2062DownloaderMiddleware:
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| true
| true
|
790d750c0073c6e8d5d7bf30cbf1030e3f3b4896
| 132
|
py
|
Python
|
Beginner/1173.py
|
pedrodanieljardim/DesafiosURI-feitos-em-JAVA
|
4e727e1b08e01f527d0b7b884c268643f1472ded
|
[
"MIT"
] | 1
|
2022-03-19T18:06:25.000Z
|
2022-03-19T18:06:25.000Z
|
Beginner/1173.py
|
pedrodanieljardim/beecrowd
|
4e727e1b08e01f527d0b7b884c268643f1472ded
|
[
"MIT"
] | null | null | null |
Beginner/1173.py
|
pedrodanieljardim/beecrowd
|
4e727e1b08e01f527d0b7b884c268643f1472ded
|
[
"MIT"
] | null | null | null |
# beecrowd / URI 1173 (Array fill I): read one integer and print ten
# derived values, one per line, formatted "N[i] = value".
v = int(input())
# Build a flat list. The original code append()ed the whole comprehension,
# nesting it as a single element of n, so len(n) was 1 and '%d' % n[i]
# raised TypeError at runtime.
n = [v * x * 2 for x in range(1, 11)]
# NOTE(review): the bare debug print of the whole list was dropped — the
# judge compares stdout exactly, so any extra line is a wrong answer.
# NOTE(review): problem 1173 defines N[0] = X with each following element
# double the previous; confirm the linear v*x*2 formula against the
# statement — only the crash is fixed here, the formula is preserved.
for i in range(len(n)):
    print('N[%d] = %d' % (i, n[i]))
| 18.857143
| 38
| 0.5
|
n = []
v = int(input())
n.append([v*x*2 for x in range(1,11)])
print(n)
for i in range(len(n)):
print('N[%d] = %d' % (i, n[i]))
| true
| true
|
790d781330116ec3665c91ad77ec24b53c2d4fc6
| 1,572
|
py
|
Python
|
problem_6.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
problem_6.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
problem_6.py
|
johangenis/problems_vs_algorithms
|
9925d7319de849fd7814cf87050232c22d8c2a96
|
[
"MIT"
] | null | null | null |
def get_min_max(ints):
    """
    Return a tuple (min, max) out of a list of unsorted integers.

    Non-list input and the empty list both yield (None, None). The list
    is scanned exactly once, without sorting.

    Args:
        ints(list): list of integers containing one or more integers
    """
    # Reject anything that is not a list.
    if not isinstance(ints, list):
        return None, None
    # An empty list has no extremes.
    if not ints:
        return None, None
    # Seed both extremes with the first element, then sweep the rest.
    smallest = largest = ints[0]
    for value in ints[1:]:
        if value < smallest:
            smallest = value
        elif value > largest:
            largest = value
    return smallest, largest
# Example Test Case of Ten Integers
import random
# Test case 1: random int array
l = [i for i in range(0, 10)]  # a list containing 0 - 9
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
# Should print "Pass" as the result should be (0, 9)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Test case 2: empty array
print(f"Test case 2 - empty array")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
# Test case 3: array with single item
print(f"Test case 3 - array with single item")
# Should print "Pass" as the result should be (1, 1) — a one-element
# list is both its own min and max (comment previously said (None, None),
# which contradicted the assertion below).
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
# Test case 4: non array input
print(f"Test case 4 - non array input")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
| 26.644068
| 67
| 0.636768
|
def get_min_max(ints):
if not isinstance(ints, list):
return None, None
min_value = None
max_value = None
for index, value in enumerate(ints):
if index == 0:
min_value = value
max_value = value
if value < min_value:
min_value = value
elif value > max_value:
max_value = value
return min_value, max_value
import random
l = [i for i in range(0, 10)]
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
print(f"Test case 2 - empty array")
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
print(f"Test case 3 - array with single item")
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
print(f"Test case 4 - non array input")
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
| true
| true
|
790d782a620aacd6fa936c2d559a372314eb37d6
| 2,806
|
py
|
Python
|
clients/kratos/python/test/test_request_method_config.py
|
UkonnRa/sdk
|
23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_request_method_config.py
|
UkonnRa/sdk
|
23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_request_method_config.py
|
UkonnRa/sdk
|
23ab5408a89cdf6ba7a6d8944f8d1b1cdc68aa4c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Ory Kratos
Welcome to the ORY Kratos HTTP API documentation! # noqa: E501
The version of the OpenAPI document: latest
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import ory_kratos_client
from ory_kratos_client.models.request_method_config import RequestMethodConfig # noqa: E501
from ory_kratos_client.rest import ApiException
class TestRequestMethodConfig(unittest.TestCase):
    """RequestMethodConfig unit test stubs.

    Auto-generated by openapi-generator (see the file header); the stub
    only constructs the model and does not assert on its contents.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Build a RequestMethodConfig test instance.

            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = ory_kratos_client.models.request_method_config.RequestMethodConfig()  # noqa: E501
        if include_optional :
            return RequestMethodConfig(
                action = '0',
                errors = [
                    ory_kratos_client.models.error.Error(
                        message = '0', )
                    ],
                fields = [
                    ory_kratos_client.models.form_field.formField(
                        disabled = True,
                        errors = [
                            ory_kratos_client.models.error.Error(
                                message = '0', )
                            ],
                        name = '0',
                        pattern = '0',
                        required = True,
                        type = '0',
                        value = ory_kratos_client.models.value.value(), )
                    ],
                method = '0'
            )
        else :
            return RequestMethodConfig(
                action = '0',
                fields = [
                    ory_kratos_client.models.form_field.formField(
                        disabled = True,
                        errors = [
                            ory_kratos_client.models.error.Error(
                                message = '0', )
                            ],
                        name = '0',
                        pattern = '0',
                        required = True,
                        type = '0',
                        value = ory_kratos_client.models.value.value(), )
                    ],
                method = '0',
            )
    def testRequestMethodConfig(self):
        """Smoke-test construction with and without optional params."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
| 32.627907
| 100
| 0.497149
|
from __future__ import absolute_import
import unittest
import datetime
import ory_kratos_client
from ory_kratos_client.models.request_method_config import RequestMethodConfig
from ory_kratos_client.rest import ApiException
class TestRequestMethodConfig(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
include_optional :
return RequestMethodConfig(
action = '0',
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
method = '0'
)
else :
return RequestMethodConfig(
action = '0',
fields = [
ory_kratos_client.models.form_field.formField(
disabled = True,
errors = [
ory_kratos_client.models.error.Error(
message = '0', )
],
name = '0',
pattern = '0',
required = True,
type = '0',
value = ory_kratos_client.models.value.value(), )
],
method = '0',
)
def testRequestMethodConfig(self):
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790d78f20215b94be671773f0601de32147c393d
| 2,646
|
py
|
Python
|
detr_tensorflow/models/custom_layers.py
|
Leonardo-Blanger/detr_tensorflow
|
38fc3c586b6767deed09bd7ec6c2a2fd7002346e
|
[
"MIT"
] | 59
|
2020-07-04T19:14:31.000Z
|
2022-03-08T14:30:27.000Z
|
detr_tensorflow/models/custom_layers.py
|
Leonardo-Blanger/detr_tensorflow
|
38fc3c586b6767deed09bd7ec6c2a2fd7002346e
|
[
"MIT"
] | 7
|
2020-08-17T23:57:43.000Z
|
2022-03-22T02:52:20.000Z
|
detr_tensorflow/models/custom_layers.py
|
Leonardo-Blanger/detr_tensorflow
|
38fc3c586b6767deed09bd7ec6c2a2fd7002346e
|
[
"MIT"
] | 14
|
2020-08-17T04:10:16.000Z
|
2022-02-06T05:48:33.000Z
|
import tensorflow as tf
class FrozenBatchNorm2D(tf.keras.layers.Layer):
    """Batch normalization with every parameter frozen.

    All four per-channel variables (weight, bias, running_mean,
    running_var) are created with trainable=False, so the layer applies a
    fixed affine normalization and behaves identically in training and
    inference. Values are presumably loaded from a converted pretrained
    checkpoint — they are only zero/one initialized here (TODO confirm).
    """
    def __init__(self, eps=1e-5, **kwargs):
        super().__init__(**kwargs)
        # Small constant added to the running variance for numerical
        # stability.
        self.eps = eps
    def build(self, input_shape):
        # One value per channel; channels are taken from the last axis.
        # Note: names and creation order matter for checkpoint loading,
        # so they must not be changed.
        self.weight = self.add_weight(name='weight', shape=[input_shape[-1]],
                                      initializer='zeros', trainable=False)
        self.bias = self.add_weight(name='bias', shape=[input_shape[-1]],
                                    initializer='zeros', trainable=False)
        self.running_mean = self.add_weight(name='running_mean',
                                            shape=[input_shape[-1]],
                                            initializer='zeros',
                                            trainable=False)
        self.running_var = self.add_weight(name='running_var',
                                           shape=[input_shape[-1]],
                                           initializer='ones',
                                           trainable=False)
    def call(self, x):
        # Fold normalization into one per-channel scale and shift:
        # y = (x - mean) / sqrt(var + eps) * weight + bias
        #   = x * scale + (bias - mean * scale)
        scale = self.weight * tf.math.rsqrt(self.running_var + self.eps)
        shift = self.bias - self.running_mean * scale
        return x * scale + shift
    def compute_output_shape(self, input_shape):
        # Elementwise transform: shape is unchanged.
        return input_shape
class Linear(tf.keras.layers.Layer):
    '''
    Use this custom layer instead of tf.keras.layers.Dense
    to allow loading converted PyTorch Dense weights
    that have shape (output_dim, input_dim) — i.e. the kernel is stored
    transposed relative to Keras' (input_dim, output_dim) layout.
    '''
    def __init__(self, output_dim, **kwargs):
        super().__init__(**kwargs)
        # Number of output features produced by the layer.
        self.output_dim = output_dim
    def build(self, input_shape):
        # Kernel kept in the PyTorch (out, in) orientation; see docstring.
        self.kernel = self.add_weight(name='kernel',
                                      shape=[self.output_dim, input_shape[-1]],
                                      initializer='zeros', trainable=True)
        self.bias = self.add_weight(name='bias',
                                    shape=[self.output_dim],
                                    initializer='zeros', trainable=True)
    def call(self, x):
        # transpose_b compensates for the transposed kernel layout.
        return tf.matmul(x, self.kernel, transpose_b=True) + self.bias
    def compute_output_shape(self, input_shape):
        # Same leading dimensions; last axis becomes output_dim.
        return input_shape.as_list()[:-1] + [self.output_dim]
class FixedEmbedding(tf.keras.layers.Layer):
    """A trainable tensor of fixed shape, independent of the layer input.

    call() ignores its argument and always returns the same weight, so
    the layer acts as a learned constant.
    NOTE(review): its exact role (e.g. positional / query embeddings)
    depends on the caller — confirm there.
    """
    def __init__(self, embed_shape, **kwargs):
        super().__init__(**kwargs)
        # Full shape of the embedding tensor created in build().
        self.embed_shape = embed_shape
    def build(self, input_shape):
        # input_shape is ignored; the weight's shape is fixed up front.
        self.w = self.add_weight(name='kernel', shape=self.embed_shape,
                                 initializer='zeros', trainable=True)
    def call(self, x=None):
        return self.w
| 38.911765
| 79
| 0.544974
|
import tensorflow as tf
class FrozenBatchNorm2D(tf.keras.layers.Layer):
def __init__(self, eps=1e-5, **kwargs):
super().__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.weight = self.add_weight(name='weight', shape=[input_shape[-1]],
initializer='zeros', trainable=False)
self.bias = self.add_weight(name='bias', shape=[input_shape[-1]],
initializer='zeros', trainable=False)
self.running_mean = self.add_weight(name='running_mean',
shape=[input_shape[-1]],
initializer='zeros',
trainable=False)
self.running_var = self.add_weight(name='running_var',
shape=[input_shape[-1]],
initializer='ones',
trainable=False)
def call(self, x):
scale = self.weight * tf.math.rsqrt(self.running_var + self.eps)
shift = self.bias - self.running_mean * scale
return x * scale + shift
def compute_output_shape(self, input_shape):
return input_shape
class Linear(tf.keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
def build(self, input_shape):
self.kernel = self.add_weight(name='kernel',
shape=[self.output_dim, input_shape[-1]],
initializer='zeros', trainable=True)
self.bias = self.add_weight(name='bias',
shape=[self.output_dim],
initializer='zeros', trainable=True)
def call(self, x):
return tf.matmul(x, self.kernel, transpose_b=True) + self.bias
def compute_output_shape(self, input_shape):
return input_shape.as_list()[:-1] + [self.output_dim]
class FixedEmbedding(tf.keras.layers.Layer):
def __init__(self, embed_shape, **kwargs):
super().__init__(**kwargs)
self.embed_shape = embed_shape
def build(self, input_shape):
self.w = self.add_weight(name='kernel', shape=self.embed_shape,
initializer='zeros', trainable=True)
def call(self, x=None):
return self.w
| true
| true
|
790d794890d607e5896329ec11df3c8e12aae1c0
| 18,344
|
py
|
Python
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 61
|
2019-04-12T18:49:57.000Z
|
2022-03-19T22:23:16.000Z
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 127
|
2019-04-09T00:55:50.000Z
|
2022-03-21T15:35:41.000Z
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
acidburn0zzz/llvm-project
|
7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9
|
[
"Apache-2.0"
] | 10
|
2019-04-02T18:25:40.000Z
|
2022-02-15T07:11:37.000Z
|
"""
Test SBProcess APIs, including ReadMemory(), WriteMemory(), and others.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbutil import get_stopped_thread, state_type_to_str
class ProcessAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
    def setUp(self):
        """Cache the main.cpp breakpoint line before each test."""
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break inside main(). The search string
        # must match the marker comment in main.cpp exactly.
        self.line = line_number(
            "main.cpp",
            "// Set break point at this line and check variable 'my_char'.")
@skipIfReproducer # SBProcess::ReadMemory is not instrumented.
def test_read_memory(self):
"""Test Python SBProcess.ReadMemory() API."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(
val.AddressOf().GetValueAsUnsigned(), 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'x'",
exe=False,
startstr=b'x')
# Read (char *)my_char_ptr.
val = frame.FindValue("my_char_ptr", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(
val.GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='Does it work?')
# Get the SBValue for the global variable 'my_cstring'.
val = frame.FindValue("my_cstring", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 256 to read at most 256 bytes
# from the address, and expect to get a Python string as the result
# object!
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(
val.AddressOf().GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
# Get the SBValue for the global variable 'my_uint32'.
val = frame.FindValue("my_uint32", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 4 to read 4 bytes
# from the address, and expect to get an int as the result!
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(
val.AddressOf().GetValueAsUnsigned(), 4, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("uint32 read is:", my_uint32)
if my_uint32 != 12345:
self.fail(
"Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output")
    @skipIfReproducer # SBProcess::WriteMemory is not instrumented.
    def test_write_memory(self):
        """Test Python SBProcess.WriteMemory() API.

        Overwrites the global 'my_char' (initially 'x') with 'a' in the
        stopped inferior, then reads the byte back to verify the write.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)
        # Launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
        self.assertTrue(
            thread.IsValid(),
            "There should be a thread stopped due to breakpoint")
        frame = thread.GetFrameAtIndex(0)
        # Get the SBValue for the global variable 'my_char'.
        val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
        self.DebugSBValue(val)
        # If the variable does not have a load address, there's no sense
        # continuing.
        if not val.GetLocation().startswith("0x"):
            return
        # OK, let's get the hex location of the variable.
        location = int(val.GetLocation(), 16)
        # The program logic makes the 'my_char' variable to have memory content as 'x'.
        # But we want to use the WriteMemory() API to assign 'a' to the
        # variable.
        # Now use WriteMemory() API to write 'a' into the global variable.
        error = lldb.SBError()
        result = process.WriteMemory(location, 'a', error)
        # WriteMemory returns the number of bytes written; exactly one
        # byte is expected here.
        if not error.Success() or result != 1:
            self.fail("SBProcess.WriteMemory() failed")
        # Read from the memory location. This time it should be 'a'.
        # Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
        # expect to get a Python string as the result object!
        content = process.ReadMemory(location, 1, error)
        if not error.Success():
            self.fail("SBProcess.ReadMemory() failed")
        if self.TraceOn():
            print("memory content:", content)
        self.expect(
            content,
            "Result from SBProcess.ReadMemory() matches our expected output: 'a'",
            exe=False,
            startstr=b'a')
    @skipIfReproducer # SBProcess::WriteMemory is not instrumented.
    def test_access_my_int(self):
        """Test access 'my_int' using Python SBProcess.GetByteOrder() and other APIs.

        Writes the integer 256 into the inferior's global 'my_int' in the
        target's own byte order, then verifies the new value both via the
        SBValue and via a raw ReadMemory() round-trip.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)
        # Launch the process, and do not stop at the entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
        self.assertTrue(
            thread.IsValid(),
            "There should be a thread stopped due to breakpoint")
        frame = thread.GetFrameAtIndex(0)
        # Get the SBValue for the global variable 'my_int'.
        val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
        self.DebugSBValue(val)
        # If the variable does not have a load address, there's no sense
        # continuing.
        if not val.GetLocation().startswith("0x"):
            return
        # OK, let's get the hex location of the variable.
        location = int(val.GetLocation(), 16)
        # Note that the canonical form of the bytearray is little endian.
        from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
        byteSize = val.GetByteSize()
        bytes = int_to_bytearray(256, byteSize)
        byteOrder = process.GetByteOrder()
        if byteOrder == lldb.eByteOrderBig:
            # Big-endian target: reverse the little-endian canonical form
            # before writing it to the inferior.
            bytes.reverse()
        elif byteOrder == lldb.eByteOrderLittle:
            pass
        else:
            # Neither big endian nor little endian?  Return for now.
            # Add more logic here if we want to handle other types.
            return
        # The program logic makes the 'my_int' variable to have int type and value of 0.
        # But we want to use the WriteMemory() API to assign 256 to the
        # variable.
        # Now use WriteMemory() API to write 256 into the global variable.
        error = lldb.SBError()
        result = process.WriteMemory(location, bytes, error)
        if not error.Success() or result != byteSize:
            self.fail("SBProcess.WriteMemory() failed")
        # Make sure that the val we got originally updates itself to notice the
        # change:
        self.expect(
            val.GetValue(),
            "SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
            exe=False,
            startstr='256')
        # And for grins, get the SBValue for the global variable 'my_int'
        # again, to make sure that also tracks the new value:
        val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
        self.expect(
            val.GetValue(),
            "SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
            exe=False,
            startstr='256')
        # Now read the memory content.  The bytearray should have (byte)1 as
        # the second element.
        content = process.ReadMemory(location, byteSize, error)
        if not error.Success():
            self.fail("SBProcess.ReadMemory() failed")
        # The bytearray_to_int utility function expects a little endian
        # bytearray.
        if byteOrder == lldb.eByteOrderBig:
            content = bytearray(content, 'ascii')
            content.reverse()
        new_value = bytearray_to_int(content, byteSize)
        if new_value != 256:
            self.fail("Memory content read from 'my_int' does not match (int)256")
        # Dump the memory content....
        if self.TraceOn():
            for i in content:
                print("byte:", i)
def test_remote_launch(self):
"""Test SBProcess.RemoteLaunch() API with a process not in eStateConnected, and it should fail."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if self.TraceOn():
print("process state:", state_type_to_str(process.GetState()))
self.assertTrue(process.GetState() != lldb.eStateConnected)
error = lldb.SBError()
success = process.RemoteLaunch(
None, None, None, None, None, None, 0, False, error)
self.assertTrue(
not success,
"RemoteLaunch() should fail for process state != eStateConnected")
def test_get_num_supported_hardware_watchpoints(self):
"""Test SBProcess.GetNumSupportedHardwareWatchpoints() API with a process."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if self.TraceOn() and error.Success():
print("Number of supported hardware watchpoints: %d" % num)
    @no_debug_info_test
    def test_get_process_info(self):
        """Test SBProcess::GetProcessInfo() API with a locally launched process.

        Launches the inferior stopped at its entry point, then checks that
        the SBProcessInfo it reports is internally consistent: valid
        executable file, non-empty name, valid PID, and matching validity
        flags for the user/group ID accessors.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)
        # Launch the process and stop at the entry point.
        launch_info = target.GetLaunchInfo()
        launch_info.SetWorkingDirectory(self.get_process_working_directory())
        launch_flags = launch_info.GetLaunchFlags()
        launch_flags |= lldb.eLaunchFlagStopAtEntry
        launch_info.SetLaunchFlags(launch_flags)
        error = lldb.SBError()
        process = target.Launch(launch_info, error)
        if not error.Success():
            self.fail("Failed to launch process")
        # Verify basic process info can be retrieved successfully
        process_info = process.GetProcessInfo()
        self.assertTrue(process_info.IsValid())
        file_spec = process_info.GetExecutableFile()
        self.assertTrue(file_spec.IsValid())
        process_name = process_info.GetName()
        self.assertIsNotNone(process_name, "Process has a name")
        self.assertGreater(len(process_name), 0, "Process name isn't blank")
        self.assertEqual(file_spec.GetFilename(), "a.out")
        self.assertNotEqual(
            process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID,
            "Process ID is valid")
        triple = process_info.GetTriple()
        self.assertIsNotNone(triple, "Process has a triple")
        # Additional process info varies by platform, so just check that
        # whatever info was retrieved is consistent and nothing blows up.
        # Each *IsValid() flag must agree with whether the corresponding
        # getter returns the UINT32_MAX "invalid" sentinel.
        if process_info.UserIDIsValid():
            self.assertNotEqual(
                process_info.GetUserID(), lldb.UINT32_MAX,
                "Process user ID is valid")
        else:
            self.assertEqual(
                process_info.GetUserID(), lldb.UINT32_MAX,
                "Process user ID is invalid")
        if process_info.GroupIDIsValid():
            self.assertNotEqual(
                process_info.GetGroupID(), lldb.UINT32_MAX,
                "Process group ID is valid")
        else:
            self.assertEqual(
                process_info.GetGroupID(), lldb.UINT32_MAX,
                "Process group ID is invalid")
        if process_info.EffectiveUserIDIsValid():
            self.assertNotEqual(
                process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
                "Process effective user ID is valid")
        else:
            self.assertEqual(
                process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
                "Process effective user ID is invalid")
        if process_info.EffectiveGroupIDIsValid():
            self.assertNotEqual(
                process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
                "Process effective group ID is valid")
        else:
            self.assertEqual(
                process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
                "Process effective group ID is invalid")
        # Just exercise the accessor; the parent PID value itself is
        # platform-dependent, so no assertion is made on it.
        process_info.GetParentProcessID()
    def test_allocate_deallocate_memory(self):
        """Test Python SBProcess.AllocateMemory() and SBProcess.DeallocateMemory() APIs.

        Allocates a read-only block in the inferior, checks that the
        debugger can still write/read it, that the inferior itself can read
        but not write it, and finally deallocates the block.
        """
        self.build()
        (target, process, main_thread, main_breakpoint) = lldbutil.run_to_source_breakpoint(
            self, "// Set break point at this line", lldb.SBFileSpec("main.cpp"))
        # Allocate a block of memory in the target process
        error = lldb.SBError()
        addr = process.AllocateMemory(16384, lldb.ePermissionsReadable, error)
        if not error.Success() or addr == lldb.LLDB_INVALID_ADDRESS:
            self.fail("SBProcess.AllocateMemory() failed")
        # Now use WriteMemory() API to write 'a' into the allocated
        # memory. Note that the debugger can do this even though the
        # block is not set writable.
        result = process.WriteMemory(addr, 'a', error)
        if not error.Success() or result != 1:
            self.fail("SBProcess.WriteMemory() failed")
        # Read from the memory location.  This time it should be 'a'.
        # Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
        # expect to get a Python string as the result object!
        content = process.ReadMemory(addr, 1, error)
        if not error.Success():
            self.fail("SBProcess.ReadMemory() failed")
        if self.TraceOn():
            print("memory content:", content)
        self.expect(
            content,
            "Result from SBProcess.ReadMemory() matches our expected output: 'a'",
            exe=False,
            startstr=b'a')
        # Verify that the process itself can read the allocated memory
        frame = main_thread.GetFrameAtIndex(0)
        val = frame.EvaluateExpression(
            "test_read(reinterpret_cast<char *>({:#x}))".format(addr))
        self.expect(val.GetValue(),
                    "Result of test_read() matches expected output 'a'",
                    exe=False,
                    startstr="'a'")
        # Verify that the process cannot write into the block
        # (the block was allocated with ePermissionsReadable only).
        val = frame.EvaluateExpression(
            "test_write(reinterpret_cast<char *>({:#x}), 'b')".format(addr))
        if val.GetError().Success():
            self.fail(
                "test_write() to allocated memory without write permission unexpectedly succeeded")
        # Deallocate the memory
        error = process.DeallocateMemory(addr)
        if not error.Success():
            self.fail("SBProcess.DeallocateMemory() failed")
| 40.22807
| 106
| 0.629089
|
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbutil import get_stopped_thread, state_type_to_str
class ProcessAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number(
"main.cpp",
"// Set break point at this line and check variable 'my_char'.")
@skipIfReproducer # SBProcess::ReadMemory is not instrumented.
def test_read_memory(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(
val.AddressOf().GetValueAsUnsigned(), 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'x'",
exe=False,
startstr=b'x')
# Read (char *)my_char_ptr.
val = frame.FindValue("my_char_ptr", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(
val.GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='Does it work?')
# Get the SBValue for the global variable 'my_cstring'.
val = frame.FindValue("my_cstring", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 256 to read at most 256 bytes
# from the address, and expect to get a Python string as the result
# object!
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(
val.AddressOf().GetValueAsUnsigned(), 256, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("cstring read is:", cstring)
self.expect(
cstring,
"Result from SBProcess.ReadCStringFromMemory() matches our expected output",
exe=False,
startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
# Get the SBValue for the global variable 'my_uint32'.
val = frame.FindValue("my_uint32", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# Due to the typemap magic (see lldb.swig), we pass in 4 to read 4 bytes
# from the address, and expect to get an int as the result!
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(
val.AddressOf().GetValueAsUnsigned(), 4, error)
if not error.Success():
self.fail("SBProcess.ReadCStringFromMemory() failed")
if self.TraceOn():
print("uint32 read is:", my_uint32)
if my_uint32 != 12345:
self.fail(
"Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output")
@skipIfReproducer # SBProcess::WriteMemory is not instrumented.
def test_write_memory(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_char'.
val = frame.FindValue("my_char", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
if not val.GetLocation().startswith("0x"):
return
location = int(val.GetLocation(), 16)
# The program logic makes the 'my_char' variable to have memory content as 'x'.
# But we want to use the WriteMemory() API to assign 'a' to the
# variable.
# Now use WriteMemory() API to write 'a' into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, 'a', error)
if not error.Success() or result != 1:
self.fail("SBProcess.WriteMemory() failed")
# Read from the memory location. This time it should be 'a'.
# Due to the typemap magic (see lldb.swig), we pass in 1 to ReadMemory and
# expect to get a Python string as the result object!
content = process.ReadMemory(location, 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'a'",
exe=False,
startstr=b'a')
@skipIfReproducer # SBProcess::WriteMemory is not instrumented.
def test_access_my_int(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(
thread.IsValid(),
"There should be a thread stopped due to breakpoint")
frame = thread.GetFrameAtIndex(0)
# Get the SBValue for the global variable 'my_int'.
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
# If the variable does not have a load address, there's no sense
if not val.GetLocation().startswith("0x"):
return
location = int(val.GetLocation(), 16)
# Note that the canonical from of the bytearray is little endian.
from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
byteSize = val.GetByteSize()
bytes = int_to_bytearray(256, byteSize)
byteOrder = process.GetByteOrder()
if byteOrder == lldb.eByteOrderBig:
bytes.reverse()
elif byteOrder == lldb.eByteOrderLittle:
pass
else:
# Neither big endian nor little endian? Return for now.
# Add more logic here if we want to handle other types.
return
# The program logic makes the 'my_int' variable to have int type and value of 0.
# But we want to use the WriteMemory() API to assign 256 to the
# variable.
# Now use WriteMemory() API to write 256 into the global variable.
error = lldb.SBError()
result = process.WriteMemory(location, bytes, error)
if not error.Success() or result != byteSize:
self.fail("SBProcess.WriteMemory() failed")
# Make sure that the val we got originally updates itself to notice the
# change:
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# And for grins, get the SBValue for the global variable 'my_int'
# again, to make sure that also tracks the new value:
val = frame.FindValue("my_int", lldb.eValueTypeVariableGlobal)
self.expect(
val.GetValue(),
"SBProcess.ReadMemory() successfully writes (int)256 to the memory location for 'my_int'",
exe=False,
startstr='256')
# Now read the memory content. The bytearray should have (byte)1 as
# the second element.
content = process.ReadMemory(location, byteSize, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
# The bytearray_to_int utility function expects a little endian
# bytearray.
if byteOrder == lldb.eByteOrderBig:
content = bytearray(content, 'ascii')
content.reverse()
new_value = bytearray_to_int(content, byteSize)
if new_value != 256:
self.fail("Memory content read from 'my_int' does not match (int)256")
# Dump the memory content....
if self.TraceOn():
for i in content:
print("byte:", i)
def test_remote_launch(self):
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if self.TraceOn():
print("process state:", state_type_to_str(process.GetState()))
self.assertTrue(process.GetState() != lldb.eStateConnected)
error = lldb.SBError()
success = process.RemoteLaunch(
None, None, None, None, None, None, 0, False, error)
self.assertTrue(
not success,
"RemoteLaunch() should fail for process state != eStateConnected")
def test_get_num_supported_hardware_watchpoints(self):
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if self.TraceOn() and error.Success():
print("Number of supported hardware watchpoints: %d" % num)
@no_debug_info_test
def test_get_process_info(self):
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Launch the process and stop at the entry point.
launch_info = target.GetLaunchInfo()
launch_info.SetWorkingDirectory(self.get_process_working_directory())
launch_flags = launch_info.GetLaunchFlags()
launch_flags |= lldb.eLaunchFlagStopAtEntry
launch_info.SetLaunchFlags(launch_flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
if not error.Success():
self.fail("Failed to launch process")
# Verify basic process info can be retrieved successfully
process_info = process.GetProcessInfo()
self.assertTrue(process_info.IsValid())
file_spec = process_info.GetExecutableFile()
self.assertTrue(file_spec.IsValid())
process_name = process_info.GetName()
self.assertIsNotNone(process_name, "Process has a name")
self.assertGreater(len(process_name), 0, "Process name isn't blank")
self.assertEqual(file_spec.GetFilename(), "a.out")
self.assertNotEqual(
process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID,
"Process ID is valid")
triple = process_info.GetTriple()
self.assertIsNotNone(triple, "Process has a triple")
if process_info.UserIDIsValid():
self.assertNotEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is valid")
else:
self.assertEqual(
process_info.GetUserID(), lldb.UINT32_MAX,
"Process user ID is invalid")
if process_info.GroupIDIsValid():
self.assertNotEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is valid")
else:
self.assertEqual(
process_info.GetGroupID(), lldb.UINT32_MAX,
"Process group ID is invalid")
if process_info.EffectiveUserIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveUserID(), lldb.UINT32_MAX,
"Process effective user ID is invalid")
if process_info.EffectiveGroupIDIsValid():
self.assertNotEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is valid")
else:
self.assertEqual(
process_info.GetEffectiveGroupID(), lldb.UINT32_MAX,
"Process effective group ID is invalid")
process_info.GetParentProcessID()
def test_allocate_deallocate_memory(self):
self.build()
(target, process, main_thread, main_breakpoint) = lldbutil.run_to_source_breakpoint(
self, "// Set break point at this line", lldb.SBFileSpec("main.cpp"))
error = lldb.SBError()
addr = process.AllocateMemory(16384, lldb.ePermissionsReadable, error)
if not error.Success() or addr == lldb.LLDB_INVALID_ADDRESS:
self.fail("SBProcess.AllocateMemory() failed")
result = process.WriteMemory(addr, 'a', error)
if not error.Success() or result != 1:
self.fail("SBProcess.WriteMemory() failed")
content = process.ReadMemory(addr, 1, error)
if not error.Success():
self.fail("SBProcess.ReadMemory() failed")
if self.TraceOn():
print("memory content:", content)
self.expect(
content,
"Result from SBProcess.ReadMemory() matches our expected output: 'a'",
exe=False,
startstr=b'a')
frame = main_thread.GetFrameAtIndex(0)
val = frame.EvaluateExpression(
"test_read(reinterpret_cast<char *>({:#x}))".format(addr))
self.expect(val.GetValue(),
"Result of test_read() matches expected output 'a'",
exe=False,
startstr="'a'")
val = frame.EvaluateExpression(
"test_write(reinterpret_cast<char *>({:#x}), 'b')".format(addr))
if val.GetError().Success():
self.fail(
"test_write() to allocated memory without write permission unexpectedly succeeded")
error = process.DeallocateMemory(addr)
if not error.Success():
self.fail("SBProcess.DeallocateMemory() failed")
| true
| true
|
790d79ee904552647ec607d5d99d7c416fe813e2
| 2,996
|
py
|
Python
|
src/avm2/generated/generate.py
|
paolodm/shumway
|
75c8d387b48a2f2e561eb4bc3458162b7cc71b16
|
[
"Apache-2.0"
] | 1
|
2015-01-17T05:42:59.000Z
|
2015-01-17T05:42:59.000Z
|
src/avm2/generated/generate.py
|
Acidburn0zzz/shumway
|
ef61c3211b91cb62f22441a29b59a0bdbcc2bf93
|
[
"Apache-2.0"
] | null | null | null |
src/avm2/generated/generate.py
|
Acidburn0zzz/shumway
|
ef61c3211b91cb62f22441a29b59a0bdbcc2bf93
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- Mode: Python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*-
# vi: set ts=4 sw=4 expandtab:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2004-2006
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import os
import subprocess
import sys
def compile_abc(target, files, deps=None, configs=None):
    """Compile ActionScript sources into an ABC bundle with asc.jar.

    Builds the java command line for macromedia.asc.embedding.ScriptCompiler
    and runs it with ``target`` as the working directory.

    Args:
        target: Output directory (and basename) for the .abc bundle; also
            the cwd for the compiler, so dependency paths are relative to it.
        files: Sequence of .as source file names to compile.
        deps: Optional sequence of dependency names; each is referenced as
            ``../<dep>/<dep>.abc`` relative to the target directory.
        configs: Optional sequence of extra compiler arguments (e.g.
            ``['-config', 'CONFIG::X=false']``).

    Returns:
        The compiler process's exit status (0 on success).
    """
    # Allow overriding the asc.jar location through the ASC env var.
    asc_jar = os.environ.get('ASC', os.path.realpath('../../../utils/asc.jar'))
    javacmd = ['java', '-ea', '-DAS3', '-DAVMPLUS', '-classpath', asc_jar,
               'macromedia.asc.embedding.ScriptCompiler', '-builtin']
    if deps:
        javacmd.extend("../%s/%s.abc" % (dep, dep) for dep in deps)
    javacmd.extend(['-out', target])
    javacmd.extend(files)
    # Bug fix: the original called javacmd.extend(configs) unconditionally,
    # which raised TypeError when configs was left at its default of None.
    if configs:
        javacmd.extend(configs)
    p = subprocess.Popen(javacmd, cwd=target)
    # Surface the exit status instead of silently discarding it; existing
    # callers ignore the return value, so this is backward-compatible.
    return p.wait()
def main():
    """Compile the builtin, shell and avmplus ABC bundles.

    Any command-line arguments are forwarded verbatim to the compiler as
    extra config flags; with no arguments, float support is disabled.
    """
    configs = sys.argv[1:]
    if configs == []:
        # Build without float support by default
        configs = ['-config', 'CONFIG::VMCFG_FLOAT=false']
    # 'builtin' has no deps; 'shell' and 'avmplus' both link against it.
    compile_abc("builtin", ["builtin.as", "Vector.as", "DescribeType.as", "JSON.as", "Math.as", "Error.as", "Date.as", "RegExp.as", "IDataInput.as", "IDataOutput.as", "ByteArray.as", "Proxy.as", "XML.as", "Dictionary.as"], configs=configs)
    compile_abc("shell", ["Capabilities.as", "Domain.as", "System.as"], deps=["builtin"], configs=configs)
    compile_abc("avmplus", ["avmplus.as"], deps=["builtin"], configs=configs)
if __name__ == "__main__":
main()
| 42.8
| 239
| 0.697263
|
import os
import subprocess
import sys
def compile_abc(target, files, deps=None, configs=None):
asc_jar = os.environ.get('ASC', os.path.realpath('../../../utils/asc.jar'))
javacmd = ['java', '-ea', '-DAS3', '-DAVMPLUS', '-classpath', asc_jar, 'macromedia.asc.embedding.ScriptCompiler', '-builtin']
if deps:
javacmd.extend("../%s/%s.abc" % (dep, dep) for dep in deps)
javacmd.extend(['-out', target])
javacmd.extend(files)
javacmd.extend(configs)
p = subprocess.Popen(javacmd, cwd=target)
p.wait()
def main():
configs = sys.argv[1:]
if configs == []:
configs = ['-config', 'CONFIG::VMCFG_FLOAT=false']
compile_abc("builtin", ["builtin.as", "Vector.as", "DescribeType.as", "JSON.as", "Math.as", "Error.as", "Date.as", "RegExp.as", "IDataInput.as", "IDataOutput.as", "ByteArray.as", "Proxy.as", "XML.as", "Dictionary.as"], configs=configs)
compile_abc("shell", ["Capabilities.as", "Domain.as", "System.as"], deps=["builtin"], configs=configs)
compile_abc("avmplus", ["avmplus.as"], deps=["builtin"], configs=configs)
if __name__ == "__main__":
main()
| true
| true
|
790d7b3bbafa0650cab614e8411daae9b0927426
| 29,770
|
py
|
Python
|
include/users_pb2.py
|
toyan/TinkoffNewAPI_Python_use_example
|
983c2743b472b3444f77fd06279e2a8f715fb951
|
[
"MIT"
] | 1
|
2022-01-20T21:43:31.000Z
|
2022-01-20T21:43:31.000Z
|
include/users_pb2.py
|
toyan/TinkoffNewAPI_Python_use_example
|
983c2743b472b3444f77fd06279e2a8f715fb951
|
[
"MIT"
] | null | null | null |
include/users_pb2.py
|
toyan/TinkoffNewAPI_Python_use_example
|
983c2743b472b3444f77fd06279e2a8f715fb951
|
[
"MIT"
] | 1
|
2022-01-13T03:38:45.000Z
|
2022-01-13T03:38:45.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import include.common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\021Tinkoff/investAPI\242\002\005TIAPI\252\002\024Tinkoff.InvestAPI.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0busers.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0c\x63ommon.proto\"\x14\n\x12GetAccountsRequest\"W\n\x13GetAccountsResponse\x12@\n\x08\x61\x63\x63ounts\x18\x01 \x03(\x0b\x32..tinkoff.public.invest.api.contract.v1.Account\"\x8d\x02\n\x07\x41\x63\x63ount\x12\n\n\x02id\x18\x01 \x01(\t\x12@\n\x04type\x18\x02 \x01(\x0e\x32\x32.tinkoff.public.invest.api.contract.v1.AccountType\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x44\n\x06status\x18\x04 \x01(\x0e\x32\x34.tinkoff.public.invest.api.contract.v1.AccountStatus\x12/\n\x0bopened_date\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63losed_date\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"0\n\x1aGetMarginAttributesRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xa8\x03\n\x1bGetMarginAttributesResponse\x12K\n\x10liquid_portfolio\x18\x01 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12J\n\x0fstarting_margin\x18\x02 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12I\n\x0eminimal_margin\x18\x03 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12Q\n\x17\x66unds_sufficiency_level\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12R\n\x17\x61mount_of_missing_funds\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"\x16\n\x14GetUserTariffRequest\"\xab\x01\n\x15GetUserTariffResponse\x12G\n\x0cunary_limits\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.UnaryLimit\x12I\n\rstream_limits\x18\x02 \x03(\x0b\x32\x32.tinkoff.public.invest.api.contract.v1.StreamLimit\"7\n\nUnaryLimit\x12\x18\n\x10limit_per_minute\x18\x01 \x01(\x05\x12\x0f\n\x07methods\x18\x02 \x03(\t\"-\n\x0bStreamLimit\x12\r\n\x05limit\x18\x01 \x01(\x05\x12\x0f\n\x07streams\x18\x02 \x03(\t\"\x10\n\x0eGetInfoRequest\"\\\n\x0fGetInfoResponse\x12\x13\n\x0bprem_status\x18\x01 \x01(\x08\x12\x13\n\x0bqual_status\x18\x02 
\x01(\x08\x12\x1f\n\x17qualified_for_work_with\x18\x03 \x03(\t*\x80\x01\n\x0b\x41\x63\x63ountType\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_UNSPECIFIED\x10\x00\x12\x18\n\x14\x41\x43\x43OUNT_TYPE_TINKOFF\x10\x01\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_TINKOFF_IIS\x10\x02\x12\x1b\n\x17\x41\x43\x43OUNT_TYPE_INVEST_BOX\x10\x03*{\n\rAccountStatus\x12\x1e\n\x1a\x41\x43\x43OUNT_STATUS_UNSPECIFIED\x10\x00\x12\x16\n\x12\x41\x43\x43OUNT_STATUS_NEW\x10\x01\x12\x17\n\x13\x41\x43\x43OUNT_STATUS_OPEN\x10\x02\x12\x19\n\x15\x41\x43\x43OUNT_STATUS_CLOSED\x10\x03\x32\xbb\x04\n\x0cUsersService\x12\x84\x01\n\x0bGetAccounts\x12\x39.tinkoff.public.invest.api.contract.v1.GetAccountsRequest\x1a:.tinkoff.public.invest.api.contract.v1.GetAccountsResponse\x12\x9c\x01\n\x13GetMarginAttributes\x12\x41.tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest\x1a\x42.tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse\x12\x8a\x01\n\rGetUserTariff\x12;.tinkoff.public.invest.api.contract.v1.GetUserTariffRequest\x1a<.tinkoff.public.invest.api.contract.v1.GetUserTariffResponse\x12x\n\x07GetInfo\x12\x35.tinkoff.public.invest.api.contract.v1.GetInfoRequest\x1a\x36.tinkoff.public.invest.api.contract.v1.GetInfoResponseBf\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x11Tinkoff/investAPI\xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestAPI.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,common__pb2.DESCRIPTOR,])
# Descriptor for the AccountType proto enum (account kind: Tinkoff broker
# account, IIS, or invest-box).  Generated by protoc — do not hand-edit:
# the serialized_start/serialized_end offsets index into the file's
# serialized_pb blob and must stay in sync with it.
_ACCOUNTTYPE = _descriptor.EnumDescriptor(
    name='AccountType',
    full_name='tinkoff.public.invest.api.contract.v1.AccountType',
    filename=None,
    file=DESCRIPTOR,
    create_key=_descriptor._internal_create_key,
    values=[
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_TYPE_UNSPECIFIED', index=0, number=0,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_TYPE_TINKOFF', index=1, number=1,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_TYPE_TINKOFF_IIS', index=2, number=2,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_TYPE_INVEST_BOX', index=3, number=3,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=1376,
    serialized_end=1504,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTTYPE)

# Public module-level wrapper: exposes the enum values (e.g.
# AccountType.ACCOUNT_TYPE_TINKOFF) through the enum_type_wrapper API.
AccountType = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTTYPE)
# Descriptor for the AccountStatus proto enum (lifecycle state of an
# account: new, open, or closed).  Generated by protoc — do not hand-edit:
# serialized_start/serialized_end must match the serialized_pb blob.
_ACCOUNTSTATUS = _descriptor.EnumDescriptor(
    name='AccountStatus',
    full_name='tinkoff.public.invest.api.contract.v1.AccountStatus',
    filename=None,
    file=DESCRIPTOR,
    create_key=_descriptor._internal_create_key,
    values=[
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_STATUS_UNSPECIFIED', index=0, number=0,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_STATUS_NEW', index=1, number=1,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_STATUS_OPEN', index=2, number=2,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
        _descriptor.EnumValueDescriptor(
            name='ACCOUNT_STATUS_CLOSED', index=3, number=3,
            serialized_options=None,
            type=None,
            create_key=_descriptor._internal_create_key),
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=1506,
    serialized_end=1629,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTSTATUS)

# Public module-level wrapper around the AccountStatus enum descriptor.
AccountStatus = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTSTATUS)
# Module-level integer aliases for the enum values above, so callers can
# write users_pb2.ACCOUNT_TYPE_TINKOFF without going through the wrapper.
# The numbers mirror those declared in users.proto (generated code).
ACCOUNT_TYPE_UNSPECIFIED = 0
ACCOUNT_TYPE_TINKOFF = 1
ACCOUNT_TYPE_TINKOFF_IIS = 2
ACCOUNT_TYPE_INVEST_BOX = 3
ACCOUNT_STATUS_UNSPECIFIED = 0
ACCOUNT_STATUS_NEW = 1
ACCOUNT_STATUS_OPEN = 2
ACCOUNT_STATUS_CLOSED = 3
_GETACCOUNTSREQUEST = _descriptor.Descriptor(
name='GetAccountsRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=121,
)
_GETACCOUNTSRESPONSE = _descriptor.Descriptor(
name='GetAccountsResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='accounts', full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse.accounts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=210,
)
_ACCOUNT = _descriptor.Descriptor(
name='Account',
full_name='tinkoff.public.invest.api.contract.v1.Account',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tinkoff.public.invest.api.contract.v1.Account.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='tinkoff.public.invest.api.contract.v1.Account.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tinkoff.public.invest.api.contract.v1.Account.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='tinkoff.public.invest.api.contract.v1.Account.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='opened_date', full_name='tinkoff.public.invest.api.contract.v1.Account.opened_date', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='closed_date', full_name='tinkoff.public.invest.api.contract.v1.Account.closed_date', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=482,
)
_GETMARGINATTRIBUTESREQUEST = _descriptor.Descriptor(
name='GetMarginAttributesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=484,
serialized_end=532,
)
_GETMARGINATTRIBUTESRESPONSE = _descriptor.Descriptor(
name='GetMarginAttributesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='liquid_portfolio', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.liquid_portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='starting_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.starting_margin', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='minimal_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.minimal_margin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='funds_sufficiency_level', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.funds_sufficiency_level', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount_of_missing_funds', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.amount_of_missing_funds', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=959,
)
_GETUSERTARIFFREQUEST = _descriptor.Descriptor(
name='GetUserTariffRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=961,
serialized_end=983,
)
_GETUSERTARIFFRESPONSE = _descriptor.Descriptor(
name='GetUserTariffResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='unary_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.unary_limits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.stream_limits', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=1157,
)
_UNARYLIMIT = _descriptor.Descriptor(
name='UnaryLimit',
full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit_per_minute', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.limit_per_minute', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='methods', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1159,
serialized_end=1214,
)
_STREAMLIMIT = _descriptor.Descriptor(
name='StreamLimit',
full_name='tinkoff.public.invest.api.contract.v1.StreamLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.limit', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='streams', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.streams', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1216,
serialized_end=1261,
)
_GETINFOREQUEST = _descriptor.Descriptor(
name='GetInfoRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1263,
serialized_end=1279,
)
_GETINFORESPONSE = _descriptor.Descriptor(
name='GetInfoResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='prem_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.prem_status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qual_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qual_status', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qualified_for_work_with', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qualified_for_work_with', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1281,
serialized_end=1373,
)
# Resolve cross-references between the descriptors defined above: attach
# the concrete message/enum descriptor to every field that uses it
# (including types imported from timestamp.proto and common.proto), then
# register every message and enum with the file descriptor so protobuf
# reflection can look them up by name.  Generated code — do not hand-edit.
_GETACCOUNTSRESPONSE.fields_by_name['accounts'].message_type = _ACCOUNT
_ACCOUNT.fields_by_name['type'].enum_type = _ACCOUNTTYPE
_ACCOUNT.fields_by_name['status'].enum_type = _ACCOUNTSTATUS
_ACCOUNT.fields_by_name['opened_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNT.fields_by_name['closed_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['liquid_portfolio'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['starting_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['minimal_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['funds_sufficiency_level'].message_type = common__pb2._QUOTATION
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['amount_of_missing_funds'].message_type = common__pb2._MONEYVALUE
_GETUSERTARIFFRESPONSE.fields_by_name['unary_limits'].message_type = _UNARYLIMIT
_GETUSERTARIFFRESPONSE.fields_by_name['stream_limits'].message_type = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetAccountsRequest'] = _GETACCOUNTSREQUEST
DESCRIPTOR.message_types_by_name['GetAccountsResponse'] = _GETACCOUNTSRESPONSE
DESCRIPTOR.message_types_by_name['Account'] = _ACCOUNT
DESCRIPTOR.message_types_by_name['GetMarginAttributesRequest'] = _GETMARGINATTRIBUTESREQUEST
DESCRIPTOR.message_types_by_name['GetMarginAttributesResponse'] = _GETMARGINATTRIBUTESRESPONSE
DESCRIPTOR.message_types_by_name['GetUserTariffRequest'] = _GETUSERTARIFFREQUEST
DESCRIPTOR.message_types_by_name['GetUserTariffResponse'] = _GETUSERTARIFFRESPONSE
DESCRIPTOR.message_types_by_name['UnaryLimit'] = _UNARYLIMIT
DESCRIPTOR.message_types_by_name['StreamLimit'] = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetInfoRequest'] = _GETINFOREQUEST
DESCRIPTOR.message_types_by_name['GetInfoResponse'] = _GETINFORESPONSE
DESCRIPTOR.enum_types_by_name['AccountType'] = _ACCOUNTTYPE
DESCRIPTOR.enum_types_by_name['AccountStatus'] = _ACCOUNTSTATUS
# Make the fully-wired file descriptor available from the default symbol
# database (required before the message classes below can be built).
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors above via
# protobuf reflection, and register each with the symbol database.  These
# class names (GetAccountsRequest, Account, ...) are the module's public
# API.  Generated code — do not hand-edit.
GetAccountsRequest = _reflection.GeneratedProtocolMessageType('GetAccountsRequest', (_message.Message,), {
    'DESCRIPTOR' : _GETACCOUNTSREQUEST,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsRequest)
    })
_sym_db.RegisterMessage(GetAccountsRequest)

GetAccountsResponse = _reflection.GeneratedProtocolMessageType('GetAccountsResponse', (_message.Message,), {
    'DESCRIPTOR' : _GETACCOUNTSRESPONSE,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsResponse)
    })
_sym_db.RegisterMessage(GetAccountsResponse)

Account = _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {
    'DESCRIPTOR' : _ACCOUNT,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Account)
    })
_sym_db.RegisterMessage(Account)

GetMarginAttributesRequest = _reflection.GeneratedProtocolMessageType('GetMarginAttributesRequest', (_message.Message,), {
    'DESCRIPTOR' : _GETMARGINATTRIBUTESREQUEST,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest)
    })
_sym_db.RegisterMessage(GetMarginAttributesRequest)

GetMarginAttributesResponse = _reflection.GeneratedProtocolMessageType('GetMarginAttributesResponse', (_message.Message,), {
    'DESCRIPTOR' : _GETMARGINATTRIBUTESRESPONSE,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse)
    })
_sym_db.RegisterMessage(GetMarginAttributesResponse)

GetUserTariffRequest = _reflection.GeneratedProtocolMessageType('GetUserTariffRequest', (_message.Message,), {
    'DESCRIPTOR' : _GETUSERTARIFFREQUEST,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffRequest)
    })
_sym_db.RegisterMessage(GetUserTariffRequest)

GetUserTariffResponse = _reflection.GeneratedProtocolMessageType('GetUserTariffResponse', (_message.Message,), {
    'DESCRIPTOR' : _GETUSERTARIFFRESPONSE,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffResponse)
    })
_sym_db.RegisterMessage(GetUserTariffResponse)

UnaryLimit = _reflection.GeneratedProtocolMessageType('UnaryLimit', (_message.Message,), {
    'DESCRIPTOR' : _UNARYLIMIT,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.UnaryLimit)
    })
_sym_db.RegisterMessage(UnaryLimit)

StreamLimit = _reflection.GeneratedProtocolMessageType('StreamLimit', (_message.Message,), {
    'DESCRIPTOR' : _STREAMLIMIT,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.StreamLimit)
    })
_sym_db.RegisterMessage(StreamLimit)

GetInfoRequest = _reflection.GeneratedProtocolMessageType('GetInfoRequest', (_message.Message,), {
    'DESCRIPTOR' : _GETINFOREQUEST,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoRequest)
    })
_sym_db.RegisterMessage(GetInfoRequest)

GetInfoResponse = _reflection.GeneratedProtocolMessageType('GetInfoResponse', (_message.Message,), {
    'DESCRIPTOR' : _GETINFORESPONSE,
    '__module__' : 'users_pb2'
    # @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoResponse)
    })
_sym_db.RegisterMessage(GetInfoResponse)
# Clear the cached options object so option accessors re-derive from the
# serialized descriptor (standard protoc-generated epilogue).
DESCRIPTOR._options = None

# Descriptor for the UsersService gRPC service and its four unary RPCs
# (GetAccounts, GetMarginAttributes, GetUserTariff, GetInfo).  The actual
# client stub / server servicer classes are generated separately (in the
# companion *_pb2_grpc module — not part of this file).  Generated code —
# do not hand-edit; serialized offsets must match serialized_pb.
_USERSSERVICE = _descriptor.ServiceDescriptor(
    name='UsersService',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService',
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_start=1632,
    serialized_end=2203,
    methods=[
        _descriptor.MethodDescriptor(
            name='GetAccounts',
            full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetAccounts',
            index=0,
            containing_service=None,
            input_type=_GETACCOUNTSREQUEST,
            output_type=_GETACCOUNTSRESPONSE,
            serialized_options=None,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.MethodDescriptor(
            name='GetMarginAttributes',
            full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetMarginAttributes',
            index=1,
            containing_service=None,
            input_type=_GETMARGINATTRIBUTESREQUEST,
            output_type=_GETMARGINATTRIBUTESRESPONSE,
            serialized_options=None,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.MethodDescriptor(
            name='GetUserTariff',
            full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetUserTariff',
            index=2,
            containing_service=None,
            input_type=_GETUSERTARIFFREQUEST,
            output_type=_GETUSERTARIFFRESPONSE,
            serialized_options=None,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.MethodDescriptor(
            name='GetInfo',
            full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetInfo',
            index=3,
            containing_service=None,
            input_type=_GETINFOREQUEST,
            output_type=_GETINFORESPONSE,
            serialized_options=None,
            create_key=_descriptor._internal_create_key,
        ),
    ])
_sym_db.RegisterServiceDescriptor(_USERSSERVICE)
DESCRIPTOR.services_by_name['UsersService'] = _USERSSERVICE
# @@protoc_insertion_point(module_scope)
| 42.347084
| 3,323
| 0.777158
|
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import include.common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\021Tinkoff/investAPI\242\002\005TIAPI\252\002\024Tinkoff.InvestAPI.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0busers.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0c\x63ommon.proto\"\x14\n\x12GetAccountsRequest\"W\n\x13GetAccountsResponse\x12@\n\x08\x61\x63\x63ounts\x18\x01 \x03(\x0b\x32..tinkoff.public.invest.api.contract.v1.Account\"\x8d\x02\n\x07\x41\x63\x63ount\x12\n\n\x02id\x18\x01 \x01(\t\x12@\n\x04type\x18\x02 \x01(\x0e\x32\x32.tinkoff.public.invest.api.contract.v1.AccountType\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x44\n\x06status\x18\x04 \x01(\x0e\x32\x34.tinkoff.public.invest.api.contract.v1.AccountStatus\x12/\n\x0bopened_date\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63losed_date\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"0\n\x1aGetMarginAttributesRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xa8\x03\n\x1bGetMarginAttributesResponse\x12K\n\x10liquid_portfolio\x18\x01 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12J\n\x0fstarting_margin\x18\x02 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12I\n\x0eminimal_margin\x18\x03 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12Q\n\x17\x66unds_sufficiency_level\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12R\n\x17\x61mount_of_missing_funds\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"\x16\n\x14GetUserTariffRequest\"\xab\x01\n\x15GetUserTariffResponse\x12G\n\x0cunary_limits\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.UnaryLimit\x12I\n\rstream_limits\x18\x02 \x03(\x0b\x32\x32.tinkoff.public.invest.api.contract.v1.StreamLimit\"7\n\nUnaryLimit\x12\x18\n\x10limit_per_minute\x18\x01 \x01(\x05\x12\x0f\n\x07methods\x18\x02 \x03(\t\"-\n\x0bStreamLimit\x12\r\n\x05limit\x18\x01 \x01(\x05\x12\x0f\n\x07streams\x18\x02 \x03(\t\"\x10\n\x0eGetInfoRequest\"\\\n\x0fGetInfoResponse\x12\x13\n\x0bprem_status\x18\x01 \x01(\x08\x12\x13\n\x0bqual_status\x18\x02 
\x01(\x08\x12\x1f\n\x17qualified_for_work_with\x18\x03 \x03(\t*\x80\x01\n\x0b\x41\x63\x63ountType\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_UNSPECIFIED\x10\x00\x12\x18\n\x14\x41\x43\x43OUNT_TYPE_TINKOFF\x10\x01\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_TINKOFF_IIS\x10\x02\x12\x1b\n\x17\x41\x43\x43OUNT_TYPE_INVEST_BOX\x10\x03*{\n\rAccountStatus\x12\x1e\n\x1a\x41\x43\x43OUNT_STATUS_UNSPECIFIED\x10\x00\x12\x16\n\x12\x41\x43\x43OUNT_STATUS_NEW\x10\x01\x12\x17\n\x13\x41\x43\x43OUNT_STATUS_OPEN\x10\x02\x12\x19\n\x15\x41\x43\x43OUNT_STATUS_CLOSED\x10\x03\x32\xbb\x04\n\x0cUsersService\x12\x84\x01\n\x0bGetAccounts\x12\x39.tinkoff.public.invest.api.contract.v1.GetAccountsRequest\x1a:.tinkoff.public.invest.api.contract.v1.GetAccountsResponse\x12\x9c\x01\n\x13GetMarginAttributes\x12\x41.tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest\x1a\x42.tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse\x12\x8a\x01\n\rGetUserTariff\x12;.tinkoff.public.invest.api.contract.v1.GetUserTariffRequest\x1a<.tinkoff.public.invest.api.contract.v1.GetUserTariffResponse\x12x\n\x07GetInfo\x12\x35.tinkoff.public.invest.api.contract.v1.GetInfoRequest\x1a\x36.tinkoff.public.invest.api.contract.v1.GetInfoResponseBf\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x11Tinkoff/investAPI\xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestAPI.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,common__pb2.DESCRIPTOR,])
# Generated descriptor for the AccountType proto enum (account kinds:
# regular Tinkoff, IIS, invest-box). Do not edit by hand — produced by protoc.
_ACCOUNTTYPE = _descriptor.EnumDescriptor(
  name='AccountType',
  full_name='tinkoff.public.invest.api.contract.v1.AccountType',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_TYPE_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_TYPE_TINKOFF', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_TYPE_TINKOFF_IIS', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_TYPE_INVEST_BOX', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum inside DESCRIPTOR's serialized_pb blob.
  serialized_start=1376,
  serialized_end=1504,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTTYPE)
# Public wrapper exposing the enum values as module attributes (AccountType.Value/Name).
AccountType = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTTYPE)
# Generated descriptor for the AccountStatus proto enum (account lifecycle:
# new -> open -> closed). Do not edit by hand — produced by protoc.
_ACCOUNTSTATUS = _descriptor.EnumDescriptor(
  name='AccountStatus',
  full_name='tinkoff.public.invest.api.contract.v1.AccountStatus',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_STATUS_UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_STATUS_NEW', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_STATUS_OPEN', index=2, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='ACCOUNT_STATUS_CLOSED', index=3, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum inside DESCRIPTOR's serialized_pb blob.
  serialized_start=1506,
  serialized_end=1629,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTSTATUS)
# Public wrapper exposing the enum values as module attributes (AccountStatus.Value/Name).
AccountStatus = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTSTATUS)
# Module-level integer aliases for the AccountType enum values, so callers
# can use users_pb2.ACCOUNT_TYPE_TINKOFF without going through the wrapper.
ACCOUNT_TYPE_UNSPECIFIED = 0
ACCOUNT_TYPE_TINKOFF = 1
ACCOUNT_TYPE_TINKOFF_IIS = 2
ACCOUNT_TYPE_INVEST_BOX = 3
# Module-level integer aliases for the AccountStatus enum values.
ACCOUNT_STATUS_UNSPECIFIED = 0
ACCOUNT_STATUS_NEW = 1
ACCOUNT_STATUS_OPEN = 2
ACCOUNT_STATUS_CLOSED = 3
_GETACCOUNTSREQUEST = _descriptor.Descriptor(
name='GetAccountsRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=121,
)
_GETACCOUNTSRESPONSE = _descriptor.Descriptor(
name='GetAccountsResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='accounts', full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse.accounts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=210,
)
_ACCOUNT = _descriptor.Descriptor(
name='Account',
full_name='tinkoff.public.invest.api.contract.v1.Account',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tinkoff.public.invest.api.contract.v1.Account.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='tinkoff.public.invest.api.contract.v1.Account.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tinkoff.public.invest.api.contract.v1.Account.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='tinkoff.public.invest.api.contract.v1.Account.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='opened_date', full_name='tinkoff.public.invest.api.contract.v1.Account.opened_date', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='closed_date', full_name='tinkoff.public.invest.api.contract.v1.Account.closed_date', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=482,
)
_GETMARGINATTRIBUTESREQUEST = _descriptor.Descriptor(
name='GetMarginAttributesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=484,
serialized_end=532,
)
_GETMARGINATTRIBUTESRESPONSE = _descriptor.Descriptor(
name='GetMarginAttributesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='liquid_portfolio', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.liquid_portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='starting_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.starting_margin', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='minimal_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.minimal_margin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='funds_sufficiency_level', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.funds_sufficiency_level', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount_of_missing_funds', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.amount_of_missing_funds', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=959,
)
_GETUSERTARIFFREQUEST = _descriptor.Descriptor(
name='GetUserTariffRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=961,
serialized_end=983,
)
_GETUSERTARIFFRESPONSE = _descriptor.Descriptor(
name='GetUserTariffResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='unary_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.unary_limits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.stream_limits', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=1157,
)
_UNARYLIMIT = _descriptor.Descriptor(
name='UnaryLimit',
full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit_per_minute', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.limit_per_minute', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='methods', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1159,
serialized_end=1214,
)
_STREAMLIMIT = _descriptor.Descriptor(
name='StreamLimit',
full_name='tinkoff.public.invest.api.contract.v1.StreamLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.limit', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='streams', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.streams', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1216,
serialized_end=1261,
)
_GETINFOREQUEST = _descriptor.Descriptor(
name='GetInfoRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1263,
serialized_end=1279,
)
_GETINFORESPONSE = _descriptor.Descriptor(
name='GetInfoResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='prem_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.prem_status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qual_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qual_status', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qualified_for_work_with', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qualified_for_work_with', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1281,
serialized_end=1373,
)
# --- Cross-link the generated descriptors ------------------------------------
# Message- and enum-typed fields are declared above with message_type/enum_type
# set to None; protoc resolves them here once every descriptor object exists.
_GETACCOUNTSRESPONSE.fields_by_name['accounts'].message_type = _ACCOUNT
_ACCOUNT.fields_by_name['type'].enum_type = _ACCOUNTTYPE
_ACCOUNT.fields_by_name['status'].enum_type = _ACCOUNTSTATUS
# Timestamp fields come from the imported google.protobuf.timestamp module.
_ACCOUNT.fields_by_name['opened_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNT.fields_by_name['closed_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
# Money/quotation fields come from the sibling common.proto module.
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['liquid_portfolio'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['starting_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['minimal_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['funds_sufficiency_level'].message_type = common__pb2._QUOTATION
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['amount_of_missing_funds'].message_type = common__pb2._MONEYVALUE
_GETUSERTARIFFRESPONSE.fields_by_name['unary_limits'].message_type = _UNARYLIMIT
_GETUSERTARIFFRESPONSE.fields_by_name['stream_limits'].message_type = _STREAMLIMIT
# Register every message and enum under its simple name on the file descriptor.
DESCRIPTOR.message_types_by_name['GetAccountsRequest'] = _GETACCOUNTSREQUEST
DESCRIPTOR.message_types_by_name['GetAccountsResponse'] = _GETACCOUNTSRESPONSE
DESCRIPTOR.message_types_by_name['Account'] = _ACCOUNT
DESCRIPTOR.message_types_by_name['GetMarginAttributesRequest'] = _GETMARGINATTRIBUTESREQUEST
DESCRIPTOR.message_types_by_name['GetMarginAttributesResponse'] = _GETMARGINATTRIBUTESRESPONSE
DESCRIPTOR.message_types_by_name['GetUserTariffRequest'] = _GETUSERTARIFFREQUEST
DESCRIPTOR.message_types_by_name['GetUserTariffResponse'] = _GETUSERTARIFFRESPONSE
DESCRIPTOR.message_types_by_name['UnaryLimit'] = _UNARYLIMIT
DESCRIPTOR.message_types_by_name['StreamLimit'] = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetInfoRequest'] = _GETINFOREQUEST
DESCRIPTOR.message_types_by_name['GetInfoResponse'] = _GETINFORESPONSE
DESCRIPTOR.enum_types_by_name['AccountType'] = _ACCOUNTTYPE
DESCRIPTOR.enum_types_by_name['AccountStatus'] = _ACCOUNTSTATUS
# Make the file descriptor discoverable through the default symbol database.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAccountsRequest = _reflection.GeneratedProtocolMessageType('GetAccountsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsRequest)
})
_sym_db.RegisterMessage(GetAccountsRequest)
GetAccountsResponse = _reflection.GeneratedProtocolMessageType('GetAccountsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsResponse)
})
_sym_db.RegisterMessage(GetAccountsResponse)
Account = _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {
'DESCRIPTOR' : _ACCOUNT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Account)
})
_sym_db.RegisterMessage(Account)
GetMarginAttributesRequest = _reflection.GeneratedProtocolMessageType('GetMarginAttributesRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMARGINATTRIBUTESREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest)
})
_sym_db.RegisterMessage(GetMarginAttributesRequest)
GetMarginAttributesResponse = _reflection.GeneratedProtocolMessageType('GetMarginAttributesResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMARGINATTRIBUTESRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse)
})
_sym_db.RegisterMessage(GetMarginAttributesResponse)
GetUserTariffRequest = _reflection.GeneratedProtocolMessageType('GetUserTariffRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUSERTARIFFREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffRequest)
})
_sym_db.RegisterMessage(GetUserTariffRequest)
GetUserTariffResponse = _reflection.GeneratedProtocolMessageType('GetUserTariffResponse', (_message.Message,), {
'DESCRIPTOR' : _GETUSERTARIFFRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffResponse)
})
_sym_db.RegisterMessage(GetUserTariffResponse)
UnaryLimit = _reflection.GeneratedProtocolMessageType('UnaryLimit', (_message.Message,), {
'DESCRIPTOR' : _UNARYLIMIT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.UnaryLimit)
})
_sym_db.RegisterMessage(UnaryLimit)
StreamLimit = _reflection.GeneratedProtocolMessageType('StreamLimit', (_message.Message,), {
'DESCRIPTOR' : _STREAMLIMIT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.StreamLimit)
})
_sym_db.RegisterMessage(StreamLimit)
GetInfoRequest = _reflection.GeneratedProtocolMessageType('GetInfoRequest', (_message.Message,), {
'DESCRIPTOR' : _GETINFOREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoRequest)
})
_sym_db.RegisterMessage(GetInfoRequest)
GetInfoResponse = _reflection.GeneratedProtocolMessageType('GetInfoResponse', (_message.Message,), {
'DESCRIPTOR' : _GETINFORESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoResponse)
})
_sym_db.RegisterMessage(GetInfoResponse)
DESCRIPTOR._options = None
# Generated descriptor for the UsersService gRPC service: account listing,
# margin attributes, tariff limits, and user info. Do not edit by hand.
_USERSSERVICE = _descriptor.ServiceDescriptor(
  name='UsersService',
  full_name='tinkoff.public.invest.api.contract.v1.UsersService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  # Byte offsets of this service inside DESCRIPTOR's serialized_pb blob.
  serialized_start=1632,
  serialized_end=2203,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetAccounts',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetAccounts',
    index=0,
    containing_service=None,
    input_type=_GETACCOUNTSREQUEST,
    output_type=_GETACCOUNTSRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetMarginAttributes',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetMarginAttributes',
    index=1,
    containing_service=None,
    input_type=_GETMARGINATTRIBUTESREQUEST,
    output_type=_GETMARGINATTRIBUTESRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetUserTariff',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetUserTariff',
    index=2,
    containing_service=None,
    input_type=_GETUSERTARIFFREQUEST,
    output_type=_GETUSERTARIFFRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetInfo',
    full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetInfo',
    index=3,
    containing_service=None,
    input_type=_GETINFOREQUEST,
    output_type=_GETINFORESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_USERSSERVICE)
DESCRIPTOR.services_by_name['UsersService'] = _USERSSERVICE
# @@protoc_insertion_point(module_scope)
| true
| true
|
790d7b95c2ddf064518121d57e15ffbe76b1f1e1
| 5,575
|
py
|
Python
|
test/expected/python.tornado/actual_base/ttypes.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
test/expected/python.tornado/actual_base/ttypes.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
test/expected/python.tornado/actual_base/ttypes.py
|
dustyholmes-wf/frugal
|
915ccfc58fcc9baabc4549c522e3acd2975a2e0b
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from frugal.util import make_hashable
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
class base_health_condition(int):
    """Thrift-style enum of health-check states, carried as plain ints.

    ``_VALUES_TO_NAMES`` / ``_NAMES_TO_VALUES`` provide the two lookup
    directions used when (de)serializing and pretty-printing values.
    """

    PASS = 1
    WARN = 2
    FAIL = 3
    UNKNOWN = 4

    _VALUES_TO_NAMES = {
        1: "PASS",
        2: "WARN",
        3: "FAIL",
        4: "UNKNOWN",
    }

    # The reverse mapping is derived mechanically so the two dicts can
    # never drift apart.
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class thing(object):
    """Generated Thrift struct with two optional fields.

    Attributes:
     - an_id
     - a_string
    """

    def __init__(self, an_id=None, a_string=None):
        self.an_id = an_id
        self.a_string = a_string

    def read(self, iprot):
        """Populate this struct from the given Thrift input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            # Only consume a field when both its id and wire type match;
            # anything else is skipped for forward compatibility.
            if field_id == 1 and field_type == TType.I32:
                self.an_id = iprot.readI32()
            elif field_id == 2 and field_type == TType.STRING:
                self.a_string = iprot.readString()
            else:
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this struct to the given Thrift output protocol."""
        self.validate()
        oprot.writeStructBegin('thing')
        # None-valued optional fields are simply omitted from the wire.
        if self.an_id is not None:
            oprot.writeFieldBegin('an_id', TType.I32, 1)
            oprot.writeI32(self.an_id)
            oprot.writeFieldEnd()
        if self.a_string is not None:
            oprot.writeFieldBegin('a_string', TType.STRING, 2)
            oprot.writeString(self.a_string)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so nothing to check.
        return

    def __hash__(self):
        acc = 17
        acc = (acc * 31) ^ hash(make_hashable(self.an_id))
        acc = (acc * 31) ^ hash(make_hashable(self.a_string))
        return acc

    def __repr__(self):
        attrs = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class nested_thing(object):
    """Generated Thrift struct holding an optional list of ``thing`` structs.

    Attributes:
     - things
    """

    def __init__(self, things=None):
        self.things = things

    def read(self, iprot):
        """Populate this struct from the given Thrift input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.LIST:
                # Replace any existing value, then fill element by element.
                self.things = []
                (_, size) = iprot.readListBegin()
                for _ in range(size):
                    entry = thing()
                    entry.read(iprot)
                    self.things.append(entry)
                iprot.readListEnd()
            else:
                # Unknown field id or mismatched wire type: skip it.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this struct to the given Thrift output protocol."""
        self.validate()
        oprot.writeStructBegin('nested_thing')
        if self.things is not None:
            oprot.writeFieldBegin('things', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.things))
            for entry in self.things:
                entry.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so nothing to check.
        return

    def __hash__(self):
        acc = 17
        acc = (acc * 31) ^ hash(make_hashable(self.things))
        return acc

    def __repr__(self):
        attrs = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
class api_exception(TException):
    """Generated Thrift exception carrying no fields of its own."""

    def read(self, iprot):
        """Consume (and discard) any fields from the input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, _) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            # No declared fields: everything on the wire is skipped.
            iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this (empty) exception struct."""
        self.validate()
        oprot.writeStructBegin('api_exception')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields, so nothing to check.
        return

    def __str__(self):
        return repr(self)

    def __hash__(self):
        # Constant: every api_exception instance hashes identically.
        return 17

    def __repr__(self):
        attrs = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 27.463054
| 84
| 0.535785
|
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from frugal.util import make_hashable
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
class base_health_condition(int):
    """Enum-like int subclass listing the possible health states."""

    PASS = 1
    WARN = 2
    FAIL = 3
    UNKNOWN = 4

    # Bidirectional lookup tables between wire values and symbolic names.
    _VALUES_TO_NAMES = {1: "PASS", 2: "WARN", 3: "FAIL", 4: "UNKNOWN"}
    _NAMES_TO_VALUES = {"PASS": 1, "WARN": 2, "FAIL": 3, "UNKNOWN": 4}
class thing(object):
    """Thrift-generated struct: an i32 'an_id' (field 1) and a string
    'a_string' (field 2)."""

    def __init__(self, an_id=None, a_string=None):
        self.an_id = an_id
        self.a_string = a_string

    def read(self, iprot):
        """Populate fields from the thrift protocol *iprot*.

        Unknown field ids and fields with an unexpected wire type are
        skipped, per standard thrift forward-compatibility rules.
        """
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.an_id = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.a_string = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this struct to *oprot*; unset (None) fields are omitted."""
        self.validate()
        oprot.writeStructBegin('thing')
        if self.an_id is not None:
            oprot.writeFieldBegin('an_id', TType.I32, 1)
            oprot.writeI32(self.an_id)
            oprot.writeFieldEnd()
        if self.a_string is not None:
            oprot.writeFieldBegin('a_string', TType.STRING, 2)
            oprot.writeString(self.a_string)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # Thrift-style accumulator: fold each field's hash in turn.
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.an_id))
        value = (value * 31) ^ hash(make_hashable(self.a_string))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Explicit for Python 2, which does not derive __ne__ from __eq__.
        return not (self == other)
class nested_thing(object):
    """Thrift-generated struct holding a list of 'thing' structs (field 1)."""

    def __init__(self, things=None):
        self.things = things

    def read(self, iprot):
        """Populate 'things' from *iprot*; unknown/mistyped fields are skipped."""
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    self.things = []
                    # readListBegin returns (element-type, size); only the
                    # size is needed here.
                    (_, elem78) = iprot.readListBegin()
                    for _ in range(elem78):
                        elem79 = thing()
                        elem79.read(iprot)
                        self.things.append(elem79)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize to *oprot*; the list is omitted when 'things' is None."""
        self.validate()
        oprot.writeStructBegin('nested_thing')
        if self.things is not None:
            oprot.writeFieldBegin('things', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.things))
            for elem80 in self.things:
                elem80.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields to enforce.
        return

    def __hash__(self):
        # Thrift-style 31-multiplier accumulator over all fields.
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.things))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Explicit for Python 2, which does not derive __ne__ from __eq__.
        return not (self == other)
class api_exception(TException):
    """Thrift-generated exception with no fields of its own."""

    def read(self, iprot):
        """Consume every field from *iprot* until STOP, discarding all."""
        iprot.readStructBegin()
        while True:
            fname, ftype, fid = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Write an empty struct frame to *oprot*."""
        self.validate()
        oprot.writeStructBegin('api_exception')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields — always valid."""
        return

    def __str__(self):
        return repr(self)

    def __hash__(self):
        # No fields to fold in, so the seed is the whole hash.
        return 17

    def __repr__(self):
        body = ', '.join('%s=%r' % kv for kv in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, body)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| true
| true
|
790d7d696ec7f804417edc31f8fffd6f6e9ddc48
| 26,411
|
py
|
Python
|
demos/gce_demo.py
|
atsaki/libcloud
|
ae85479e835494e196e2f6e79aae9a475603d8ac
|
[
"Apache-2.0"
] | 3
|
2015-09-11T15:42:16.000Z
|
2021-05-12T01:10:05.000Z
|
demos/gce_demo.py
|
atsaki/libcloud
|
ae85479e835494e196e2f6e79aae9a475603d8ac
|
[
"Apache-2.0"
] | null | null | null |
demos/gce_demo.py
|
atsaki/libcloud
|
ae85479e835494e196e2f6e79aae9a475603d8ac
|
[
"Apache-2.0"
] | 3
|
2016-02-08T23:38:18.000Z
|
2019-11-05T00:31:34.000Z
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This example performs several tasks on Google Compute Platform. It can be
# run directly or can be imported into an interactive python session. This
# can also serve as live integration tests.
#
# To run directly, use python 2.7 or greater:
# - $ python gce_demo.py --help # to see the help screen
# - $ python gce_demo.py # to run all demos / tests
#
# To run interactively:
# - Make sure you have valid values in secrets.py
# (For more information about setting up your credentials, see the
# libcloud/common/google.py docstring)
# - Run 'python' in this directory, then:
# import gce_demo
# gce = gce_demo.get_gce_driver()
# gce.list_nodes()
# etc.
# - Or, to run the full demo from the interactive python shell:
# import gce_demo
# gce_demo.CLEANUP = False # optional
# gce_demo.MAX_NODES = 4 # optional
# gce_demo.DATACENTER = 'us-central1-a' # optional
# gce_demo.main_compute() # 'compute' only demo
# gce_demo.main_load_balancer() # 'load_balancer' only demo
# gce_demo.main_dns() # 'dns only demo
# gce_demo.main() # all demos / tests
import os.path
import sys
import datetime
import time
try:
import argparse
except:
print('This script uses the python "argparse" module. Please use Python '
'2.7 or greater.')
raise
try:
import secrets
except ImportError:
print('"demos/secrets.py" not found.\n\n'
'Please copy secrets.py-dist to secrets.py and update the GCE* '
'values with appropriate authentication information.\n'
'Additional information about setting these values can be found '
'in the docstring for:\n'
'libcloud/common/google.py\n')
sys.exit(1)
# Add parent dir of this file's dir to sys.path (OS-agnostically)
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import ResourceNotFoundError
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.dns.types import Provider as Provider_dns
from libcloud.dns.providers import get_driver as get_driver_dns
from libcloud.dns.base import Record, Zone
from libcloud.utils.py3 import PY3
if PY3:
import urllib.request as url_req
else:
import urllib2 as url_req
# Maximum number of 1-CPU nodes to allow to run simultaneously.
MAX_NODES = 5

# String that all resource names created by the demo will start with.
# WARNING: any resource whose name matches this prefix will be destroyed.
DEMO_BASE_NAME = 'lct'

# Datacenter (GCE zone) to create resources in.
DATACENTER = 'us-central1-f'

# Clean up resources at the end of the run (set to False to inspect
# resources afterwards).  Resources are always cleaned at the beginning.
CLEANUP = True

args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})

# Copy before mutating so the dict in secrets.py is left untouched
# (also keeps Python 2.5 compatibility, per the original comment).
kwargs = kwargs.copy()
kwargs['datacenter'] = DATACENTER
# ==== HELPER FUNCTIONS ====
def get_gce_driver():
    """Instantiate a GCE compute driver from the credentials in secrets.py."""
    driver_cls = get_driver(Provider.GCE)
    return driver_cls(*args, **kwargs)
def get_gcelb_driver(gce_driver=None):
    """Build a GCE load-balancer driver.

    The LB driver performs all of its API calls through a GCE compute
    driver; pass one in to share it, otherwise a fresh compute driver is
    created from the same credentials.
    """
    lb_cls = get_driver_lb(Provider_lb.GCE)
    if gce_driver:
        return lb_cls(gce_driver=gce_driver)
    return lb_cls(*args, **kwargs)
def get_dns_driver(gce_driver=None):
    """Build a Google Cloud DNS driver.

    Like the LB driver, the DNS driver routes its API calls through a GCE
    compute driver — either the one supplied, or a new one built from the
    same credentials.
    """
    dns_cls = get_driver_dns(Provider_dns.GOOGLE)
    if gce_driver:
        return dns_cls(gce_driver=gce_driver)
    return dns_cls(*args, **kwargs)
def display(title, resource_list=None):
    """
    Display a list of resources.

    Lines for resources whose name starts with DEMO_BASE_NAME are
    prefixed with '=>' so demo-created resources stand out.

    :param title: String to be printed at the heading of the list.
    :type title: ``str``

    :param resource_list: List of resources to display
    :type resource_list: Any ``object`` with a C{name} attribute
    """
    # Fixed: the default used to be a mutable '[]'.  It was never mutated
    # here, but None-sentinel is the safe, idiomatic form.
    if resource_list is None:
        resource_list = []
    print('=> %s' % title)
    for item in resource_list:
        if isinstance(item, Record):
            # DNS records: show name and record type.
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> name=%s, type=%s' % (item.name, item.type))
            else:
                print('   name=%s, type=%s' % (item.name, item.type))
        elif isinstance(item, Zone):
            # Managed zones: show id and domain.
            if item.domain.startswith(DEMO_BASE_NAME):
                print('=> name=%s, dnsname=%s' % (item.id, item.domain))
            else:
                print('   name=%s, dnsname=%s' % (item.id, item.domain))
        elif hasattr(item, 'name'):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> %s' % item.name)
            else:
                print('   %s' % item.name)
        else:
            # Fallback: assume a plain string.
            if item.startswith(DEMO_BASE_NAME):
                print('=> %s' % item)
            else:
                print('   %s' % item)
def cleanup_only():
    """List all resources, then destroy everything whose name starts with
    DEMO_BASE_NAME, without running any of the demos themselves."""
    start_time = datetime.datetime.now()
    display('Clean-up start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # This can return everything, but there is a large amount of overlap,
    # so we'll just get the sizes from the current zone.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    # These are global
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    # == Clean up any old demo resources ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)
    # Volumes are re-listed after the nodes are gone: boot disks may only
    # become deletable once their node has been destroyed.
    volumes = gce.list_volumes()
    clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
def clean_up(gce, base_name, node_list=None, resource_list=None):
    """
    Destroy all resources that have a name beginning with 'base_name'.

    :param base_name: String with the first part of the name of resources
                      to destroy
    :type base_name: ``str``

    :keyword node_list: List of nodes to consider for deletion
    :type node_list: ``list`` of :class:`Node`

    :keyword resource_list: List of resources to consider for deletion
    :type resource_list: ``list`` of I{Resource Objects}
    """
    if node_list is None:
        node_list = []
    if resource_list is None:
        resource_list = []
    # Use ex_destroy_multiple_nodes to destroy nodes in one batch call.
    del_nodes = [node for node in node_list if node.name.startswith(base_name)]
    result = gce.ex_destroy_multiple_nodes(del_nodes)
    for i, success in enumerate(result):
        if success:
            display('   Deleted %s' % del_nodes[i].name)
        else:
            display('   Failed to delete %s' % del_nodes[i].name)
    # Destroy everything else with just the destroy method.
    for resrc in resource_list:
        if not resrc.name.startswith(base_name):
            continue
        try:
            resrc.destroy()
        except ResourceNotFoundError:
            display('   Not found: %s (%s)' % (resrc.name,
                                               resrc.__class__.__name__))
        except Exception:
            # Fixed: was a bare 'except:'.  Narrowed so KeyboardInterrupt /
            # SystemExit propagate untouched; real errors are still logged
            # and re-raised exactly as before.
            class_name = resrc.__class__.__name__
            display('   Failed to Delete %s (%s)' % (resrc.name,
                                                     class_name))
            raise
# ==== COMPUTE CODE STARTS HERE ====
def main_compute():
    """Run the compute portion of the demo: list resources, clean leftovers,
    create nodes/disks/snapshots/networks/firewalls/addresses, then clean up
    again if CLEANUP is set."""
    start_time = datetime.datetime.now()
    display('Compute demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # This can return everything, but there is a large amount of overlap,
    # so we'll just get the sizes from the current zone.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    # These are global
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    # == Clean up any old demo resources ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)
    # == Create Node with disk auto-created ==
    if MAX_NODES > 1:
        display('Creating a node with boot/local-ssd using GCE structure:')
        name = '%s-gstruct' % DEMO_BASE_NAME
        img_url = "projects/debian-cloud/global/images/"
        img_url += "backports-debian-7-wheezy-v20141205"
        disk_type_url = "projects/%s/zones/us-central1-f/" % project.name
        disk_type_url += "diskTypes/local-ssd"
        # Raw GCE API disk structures: a persistent boot disk plus a
        # scratch local-ssd, both auto-deleted with the node.
        gce_disk_struct = [
            {
                "type": "PERSISTENT",
                "deviceName": '%s-gstruct' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskName": '%s-gstruct' % DEMO_BASE_NAME,
                    "sourceImage": img_url
                },
                "boot": True,
                "autoDelete": True
            },
            {
                "type": "SCRATCH",
                "deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskType": disk_type_url
                },
                "autoDelete": True
            }
        ]
        node_gstruct = gce.create_node(name, 'n1-standard-1', None,
                                       'us-central1-f',
                                       ex_disks_gce_struct=gce_disk_struct)
        num_disks = len(node_gstruct.extra['disks'])
        display('   Node %s created with %d disks' % (node_gstruct.name,
                                                      num_disks))
    # == Create Node with auto-created SSD boot disk ==
    display('Creating Node with auto-created SSD:')
    name = '%s-np-node' % DEMO_BASE_NAME
    node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7',
                             ex_tags=['libcloud'], ex_disk_type='pd-ssd',
                             ex_disk_auto_delete=False)
    display('   Node %s created' % name)
    # == Create, and attach a disk ==
    display('Creating a new disk:')
    disk_name = '%s-attach-disk' % DEMO_BASE_NAME
    volume = gce.create_volume(10, disk_name)
    if volume.attach(node_1):
        display('   Attached %s to %s' % (volume.name, node_1.name))
    display('   Disabled auto-delete for %s on %s' % (volume.name,
                                                      node_1.name))
    gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False)
    if CLEANUP:
        # == Detach the disk ==
        if gce.detach_volume(volume, ex_node=node_1):
            display('   Detached %s from %s' % (volume.name,
                                                node_1.name))
    # == Create Snapshot ==
    display('Creating a snapshot from existing disk:')
    # Create a disk to snapshot
    vol_name = '%s-snap-template' % DEMO_BASE_NAME
    image = gce.ex_get_image('debian-7')
    vol = gce.create_volume(None, vol_name, image=image)
    display('Created disk %s to shapshot:' % DEMO_BASE_NAME)
    # Snapshot volume
    snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME)
    display('   Snapshot %s created' % snapshot.name)
    # == Create Node with existing disk ==
    display('Creating Node with existing disk:')
    name = '%s-persist-node' % DEMO_BASE_NAME
    # Use objects this time instead of names
    # Get latest Debian 7 image
    image = gce.ex_get_image('debian-7')
    # Get Machine Size
    size = gce.ex_get_size('n1-standard-1')
    # Create Disk from Snapshot created above
    volume_name = '%s-boot-disk' % DEMO_BASE_NAME
    volume = gce.create_volume(None, volume_name, snapshot=snapshot)
    display('   Created %s from snapshot' % volume.name)
    # Create Node with Disk
    node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'],
                             ex_boot_disk=volume,
                             ex_disk_auto_delete=False)
    display('   Node %s created with attached disk %s' % (node_2.name,
                                                          volume.name))
    # == Update Tags for Node ==
    display('Updating Tags for %s:' % node_2.name)
    tags = node_2.extra['tags']
    tags.append('newtag')
    if gce.ex_set_node_tags(node_2, tags):
        display('   Tags updated for %s' % node_2.name)
    check_node = gce.ex_get_node(node_2.name)
    display('   New tags: %s' % check_node.extra['tags'])
    # == Setting Metadata for Node ==
    display('Setting Metadata for %s:' % node_2.name)
    if gce.ex_set_node_metadata(node_2, {'foo': 'bar', 'baz': 'foobarbaz'}):
        display('   Metadata updated for %s' % node_2.name)
    check_node = gce.ex_get_node(node_2.name)
    display('   New Metadata: %s' % check_node.extra['metadata'])
    # == Create Multiple nodes at once ==
    base_name = '%s-multiple-nodes' % DEMO_BASE_NAME
    number = MAX_NODES - 2
    if number > 0:
        display('Creating Multiple Nodes (%s):' % number)
        multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                                   number,
                                                   ex_tags=['libcloud'],
                                                   ex_disk_auto_delete=True)
        for node in multi_nodes:
            display('   Node %s created' % node.name)
    # == Create a Network ==
    display('Creating Network:')
    name = '%s-network' % DEMO_BASE_NAME
    cidr = '10.10.0.0/16'
    network_1 = gce.ex_create_network(name, cidr)
    display('   Network %s created' % network_1.name)
    # == Create a Firewall ==
    display('Creating a Firewall:')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['3141']}]
    firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1,
                                        source_tags=['libcloud'])
    display('   Firewall %s created' % firewall_1.name)
    # == Create a Static Address ==
    display('Creating an Address:')
    name = '%s-address' % DEMO_BASE_NAME
    address_1 = gce.ex_create_address(name)
    display('   Address %s created with IP %s' % (address_1.name,
                                                  address_1.address))
    # == List Updated Resources in current zone/region ==
    display('Updated Resources in current zone/region')
    nodes = gce.list_nodes()
    display('Nodes:', nodes)
    addresses = gce.ex_list_addresses()
    display('Addresses:', addresses)
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    if CLEANUP:
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 addresses + firewalls + networks + snapshots)
        # Volumes are cleaned in a second pass, once nodes are gone.
        volumes = gce.list_volumes()
        clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
# ==== LOAD BALANCER CODE STARTS HERE ====
def main_load_balancer():
    """Run the load-balancer demo: create 3 web nodes, a firewall, a health
    check and a balancer, exercise member attach/detach, then poll the LB
    over HTTP to show traffic being spread across members."""
    start_time = datetime.datetime.now()
    display('Load-balancer demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    gcelb = get_gcelb_driver(gce)
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # Existing Balancers
    balancers = gcelb.list_balancers()
    display('Load Balancers', balancers)
    # Protocols
    protocols = gcelb.list_protocols()
    display('Protocols', protocols)
    # Healthchecks
    healthchecks = gcelb.ex_list_healthchecks()
    display('Health Checks', healthchecks)
    # This demo is based on the GCE Load Balancing Quickstart described here:
    # https://developers.google.com/compute/docs/load-balancing/lb-quickstart
    # == Clean-up any existing demo resources ==
    all_nodes = gce.list_nodes(ex_zone='all')
    firewalls = gce.ex_list_firewalls()
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             balancers + healthchecks + firewalls)
    # == Create 3 nodes to balance between ==
    startup_script = ('apt-get -y update && '
                      'apt-get -y install apache2 && '
                      'hostname > /var/www/index.html')
    tag = '%s-www' % DEMO_BASE_NAME
    base_name = '%s-www' % DEMO_BASE_NAME
    image = gce.ex_get_image('debian-7')
    size = gce.ex_get_size('n1-standard-1')
    number = 3
    display('Creating %d nodes' % number)
    metadata = {'items': [{'key': 'startup-script',
                           'value': startup_script}]}
    lb_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                            number, ex_tags=[tag],
                                            ex_metadata=metadata,
                                            ex_disk_auto_delete=True,
                                            ignore_errors=False)
    display('Created Nodes', lb_nodes)
    # == Create a Firewall for instances ==
    display('Creating a Firewall')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['80']}]
    firewall = gce.ex_create_firewall(name, allowed, source_tags=[tag])
    display('   Firewall %s created' % firewall.name)
    # == Create a Health Check ==
    display('Creating a HealthCheck')
    name = '%s-healthcheck' % DEMO_BASE_NAME
    # These are all the default values, but listed here as an example. To
    # create a healthcheck with the defaults, only name is required.
    hc = gcelb.ex_create_healthcheck(name, host=None, path='/', port='80',
                                     interval=5, timeout=5,
                                     unhealthy_threshold=2,
                                     healthy_threshold=2)
    display('Healthcheck %s created' % hc.name)
    # == Create Load Balancer ==
    display('Creating Load Balancer')
    name = '%s-lb' % DEMO_BASE_NAME
    port = 80
    protocol = 'tcp'
    algorithm = None
    members = lb_nodes[:2]  # Only attach the first two initially
    healthchecks = [hc]
    balancer = gcelb.create_balancer(name, port, protocol, algorithm, members,
                                     ex_healthchecks=healthchecks)
    display('   Load Balancer %s created' % balancer.name)
    # == Attach third Node ==
    display('Attaching additional node to Load Balancer')
    member = balancer.attach_compute_node(lb_nodes[2])
    display('   Attached %s to %s' % (member.id, balancer.name))
    # == Show Balancer Members ==
    members = balancer.list_members()
    display('Load Balancer Members')
    for member in members:
        display('   ID: %s IP: %s' % (member.id, member.ip))
    # == Remove a Member ==
    display('Removing a Member')
    detached = members[0]
    detach = balancer.detach_member(detached)
    if detach:
        display('   Member %s detached from %s' % (detached.id,
                                                   balancer.name))
    # == Show Updated Balancer Members ==
    members = balancer.list_members()
    display('Updated Load Balancer Members')
    for member in members:
        display('   ID: %s IP: %s' % (member.id, member.ip))
    # == Reattach Member ==
    display('Reattaching Member')
    member = balancer.attach_member(detached)
    display('   Member %s attached to %s' % (member.id, balancer.name))
    # == Test Load Balancer by connecting to it multiple times ==
    PAUSE = 60
    display('Sleeping for %d seconds for LB members to serve...' % PAUSE)
    time.sleep(PAUSE)
    rounds = 200
    url = 'http://%s/' % balancer.ip
    line_length = 75
    display('Connecting to %s %s times' % (url, rounds))
    for x in range(rounds):
        response = url_req.urlopen(url)
        if PY3:
            output = str(response.read(), encoding='utf-8').strip()
        else:
            output = response.read().strip()
        # Pad each hostname differently so the \r-overwritten line makes
        # it visually obvious which member answered.
        if 'www-001' in output:
            padded_output = output.center(line_length)
        elif 'www-002' in output:
            padded_output = output.rjust(line_length)
        else:
            padded_output = output.ljust(line_length)
        sys.stdout.write('\r%s' % padded_output)
        sys.stdout.flush()
        time.sleep(.25)
    # BUG FIX: this was 'print ""' — a Python 2 print *statement* that is a
    # SyntaxError under Python 3, which the script otherwise supports (see
    # the PY3 branches above).  The function call works on both.
    print('')
    if CLEANUP:
        balancers = gcelb.list_balancers()
        healthchecks = gcelb.ex_list_healthchecks()
        nodes = gce.list_nodes(ex_zone='all')
        firewalls = gce.ex_list_firewalls()
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 balancers + healthchecks + firewalls)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
# ==== GOOGLE DNS CODE STARTS HERE ====
def main_dns():
    """Run the Google Cloud DNS portion of the demo: list managed zones and
    the records inside each of them."""
    started = datetime.datetime.now()
    display('DNS demo/test start time: %s' % str(started))
    gce = get_gce_driver()
    gdns = get_dns_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # Get list of managed zones
    display('Zones', gdns.iterate_zones())
    # Get list of records, zone by zone (iterate_zones is a one-shot
    # iterator, so it is fetched again here).
    for zone in gdns.iterate_zones():
        display('Records for managed zone "%s"' % zone.id,
                gdns.iterate_records(zone))
    # TODO(erjohnso): Finish this DNS section. Challenging in that you need to
    # own a domain, so testing will require user customization. Perhaps a new
    # command-line required flag unless --skip-dns is supplied. Also, real
    # e2e testing should try to do DNS lookups on new records, but DNS TTL
    # and propagation delays will introduce limits on what can be tested.
    finished = datetime.datetime.now()
    display('Total runtime: %s' % str(finished - started))
if __name__ == '__main__':
    # Command-line entry point: each flag enables one demo section.
    parser = argparse.ArgumentParser(
        description='Google Cloud Platform Demo / Live Test Script')
    parser.add_argument("--compute",
                        help="perform compute demo / live tests",
                        dest="compute", action="store_true")
    parser.add_argument("--load-balancer",
                        help="perform load-balancer demo / live tests",
                        dest="lb", action="store_true")
    parser.add_argument("--dns",
                        help="perform DNS demo / live tests",
                        dest="dns", action="store_true")
    parser.add_argument("--cleanup-only",
                        help="perform clean-up (skips all tests)",
                        dest="cleanup", action="store_true")
    cl_args = parser.parse_args()
    # --cleanup-only takes precedence and suppresses every demo.
    if cl_args.cleanup:
        cleanup_only()
    else:
        if cl_args.compute:
            main_compute()
        if cl_args.lb:
            main_load_balancer()
        if cl_args.dns:
            main_dns()
| 37.892396
| 78
| 0.613381
|
try:
import argparse
except:
print('This script uses the python "argparse" module. Please use Python '
'2.7 or greater.')
raise
try:
import secrets
except ImportError:
print('"demos/secrets.py" not found.\n\n'
'Please copy secrets.py-dist to secrets.py and update the GCE* '
'values with appropriate authentication information.\n'
'Additional information about setting these values can be found '
'in the docstring for:\n'
'libcloud/common/google.py\n')
sys.exit(1)
# Add parent dir of this file's dir to sys.path (OS-agnostically)
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import ResourceNotFoundError
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.dns.types import Provider as Provider_dns
from libcloud.dns.providers import get_driver as get_driver_dns
from libcloud.dns.base import Record, Zone
from libcloud.utils.py3 import PY3
if PY3:
import urllib.request as url_req
else:
import urllib2 as url_req
# Maximum number of 1-CPU nodes to allow to run simultaneously.
MAX_NODES = 5
# Prefix for every resource name the demo creates (and later destroys).
DEMO_BASE_NAME = 'lct'
# GCE zone to create resources in.
DATACENTER = 'us-central1-f'
# Whether to destroy created resources at the end of the run.
CLEANUP = True
args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
# Copy before mutating so the dict in secrets.py is left untouched.
kwargs = kwargs.copy()
kwargs['datacenter'] = DATACENTER
def get_gce_driver():
    """Return a new GCE compute driver built from the secrets.py credentials."""
    return get_driver(Provider.GCE)(*args, **kwargs)
def get_gcelb_driver(gce_driver=None):
    """Return a GCE load-balancer driver, reusing *gce_driver* when given."""
    factory = get_driver_lb(Provider_lb.GCE)
    if gce_driver:
        return factory(gce_driver=gce_driver)
    return factory(*args, **kwargs)
def get_dns_driver(gce_driver=None):
    """Return a Google DNS driver, reusing *gce_driver* when given."""
    factory = get_driver_dns(Provider_dns.GOOGLE)
    if gce_driver:
        return factory(gce_driver=gce_driver)
    return factory(*args, **kwargs)
def display(title, resource_list=None):
    """
    Display a list of resources.

    Lines for resources whose name starts with DEMO_BASE_NAME are
    prefixed with '=>' so demo-created resources stand out.

    :param title: String to be printed at the heading of the list.
    :type title: ``str``

    :param resource_list: List of resources to display
    :type resource_list: Any ``object`` with a C{name} attribute
    """
    # Fixed: the default used to be a mutable '[]'.  It was never mutated
    # here, but None-sentinel is the safe, idiomatic form.
    if resource_list is None:
        resource_list = []
    print('=> %s' % title)
    for item in resource_list:
        if isinstance(item, Record):
            # DNS records: show name and record type.
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> name=%s, type=%s' % (item.name, item.type))
            else:
                print('   name=%s, type=%s' % (item.name, item.type))
        elif isinstance(item, Zone):
            # Managed zones: show id and domain.
            if item.domain.startswith(DEMO_BASE_NAME):
                print('=> name=%s, dnsname=%s' % (item.id, item.domain))
            else:
                print('   name=%s, dnsname=%s' % (item.id, item.domain))
        elif hasattr(item, 'name'):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=> %s' % item.name)
            else:
                print('   %s' % item.name)
        else:
            # Fallback: assume a plain string.
            if item.startswith(DEMO_BASE_NAME):
                print('=> %s' % item)
            else:
                print('   %s' % item)
def cleanup_only():
    """List all resources, then destroy everything whose name starts with
    DEMO_BASE_NAME, without running any of the demos themselves."""
    start_time = datetime.datetime.now()
    display('Clean-up start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Project info.
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # Zone-scoped listings (ex_zone/region='all' fetches every location).
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # Sizes overlap heavily across zones, so only the current zone is shown.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    # These are global
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    # == Clean up any old demo resources ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)
    # Second pass: boot disks may only become deletable once their node
    # has been destroyed.
    volumes = gce.list_volumes()
    clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
def clean_up(gce, base_name, node_list=None, resource_list=None):
    """
    Destroy all resources that have a name beginning with 'base_name'.

    :param base_name: String with the first part of the name of resources
                      to destroy
    :type base_name: ``str``

    :keyword node_list: List of nodes to consider for deletion
    :type node_list: ``list`` of :class:`Node`

    :keyword resource_list: List of resources to consider for deletion
    :type resource_list: ``list`` of I{Resource Objects}
    """
    if node_list is None:
        node_list = []
    if resource_list is None:
        resource_list = []
    # Use ex_destroy_multiple_nodes to destroy nodes in one batch call.
    del_nodes = [node for node in node_list if node.name.startswith(base_name)]
    result = gce.ex_destroy_multiple_nodes(del_nodes)
    for i, success in enumerate(result):
        if success:
            display('   Deleted %s' % del_nodes[i].name)
        else:
            display('   Failed to delete %s' % del_nodes[i].name)
    # Destroy everything else with just the destroy method.
    for resrc in resource_list:
        if not resrc.name.startswith(base_name):
            continue
        try:
            resrc.destroy()
        except ResourceNotFoundError:
            display('   Not found: %s (%s)' % (resrc.name,
                                               resrc.__class__.__name__))
        except Exception:
            # Fixed: was a bare 'except:'.  Narrowed so KeyboardInterrupt /
            # SystemExit propagate untouched; real errors are still logged
            # and re-raised exactly as before.
            class_name = resrc.__class__.__name__
            display('   Failed to Delete %s (%s)' % (resrc.name,
                                                     class_name))
            raise
# ==== COMPUTE CODE STARTS HERE ====
def main_compute():
    """Run the live compute demo: list, create, modify and (optionally)
    clean up GCE compute resources named after DEMO_BASE_NAME.

    NOTE(review): this is a live test - it talks to the real GCE API and
    creates billable resources when MAX_NODES > 1.
    """
    start_time = datetime.datetime.now()
    display('Compute demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()

    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)

    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # This can return everything, but there is a large amount of overlap,
    # so we'll just get the sizes from the current zone.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)

    # == Clean up any demo resources left over from a previous run ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)

    # == Node-creation demos (skipped when MAX_NODES disallows them) ==
    if MAX_NODES > 1:
        # Create a node with a boot disk and a local SSD described with the
        # raw GCE "disks" structure instead of driver keyword args.
        display('Creating a node with boot/local-ssd using GCE structure:')
        name = '%s-gstruct' % DEMO_BASE_NAME
        img_url = "projects/debian-cloud/global/images/"
        img_url += "backports-debian-7-wheezy-v20141205"
        disk_type_url = "projects/%s/zones/us-central1-f/" % project.name
        disk_type_url += "diskTypes/local-ssd"
        gce_disk_struct = [
            {
                "type": "PERSISTENT",
                "deviceName": '%s-gstruct' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskName": '%s-gstruct' % DEMO_BASE_NAME,
                    "sourceImage": img_url
                },
                "boot": True,
                "autoDelete": True
            },
            {
                "type": "SCRATCH",
                "deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskType": disk_type_url
                },
                "autoDelete": True
            }
        ]
        node_gstruct = gce.create_node(name, 'n1-standard-1', None,
                                       'us-central1-f',
                                       ex_disks_gce_struct=gce_disk_struct)
        num_disks = len(node_gstruct.extra['disks'])
        display('  Node %s created with %d disks' % (node_gstruct.name,
                                                     num_disks))

        # Create a node whose SSD boot disk is auto-created by the driver.
        display('Creating Node with auto-created SSD:')
        name = '%s-np-node' % DEMO_BASE_NAME
        node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7',
                                 ex_tags=['libcloud'], ex_disk_type='pd-ssd',
                                 ex_disk_auto_delete=False)
        display('  Node %s created' % name)

        # Create a new disk and attach it to the node above.
        display('Creating a new disk:')
        disk_name = '%s-attach-disk' % DEMO_BASE_NAME
        volume = gce.create_volume(10, disk_name)
        if volume.attach(node_1):
            display('  Attached %s to %s' % (volume.name, node_1.name))
        display('  Disabled auto-delete for %s on %s' % (volume.name,
                                                         node_1.name))
        gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False)

        if CLEANUP:
            # Detach the disk again so clean-up can destroy it.
            if gce.detach_volume(volume, ex_node=node_1):
                display('  Detached %s from %s' % (volume.name,
                                                   node_1.name))

        # Create a disk from an image, then snapshot it.
        display('Creating a snapshot from existing disk:')
        vol_name = '%s-snap-template' % DEMO_BASE_NAME
        image = gce.ex_get_image('debian-7')
        vol = gce.create_volume(None, vol_name, image=image)
        display('Created disk %s to shapshot:' % DEMO_BASE_NAME)
        snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME)
        display('  Snapshot %s created' % snapshot.name)

        # Create a node booted from a pre-existing disk built off the
        # snapshot above.
        display('Creating Node with existing disk:')
        name = '%s-persist-node' % DEMO_BASE_NAME
        image = gce.ex_get_image('debian-7')
        size = gce.ex_get_size('n1-standard-1')
        volume_name = '%s-boot-disk' % DEMO_BASE_NAME
        volume = gce.create_volume(None, volume_name, snapshot=snapshot)
        display('  Created %s from snapshot' % volume.name)
        node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'],
                                 ex_boot_disk=volume,
                                 ex_disk_auto_delete=False)
        display('  Node %s created with attached disk %s' % (node_2.name,
                                                             volume.name))

        # Update the node's tags and verify the change via a fresh lookup.
        display('Updating Tags for %s:' % node_2.name)
        tags = node_2.extra['tags']
        tags.append('newtag')
        if gce.ex_set_node_tags(node_2, tags):
            display('  Tags updated for %s' % node_2.name)
        check_node = gce.ex_get_node(node_2.name)
        display('  New tags: %s' % check_node.extra['tags'])

        # Set node metadata and verify via a fresh lookup.
        display('Setting Metadata for %s:' % node_2.name)
        if gce.ex_set_node_metadata(node_2, {'foo': 'bar', 'baz': 'foobarbaz'}):
            display('  Metadata updated for %s' % node_2.name)
        check_node = gce.ex_get_node(node_2.name)
        display('  New Metadata: %s' % check_node.extra['metadata'])

        # Create the remaining node budget in one batch call.
        base_name = '%s-multiple-nodes' % DEMO_BASE_NAME
        number = MAX_NODES - 2
        if number > 0:
            display('Creating Multiple Nodes (%s):' % number)
            multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                                       number,
                                                       ex_tags=['libcloud'],
                                                       ex_disk_auto_delete=True)
            for node in multi_nodes:
                display('  Node %s created' % node.name)

    # == Create a Network ==
    display('Creating Network:')
    name = '%s-network' % DEMO_BASE_NAME
    cidr = '10.10.0.0/16'
    network_1 = gce.ex_create_network(name, cidr)
    display('  Network %s created' % network_1.name)

    # == Create a Firewall on that network ==
    display('Creating a Firewall:')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['3141']}]
    firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1,
                                        source_tags=['libcloud'])
    display('  Firewall %s created' % firewall_1.name)

    # == Create a static Address ==
    display('Creating an Address:')
    name = '%s-address' % DEMO_BASE_NAME
    address_1 = gce.ex_create_address(name)
    display('  Address %s created with IP %s' % (address_1.name,
                                                 address_1.address))

    # == Re-list resources in the current zone/region to show the changes ==
    display('Updated Resources in current zone/region')
    nodes = gce.list_nodes()
    display('Nodes:', nodes)
    addresses = gce.ex_list_addresses()
    display('Addresses:', addresses)
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)

    if CLEANUP:
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 addresses + firewalls + networks + snapshots)
        # Volumes are deleted in a second pass so that nodes holding them
        # are already gone.
        volumes = gce.list_volumes()
        clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
def main_load_balancer():
    """Run the live load-balancer demo: create backend web nodes, a
    firewall, a health check and a load balancer; exercise member
    attach/detach; poll the balancer's IP; optionally clean everything up.

    NOTE(review): this is a live test - it creates billable GCE resources.
    """
    start_time = datetime.datetime.now()
    display('Load-balancer demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    gcelb = get_gcelb_driver(gce)

    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)

    # Existing load-balancer resources.
    balancers = gcelb.list_balancers()
    display('Load Balancers', balancers)
    protocols = gcelb.list_protocols()
    display('Protocols', protocols)
    healthchecks = gcelb.ex_list_healthchecks()
    display('Health Checks', healthchecks)

    # Compute resources needed for the pre-run clean-up.
    all_nodes = gce.list_nodes(ex_zone='all')
    firewalls = gce.ex_list_firewalls()

    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             balancers + healthchecks + firewalls)

    # == Create three backend nodes running apache via a startup script ==
    startup_script = ('apt-get -y update && '
                      'apt-get -y install apache2 && '
                      'hostname > /var/www/index.html')
    tag = '%s-www' % DEMO_BASE_NAME
    base_name = '%s-www' % DEMO_BASE_NAME
    image = gce.ex_get_image('debian-7')
    size = gce.ex_get_size('n1-standard-1')
    number = 3
    display('Creating %d nodes' % number)
    metadata = {'items': [{'key': 'startup-script',
                           'value': startup_script}]}
    lb_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                            number, ex_tags=[tag],
                                            ex_metadata=metadata,
                                            ex_disk_auto_delete=True,
                                            ignore_errors=False)
    display('Created Nodes', lb_nodes)

    # == Create a firewall opening HTTP to the tagged nodes ==
    display('Creating a Firewall')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['80']}]
    firewall = gce.ex_create_firewall(name, allowed, source_tags=[tag])
    display('  Firewall %s created' % firewall.name)

    # == Create a health check probing / on port 80 ==
    display('Creating a HealthCheck')
    name = '%s-healthcheck' % DEMO_BASE_NAME
    hc = gcelb.ex_create_healthcheck(name, host=None, path='/', port='80',
                                     interval=5, timeout=5,
                                     unhealthy_threshold=2,
                                     healthy_threshold=2)
    display('Healthcheck %s created' % hc.name)

    # == Create the load balancer over the first two nodes ==
    display('Creating Load Balancer')
    name = '%s-lb' % DEMO_BASE_NAME
    port = 80
    protocol = 'tcp'
    algorithm = None
    members = lb_nodes[:2]
    healthchecks = [hc]
    balancer = gcelb.create_balancer(name, port, protocol, algorithm, members,
                                     ex_healthchecks=healthchecks)
    display('  Load Balancer %s created' % balancer.name)

    # == Attach the third node, then exercise member management ==
    display('Attaching additional node to Load Balancer')
    member = balancer.attach_compute_node(lb_nodes[2])
    display('  Attached %s to %s' % (member.id, balancer.name))

    members = balancer.list_members()
    display('Load Balancer Members')
    for member in members:
        display('  ID: %s IP: %s' % (member.id, member.ip))

    display('Removing a Member')
    detached = members[0]
    detach = balancer.detach_member(detached)
    if detach:
        display('  Member %s detached from %s' % (detached.id,
                                                  balancer.name))

    members = balancer.list_members()
    display('Updated Load Balancer Members')
    for member in members:
        display('  ID: %s IP: %s' % (member.id, member.ip))

    display('Reattaching Member')
    member = balancer.attach_member(detached)
    display('  Member %s attached to %s' % (member.id, balancer.name))

    # == Give the backends time to come up, then poll the balancer IP ==
    PAUSE = 60
    display('Sleeping for %d seconds for LB members to serve...' % PAUSE)
    time.sleep(PAUSE)
    rounds = 200
    url = 'http://%s/' % balancer.ip
    line_length = 75
    display('Connecting to %s %s times' % (url, rounds))
    for x in range(rounds):
        response = url_req.urlopen(url)
        if PY3:
            output = str(response.read(), encoding='utf-8').strip()
        else:
            output = response.read().strip()
        # Pad differently per backend so the responding node is visually
        # distinguishable on a single rewritten terminal line.
        if 'www-001' in output:
            padded_output = output.center(line_length)
        elif 'www-002' in output:
            padded_output = output.rjust(line_length)
        else:
            padded_output = output.ljust(line_length)
        sys.stdout.write('\r%s' % padded_output)
        sys.stdout.flush()
        time.sleep(.25)
    # Bug fix: `print ""` is Python 2-only statement syntax and a
    # SyntaxError under Python 3, which this script otherwise supports
    # (see the PY3 branch above).  Use the function form.
    print("")

    if CLEANUP:
        balancers = gcelb.list_balancers()
        healthchecks = gcelb.ex_list_healthchecks()
        nodes = gce.list_nodes(ex_zone='all')
        firewalls = gce.ex_list_firewalls()
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 balancers + healthchecks + firewalls)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
def main_dns():
    """Walk the DNS driver: show the project, the managed zones, and the
    records inside each zone, then report total runtime."""
    started = datetime.datetime.now()
    display('DNS demo/test start time: %s' % str(started))
    gce = get_gce_driver()
    gdns = get_dns_driver()

    # Show which project we are operating on.
    project = gce.ex_get_project()
    display('Project: %s' % project.name)

    # First pass: list the managed zones themselves.
    zones = gdns.iterate_zones()
    display('Zones', zones)

    # Second pass needs a fresh iterator; the first one may already have
    # been consumed by display() above.
    for zone in gdns.iterate_zones():
        zone_records = gdns.iterate_records(zone)
        display('Records for managed zone "%s"' % zone.id, zone_records)

    finished = datetime.datetime.now()
    display('Total runtime: %s' % str(finished - started))
if __name__ == '__main__':
    # Command-line entry point.  Each flag selects one demo suite; the
    # compute / load-balancer / dns suites can be combined, while
    # --cleanup-only skips all tests and only deletes demo resources.
    parser = argparse.ArgumentParser(
        description='Google Cloud Platform Demo / Live Test Script')
    parser.add_argument("--compute",
                        help="perform compute demo / live tests",
                        dest="compute", action="store_true")
    parser.add_argument("--load-balancer",
                        help="perform load-balancer demo / live tests",
                        dest="lb", action="store_true")
    parser.add_argument("--dns",
                        help="perform DNS demo / live tests",
                        dest="dns", action="store_true")
    parser.add_argument("--cleanup-only",
                        help="perform clean-up (skips all tests)",
                        dest="cleanup", action="store_true")
    cl_args = parser.parse_args()
    if cl_args.cleanup:
        cleanup_only()
    else:
        if cl_args.compute:
            main_compute()
        if cl_args.lb:
            main_load_balancer()
        if cl_args.dns:
            main_dns()
| false
| true
|
790d7d7ba0053e639f2f0a33658279ba5db13313
| 3,474
|
py
|
Python
|
vaccines.py
|
Karalius/get-vaccine-vilnius
|
49a918cdef6fedc7538f7e49210b18fb1f03f7f4
|
[
"MIT"
] | null | null | null |
vaccines.py
|
Karalius/get-vaccine-vilnius
|
49a918cdef6fedc7538f7e49210b18fb1f03f7f4
|
[
"MIT"
] | null | null | null |
vaccines.py
|
Karalius/get-vaccine-vilnius
|
49a918cdef6fedc7538f7e49210b18fb1f03f7f4
|
[
"MIT"
] | null | null | null |
import time
from bs4 import BeautifulSoup
import requests
import json
from datetime import datetime, timedelta
import psycopg2
import smtplib
import os
DATABASE = os.environ["DATABASE"]
USER = os.environ["USER"]
PASSWORD = os.environ["PASSWORD"]
HOST = os.environ["HOST"]
def send_email(message: str) -> None:
    """
    Sends an email to target email with given message.

    Credentials are read from ``../creds.json``, which must contain the
    keys ``user``, ``pass`` and ``target``.

    Args:
        message (str): message you're sending
    """
    with open("../creds.json", "r") as f:
        # Bug fix: json.loads() expects a string; a file object must go
        # through json.load().  The original raised TypeError on every call.
        creds = json.load(f)
    gmail_user = creds["user"]
    gmail_pass = creds["pass"]
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        server.login(gmail_user, gmail_pass)
        server.sendmail(gmail_user, creds["target"], message)
    except Exception:
        # Best-effort delivery: log and carry on, but no longer a bare
        # `except:` that would also swallow KeyboardInterrupt/SystemExit.
        print("Email didnt work...")
def get_data() -> None:
    """
    Infinite loop of every 10min requests to Vilnius vaccination center.
    Collects count of vaccines and adds to PostgreSQL database.
    Sends an email if Pfizer vaccine is available.
    """
    while True:
        # Fresh connection per iteration keeps the loop resilient to
        # dropped DB connections over long runtimes.
        sql_connection = psycopg2.connect(
            database=DATABASE, user=USER, password=PASSWORD, host=HOST
        )
        cur = sql_connection.cursor()
        headers = {
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "sec-ch-ua": "^\\^",
            "sec-ch-ua-mobile": "?0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Sec-Fetch-Site": "cross-site",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-User": "?1",
            "Sec-Fetch-Dest": "document",
            "Accept-Language": "en-US,en;q=0.9",
        }
        page = requests.get(
            "https://vilnius-vac.myhybridlab.com/selfregister/vaccine", headers=headers
        )
        soup = BeautifulSoup(page.content, "html.parser")
        # The vaccine counts are embedded as JSON in a Vue component prop.
        vaccines = soup.find("vaccine-rooms", class_=None)[":vaccine-rooms"]
        json_object = json.loads(vaccines)

        # Server-side timestamp of the counts (ISO "date time" pair).
        time_raw = soup.find("small", class_="text-muted").get_text().split()
        time_str = time_raw[2] + " " + time_raw[3]
        dt = datetime.fromisoformat(time_str)
        now = datetime.now().replace(microsecond=0)
        # NOTE(review): assumes local clock is UTC and the site reports
        # EET (UTC+3) - confirm; a wrong offset skews the sleep below.
        eet_dt = now + timedelta(hours=3)
        diff_secs = (eet_dt - dt).seconds
        # Aim to wake up just after the site's next 10-minute refresh.
        total_sleep = 602 - diff_secs

        moderna = json_object[0]["free_total"]
        pfizer = json_object[1]["free_total"]
        astra = json_object[2]["free_total"]
        janssen = json_object[3]["free_total"]
        # Fix: parameterized query instead of f-string interpolation -
        # avoids SQL injection / quoting issues with scraped values.
        cur.execute(
            "INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen)"
            " VALUES (%s, %s, %s, %s, %s);",
            (time_str, moderna, pfizer, astra, janssen),
        )
        sql_connection.commit()
        cur.close()  # fix: cursor was never closed
        sql_connection.close()

        if pfizer > 0:
            # Bug fix: the original string was missing the f-prefix, so the
            # literal text "{pfizer}" was emailed instead of the count.
            send_email(
                f"Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine"
            )
        time.sleep(total_sleep)
if __name__ == "__main__":
    # Run the scraper loop only when executed as a script.
    get_data()
| 32.46729
| 161
| 0.582614
|
import time
from bs4 import BeautifulSoup
import requests
import json
from datetime import datetime, timedelta
import psycopg2
import smtplib
import os
DATABASE = os.environ["DATABASE"]
USER = os.environ["USER"]
PASSWORD = os.environ["PASSWORD"]
HOST = os.environ["HOST"]
def send_email(message: str) -> None:
with open("../creds.json", "r") as f:
creds = json.loads(f)
gmail_user = creds["user"]
gmail_pass = creds["pass"]
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(gmail_user, gmail_pass)
server.sendmail(gmail_user, creds["target"], message)
except:
print("Email didnt work...")
def get_data() -> None:
while True:
sql_connection = psycopg2.connect(
database=DATABASE, user=USER, password=PASSWORD, host=HOST
)
cur = sql_connection.cursor()
headers = {
"Connection": "keep-alive",
"Cache-Control": "max-age=0",
"sec-ch-ua": "^\\^",
"sec-ch-ua-mobile": "?0",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "cross-site",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Accept-Language": "en-US,en;q=0.9",
}
page = requests.get(
"https://vilnius-vac.myhybridlab.com/selfregister/vaccine", headers=headers
)
soup = BeautifulSoup(page.content, "html.parser")
vaccines = soup.find("vaccine-rooms", class_=None)[":vaccine-rooms"]
json_object = json.loads(vaccines)
time_raw = soup.find("small", class_="text-muted").get_text().split()
time_str = time_raw[2] + " " + time_raw[3]
dt = datetime.fromisoformat(time_str)
now = datetime.now().replace(microsecond=0)
eet_dt = now + timedelta(hours=3)
diff_secs = (eet_dt - dt).seconds
total_sleep = 602 - diff_secs
moderna = json_object[0]["free_total"]
pfizer = json_object[1]["free_total"]
astra = json_object[2]["free_total"]
janssen = json_object[3]["free_total"]
cur.execute(
f"INSERT INTO vilnius_vakcinos (time, moderna, pfizer, astra_zeneca, janssen) VALUES ('{time_str}', {moderna}, {pfizer}, {astra}, {janssen});"
)
sql_connection.commit()
sql_connection.close()
if pfizer > 0:
send_email(
"Pfizer count: {pfizer}, link to register: https://vilnius-vac.myhybridlab.com/selfregister/vaccine"
)
time.sleep(total_sleep)
if __name__ == "__main__":
get_data()
| true
| true
|
790d7db67280443b19cd4193370f605802115a87
| 847
|
py
|
Python
|
salt/runners/ssh.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | 2
|
2015-08-04T21:54:38.000Z
|
2019-04-25T21:47:08.000Z
|
salt/runners/ssh.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | 1
|
2015-09-02T12:49:48.000Z
|
2015-09-02T19:22:58.000Z
|
salt/runners/ssh.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | 1
|
2020-10-19T11:49:50.000Z
|
2020-10-19T11:49:50.000Z
|
# -*- coding: utf-8 -*-
'''
A Runner module interface on top of the salt-ssh Python API.
This allows for programmatic use from salt-api, the Reactor, Orchestrate, etc.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Libs
import salt.client.ssh.client
def cmd(
        tgt,
        fun,
        arg=(),
        timeout=None,
        expr_form='glob',
        kwarg=None):
    '''
    Execute a single command via the salt-ssh subsystem and return all
    routines at once

    .. versionadded:: 2015.2

    A wrapper around the :py:meth:`SSHClient.cmd
    <salt.client.ssh.client.SSHClient.cmd>` method.

    :param tgt: target specification (minion id, glob, list, ...)
    :param fun: execution-module function to run, e.g. ``test.ping``
    :param arg: positional arguments passed to ``fun``
    :param timeout: seconds to wait for the command to return
    :param expr_form: how ``tgt`` is matched (default ``'glob'``)
    :param kwarg: keyword arguments passed to ``fun``
    '''
    # Build the client from the runner's master opts (__opts__ is injected
    # by the salt loader) and delegate straight to SSHClient.cmd.
    client = salt.client.ssh.client.SSHClient(mopts=__opts__)
    return client.cmd(
        tgt,
        fun,
        arg,
        timeout,
        expr_form,
        kwarg)
| 21.175
| 78
| 0.602125
|
from __future__ import absolute_import
import salt.client.ssh.client
def cmd(
tgt,
fun,
arg=(),
timeout=None,
expr_form='glob',
kwarg=None):
client = salt.client.ssh.client.SSHClient(mopts=__opts__)
return client.cmd(
tgt,
fun,
arg,
timeout,
expr_form,
kwarg)
| true
| true
|
790d7dc3297a1f9745e929ec6e91dfe5c2d85a35
| 13,991
|
py
|
Python
|
superset/dashboards/commands/importers/v0.py
|
Jacob-ru/superset
|
148409214ce760368e3bf8122eb0d79297606a0a
|
[
"Apache-2.0"
] | null | null | null |
superset/dashboards/commands/importers/v0.py
|
Jacob-ru/superset
|
148409214ce760368e3bf8122eb0d79297606a0a
|
[
"Apache-2.0"
] | null | null | null |
superset/dashboards/commands/importers/v0.py
|
Jacob-ru/superset
|
148409214ce760368e3bf8122eb0d79297606a0a
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import time
from copy import copy
from datetime import datetime
from typing import Any, Dict, Optional
from flask_babel import lazy_gettext as _
from sqlalchemy.orm import make_transient, Session
from superset import ConnectorRegistry, db
from superset.commands.base import BaseCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.datasets.commands.importers.v0 import import_dataset
from superset.exceptions import DashboardImportException
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.core import Database
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
logger = logging.getLogger(__name__)
def import_chart(
    slc_to_import: Slice,
    slc_to_override: Optional[Slice],
    import_time: Optional[int] = None,
) -> int:
    """Inserts or overrides slc in the database.

    remote_id and import_time fields in params_dict are set to track the
    slice origin and ensure correct overrides for multiple imports.
    Slice.perm is used to find the datasources and connect them.

    :param Slice slc_to_import: Slice object to import
    :param Slice slc_to_override: Slice to replace, id matches remote_id
    :returns: The resulting id for the imported slice
    :rtype: int
    """
    session = db.session
    # Detach from any previous session and drop dashboard associations so
    # the copy below starts clean.
    make_transient(slc_to_import)
    slc_to_import.dashboards = []
    # Record the originating id/time in params so re-imports can find and
    # override this slice.
    slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)

    slc_to_import = slc_to_import.copy()
    slc_to_import.reset_ownership()
    params = slc_to_import.params_dict
    # Re-resolve the datasource in the target instance by name/schema/db.
    datasource = ConnectorRegistry.get_datasource_by_name(
        session,
        slc_to_import.datasource_type,
        params["datasource_name"],
        params["schema"],
        params["database_name"],
    )
    slc_to_import.datasource_id = datasource.id  # type: ignore
    if slc_to_override:
        # A slice with a matching remote_id exists: update it in place.
        slc_to_override.override(slc_to_import)
        session.flush()
        return slc_to_override.id
    session.add(slc_to_import)
    logger.info("Final slice: %s", str(slc_to_import.to_json()))
    session.flush()
    return slc_to_import.id
def import_dashboard(
    # pylint: disable=too-many-locals,too-many-statements
    dashboard_to_import: Dashboard,
    dataset_id_mapping: Optional[Dict[int, int]] = None,
    import_time: Optional[int] = None,
    database_id: Optional[int] = None,
) -> int:
    """Imports the dashboard from the object to the database.

    Once dashboard is imported, json_metadata field is extended and stores
    remote_id and import_time. It helps to decide if the dashboard has to
    be overridden or just copies over. Slices that belong to this
    dashboard will be wired to existing tables. This function can be used
    to import/export dashboards between multiple superset instances.
    Audit metadata isn't copied over.

    :param dashboard_to_import: decoded Dashboard object to import
    :param dataset_id_mapping: old dataset id -> new dataset id, produced
        by the dataset import step
    :param import_time: timestamp recorded in params for override matching
    :param database_id: optional target database; when set, slice params
        are re-pointed at that database's name
    :returns: id of the newly created or overridden dashboard
    """

    def alter_positions(
        dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
    ) -> None:
        """Updates slice_ids in the position json.

        Sample position_json data:
        {
            "DASHBOARD_VERSION_KEY": "v2",
            "DASHBOARD_ROOT_ID": {
                "type": "DASHBOARD_ROOT_TYPE",
                "id": "DASHBOARD_ROOT_ID",
                "children": ["DASHBOARD_GRID_ID"]
            },
            "DASHBOARD_GRID_ID": {
                "type": "DASHBOARD_GRID_TYPE",
                "id": "DASHBOARD_GRID_ID",
                "children": ["DASHBOARD_CHART_TYPE-2"]
            },
            "DASHBOARD_CHART_TYPE-2": {
                "type": "CHART",
                "id": "DASHBOARD_CHART_TYPE-2",
                "children": [],
                "meta": {
                    "width": 4,
                    "height": 50,
                    "chartId": 118
                }
            },
        }
        """
        position_data = json.loads(dashboard.position_json)
        position_json = position_data.values()
        for value in position_json:
            # Only CHART entries carry a meta.chartId worth remapping.
            if (
                isinstance(value, dict)
                and value.get("meta")
                and value.get("meta", {}).get("chartId")
            ):
                old_slice_id = value["meta"]["chartId"]
                if old_slice_id in old_to_new_slc_id_dict:
                    value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
        dashboard.position_json = json.dumps(position_data)

    def alter_native_filters(dashboard: Dashboard) -> None:
        # Re-point native filter targets from exported dataset ids to the
        # ids of the freshly imported datasets.
        json_metadata = json.loads(dashboard.json_metadata)
        native_filter_configuration = json_metadata.get("native_filter_configuration")
        if not native_filter_configuration:
            return
        for native_filter in native_filter_configuration:
            for target in native_filter.get("targets", []):
                old_dataset_id = target.get("datasetId")
                if dataset_id_mapping and old_dataset_id is not None:
                    target["datasetId"] = dataset_id_mapping.get(
                        old_dataset_id, old_dataset_id,
                    )
        dashboard.json_metadata = json.dumps(json_metadata)

    logger.info("Started import of the dashboard: %s", dashboard_to_import.to_json())
    session = db.session
    logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
    # copy slices object as Slice.import_slice will mutate the slice
    # and will remove the existing dashboard - slice association
    slices = copy(dashboard_to_import.slices)

    # Clearing the slug to avoid conflicts
    dashboard_to_import.slug = None

    old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")
    old_to_new_slc_id_dict: Dict[int, int] = {}
    new_timed_refresh_immune_slices = []
    new_expanded_slices = {}
    new_filter_scopes = {}
    i_params_dict = dashboard_to_import.params_dict
    # Map remote_id -> existing Slice so re-imports override rather than
    # duplicate.
    # NOTE(review): this dereferences dataset_id_mapping.values() although
    # the parameter defaults to None - callers in this module always pass
    # a dict; confirm before calling with the default.
    remote_id_slice_map = {
        slc.params_dict["remote_id"]: slc
        for slc in session.query(Slice)
        .filter(Slice.datasource_id.in_(list(dataset_id_mapping.values())))
        .all()
        if "remote_id" in slc.params_dict
    }
    for slc in slices:
        logger.info(
            "Importing slice %s from the dashboard: %s",
            slc.to_json(),
            dashboard_to_import.dashboard_title,
        )
        # Change database name in params due to using new database for imported dashboard
        if database_id:
            database_name = session.query(Database).filter(Database.id == database_id).first().name
            slc.alter_params(database_name=database_name)
        remote_slc = remote_id_slice_map.get(slc.id)
        new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
        old_to_new_slc_id_dict[slc.id] = new_slc_id

        # update json metadata that deals with slice ids
        new_slc_id_str = str(new_slc_id)
        old_slc_id_str = str(slc.id)
        if (
            "timed_refresh_immune_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
        ):
            new_timed_refresh_immune_slices.append(new_slc_id_str)
        if (
            "expanded_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["expanded_slices"]
        ):
            new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
                old_slc_id_str
            ]

    # since PR #9109, filter_immune_slices and filter_immune_slice_fields
    # are converted to filter_scopes
    # but dashboard create from import may still have old dashboard filter metadata
    # here we convert them to new filter_scopes metadata first
    filter_scopes = {}
    if (
        "filter_immune_slices" in i_params_dict
        or "filter_immune_slice_fields" in i_params_dict
    ):
        filter_scopes = convert_filter_scopes(old_json_metadata, slices)

    if "filter_scopes" in i_params_dict:
        filter_scopes = old_json_metadata.get("filter_scopes")

    # then replace old slice id to new slice id:
    if filter_scopes:
        new_filter_scopes = copy_filter_scopes(
            old_to_new_slc_id_dict=old_to_new_slc_id_dict,
            old_filter_scopes=filter_scopes,
        )

    # override the dashboard
    existing_dashboard = None
    for dash in session.query(Dashboard).all():
        if (
            "remote_id" in dash.params_dict
            and dash.params_dict["remote_id"] == dashboard_to_import.id
        ):
            existing_dashboard = dash

    dashboard_to_import = dashboard_to_import.copy()
    dashboard_to_import.id = None
    dashboard_to_import.reset_ownership()
    # position_json can be empty for dashboards
    # with charts added from chart-edit page and without re-arranging
    if dashboard_to_import.position_json:
        alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
    dashboard_to_import.alter_params(import_time=import_time)
    # Legacy filter metadata has been folded into filter_scopes above.
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
    if new_filter_scopes:
        dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
    if new_expanded_slices:
        dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
    if new_timed_refresh_immune_slices:
        dashboard_to_import.alter_params(
            timed_refresh_immune_slices=new_timed_refresh_immune_slices
        )

    alter_native_filters(dashboard_to_import)

    new_slices = (
        session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
    )

    if existing_dashboard:
        existing_dashboard.override(dashboard_to_import)
        existing_dashboard.slices = new_slices
        session.flush()
        return existing_dashboard.id

    dashboard_to_import.slices = new_slices
    session.add(dashboard_to_import)
    session.flush()
    return dashboard_to_import.id  # type: ignore
def decode_dashboards(  # pylint: disable=too-many-return-statements
    o: Dict[str, Any]
) -> Any:
    """
    Function to be passed into json.loads obj_hook parameter

    Recreates the dashboard object from a json representation.
    """
    # pylint: disable=import-outside-toplevel
    from superset.connectors.druid.models import (
        DruidCluster,
        DruidColumn,
        DruidDatasource,
        DruidMetric,
    )

    # Tagged-key -> constructor dispatch, checked in the original order.
    model_factories = (
        ("__Dashboard__", Dashboard),
        ("__Slice__", Slice),
        ("__TableColumn__", TableColumn),
        ("__SqlaTable__", SqlaTable),
        ("__SqlMetric__", SqlMetric),
        ("__DruidCluster__", DruidCluster),
        ("__DruidColumn__", DruidColumn),
        ("__DruidDatasource__", DruidDatasource),
        ("__DruidMetric__", DruidMetric),
    )
    for marker, factory in model_factories:
        if marker in o:
            return factory(**o[marker])
    if "__datetime__" in o:
        return datetime.strptime(o["__datetime__"], "%Y-%m-%dT%H:%M:%S")
    # Plain dict with no recognized tag: pass through unchanged.
    return o
def import_dashboards(
    session: Session,
    content: str,
    database_id: Optional[int] = None,
    import_time: Optional[int] = None,
) -> None:
    """Imports dashboards from a stream to databases

    :param session: SQLAlchemy session used for committing the import
    :param content: raw JSON export (old unversioned format)
    :param database_id: optional target database for the datasets/slices
    :param import_time: override for the recorded import timestamp
    :raises DashboardImportException: when the decoded payload is empty
    """
    current_tt = int(time.time())
    import_time = current_tt if import_time is None else import_time
    data = json.loads(content, object_hook=decode_dashboards)
    if not data:
        raise DashboardImportException(_("No data in file"))
    # Import datasets first, remembering old-id -> new-id so the
    # dashboards below can be re-pointed at the imported datasets.
    dataset_id_mapping: Dict[int, int] = {}
    for table in data["datasources"]:
        new_dataset_id = import_dataset(table, database_id, import_time=import_time)
        params = json.loads(table.params)
        dataset_id_mapping[params["remote_id"]] = new_dataset_id

    session.commit()
    for dashboard in data["dashboards"]:
        import_dashboard(dashboard, dataset_id_mapping, import_time=import_time,
                         database_id=database_id)
    session.commit()
class ImportDashboardsCommand(BaseCommand):
    """
    Import dashboard in JSON format.

    This is the original unversioned format used to export and import dashboards
    in Superset.
    """

    # pylint: disable=unused-argument
    def __init__(
        self, contents: Dict[str, str], database_id: Optional[int] = None, **kwargs: Any
    ):
        # contents: mapping of file name -> raw JSON export to import.
        self.contents = contents
        # database_id: optional target database for the imported datasets.
        self.database_id = database_id

    def run(self) -> None:
        """Validate every file, then import them one by one."""
        self.validate()
        for file_name, content in self.contents.items():
            logger.info("Importing dashboard from file %s", file_name)
            import_dashboards(db.session, content, self.database_id)

    def validate(self) -> None:
        """Raise ValueError if any provided file is not valid JSON."""
        # ensure all files are JSON
        for content in self.contents.values():
            try:
                json.loads(content)
            except ValueError:
                logger.exception("Invalid JSON file")
                raise
| 37.509383
| 99
| 0.67715
|
import json
import logging
import time
from copy import copy
from datetime import datetime
from typing import Any, Dict, Optional
from flask_babel import lazy_gettext as _
from sqlalchemy.orm import make_transient, Session
from superset import ConnectorRegistry, db
from superset.commands.base import BaseCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.datasets.commands.importers.v0 import import_dataset
from superset.exceptions import DashboardImportException
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.core import Database
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
logger = logging.getLogger(__name__)
def import_chart(
    slc_to_import: Slice,
    slc_to_override: Optional[Slice],
    import_time: Optional[int] = None,
) -> int:
    """Insert or override a slice (chart) in the metadata database.

    The slice's exported id is saved as ``remote_id`` in its params so a
    later import can find and override the same chart (see the
    ``remote_id_slice_map`` lookup in ``import_dashboard``). The slice is
    re-pointed at the local datasource matched by the (datasource_type,
    datasource_name, schema, database_name) recorded in its params.

    Args:
        slc_to_import: deserialized slice from the export file.
        slc_to_override: existing local slice from a previous import of the
            same remote slice, or None to insert a new one.
        import_time: timestamp stamped into the slice params.

    Returns:
        The id of the inserted or overridden slice.
    """
    session = db.session
    # Detach the deserialized object from any session it may belong to.
    make_transient(slc_to_import)
    slc_to_import.dashboards = []
    # Record the exported id so future imports can match this slice.
    slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)
    slc_to_import = slc_to_import.copy()
    slc_to_import.reset_ownership()
    params = slc_to_import.params_dict
    # Re-resolve the datasource against the local metadata database.
    datasource = ConnectorRegistry.get_datasource_by_name(
        session,
        slc_to_import.datasource_type,
        params["datasource_name"],
        params["schema"],
        params["database_name"],
    )
    slc_to_import.datasource_id = datasource.id
    if slc_to_override:
        # Update the previously imported slice in place, keep its id.
        slc_to_override.override(slc_to_import)
        session.flush()
        return slc_to_override.id
    session.add(slc_to_import)
    logger.info("Final slice: %s", str(slc_to_import.to_json()))
    session.flush()
    return slc_to_import.id
def import_dashboard(
    dashboard_to_import: Dashboard,
    dataset_id_mapping: Optional[Dict[int, int]] = None,
    import_time: Optional[int] = None,
    database_id: Optional[int] = None,
) -> int:
    """Import a dashboard (with all its slices) into the metadata database.

    Slices are matched against previously imported ones via the
    ``remote_id`` stored in their params; matched slices — and a dashboard
    whose params carry the same ``remote_id`` — are overridden in place
    instead of duplicated.

    Args:
        dashboard_to_import: deserialized Dashboard from the export file.
        dataset_id_mapping: maps remote (exported) dataset ids to the ids of
            the datasets created/updated by this import run.
            NOTE(review): declared Optional, but dereferenced unconditionally
            below — passing None raises AttributeError. Confirm callers.
        import_time: timestamp stamped into params of imported objects.
        database_id: when given, slice params are rewritten to point at this
            database.

    Returns:
        The id of the created or overridden dashboard.
    """

    def alter_positions(
        dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
    ) -> None:
        """Rewrite chart ids inside position_json to the newly imported ids."""
        position_data = json.loads(dashboard.position_json)
        position_json = position_data.values()
        for value in position_json:
            if (
                isinstance(value, dict)
                and value.get("meta")
                and value.get("meta", {}).get("chartId")
            ):
                old_slice_id = value["meta"]["chartId"]
                if old_slice_id in old_to_new_slc_id_dict:
                    value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
        dashboard.position_json = json.dumps(position_data)

    def alter_native_filters(dashboard: Dashboard) -> None:
        """Point native-filter targets at the newly imported dataset ids."""
        json_metadata = json.loads(dashboard.json_metadata)
        native_filter_configuration = json_metadata.get("native_filter_configuration")
        if not native_filter_configuration:
            return
        for native_filter in native_filter_configuration:
            for target in native_filter.get("targets", []):
                old_dataset_id = target.get("datasetId")
                if dataset_id_mapping and old_dataset_id is not None:
                    target["datasetId"] = dataset_id_mapping.get(
                        old_dataset_id, old_dataset_id,
                    )
        dashboard.json_metadata = json.dumps(json_metadata)

    logger.info("Started import of the dashboard: %s", dashboard_to_import.to_json())
    session = db.session
    logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
    # Copy the slices list: the Slice.dashboards relationship is mutated below.
    slices = copy(dashboard_to_import.slices)
    # Clear the slug to avoid unique-constraint conflicts.
    dashboard_to_import.slug = None
    old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")

    old_to_new_slc_id_dict: Dict[int, int] = {}
    new_timed_refresh_immune_slices = []
    new_expanded_slices = {}
    new_filter_scopes = {}
    i_params_dict = dashboard_to_import.params_dict
    # Slices created by an earlier import of the same export, keyed by their
    # remote id, restricted to the datasets touched by this import run.
    remote_id_slice_map = {
        slc.params_dict["remote_id"]: slc
        for slc in session.query(Slice)
        .filter(Slice.datasource_id.in_(list(dataset_id_mapping.values())))
        .all()
        if "remote_id" in slc.params_dict
    }
    for slc in slices:
        logger.info(
            "Importing slice %s from the dashboard: %s",
            slc.to_json(),
            dashboard_to_import.dashboard_title,
        )
        if database_id:
            database_name = (
                session.query(Database).filter(Database.id == database_id).first().name
            )
            slc.alter_params(database_name=database_name)
        remote_slc = remote_id_slice_map.get(slc.id)
        new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
        old_to_new_slc_id_dict[slc.id] = new_slc_id

        # Dashboard JSON metadata stores slice ids as strings.
        new_slc_id_str = str(new_slc_id)
        old_slc_id_str = str(slc.id)
        if (
            "timed_refresh_immune_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
        ):
            new_timed_refresh_immune_slices.append(new_slc_id_str)
        if (
            "expanded_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["expanded_slices"]
        ):
            new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
                old_slc_id_str
            ]

    # Old exports may still carry the legacy filter_immune_slices /
    # filter_immune_slice_fields metadata; convert it to the newer
    # filter_scopes format before remapping slice ids.
    # (This conditional was truncated/garbled in the previous revision and
    # could not parse; restored here.)
    filter_scopes = {}
    if (
        "filter_immune_slices" in i_params_dict
        or "filter_immune_slice_fields" in i_params_dict
    ):
        filter_scopes = convert_filter_scopes(old_json_metadata, slices)
    if "filter_scopes" in i_params_dict:
        filter_scopes = old_json_metadata.get("filter_scopes")
    # Replace old slice ids with the newly imported ones.
    if filter_scopes:
        new_filter_scopes = copy_filter_scopes(
            old_to_new_slc_id_dict=old_to_new_slc_id_dict,
            old_filter_scopes=filter_scopes,
        )

    # Override an existing dashboard created by a previous import of the
    # same remote dashboard, if any.
    existing_dashboard = None
    for dash in session.query(Dashboard).all():
        if (
            "remote_id" in dash.params_dict
            and dash.params_dict["remote_id"] == dashboard_to_import.id
        ):
            existing_dashboard = dash

    dashboard_to_import = dashboard_to_import.copy()
    dashboard_to_import.id = None
    dashboard_to_import.reset_ownership()
    if dashboard_to_import.position_json:
        alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
    dashboard_to_import.alter_params(import_time=import_time)
    # Legacy keys were converted to filter_scopes above; drop them.
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
    if new_filter_scopes:
        dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
    if new_expanded_slices:
        dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
    if new_timed_refresh_immune_slices:
        dashboard_to_import.alter_params(
            timed_refresh_immune_slices=new_timed_refresh_immune_slices
        )
    alter_native_filters(dashboard_to_import)

    new_slices = (
        session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
    )

    if existing_dashboard:
        existing_dashboard.override(dashboard_to_import)
        existing_dashboard.slices = new_slices
        session.flush()
        return existing_dashboard.id

    dashboard_to_import.slices = new_slices
    session.add(dashboard_to_import)
    session.flush()
    return dashboard_to_import.id
def decode_dashboards(
    o: Dict[str, Any]
) -> Any:
    """``json.loads`` object_hook that rebuilds Superset ORM objects.

    Dicts tagged with a ``__ClassName__`` key are turned into the matching
    model instance; ``__datetime__`` payloads become naive datetimes; any
    other dict is returned unchanged.
    """
    from superset.connectors.druid.models import (
        DruidCluster,
        DruidColumn,
        DruidDatasource,
        DruidMetric,
    )

    # Tag -> constructor, checked in the same order as the original chain.
    constructors = {
        "__Dashboard__": Dashboard,
        "__Slice__": Slice,
        "__TableColumn__": TableColumn,
        "__SqlaTable__": SqlaTable,
        "__SqlMetric__": SqlMetric,
        "__DruidCluster__": DruidCluster,
        "__DruidColumn__": DruidColumn,
        "__DruidDatasource__": DruidDatasource,
        "__DruidMetric__": DruidMetric,
    }
    for tag, build in constructors.items():
        if tag in o:
            return build(**o[tag])
    if "__datetime__" in o:
        return datetime.strptime(o["__datetime__"], "%Y-%m-%dT%H:%M:%S")
    return o
def import_dashboards(
    session: Session,
    content: str,
    database_id: Optional[int] = None,
    import_time: Optional[int] = None,
) -> None:
    """Import datasources and dashboards from a legacy JSON export string.

    Datasources are imported first (committed), building the remote->local
    dataset id mapping that dashboard import needs; dashboards follow in a
    second commit. Raises DashboardImportException on an empty payload.
    """
    if import_time is None:
        import_time = int(time.time())
    data = json.loads(content, object_hook=decode_dashboards)
    if not data:
        raise DashboardImportException(_("No data in file"))
    dataset_id_mapping: Dict[int, int] = {}
    for table in data["datasources"]:
        new_id = import_dataset(table, database_id, import_time=import_time)
        remote_id = json.loads(table.params)["remote_id"]
        dataset_id_mapping[remote_id] = new_id
    session.commit()
    for dashboard in data["dashboards"]:
        import_dashboard(
            dashboard,
            dataset_id_mapping,
            import_time=import_time,
            database_id=database_id,
        )
    session.commit()
class ImportDashboardsCommand(BaseCommand):
    """
    Import dashboards in the original, unversioned Superset JSON export
    format. ``contents`` maps file names to raw JSON payloads.
    """

    # pylint: disable=unused-argument
    def __init__(
        self, contents: Dict[str, str], database_id: Optional[int] = None, **kwargs: Any
    ):
        self.contents = contents
        self.database_id = database_id

    def run(self) -> None:
        """Validate all payloads, then import each file in turn."""
        self.validate()
        for file_name, content in self.contents.items():
            logger.info("Importing dashboard from file %s", file_name)
            import_dashboards(db.session, content, self.database_id)

    def validate(self) -> None:
        """Ensure every payload parses as JSON; re-raise ValueError if not."""
        for content in self.contents.values():
            try:
                json.loads(content)
            except ValueError:
                logger.exception("Invalid JSON file")
                raise
| true
| true
|
790d7dd48cc8d6d26ccd217555dd59ffcf548329
| 19,845
|
py
|
Python
|
nemo/collections/asr/metrics/rnnt_wer.py
|
Zenodia/NeMo
|
3c288d8a7caf667c95444c39434e3ebc5f53d911
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/asr/metrics/rnnt_wer.py
|
Zenodia/NeMo
|
3c288d8a7caf667c95444c39434e3ebc5f53d911
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/asr/metrics/rnnt_wer.py
|
Zenodia/NeMo
|
3c288d8a7caf667c95444c39434e3ebc5f53d911
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Optional, Union
import editdistance
import torch
from pytorch_lightning.metrics import Metric
from nemo.collections.asr.parts import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.utils import logging
__all__ = ['RNNTDecoding', 'RNNTWER']
class AbstractRNNTDecoding(ABC):
    """
    Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.

    Args:
        decoding_cfg: A dict-like object which contains the following key-value pairs.
            strategy: str value which represents the type of decoding that can occur.
                Possible values are :
                -   greedy, greedy_batch (for greedy decoding).
                -   beam, tsd, alsd (for beam search decoding).

            compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded
                tokens as well as the decoded string. Default is False in order to avoid double decoding
                unless required.

            The config may further contain the following sub-dictionaries:
            "greedy":
                max_symbols: int, describing the maximum number of target tokens to decode per
                    timestep during greedy decoding. Setting to larger values allows longer sentences
                    to be decoded, at the cost of increased execution time.

            "beam":
                beam_size: int, defining the beam size for beam search. Must be >= 1.
                    If beam_size == 1, will perform cached greedy search. This might be slightly different
                    results compared to the greedy search above.

                score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.
                    Set to True by default.

                return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
                    hypotheses after beam search has concluded. This flag is set by default.

                tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols
                    per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,
                    at increased cost to execution time.

                alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.
                    If an integer is provided, it can decode sequences of that particular maximum length.
                    If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),
                    where seq_len is the length of the acoustic model output (T).

                    NOTE:
                        If a float is provided, it can be greater than 1!
                        By default, a float of 2.0 is used so that a target sequence can be at most twice
                        as long as the acoustic model output length T.

        decoder: The Decoder/Prediction network module.
        joint: The Joint network module.
        blank_id: The id of the RNNT blank token.
    """

    def __init__(self, decoding_cfg, decoder, joint, blank_id: int):
        super(AbstractRNNTDecoding, self).__init__()
        self.cfg = decoding_cfg
        self.blank_id = blank_id
        # When True, Hypothesis.tokens is also filled with per-id token strings.
        self.compute_hypothesis_token_set = self.cfg.get("compute_hypothesis_token_set", False)

        possible_strategies = ['greedy', 'greedy_batch', 'beam', 'tsd', 'alsd']
        if self.cfg.strategy not in possible_strategies:
            raise ValueError(f"Decoding strategy must be one of {possible_strategies}")

        # Instantiate the inference implementation for the selected strategy.
        if self.cfg.strategy == 'greedy':
            # Sample-level greedy decoding.
            self.decoding = greedy_decode.GreedyRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                blank_index=self.blank_id,
                max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),
            )

        elif self.cfg.strategy == 'greedy_batch':
            # Batched greedy decoding.
            self.decoding = greedy_decode.GreedyBatchedRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                blank_index=self.blank_id,
                max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),
            )

        elif self.cfg.strategy == 'beam':
            # Default beam search.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='default',
                score_norm=self.cfg.beam.get('score_norm', True),
            )

        elif self.cfg.strategy == 'tsd':
            # Time-synchronous decoding.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='tsd',
                score_norm=self.cfg.beam.get('score_norm', True),
                tsd_max_sym_exp_per_step=self.cfg.beam.get('tsd_max_sym_exp', 50),
            )

        elif self.cfg.strategy == 'alsd':
            # Alignment-length synchronous decoding.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='alsd',
                score_norm=self.cfg.beam.get('score_norm', True),
                alsd_max_target_len=self.cfg.beam.get('alsd_max_target_len', 2),
            )

    def rnnt_decoder_predictions_tensor(
        self, encoder_output: torch.Tensor, encoded_lengths: torch.Tensor, return_hypotheses: bool = False
    ) -> (List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]):
        """
        Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.

        Args:
            encoder_output: torch.Tensor of shape [B, D, T].
            encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].
            return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses

        Returns:
            If `return_best_hypothesis` is set:
                A tuple (hypotheses, None):
                hypotheses - list of Hypothesis (best hypothesis per sample).
                    Look at rnnt_utils.Hypothesis for more information.

            If `return_best_hypothesis` is not set:
                A tuple(hypotheses, all_hypotheses)
                hypotheses - list of Hypothesis (best hypothesis per sample).
                    Look at rnnt_utils.Hypothesis for more information.
                all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted
                    list of all the hypotheses of the model per sample.
                    Look at rnnt_utils.NBestHypotheses for more information.
        """
        # Compute hypotheses
        with torch.no_grad():
            hypotheses_list = self.decoding(
                encoder_output=encoder_output, encoded_lengths=encoded_lengths
            )  # type: [List[Hypothesis]]

            # extract the hypotheses
            hypotheses_list = hypotheses_list[0]  # type: List[Hypothesis]

        prediction_list = hypotheses_list

        # Beam strategies may return NBestHypotheses per sample; greedy returns Hypothesis.
        if isinstance(prediction_list[0], NBestHypotheses):
            hypotheses = []
            all_hypotheses = []
            for nbest_hyp in prediction_list:  # type: NBestHypotheses
                n_hyps = nbest_hyp.n_best_hypotheses  # Extract all hypotheses for this sample
                decoded_hyps = self.decode_hypothesis(n_hyps)  # type: List[str]
                hypotheses.append(decoded_hyps[0])  # best hypothesis
                all_hypotheses.append(decoded_hyps)
            if return_hypotheses:
                return hypotheses, all_hypotheses
            best_hyp_text = [h.text for h in hypotheses]
            all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
            return best_hyp_text, all_hyp_text
        else:
            hypotheses = self.decode_hypothesis(prediction_list)  # type: List[str]
            if return_hypotheses:
                return hypotheses, None
            best_hyp_text = [h.text for h in hypotheses]
            return best_hyp_text, None

    def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:
        """
        Decode a list of hypotheses into a list of strings.

        Args:
            hypotheses_list: List of Hypothesis.

        Returns:
            A list of strings.
        """
        for ind in range(len(hypotheses_list)):
            # Extract the integer encoded hypothesis
            prediction = hypotheses_list[ind].y_sequence

            if type(prediction) != list:
                prediction = prediction.tolist()

            # RNN-T sample level is already preprocessed by implicit CTC decoding
            # Simply remove any blank tokens
            prediction = [p for p in prediction if p != self.blank_id]

            # De-tokenize the integer tokens
            hypothesis = self.decode_tokens_to_str(prediction)
            hypotheses_list[ind].text = hypothesis

            if self.compute_hypothesis_token_set:
                hypotheses_list[ind].tokens = self.decode_ids_to_tokens(prediction)
        return hypotheses_list

    @abstractmethod
    def decode_tokens_to_str(self, tokens: List[int]) -> str:
        """
        Implemented by subclass in order to decode a token id list into a string.

        Args:
            tokens: List of int representing the token ids.

        Returns:
            A decoded string.
        """
        raise NotImplementedError()

    @abstractmethod
    def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
        """
        Implemented by subclass in order to decode a token id list into a token list.
        A token list is the string representation of each token id.

        Args:
            tokens: List of int representing the token ids.

        Returns:
            A list of decoded tokens.
        """
        raise NotImplementedError()
class RNNTDecoding(AbstractRNNTDecoding):
    """
    Character-vocabulary RNN-T auto-regressive decoding of the Decoder+Joint
    network given the encoder state.

    See :class:`AbstractRNNTDecoding` for the full description of
    ``decoding_cfg`` (strategy selection plus the ``greedy`` and ``beam``
    sub-configs).

    Args:
        decoding_cfg: A dict-like decoding config (see AbstractRNNTDecoding).
        decoder: The Decoder/Prediction network module.
        joint: The Joint network module.
        vocabulary: The vocabulary (excluding the RNNT blank token) which will be used for decoding.
    """

    def __init__(
        self, decoding_cfg, decoder, joint, vocabulary,
    ):
        # Blank is assigned the index right after the vocabulary.
        blank_id = len(vocabulary)
        # id -> token string lookup table.
        self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])

        super(RNNTDecoding, self).__init__(decoding_cfg=decoding_cfg, decoder=decoder, joint=joint, blank_id=blank_id)

    def decode_tokens_to_str(self, tokens: List[int]) -> str:
        """
        Implemented by subclass in order to decode a token list into a string.

        Args:
            tokens: List of int representing the token ids.

        Returns:
            A decoded string.
        """
        hypothesis = ''.join([self.labels_map[c] for c in tokens if c != self.blank_id])
        return hypothesis

    def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
        """
        Implemented by subclass in order to decode a token id list into a token list.
        A token list is the string representation of each token id.

        Args:
            tokens: List of int representing the token ids.

        Returns:
            A list of decoded tokens.
        """
        token_list = [self.labels_map[c] for c in tokens if c != self.blank_id]
        return token_list
class RNNTWER(Metric):
    """
    This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference texts.
    When doing distributed training/evaluation the result of res=WER(predictions, targets, target_lengths) calls
    will be all-reduced between all workers using SUM operations.
    Here contains two numbers res=[wer_numerator, wer_denominator]. WER=wer_numerator/wer_denominator.

    If used with PytorchLightning LightningModule, include wer_numerator and wer_denominators inside validation_step results.
    Then aggregate (sum) then at the end of validation epoch to correctly compute validation WER.

    Example:
        def validation_step(self, batch, batch_idx):
            ...
            wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
            return {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}

        def validation_epoch_end(self, outputs):
            ...
            wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
            wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
            tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
            return {'val_loss': val_loss_mean, 'log': tensorboard_logs}

    Args:
        decoding: RNNTDecoding object that will perform autoregressive decoding of the RNNT model.
        batch_dim_index: Index of the batch dimension.
        use_cer: Whether to use Character Error Rate isntead of Word Error Rate.
        log_prediction: Whether to log a single decoded sample per call.

    Returns:
        res: a torch.Tensor object with two elements: [wer_numerator, wer_denominator]. To correctly compute average
        text word error rate, compute wer=wer_numerator/wer_denominator
    """

    def __init__(
        self, decoding: RNNTDecoding, batch_dim_index=0, use_cer=False, log_prediction=True, dist_sync_on_step=False
    ):
        super(RNNTWER, self).__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)
        self.decoding = decoding
        self.batch_dim_index = batch_dim_index
        self.use_cer = use_cer
        self.log_prediction = log_prediction
        self.blank_id = self.decoding.blank_id
        self.labels_map = self.decoding.labels_map

        # scores = summed edit distance (numerator); words = summed reference
        # length (denominator). Both are SUM-reduced across workers.
        self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
        self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)

    def update(
        self,
        encoder_output: torch.Tensor,
        encoded_lengths: torch.Tensor,
        targets: torch.Tensor,
        target_lengths: torch.Tensor,
    ) -> None:  # annotation fixed: this method accumulates state and returns None
        words = 0.0
        scores = 0.0
        references = []
        with torch.no_grad():
            # prediction_cpu_tensor = tensors[0].long().cpu()
            targets_cpu_tensor = targets.long().cpu()
            tgt_lenths_cpu_tensor = target_lengths.long().cpu()

            # iterate over batch: decode each reference up to its true length
            for ind in range(targets_cpu_tensor.shape[self.batch_dim_index]):
                tgt_len = tgt_lenths_cpu_tensor[ind].item()
                target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
                reference = self.decoding.decode_tokens_to_str(target)
                references.append(reference)

            hypotheses, _ = self.decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)

        if self.log_prediction:
            logging.info(f"\n")
            logging.info(f"reference :{references[0]}")
            logging.info(f"predicted :{hypotheses[0]}")

        for h, r in zip(hypotheses, references):
            if self.use_cer:
                # character-level units
                h_list = list(h)
                r_list = list(r)
            else:
                # word-level units
                h_list = h.split()
                r_list = r.split()
            words += len(r_list)
            # Compute Levenshtein's distance
            scores += editdistance.eval(h_list, r_list)

        self.scores += torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
        self.words += torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
        # return torch.tensor([scores, words]).to(predictions.device)

    def compute(self):
        # WER = accumulated edit distance / accumulated reference length.
        wer = self.scores.float() / self.words
        return wer, self.scores.detach(), self.words.detach()
| 45.308219
| 125
| 0.639153
|
from abc import ABC, abstractmethod
from typing import List, Optional, Union
import editdistance
import torch
from pytorch_lightning.metrics import Metric
from nemo.collections.asr.parts import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.utils import logging
__all__ = ['RNNTDecoding', 'RNNTWER']
class AbstractRNNTDecoding(ABC):
    """
    Abstract base class for RNN-T auto-regressive decoding of the
    Decoder+Joint network given the encoder states.

    The decoding strategy is selected by ``decoding_cfg.strategy``:
    'greedy' / 'greedy_batch' for greedy decoding, or 'beam' / 'tsd' / 'alsd'
    for beam-search variants. Strategy-specific options live in the
    ``greedy`` (max_symbols) and ``beam`` (beam_size, score_norm,
    return_best_hypothesis, tsd_max_sym_exp, alsd_max_target_len) sub-configs.

    Args:
        decoding_cfg: dict-like decoding configuration.
        decoder: the Decoder/Prediction network module.
        joint: the Joint network module.
        blank_id: id of the RNNT blank token.
    """

    def __init__(self, decoding_cfg, decoder, joint, blank_id: int):
        super(AbstractRNNTDecoding, self).__init__()
        self.cfg = decoding_cfg
        self.blank_id = blank_id
        # When True, Hypothesis.tokens is also filled with per-id token strings.
        self.compute_hypothesis_token_set = self.cfg.get("compute_hypothesis_token_set", False)

        possible_strategies = ['greedy', 'greedy_batch', 'beam', 'tsd', 'alsd']
        if self.cfg.strategy not in possible_strategies:
            raise ValueError(f"Decoding strategy must be one of {possible_strategies}")

        # Instantiate the inference implementation for the selected strategy.
        if self.cfg.strategy == 'greedy':
            # Sample-level greedy decoding.
            self.decoding = greedy_decode.GreedyRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                blank_index=self.blank_id,
                max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),
            )

        elif self.cfg.strategy == 'greedy_batch':
            # Batched greedy decoding.
            self.decoding = greedy_decode.GreedyBatchedRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                blank_index=self.blank_id,
                max_symbols_per_step=self.cfg.greedy.get('max_symbols', None),
            )

        elif self.cfg.strategy == 'beam':
            # Default beam search.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='default',
                score_norm=self.cfg.beam.get('score_norm', True),
            )

        elif self.cfg.strategy == 'tsd':
            # Time-synchronous decoding.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='tsd',
                score_norm=self.cfg.beam.get('score_norm', True),
                tsd_max_sym_exp_per_step=self.cfg.beam.get('tsd_max_sym_exp', 50),
            )

        elif self.cfg.strategy == 'alsd':
            # Alignment-length synchronous decoding.
            self.decoding = beam_decode.BeamRNNTInfer(
                decoder_model=decoder,
                joint_model=joint,
                beam_size=self.cfg.beam.beam_size,
                return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
                search_type='alsd',
                score_norm=self.cfg.beam.get('score_norm', True),
                alsd_max_target_len=self.cfg.beam.get('alsd_max_target_len', 2),
            )

    def rnnt_decoder_predictions_tensor(
        self, encoder_output: torch.Tensor, encoded_lengths: torch.Tensor, return_hypotheses: bool = False
    ) -> (List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]):
        """
        Decode an encoder output [B, D, T] by autoregressive decoding of the
        Decoder+Joint networks.

        Args:
            encoder_output: torch.Tensor of shape [B, D, T].
            encoded_lengths: lengths of the padded encoder outputs, shape [B].
            return_hypotheses: if True, return Hypothesis/NBestHypotheses
                objects instead of plain strings.

        Returns:
            A tuple (best, all): the best decoded result per sample, plus all
            beam hypotheses when the decoder returned n-best lists, else None
            (for strings: list-of-lists flattened into one list).
        """
        with torch.no_grad():
            hypotheses_list = self.decoding(
                encoder_output=encoder_output, encoded_lengths=encoded_lengths
            )
            # The decoder wraps its result in a 1-tuple; unwrap it.
            hypotheses_list = hypotheses_list[0]

        prediction_list = hypotheses_list

        # Beam strategies may return NBestHypotheses per sample; greedy returns Hypothesis.
        if isinstance(prediction_list[0], NBestHypotheses):
            hypotheses = []
            all_hypotheses = []
            for nbest_hyp in prediction_list:
                n_hyps = nbest_hyp.n_best_hypotheses
                decoded_hyps = self.decode_hypothesis(n_hyps)
                hypotheses.append(decoded_hyps[0])  # best hypothesis first
                all_hypotheses.append(decoded_hyps)
            if return_hypotheses:
                return hypotheses, all_hypotheses
            best_hyp_text = [h.text for h in hypotheses]
            all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
            return best_hyp_text, all_hyp_text
        else:
            hypotheses = self.decode_hypothesis(prediction_list)
            if return_hypotheses:
                return hypotheses, None
            best_hyp_text = [h.text for h in hypotheses]
            return best_hyp_text, None

    def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:
        """Fill in ``text`` (and optionally ``tokens``) on each Hypothesis by
        removing blanks and de-tokenizing its integer sequence."""
        for ind in range(len(hypotheses_list)):
            prediction = hypotheses_list[ind].y_sequence

            if type(prediction) != list:
                prediction = prediction.tolist()

            # Remove any blank tokens before de-tokenization.
            prediction = [p for p in prediction if p != self.blank_id]

            hypothesis = self.decode_tokens_to_str(prediction)
            hypotheses_list[ind].text = hypothesis

            if self.compute_hypothesis_token_set:
                hypotheses_list[ind].tokens = self.decode_ids_to_tokens(prediction)
        return hypotheses_list

    @abstractmethod
    def decode_tokens_to_str(self, tokens: List[int]) -> str:
        """Subclass hook: decode a token id list into a string."""
        raise NotImplementedError()

    @abstractmethod
    def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
        """Subclass hook: decode a token id list into per-token strings."""
        raise NotImplementedError()
class RNNTDecoding(AbstractRNNTDecoding):
    """
    Character-vocabulary RNN-T decoding of the Decoder+Joint network.

    Args:
        decoding_cfg: dict-like decoding config (see AbstractRNNTDecoding).
        decoder: the Decoder/Prediction network module.
        joint: the Joint network module.
        vocabulary: the vocabulary (excluding the RNNT blank token).
    """

    def __init__(
        self, decoding_cfg, decoder, joint, vocabulary,
    ):
        # Blank is assigned the index right after the vocabulary.
        blank_id = len(vocabulary)
        # id -> token string lookup table.
        self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])

        super(RNNTDecoding, self).__init__(decoding_cfg=decoding_cfg, decoder=decoder, joint=joint, blank_id=blank_id)

    def decode_tokens_to_str(self, tokens: List[int]) -> str:
        """Join token strings into the final transcript, dropping blanks."""
        hypothesis = ''.join([self.labels_map[c] for c in tokens if c != self.blank_id])
        return hypothesis

    def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
        """Map token ids to their string form, dropping blanks."""
        token_list = [self.labels_map[c] for c in tokens if c != self.blank_id]
        return token_list
class RNNTWER(Metric):
    """
    Word (or character) error rate metric for RNN-T models.

    Accumulates the edit-distance numerator (``scores``) and the
    reference-length denominator (``words``) across ``update()`` calls; both
    states are SUM-reduced across workers. ``compute()`` returns
    (wer, scores, words) where wer = scores / words. Decoding of the encoder
    output is delegated to the given RNNTDecoding instance.

    Args:
        decoding: RNNTDecoding object performing autoregressive decoding.
        batch_dim_index: index of the batch dimension in ``targets``.
        use_cer: use Character Error Rate instead of Word Error Rate.
        log_prediction: log one decoded (reference, prediction) pair per call.
    """

    def __init__(
        self, decoding: RNNTDecoding, batch_dim_index=0, use_cer=False, log_prediction=True, dist_sync_on_step=False
    ):
        super(RNNTWER, self).__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)
        self.decoding = decoding
        self.batch_dim_index = batch_dim_index
        self.use_cer = use_cer
        self.log_prediction = log_prediction
        self.blank_id = self.decoding.blank_id
        self.labels_map = self.decoding.labels_map

        # Numerator (edit distance) and denominator (reference length) states.
        self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
        self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)

    def update(
        self,
        encoder_output: torch.Tensor,
        encoded_lengths: torch.Tensor,
        targets: torch.Tensor,
        target_lengths: torch.Tensor,
    ) -> None:  # annotation fixed: accumulates state, returns None
        words = 0.0
        scores = 0.0
        references = []
        with torch.no_grad():
            targets_cpu_tensor = targets.long().cpu()
            tgt_lenths_cpu_tensor = target_lengths.long().cpu()

            # Decode each reference, trimmed to its true (unpadded) length.
            for ind in range(targets_cpu_tensor.shape[self.batch_dim_index]):
                tgt_len = tgt_lenths_cpu_tensor[ind].item()
                target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
                reference = self.decoding.decode_tokens_to_str(target)
                references.append(reference)

            hypotheses, _ = self.decoding.rnnt_decoder_predictions_tensor(encoder_output, encoded_lengths)

        if self.log_prediction:
            logging.info(f"\n")
            logging.info(f"reference :{references[0]}")
            logging.info(f"predicted :{hypotheses[0]}")

        for h, r in zip(hypotheses, references):
            if self.use_cer:
                # character-level units
                h_list = list(h)
                r_list = list(r)
            else:
                # word-level units
                h_list = h.split()
                r_list = r.split()
            words += len(r_list)
            # Levenshtein distance between hypothesis and reference.
            scores += editdistance.eval(h_list, r_list)

        self.scores += torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
        self.words += torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
        # return torch.tensor([scores, words]).to(predictions.device)

    def compute(self):
        # WER = accumulated edit distance / accumulated reference length.
        wer = self.scores.float() / self.words
        return wer, self.scores.detach(), self.words.detach()
| true
| true
|
790d7ea7e04de4db2eb42eabb7345b913f10bb3e
| 3,115
|
py
|
Python
|
stdplugins/new.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 55
|
2019-07-13T15:57:54.000Z
|
2021-09-20T16:50:42.000Z
|
stdplugins/new.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 4
|
2020-11-07T07:39:51.000Z
|
2020-11-10T03:46:41.000Z
|
stdplugins/new.py
|
dqanshi/PornHub
|
162a7053ca7f2c0b3617b852559cfaf0502d94a7
|
[
"Apache-2.0"
] | 450
|
2019-07-12T13:18:41.000Z
|
2022-03-29T18:47:42.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import string
from telethon import events
from telethon.utils import add_surrogate
from telethon.tl.types import MessageEntityPre
from telethon.tl.tlobject import TLObject
import datetime
# Byte values repr() shows readably; decides repr vs hex display for bytes.
PRINTABLE_SET = set(bytes(string.printable, 'ascii'))
# Truncation limits for the YAML-style dump below.
STR_LEN_MAX = 256
BYTE_LEN_MAX = 64
def parse_pre(text):
    """Custom Telethon parse mode: wrap the whole (stripped) message in a
    single ``pre`` entity so Telegram renders it as one code block."""
    stripped = text.strip()
    entity = MessageEntityPre(
        offset=0,
        length=len(add_surrogate(stripped)),
        language='potato',
    )
    return stripped, [entity]
def yaml_format(obj, indent=0):
    """
    Pretty formats the given object as a YAML string which is returned.
    (based on TLObject.pretty_format)

    :param obj: value to render. TLObjects are converted with ``to_dict()``;
        dicts, str, bytes, datetime and generic iterables are rendered
        recursively; anything else falls back to ``repr()``.
    :param indent: current indentation width in spaces (recursion state).
    :return: the formatted string.
    """
    result = []
    if isinstance(obj, TLObject):
        obj = obj.to_dict()

    if isinstance(obj, dict):
        # Header line: TL constructor name, or 'dict' for plain dicts.
        result.append(obj.get('_', 'dict') + ':')
        if obj:
            items = obj.items()
            # More than two entries -> one entry per line, indented.
            has_multiple_items = len(items) > 2
            if has_multiple_items:
                result.append('\n')
                indent += 2
            for k, v in items:
                if k == '_' or v is None:
                    continue
                formatted = yaml_format(v, indent)
                if not formatted.strip():
                    continue
                result.append(' ' * (indent if has_multiple_items else 1))
                result.append(f'{k}: {formatted}')
                result.append('\n')
            # Remove the trailing newline — but only if one was actually
            # written. If every entry was filtered out above (hidden '_'
            # keys, None values, blank renderings), the original
            # unconditional pop() discarded the header line instead.
            if result[-1] == '\n':
                result.pop()
            indent -= 2
            result.append(' ' * indent)
    elif isinstance(obj, str):
        # truncate long strings and display elipsis
        result.append(repr(obj[:STR_LEN_MAX]))
        if len(obj) > STR_LEN_MAX:
            result.append('…')
    elif isinstance(obj, bytes):
        # repr() bytes if it's printable, hex like "FF EE BB" otherwise
        if all(c in PRINTABLE_SET for c in obj):
            result.append(repr(obj))
        else:
            if len(obj) > BYTE_LEN_MAX:
                result.append('<…>')
            else:
                result.append(' '.join(f'{b:02X}' for b in obj))
    elif isinstance(obj, datetime.datetime):
        # ISO-8601 without timezone offset (telethon dates are always UTC)
        result.append(obj.strftime('%Y-%m-%d %H:%M:%S'))
    elif hasattr(obj, '__iter__'):
        # display iterables one after another at the base indentation level
        result.append('\n')
        indent += 2
        for x in obj:
            result.append(' ' * indent)
            result.append(yaml_format(x, indent))
            result.append('\n')
        result.pop()  # last append is always '\n' here (even for empty obj)
        indent -= 2
        result.append(' ' * indent)
    else:
        result.append(repr(obj))
    return ''.join(result)
@borg.on(events.NewMessage(pattern=r"\.new", outgoing=True))
async def _(event):
    """Replace an outgoing ``.new`` command with a YAML dump of the
    message it replies to, rendered as a code block via ``parse_pre``."""
    if not event.message.is_reply:
        return
    replied = await event.message.get_reply_message()
    await event.edit(yaml_format(replied), parse_mode=parse_pre)
| 31.464646
| 88
| 0.579775
|
import string
from telethon import events
from telethon.utils import add_surrogate
from telethon.tl.types import MessageEntityPre
from telethon.tl.tlobject import TLObject
import datetime
PRINTABLE_SET = set(bytes(string.printable, 'ascii'))
STR_LEN_MAX = 256
BYTE_LEN_MAX = 64
def parse_pre(text):
    """Telethon parse mode that wraps the whole stripped text in one
    ``pre`` (code block) entity and returns ``(text, entities)``."""
    text = text.strip()
    return (
        text,
        [MessageEntityPre(offset=0, length=len(add_surrogate(text)), language='potato')]
    )
def yaml_format(obj, indent=0):
    """Pretty-format *obj* as a YAML-like string (recursive).

    TLObjects are converted via ``to_dict()``; dicts, strings, bytes,
    datetimes and other iterables each have a dedicated rendering;
    anything else falls back to ``repr()``.
    """
    result = []
    if isinstance(obj, TLObject):
        obj = obj.to_dict()
    if isinstance(obj, dict):
        # header line: TL constructor name, or 'dict' for plain dicts
        result.append(obj.get('_', 'dict') + ':')
        if obj:
            items = obj.items()
            # more than two entries -> one entry per line, indented
            has_multiple_items = len(items) > 2
            if has_multiple_items:
                result.append('\n')
                indent += 2
            for k, v in items:
                # hidden constructor key and empty values are skipped
                if k == '_' or v is None:
                    continue
                formatted = yaml_format(v, indent)
                if not formatted.strip():
                    continue
                result.append(' ' * (indent if has_multiple_items else 1))
                result.append(f'{k}: {formatted}')
                result.append('\n')
            result.pop()  # drop the trailing newline
            indent -= 2
            result.append(' ' * indent)
    elif isinstance(obj, str):
        # truncate long strings, appending an ellipsis marker
        result.append(repr(obj[:STR_LEN_MAX]))
        if len(obj) > STR_LEN_MAX:
            result.append('…')
    elif isinstance(obj, bytes):
        # printable bytes -> repr(); otherwise hex dump (or a placeholder when long)
        if all(c in PRINTABLE_SET for c in obj):
            result.append(repr(obj))
        else:
            if len(obj) > BYTE_LEN_MAX:
                result.append('<…>')
            else:
                result.append(' '.join(f'{b:02X}' for b in obj))
    elif isinstance(obj, datetime.datetime):
        # ISO-8601 without timezone offset (telethon dates are always UTC)
        result.append(obj.strftime('%Y-%m-%d %H:%M:%S'))
    elif hasattr(obj, '__iter__'):
        # display iterables one after another at the base indentation level
        result.append('\n')
        indent += 2
        for x in obj:
            result.append(' ' * indent)
            result.append(yaml_format(x, indent))
            result.append('\n')
        result.pop()
        indent -= 2
        result.append(' ' * indent)
    else:
        result.append(repr(obj))
    return ''.join(result)
@borg.on(events.NewMessage(pattern=r"\.new", outgoing=True))
async def _(event):
    """On an outgoing ``.new`` sent as a reply, edit the command message
    into a YAML dump of the replied-to message (shown as a ``pre`` block)."""
    if not event.message.is_reply:
        return
    msg = await event.message.get_reply_message()
    yaml_text = yaml_format(msg)
    await event.edit(
        yaml_text,
        parse_mode=parse_pre
    )
| true
| true
|
790d7f07d31c0347f7b6720bbb957b85cd61094c
| 472
|
py
|
Python
|
47 Setters_Property Decorators/main1.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | 3
|
2021-05-08T13:11:41.000Z
|
2021-05-14T02:43:20.000Z
|
47 Setters_Property Decorators/main1.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | null | null | null |
47 Setters_Property Decorators/main1.py
|
codewithsandy/Python-Basic-Exp
|
4c70ada4a042923a94301453c7bd76e704cd2989
|
[
"MIT"
] | null | null | null |
class Employee:
    """An employee identified by first and last name.

    ``email()`` and ``explain()`` are plain methods, so their results
    always reflect the *current* attribute values — the point of the
    property-decorator lesson this file belongs to.
    """

    def __init__(self, fname, lname):
        """Remember the employee's first and last name."""
        self.fname = fname
        self.lname = lname
        # (a precomputed self.email attribute would go stale on rename)

    def explain(self):
        """Return a one-line description of this employee."""
        who = f"{self.fname} {self.lname}"
        return f"This employee is {who}"

    def email(self):
        """Derive the e-mail address from the current name attributes."""
        handle = f"{self.fname}.{self.lname}"
        return f"{handle} @parker.com"
# Demo: email() recomputes the address from the current attributes on
# every call, so renaming the employee changes the result.
obj1 = Employee("Peter", "Parkar")
print(obj1.email())
obj1.fname = "Spider"
print(obj1.email())  # email() must be called again to observe the new name
| 24.842105
| 68
| 0.616525
|
class Employee:
    """Employee with a first and last name; the e-mail address is derived
    on demand rather than stored."""
    def __init__(self, fname, lname):
        # first / last name; email() recomputes from these on every call
        self.fname = fname
        self.lname = lname
    def explain(self):
        """Return a one-line description of this employee."""
        return f"This employee is {self.fname} {self.lname}"
    def email(self):
        """Build the e-mail address from the current name attributes."""
        return f"{self.fname}.{self.lname} @parker.com"
# Changing fname changes what email() returns, because the address is
# computed per call instead of being stored at construction time.
obj1 = Employee("Peter", "Parkar")
print(obj1.email())
obj1.fname = "Spider"
print(obj1.email())
| true
| true
|
790d7f13e8eacdb2870107a09124364cf34f5df9
| 23,996
|
py
|
Python
|
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2
|
2018-09-20T03:28:46.000Z
|
2018-12-06T21:39:29.000Z
|
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 2
|
2021-06-02T02:24:03.000Z
|
2021-11-15T17:51:49.000Z
|
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py
|
ahmedengu/h2o-3
|
ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11
|
[
"Apache-2.0"
] | 1
|
2020-04-17T13:06:26.000Z
|
2020-04-17T13:06:26.000Z
|
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_random_grid_search:
    """
    This class is created to test the three stopping conditions for randomized gridsearch using
    GLM Binomial family. The three stopping conditions are:

    1. max_runtime_secs:
    2. max_models:
    3. metrics. We will be picking 2 stopping metrics to test this stopping condition with. One metric
    will be optimized if it increases and the other one should be optimized if it decreases.

    I have written 4 tests:
    1. test1_glm_random_grid_search_model_number: this test will not put any stopping conditions
    on randomized search. The purpose here is to make sure that randomized search will give us all possible
    hyper-parameter combinations.
    2. test2_glm_random_grid_search_max_model: this test the stopping condition of setting the max_model in
    search criteria;
    3. test3_glm_random_grid_search_max_runtime_secs: this test the stopping condition max_runtime_secs
    in search criteria;
    4. test4_glm_random_grid_search_metric: this test the stopping condition of using a metric which can be
    increasing or decreasing.
    """

    # parameters set by users, change with care
    curr_time = str(round(time.time()))

    # parameters denoting filenames of interested that store training/validation/test data sets in csv format
    training1_filename = "smalldata/gridsearch/gaussian_training1_set.csv"
    json_filename = "random_gridsearch_GLM_Gaussian_hyper_parameter_" + curr_time + ".json"

    allowed_diff = 0.5          # error tolerance allowed
    allowed_time_diff = 1e-1    # fraction of max_runtime_secs allowed for max run time stopping criteria

    # System parameters, do not change. Dire consequences may follow if you do
    # NOTE(review): derived from sys.argv[1], not __file__ — relies on the h2o
    # test runner's invocation convention; confirm before reusing elsewhere.
    current_dir = os.path.dirname(os.path.realpath(sys.argv[1]))    # directory of this test file

    train_row_count = 0         # training data row count, randomly generated later
    train_col_count = 0         # training data column count, randomly generated later

    max_int_val = 1000          # maximum size of random integer values
    min_int_val = 0             # minimum size of random integer values
    max_int_number = 3          # maximum number of integer random grid values to generate

    max_real_val = 1            # maximum size of random float values
    min_real_val = 0.0          # minimum size of random float values
    max_real_number = 3         # maximum number of real grid values to generate

    lambda_scale = 100          # scale lambda value to be from 0 to 100 instead of 0 to 1
    max_runtime_scale = 3       # scale the max runtime to be different from 0 to 1

    one_model_time = 0          # time taken to build one barebone model
    possible_number_models = 0  # possible number of models built based on hyper-parameter specification
    max_model_number = 0        # maximum number of models specified to test for stopping conditions, generated later
    max_grid_runtime = 1        # maximum runtime value in seconds, 1 minute max
    allowed_scaled_overtime = 1     # used to set max_allowed_runtime as allowed_scaled_overtime * total model run time
    allowed_scaled_time = 1         # how much to scale back max time
    allowed_scaled_model_number = 1.5   # used to set max_model_number as
                                        # possible_number_models * allowed_scaled_model_number
    max_stopping_rounds = 5     # maximum stopping rounds allowed to be used for early stopping metric
    max_tolerance = 0.01        # maximum tolerance to be used for early stopping metric

    family = 'gaussian'         # set gaussian as default

    test_name = "pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py"    # name of this test
    sandbox_dir = ""            # sandbox directory where we are going to save our failed test data sets

    # store information about training/test data sets
    x_indices = []              # store predictor indices in the data set
    y_index = 0                 # store response index in the data set
    training1_data = []         # store training data sets

    total_test_number = 5       # number of tests carried out
    test_failed = 0             # count total number of tests that have failed
    test_failed_array = [0]*total_test_number   # denote test results for all tests run. 1 error, 0 pass
    test_num = 0                # index representing which test is being run

    # give the user opportunity to pre-assign hyper parameters for fixed values
    hyper_params = {}

    # parameters to be excluded from hyper parameter list even though they may be gridable
    exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power']   # do not need these

    # these are supposed to be gridable but not really
    exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])

    # these are excluded for extracting parameters to manually build H2O GLM models
    exclude_parameter_lists.extend(['model_id'])

    gridable_parameters = []    # store griddable parameter names
    gridable_types = []         # store the corresponding griddable parameter types
    gridable_defaults = []      # store the gridabble parameter default values

    correct_model_number = 0    # count number of models built with correct hyper-parameter specification
    nfolds = 5                  # enable cross validation to test fold_assignment

    def __init__(self, family):
        """
        Constructor.

        :param family: distribution family for tests
        :return: None
        """
        self.setup_data()           # setup_data training data
        self.setup_grid_params()    # setup_data grid hyper-parameters

    def setup_data(self):
        """
        This function performs all initializations necessary:
        load the data sets and set the training set indices and response column index
        """
        # clean out the sandbox directory first
        self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)

        # preload data sets
        self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))

        # set data set indices for predictors and response
        self.y_index = self.training1_data.ncol-1
        self.x_indices = list(range(self.y_index))

        # save the training data files just in case the code crashed.
        pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)

    def setup_grid_params(self):
        """
        This function setup the randomized gridsearch parameters that will be used later on:
        1. It will first try to grab all the parameters that are griddable and parameters used by GLM.
        2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters that are used by GLM that are denoted as griddable but actually is not.
        These parameters have to be discovered manually and they are captured in self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
        For enums, we will include all of them.

        :return: None
        """
        # build bare bone model to get all parameters
        model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
        model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        self.one_model_time = pyunit_utils.find_grid_runtime([model])  # find model train time
        print("Time taken to build a base barebone model is {0}".format(self.one_model_time))

        # grab all gridable parameters and its type
        (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.get_gridables(model._model_json["parameters"])

        # give the user opportunity to pre-assign hyper parameters for fixed values
        self.hyper_params = {}
        self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
        self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']

        # randomly generate griddable parameters
        (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
            pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
                                         self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                         random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
                                         random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)

        # change the value of lambda parameters to be from 0 to self.lambda_scale instead of 0 to 1.
        if "lambda" in list(self.hyper_params):
            self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]

        time_scale = self.max_runtime_scale * self.one_model_time
        # change the value of runtime parameters to be from 0 to time_scale instead of 0 to 1.
        if "max_runtime_secs" in list(self.hyper_params):
            self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
                                                     self.hyper_params["max_runtime_secs"]]

        # number of possible models being built:
        self.possible_number_models = pyunit_utils.count_models(self.hyper_params)

        # save hyper-parameters in sandbox and current test directories.
        pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                                 self.hyper_params)

    def tear_down(self):
        """
        This function performs teardown after the dynamic test is completed. If all tests
        passed, it will delete all data sets generated since they can be quite large. It
        will move the training/validation/test data sets into a Rsandbox directory so that
        we can re-run the failed test.
        """
        if self.test_failed:    # some tests have failed. Need to save data sets for later re-runs
            # create Rsandbox directory to keep data sets and weight information
            self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)

            # Do not want to save all data sets. Only save data sets that are needed for failed tests.
            # (fix: the original referenced self.training1_data_file, which is never assigned anywhere
            # in this class and would raise AttributeError; resolve the csv as setup_data() does)
            pyunit_utils.move_files(self.sandbox_dir, pyunit_utils.locate(self.training1_filename),
                                    self.training1_filename)

            # write out the jenkins job info into log files.
            json_file = os.path.join(self.sandbox_dir, self.json_filename)

            # fix: json.dump writes str on Python 3, so the file must be opened in
            # text mode ('w'); the original 'wb' raised TypeError on this path
            with open(json_file, 'w') as test_file:
                json.dump(self.hyper_params, test_file)
        else:   # all tests have passed. Delete sandbox if if was not wiped before
            pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)

    def test1_glm_random_grid_search_model_number(self, metric_name):
        """
        This test is used to make sure the randomized gridsearch will generate all models specified in the
        hyperparameters if no stopping condition is given in the search criterion.

        :param metric_name: string to denote what grid search model should be sort by (currently unused,
            kept for interface compatibility with the test driver)
        :return: None
        """
        print("*******************************************************************************************")
        print("test1_glm_random_grid_search_model_number for GLM " + self.family)
        h2o.cluster_info()

        # setup_data our stopping condition here, random discrete and find all models
        search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": round(time.time())}
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        random_grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        # compare number of models built from both gridsearch
        if not (len(random_grid_model) == self.possible_number_models):
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated"
                  "possible model number {0} and randomized gridsearch model number {1} are not "
                  "equal.".format(self.possible_number_models, len(random_grid_model)))
        else:
            # record total grid runtime; test3 uses it to pick its time limit
            self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model)

        if self.test_failed_array[self.test_num] == 0:
            print("test1_glm_random_grid_search_model_number for GLM: passed!")

        self.test_num += 1
        sys.stdout.flush()

    def test2_glm_random_grid_search_max_model(self):
        """
        This test is used to test the stopping condition max_model_number in the randomized gridsearch. The
        max_models parameter is randomly generated. If it is higher than the actual possible number of models
        that can be generated with the current hyper-space parameters, randomized grid search should generate
        all the models. Otherwise, grid search shall return a model that equals to the max_model setting.
        """
        print("*******************************************************************************************")
        print("test2_glm_random_grid_search_max_model for GLM " + self.family)
        h2o.cluster_info()

        # setup_data our stopping condition here
        self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
        search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
                           "seed": round(time.time())}
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
        print("Possible number of models built is {0}".format(self.possible_number_models))

        # fire off random grid-search
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        number_model_built = len(grid_model)  # count actual number of models built
        print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
                                                                                 number_model_built))

        if self.possible_number_models >= self.max_model_number:  # stopping condition restricts model number
            if not (number_model_built == self.max_model_number):
                print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} "
                      "does not match stopping condition number{1}.".format(number_model_built, self.max_model_number))
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
            else:
                print("test2_glm_random_grid_search_max_model for GLM: passed.")
        else:   # stopping condition is too loose
            if not (number_model_built == self.possible_number_models):
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal "
                      "to possible model number {1}.".format(number_model_built, self.possible_number_models))
            else:
                print("test2_glm_random_grid_search_max_model for GLM: passed.")

        self.test_num += 1
        sys.stdout.flush()

    def test3_glm_random_grid_search_max_runtime_secs(self):
        """
        This function will test the stopping criteria max_runtime_secs. For each model built, the field
        run_time actually denote the time in ms used to build the model. We will add up the run_time from all
        models and check against the stopping criteria max_runtime_secs. Since each model will check its run time
        differently, there is some inaccuracies in the actual run time. For example, if we give a model 10 ms to
        build. The GLM may check and see if it has used up all the time for every 10 epochs that it has run. On
        the other hand, deeplearning may check the time it has spent after every epoch of training.

        If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain
        percentage, we will consider the test a success.

        :return: None
        """
        print("*******************************************************************************************")
        print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
        h2o.cluster_info()

        # per-model max_runtime_secs would interfere with the grid-level limit tested here
        if "max_runtime_secs" in list(self.hyper_params):
            del self.hyper_params['max_runtime_secs']

            # number of possible models being built:
            self.possible_number_models = pyunit_utils.count_models(self.hyper_params)

        # setup_data our stopping condition here
        max_run_time_secs = random.uniform(self.one_model_time, self.allowed_scaled_time*self.max_grid_runtime)
        search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
                           "seed": round(time.time())}
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))

        # fire off random grid-search
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)

        print("Maximum time limit is {0}. Time taken to build all model is "
              "{1}".format(search_criteria["max_runtime_secs"], actual_run_time_secs))
        print("Maximum model number is {0}. Actual number of models built is {1}".format(self.possible_number_models,
                                                                                         len(grid_model)))

        if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")

            if len(grid_model) > self.possible_number_models:   # generate too many models, something is wrong
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models "
                      " which exceeds maximum possible model number {1}".format(len(grid_model),
                                                                               self.possible_number_models))
        elif len(grid_model) == 1:  # will always generate 1 model
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0}"
                  " seconds which exceeds allowed time {1}".format(actual_run_time_secs,
                                                                  max_run_time_secs*(1+self.allowed_diff)))
        self.test_num += 1
        sys.stdout.flush()

    def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
        """
        This function will test the last stopping condition using metrics.

        :param metric_name: metric we want to use to test the last stopping condition
        :param bigger_is_better: higher metric value indicates better model performance

        :return: None
        """
        print("*******************************************************************************************")
        print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
        h2o.cluster_info()

        search_criteria = {
            "strategy": "RandomDiscrete",
            "stopping_metric": metric_name,
            "stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
            "stopping_rounds": random.randint(1, self.max_stopping_rounds),
            "seed": round(time.time())
        }
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))

        # add max_runtime_secs back into hyper-parameters to limit model runtime.
        self.hyper_params["max_runtime_secs"] = [0.3]   # arbitrarily limit each model to 0.3 second

        # fire off random grid-search
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)

        # bool indicating if randomized grid search has calculated the early stopping condition correctly
        stopped_correctly = \
            pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria,
                                                   self.possible_number_models)

        if stopped_correctly:
            print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")

        self.test_num += 1
def test_random_grid_search_for_glm():
    """Drive the randomized grid-search tests for the GLM Gaussian family.

    Builds the test fixture, runs the four stopping-condition tests, then
    exits with status 1 if any test failed; on success the generated
    hyper-parameter JSON file is removed.

    :return: None
    """
    suite = Test_glm_random_grid_search("gaussian")
    # test1 must run first: it records the grid runtime that test3 relies on.
    suite.test1_glm_random_grid_search_model_number("mse(xval=True)")
    suite.test2_glm_random_grid_search_max_model()
    suite.test3_glm_random_grid_search_max_runtime_secs()
    suite.test4_glm_random_grid_search_metric("MSE", False)
    # NOTE: "r2" is no longer a supported stopping metric and tear_down()
    # is obsolete, so neither is invoked here.

    # exit with error if any tests have failed
    if suite.test_failed > 0:
        sys.exit(1)
    pyunit_utils.remove_files(os.path.join(suite.current_dir,
                                           suite.json_filename))
# h2o pyunit convention: run as a standalone test when executed directly;
# when imported by the test runner, the test executes at import time.
if __name__ == "__main__":
    pyunit_utils.standalone_test(test_random_grid_search_for_glm)
else:
    test_random_grid_search_for_glm()
| 53.324444
| 121
| 0.67257
|
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_random_grid_search:
curr_time = str(round(time.time()))
training1_filename = "smalldata/gridsearch/gaussian_training1_set.csv"
json_filename = "random_gridsearch_GLM_Gaussian_hyper_parameter_" + curr_time + ".json"
allowed_diff = 0.5
allowed_time_diff = 1e-1
current_dir = os.path.dirname(os.path.realpath(sys.argv[1]))
train_row_count = 0
train_col_count = 0
max_int_val = 1000
min_int_val = 0
max_int_number = 3
max_real_val = 1
min_real_val = 0.0
max_real_number = 3
lambda_scale = 100
max_runtime_scale = 3
one_model_time = 0
possible_number_models = 0
max_model_number = 0
max_grid_runtime = 1
allowed_scaled_overtime = 1
allowed_scaled_time = 1
allowed_scaled_model_number = 1.5
max_stopping_rounds = 5
max_tolerance = 0.01
family = 'gaussian'
test_name = "pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py"
sandbox_dir = ""
x_indices = []
y_index = 0
training1_data = []
total_test_number = 5
test_failed = 0
test_failed_array = [0]*total_test_number
test_num = 0
hyper_params = {}
exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power']
exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])
exclude_parameter_lists.extend(['model_id'])
gridable_parameters = []
gridable_types = []
gridable_defaults = []
correct_model_number = 0
nfolds = 5
def __init__(self, family):
self.setup_data()
self.setup_grid_params()
def setup_data(self):
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_grid_params(self):
model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.one_model_time = pyunit_utils.find_grid_runtime([model])
print("Time taken to build a base barebone model is {0}".format(self.one_model_time))
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
self.hyper_params = {}
self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)
if "lambda" in list(self.hyper_params):
self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]
time_scale = self.max_runtime_scale * self.one_model_time
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
self.hyper_params["max_runtime_secs"]]
self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.hyper_params)
def tear_down(self):
if self.test_failed:
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename)
json_file = os.path.join(self.sandbox_dir, self.json_filename)
with open(json_file,'wb') as test_file:
json.dump(self.hyper_params, test_file)
else:
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
def test1_glm_random_grid_search_model_number(self, metric_name):
print("*******************************************************************************************")
print("test1_glm_random_grid_search_model_number for GLM " + self.family)
h2o.cluster_info()
search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": round(time.time())}
print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
random_grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
if not (len(random_grid_model) == self.possible_number_models):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated"
"possible model number {0} and randomized gridsearch model number {1} are not "
"equal.".format(self.possible_number_models, len(random_grid_model)))
else:
self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model)
if self.test_failed_array[self.test_num] == 0:
print("test1_glm_random_grid_search_model_number for GLM: passed!")
self.test_num += 1
sys.stdout.flush()
    def test2_glm_random_grid_search_max_model(self):
        """Check that the 'max_models' search criterion caps how many models are built.

        Picks a random cap and verifies the grid builds exactly
        min(cap, self.possible_number_models) models.
        """
        print("*******************************************************************************************")
        print("test2_glm_random_grid_search_max_model for GLM " + self.family)
        h2o.cluster_info()
        # random cap, scaled so it stays within a fraction of the full grid size
        self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
        search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
                           "seed": round(time.time())}
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
        print("Possible number of models built is {0}".format(self.possible_number_models))
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
        number_model_built = len(grid_model)
        print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
                                                                                 number_model_built))
        if self.possible_number_models >= self.max_model_number:
            # the cap binds: exactly max_model_number models should exist
            if not (number_model_built == self.max_model_number):
                print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} "
                      "does not match stopping condition number{1}.".format(number_model_built, self.max_model_number))
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
            else:
                print("test2_glm_random_grid_search_max_model for GLM: passed.")
        else:
            # the grid is smaller than the cap: every possible model should be built
            if not (number_model_built == self.possible_number_models):
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal "
                      "to possible model number {1}.".format(number_model_built, self.possible_number_models))
            else:
                print("test2_glm_random_grid_search_max_model for GLM: passed.")
        self.test_num += 1
        sys.stdout.flush()
    def test3_glm_random_grid_search_max_runtime_secs(self):
        """Check that the 'max_runtime_secs' search criterion bounds total grid runtime.

        Removes any per-model max_runtime_secs hyper-parameter (so only the
        search criterion limits time), then verifies either the measured grid
        runtime stays within the allowed tolerance, or exactly one model was
        built (a single model may legitimately overshoot the budget).
        """
        print("*******************************************************************************************")
        print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
        h2o.cluster_info()
        if "max_runtime_secs" in list(self.hyper_params):
            # drop the hyper-parameter so it cannot shadow the search criterion;
            # the grid size changes, so recount the possible models
            del self.hyper_params['max_runtime_secs']
            self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
        # budget between one model's time and a scaled multiple of the full-grid time
        max_run_time_secs = random.uniform(self.one_model_time, self.allowed_scaled_time*self.max_grid_runtime)
        search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
                           "seed": round(time.time())}
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
        actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)
        print("Maximum time limit is {0}. Time taken to build all model is "
              "{1}".format(search_criteria["max_runtime_secs"], actual_run_time_secs))
        print("Maximum model number is {0}. Actual number of models built is {1}".format(self.possible_number_models,
                                                                                         len(grid_model)))
        if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
            if len(grid_model) > self.possible_number_models:
                self.test_failed += 1
                self.test_failed_array[self.test_num] = 1
                print("test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models "
                      " which exceeds maximum possible model number {1}".format(len(grid_model),
                                                                                self.possible_number_models))
        elif len(grid_model) == 1:
            # a single model is allowed to exceed the budget
            print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0}"
                  " seconds which exceeds allowed time {1}".format(actual_run_time_secs,
                                                                   max_run_time_secs*(1+self.allowed_diff)))
        self.test_num += 1
        sys.stdout.flush()
    def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
        """Check metric-based early stopping of the randomized grid search.

        Runs a grid with a randomized stopping_tolerance/stopping_rounds on the
        given metric, then delegates to
        pyunit_utils.evaluate_metrics_stopping to verify the search stopped at
        the right point.

        :param metric_name: H2O stopping metric to test (e.g. "MSE").
        :param bigger_is_better: True if larger metric values are better.
        """
        print("*******************************************************************************************")
        print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
        h2o.cluster_info()
        search_criteria = {
            "strategy": "RandomDiscrete",
            "stopping_metric": metric_name,
            "stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
            "stopping_rounds": random.randint(1, self.max_stopping_rounds),
            "seed": round(time.time())
        }
        print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
        # cap per-model runtime so the whole grid stays fast
        self.hyper_params["max_runtime_secs"] = [0.3]
        grid_model = \
            H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                          hyper_params=self.hyper_params, search_criteria=search_criteria)
        grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
        stopped_correctly = \
            pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria,
                                                   self.possible_number_models)
        if stopped_correctly:
            print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
        else:
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")
        self.test_num += 1
def test_random_grid_search_for_glm():
    """Driver: run the four randomized-grid-search checks for Gaussian GLM.

    Exits the process with status 1 if any sub-test failed; otherwise removes
    the generated hyper-parameter JSON file.
    """
    test_glm_gaussian_random_grid = Test_glm_random_grid_search("gaussian")
    test_glm_gaussian_random_grid.test1_glm_random_grid_search_model_number("mse(xval=True)")
    test_glm_gaussian_random_grid.test2_glm_random_grid_search_max_model()
    test_glm_gaussian_random_grid.test3_glm_random_grid_search_max_runtime_secs()
    test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("MSE", False)
    # Bug fix: this line was truncated to "d.test_failed > 0:" (a SyntaxError);
    # restore the intended failure check on the accumulated test_failed counter.
    if test_glm_gaussian_random_grid.test_failed > 0:
        sys.exit(1)
    else:
        pyunit_utils.remove_files(os.path.join(test_glm_gaussian_random_grid.current_dir,
                                               test_glm_gaussian_random_grid.json_filename))
if __name__ == "__main__":
pyunit_utils.standalone_test(test_random_grid_search_for_glm)
else:
test_random_grid_search_for_glm()
| true
| true
|
790d7f2469b623df18560268f0e89fc2f0e10bab
| 2,558
|
py
|
Python
|
Good_Boids_module/tests/test_the_Good_Boids.py
|
anest1s/Refactoring_the_Bad_Boids
|
d569de4372d96917ef6aa7f1ca8acdaa09c26e0f
|
[
"MIT"
] | null | null | null |
Good_Boids_module/tests/test_the_Good_Boids.py
|
anest1s/Refactoring_the_Bad_Boids
|
d569de4372d96917ef6aa7f1ca8acdaa09c26e0f
|
[
"MIT"
] | null | null | null |
Good_Boids_module/tests/test_the_Good_Boids.py
|
anest1s/Refactoring_the_Bad_Boids
|
d569de4372d96917ef6aa7f1ca8acdaa09c26e0f
|
[
"MIT"
] | null | null | null |
from Good_Boids_module.Update_Boids import Boids
import numpy as np
from nose.tools import assert_almost_equal, assert_greater
from nose.tools import assert_less, assert_equal
from numpy.testing import assert_array_equal
import os
import yaml
from Good_Boids_module.tests.record_fixtures import configuration_file
# Load regression fixtures and the boid configuration once at import time.
# safe_load avoids arbitrary-object construction (yaml.load without a Loader
# is deprecated/unsafe), and 'with' closes the file handles the original
# bare open() calls leaked.
with open('fixture.yaml') as _fixture_file:
    fixtures = yaml.safe_load(_fixture_file)
with open(configuration_file) as _config_file:
    configuration_file_data = yaml.safe_load(_config_file)
def test_good_boids_for_regression():
    """Regression test: one Boids update of the recorded 'before' state must
    reproduce the recorded 'after' positions/velocities within tolerance."""
    before_positions = list(fixtures["before_positions"])
    before_velocities = list(fixtures["before_velocities"])
    new_positions = list(Boids(configuration_file).get_raw_positions(before_positions, before_velocities))
    after_positions = list(fixtures["after_positions"])
    new_velocities = list(Boids(configuration_file).get_raw_velocities(before_positions, before_velocities))
    after_velocities = list(fixtures["after_velocities"])
    # NOTE(review): the bound is len(new_positions) -- the number of coordinate
    # rows (likely 2), not the number of boids (len(new_positions[0])).  Confirm
    # against the fixture shapes whether all boids were meant to be checked.
    for i in range(len(new_positions)):
        assert_almost_equal(new_positions[0][i], after_positions[0][i], delta=0.1)
        assert_almost_equal(new_positions[1][i], after_positions[1][i], delta=0.1)
        assert_almost_equal(new_velocities[0][i], after_velocities[0][i], delta=15)
        assert_almost_equal(new_velocities[1][i], after_velocities[1][i], delta=15)
test_good_boids_for_regression()
def test_good_boids_initialization():
    """Check freshly-initialized boids respect the configured count and
    position/velocity bounds.

    Fix: the original built four independent Boids instances, so birds_num and
    the bounds were checked against different random flocks than the one whose
    positions were read.  Construct a single instance and check it throughout.
    """
    boids = Boids(configuration_file)
    boids_positions = boids.positions
    boids_velocities = boids.velocities
    assert_equal(configuration_file_data['birds_number'], len(boids_positions[0]))
    assert_equal(configuration_file_data['birds_number'], boids.birds_num)
    for boid in range(boids.birds_num):
        assert_less(boids_positions[0][boid], configuration_file_data['position_upper_limits'][0])
        assert_greater(boids_positions[0][boid], configuration_file_data['position_lower_limits'][0])
        assert_less(boids_positions[1][boid], configuration_file_data['position_upper_limits'][1])
        assert_greater(boids_positions[1][boid], configuration_file_data['position_lower_limits'][1])
        assert_less(boids_velocities[0][boid], configuration_file_data['velocity_upper_limits'][0])
        assert_greater(boids_velocities[0][boid], configuration_file_data['velocity_lower_limits'][0])
        assert_less(boids_velocities[1][boid], configuration_file_data['velocity_upper_limits'][1])
        assert_greater(boids_velocities[1][boid], configuration_file_data['velocity_lower_limits'][1])
test_good_boids_initialization()
| 51.16
| 108
| 0.788898
|
from Good_Boids_module.Update_Boids import Boids
import numpy as np
from nose.tools import assert_almost_equal, assert_greater
from nose.tools import assert_less, assert_equal
from numpy.testing import assert_array_equal
import os
import yaml
from Good_Boids_module.tests.record_fixtures import configuration_file
fixtures = yaml.load(open('fixture.yaml'))
configuration_file_data = yaml.load(open(configuration_file))
def test_good_boids_for_regression():
before_positions = list(fixtures["before_positions"])
before_velocities = list(fixtures["before_velocities"])
new_positions = list(Boids(configuration_file).get_raw_positions(before_positions, before_velocities))
after_positions = list(fixtures["after_positions"])
new_velocities = list(Boids(configuration_file).get_raw_velocities(before_positions, before_velocities))
after_velocities = list(fixtures["after_velocities"])
for i in range(len(new_positions)):
assert_almost_equal(new_positions[0][i], after_positions[0][i], delta=0.1)
assert_almost_equal(new_positions[1][i], after_positions[1][i], delta=0.1)
assert_almost_equal(new_velocities[0][i], after_velocities[0][i], delta=15)
assert_almost_equal(new_velocities[1][i], after_velocities[1][i], delta=15)
test_good_boids_for_regression()
def test_good_boids_initialization():
boids_positions = Boids(configuration_file).positions
boids_velocities = Boids(configuration_file).velocities
assert_equal(configuration_file_data['birds_number'], len(boids_positions[0]))
assert_equal(configuration_file_data['birds_number'], Boids(configuration_file).birds_num)
for boid in range(Boids(configuration_file).birds_num):
assert_less(boids_positions[0][boid], configuration_file_data['position_upper_limits'][0])
assert_greater(boids_positions[0][boid], configuration_file_data['position_lower_limits'][0])
assert_less(boids_positions[1][boid], configuration_file_data['position_upper_limits'][1])
assert_greater(boids_positions[1][boid], configuration_file_data['position_lower_limits'][1])
assert_less(boids_velocities[0][boid], configuration_file_data['velocity_upper_limits'][0])
assert_greater(boids_velocities[0][boid], configuration_file_data['velocity_lower_limits'][0])
assert_less(boids_velocities[1][boid], configuration_file_data['velocity_upper_limits'][1])
assert_greater(boids_velocities[1][boid], configuration_file_data['velocity_lower_limits'][1])
test_good_boids_initialization()
| true
| true
|
790d7fd564a4906d9c3188c0bff4c57844454fd8
| 681
|
py
|
Python
|
seija/reusables/verification.py
|
MapsetManagementServer/Seija
|
88acafaa311970df5ae881b7eda48ea780a18d03
|
[
"MIT"
] | 3
|
2019-07-25T18:27:13.000Z
|
2021-11-28T18:51:09.000Z
|
seija/reusables/verification.py
|
MapsetManagementServer/Seija
|
88acafaa311970df5ae881b7eda48ea780a18d03
|
[
"MIT"
] | null | null | null |
seija/reusables/verification.py
|
MapsetManagementServer/Seija
|
88acafaa311970df5ae881b7eda48ea780a18d03
|
[
"MIT"
] | 6
|
2019-12-17T19:48:10.000Z
|
2022-03-11T04:29:06.000Z
|
import discord
async def get_role_based_on_reputation(self, guild, ranked_amount):
    """Resolve the mapper role for a user based on how many ranked maps they have.

    10+ ranked maps -> experienced_mapper, 1-9 -> ranked_mapper, 0 -> mapper.
    """
    if ranked_amount >= 10:
        setting = "experienced_mapper"
    elif ranked_amount >= 1:
        setting = "ranked_mapper"
    else:
        setting = "mapper"
    return await get_role_from_db(self, setting, guild)
async def get_role_from_db(self, setting, guild):
    """Look up the configured role id for (setting, guild) and return the
    matching discord Role, or None when no role is configured/found.

    Fix: fetchone() returns None when no row matches; the original then
    crashed with TypeError on role_id[0].
    """
    async with self.bot.db.execute("SELECT role_id FROM roles WHERE setting = ? AND guild_id = ?",
                                   [setting, int(guild.id)]) as cursor:
        role_id = await cursor.fetchone()
    if role_id is None:
        # No row configured for this guild/setting.
        return None
    return discord.utils.get(guild.roles, id=int(role_id[0]))
| 37.833333
| 98
| 0.675477
|
import discord
async def get_role_based_on_reputation(self, guild, ranked_amount):
if ranked_amount >= 10:
return await get_role_from_db(self, "experienced_mapper", guild)
elif ranked_amount >= 1:
return await get_role_from_db(self, "ranked_mapper", guild)
else:
return await get_role_from_db(self, "mapper", guild)
async def get_role_from_db(self, setting, guild):
async with self.bot.db.execute("SELECT role_id FROM roles WHERE setting = ? AND guild_id = ?",
[setting, int(guild.id)]) as cursor:
role_id = await cursor.fetchone()
return discord.utils.get(guild.roles, id=int(role_id[0]))
| true
| true
|
790d80d0c914106a10d2850b810eb380fd4604ed
| 840
|
py
|
Python
|
LeetCode/581.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | 1
|
2020-12-08T02:01:18.000Z
|
2020-12-08T02:01:18.000Z
|
LeetCode/581.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
LeetCode/581.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
#
# LeetCode
#
# Problem - 581
# URL - https://leetcode.com/problems/shortest-unsorted-continuous-subarray/
#
class Solution:
    def findUnsortedSubarray(self, arr: List[int]) -> int:
        """Return the length of the shortest contiguous subarray which, if
        sorted, would leave the whole array sorted ascending.

        Returns 0 for an empty or already-sorted array.
        """
        if not arr:
            # Bug fix: the original evaluated a bare `0` here (a no-op) and
            # fell through instead of returning.
            return 0
        # First index from the left that breaks ascending order.
        index1 = -1
        index2 = -1
        for i in range(1, len(arr)):
            if arr[i] < arr[i - 1]:
                index1 = i - 1
                break
        # First index from the right that breaks ascending order.
        for i in range(len(arr) - 2, -1, -1):
            if arr[i] > arr[i + 1]:
                index2 = i + 1
                break
        if index1 == -1:
            return 0  # already sorted
        else:
            # Expand the window to cover outside elements that conflict with
            # the window's min/max.
            maxSubArr = max(arr[index1:index2 + 1])
            minSubArr = min(arr[index1:index2 + 1])
            for i in range(0, index1):
                if arr[i] > minSubArr:
                    index1 = i
                    break
            for i in range(len(arr) - 1, index2, -1):
                if arr[i] < maxSubArr:
                    index2 = i
                    break
            return index2 - index1 + 1
| 19.534884
| 76
| 0.515476
|
class Solution:
    def findUnsortedSubarray(self, arr: List[int]) -> int:
        """Return the length of the shortest contiguous subarray which, if
        sorted, would leave the whole array sorted ascending.

        Returns 0 for an empty or already-sorted array.
        """
        if not arr:
            # Bug fix: the original evaluated a bare `0` here (a no-op) and
            # fell through instead of returning.
            return 0
        # First index from the left that breaks ascending order.
        index1 = -1
        index2 = -1
        for i in range(1, len(arr)):
            if arr[i] < arr[i - 1]:
                index1 = i - 1
                break
        # First index from the right that breaks ascending order.
        for i in range(len(arr) - 2, -1, -1):
            if arr[i] > arr[i + 1]:
                index2 = i + 1
                break
        if index1 == -1:
            return 0  # already sorted
        else:
            # Expand the window to cover outside elements that conflict with
            # the window's min/max.
            maxSubArr = max(arr[index1:index2 + 1])
            minSubArr = min(arr[index1:index2 + 1])
            for i in range(0, index1):
                if arr[i] > minSubArr:
                    index1 = i
                    break
            for i in range(len(arr) - 1, index2, -1):
                if arr[i] < maxSubArr:
                    index2 = i
                    break
            return index2 - index1 + 1
| true
| true
|
790d8113d67e2f8121e30d836a9d637643f5563b
| 814
|
py
|
Python
|
nebula2/gclient/net/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 110
|
2019-10-24T09:21:07.000Z
|
2022-03-31T07:06:00.000Z
|
nebula2/gclient/net/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 83
|
2019-11-20T07:55:05.000Z
|
2022-03-23T10:55:14.000Z
|
nebula2/gclient/net/__init__.py
|
xiaoronghuang/nebula-python
|
fe2a85639dd1500133a63bad50f72b3c0370d1de
|
[
"Apache-2.0"
] | 56
|
2019-10-11T07:01:05.000Z
|
2022-03-11T09:09:15.000Z
|
#!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import logging
from nebula2.common.ttypes import ErrorCode
from nebula2.Exception import (
AuthFailedException,
IOErrorException,
NotValidConnectionException,
InValidHostname,
)
from nebula2.data.ResultSet import ResultSet
from nebula2.gclient.net.AuthResult import AuthResult
from nebula2.gclient.net.Session import Session
from nebula2.gclient.net.Connection import Connection
from nebula2.gclient.net.ConnectionPool import ConnectionPool
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)-8s [%(filename)s:%(lineno)d]:%(message)s')
| 29.071429
| 117
| 0.782555
|
import logging
from nebula2.common.ttypes import ErrorCode
from nebula2.Exception import (
AuthFailedException,
IOErrorException,
NotValidConnectionException,
InValidHostname,
)
from nebula2.data.ResultSet import ResultSet
from nebula2.gclient.net.AuthResult import AuthResult
from nebula2.gclient.net.Session import Session
from nebula2.gclient.net.Connection import Connection
from nebula2.gclient.net.ConnectionPool import ConnectionPool
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)-8s [%(filename)s:%(lineno)d]:%(message)s')
| true
| true
|
790d81313101dbd0c7e9056bdded1306b21ca4f5
| 1,586
|
py
|
Python
|
src/scheduler/models/dao/connection/ConnectionDatabase.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 14
|
2020-12-19T15:06:13.000Z
|
2022-01-12T19:52:17.000Z
|
src/scheduler/models/dao/connection/ConnectionDatabase.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 43
|
2021-01-06T22:05:22.000Z
|
2022-03-10T10:30:30.000Z
|
src/scheduler/models/dao/connection/ConnectionDatabase.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | 4
|
2020-12-18T23:10:09.000Z
|
2021-04-02T13:03:12.000Z
|
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class ConnectionDatabase(Entity, IocManager.Base):
    """ORM entity for the Connection.ConnectionDatabase table.

    Holds database-specific connection details (SID / service name / database
    name) linked to a Connection row and a ConnectorType row.
    """
    __tablename__ = "ConnectionDatabase"
    __table_args__ = {"schema": "Connection"}
    # foreign keys into Connection.Connection and Connection.ConnectorType
    ConnectionId = Column(Integer, ForeignKey('Connection.Connection.Id'))
    ConnectorTypeId = Column(Integer, ForeignKey('Connection.ConnectorType.Id'))
    Sid = Column(String(100), index=False, unique=False, nullable=True)
    ServiceName = Column(String(100), index=False, unique=False, nullable=True)
    DatabaseName = Column(String(100), index=False, unique=False, nullable=True)
    ConnectorType = relationship("ConnectorType", back_populates="Databases")

    def __init__(self,
                 ConnectionId: int = None,
                 ConnectorTypeId: int = None,
                 Host: str = None,
                 Port: int = None,
                 Sid: str = None,
                 ServiceName: str = None,
                 DatabaseName: str = None,
                 Connection = None,
                 ConnectorType = None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # annotations corrected: these ids are ints, matching the parameters
        self.ConnectionId: int = ConnectionId
        self.ConnectorTypeId: int = ConnectorTypeId
        # NOTE(review): Host and Port are plain instance attributes, not mapped
        # Columns -- presumably transient; confirm they are not meant to persist.
        self.Host: str = Host
        self.Port: int = Port
        self.Sid: str = Sid
        self.ServiceName: str = ServiceName
        self.DatabaseName: str = DatabaseName
        self.Connection = Connection
        self.ConnectorType = ConnectorType
| 42.864865
| 80
| 0.639344
|
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class ConnectionDatabase(Entity, IocManager.Base):
__tablename__ = "ConnectionDatabase"
__table_args__ = {"schema": "Connection"}
ConnectionId = Column(Integer, ForeignKey('Connection.Connection.Id'))
ConnectorTypeId = Column(Integer, ForeignKey('Connection.ConnectorType.Id'))
Sid = Column(String(100), index=False, unique=False, nullable=True)
ServiceName = Column(String(100), index=False, unique=False, nullable=True)
DatabaseName = Column(String(100), index=False, unique=False, nullable=True)
ConnectorType = relationship("ConnectorType", back_populates="Databases")
def __init__(self,
ConnectionId: int = None,
ConnectorTypeId: int = None,
Host: str = None,
Port: int = None,
Sid: str = None,
ServiceName: str = None,
DatabaseName: str = None,
Connection = None,
ConnectorType = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.ConnectionId: str = ConnectionId
self.ConnectorTypeId: str = ConnectorTypeId
self.Host: str = Host
self.Port: int = Port
self.Sid: str = Sid
self.ServiceName: str = ServiceName
self.DatabaseName: str = DatabaseName
self.Connection = Connection
self.ConnectorType = ConnectorType
| true
| true
|
790d825980ab768a98f6d16fa5cacee688c8eb00
| 550
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/solitary-morning-29716
|
d7672250b446b91af96f5bf0f75838d96d7c5b3a
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/solitary-morning-29716
|
d7672250b446b91af96f5bf0f75838d96d7c5b3a
|
[
"FTL",
"AML",
"RSA-MD"
] | 43
|
2021-08-11T09:52:00.000Z
|
2022-02-06T17:28:12.000Z
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/solitary-morning-29716
|
d7672250b446b91af96f5bf0f75838d96d7c5b3a
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
    """Data migration: ensure the default Site (pk=1) carries the app's
    display name and, when set, its custom domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "solitary-morning-29716.botics.co"
    site_params = {
        "name": "Solitary Morning",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Seed the django.contrib.sites Site entry once its schema exists."""
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
| 21.153846
| 61
| 0.661818
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "solitary-morning-29716.botics.co"
site_params = {
"name": "Solitary Morning",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| true
| true
|
790d828e52a2e702df0df87304c75254ba67a0fa
| 603
|
py
|
Python
|
blog/migrations/0001_initial.py
|
bwarren2/django-basic-blog
|
20d4c40f19054d4aa8899240d211781624a7e0c7
|
[
"MIT"
] | 1
|
2019-08-14T13:26:24.000Z
|
2019-08-14T13:26:24.000Z
|
blog/migrations/0001_initial.py
|
bwarren2/django-basic-blog
|
20d4c40f19054d4aa8899240d211781624a7e0c7
|
[
"MIT"
] | 1
|
2015-07-25T15:23:41.000Z
|
2015-07-25T15:23:41.000Z
|
blog/migrations/0001_initial.py
|
bwarren2/django-basic-blog
|
20d4c40f19054d4aa8899240d211781624a7e0c7
|
[
"MIT"
] | 2
|
2015-07-25T01:42:10.000Z
|
2019-08-14T13:26:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial blog migration: create the Entry model (id, title, content)."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Entry',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 24.12
| 114
| 0.538972
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
],
options={
},
bases=(models.Model,),
),
]
| true
| true
|
790d82adcd085ab6a86925542f75f0ad1cbc516c
| 2,617
|
py
|
Python
|
tools/convert.py
|
tjclement/cz19-badge
|
6d04756f61053f2ddd97ed60cfb008393d476721
|
[
"MIT"
] | null | null | null |
tools/convert.py
|
tjclement/cz19-badge
|
6d04756f61053f2ddd97ed60cfb008393d476721
|
[
"MIT"
] | null | null | null |
tools/convert.py
|
tjclement/cz19-badge
|
6d04756f61053f2ddd97ed60cfb008393d476721
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
from PIL import Image
import argparse
# Convert a (possibly animated) image to CampZone2019 badge rgb.gif()/icon code.
parser = argparse.ArgumentParser(description='Convert animated or static images to CampZone2019 badge code')
parser.add_argument('image', help='The path to an image to read from (e.g. .gif, .jpg, .png)')
parser.add_argument('--start_x', type=int, default=0, help='The X offset in the image to start reading from')
parser.add_argument('--start_y', type=int, default=0, help='The Y offset in the image to start reading from')
parser.add_argument('--length_x', type=int, default=32, help='The width to read from the image, starting at start_x')
parser.add_argument('--length_y', type=int, default=8, help='The height to read from the image, starting at start_y')
parser.add_argument('--start_at_frame', type=int, default=0, help='The frame to start from, if the image is animated')
parser.add_argument('--lim_frames', type=int, default=16, help='The number of frames to parse, if the image is animated')
# Fixed copy-pasted help text (was identical to --lim_frames).
parser.add_argument('--skip_frames', type=int, default=1, help='Parse every Nth frame, if the image is animated')
# NOTE(review): type=bool is an argparse pitfall -- any non-empty value
# (including "false") parses as True.  Kept as-is to preserve the CLI contract.
parser.add_argument('--is_icon', type=bool, default=False, help='Set to "true" to output rgb.image() instead of rgb.gif()')
args = parser.parse_args()

start_x = args.start_x
start_y = args.start_y
length_x = args.length_x
length_y = args.length_y
start_at_frame = args.start_at_frame
lim_frames = args.lim_frames
skip_frames = args.skip_frames
is_icon = args.is_icon

frames = []
# Bug fix: read the declared positional argument instead of raw sys.argv[1],
# which points at the wrong token whenever an option precedes the image path.
image = Image.open(args.image)
n_frames, width, height = image.n_frames if hasattr(image, 'n_frames') else 1, image.width, image.height
# Bug fix: floor division -- on Python 3, / yields a float that breaks range() below.
used_frames = min((n_frames - start_at_frame) // skip_frames, lim_frames)
used_width = min(length_x, image.width)
used_height = min(length_y, image.height)

# NOTE(review): frames are read consecutively here; skip_frames only shrinks
# the count and is not used as a step when seeking -- confirm intent.
for frame_no in range(start_at_frame, start_at_frame + used_frames):
    image.seek(frame_no)
    frame = list(image.convert('RGBA').getdata())
    # Crop the requested window out of the flat RGBA pixel list.
    cut_frame = []
    for y in range(start_y, start_y + used_height):
        for x in range(start_x, start_x + used_width):
            cut_frame.append(frame[x + width * y])
    frames.append(cut_frame)

# Emit pixels as 0xRRGGBBAA literals in badge-API call form.
if is_icon:
    print('icon = ([0x' +
          ', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
                       frames]) +
          '], %d)' % used_frames)
else:
    print('rgb.gif([0x' +
          ', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
                       frames]) +
          '], %d, %d, %d, %d, %d)' % (0, 0, used_width, used_height, used_frames))
| 45.912281
| 123
| 0.680168
|
import sys
from PIL import Image
import argparse
parser = argparse.ArgumentParser(description='Convert animated or static images to CampZone2019 badge code')
parser.add_argument('image', help='The path to an image to read from (e.g. .gif, .jpg, .png)')
parser.add_argument('--start_x', type=int, default=0, help='The X offset in the image to start reading from')
parser.add_argument('--start_y', type=int, default=0, help='The Y offset in the image to start reading from')
parser.add_argument('--length_x', type=int, default=32, help='The width to read from the image, starting at start_x')
parser.add_argument('--length_y', type=int, default=8, help='The height to read from the image, starting at start_y')
parser.add_argument('--start_at_frame', type=int, default=0, help='The frame to start from, if the image is animated')
parser.add_argument('--lim_frames', type=int, default=16, help='The number of frames to parse, if the image is animated')
parser.add_argument('--skip_frames', type=int, default=1, help='The number of frames to parse, if the image is animated')
parser.add_argument('--is_icon', type=bool, default=False, help='Set to "true" to output rgb.image() instead of rgb.gif()')
args = parser.parse_args()
start_x = args.start_x
start_y = args.start_y
length_x = args.length_x
length_y = args.length_y
start_at_frame = args.start_at_frame
lim_frames = args.lim_frames
skip_frames = args.skip_frames
is_icon = args.is_icon
frames = []
image = Image.open(sys.argv[1])
n_frames, width, height = image.n_frames if hasattr(image, 'n_frames') else 1, image.width, image.height
used_frames = min((n_frames - start_at_frame) / skip_frames, lim_frames)
used_width = min(length_x, image.width)
used_height = min(length_y, image.height)
for frame_no in range(start_at_frame, start_at_frame + used_frames):
image.seek(frame_no)
frame = list(image.convert('RGBA').getdata())
cut_frame = []
for y in range(start_y, start_y + used_height):
for x in range(start_x, start_x + used_width):
cut_frame.append(frame[x + width * y])
frames.append(cut_frame)
if is_icon:
print('icon = ([0x' +
', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
frames]) +
'], %d)' % used_frames)
else:
print('rgb.gif([0x' +
', 0x'.join([', 0x'.join([format(r << 24 | g << 16 | b << 8 | a, '08x') for r, g, b, a in frame]) for frame in
frames]) +
'], %d, %d, %d, %d, %d)' % (0, 0, used_width, used_height, used_frames))
| true
| true
|
790d82f674b469724c789de3e008a86a773eabf8
| 1,670
|
py
|
Python
|
tools/clean_file_locks.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | null | null | null |
tools/clean_file_locks.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:14:13.000Z
|
2020-07-24T14:14:13.000Z
|
tools/clean_file_locks.py
|
bopopescu/extra-specs-1
|
6a14d8d7807727023b4d589af47e8a9605f12db1
|
[
"Apache-2.0"
] | 1
|
2020-07-24T10:40:59.000Z
|
2020-07-24T10:40:59.000Z
|
#!/usr/bin/env python
# Copyright 2012 La Honda Research Center, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""clean_file_locks.py - Cleans stale interprocess locks
This rountine can be used to find and delete stale lock files from
nova's interprocess synchroization. It can be used safely while services
are running.
"""
import logging
import optparse
from nova import flags
from nova import log
from nova import utils
LOG = log.getLogger('nova.utils')
FLAGS = flags.FLAGS
def parse_options():
    """Process command line options.

    Returns:
        (options, args): optparse result -- ``options.verbose`` flag plus any
        positional arguments.
    """
    parser = optparse.OptionParser('usage: %prog [options]')
    parser.add_option('--verbose', action='store_true',
                      help='List lock files found and deleted')
    options, args = parser.parse_args()
    return options, args
def main():
    """Main loop: set log verbosity, then sweep stale nova file locks."""
    options, args = parse_options()
    verbose = options.verbose
    # --verbose turns on per-lock DEBUG output; otherwise only summary INFO
    if verbose:
        LOG.logger.setLevel(logging.DEBUG)
    else:
        LOG.logger.setLevel(logging.INFO)
    LOG.info('Cleaning stale locks from %s' % FLAGS.lock_path)
    utils.cleanup_file_locks()
    LOG.info('Finished')
if __name__ == '__main__':
main()
| 26.09375
| 74
| 0.713772
|
import logging
import optparse
from nova import flags
from nova import log
from nova import utils
LOG = log.getLogger('nova.utils')
FLAGS = flags.FLAGS
def parse_options():
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true',
help='List lock files found and deleted')
options, args = parser.parse_args()
return options, args
def main():
options, args = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info('Cleaning stale locks from %s' % FLAGS.lock_path)
utils.cleanup_file_locks()
LOG.info('Finished')
if __name__ == '__main__':
main()
| true
| true
|
790d832d1bb65414d4f691b248e8be0d69894926
| 458
|
py
|
Python
|
data/scripts/templates/object/tangible/loot/quest/shared_nym_droid_memory_chip.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/loot/quest/shared_nym_droid_memory_chip.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/loot/quest/shared_nym_droid_memory_chip.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/quest/shared_nym_droid_memory_chip.iff"
result.attribute_template_id = -1
result.stfName("item_n","nym_memory_chip")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.941176
| 80
| 0.731441
| true
| true
|
|
790d83c21c632207c9ab00fc86d6452ee5f648d5
| 3,011
|
py
|
Python
|
code/logz.py
|
edwithschoolofai/ARS
|
22ec3c58637a84e374611d3f8bf05b89a9468f8a
|
[
"BSD-2-Clause"
] | 398
|
2018-03-20T07:08:01.000Z
|
2022-03-14T05:51:47.000Z
|
code/logz.py
|
edwithschoolofai/ARS
|
22ec3c58637a84e374611d3f8bf05b89a9468f8a
|
[
"BSD-2-Clause"
] | 13
|
2018-03-28T19:12:07.000Z
|
2021-03-19T03:49:49.000Z
|
code/logz.py
|
edwithschoolofai/ARS
|
22ec3c58637a84e374611d3f8bf05b89a9468f8a
|
[
"BSD-2-Clause"
] | 96
|
2018-03-20T21:17:33.000Z
|
2021-12-23T02:58:40.000Z
|
# Code in this file is copied and adapted from
# https://github.com/berkeleydeeprlcourse
import json
"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)
"""
import os.path as osp, shutil, time, atexit, os, subprocess
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
class G(object):
output_dir = None
output_file = None
first_row = True
log_headers = []
log_current_row = {}
def configure_output_dir(d=None):
"""
Set output directory to d, or to /tmp/somerandomnumber if d is None
"""
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
if not osp.exists(G.output_dir):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
atexit.register(G.output_file.close)
print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
def log_tabular(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
if G.first_row:
G.log_headers.append(key)
else:
assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
G.log_current_row[key] = val
def save_params(params):
with open(osp.join(G.output_dir, "params.json"), 'w') as out:
out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))
def dump_tabular():
"""
Write all of the diagnostics from the current iteration
"""
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15,max(key_lens))
keystr = '%'+'%d'%max_key_len
fmt = "| " + keystr + "s | %15s |"
n_slashes = 22 + max_key_len
print("-"*n_slashes)
for key in G.log_headers:
val = G.log_current_row.get(key, "")
if hasattr(val, "__float__"): valstr = "%8.3g"%val
else: valstr = val
print(fmt%(key, valstr))
vals.append(val)
print("-"*n_slashes)
if G.output_file is not None:
if G.first_row:
G.output_file.write("\t".join(G.log_headers))
G.output_file.write("\n")
G.output_file.write("\t".join(map(str,vals)))
G.output_file.write("\n")
G.output_file.flush()
G.log_current_row.clear()
G.first_row=False
| 28.67619
| 122
| 0.63733
|
import json
import os.path as osp, shutil, time, atexit, os, subprocess
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
class G(object):
output_dir = None
output_file = None
first_row = True
log_headers = []
log_current_row = {}
def configure_output_dir(d=None):
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
if not osp.exists(G.output_dir):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
atexit.register(G.output_file.close)
print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
def log_tabular(key, val):
if G.first_row:
G.log_headers.append(key)
else:
assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
G.log_current_row[key] = val
def save_params(params):
with open(osp.join(G.output_dir, "params.json"), 'w') as out:
out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))
def dump_tabular():
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15,max(key_lens))
keystr = '%'+'%d'%max_key_len
fmt = "| " + keystr + "s | %15s |"
n_slashes = 22 + max_key_len
print("-"*n_slashes)
for key in G.log_headers:
val = G.log_current_row.get(key, "")
if hasattr(val, "__float__"): valstr = "%8.3g"%val
else: valstr = val
print(fmt%(key, valstr))
vals.append(val)
print("-"*n_slashes)
if G.output_file is not None:
if G.first_row:
G.output_file.write("\t".join(G.log_headers))
G.output_file.write("\n")
G.output_file.write("\t".join(map(str,vals)))
G.output_file.write("\n")
G.output_file.flush()
G.log_current_row.clear()
G.first_row=False
| true
| true
|
790d848466bbf50695151b9cdec24f3f85cd12db
| 24,285
|
py
|
Python
|
src/image-gallery/azext_image_gallery/vendored_sdks/azure_mgmt_compute/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
src/image-gallery/azext_image_gallery/vendored_sdks/azure_mgmt_compute/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/image-gallery/azext_image_gallery/vendored_sdks/azure_mgmt_compute/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_cancel_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_os_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_extension_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_get_latest_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class VirtualMachineScaleSetRollingUpgradesOperations(object):
"""VirtualMachineScaleSetRollingUpgradesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _cancel_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_cancel_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._cancel_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
@distributed_trace
def begin_cancel( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
def _start_os_upgrade_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_start_os_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_os_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
@distributed_trace
def begin_start_os_upgrade( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
def _start_extension_upgrade_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_start_extension_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_extension_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_extension_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"} # type: ignore
@distributed_trace
def begin_start_extension_upgrade( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
the latest available extension version. Instances which are already running the latest
extension versions are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_extension_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_extension_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"} # type: ignore
@distributed_trace
def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_get_latest_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_latest.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest"} # type: ignore
| 44.154545
| 234
| 0.680255
|
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_cancel_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01")
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel")
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
_query_parameters = kwargs.pop("params", {})
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_os_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01")
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade")
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
_query_parameters = kwargs.pop("params", {})
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_extension_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01")
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade")
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
_query_parameters = kwargs.pop("params", {})
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_get_latest_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01")
accept = "application/json"
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest")
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
_query_parameters = kwargs.pop("params", {})
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
_header_parameters = kwargs.pop("headers", {})
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class VirtualMachineScaleSetRollingUpgradesOperations(object):
    """Operations on the rolling upgrade of a virtual machine scale set.

    Exposes long-running operations to cancel an in-flight rolling upgrade,
    start an OS rolling upgrade, start an extension rolling upgrade, and a
    plain GET for the latest rolling-upgrade status. All calls default to
    api-version "2021-07-01".
    """
    # Expose the generated models module on the operations class.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, per-client configuration and (de)serializers are
        # injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _cancel_initial(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial POST that cancels the current rolling upgrade.

        Accepts HTTP 200/202; any other status is mapped via ``error_map``
        and raised as ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2021-07-01")
        request = build_cancel_request_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._cancel_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            # Custom response hook: (pipeline_response, deserialized, headers).
            return cls(pipeline_response, None, {})
    _cancel_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"}
    @distributed_trace
    def begin_cancel(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Begin cancelling the current rolling upgrade (long-running operation).

        :param resource_group_name: Name of the resource group.
        :param vm_scale_set_name: Name of the VM scale set.
        :return: An ``LROPoller`` that resolves to ``None``.
        """
        api_version = kwargs.pop('api_version', "2021-07-01")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = self._cancel_initial(
                resource_group_name=resource_group_name,
                vm_scale_set_name=vm_scale_set_name,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # error_map was only meaningful for the initial call; drop it before
        # handing kwargs to the polling method.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"}
    def _start_os_upgrade_initial(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial POST that starts an OS rolling upgrade.

        Accepts HTTP 200/202; other statuses raise ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2021-07-01")
        request = build_start_os_upgrade_request_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._start_os_upgrade_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _start_os_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"}
    @distributed_trace
    def begin_start_os_upgrade(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Begin an OS rolling upgrade on the scale set (long-running operation).

        :param resource_group_name: Name of the resource group.
        :param vm_scale_set_name: Name of the VM scale set.
        :return: An ``LROPoller`` that resolves to ``None``.
        """
        api_version = kwargs.pop('api_version', "2021-07-01")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = self._start_os_upgrade_initial(
                resource_group_name=resource_group_name,
                vm_scale_set_name=vm_scale_set_name,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start_os_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"}
    def _start_extension_upgrade_initial(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial POST that starts an extension rolling upgrade.

        Accepts HTTP 200/202; other statuses raise ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2021-07-01")
        request = build_start_extension_upgrade_request_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._start_extension_upgrade_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _start_extension_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"}
    @distributed_trace
    def begin_start_extension_upgrade(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Begin an extension rolling upgrade on the scale set (long-running operation).

        :param resource_group_name: Name of the resource group.
        :param vm_scale_set_name: Name of the VM scale set.
        :return: An ``LROPoller`` that resolves to ``None``.
        """
        api_version = kwargs.pop('api_version', "2021-07-01")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = self._start_extension_upgrade_initial(
                resource_group_name=resource_group_name,
                vm_scale_set_name=vm_scale_set_name,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start_extension_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"}
    @distributed_trace
    def get_latest(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        **kwargs: Any
    ) -> "_models.RollingUpgradeStatusInfo":
        """Return the status of the latest rolling upgrade of the scale set.

        :param resource_group_name: Name of the resource group.
        :param vm_scale_set_name: Name of the VM scale set.
        :return: The deserialized ``RollingUpgradeStatusInfo`` model.
        :raises HttpResponseError: On any non-200 response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2021-07-01")
        request = build_get_latest_request(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get_latest.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_latest.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest"}
| true
| true
|
790d87bd4bdeedbce5f096b24423ab2e051374d9
| 122
|
py
|
Python
|
api/uploader/__init__.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
api/uploader/__init__.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
api/uploader/__init__.py
|
StepaTa/vkbottle
|
3b04a5343380cbabe782151e7cb1c1645a9fa9ce
|
[
"MIT"
] | null | null | null |
from .base import Uploader
from .photo import PhotoUploader
from .doc import DocUploader
from .audio import AudioUploader
| 24.4
| 32
| 0.836066
|
from .base import Uploader
from .photo import PhotoUploader
from .doc import DocUploader
from .audio import AudioUploader
| true
| true
|
790d881362b5d343de0d5f2fd2d6938e5126cc81
| 12,685
|
py
|
Python
|
python_modules/dagster/dagster/core/code_pointer.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/code_pointer.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/code_pointer.py
|
mpkocher/dagster
|
c25c07de0e9259b08d6227f82d7aaa24f23bee85
|
[
"Apache-2.0"
] | null | null | null |
import importlib
import inspect
import os
import sys
import warnings
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.serdes import whitelist_for_serdes
from dagster.seven import import_module_from_path
from dagster.utils import alter_sys_path, load_yaml_from_path
class CodePointer(six.with_metaclass(ABCMeta)):
    """Abstract pointer to a Python definition that can be re-loaded later.

    Concrete subclasses record *where* a target lives (module, package, or
    file) so that another process can import and resolve it again.
    """
    @abstractmethod
    def load_target(self):
        """Import and return the object this pointer refers to."""
        pass
    @abstractmethod
    def describe(self):
        """Return a human-readable description of the pointer."""
        pass
    @staticmethod
    def from_module(module_name, definition):
        """Build a pointer to ``definition`` inside installed module ``module_name``."""
        check.str_param(module_name, 'module_name')
        check.str_param(definition, 'definition')
        return ModuleCodePointer(module_name, definition)
    @staticmethod
    def from_python_package(module_name, attribute):
        """Build a pointer to ``attribute`` inside installed package module ``module_name``."""
        check.str_param(module_name, 'module_name')
        check.str_param(attribute, 'attribute')
        return PackageCodePointer(module_name, attribute)
    @staticmethod
    def from_python_file(python_file, definition, working_directory):
        """Build a file-based pointer; includes the working directory when given."""
        check.str_param(python_file, 'python_file')
        check.str_param(definition, 'definition')
        check.opt_str_param(working_directory, 'working_directory')
        if working_directory:
            return FileInDirectoryCodePointer(
                python_file=python_file, fn_name=definition, working_directory=working_directory
            )
        return FileCodePointer(python_file=python_file, fn_name=definition)
    @staticmethod
    def from_legacy_repository_yaml(file_path):
        """Build a pointer from a legacy ``repository.yaml`` config file.

        The yaml's ``repository`` section may name either a ``module`` or a
        ``file``; ``fn`` is always required.
        """
        check.str_param(file_path, 'file_path')
        config = load_yaml_from_path(file_path)
        repository_config = check.dict_elem(config, 'repository')
        module_name = check.opt_str_elem(repository_config, 'module')
        file_name = check.opt_str_elem(repository_config, 'file')
        fn_name = check.str_elem(repository_config, 'fn')
        return (
            CodePointer.from_module(module_name, fn_name)
            if module_name
            # rebase file in config off of the path in the config file
            else CodePointer.from_python_file(rebase_file(file_name, file_path), fn_name, None)
        )
def rebase_file(relative_path_in_file, file_path_resides_in):
    '''Resolve a path written relative to the file that contains it.

    Config files commonly reference other files relative to their own
    location; this joins the relative path onto the config file's directory.
    '''
    check.str_param(relative_path_in_file, 'relative_path_in_file')
    check.str_param(file_path_resides_in, 'file_path_resides_in')
    anchor_directory = os.path.dirname(os.path.abspath(file_path_resides_in))
    return os.path.join(anchor_directory, relative_path_in_file)
def load_python_file(python_file, working_directory):
    '''
    Takes a path to a python file and returns a loaded module.

    When ``working_directory`` is given it is placed on ``sys.path`` (and the
    script directory removed) for the duration of the import. Otherwise the
    import is first attempted without the script directory; if that fails, a
    second attempt with it is made and a deprecation warning is emitted.
    '''
    check.str_param(python_file, 'python_file')
    module_name = os.path.splitext(os.path.basename(python_file))[0]
    # sys.path[0] is the directory the interpreter was launched from.
    cwd = sys.path[0]
    if working_directory:
        with alter_sys_path(to_add=[working_directory], to_remove=[cwd]):
            return import_module_from_path(module_name, python_file)
    error = None
    # Snapshot sys.modules so a failed speculative import can be rolled back.
    sys_modules = {k: v for k, v in sys.modules.items()}
    with alter_sys_path(to_add=[], to_remove=[cwd]):
        try:
            module = import_module_from_path(module_name, python_file)
        except ImportError as ie:
            # importing alters sys.modules in ways that may interfere with the import below, even
            # if the import has failed. to work around this, we need to manually clear any modules
            # that have been cached in sys.modules due to the speculative import call
            # Also, we are mutating sys.modules instead of straight-up assigning to sys_modules,
            # because some packages will do similar shenanigans to sys.modules (e.g. numpy)
            to_delete = set(sys.modules) - set(sys_modules)
            for key in to_delete:
                del sys.modules[key]
            error = ie
    if not error:
        return module
    try:
        module = import_module_from_path(module_name, python_file)
        # if here, we were able to resolve the module with the working directory on the
        # path, but should error because we may not always invoke from the same directory
        # (e.g. from cron)
        warnings.warn(
            (
                'Module `{module}` was resolved using the working directory. The ability to '
                'implicitly load modules from the working directory is deprecated and '
                'will be removed in a future release. Please explicitly specify the '
                '`working_directory` config option in your workspace.yaml or install `{module}` to '
                'your python environment.'
            ).format(module=error.name if hasattr(error, 'name') else module_name)
        )
        return module
    except ImportError:
        # Surface the original failure, not the retry's.
        raise error
def load_python_module(module_name, warn_only=False, remove_from_path_fn=None):
    '''Import ``module_name``, refusing to resolve it via the working directory.

    The import is first attempted with the working directory (and any paths
    from ``remove_from_path_fn``) removed from ``sys.path``. If that fails and
    the module *is* importable with the full path, either a deprecation
    warning is emitted (``warn_only=True``) or a
    ``DagsterInvariantViolationError`` is raised, because working-directory
    resolution is not reproducible across invocation contexts (e.g. cron).

    :param module_name: Dotted name of the module to import.
    :param warn_only: Warn instead of raising when the module only resolves
        via the working directory.
    :param remove_from_path_fn: Optional callable returning extra sys.path
        entries to remove during the first import attempt.
    :raises DagsterInvariantViolationError: If the module only resolves via
        the working directory and ``warn_only`` is False.
    :raises ImportError: If the module cannot be resolved at all.
    '''
    check.str_param(module_name, 'module_name')
    check.bool_param(warn_only, 'warn_only')
    check.opt_callable_param(remove_from_path_fn, 'remove_from_path_fn')
    error = None
    remove_paths = remove_from_path_fn() if remove_from_path_fn else []  # hook for tests
    remove_paths.insert(0, sys.path[0])  # remove the working directory
    with alter_sys_path(to_add=[], to_remove=remove_paths):
        try:
            module = importlib.import_module(module_name)
        except ImportError as ie:
            error = ie
    if error:
        try:
            module = importlib.import_module(module_name)
            # if here, we were able to resolve the module with the working directory on the path,
            # but should error because we may not always invoke from the same directory (e.g. from
            # cron)
            if warn_only:
                warnings.warn(
                    (
                        'Module {module} was resolved using the working directory. The ability to '
                        'load uninstalled modules from the working directory is deprecated and '
                        'will be removed in a future release. Please use the python-file based '
                        'load arguments or install {module} to your python environment.'
                    ).format(module=module_name)
                )
            else:
                six.raise_from(
                    DagsterInvariantViolationError(
                        (
                            'Module {module} not found. Packages must be installed rather than '
                            'relying on the working directory to resolve module loading.'
                        ).format(module=module_name)
                    ),
                    error,
                )
        except ImportError:
            # Fix: the retry's exception was previously bound to an unused
            # name; re-raise the original failure directly.
            raise error
    return module
@whitelist_for_serdes
class FileCodePointer(namedtuple('_FileCodePointer', 'python_file fn_name'), CodePointer):
    '''Pointer to a named definition at module scope of a python file.'''
    def __new__(cls, python_file, fn_name):
        python_file = check.str_param(python_file, 'python_file')
        fn_name = check.str_param(fn_name, 'fn_name')
        return super(FileCodePointer, cls).__new__(cls, python_file, fn_name)
    def load_target(self):
        '''Import the file and return the attribute named ``fn_name``.'''
        module = load_python_file(self.python_file, None)
        if hasattr(module, self.fn_name):
            return getattr(module, self.fn_name)
        raise DagsterInvariantViolationError(
            '{name} not found at module scope in file {file}.'.format(
                name=self.fn_name, file=self.python_file
            )
        )
    def describe(self):
        '''Human-readable ``file::name`` form.'''
        return '{}::{}'.format(self.python_file, self.fn_name)
    def get_cli_args(self):
        '''CLI arguments that reproduce this pointer.'''
        absolute_file = os.path.abspath(os.path.expanduser(self.python_file))
        return '-f {} -a {}'.format(absolute_file, self.fn_name)
@whitelist_for_serdes
class FileInDirectoryCodePointer(
    namedtuple('_FileInDirectoryCodePointer', 'python_file fn_name working_directory'), CodePointer
):
    '''
    File-based pointer that also records the working directory used for module
    resolution, so other processes (e.g. the cron scheduler) can resolve the
    same modules. Kept separate from `FileCodePointer` (rather than making
    `working_directory` nullable there) because merging would change schedule
    origin ids and require purging schedule storage; consider merging when a
    storage migration is needed anyway.
    https://github.com/dagster-io/dagster/issues/2673
    '''
    def __new__(cls, python_file, fn_name, working_directory):
        python_file = check.str_param(python_file, 'python_file')
        fn_name = check.str_param(fn_name, 'fn_name')
        working_directory = check.str_param(working_directory, 'working_directory')
        return super(FileInDirectoryCodePointer, cls).__new__(
            cls, python_file, fn_name, working_directory
        )
    def load_target(self):
        '''Import the file (resolving modules via the working directory) and return the target.'''
        module = load_python_file(self.python_file, self.working_directory)
        if hasattr(module, self.fn_name):
            return getattr(module, self.fn_name)
        raise DagsterInvariantViolationError(
            '{name} not found at module scope in file {file}.'.format(
                name=self.fn_name, file=self.python_file
            )
        )
    def describe(self):
        '''Human-readable ``file::name -- [dir ...]`` form.'''
        return '{}::{} -- [dir {}]'.format(self.python_file, self.fn_name, self.working_directory)
    def get_cli_args(self):
        '''CLI arguments that reproduce this pointer.'''
        absolute_file = os.path.abspath(os.path.expanduser(self.python_file))
        return '-f {} -a {} -d {}'.format(absolute_file, self.fn_name, self.working_directory)
@whitelist_for_serdes
class ModuleCodePointer(namedtuple('_ModuleCodePointer', 'module fn_name'), CodePointer):
    '''Pointer to a named definition inside an importable module.'''
    def __new__(cls, module, fn_name):
        module = check.str_param(module, 'module')
        fn_name = check.str_param(fn_name, 'fn_name')
        return super(ModuleCodePointer, cls).__new__(cls, module, fn_name)
    def load_target(self):
        '''Import the module (warning on working-directory resolution) and return the target.'''
        module = load_python_module(self.module, warn_only=True)
        if hasattr(module, self.fn_name):
            return getattr(module, self.fn_name)
        raise DagsterInvariantViolationError(
            '{name} not found in module {module}. dir: {dir}'.format(
                name=self.fn_name, module=self.module, dir=dir(module)
            )
        )
    def describe(self):
        '''Human-readable ``from module import name`` form.'''
        return 'from {} import {}'.format(self.module, self.fn_name)
    def get_cli_args(self):
        '''CLI arguments that reproduce this pointer.'''
        return '-m {} -a {}'.format(self.module, self.fn_name)
@whitelist_for_serdes
class PackageCodePointer(namedtuple('_PackageCodePointer', 'module attribute'), CodePointer):
    '''Pointer to an attribute of an installed package's module.'''
    def __new__(cls, module, attribute):
        module = check.str_param(module, 'module')
        attribute = check.str_param(attribute, 'attribute')
        return super(PackageCodePointer, cls).__new__(cls, module, attribute)
    def load_target(self):
        '''Import the module (raising on working-directory resolution) and return the attribute.'''
        module = load_python_module(self.module)
        if hasattr(module, self.attribute):
            return getattr(module, self.attribute)
        raise DagsterInvariantViolationError(
            '{name} not found in module {module}. dir: {dir}'.format(
                name=self.attribute, module=self.module, dir=dir(module)
            )
        )
    def describe(self):
        '''Human-readable ``from module import attribute`` form.'''
        return 'from {} import {}'.format(self.module, self.attribute)
    def get_cli_args(self):
        '''CLI arguments that reproduce this pointer.'''
        return '-m {} -a {}'.format(self.module, self.attribute)
def get_python_file_from_previous_stack_frame():
    '''Return the absolute path of the python file that called our caller.

    ``inspect.stack()`` exposes the call stack; on Python < 3.5 each entry is
    a plain tuple whose second element is the file path, while on 3.5+ it is a
    ``FrameInfo`` namedtuple with the path in the same position.
    '''
    # Since this is now a function in this file, we need to go back two hops to find the
    # callsite file.
    frame_info = inspect.stack(0)[2]
    # See: https://docs.python.org/3/library/inspect.html
    expected_type = (
        inspect.FrameInfo
        if sys.version_info.major == 3 and sys.version_info.minor >= 5
        else tuple
    )
    check.inst(frame_info, expected_type)
    return os.path.abspath(frame_info[1])
| 39.030769
| 100
| 0.659361
|
import importlib
import inspect
import os
import sys
import warnings
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError
from dagster.serdes import whitelist_for_serdes
from dagster.seven import import_module_from_path
from dagster.utils import alter_sys_path, load_yaml_from_path
class CodePointer(six.with_metaclass(ABCMeta)):
@abstractmethod
def load_target(self):
pass
@abstractmethod
def describe(self):
pass
@staticmethod
def from_module(module_name, definition):
check.str_param(module_name, 'module_name')
check.str_param(definition, 'definition')
return ModuleCodePointer(module_name, definition)
@staticmethod
def from_python_package(module_name, attribute):
check.str_param(module_name, 'module_name')
check.str_param(attribute, 'attribute')
return PackageCodePointer(module_name, attribute)
@staticmethod
def from_python_file(python_file, definition, working_directory):
check.str_param(python_file, 'python_file')
check.str_param(definition, 'definition')
check.opt_str_param(working_directory, 'working_directory')
if working_directory:
return FileInDirectoryCodePointer(
python_file=python_file, fn_name=definition, working_directory=working_directory
)
return FileCodePointer(python_file=python_file, fn_name=definition)
@staticmethod
def from_legacy_repository_yaml(file_path):
check.str_param(file_path, 'file_path')
config = load_yaml_from_path(file_path)
repository_config = check.dict_elem(config, 'repository')
module_name = check.opt_str_elem(repository_config, 'module')
file_name = check.opt_str_elem(repository_config, 'file')
fn_name = check.str_elem(repository_config, 'fn')
return (
CodePointer.from_module(module_name, fn_name)
if module_name
else CodePointer.from_python_file(rebase_file(file_name, file_path), fn_name, None)
)
def rebase_file(relative_path_in_file, file_path_resides_in):
check.str_param(relative_path_in_file, 'relative_path_in_file')
check.str_param(file_path_resides_in, 'file_path_resides_in')
return os.path.join(
os.path.dirname(os.path.abspath(file_path_resides_in)), relative_path_in_file
)
def load_python_file(python_file, working_directory):
check.str_param(python_file, 'python_file')
module_name = os.path.splitext(os.path.basename(python_file))[0]
cwd = sys.path[0]
if working_directory:
with alter_sys_path(to_add=[working_directory], to_remove=[cwd]):
return import_module_from_path(module_name, python_file)
error = None
sys_modules = {k: v for k, v in sys.modules.items()}
with alter_sys_path(to_add=[], to_remove=[cwd]):
try:
module = import_module_from_path(module_name, python_file)
except ImportError as ie:
to_delete = set(sys.modules) - set(sys_modules)
for key in to_delete:
del sys.modules[key]
error = ie
if not error:
return module
try:
module = import_module_from_path(module_name, python_file)
warnings.warn(
(
'Module `{module}` was resolved using the working directory. The ability to '
'implicitly load modules from the working directory is deprecated and '
'will be removed in a future release. Please explicitly specify the '
'`working_directory` config option in your workspace.yaml or install `{module}` to '
'your python environment.'
).format(module=error.name if hasattr(error, 'name') else module_name)
)
return module
except ImportError:
raise error
def load_python_module(module_name, warn_only=False, remove_from_path_fn=None):
check.str_param(module_name, 'module_name')
check.bool_param(warn_only, 'warn_only')
check.opt_callable_param(remove_from_path_fn, 'remove_from_path_fn')
error = None
remove_paths = remove_from_path_fn() if remove_from_path_fn else []
remove_paths.insert(0, sys.path[0])
with alter_sys_path(to_add=[], to_remove=remove_paths):
try:
module = importlib.import_module(module_name)
except ImportError as ie:
error = ie
if error:
try:
module = importlib.import_module(module_name)
if warn_only:
warnings.warn(
(
'Module {module} was resolved using the working directory. The ability to '
'load uninstalled modules from the working directory is deprecated and '
'will be removed in a future release. Please use the python-file based '
'load arguments or install {module} to your python environment.'
).format(module=module_name)
)
else:
six.raise_from(
DagsterInvariantViolationError(
(
'Module {module} not found. Packages must be installed rather than '
'relying on the working directory to resolve module loading.'
).format(module=module_name)
),
error,
)
except ImportError as ie:
raise error
return module
@whitelist_for_serdes
class FileCodePointer(namedtuple('_FileCodePointer', 'python_file fn_name'), CodePointer):
def __new__(cls, python_file, fn_name):
return super(FileCodePointer, cls).__new__(
cls, check.str_param(python_file, 'python_file'), check.str_param(fn_name, 'fn_name'),
)
def load_target(self):
module = load_python_file(self.python_file, None)
if not hasattr(module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found at module scope in file {file}.'.format(
name=self.fn_name, file=self.python_file
)
)
return getattr(module, self.fn_name)
def describe(self):
return '{self.python_file}::{self.fn_name}'.format(self=self)
def get_cli_args(self):
return '-f {python_file} -a {fn_name}'.format(
python_file=os.path.abspath(os.path.expanduser(self.python_file)), fn_name=self.fn_name
)
@whitelist_for_serdes
class FileInDirectoryCodePointer(
namedtuple('_FileInDirectoryCodePointer', 'python_file fn_name working_directory'), CodePointer
):
def __new__(cls, python_file, fn_name, working_directory):
return super(FileInDirectoryCodePointer, cls).__new__(
cls,
check.str_param(python_file, 'python_file'),
check.str_param(fn_name, 'fn_name'),
check.str_param(working_directory, 'working_directory'),
)
def load_target(self):
module = load_python_file(self.python_file, self.working_directory)
if not hasattr(module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found at module scope in file {file}.'.format(
name=self.fn_name, file=self.python_file
)
)
return getattr(module, self.fn_name)
def describe(self):
return '{self.python_file}::{self.fn_name} -- [dir {self.working_directory}]'.format(
self=self
)
def get_cli_args(self):
return '-f {python_file} -a {fn_name} -d {directory}'.format(
python_file=os.path.abspath(os.path.expanduser(self.python_file)),
fn_name=self.fn_name,
directory=self.working_directory,
)
@whitelist_for_serdes
class ModuleCodePointer(namedtuple('_ModuleCodePointer', 'module fn_name'), CodePointer):
def __new__(cls, module, fn_name):
return super(ModuleCodePointer, cls).__new__(
cls, check.str_param(module, 'module'), check.str_param(fn_name, 'fn_name')
)
def load_target(self):
module = load_python_module(self.module, warn_only=True)
if not hasattr(module, self.fn_name):
raise DagsterInvariantViolationError(
'{name} not found in module {module}. dir: {dir}'.format(
name=self.fn_name, module=self.module, dir=dir(module)
)
)
return getattr(module, self.fn_name)
def describe(self):
return 'from {self.module} import {self.fn_name}'.format(self=self)
def get_cli_args(self):
return '-m {module} -a {fn_name}'.format(module=self.module, fn_name=self.fn_name)
@whitelist_for_serdes
class PackageCodePointer(namedtuple('_PackageCodePointer', 'module attribute'), CodePointer):
def __new__(cls, module, attribute):
return super(PackageCodePointer, cls).__new__(
cls, check.str_param(module, 'module'), check.str_param(attribute, 'attribute')
)
def load_target(self):
module = load_python_module(self.module)
if not hasattr(module, self.attribute):
raise DagsterInvariantViolationError(
'{name} not found in module {module}. dir: {dir}'.format(
name=self.attribute, module=self.module, dir=dir(module)
)
)
return getattr(module, self.attribute)
def describe(self):
return 'from {self.module} import {self.attribute}'.format(self=self)
def get_cli_args(self):
return '-m {module} -a {attribute}'.format(module=self.module, attribute=self.attribute)
def get_python_file_from_previous_stack_frame():
previous_stack_frame = inspect.stack(0)[2]
if sys.version_info.major == 3 and sys.version_info.minor >= 5:
check.inst(previous_stack_frame, inspect.FrameInfo)
else:
check.inst(previous_stack_frame, tuple)
python_file = previous_stack_frame[1]
return os.path.abspath(python_file)
| true
| true
|
790d886400acb4a4a179d95f684170cd908e401b
| 4,777
|
py
|
Python
|
metrics/__init__.py
|
nathan-bennett/skellam
|
8a1fff14ac8c5f6bd415a51befab818f864ab3e5
|
[
"Apache-2.0"
] | null | null | null |
metrics/__init__.py
|
nathan-bennett/skellam
|
8a1fff14ac8c5f6bd415a51befab818f864ab3e5
|
[
"Apache-2.0"
] | null | null | null |
metrics/__init__.py
|
nathan-bennett/skellam
|
8a1fff14ac8c5f6bd415a51befab818f864ab3e5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import scipy
from shared_utils import ArrayUtils
class SkellamMetrics:
    """Goodness-of-fit and inference metrics for a fitted Skellam regression.

    Wraps an optimizer result (``model``) with attributes ``fun`` (objective
    value at the optimum) and ``x`` (the stacked coefficient vector for the
    two Poisson intensities lambda_0 and lambda_1).
    """
    def __init__(self, x_metrics, y_metrics, y_hat, model, l0, l1, training_values):
        # Observed target and model predictions.
        self._y = y_metrics
        self._y_hat = y_hat
        self.model = model
        # Observed counts for the two underlying Poisson components.
        self.l0 = ArrayUtils.convert_to_array(l0)
        self.l1 = ArrayUtils.convert_to_array(l1)
        self.training_values = training_values
        # Design matrices for lambda_0 and lambda_1 (shared or split).
        self._x0, self._x1 = self.split_or_duplicate_x(x_metrics)
        # NOTE(review): ``model.fun`` from a scipy-style minimizer is the
        # *minimized* objective (often the negative log-likelihood), yet it is
        # used below as the maximized log-likelihood — confirm the convention.
        self.max_ll = self.model.fun
        self.coeff_size = self._x0.shape[1]
        # model.x stacks both coefficient sets: first half lambda_0, second half lambda_1.
        self.lambda_0_coefficients = self.model.x[0 : self.coeff_size].reshape(-1, 1)
        self.lambda_1_coefficients = self.model.x[self.coeff_size :].reshape(-1, 1)
        self.train_length = len(training_values[0])
    @staticmethod
    def split_or_duplicate_x(x):
        # Delegates to the shared helper; False disables the paired-array mode.
        return ArrayUtils.split_or_duplicate_x(x, False)
    def sse(self):
        """Sum of squared errors between observed and predicted values."""
        return ((self._y - self._y_hat) ** 2).sum()
    def _y_bar(self):
        """Mean of the observed target."""
        return self._y.mean()
    def sst(self):
        """Total sum of squares of the observed target."""
        return ((self._y - self._y_bar()) ** 2).sum()
    def r2(self):
        """Calculate R2 for either the train model or the test model"""
        sse_sst = self.sse() / self.sst()
        return 1 - sse_sst
    def adjusted_r2(self):
        """Calculate adjusted R2 for either the train model or the test model"""
        r2 = self.r2()
        return 1 - (1-r2)*(self.train_length - 1)/(self.train_length - self.coeff_size - 1)
    def log_likelihood(self):
        """Returns the maximum of the log likelihood function"""
        return self.max_ll
    def aic(self):
        """Akaike information criterion.

        NOTE(review): AIC is 2k - 2*ln(L). If ``max_ll`` is already the
        *log*-likelihood (as ``log_likelihood``'s docstring says), taking
        ``np.log`` of it again looks wrong — confirm and fix if so.
        """
        return 2*self.coeff_size - 2*np.log(self.max_ll)
    def bic(self):
        """Bayesian information criterion.

        NOTE(review): same concern as ``aic`` — ``np.log(self.max_ll)`` of a
        log-likelihood appears to double-log; confirm the convention.
        """
        return self.coeff_size*np.log(self.train_length) - 2*np.log(self.max_ll)
    def _calculate_lambda(self):
        """Create arrays for our predictions of the two Poisson distributions
        (exp of the linear predictors for lambda_0 and lambda_1).
        """
        _lambda0 = ArrayUtils.convert_to_array(
            np.exp(np.squeeze(self._x0 @ self.lambda_0_coefficients))
        )
        _lambda1 = ArrayUtils.convert_to_array(
            np.exp(np.squeeze(self._x1 @ self.lambda_1_coefficients))
        )
        return _lambda0, _lambda1
    def _calculate_v(self):
        """Create diagonal matrix consisting of our predictions of the Poisson distributions
        """
        _lambda0, _lambda1 = self._calculate_lambda()
        _v0 = np.diagflat(_lambda0)
        _v1 = np.diagflat(_lambda1)
        return _v0, _v1
    def _calculate_w(self):
        """Create a diagonal matrix consisting of the difference between our predictions of the 2 Poisson distributions
        with their observed values
        """
        _lambda0, _lambda1 = self._calculate_lambda()
        _w0 = np.diagflat((self.l0 - _lambda0.reshape(-1, 1)) ** 2)
        _w1 = np.diagflat((self.l1 - _lambda1.reshape(-1, 1)) ** 2)
        return _w0, _w1
    def _calculate_robust_covariance(self):
        """Calculate robust variance covariance matrices for our two sets of coefficients

        NOTE(review): the ``*`` between the three matrix factors is
        *elementwise* multiplication on ndarrays; a sandwich estimator
        (A^-1 B A^-1) is normally composed with matrix products — confirm
        whether ``np.dot`` was intended here.
        """
        _v0, _v1 = self._calculate_v()
        _w0, _w1 = self._calculate_w()
        _robust_cov0 = (
            np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
            * np.dot(np.dot(self._x0.T, _w0), self._x0)
            * np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
        )
        _robust_cov1 = (
            np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
            * np.dot(np.dot(self._x1.T, _w1), self._x1)
            * np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
        )
        return _robust_cov0, _robust_cov1
    def _calculate_robust_standard_errors(self):
        """Calculate robust standard errors for our two sets of coefficients by taking the square root of the diagonal
        values in the variance covariance matrices
        """
        _robust_cov0, _robust_cov1 = self._calculate_robust_covariance()
        _std_error0 = np.sqrt(np.diag(_robust_cov0))
        _std_error1 = np.sqrt(np.diag(_robust_cov1))
        return _std_error0, _std_error1
    def _calculate_z_values(self):
        """Calculate z statistics for our two sets of coefficients
        """
        _std_error0, _std_error1 = self._calculate_robust_standard_errors()
        _z_values0 = self.lambda_0_coefficients[:, 0] / _std_error0
        _z_values1 = self.lambda_1_coefficients[:, 0] / _std_error1
        return _z_values0, _z_values1
    def _calculate_p_values(self):
        """Calculate p values for our two sets of coefficients
        """
        _z_values0, _z_values1 = self._calculate_z_values()
        # Two-sided p-values from the standard normal survival function.
        _p_values0 = scipy.stats.norm.sf(abs(_z_values0)) * 2
        _p_values1 = scipy.stats.norm.sf(abs(_z_values1)) * 2
        return _p_values0, _p_values1
| 38.524194
| 119
| 0.639941
|
import numpy as np
import scipy
from shared_utils import ArrayUtils
class SkellamMetrics:
    """Goodness-of-fit statistics and robust coefficient inference for a
    fitted Skellam regression model.

    ``model`` must expose ``fun`` (objective at the optimum) and ``x`` (both
    coefficient vectors stacked), e.g. a ``scipy.optimize.OptimizeResult``.
    """

    def __init__(self, x_metrics, y_metrics, y_hat, model, l0, l1, training_values):
        self._y = y_metrics
        self._y_hat = y_hat
        self.model = model
        self.l0 = ArrayUtils.convert_to_array(l0)
        self.l1 = ArrayUtils.convert_to_array(l1)
        self.training_values = training_values
        # x may be a single design matrix (shared) or one per Poisson rate.
        self._x0, self._x1 = self.split_or_duplicate_x(x_metrics)
        # NOTE(review): used as the maximised log-likelihood below; if
        # ``model.fun`` is a minimised negative log-likelihood, flip the sign.
        self.max_ll = self.model.fun
        self.coeff_size = self._x0.shape[1]
        # ``model.x`` stacks lambda_0 coefficients then lambda_1 coefficients.
        self.lambda_0_coefficients = self.model.x[0 : self.coeff_size].reshape(-1, 1)
        self.lambda_1_coefficients = self.model.x[self.coeff_size :].reshape(-1, 1)
        self.train_length = len(training_values[0])

    @staticmethod
    def split_or_duplicate_x(x):
        """Split a pair of covariate sets, or duplicate a single one."""
        return ArrayUtils.split_or_duplicate_x(x, False)

    def sse(self):
        """Sum of squared prediction errors."""
        return ((self._y - self._y_hat) ** 2).sum()

    def _y_bar(self):
        """Mean of the observed target."""
        return self._y.mean()

    def sst(self):
        """Total sum of squares around the mean."""
        return ((self._y - self._y_bar()) ** 2).sum()

    def r2(self):
        """Coefficient of determination, 1 - SSE/SST."""
        sse_sst = self.sse() / self.sst()
        return 1 - sse_sst

    def adjusted_r2(self):
        """R2 adjusted for the number of coefficients."""
        r2 = self.r2()
        return 1 - (1 - r2) * (self.train_length - 1) / (self.train_length - self.coeff_size - 1)

    def log_likelihood(self):
        """Maximum of the log-likelihood function."""
        return self.max_ll

    def aic(self):
        """Akaike information criterion, 2k - 2*ln(L).

        Fixed: ``max_ll`` is already a log-likelihood, so the previous
        ``np.log(self.max_ll)`` double-logged it (NaN for negative values).
        """
        return 2 * self.coeff_size - 2 * self.max_ll

    def bic(self):
        """Bayesian information criterion, k*ln(n) - 2*ln(L) (same fix as aic)."""
        return self.coeff_size * np.log(self.train_length) - 2 * self.max_ll

    def _calculate_lambda(self):
        """Predicted rates of the two underlying Poisson distributions."""
        _lambda0 = ArrayUtils.convert_to_array(
            np.exp(np.squeeze(self._x0 @ self.lambda_0_coefficients))
        )
        _lambda1 = ArrayUtils.convert_to_array(
            np.exp(np.squeeze(self._x1 @ self.lambda_1_coefficients))
        )
        return _lambda0, _lambda1

    def _calculate_v(self):
        """Diagonal matrices of the predicted Poisson rates."""
        _lambda0, _lambda1 = self._calculate_lambda()
        _v0 = np.diagflat(_lambda0)
        _v1 = np.diagflat(_lambda1)
        return _v0, _v1

    def _calculate_w(self):
        """Diagonal matrices of squared residuals between observed counts and
        predicted rates."""
        _lambda0, _lambda1 = self._calculate_lambda()
        _w0 = np.diagflat((self.l0 - _lambda0.reshape(-1, 1)) ** 2)
        _w1 = np.diagflat((self.l1 - _lambda1.reshape(-1, 1)) ** 2)
        return _w0, _w1

    def _calculate_robust_covariance(self):
        """Robust variance-covariance matrices for the two coefficient sets.

        NOTE(review): ``*`` below is elementwise, not ``@``; confirm whether a
        true sandwich estimator was intended before changing.
        """
        _v0, _v1 = self._calculate_v()
        _w0, _w1 = self._calculate_w()
        _robust_cov0 = (
            np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
            * np.dot(np.dot(self._x0.T, _w0), self._x0)
            * np.linalg.inv(np.dot(np.dot(self._x0.T, _v0), self._x0))
        )
        _robust_cov1 = (
            np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
            * np.dot(np.dot(self._x1.T, _w1), self._x1)
            * np.linalg.inv(np.dot(np.dot(self._x1.T, _v1), self._x1))
        )
        return _robust_cov0, _robust_cov1

    def _calculate_robust_standard_errors(self):
        """Robust standard errors: sqrt of the covariance diagonals."""
        _robust_cov0, _robust_cov1 = self._calculate_robust_covariance()
        _std_error0 = np.sqrt(np.diag(_robust_cov0))
        _std_error1 = np.sqrt(np.diag(_robust_cov1))
        return _std_error0, _std_error1

    def _calculate_z_values(self):
        """z statistics for the two coefficient sets."""
        _std_error0, _std_error1 = self._calculate_robust_standard_errors()
        _z_values0 = self.lambda_0_coefficients[:, 0] / _std_error0
        _z_values1 = self.lambda_1_coefficients[:, 0] / _std_error1
        return _z_values0, _z_values1

    def _calculate_p_values(self):
        """Two-sided normal p values for the two coefficient sets."""
        _z_values0, _z_values1 = self._calculate_z_values()
        _p_values0 = scipy.stats.norm.sf(abs(_z_values0)) * 2
        _p_values1 = scipy.stats.norm.sf(abs(_z_values1)) * 2
        return _p_values0, _p_values1
| true
| true
|
790d890e572f0484dc39deb4959d6eb47614406d
| 1,683
|
py
|
Python
|
compecon/demos/demddp04.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | 23
|
2016-12-14T13:21:27.000Z
|
2020-08-23T21:04:34.000Z
|
compecon/demos/demddp04.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | 1
|
2017-09-10T04:48:54.000Z
|
2018-03-31T01:36:46.000Z
|
compecon/demos/demddp04.py
|
daniel-schaefer/CompEcon-python
|
d3f66e04a7e02be648fc5a68065806ec7cc6ffd6
|
[
"MIT"
] | 13
|
2017-02-25T08:10:38.000Z
|
2020-05-15T09:49:16.000Z
|
__author__ = 'Randall'
from demos.setup import np, plt, demo
from compecon import DDPmodel
# DEMDDP04 Binomial American put option model
# Prices an American put on a binomial lattice by casting the
# early-exercise decision as a finite-horizon discrete dynamic program.
# Model Parameters
T = 0.5                       # years to expiration
sigma = 0.2                   # annual volatility
r = 0.05                      # annual interest rate
strike = 2.1                  # option strike price
p0 = 2.0                      # current asset price
# Discretization Parameters
N = 100                       # number of time intervals
tau = T / N                   # length of time intervals
delta = np.exp(-r * tau)      # discount factor per interval
u = np.exp(sigma * np.sqrt(tau))          # up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma)   # up jump probability
# State Space: lattice of attainable prices (2N+1 nodes)
price = p0 * u ** np.arange(-N, N+1)      # asset prices
n = price.size                # number of states
# Action Space (hold=1, exercise=2)
X = ['hold', 'exercise']      # vector of actions
m = len(X)                    # number of actions
# Reward Function: exercising pays intrinsic value, holding pays nothing
f = np.zeros((m,n))
f[1] = strike - price
# State Transition Probability Matrix: under "hold" the price moves up with
# probability q, down with 1-q (clamped at the lattice edges)
P = np.zeros((m, n, n))
for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q
    P[0, i, max(i - 1, 0)] = 1 - q
# Model Structure: finite-horizon DDP with N periods
model = DDPmodel(f, P, delta, horizon=N)
model.solve()
## Analysis
# Plot Optimal Exercise Boundary: np.where over the policy differences gives
# (time index i, price-state index j) of each policy switch
i, j = np.where(np.diff(model.policy[:-1], 1))
temp = (i * tau)[::-1]
demo.figure('Put Option Optimal Exercise Boundary', 'Time to Maturity', 'Asset Price')
plt.plot(temp, price[j])
# Plot Option Premium vs. Asset Price (dashed line = intrinsic value)
demo.figure('Put Option Value', 'Asset Price', 'Premium', [0, 2 * strike])
plt.plot([0, strike],[strike, 0], 'k--', lw=2)
plt.plot(price, model.value[0], lw=3)
plt.show()
| 29.017241
| 86
| 0.59893
|
__author__ = 'Randall'
from demos.setup import np, plt, demo
from compecon import DDPmodel
# DEMDDP04 Binomial American put option model: price an American put by
# solving the early-exercise decision as a finite-horizon dynamic program.
T = 0.5                       # years to expiration
sigma = 0.2                   # annual volatility
r = 0.05                      # annual interest rate
strike = 2.1                  # option strike price
p0 = 2.0                      # current asset price
N = 100                       # number of time intervals
tau = T / N                   # length of each interval
delta = np.exp(-r * tau)      # per-interval discount factor
u = np.exp(sigma * np.sqrt(tau))          # up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma)   # up jump probability
price = p0 * u ** np.arange(-N, N+1)      # lattice of asset prices (states)
n = price.size
X = ['hold', 'exercise']      # actions
m = len(X)
# Reward: exercising pays intrinsic value; holding pays nothing.
f = np.zeros((m,n))
f[1] = strike - price
# Transition probabilities for "hold": up with q, down with 1-q (edge-clamped).
P = np.zeros((m, n, n))
for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q
    P[0, i, max(i - 1, 0)] = 1 - q
model = DDPmodel(f, P, delta, horizon=N)
model.solve()
# BUG FIX: the result of np.where was discarded, leaving ``j`` undefined
# (NameError at ``price[j]`` below). Capture both index arrays: i is the
# time-step index and j the price-state index of each policy switch.
i, j = np.where(np.diff(model.policy[:-1], 1))
temp = (i * tau)[::-1]
demo.figure('Put Option Optimal Exercise Boundary', 'Time to Maturity', 'Asset Price')
plt.plot(temp, price[j])
# Premium vs. asset price; dashed line is the intrinsic value.
demo.figure('Put Option Value', 'Asset Price', 'Premium', [0, 2 * strike])
plt.plot([0, strike],[strike, 0], 'k--', lw=2)
plt.plot(price, model.value[0], lw=3)
plt.show()
| true
| true
|
790d89446cea7063ba034aae3075bfd56fe8dac1
| 11,029
|
py
|
Python
|
benchmarks/crypto.py
|
codeclimate-testing/falcon
|
c2d0b9da4d4cffd39cd489ffa886ee745d06f063
|
[
"Apache-2.0"
] | 115
|
2015-01-18T13:28:05.000Z
|
2022-03-01T23:45:44.000Z
|
benchmarks/crypto.py
|
codeclimate-testing/falcon
|
c2d0b9da4d4cffd39cd489ffa886ee745d06f063
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/crypto.py
|
codeclimate-testing/falcon
|
c2d0b9da4d4cffd39cd489ffa886ee745d06f063
|
[
"Apache-2.0"
] | 8
|
2015-02-12T04:08:42.000Z
|
2018-09-11T20:55:29.000Z
|
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
# ShiftRow offsets per block size: shifts[SC][row] = [encrypt shift, decrypt shift]
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size] -> number of rounds
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
# Bit matrix of the S-box affine transform.
A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)
log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01
# Constant vector of the affine transform.
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7-t)
    Si[S[i] & 0xFF] = i
# T-boxes: G is the MixColumn polynomial matrix; iG (computed below) its inverse.
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
    AA[i][i+4] = 1
# Gauss-Jordan elimination over GF(2^8) on the augmented matrix [G | I].
for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    for t in range(4):
        if i != t:
            for j in range(i+1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0
# Right half of the reduced augmented matrix is G^-1.
iG = [[0] * 4 for i in range(4)]
for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]
# Multiply GF element a by each entry of 4-vector bs, packing into 32 bits.
def mul4(a, bs):
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
# T1-T4 combine SubBytes+MixColumn for encryption, T5-T8 the inverse;
# U1-U4 apply inverse MixColumn alone (used on the decryption key schedule).
for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))
    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))
    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)
# Drop construction-time scratch names; only the lookup tables remain public.
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael(object):
    """Pure-Python Rijndael (AES) with 16/24/32-byte keys and block sizes.

    NOTE(review): encrypt/decrypt use ``ord``/``chr`` per character, i.e. they
    operate on ``str`` with one character per byte, not on ``bytes`` — confirm
    callers pass str.
    """

    def __init__(self, key, block_size = 16):
        """Validate sizes and expand *key* into encryption (Ke) and
        decryption (Kd) round-key schedules."""
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size
        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size // 4
        # encryption round keys
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4
        # copy user material bytes into temporary ints (one 32-bit word each)
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt & 0xFF] & 0xFF) << 8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF) ^ \
                     (rcon[rconpointer] & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i-1]
            else:
                # 256-bit keys get an extra S-box substitution mid-schedule
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i-1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
                               (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i-1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed (all but first and last round keys)
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >> 8) & 0xFF] ^ \
                           U4[ tt & 0xFF]
        self.Ke = Ke
        self.Kd = Kd

    def encrypt(self, plaintext):
        """Encrypt one block (str of exactly ``block_size`` chars) -> str."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke
        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        # SC selects the ShiftRow offset table for this block size
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in range(BC):
            t.append((ord(plaintext[i * 4 ]) << 24 |
                ord(plaintext[i * 4 + 1]) << 16 |
                ord(plaintext[i * 4 + 2]) << 8 |
                ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
        # apply round transforms (table-driven SubBytes+ShiftRow+MixColumn)
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
                    T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                    T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                    T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special (no MixColumn)
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))

    def decrypt(self, ciphertext):
        """Decrypt one block (str of exactly ``block_size`` chars) -> str."""
        if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd
        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        # SC selects the ShiftRow offset table for this block size
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4 ]) << 24 |
                ord(ciphertext[i * 4 + 1]) << 16 |
                ord(ciphertext[i * 4 + 2]) << 8 |
                ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
        # apply round transforms (inverse tables)
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
                    T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                    T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                    T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special (inverse S-box only)
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))
def encrypt(key, block):
    """One-shot helper: encrypt a single block with a throwaway cipher."""
    cipher = rijndael(key, len(block))
    return cipher.encrypt(block)
def decrypt(key, block):
    """One-shot helper: decrypt a single block with a throwaway cipher."""
    cipher = rijndael(key, len(block))
    return cipher.decrypt(block)
def t(kl, bl):
    """Round-trip check: encrypt then decrypt must reproduce the input."""
    plaintext = 'b' * bl
    cipher = rijndael('a' * kl, bl)
    roundtrip = cipher.decrypt(cipher.encrypt(plaintext))
    assert roundtrip == plaintext
def multiple_calls(N):
    """Run N iterations of round-trip checks over key/block size combinations.

    Fixed: ``xrange`` does not exist on Python 3 (the rest of the file uses
    Python 3 ``//`` division), so it raised NameError; use ``range``.
    """
    for _ in range(N):
        t(16, 24)
        t(16, 32)
        t(24, 16)
        t(24, 24)
        t(24, 32)
        t(32, 16)
        t(32, 24)
        t(32, 32)
if __name__ == '__main__':
    # Benchmark entry point: repeat the round-trip suite 50 times.
    n_repeats = 50
    multiple_calls(n_repeats)
| 28.871728
| 117
| 0.430683
|
import copy
import string
# ShiftRow offsets per block size: shifts[SC][row] = [encrypt shift, decrypt shift]
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size] -> number of rounds
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
# Bit matrix of the S-box affine transform.
A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]
# Log and antilog tables for multiplication in GF(2^8) (generator = 3).
alog = [1]
for i in range(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)
log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i
# Multiply two elements of GF(2^8) via the log/antilog tables.
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# Substitution box based on the multiplicative inverse F^{-1}(x).
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01
# Constant vector of the affine transform.
B = [0, 1, 1, 0, 0, 0, 1, 1]
# Affine transform: box[i] <- B + A*box[i].
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]
# S-box and inverse S-box as flat byte tables.
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7-t)
    Si[S[i] & 0xFF] = i
# G is the MixColumn polynomial matrix; iG (computed below) is its inverse.
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
    AA[i][i+4] = 1
# Gauss-Jordan elimination over GF(2^8) on the augmented matrix [G | I].
for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    for t in range(4):
        if i != t:
            for j in range(i+1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0
# Right half of the reduced augmented matrix is G^-1.
iG = [[0] * 4 for i in range(4)]
for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]
# Multiply GF element a by each entry of 4-vector bs, packing into 32 bits.
def mul4(a, bs):
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
# T1-T4 combine SubBytes+MixColumn for encryption, T5-T8 the inverse;
# U1-U4 apply inverse MixColumn alone (used on the decryption key schedule).
for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))
    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))
    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))
# Round constants for the key schedule.
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)
# Drop construction-time scratch names; only the lookup tables remain public.
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael(object):
    """Pure-Python Rijndael (AES) with 16/24/32-byte keys and block sizes.

    NOTE(review): encrypt/decrypt use ``ord``/``chr`` per character, i.e. they
    operate on ``str`` with one character per byte, not on ``bytes`` — confirm
    callers pass str.
    """

    def __init__(self, key, block_size = 16):
        """Validate sizes and expand *key* into encryption (Ke) and
        decryption (Kd) round-key schedules."""
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size
        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size // 4
        # Encryption and decryption round keys.
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4
        # Pack the key material into 32-bit words.
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
        # Copy initial values into the round key arrays.
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # Extrapolate using phi (the round key evolution function).
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt & 0xFF] & 0xFF) << 8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF) ^ \
                     (rcon[rconpointer] & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i-1]
            else:
                # 256-bit keys get an extra S-box substitution mid-schedule.
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i-1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
                               (S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i-1]
            # Copy the evolved values into the round key arrays.
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # Inverse MixColumn on all but the first and last decryption keys.
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >> 8) & 0xFF] ^ \
                           U4[ tt & 0xFF]
        self.Ke = Ke
        self.Kd = Kd

    def encrypt(self, plaintext):
        """Encrypt one block (str of exactly ``block_size`` chars) -> str."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke
        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        # SC selects the ShiftRow offset table for this block size.
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # Temporary work array: plaintext packed to ints, XORed with key 0.
        t = []
        for i in range(BC):
            t.append((ord(plaintext[i * 4 ]) << 24 |
                ord(plaintext[i * 4 + 1]) << 16 |
                ord(plaintext[i * 4 + 2]) << 8 |
                ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
        # Table-driven round transforms (SubBytes+ShiftRow+MixColumn).
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
                    T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                    T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                    T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
            t = copy.copy(a)
        # Last round is special (no MixColumn).
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))

    def decrypt(self, ciphertext):
        """Decrypt one block (str of exactly ``block_size`` chars) -> str."""
        if len(ciphertext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd
        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        # SC selects the ShiftRow offset table for this block size.
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # Temporary work array: ciphertext packed to ints, XORed with key 0.
        t = [0] * BC
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4 ]) << 24 |
                ord(ciphertext[i * 4 + 1]) << 16 |
                ord(ciphertext[i * 4 + 2]) << 8 |
                ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
        # Table-driven inverse round transforms.
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
                    T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                    T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
                    T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
            t = copy.copy(a)
        # Last round is special (inverse S-box only).
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
        return ''.join(map(chr, result))
def encrypt(key, block):
    """Encrypt a single block with a freshly keyed cipher (block size inferred)."""
    return rijndael(key, block_size=len(block)).encrypt(block)
def decrypt(key, block):
    """Decrypt a single block with a freshly keyed cipher (block size inferred)."""
    return rijndael(key, block_size=len(block)).decrypt(block)
def t(kl, bl):
    """Assert that decrypt(encrypt(x)) == x for the given key/block lengths."""
    message = 'b' * bl
    cipher = rijndael('a' * kl, bl)
    assert cipher.decrypt(cipher.encrypt(message)) == message
def multiple_calls(N):
    """Run N iterations of round-trip checks over key/block size combinations.

    Fixed: ``xrange`` is Python-2-only and raised NameError here (the file
    otherwise uses Python 3 ``//`` division); use ``range``.
    """
    for _ in range(N):
        t(16, 24)
        t(16, 32)
        t(24, 16)
        t(24, 24)
        t(24, 32)
        t(32, 16)
        t(32, 24)
        t(32, 32)
if __name__ == '__main__':
    # Benchmark entry point: repeat the round-trip suite 50 times.
    n_repeats = 50
    multiple_calls(n_repeats)
| true
| true
|
790d894ac649eab4e1f3c6ca5bc2cad193bdd4e5
| 19,599
|
py
|
Python
|
tests/unit/test_validators/test_linearity_validator.py
|
abxsantos/analytical-validation-backend
|
1ea980f17be10562f2b9e9db384076374f445642
|
[
"MIT"
] | null | null | null |
tests/unit/test_validators/test_linearity_validator.py
|
abxsantos/analytical-validation-backend
|
1ea980f17be10562f2b9e9db384076374f445642
|
[
"MIT"
] | 6
|
2021-03-20T04:28:03.000Z
|
2022-01-21T20:32:07.000Z
|
tests/unit/test_validators/test_linearity_validator.py
|
abxsantos/analytical-validation-backend
|
1ea980f17be10562f2b9e9db384076374f445642
|
[
"MIT"
] | null | null | null |
from unittest.mock import call, PropertyMock, MagicMock
import pytest
from analytical_validation.exceptions import DataWasNotFitted
from src.analytical_validation.validators.linearity_validator import LinearityValidator
@pytest.fixture(scope='function')
def fitted_result_obj(mocker):
    """Mock of a fitted regression result exposing the attributes the
    LinearityValidator reads (params, pvalues, ess, ssr, df_model,
    df_resid, resid)."""
    mock = mocker.Mock(create=True)
    mock.params = (mocker.Mock(), mocker.Mock())
    mock.pvalues = (mocker.Mock(), mocker.Mock())
    mock.ess = MagicMock()
    mock.ssr = MagicMock()
    mock.df_model = MagicMock()
    mock.df_resid = MagicMock()
    mock.resid = mocker.Mock()
    return mock
@pytest.fixture(scope='function')
def linearity_validator_obj(fitted_result_obj):
    """LinearityValidator over a minimal dataset, with its regression result
    replaced by the mocked fitted_result_obj."""
    analytical_data = [[0.100, 0.200, 0.150]]
    concentration_data = [[0.1, 0.2, 0.3]]
    linearity_validator = LinearityValidator(analytical_data, concentration_data)
    linearity_validator.fitted_result = fitted_result_obj
    return linearity_validator
@pytest.fixture(scope='function')
def linearity_validator_outlier_obj():
    """LinearityValidator over data containing obvious outliers (10.0, 6.0)."""
    analytical_data = [[1.0, 1.0, 10.0], [2.0, 6.0, 2.0]]
    concentration_data = [[1.0, 2.0, 3.0], [8.0, 9.0, 10.0]]
    return LinearityValidator(analytical_data, concentration_data)
@pytest.fixture(scope='function')
def het_breuschpagan_mock(mocker):
    """Patch statsmodels' het_breuschpagan to return the fixed tuple (33, 42)."""
    het_breuschpagan_mock = mocker.patch('analytical_validation.validators.linearity_validator.'
                                         'statsmodelsapi.het_breuschpagan')
    het_breuschpagan_mock.return_value = (33, 42)
    return het_breuschpagan_mock
@pytest.fixture(scope='function')
def shapiro_mock(mocker, linearity_validator_obj):
    """Patch scipy.stats in the validator module so shapiro() returns (0, 1).

    Fixed: the original wrote ``shapiro_mock.shapiro(data).return_value = (0, 1)``,
    which calls the mock and configures the *result* of that one call instead
    of stubbing the ``shapiro`` attribute itself, so subsequent calls were not
    stubbed. Configure ``return_value`` on the attribute directly.
    """
    shapiro_mock = mocker.patch('analytical_validation.validators.linearity_validator.scipy.stats')
    shapiro_mock.shapiro.return_value = (0, 1)
    return shapiro_mock
@pytest.fixture(scope='function')
def durbin_watson_mock(mocker):
    """Patch stattools.durbin_watson to return the fixed statistic 1."""
    durbin_watson_mock = mocker.patch('analytical_validation.validators.linearity_validator.stattools.durbin_watson')
    durbin_watson_mock.return_value = 1
    return durbin_watson_mock
@pytest.fixture(scope='function')
def add_constant_mock(mocker):
    """Patch statsmodels.add_constant as used by the validator module."""
    add_constant_mock = mocker.patch(
        'analytical_validation.validators.linearity_validator.statsmodels.add_constant')
    return add_constant_mock
@pytest.fixture(scope='function')
def ordinary_least_squares_regression_mock(mocker):
    """Patch statsmodels.OLS as used by the validator module."""
    ordinary_least_squares_regression_mock = mocker.patch(
        'analytical_validation.validators.linearity_validator.statsmodels.OLS')
    return ordinary_least_squares_regression_mock
class TestLinearityValidator(object):
def test_constructor_must_create_object_when_analytical_data_has_float_values(self, linearity_validator_obj):
"""Given analytical data
The LinearityValidator
Should create a list of floats
"""
# Assert
assert linearity_validator_obj.analytical_data == [0.100, 0.200, 0.150]
assert linearity_validator_obj.concentration_data == [0.1, 0.2, 0.3]
def test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data(self,
ordinary_least_squares_regression_mock,
add_constant_mock,
linearity_validator_obj):
"""Given concentration values = float
The ordinary_least_squares_linear_regression
Then must set properties"""
# Act
linearity_validator_obj.ordinary_least_squares_linear_regression()
# Assert
assert linearity_validator_obj.fitted_result == ordinary_least_squares_regression_mock.return_value.fit.return_value # Garante que a regressao e resultado do resultado do metodo statsmodels.OLS(), aplicado .fit().
assert ordinary_least_squares_regression_mock.called # Garante que o metodo ols esta sendo chamado
assert ordinary_least_squares_regression_mock.call_args_list == [
call(linearity_validator_obj.analytical_data, add_constant_mock.return_value)
# Garante que os arquivos de entrada definidos no call foram utilizados
]
assert add_constant_mock.called
assert add_constant_mock.call_args_list == [
call(linearity_validator_obj.concentration_data)
]
def test_slope_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
# Act & assert
assert linearity_validator_obj.slope == fitted_result_obj.params[1]
def test_intercept_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
# Act & assert
assert linearity_validator_obj.intercept == fitted_result_obj.params[0]
def test_r_squared_adjusted_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.r_squared_adj == fitted_result_obj.rsquared_adj
def test_r_squared_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
# Act & assert
assert linearity_validator_obj.r_squared == fitted_result_obj.rsquared
def test_regression_residues_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
"""Given a regression model
when regression_residues is called
the regression residues must be created"""
assert linearity_validator_obj.regression_residues == fitted_result_obj.resid.tolist()
def test_sum_of_squares_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.sum_of_squares_model == fitted_result_obj.ess
def test_sum_of_squares_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.sum_of_squares_total == fitted_result_obj.ess + fitted_result_obj.ssr
def test_sum_of_squares_resid_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.sum_of_squares_resid == fitted_result_obj.ssr
def test_degrees_of_freedom_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.degrees_of_freedom_model == fitted_result_obj.df_model
def test_degrees_of_freedom_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.degrees_of_freedom_residues == fitted_result_obj.df_resid
def test_degrees_of_freedom_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.degrees_of_freedom_total == fitted_result_obj.df_model + fitted_result_obj.df_resid
def test_mean_squared_error_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.mean_squared_error_model == fitted_result_obj.mse_model
def test_mean_squared_error_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.mean_squared_error_residues == fitted_result_obj.mse_resid
def test_anova_f_value_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.anova_f_value == fitted_result_obj.fvalue
def test_anova_f_pvalue_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
# Act & assert
assert linearity_validator_obj.anova_f_pvalue == fitted_result_obj.f_pvalue
@pytest.mark.parametrize('param_anova_f_pvalue, param_alpha, expected_result', [
(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
])
def test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990(self, param_alpha,
linearity_validator_obj,
param_anova_f_pvalue,
expected_result):
"""Given data with an aceptable regression model
When valid_anova_f_pvalue is called
Then anova_f_pvalue < alpha must assert true"""
# Arrange
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.f_pvalue = param_anova_f_pvalue
# Act & Assert
assert linearity_validator_obj.valid_anova_f_pvalue is expected_result
@pytest.mark.parametrize('param_alpha, param_breusch_pagan_pvalue, expected_result', [
(1, -10, False), (0.05, 0.049, False), (0.10, 0.11, True), (0.05, 10, True)
])
def test_is_homokedastic_must_return_false_when_breusch_pagan_pvalue_is_smaller_than_alpha_otherwise_true(self,
param_alpha,
param_breusch_pagan_pvalue,
expected_result):
# Arrange
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.1, 0.2, 0.3]]
linearity_validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
linearity_validator.breusch_pagan_pvalue = param_breusch_pagan_pvalue
# Act & Assert
assert linearity_validator.is_homoscedastic is expected_result
@pytest.mark.parametrize('param_significant_slope, param_alpha, expected_result', [
(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
])
def test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha(self, linearity_validator_obj,
param_significant_slope,
param_alpha, expected_result):
"""Given homokedastic data
When check_hypothesis is called
Then slope_is_significant must assert true"""
# Arrange
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = ("mock value", param_significant_slope)
# Act & Assert
assert linearity_validator_obj.significant_slope is expected_result
@pytest.mark.parametrize('param_insignificant_intercept, param_alpha, expected_result', [
(0.051, 0.05, True), (10, 0.1, True), (0.049, 0.05, False), (0.001, 0.10, False)
])
def test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha(self,
linearity_validator_obj,
param_alpha,
param_insignificant_intercept,
expected_result):
"""Given homokedastic data
When check_hypothesis is called
Then intercept_not_significant must assert true"""
# Arrange
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = (param_insignificant_intercept, "mock value")
# Act & Assert
assert linearity_validator_obj.insignificant_intercept is expected_result
@pytest.mark.parametrize('param_r_squared, expected_result', [
(1, True), (0.99, True), (0.98, False)
])
def test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990(self,
linearity_validator_obj,
param_r_squared, expected_result):
"""Given homokedastic data
When check_hypothesis is called
Then r_squared > 0.990 must assert true"""
# Arrange
linearity_validator_obj.fitted_result.rsquared = param_r_squared
# Act & Assert
assert linearity_validator_obj.valid_r_squared is expected_result
@pytest.mark.parametrize(
'param_significant_slope, param_insignificant_intercept, param_valid_r_squared, expected_result', [
(True, True, True, True), (True, False, False, False), (True, True, False, False),
(False, True, True, False), (False, True, False, False), (False, False, False, False)
])
def test_valid_regression_model(self, mocker, param_significant_slope, param_insignificant_intercept,
param_valid_r_squared, expected_result):
# Arrange
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.significant_slope',
new_callable=PropertyMock, return_value=param_significant_slope)
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.insignificant_intercept',
new_callable=PropertyMock, return_value=param_insignificant_intercept)
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.valid_r_squared',
new_callable=PropertyMock, return_value=param_valid_r_squared)
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
linearity_validator = LinearityValidator(analytical_data, concentration_data)
# Act & Assert
assert linearity_validator.valid_regression_model is expected_result
    def test_check_outliers_when_given_list_of_list_data(self, linearity_validator_outlier_obj):
        """Outliers must be removed from the analytical data, and the paired
        concentration points must be removed as well."""
        # Act
        linearity_validator_outlier_obj.check_outliers()
        # Assert: 10.0 (row 0) and 6.0 (row 1) are flagged as outliers.
        assert linearity_validator_outlier_obj.outliers == [[10.0], [6.0]]
        # The outlier values are dropped from the analytical data...
        assert linearity_validator_outlier_obj.cleaned_analytical_data == [[1.0, 1.0], [2.0, 2.0]]
        # ...and the concentrations paired with them (3.0 and 9.0) are dropped too.
        assert linearity_validator_outlier_obj.cleaned_concentration_data == [[1.0, 2.0], [8.0, 10.0]]
@pytest.mark.parametrize('param_shapiro_pvalue, param_alpha, expected_result', [
(10, 0.05, True), (0.01, 0.1, False), (0.0501, 0.05, True), (0.099, 0.1, False)
])
def test_is_normal_distribution(self, param_shapiro_pvalue, param_alpha, expected_result):
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
validator.shapiro_pvalue = param_shapiro_pvalue
# Assert
assert validator.is_normal_distribution is expected_result
def test_run_breusch_pagan_test_must_raise_exception_when_model_is_none(self):
"""Not given a model parameter
The check_homokedasticity
Should raise exception"""
# Arrange
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
# Act & Assert
with pytest.raises(DataWasNotFitted):
LinearityValidator(analytical_data, concentration_data).run_breusch_pagan_test()
def test_run_breusch_pagan_test(self, linearity_validator_obj, het_breuschpagan_mock):
"""Given heterokedastic data
When check_homokedasticity is called
Then must return false"""
# Act
linearity_validator_obj.run_breusch_pagan_test()
# Assert
assert linearity_validator_obj.breusch_pagan_pvalue == 42
assert het_breuschpagan_mock.called
assert het_breuschpagan_mock.call_args_list == [
call(linearity_validator_obj.fitted_result.resid, linearity_validator_obj.fitted_result.model.exog)
]
@pytest.mark.parametrize('durbin_watson_pvalue', [
0.1, 1, 2, 2.5, 3, 3.9
])
def test_check_residual_autocorrelation(self, linearity_validator_obj, durbin_watson_mock,
durbin_watson_pvalue):
"""Given data
When residual_autocorrelation is called
Then must create durbin_watson_value"""
# Arrange
durbin_watson_mock.return_value = durbin_watson_pvalue
# Act
linearity_validator_obj.check_residual_autocorrelation()
# Assert
assert linearity_validator_obj.durbin_watson_value == durbin_watson_mock.return_value
assert durbin_watson_mock.called
assert durbin_watson_mock.call_args_list == [
call(linearity_validator_obj.fitted_result.resid)
]
def test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted(self, linearity_validator_obj):
"""Given data,
if no regression was calculated
Should raise an exception"""
# Arrange
linearity_validator_obj.fitted_result = None
# Act & assert
with pytest.raises(DataWasNotFitted):
linearity_validator_obj.check_residual_autocorrelation()
@pytest.mark.parametrize('durbin_watson_pvalue', [
-1, 10, 4.1
])
def test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4(self,
linearity_validator_obj,
durbin_watson_mock,
durbin_watson_pvalue):
"""Given data,
When check_residual is called
after fitting the model
Should pass creating
0 < durbin_watson_value < 4"""
# Arrange
durbin_watson_mock.return_value = durbin_watson_pvalue
# Act & Assert
assert linearity_validator_obj.durbin_watson_value is None
| 54.140884
| 222
| 0.643247
|
from unittest.mock import call, PropertyMock, MagicMock
import pytest
from analytical_validation.exceptions import DataWasNotFitted
from src.analytical_validation.validators.linearity_validator import LinearityValidator
@pytest.fixture(scope='function')
def fitted_result_obj(mocker):
    """Mock standing in for a statsmodels fitted-regression result.

    Exposes the attributes LinearityValidator reads from a fitted OLS
    result: params, pvalues, ess, ssr, df_model, df_resid and resid.
    """
    mock = mocker.Mock(create=True)
    # Two-element tuples: index 0 is the intercept entry, index 1 the slope
    # (the tests read params[0]/params[1] with that meaning).
    mock.params = (mocker.Mock(), mocker.Mock())
    mock.pvalues = (mocker.Mock(), mocker.Mock())
    # MagicMocks so arithmetic in the tests (e.g. ess + ssr) works.
    mock.ess = MagicMock()
    mock.ssr = MagicMock()
    mock.df_model = MagicMock()
    mock.df_resid = MagicMock()
    mock.resid = mocker.Mock()
    return mock
@pytest.fixture(scope='function')
def linearity_validator_obj(fitted_result_obj):
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.1, 0.2, 0.3]]
linearity_validator = LinearityValidator(analytical_data, concentration_data)
linearity_validator.fitted_result = fitted_result_obj
return linearity_validator
@pytest.fixture(scope='function')
def linearity_validator_outlier_obj():
    """LinearityValidator built on data containing one outlier per row
    (10.0 in the first row, 6.0 in the second)."""
    analytical_data = [[1.0, 1.0, 10.0], [2.0, 6.0, 2.0]]
    concentration_data = [[1.0, 2.0, 3.0], [8.0, 9.0, 10.0]]
    return LinearityValidator(analytical_data, concentration_data)
@pytest.fixture(scope='function')
def het_breuschpagan_mock(mocker):
het_breuschpagan_mock = mocker.patch('analytical_validation.validators.linearity_validator.'
'statsmodelsapi.het_breuschpagan')
het_breuschpagan_mock.return_value = (33, 42)
return het_breuschpagan_mock
@pytest.fixture(scope='function')
def shapiro_mock(mocker, linearity_validator_obj):
shapiro_mock = mocker.patch('analytical_validation.validators.linearity_validator.scipy.stats')
shapiro_mock.shapiro(linearity_validator_obj.analytical_data).return_value = (0, 1)
return shapiro_mock
@pytest.fixture(scope='function')
def durbin_watson_mock(mocker):
durbin_watson_mock = mocker.patch('analytical_validation.validators.linearity_validator.stattools.durbin_watson')
durbin_watson_mock.return_value = 1
return durbin_watson_mock
@pytest.fixture(scope='function')
def add_constant_mock(mocker):
add_constant_mock = mocker.patch(
'analytical_validation.validators.linearity_validator.statsmodels.add_constant')
return add_constant_mock
@pytest.fixture(scope='function')
def ordinary_least_squares_regression_mock(mocker):
ordinary_least_squares_regression_mock = mocker.patch(
'analytical_validation.validators.linearity_validator.statsmodels.OLS')
return ordinary_least_squares_regression_mock
class TestLinearityValidator(object):
def test_constructor_must_create_object_when_analytical_data_has_float_values(self, linearity_validator_obj):
assert linearity_validator_obj.analytical_data == [0.100, 0.200, 0.150]
assert linearity_validator_obj.concentration_data == [0.1, 0.2, 0.3]
def test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data(self,
ordinary_least_squares_regression_mock,
add_constant_mock,
linearity_validator_obj):
linearity_validator_obj.ordinary_least_squares_linear_regression()
assert linearity_validator_obj.fitted_result == ordinary_least_squares_regression_mock.return_value.fit.return_value
assert ordinary_least_squares_regression_mock.called
assert ordinary_least_squares_regression_mock.call_args_list == [
call(linearity_validator_obj.analytical_data, add_constant_mock.return_value)
]
assert add_constant_mock.called
assert add_constant_mock.call_args_list == [
call(linearity_validator_obj.concentration_data)
]
def test_slope_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
assert linearity_validator_obj.slope == fitted_result_obj.params[1]
def test_intercept_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
assert linearity_validator_obj.intercept == fitted_result_obj.params[0]
def test_r_squared_adjusted_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.r_squared_adj == fitted_result_obj.rsquared_adj
def test_r_squared_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
assert linearity_validator_obj.r_squared == fitted_result_obj.rsquared
def test_regression_residues_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
assert linearity_validator_obj.regression_residues == fitted_result_obj.resid.tolist()
def test_sum_of_squares_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.sum_of_squares_model == fitted_result_obj.ess
def test_sum_of_squares_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.sum_of_squares_total == fitted_result_obj.ess + fitted_result_obj.ssr
def test_sum_of_squares_resid_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.sum_of_squares_resid == fitted_result_obj.ssr
def test_degrees_of_freedom_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.degrees_of_freedom_model == fitted_result_obj.df_model
def test_degrees_of_freedom_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.degrees_of_freedom_residues == fitted_result_obj.df_resid
def test_degrees_of_freedom_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.degrees_of_freedom_total == fitted_result_obj.df_model + fitted_result_obj.df_resid
def test_mean_squared_error_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.mean_squared_error_model == fitted_result_obj.mse_model
def test_mean_squared_error_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.mean_squared_error_residues == fitted_result_obj.mse_resid
def test_anova_f_value_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.anova_f_value == fitted_result_obj.fvalue
def test_anova_f_pvalue_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
fitted_result_obj):
assert linearity_validator_obj.anova_f_pvalue == fitted_result_obj.f_pvalue
@pytest.mark.parametrize('param_anova_f_pvalue, param_alpha, expected_result', [
(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
])
def test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990(self, param_alpha,
linearity_validator_obj,
param_anova_f_pvalue,
expected_result):
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.f_pvalue = param_anova_f_pvalue
assert linearity_validator_obj.valid_anova_f_pvalue is expected_result
@pytest.mark.parametrize('param_alpha, param_breusch_pagan_pvalue, expected_result', [
(1, -10, False), (0.05, 0.049, False), (0.10, 0.11, True), (0.05, 10, True)
])
def test_is_homokedastic_must_return_false_when_breusch_pagan_pvalue_is_smaller_than_alpha_otherwise_true(self,
param_alpha,
param_breusch_pagan_pvalue,
expected_result):
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.1, 0.2, 0.3]]
linearity_validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
linearity_validator.breusch_pagan_pvalue = param_breusch_pagan_pvalue
assert linearity_validator.is_homoscedastic is expected_result
@pytest.mark.parametrize('param_significant_slope, param_alpha, expected_result', [
(0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
])
def test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha(self, linearity_validator_obj,
param_significant_slope,
param_alpha, expected_result):
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = ("mock value", param_significant_slope)
assert linearity_validator_obj.significant_slope is expected_result
@pytest.mark.parametrize('param_insignificant_intercept, param_alpha, expected_result', [
(0.051, 0.05, True), (10, 0.1, True), (0.049, 0.05, False), (0.001, 0.10, False)
])
def test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha(self,
linearity_validator_obj,
param_alpha,
param_insignificant_intercept,
expected_result):
linearity_validator_obj.alpha = param_alpha
linearity_validator_obj.fitted_result.pvalues = (param_insignificant_intercept, "mock value")
assert linearity_validator_obj.insignificant_intercept is expected_result
@pytest.mark.parametrize('param_r_squared, expected_result', [
(1, True), (0.99, True), (0.98, False)
])
def test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990(self,
linearity_validator_obj,
param_r_squared, expected_result):
linearity_validator_obj.fitted_result.rsquared = param_r_squared
assert linearity_validator_obj.valid_r_squared is expected_result
@pytest.mark.parametrize(
'param_significant_slope, param_insignificant_intercept, param_valid_r_squared, expected_result', [
(True, True, True, True), (True, False, False, False), (True, True, False, False),
(False, True, True, False), (False, True, False, False), (False, False, False, False)
])
def test_valid_regression_model(self, mocker, param_significant_slope, param_insignificant_intercept,
param_valid_r_squared, expected_result):
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.significant_slope',
new_callable=PropertyMock, return_value=param_significant_slope)
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.insignificant_intercept',
new_callable=PropertyMock, return_value=param_insignificant_intercept)
mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.valid_r_squared',
new_callable=PropertyMock, return_value=param_valid_r_squared)
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
linearity_validator = LinearityValidator(analytical_data, concentration_data)
assert linearity_validator.valid_regression_model is expected_result
def test_check_outliers_when_given_list_of_list_data(self, linearity_validator_outlier_obj):
linearity_validator_outlier_obj.check_outliers()
assert linearity_validator_outlier_obj.outliers == [[10.0], [6.0]]
assert linearity_validator_outlier_obj.cleaned_analytical_data == [[1.0, 1.0], [2.0, 2.0]]
assert linearity_validator_outlier_obj.cleaned_concentration_data == [[1.0, 2.0], [8.0, 10.0]]
@pytest.mark.parametrize('param_shapiro_pvalue, param_alpha, expected_result', [
(10, 0.05, True), (0.01, 0.1, False), (0.0501, 0.05, True), (0.099, 0.1, False)
])
def test_is_normal_distribution(self, param_shapiro_pvalue, param_alpha, expected_result):
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
validator.shapiro_pvalue = param_shapiro_pvalue
assert validator.is_normal_distribution is expected_result
def test_run_breusch_pagan_test_must_raise_exception_when_model_is_none(self):
analytical_data = [[0.100, 0.200, 0.150]]
concentration_data = [[0.2, 0.2, 0.3]]
with pytest.raises(DataWasNotFitted):
LinearityValidator(analytical_data, concentration_data).run_breusch_pagan_test()
def test_run_breusch_pagan_test(self, linearity_validator_obj, het_breuschpagan_mock):
linearity_validator_obj.run_breusch_pagan_test()
assert linearity_validator_obj.breusch_pagan_pvalue == 42
assert het_breuschpagan_mock.called
assert het_breuschpagan_mock.call_args_list == [
call(linearity_validator_obj.fitted_result.resid, linearity_validator_obj.fitted_result.model.exog)
]
@pytest.mark.parametrize('durbin_watson_pvalue', [
0.1, 1, 2, 2.5, 3, 3.9
])
def test_check_residual_autocorrelation(self, linearity_validator_obj, durbin_watson_mock,
durbin_watson_pvalue):
durbin_watson_mock.return_value = durbin_watson_pvalue
linearity_validator_obj.check_residual_autocorrelation()
assert linearity_validator_obj.durbin_watson_value == durbin_watson_mock.return_value
assert durbin_watson_mock.called
assert durbin_watson_mock.call_args_list == [
call(linearity_validator_obj.fitted_result.resid)
]
def test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted(self, linearity_validator_obj):
linearity_validator_obj.fitted_result = None
with pytest.raises(DataWasNotFitted):
linearity_validator_obj.check_residual_autocorrelation()
@pytest.mark.parametrize('durbin_watson_pvalue', [
-1, 10, 4.1
])
def test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4(self,
linearity_validator_obj,
durbin_watson_mock,
durbin_watson_pvalue):
durbin_watson_mock.return_value = durbin_watson_pvalue
assert linearity_validator_obj.durbin_watson_value is None
| true
| true
|
790d89b051ad928e9d6e848f5756aacde4baebf3
| 1,049
|
py
|
Python
|
xlsxwriter/test/comparison/test_comment06.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-07-01T07:24:37.000Z
|
2020-07-01T07:24:37.000Z
|
xlsxwriter/test/comparison/test_comment06.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_comment06.py
|
dthadi3/XlsxWriter
|
f1801e82240aa9c746ce14948ef95990b83162cf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('comment06.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with comments."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Attach the same comment to the first five cells of column A;
        # only the comment on A3 is made visible by default.
        for cell in ('A1', 'A2', 'A3', 'A4', 'A5'):
            if cell == 'A3':
                worksheet.write_comment(cell, 'Some text', {'visible': True})
            else:
                worksheet.write_comment(cell, 'Some text')

        worksheet.set_comments_author('John')

        workbook.close()

        self.assertExcelEqual()
| 26.225
| 79
| 0.618684
| true
| true
|
|
790d8a145788a182d3cee1348ee6a22e7eba58ed
| 3,354
|
py
|
Python
|
google-cloud-sdk/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/core/diagnostics/diagnostic_base.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for diagnostics."""
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import progress_tracker
class Diagnostic(object):
  """Base class for diagnostics.

  Attributes:
    intro: A message to introduce the objectives and tasks of the diagnostic.
    title: The name of the diagnostic.
    checklist: A list of checkbase.Check objects to be run by the
      diagnostic.
  """

  # Maximum number of fix-and-recheck attempts per check.
  _MAX_RETRIES = 5

  def __init__(self, intro, title, checklist):
    """Initializes Diagnostic with necessary attributes.

    Args:
      intro: A message to introduce the objectives and tasks of the diagnostic.
      title: The name of the diagnostic.
      checklist: An iterable of checkbase.Check objects to be run by the
        diagnostic.
    """
    self.intro = intro
    self.title = title
    # Materialize the iterable: RunChecks both iterates the checklist and
    # takes len() of it, which would break (or see zero items on the second
    # pass) for a one-shot iterator such as a generator.
    self.checklist = list(checklist)

  def RunChecks(self):
    """Runs one or more checks, tries fixes, and outputs results.

    Returns:
      True if the diagnostic ultimately passed.
    """
    self._Print(self.intro)

    num_checks_passed = 0
    for check in self.checklist:
      result, fixer = self._RunCheck(check)

      if properties.VALUES.core.disable_prompts.GetBool():
        # Fixers may prompt the user, so skip them in non-interactive mode,
        # but still count a passing check so the summary stays accurate.
        if result.passed:
          num_checks_passed += 1
        continue

      # If the initial check failed, and a fixer is available try to fix issue
      # and recheck.
      num_retries = 0
      while not result.passed and fixer and num_retries < self._MAX_RETRIES:
        num_retries += 1
        should_check_again = fixer()
        if should_check_again:
          result, fixer = self._RunCheck(check, first_run=False)
        else:
          fixer = None

      if not result.passed and fixer and num_retries == self._MAX_RETRIES:
        log.warn('Unable to fix {0} failure after {1} attempts.'.format(
            self.title, num_retries))

      if result.passed:
        num_checks_passed += 1

    num_checks = len(self.checklist)
    passed = (num_checks_passed == num_checks)
    summary = '{check} ({num_passed}/{num_checks} checks) {passed}.\n'.format(
        check=self.title, num_passed=num_checks_passed, num_checks=num_checks,
        passed='passed' if passed else 'failed')
    self._Print(summary, as_error=not passed)
    return passed

  def _RunCheck(self, check, first_run=True):
    """Runs a single check inside a progress tracker.

    Args:
      check: The checkbase.Check object to run.
      first_run: False when this is a recheck after a fix attempt (only
        changes the progress-tracker label).

    Returns:
      A (result, fixer) pair as returned by check.Check().
    """
    with progress_tracker.ProgressTracker('{0} {1}'.format(
        'Checking' if first_run else 'Rechecking', check.issue)):
      result, fixer = check.Check(first_run=first_run)
    self._PrintResult(result)
    return result, fixer

  def _Print(self, message, as_error=False):
    """Prints message to status output, or to error output if as_error."""
    logger = log.status.Print if not as_error else log.error
    logger(message)

  def _PrintResult(self, result):
    """Prints a check result; a failed check's message goes to error output."""
    self._Print(result.message, not result.passed)
| 33.207921
| 79
| 0.699463
|
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import progress_tracker
class Diagnostic(object):
  """Base class for diagnostics.

  Attributes:
    intro: A message to introduce the objectives and tasks of the diagnostic.
    title: The name of the diagnostic.
    checklist: An iterable of check objects to be run by the diagnostic.
  """
  # Maximum number of fix-and-recheck attempts per check.
  _MAX_RETRIES = 5
  def __init__(self, intro, title, checklist):
    """Initializes the diagnostic.

    Args:
      intro: A message to introduce the objectives and tasks of the diagnostic.
      title: The name of the diagnostic.
      checklist: An iterable of check objects to be run by the diagnostic.
    """
    self.intro = intro
    self.title = title
    self.checklist = checklist
  def RunChecks(self):
    """Runs the checks, attempting fixes on failure.

    Returns:
      True if every check ultimately passed.
    """
    self._Print(self.intro)
    num_checks_passed = 0
    for check in self.checklist:
      result, fixer = self._RunCheck(check)
      if properties.VALUES.core.disable_prompts.GetBool():
        # NOTE(review): in non-interactive mode the result is discarded before
        # the num_checks_passed increment below, so even passing checks are
        # reported as failed in the summary -- confirm this is intended.
        continue
      # If the first run failed and a fixer exists, alternate fixing and
      # rechecking up to _MAX_RETRIES times.
      num_retries = 0
      while not result.passed and fixer and num_retries < self._MAX_RETRIES:
        num_retries += 1
        should_check_again = fixer()
        if should_check_again:
          result, fixer = self._RunCheck(check, first_run=False)
        else:
          fixer = None
      if not result.passed and fixer and num_retries == self._MAX_RETRIES:
        log.warn('Unable to fix {0} failure after {1} attempts.'.format(
            self.title, num_retries))
      if result.passed:
        num_checks_passed += 1
    num_checks = len(self.checklist)
    passed = (num_checks_passed == num_checks)
    summary = '{check} ({num_passed}/{num_checks} checks) {passed}.\n'.format(
        check=self.title, num_passed=num_checks_passed, num_checks=num_checks,
        passed='passed' if passed else 'failed')
    self._Print(summary, as_error=not passed)
    return passed
  def _RunCheck(self, check, first_run=True):
    # Run one check inside a progress tracker; returns (result, fixer).
    # first_run only changes the tracker label to "Rechecking".
    with progress_tracker.ProgressTracker('{0} {1}'.format(
        'Checking' if first_run else 'Rechecking', check.issue)):
      result, fixer = check.Check(first_run=first_run)
    self._PrintResult(result)
    return result, fixer
  def _Print(self, message, as_error=False):
    # Route to error output when as_error is True, otherwise to status.
    logger = log.status.Print if not as_error else log.error
    logger(message)
  def _PrintResult(self, result):
    # A failed check's message is printed as an error.
    self._Print(result.message, not result.passed)
| true
| true
|
790d8aa3dd20e63992a5022c4c718a8f25bcdb4a
| 371
|
py
|
Python
|
doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88
|
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13
|
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1
|
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
# Benchmark setup for future.cmorph._dilate: a random 8-bit test image, a
# random binary structuring element, a pre-allocated output buffer and the
# anchor shifts passed to the dilation.
import numpy as np

from future.cmorph import _dilate

# Image and structuring-element dimensions.
rows = 1024
cols = 1024
srows = 64
scols = 64

# Random 8-bit grayscale image (randint's interval is half-open, so values
# lie in [0, 255)).
image = np.random.randint(0, 255, rows * cols, dtype=np.uint8).reshape(
    (rows, cols)
)
# Random binary structuring element.  The original call used
# randint(0, 1), which -- because the upper bound is exclusive -- always
# produced an all-zero selem and made the dilation benchmark trivial;
# high=2 yields genuine random 0/1 values.
selem = np.random.randint(0, 2, srows * scols, dtype=np.uint8).reshape(
    (srows, scols)
)
# Output buffer for the dilation result.
out = np.zeros((rows, cols), dtype=np.uint8)
# Anchor shifts for the structuring element.
shift_x = np.int8(2)
shift_y = np.int8(2)
| 21.823529
| 71
| 0.679245
|
# Benchmark setup for future.cmorph._dilate: builds a random uint8 test
# image, a structuring element, an output buffer and anchor shifts.
import numpy as np
from future.cmorph import _dilate
# Image and structuring-element dimensions.
rows = 1024
cols = 1024
srows = 64
scols = 64
# Random 8-bit grayscale image (randint's interval is half-open: [0, 255)).
image = np.random.randint(0, 255, rows * cols, dtype=np.uint8).reshape(
    (rows, cols)
)
# NOTE(review): randint(0, 1) always returns 0 because the upper bound is
# exclusive, so this structuring element is all zeros -- presumably
# randint(0, 2) was intended; confirm against the benchmark's purpose.
selem = np.random.randint(0, 1, srows * scols, dtype=np.uint8).reshape(
    (srows, scols)
)
# Pre-allocated output buffer for the dilation result.
out = np.zeros((rows, cols), dtype=np.uint8)
# Anchor shifts for the structuring element.
shift_x = np.int8(2)
shift_y = np.int8(2)
| true
| true
|
790d8c973d8f3371872a0ff0b9877108bc43a039
| 622
|
py
|
Python
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2016_10_01/aio/operations_async/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2016_10_01/aio/operations_async/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2016_10_01/aio/operations_async/__init__.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._vaults_operations_async import VaultsOperations
from ._operations_async import Operations
__all__ = [
'VaultsOperations',
'Operations',
]
| 38.875
| 94
| 0.588424
|
from ._vaults_operations_async import VaultsOperations
from ._operations_async import Operations
__all__ = [
'VaultsOperations',
'Operations',
]
| true
| true
|
790d8d10c93b27d59e0cddbc0638ac05326fbd57
| 3,359
|
py
|
Python
|
djng/middleware.py
|
shriDeveloper/django-angular
|
b32a910b0e154e5707a10fe3e58de1542fd4183b
|
[
"MIT"
] | 1
|
2020-01-09T12:18:59.000Z
|
2020-01-09T12:18:59.000Z
|
djng/middleware.py
|
shriDeveloper/django-angular
|
b32a910b0e154e5707a10fe3e58de1542fd4183b
|
[
"MIT"
] | null | null | null |
djng/middleware.py
|
shriDeveloper/django-angular
|
b32a910b0e154e5707a10fe3e58de1542fd4183b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
from django import http
from django.urls import reverse
from django.utils.http import unquote
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class AngularUrlMiddleware(MiddlewareMixin):
"""
If the request path is <ANGULAR_REVERSE> it should be resolved to actual view, otherwise return
``None`` and continue as usual.
This must be the first middleware in the MIDDLEWARE_CLASSES tuple!
"""
ANGULAR_REVERSE = '/angular/reverse/'
def process_request(self, request):
"""
Reads url name, args, kwargs from GET parameters, reverses the url and resolves view function
Returns the result of resolved view function, called with provided args and kwargs
Since the view function is called directly, it isn't ran through middlewares, so the middlewares must
be added manually
The final result is exactly the same as if the request was for the resolved view.
Parametrized urls:
djangoUrl.reverse can be used with parametrized urls of $resource
In that case the reverse url is something like: /angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=:id
$resource can either replace the ':id' part with say 2 and we can proceed as usual,
reverse with reverse('orders', kwargs={'id': 2}).
If it's not replaced we want to reverse to url we get a request to url
'/angular/reverse/?djng_url_name=orders&djng_url_kwarg_id=' which
gives a request.GET QueryDict {u'djng_url_name': [u'orders'], u'djng_url_kwarg_id': [u'']}
In that case we want to ignore the id param and only reverse to url with name 'orders' and no params.
So we ignore args and kwargs that are empty strings.
"""
if request.path == self.ANGULAR_REVERSE:
url_name = request.GET.get('djng_url_name')
url_args = request.GET.getlist('djng_url_args', [])
url_kwargs = {}
# Remove falsy values (empty strings)
url_args = filter(lambda x: x, url_args)
# Read kwargs
for param in request.GET:
if param.startswith('djng_url_kwarg_'):
# Ignore kwargs that are empty strings
if request.GET[param]:
url_kwargs[param[15:]] = request.GET[param] # [15:] to remove 'djng_url_kwarg' prefix
url = unquote(reverse(url_name, args=url_args, kwargs=url_kwargs))
assert not url.startswith(self.ANGULAR_REVERSE), "Prevent recursive requests"
# rebuild the request object with a different environ
request.path = request.path_info = url
request.environ['PATH_INFO'] = url
query = request.GET.copy()
for key in request.GET:
if key.startswith('djng_url'):
query.pop(key, None)
if six.PY3:
request.environ['QUERY_STRING'] = query.urlencode()
else:
request.environ['QUERY_STRING'] = query.urlencode().encode('utf-8')
# Reconstruct GET QueryList in the same way WSGIRequest.GET function works
request.GET = http.QueryDict(request.environ['QUERY_STRING'])
| 45.391892
| 116
| 0.650789
|
from __future__ import unicode_literals
import six
from django import http
from django.urls import reverse
from django.utils.http import unquote
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class AngularUrlMiddleware(MiddlewareMixin):
ANGULAR_REVERSE = '/angular/reverse/'
def process_request(self, request):
if request.path == self.ANGULAR_REVERSE:
url_name = request.GET.get('djng_url_name')
url_args = request.GET.getlist('djng_url_args', [])
url_kwargs = {}
url_args = filter(lambda x: x, url_args)
for param in request.GET:
if param.startswith('djng_url_kwarg_'):
if request.GET[param]:
url_kwargs[param[15:]] = request.GET[param]
url = unquote(reverse(url_name, args=url_args, kwargs=url_kwargs))
assert not url.startswith(self.ANGULAR_REVERSE), "Prevent recursive requests"
request.path = request.path_info = url
request.environ['PATH_INFO'] = url
query = request.GET.copy()
for key in request.GET:
if key.startswith('djng_url'):
query.pop(key, None)
if six.PY3:
request.environ['QUERY_STRING'] = query.urlencode()
else:
request.environ['QUERY_STRING'] = query.urlencode().encode('utf-8')
request.GET = http.QueryDict(request.environ['QUERY_STRING'])
| true
| true
|
790d8d6c49aacc1ce2a70311d3da94451ba61e50
| 1,214
|
py
|
Python
|
Toolz/sqlmap/tamper/charunicodeescape.py
|
thezakman/CTF-Toolz
|
b369246ea6766165cce0852e537fb6a0c970869b
|
[
"Unlicense"
] | 71
|
2019-02-02T11:38:46.000Z
|
2022-03-31T14:08:27.000Z
|
tools/sqlmap/tamper/charunicodeescape.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | null | null | null |
tools/sqlmap/tamper/charunicodeescape.py
|
sravani-m/Web-Application-Security-Framework
|
d9f71538f5cba6fe1d8eabcb26c557565472f6a6
|
[
"MIT"
] | 15
|
2019-08-07T06:32:04.000Z
|
2022-03-09T12:48:20.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def tamper(payload, **kwargs):
"""
Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> \u0053\u0045\u004C\u0045\u0043\u0054)
Notes:
* Useful to bypass weak filtering and/or WAFs in JSON contexes
>>> tamper('SELECT FIELD FROM TABLE')
'\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'
"""
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "\\u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += '\\u%.4X' % ord(payload[i])
i += 1
return retVal
| 30.35
| 213
| 0.57084
|
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def tamper(payload, **kwargs):
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "\\u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += '\\u%.4X' % ord(payload[i])
i += 1
return retVal
| true
| true
|
790d8d88be5c0cc63cebc70f5ce66d0a202515c4
| 3,957
|
py
|
Python
|
tests/lib/copy_engines/test_bbcp_copier.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 6
|
2017-04-20T00:49:49.000Z
|
2020-12-20T16:27:10.000Z
|
tests/lib/copy_engines/test_bbcp_copier.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 3
|
2017-04-08T15:47:16.000Z
|
2017-05-18T17:40:59.000Z
|
tests/lib/copy_engines/test_bbcp_copier.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 8
|
2017-04-07T23:42:36.000Z
|
2021-08-10T11:05:10.000Z
|
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import mock
from cloudferry.lib.copy_engines import base
from cloudferry.lib.copy_engines import bbcp_copier
from cloudferry.lib.utils import remote_runner
from tests.lib.copy_engines import test_base
from tests import test
class BbcpCopierTestCase(test_base.BaseTestCase):
copier_class = bbcp_copier.BbcpCopier
def setUp(self):
super(BbcpCopierTestCase, self).setUp()
self.src_cloud.hosts_with_bbcp = set()
self.dst_cloud.hosts_with_bbcp = set()
@mock.patch('cloudferry.lib.utils.utils.forward_agent')
@mock.patch('os.path.isfile')
def test_usage_false(self, mock_isfile, _):
mock_isfile.return_value = False
self.assertFalse(self.copier.check_usage(self.data))
mock_isfile.return_value = True
with mock.patch.object(self.copier, 'copy_bbcp',
side_effect=remote_runner.RemoteExecutionError):
self.assertFalse(self.copier.check_usage(self.data))
@mock.patch('os.path.isfile')
def test_usage_true(self, mock_isfile):
mock_isfile.return_value = True
with mock.patch.object(self.copier, 'copy_bbcp') as mock_copy_bbcp:
self.assertTrue(self.copier.check_usage(self.data))
self.assertEqual(2, mock_copy_bbcp.call_count)
mock_copy_bbcp.reset_mock()
self.assertTrue(self.copier.check_usage(self.data))
mock_copy_bbcp.assert_not_called()
def test_transfer_direct_true(self):
with self.mock_runner() as mock_runner:
self.copier.transfer(self.data)
self.assertCalledOnce(mock_runner.run)
mock_runner.reset_mock()
self.cfg.set_override('retry', 2, 'migrate')
mock_runner.run.side_effect = remote_runner.RemoteExecutionError()
with mock.patch.object(self.copier, 'clean_dst') as mock_clean_dst:
self.assertRaises(base.FileCopyError, self.copier.transfer,
self.data)
self.assertEqual(2, mock_runner.run.call_count)
self.assertCalledOnce(mock_clean_dst)
@mock.patch('cloudferry.lib.utils.local.run')
def test_transfer_direct_false(self, mock_run):
self.cfg.set_override('direct_transfer', False, 'migrate')
self.copier.transfer(self.data)
self.assertCalledOnce(mock_run)
@mock.patch('cloudferry.lib.utils.local.run')
def test_copy_bbcp(self, mock_run):
with self.mock_runner() as runner:
self.copier.copy_bbcp('fake_host', 'src')
self.assertCalledOnce(runner.run)
mock_run.assert_not_called()
runner.reset_mock()
runner.run.side_effect = (remote_runner.RemoteExecutionError,
None)
self.copier.copy_bbcp('fake_host', 'src')
self.assertEqual(2, runner.run.call_count)
self.assertCalledOnce(mock_run)
class RemoveBBCPTestCase(test.TestCase):
@mock.patch('cloudferry.lib.utils.remote_runner.RemoteRunner.'
'run_ignoring_errors')
def test_remove_bbcp(self, mock_run_ignoring_errors):
cloud = mock.Mock()
cloud.hosts_with_bbcp = {'fake_host_1', 'fake_host_2'}
cloud.position = 'src'
bbcp_copier.remove_bbcp(cloud)
self.assertEqual(2, mock_run_ignoring_errors.call_count)
| 39.969697
| 79
| 0.682841
|
import mock
from cloudferry.lib.copy_engines import base
from cloudferry.lib.copy_engines import bbcp_copier
from cloudferry.lib.utils import remote_runner
from tests.lib.copy_engines import test_base
from tests import test
class BbcpCopierTestCase(test_base.BaseTestCase):
copier_class = bbcp_copier.BbcpCopier
def setUp(self):
super(BbcpCopierTestCase, self).setUp()
self.src_cloud.hosts_with_bbcp = set()
self.dst_cloud.hosts_with_bbcp = set()
@mock.patch('cloudferry.lib.utils.utils.forward_agent')
@mock.patch('os.path.isfile')
def test_usage_false(self, mock_isfile, _):
mock_isfile.return_value = False
self.assertFalse(self.copier.check_usage(self.data))
mock_isfile.return_value = True
with mock.patch.object(self.copier, 'copy_bbcp',
side_effect=remote_runner.RemoteExecutionError):
self.assertFalse(self.copier.check_usage(self.data))
@mock.patch('os.path.isfile')
def test_usage_true(self, mock_isfile):
mock_isfile.return_value = True
with mock.patch.object(self.copier, 'copy_bbcp') as mock_copy_bbcp:
self.assertTrue(self.copier.check_usage(self.data))
self.assertEqual(2, mock_copy_bbcp.call_count)
mock_copy_bbcp.reset_mock()
self.assertTrue(self.copier.check_usage(self.data))
mock_copy_bbcp.assert_not_called()
def test_transfer_direct_true(self):
with self.mock_runner() as mock_runner:
self.copier.transfer(self.data)
self.assertCalledOnce(mock_runner.run)
mock_runner.reset_mock()
self.cfg.set_override('retry', 2, 'migrate')
mock_runner.run.side_effect = remote_runner.RemoteExecutionError()
with mock.patch.object(self.copier, 'clean_dst') as mock_clean_dst:
self.assertRaises(base.FileCopyError, self.copier.transfer,
self.data)
self.assertEqual(2, mock_runner.run.call_count)
self.assertCalledOnce(mock_clean_dst)
@mock.patch('cloudferry.lib.utils.local.run')
def test_transfer_direct_false(self, mock_run):
self.cfg.set_override('direct_transfer', False, 'migrate')
self.copier.transfer(self.data)
self.assertCalledOnce(mock_run)
@mock.patch('cloudferry.lib.utils.local.run')
def test_copy_bbcp(self, mock_run):
with self.mock_runner() as runner:
self.copier.copy_bbcp('fake_host', 'src')
self.assertCalledOnce(runner.run)
mock_run.assert_not_called()
runner.reset_mock()
runner.run.side_effect = (remote_runner.RemoteExecutionError,
None)
self.copier.copy_bbcp('fake_host', 'src')
self.assertEqual(2, runner.run.call_count)
self.assertCalledOnce(mock_run)
class RemoveBBCPTestCase(test.TestCase):
@mock.patch('cloudferry.lib.utils.remote_runner.RemoteRunner.'
'run_ignoring_errors')
def test_remove_bbcp(self, mock_run_ignoring_errors):
cloud = mock.Mock()
cloud.hosts_with_bbcp = {'fake_host_1', 'fake_host_2'}
cloud.position = 'src'
bbcp_copier.remove_bbcp(cloud)
self.assertEqual(2, mock_run_ignoring_errors.call_count)
| true
| true
|
790d8e57faed3e19d353ef1b42ede6c299f99f05
| 66
|
py
|
Python
|
src/pomodoro/__main__.py
|
Dev3XOR/pomodoro
|
d4dc48b6ebb86ebcccf0897faac7bba36d0319aa
|
[
"MIT"
] | null | null | null |
src/pomodoro/__main__.py
|
Dev3XOR/pomodoro
|
d4dc48b6ebb86ebcccf0897faac7bba36d0319aa
|
[
"MIT"
] | 1
|
2021-12-17T22:24:00.000Z
|
2021-12-17T22:24:00.000Z
|
src/pomodoro/__main__.py
|
Dev3XOR/pomodoro
|
d4dc48b6ebb86ebcccf0897faac7bba36d0319aa
|
[
"MIT"
] | null | null | null |
from pomodoro import main
if __name__ == "__main__":
main()
| 11
| 26
| 0.666667
|
from pomodoro import main
if __name__ == "__main__":
main()
| true
| true
|
790d8e8aea11def00cbaffc91e9868fda5dc192f
| 568
|
py
|
Python
|
src/fidalgo/azext_fidalgo/generated/_params.py
|
tbyfield/azure-cli-extensions
|
e7e5f37fdcea3afb5c4aecb61fa72eac72c2128e
|
[
"MIT"
] | null | null | null |
src/fidalgo/azext_fidalgo/generated/_params.py
|
tbyfield/azure-cli-extensions
|
e7e5f37fdcea3afb5c4aecb61fa72eac72c2128e
|
[
"MIT"
] | null | null | null |
src/fidalgo/azext_fidalgo/generated/_params.py
|
tbyfield/azure-cli-extensions
|
e7e5f37fdcea3afb5c4aecb61fa72eac72c2128e
|
[
"MIT"
] | 1
|
2022-02-14T21:43:29.000Z
|
2022-02-14T21:43:29.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
def load_arguments(self, _):
pass
| 37.866667
| 76
| 0.558099
|
def load_arguments(self, _):
pass
| true
| true
|
790d8ed36d68e07e09425d5ec08df55d0d2d55e4
| 338
|
py
|
Python
|
tests/devices/eiger/test_eiger_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 4
|
2021-09-16T13:35:33.000Z
|
2022-02-01T23:35:53.000Z
|
tests/devices/eiger/test_eiger_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 46
|
2021-09-16T13:44:58.000Z
|
2022-02-02T13:42:56.000Z
|
tests/devices/eiger/test_eiger_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from tickit.devices.eiger.eiger_status import EigerStatus
# # # # # EigerStatus Tests # # # # #
@pytest.fixture
def eiger_status() -> EigerStatus:
return EigerStatus()
def test_eiger_status_constructor():
EigerStatus()
def test_eiger_status_getitem(eiger_status):
assert 24.5 == eiger_status["th0_temp"]
| 17.789474
| 57
| 0.730769
|
import pytest
from tickit.devices.eiger.eiger_status import EigerStatus
status_getitem(eiger_status):
assert 24.5 == eiger_status["th0_temp"]
| true
| true
|
790d8ed41339bba225986addcf060058eecda9c0
| 5,645
|
py
|
Python
|
cogrob_ros/cogrob_pololu_lidar/scripts/cogrob_pololu_lidar.py
|
CogRob/TritonBot
|
d05b521ec7a7f54a04409f5a2897f3e5c75fd3bf
|
[
"BSD-3-Clause"
] | 8
|
2018-09-21T09:56:02.000Z
|
2021-07-26T14:35:14.000Z
|
cogrob_ros/cogrob_pololu_lidar/scripts/cogrob_pololu_lidar.py
|
CogRob/TritonBot
|
d05b521ec7a7f54a04409f5a2897f3e5c75fd3bf
|
[
"BSD-3-Clause"
] | null | null | null |
cogrob_ros/cogrob_pololu_lidar/scripts/cogrob_pololu_lidar.py
|
CogRob/TritonBot
|
d05b521ec7a7f54a04409f5a2897f3e5c75fd3bf
|
[
"BSD-3-Clause"
] | 4
|
2018-08-26T21:44:52.000Z
|
2019-08-22T07:38:08.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import math
import serial
import sys
import threading
import rospy
import std_msgs.msg
import tf
class PololuSerial(object):
def __init__(self, tty_device='/dev/ttyACM0'):
self._serial = serial.Serial(tty_device, timeout=0.2)
self._lock = threading.Lock()
def _SendCommand(self, command_sequence, reply_length=0):
# TODO(shengye): Check command_sequence is an iterable of integers [0, 255]
buf_out = bytearray(command_sequence)
with self._lock:
self._serial.write(buf_out)
if reply_length > 0:
buf_in = bytearray(self._serial.read(reply_length))
assert len(buf_in) == reply_length
else:
buf_in = None
return buf_in
def Close(self):
with self._lock:
self._serial.close()
self._serial = None
def SetTarget(self, target_val, channel=0):
command = [0x84, channel, target_val & 0x7F, (target_val >> 7) & 0x7F]
self._SendCommand(command)
def SetSpeed(self, speed_val, channel=0):
command = [0x87, channel, speed_val & 0x7F, (speed_val >> 7) & 0x7F]
self._SendCommand(command)
def SetAcceleration(self, acc_val, channel=0):
command = [0x89, channel, acc_val & 0x7F, (acc_val >> 7) & 0x7F]
self._SendCommand(command)
def GetPos(self, channel=0):
command = [0x90, channel]
result = self._SendCommand(command, 2)
return_val = (result[1] << 8) | result[0]
return return_val
def main(argv):
rospy.init_node("head_laser_servo_tf")
# Parameters
tty_device = rospy.get_param("~tty_device", "/dev/ttyACM0")
acceleration = rospy.get_param("~acceleration", 20)
speed = rospy.get_param("~speed", 10)
min_val = rospy.get_param("~min_val", 885 * 4)
min_deg = rospy.get_param("~min_deg", -90)
max_val = rospy.get_param("~max_val", 1900 * 4)
max_deg = rospy.get_param("~max_deg", 0)
default_deg = rospy.get_param("~default_deg", -90)
fixed_frame = rospy.get_param("~fixed_frame", "head_laser_servo_base")
rotating_frame = rospy.get_param("~rotating_frame",
"head_laser_servo_mount")
time_adj = rospy.get_param("~time_adj", 0.125)
tf_pub_rate = rospy.get_param("~tf_pub_rate", 20)
dev = PololuSerial(tty_device)
dev.SetAcceleration(acceleration)
dev.SetSpeed(speed)
tf_broadcaster = tf.TransformBroadcaster()
disable_tf_publisher = [False]
latest_deg = [min_deg]
def MoveToDeg(target_deg):
target = int((target_deg - min_deg) / (max_deg - min_deg) *
(max_val - min_val) + min_val)
dev.SetTarget(target)
pos = float(dev.GetPos())
disable_tf_publisher[0] = True
while pos != target:
deg = ((pos - min_val) / (max_val - min_val) * (max_deg - min_deg)
+ min_deg)
tf_broadcaster.sendTransform(
(0, 0, 0),
tf.transformations.quaternion_from_euler(
0, -deg / 180.0 * math.pi, 0),
rospy.Time.now() + rospy.Duration(time_adj),
rotating_frame,
fixed_frame)
latest_deg[0] = deg
rospy.loginfo("Degree: %f, Value: %f", deg, pos)
pos = float(dev.GetPos())
disable_tf_publisher[0] = False
def HeadLaserAngleCallback(data):
angle = data.data
if angle < min_deg or angle > max_deg:
rospy.logerr("%f is not between [%f, %f]", angle, min_deg, max_deg)
else:
MoveToDeg(angle)
MoveToDeg(min_deg)
MoveToDeg(max_deg)
MoveToDeg(default_deg)
rospy.Subscriber("/head_laser/angle", std_msgs.msg.Float64,
HeadLaserAngleCallback)
rospy.loginfo("Ready to serve.")
tf_pub_rate_rospy_rate = rospy.Rate(tf_pub_rate)
while not rospy.is_shutdown():
if not disable_tf_publisher[0]:
tf_broadcaster.sendTransform(
(0, 0, 0),
tf.transformations.quaternion_from_euler(
0, -latest_deg[0] / 180.0 * math.pi, 0),
rospy.Time.now() + rospy.Duration(time_adj),
rotating_frame,
fixed_frame)
tf_pub_rate_rospy_rate.sleep()
dev.Close()
if __name__ == "__main__":
main(sys.argv)
| 33.205882
| 79
| 0.692117
|
import math
import serial
import sys
import threading
import rospy
import std_msgs.msg
import tf
class PololuSerial(object):
def __init__(self, tty_device='/dev/ttyACM0'):
self._serial = serial.Serial(tty_device, timeout=0.2)
self._lock = threading.Lock()
def _SendCommand(self, command_sequence, reply_length=0):
buf_out = bytearray(command_sequence)
with self._lock:
self._serial.write(buf_out)
if reply_length > 0:
buf_in = bytearray(self._serial.read(reply_length))
assert len(buf_in) == reply_length
else:
buf_in = None
return buf_in
def Close(self):
with self._lock:
self._serial.close()
self._serial = None
def SetTarget(self, target_val, channel=0):
command = [0x84, channel, target_val & 0x7F, (target_val >> 7) & 0x7F]
self._SendCommand(command)
def SetSpeed(self, speed_val, channel=0):
command = [0x87, channel, speed_val & 0x7F, (speed_val >> 7) & 0x7F]
self._SendCommand(command)
def SetAcceleration(self, acc_val, channel=0):
command = [0x89, channel, acc_val & 0x7F, (acc_val >> 7) & 0x7F]
self._SendCommand(command)
def GetPos(self, channel=0):
command = [0x90, channel]
result = self._SendCommand(command, 2)
return_val = (result[1] << 8) | result[0]
return return_val
def main(argv):
rospy.init_node("head_laser_servo_tf")
tty_device = rospy.get_param("~tty_device", "/dev/ttyACM0")
acceleration = rospy.get_param("~acceleration", 20)
speed = rospy.get_param("~speed", 10)
min_val = rospy.get_param("~min_val", 885 * 4)
min_deg = rospy.get_param("~min_deg", -90)
max_val = rospy.get_param("~max_val", 1900 * 4)
max_deg = rospy.get_param("~max_deg", 0)
default_deg = rospy.get_param("~default_deg", -90)
fixed_frame = rospy.get_param("~fixed_frame", "head_laser_servo_base")
rotating_frame = rospy.get_param("~rotating_frame",
"head_laser_servo_mount")
time_adj = rospy.get_param("~time_adj", 0.125)
tf_pub_rate = rospy.get_param("~tf_pub_rate", 20)
dev = PololuSerial(tty_device)
dev.SetAcceleration(acceleration)
dev.SetSpeed(speed)
tf_broadcaster = tf.TransformBroadcaster()
disable_tf_publisher = [False]
latest_deg = [min_deg]
def MoveToDeg(target_deg):
target = int((target_deg - min_deg) / (max_deg - min_deg) *
(max_val - min_val) + min_val)
dev.SetTarget(target)
pos = float(dev.GetPos())
disable_tf_publisher[0] = True
while pos != target:
deg = ((pos - min_val) / (max_val - min_val) * (max_deg - min_deg)
+ min_deg)
tf_broadcaster.sendTransform(
(0, 0, 0),
tf.transformations.quaternion_from_euler(
0, -deg / 180.0 * math.pi, 0),
rospy.Time.now() + rospy.Duration(time_adj),
rotating_frame,
fixed_frame)
latest_deg[0] = deg
rospy.loginfo("Degree: %f, Value: %f", deg, pos)
pos = float(dev.GetPos())
disable_tf_publisher[0] = False
def HeadLaserAngleCallback(data):
angle = data.data
if angle < min_deg or angle > max_deg:
rospy.logerr("%f is not between [%f, %f]", angle, min_deg, max_deg)
else:
MoveToDeg(angle)
MoveToDeg(min_deg)
MoveToDeg(max_deg)
MoveToDeg(default_deg)
rospy.Subscriber("/head_laser/angle", std_msgs.msg.Float64,
HeadLaserAngleCallback)
rospy.loginfo("Ready to serve.")
tf_pub_rate_rospy_rate = rospy.Rate(tf_pub_rate)
while not rospy.is_shutdown():
if not disable_tf_publisher[0]:
tf_broadcaster.sendTransform(
(0, 0, 0),
tf.transformations.quaternion_from_euler(
0, -latest_deg[0] / 180.0 * math.pi, 0),
rospy.Time.now() + rospy.Duration(time_adj),
rotating_frame,
fixed_frame)
tf_pub_rate_rospy_rate.sleep()
dev.Close()
if __name__ == "__main__":
main(sys.argv)
| true
| true
|
790d8edc8ab2aa68079caa33608fed0c6a069a7b
| 2,216
|
py
|
Python
|
src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py
|
desruie/OpenTimelineIO
|
918797b00e840b7de8a15a3b1ab51e35a004c50f
|
[
"Apache-2.0"
] | 1,021
|
2017-07-29T05:50:20.000Z
|
2022-03-28T16:53:28.000Z
|
src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py
|
desruie/OpenTimelineIO
|
918797b00e840b7de8a15a3b1ab51e35a004c50f
|
[
"Apache-2.0"
] | 987
|
2017-08-01T17:14:57.000Z
|
2022-03-31T22:49:03.000Z
|
src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py
|
reinecke/OpenTimelineIO
|
1325927157564989952edf7c5f7c317fb90e1288
|
[
"Apache-2.0"
] | 233
|
2017-07-28T23:27:10.000Z
|
2022-03-31T10:40:35.000Z
|
#
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Algorithms for timeline objects."""
import copy
from . import (
track_algo
)
def timeline_trimmed_to_range(in_timeline, trim_range):
"""Returns a new timeline that is a copy of the in_timeline, but with items
outside the trim_range removed and items on the ends trimmed to the
trim_range. Note that the timeline is never expanded, only shortened.
Please note that you could do nearly the same thing non-destructively by
just setting the Track's source_range but sometimes you want to really cut
away the stuff outside and that's what this function is meant for."""
new_timeline = copy.deepcopy(in_timeline)
for track_num, child_track in enumerate(in_timeline.tracks):
# @TODO: put the trim_range into the space of the tracks
# new_range = new_timeline.tracks.transformed_time_range(
# trim_range,
# child_track
# )
# trim the track and assign it to the new stack.
new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(
child_track,
trim_range
)
return new_timeline
| 38.877193
| 79
| 0.733303
|
import copy
from . import (
track_algo
)
def timeline_trimmed_to_range(in_timeline, trim_range):
new_timeline = copy.deepcopy(in_timeline)
for track_num, child_track in enumerate(in_timeline.tracks):
new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(
child_track,
trim_range
)
return new_timeline
| true
| true
|
790d8fb9ce171e31e7cff74bd1b470c81d27f126
| 4,809
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200301/get_network_interface_tap_configuration.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceTapConfigurationResult',
'AwaitableGetNetworkInterfaceTapConfigurationResult',
'get_network_interface_tap_configuration',
]
@pulumi.output_type
class GetNetworkInterfaceTapConfigurationResult:
"""
Tap configuration in a Network Interface.
"""
def __init__(__self__, etag=None, name=None, provisioning_state=None, type=None, virtual_network_tap=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_tap and not isinstance(virtual_network_tap, dict):
raise TypeError("Expected argument 'virtual_network_tap' to be a dict")
pulumi.set(__self__, "virtual_network_tap", virtual_network_tap)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the network interface tap configuration resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Sub Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkTap")
def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']:
"""
The reference to the Virtual Network Tap resource.
"""
return pulumi.get(self, "virtual_network_tap")
class AwaitableGetNetworkInterfaceTapConfigurationResult(GetNetworkInterfaceTapConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkInterfaceTapConfigurationResult(
etag=self.etag,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type,
virtual_network_tap=self.virtual_network_tap)
def get_network_interface_tap_configuration(network_interface_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
tap_configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceTapConfigurationResult:
"""
Use this data source to access information about an existing resource.
:param str network_interface_name: The name of the network interface.
:param str resource_group_name: The name of the resource group.
:param str tap_configuration_name: The name of the tap configuration.
"""
__args__ = dict()
__args__['networkInterfaceName'] = network_interface_name
__args__['resourceGroupName'] = resource_group_name
__args__['tapConfigurationName'] = tap_configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value
return AwaitableGetNetworkInterfaceTapConfigurationResult(
etag=__ret__.etag,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type,
virtual_network_tap=__ret__.virtual_network_tap)
| 39.743802
| 180
| 0.680183
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceTapConfigurationResult',
'AwaitableGetNetworkInterfaceTapConfigurationResult',
'get_network_interface_tap_configuration',
]
@pulumi.output_type
class GetNetworkInterfaceTapConfigurationResult:
def __init__(__self__, etag=None, name=None, provisioning_state=None, type=None, virtual_network_tap=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_tap and not isinstance(virtual_network_tap, dict):
raise TypeError("Expected argument 'virtual_network_tap' to be a dict")
pulumi.set(__self__, "virtual_network_tap", virtual_network_tap)
@property
@pulumi.getter
def etag(self) -> str:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkTap")
def virtual_network_tap(self) -> Optional['outputs.VirtualNetworkTapResponse']:
return pulumi.get(self, "virtual_network_tap")
class AwaitableGetNetworkInterfaceTapConfigurationResult(GetNetworkInterfaceTapConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkInterfaceTapConfigurationResult(
etag=self.etag,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type,
virtual_network_tap=self.virtual_network_tap)
def get_network_interface_tap_configuration(network_interface_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
tap_configuration_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceTapConfigurationResult:
__args__ = dict()
__args__['networkInterfaceName'] = network_interface_name
__args__['resourceGroupName'] = resource_group_name
__args__['tapConfigurationName'] = tap_configuration_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200301:getNetworkInterfaceTapConfiguration', __args__, opts=opts, typ=GetNetworkInterfaceTapConfigurationResult).value
return AwaitableGetNetworkInterfaceTapConfigurationResult(
etag=__ret__.etag,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type,
virtual_network_tap=__ret__.virtual_network_tap)
| true
| true
|
790d907ce96067c105d27b4ff18d5cd849620ec7
| 142
|
py
|
Python
|
tests/functions.py
|
apragacz/functoolsplus
|
9d26666a2d017b25dd0e031ddde1570f5a4bffd3
|
[
"MIT"
] | 2
|
2019-03-01T15:09:16.000Z
|
2021-01-26T15:18:58.000Z
|
tests/functions.py
|
apragacz/functoolsplus
|
9d26666a2d017b25dd0e031ddde1570f5a4bffd3
|
[
"MIT"
] | null | null | null |
tests/functions.py
|
apragacz/functoolsplus
|
9d26666a2d017b25dd0e031ddde1570f5a4bffd3
|
[
"MIT"
] | null | null | null |
def identity(x):
return x
def always_false(x):
return False
def always_true(x):
return True
def add(x, y):
return x + y
| 9.466667
| 20
| 0.612676
|
def identity(x):
return x
def always_false(x):
return False
def always_true(x):
return True
def add(x, y):
return x + y
| true
| true
|
790d90b0c4b8b17bedcb8e7ece4b102f0eef8c66
| 2,740
|
py
|
Python
|
ghwiz/mshp.py
|
nerdralph/h2k
|
187534da4ec1ffb8711e738acd0271b5d3c7a261
|
[
"MIT"
] | null | null | null |
ghwiz/mshp.py
|
nerdralph/h2k
|
187534da4ec1ffb8711e738acd0271b5d3c7a261
|
[
"MIT"
] | null | null | null |
ghwiz/mshp.py
|
nerdralph/h2k
|
187534da4ec1ffb8711e738acd0271b5d3c7a261
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# configure mini-split heat pumps for E files
# uses NRCan CSV list converted to TSV
# https://oee.nrcan.gc.ca/pml-lmp/index.cfm?language_langue=en&action=app.search-recherche&appliance=ASHP2_GH
import math, os, sys
import xml.etree.ElementTree as ET
if len(sys.argv) < 3:
print(sys.argv[0], "E-file.h2k AHRI heads|0(ducted)")
sys.exit()
e_file = sys.argv[1]
ahri = sys.argv[2]
heads = sys.argv[3]
t = ET.parse(e_file)
# tsv field list:
# Brand Outside model Inside model Furnace model HSPF (Region IV) Rated heating capacity (Btu/hour) Grant amount AHRI / Verification reference AHRI Classification Series name/product line (if applicable) SEER Rated cooling capacity (Btu/hour) Coefficient of Performance (COP) at -15 °C (5 °F) (at maximum capacity) Capacity Maintenance % (Max -15°C/5°F ÷ Rated 8.3°C/47°F)
cchp_search = "grep '" + ahri + "' ccashp.tsv"
#d = os.popen(cchp_search).read().split('\t')
d = os.popen(cchp_search).read().rstrip('\n').split('\t')
# 1 kW = 3412 BTU/hr
(mfr, model, head_mdl, size_kw, hspf, seer, cop, fraction) = \
d[0], d[1], d[2], str(float(d[5])/3412), d[4], d[10], d[12], d[13]
#(ahri, size_kw, hspf, cop, seer) = cols[9], str(float(cols[5])/3412), cols[4], cols[13], cols[12]
e = t.find("./ProgramInformation/Information")
info = ET.Element("Info", {"code": "Info. 5"})
# no NEEP until spreadsheet and/or H2K is fixed
if (int(heads) > 0):
info.text = "MSHP-" + heads
else:
info.text = "CENTRAL-HP"
e.append(info)
# GHG instructions are to use Info 6 when more than 1 ccASHP system is installed
# but ENS wants all heat pumps in Info 6
info = ET.Element("Info", {"code": "Info. 6"})
info.text = mfr + ";AHRI-" + ahri + ';' + model + ';' + head_mdl
e.append(info)
#print(info, info.attrib, info.text)
# Type 2 CCHP heating system
type2 = ET.parse("Type2.xml").getroot()
ahp = type2.find("AirHeatPump")
ei = ahp.find("EquipmentInformation")
ei.attrib["AHRI"] = ahri
ei.find("Manufacturer").text = mfr
ei.find("Model").text = model
ahp.find("Equipment").attrib["numberOfHeads"] = heads
specs = ahp.find("Specifications")
specs.find("OutputCapacity").attrib["value"] = size_kw
# use ASHP HSPF/SEER until NEEP spreadsheet or H2K is fixed for ccHP
specs.find("HeatingEfficiency").attrib["value"] = str(float(hspf)/1.15)
specs.find("CoolingEfficiency").attrib["value"] = seer
cchp = ahp.find("ColdClimateHeatPump")
cchp.attrib["heatingEfficiency"] = hspf
cchp.attrib["coolingEfficiency"] = seer
cchp.attrib["capacity"] = size_kw
cchp.attrib["cop"] = cop
cchp.attrib["capacityMaintenance"] = fraction
hc = t.find("./House/HeatingCooling")
hc.remove(hc.find("Type2"))
hc.append(type2)
#outfile = "MSHP-out.h2k"
outfile = e_file
t.write(outfile, "UTF-8", True)
| 36.533333
| 373
| 0.693796
|
import math, os, sys
import xml.etree.ElementTree as ET
if len(sys.argv) < 3:
print(sys.argv[0], "E-file.h2k AHRI heads|0(ducted)")
sys.exit()
e_file = sys.argv[1]
ahri = sys.argv[2]
heads = sys.argv[3]
t = ET.parse(e_file)
cchp_search = "grep '" + ahri + "' ccashp.tsv"
d = os.popen(cchp_search).read().rstrip('\n').split('\t')
(mfr, model, head_mdl, size_kw, hspf, seer, cop, fraction) = \
d[0], d[1], d[2], str(float(d[5])/3412), d[4], d[10], d[12], d[13]
e = t.find("./ProgramInformation/Information")
info = ET.Element("Info", {"code": "Info. 5"})
if (int(heads) > 0):
info.text = "MSHP-" + heads
else:
info.text = "CENTRAL-HP"
e.append(info)
info = ET.Element("Info", {"code": "Info. 6"})
info.text = mfr + ";AHRI-" + ahri + ';' + model + ';' + head_mdl
e.append(info)
type2 = ET.parse("Type2.xml").getroot()
ahp = type2.find("AirHeatPump")
ei = ahp.find("EquipmentInformation")
ei.attrib["AHRI"] = ahri
ei.find("Manufacturer").text = mfr
ei.find("Model").text = model
ahp.find("Equipment").attrib["numberOfHeads"] = heads
specs = ahp.find("Specifications")
specs.find("OutputCapacity").attrib["value"] = size_kw
specs.find("HeatingEfficiency").attrib["value"] = str(float(hspf)/1.15)
specs.find("CoolingEfficiency").attrib["value"] = seer
cchp = ahp.find("ColdClimateHeatPump")
cchp.attrib["heatingEfficiency"] = hspf
cchp.attrib["coolingEfficiency"] = seer
cchp.attrib["capacity"] = size_kw
cchp.attrib["cop"] = cop
cchp.attrib["capacityMaintenance"] = fraction
hc = t.find("./House/HeatingCooling")
hc.remove(hc.find("Type2"))
hc.append(type2)
outfile = e_file
t.write(outfile, "UTF-8", True)
| true
| true
|
790d926797199d18bc0487056b852ac06c0b6295
| 3,280
|
py
|
Python
|
tests/conftest.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
from copy import copy
import pytest
import htmap
from htmap.settings import BASE_SETTINGS
from htmap._startup import ensure_htmap_dir_exists
# start with base settings (ignore user settings for tests)
htmap.settings.replace(BASE_SETTINGS)
htmap.settings[
"DELIVERY_METHOD"
] = "shared" # shared is the default for all tests that aren't parametric
htmap.settings["WAIT_TIME"] = 0.1
htmap.settings["MAP_OPTIONS.request_memory"] = "10MB"
htmap.settings["MAP_OPTIONS.keep_claim_idle"] = "1"
SETTINGS = copy(htmap.settings)
@pytest.fixture(scope="function", autouse=True)
def reset_settings():
htmap.settings.replace(SETTINGS)
@pytest.fixture(scope="function", autouse=True)
def set_transplant_dir(tmpdir_factory, reset_settings):
path = Path(tmpdir_factory.mktemp("htmap_transplant_dir"))
htmap.settings["TRANSPLANT.DIR"] = path
@pytest.fixture(scope="function")
def delivery_methods(delivery_method, reset_settings):
htmap.settings["DELIVERY_METHOD"] = delivery_method
def pytest_addoption(parser):
parser.addoption(
"--delivery",
nargs="+",
default=["shared"], # shared is the default for parametric delivery testing
)
def pytest_generate_tests(metafunc):
if "delivery_methods" in metafunc.fixturenames:
metafunc.parametrize(
"delivery_method", metafunc.config.getoption("delivery"),
)
@pytest.fixture(scope="function", autouse=True)
def set_htmap_dir_and_clean(tmpdir_factory):
map_dir = Path(tmpdir_factory.mktemp("htmap_dir"))
htmap.settings["HTMAP_DIR"] = map_dir
ensure_htmap_dir_exists()
yield
htmap.clean(all=True)
@pytest.fixture(scope="session")
def doubler():
def doubler(x):
return 2 * x
return doubler
@pytest.fixture(scope="session")
def mapped_doubler(doubler):
mapper = htmap.mapped(doubler)
return mapper
@pytest.fixture(scope="session")
def power():
def power(x=0, p=2):
return x ** p
return power
@pytest.fixture(scope="session")
def mapped_power(power):
mapper = htmap.mapped(power)
return mapper
@pytest.fixture(scope="session")
def never_returns():
def never(_):
while True:
time.sleep(1)
return never
@pytest.fixture(scope="function")
def map_that_never_finishes(never_returns):
m = htmap.map(never_returns, [None])
yield m
m.remove()
@pytest.fixture(scope="session")
def mapped_exception():
@htmap.mapped
def fail(x):
raise Exception(str(x))
return fail
def exception_msg(exc_info) -> str:
return str(exc_info.value)
| 23.941606
| 84
| 0.721341
|
import time
from pathlib import Path
from copy import copy
import pytest
import htmap
from htmap.settings import BASE_SETTINGS
from htmap._startup import ensure_htmap_dir_exists
htmap.settings.replace(BASE_SETTINGS)
htmap.settings[
"DELIVERY_METHOD"
] = "shared"
htmap.settings["WAIT_TIME"] = 0.1
htmap.settings["MAP_OPTIONS.request_memory"] = "10MB"
htmap.settings["MAP_OPTIONS.keep_claim_idle"] = "1"
SETTINGS = copy(htmap.settings)
@pytest.fixture(scope="function", autouse=True)
def reset_settings():
htmap.settings.replace(SETTINGS)
@pytest.fixture(scope="function", autouse=True)
def set_transplant_dir(tmpdir_factory, reset_settings):
path = Path(tmpdir_factory.mktemp("htmap_transplant_dir"))
htmap.settings["TRANSPLANT.DIR"] = path
@pytest.fixture(scope="function")
def delivery_methods(delivery_method, reset_settings):
htmap.settings["DELIVERY_METHOD"] = delivery_method
def pytest_addoption(parser):
parser.addoption(
"--delivery",
nargs="+",
default=["shared"], # shared is the default for parametric delivery testing
)
def pytest_generate_tests(metafunc):
if "delivery_methods" in metafunc.fixturenames:
metafunc.parametrize(
"delivery_method", metafunc.config.getoption("delivery"),
)
@pytest.fixture(scope="function", autouse=True)
def set_htmap_dir_and_clean(tmpdir_factory):
map_dir = Path(tmpdir_factory.mktemp("htmap_dir"))
htmap.settings["HTMAP_DIR"] = map_dir
ensure_htmap_dir_exists()
yield
htmap.clean(all=True)
@pytest.fixture(scope="session")
def doubler():
def doubler(x):
return 2 * x
return doubler
@pytest.fixture(scope="session")
def mapped_doubler(doubler):
mapper = htmap.mapped(doubler)
return mapper
@pytest.fixture(scope="session")
def power():
def power(x=0, p=2):
return x ** p
return power
@pytest.fixture(scope="session")
def mapped_power(power):
mapper = htmap.mapped(power)
return mapper
@pytest.fixture(scope="session")
def never_returns():
def never(_):
while True:
time.sleep(1)
return never
@pytest.fixture(scope="function")
def map_that_never_finishes(never_returns):
m = htmap.map(never_returns, [None])
yield m
m.remove()
@pytest.fixture(scope="session")
def mapped_exception():
@htmap.mapped
def fail(x):
raise Exception(str(x))
return fail
def exception_msg(exc_info) -> str:
return str(exc_info.value)
| true
| true
|
790d9292ec96dd030beb02589ad7db47a61fdcc8
| 9,395
|
py
|
Python
|
yyskmultilearn/cluster/graphtool.py
|
yuan776/scikit-multilearn
|
5ad32df237e6a9746fd5ec2f9543dcd011e8cdd2
|
[
"BSD-2-Clause"
] | null | null | null |
yyskmultilearn/cluster/graphtool.py
|
yuan776/scikit-multilearn
|
5ad32df237e6a9746fd5ec2f9543dcd011e8cdd2
|
[
"BSD-2-Clause"
] | null | null | null |
yyskmultilearn/cluster/graphtool.py
|
yuan776/scikit-multilearn
|
5ad32df237e6a9746fd5ec2f9543dcd011e8cdd2
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import graph_tool.all as gt
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities, _overlapping_membership_to_list_of_communities
class StochasticBlockModel:
"""A Stochastic Blockmodel fit to Label Graph
This contains a stochastic block model instance constructed for a block model variant specified in parameters.
It can be fit to an instance of a graph and set of weights. More information on how to select parameters can be
found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
nested: boolean
whether to build a nested Stochastic Block Model or the regular variant,
will be automatically put under :code:`self.nested`.
use_degree_correlation: boolean
whether to correct for degree correlation in modeling, will be automatically
put under :code:`self.use_degree_correlation`.
allow_overlap: boolean
whether to allow overlapping clusters or not, will be automatically
put under :code:`self.allow_overlap`.
weight_model: string or None
decide whether to generate a weighted or unweighted graph,
will be automatically put under :code:`self.weight_model`.
Attributes
----------
model_: graph_tool.inference.BlockState or its subclass
an instance of the fitted model obtained from graph-tool
"""
def __init__(self, nested, use_degree_correlation, allow_overlap, weight_model):
self.nested = nested
self.use_degree_correlation = use_degree_correlation
self.allow_overlap = allow_overlap
self.weight_model = weight_model
self.model_ = None
def fit_predict(self, graph, weights):
"""Fits model to a given graph and weights list
Sets :code:`self.model_` to the state of graphtool's Stochastic Block Model the after fitting.
Attributes
----------
graph: graphtool.Graph
the graph to fit the model to
weights: graphtool.EdgePropertyMap<double>
the property map: edge -> weight (double) to fit the model to, if weighted variant
is selected
Returns
-------
numpy.ndarray
partition of labels, each sublist contains label indices
related to label positions in :code:`y`
"""
if self.weight_model:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap,
state_args=dict(recs=[weights],
rec_types=[self.weight_model])
)
else:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap
)
return self._detect_communities()
def _detect_communities(self):
if self.nested:
lowest_level = self.model_.get_levels()[0]
else:
lowest_level = self.model_
number_of_communities = lowest_level.get_B()
if self.allow_overlap:
# the overlaps block returns
# membership vector, and also edges vectors, we need just the membership here at the moment
membership_vector = list(lowest_level.get_overlap_blocks()[0])
else:
membership_vector = list(lowest_level.get_blocks())
if self.allow_overlap:
return _overlapping_membership_to_list_of_communities(membership_vector, number_of_communities)
return _membership_to_list_of_communities(membership_vector, number_of_communities)
def _model_fit_function(self):
if self.nested:
return gt.minimize_nested_blockmodel_dl
else:
return gt.minimize_blockmodel_dl
class GraphToolLabelGraphClusterer(LabelGraphClustererBase):
"""Fits a Stochastic Block Model to the Label Graph and infers the communities
This clusterer clusters the label space using by fitting a stochastic block
model to the label network and inferring the community structure using graph-tool.
The obtained community structure is returned as the label clustering. More information on the inference itself
can be found in `the extensive introduction into Stochastic Block Models
<https://graph-tool.skewed.de/static/doc/demos/inference/inference.html>`_ in graphtool documentation.
Parameters
----------
graph_builder: a GraphBuilderBase inherited transformer
the graph builder to provide the adjacency matrix and weight map for the underlying graph
model: StochasticBlockModel
the desired stochastic block model variant to use
Attributes
----------
graph_ : graphtool.Graph
object representing a label co-occurence graph
weights_ : graphtool.EdgeProperty<double>
edge weights defined by graph builder stored in a graphtool compatible format
.. note ::
This functionality is still undergoing research.
.. note ::
This clusterer is GPL-licenced and will taint your code with GPL restrictions.
References
----------
If you use this class please cite:
.. code : latex
article{peixoto_graph-tool_2014,
title = {The graph-tool python library},
url = {http://figshare.com/articles/graph_tool/1164194},
doi = {10.6084/m9.figshare.1164194},
urldate = {2014-09-10},
journal = {figshare},
author = {Peixoto, Tiago P.},
year = {2014},
keywords = {all, complex networks, graph, network, other}}
Examples
--------
An example code for using this clusterer with a classifier looks like this:
.. code-block:: python
from sklearn.ensemble import RandomForestClassifier
from yyskmultilearn.problem_transform import LabelPowerset
from yyskmultilearn.cluster import IGraphLabelGraphClusterer, LabelCooccurrenceGraphBuilder
from yyskmultilearn.ensemble import LabelSpacePartitioningClassifier
# construct base forest classifier
base_classifier = RandomForestClassifier(n_estimators=1000)
# construct a graph builder that will include
# label relations weighted by how many times they
# co-occurred in the data, without self-edges
graph_builder = LabelCooccurrenceGraphBuilder(
weighted = True,
include_self_edges = False
)
# select parameters for the model, we fit a flat,
# non-degree correlated, partitioning model
# which will use fit the normal distribution as the weights model
model = StochasticBlockModel(
nested=False,
use_degree_correlation=True,
allow_overlap=False,
weight_model='real-normal'
)
# setup problem transformation approach with sparse matrices for random forest
problem_transform_classifier = LabelPowerset(classifier=base_classifier,
require_dense=[False, False])
# setup the clusterer to use, we selected the fast greedy modularity-maximization approach
clusterer = GraphToolLabelGraphClusterer(graph_builder=graph_builder, model=model)
# setup the ensemble metaclassifier
classifier = LabelSpacePartitioningClassifier(problem_transform_classifier, clusterer)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
For more use cases see `the label relations exploration guide <../labelrelations.ipynb>`_.
"""
def __init__(self, graph_builder, model):
super(GraphToolLabelGraphClusterer, self).__init__(graph_builder)
self.model = model
self.graph_builder = graph_builder
def fit_predict(self, X, y):
"""Performs clustering on y and returns list of label lists
Builds a label graph using the provided graph builder's `transform` method
on `y` and then detects communities using the selected `method`.
Sets :code:`self.weights_` and :code:`self.graph_`.
Parameters
----------
X : None
currently unused, left for scikit compatibility
y : scipy.sparse
label space of shape :code:`(n_samples, n_labels)`
Returns
-------
arrray of arrays of label indexes (numpy.ndarray)
label space division, each sublist represents labels that are in that community
"""
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if len(community) > 0])
def _build_graph_instance(self, y):
edge_map = self.graph_builder.transform(y)
g = gt.Graph(directed=False)
g.add_vertex(y.shape[1])
self.weights_ = g.new_edge_property('double')
for edge, weight in edge_map.items():
e = g.add_edge(edge[0], edge[1])
self.weights_[e] = weight
self.graph_ = g
| 36.699219
| 115
| 0.673018
|
from __future__ import absolute_import
from __future__ import print_function
import graph_tool.all as gt
import numpy as np
from .base import LabelGraphClustererBase
from .helpers import _membership_to_list_of_communities, _overlapping_membership_to_list_of_communities
class StochasticBlockModel:
def __init__(self, nested, use_degree_correlation, allow_overlap, weight_model):
self.nested = nested
self.use_degree_correlation = use_degree_correlation
self.allow_overlap = allow_overlap
self.weight_model = weight_model
self.model_ = None
def fit_predict(self, graph, weights):
if self.weight_model:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap,
state_args=dict(recs=[weights],
rec_types=[self.weight_model])
)
else:
self.model_ = self._model_fit_function()(
graph,
deg_corr=self.use_degree_correlation,
overlap=self.allow_overlap
)
return self._detect_communities()
def _detect_communities(self):
if self.nested:
lowest_level = self.model_.get_levels()[0]
else:
lowest_level = self.model_
number_of_communities = lowest_level.get_B()
if self.allow_overlap:
membership_vector = list(lowest_level.get_overlap_blocks()[0])
else:
membership_vector = list(lowest_level.get_blocks())
if self.allow_overlap:
return _overlapping_membership_to_list_of_communities(membership_vector, number_of_communities)
return _membership_to_list_of_communities(membership_vector, number_of_communities)
def _model_fit_function(self):
if self.nested:
return gt.minimize_nested_blockmodel_dl
else:
return gt.minimize_blockmodel_dl
class GraphToolLabelGraphClusterer(LabelGraphClustererBase):
def __init__(self, graph_builder, model):
super(GraphToolLabelGraphClusterer, self).__init__(graph_builder)
self.model = model
self.graph_builder = graph_builder
def fit_predict(self, X, y):
self._build_graph_instance(y)
clusters = self.model.fit_predict(self.graph_, weights=self.weights_)
return np.array([community for community in clusters if len(community) > 0])
def _build_graph_instance(self, y):
edge_map = self.graph_builder.transform(y)
g = gt.Graph(directed=False)
g.add_vertex(y.shape[1])
self.weights_ = g.new_edge_property('double')
for edge, weight in edge_map.items():
e = g.add_edge(edge[0], edge[1])
self.weights_[e] = weight
self.graph_ = g
| true
| true
|
790d9370983b4903b988e873a086e753013d1ced
| 5,724
|
py
|
Python
|
src/djangoreactredux/djrenv/lib/python3.5/site-packages/faker/providers/address/sv_SE/__init__.py
|
m2jobe/c_x
|
ba914449a9a85d82703895fc884733ca20454034
|
[
"MIT"
] | 9
|
2018-03-29T18:41:22.000Z
|
2021-03-11T23:35:30.000Z
|
faker/providers/address/sv_SE/__init__.py
|
Saber-xxf/faker1
|
c966a144b370f7abb568a5154c4ef704e846722e
|
[
"MIT"
] | 5
|
2020-03-24T16:37:25.000Z
|
2021-06-10T21:24:54.000Z
|
faker/providers/address/sv_SE/__init__.py
|
Saber-xxf/faker1
|
c966a144b370f7abb568a5154c4ef704e846722e
|
[
"MIT"
] | 1
|
2018-04-05T22:07:48.000Z
|
2018-04-05T22:07:48.000Z
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ('###', '##', '#')
street_name_formats = ('{{street_prefix}}{{street_suffix}}', )
street_address_formats = ('{{street_name}} {{building_number}}',)
street_prefixes = (
'Björk', 'Järnvägs', 'Ring', 'Skol', 'Skogs', 'Ny', 'Gran', 'Idrotts',
'Stor', 'Kyrk', 'Industri', 'Park', 'Strand', 'Skol', 'Trädgårds',
'Industri', 'Ängs', 'Kyrko', 'Park', 'Villa', 'Ek', 'Kvarn', 'Stations',
'Back', 'Furu', 'Gen', 'Fabriks', 'Åker', 'Bäck', 'Asp'
)
street_suffixes = ('gatan', 'gatan', 'vägen', 'vägen',
'stigen', 'gränd', 'torget')
address_formats = ("{{street_address}}\n{{postcode}} {{city}}", )
postcode_formats = ('#####', )
city_formats = ('{{city_name}}', )
cities = (
'Stockholm', 'Göteborg', 'Malmö', 'Uppsala', 'Västerås', 'Örebro',
'Linköping', 'Helsingborg', 'Jönköping', 'Norrköping', 'Lund', 'Umeå',
'Gävle', 'Borås', 'Mölndal', 'Södertälje', 'Eskilstuna', 'Karlstad',
'Halmstad', 'Växjö', 'Sundsvall', 'Luleå', 'Trollhättan', 'Östersund',
'Borlänge', 'Falun', 'Kalmar', 'Skövde', 'Kristianstad', 'Karlskrona',
'Skellefteå', 'Uddevalla', 'Lidingö', 'Motala', 'Landskrona',
'Örnsköldsvik', 'Nyköping', 'Karlskoga', 'Varberg', 'Trelleborg',
'Lidköping', 'Alingsås', 'Piteå', 'Sandviken', 'Ängelholm'
)
countries = (
'Afghanistan', 'Albanien', 'Algeriet', 'Amerikanska Samoa', 'Andorra',
'Angola', 'Anguilla', 'Antarktis', 'Antigua och Barbuda', 'Argentina',
'Armenien', 'Aruba', 'Ascension', 'Australien', 'Azerbajdzjan',
'Bahamas', 'Bahrain', 'Bangladesh', 'Barbados', 'Belgien', 'Belize',
'Benin', 'Bermuda', 'Bhutan', 'Bolivia', 'Bosnien och Hercegovina',
'Botswana', 'Brasilien', 'Brittiska Jungfruöarna', 'Brunei',
'Bulgarien', 'Burkina Faso', 'Burma', 'Burundi', 'Caymanöarna',
'Centralafrikanska republiken', 'Chile', 'Colombia', 'Cooköarna',
'Costa Rica', 'Cypern', 'Danmark', 'Diego Garcia', 'Djibouti',
'Dominica', 'Dominikanska republiken', 'Ecuador', 'Egypten',
'Ekvatorialguinea', 'Elfenbenskusten', 'El Salvador', 'Eritrea',
'Estland', 'Etiopien', 'England', 'Falklandsöarna', 'Fiji',
'Filippinerna', 'Finland', 'Frankrike', 'Franska Guyana',
'Franska Polynesien', 'Färöarna', 'Förenade Arabemiraten', 'Gabon',
'Gambia', 'Georgien', 'Ghana', 'Gibraltar', 'Grekland', 'Grenada',
'Grönland', 'Guadeloupe', 'Guatemala', 'Guinea', 'Guinea-Bissau',
'Guyana', 'Haiti', 'Honduras', 'Hongkong', 'Indien', 'Indonesien',
'Irak', 'Iran', 'Irland', 'Island', 'Israel', 'Italien', 'Jamaica',
'Japan', 'Jemen', 'Jordanien', 'Kambodja', 'Kamerun', 'Kanada',
'Kap Verde', 'Kazakstan', 'Kenya', 'Kina', 'Kirgizistan', 'Kiribati',
'Komorerna', 'Kongo-Brazzaville', 'Kongo-Kinshasa', 'Kosovo',
'Kroatien', 'Kuba', 'Kuwait', 'Laos', 'Lesotho', 'Lettland', 'Libanon',
'Liberia', 'Libyen', 'Liechtenstein', 'Litauen', 'Luxemburg', 'Macao',
'Madagaskar', 'Makedonien', 'Malawi', 'Malaysia', 'Maldiverna', 'Mali',
'Malta', 'Marianerna', 'Marocko', 'Marshallöarna', 'Martinique',
'Mauretanien', 'Mauritius', 'Mayotte', 'Mexiko', 'Midwayöarna',
'Mikronesiens federerade stater', 'Moçambique', 'Moldavien', 'Monaco',
'Mongoliet', 'Montenegro', 'Montserrat', 'Namibia', 'Nauru',
'Nederländerna', 'Nederländska Antillerna', 'Nepal',
'Nicaragua', 'Niger', 'Nigeria', 'Niue', 'Nordkorea', 'Nordmarianerna',
'Norfolkön', 'Norge', 'Nya Kaledonien', 'Nya Zeeland', 'Oman',
'Pakistan', 'Palau', 'Palestina', 'Panama', 'Papua Nya Guinea',
'Paraguay', 'Peru', 'Pitcairnöarna', 'Polen', 'Portugal', 'Qatar',
'Réunion', 'Rumänien', 'Rwanda', 'Ryssland', 'Saint Kitts och Nevis',
'Saint Lucia', 'Saint-Pierre och Miquelon',
'Saint Vincent och Grenadinerna', 'Salomonöarna', 'Samoa',
'Sankta Helena', 'San Marino', 'São Tomé och Príncipe',
'Saudiarabien', 'Schweiz', 'Senegal', 'Serbien', 'Seychellerna',
'SierraLeone', 'Singapore', 'Sint Maarten', 'Slovakien', 'Slovenien',
'Somalia', 'Spanien', 'Sri Lanka', 'Storbritannien', 'Sudan',
'Surinam', 'Sverige', 'Swaziland', 'Sydafrika', 'Sydkorea', 'Sydsudan',
'Syrien', 'Tadzjikistan', 'Taiwan', 'Tanzania', 'Tchad', 'Thailand',
'Tjeckien', 'Togo', 'Tokelauöarna', 'Tonga', 'Trinidad och Tobago',
'Tunisien', 'Turkiet', 'Turkmenistan', 'Turks-och Caicosöarna',
'Tuvalu', 'Tyskland', 'Uganda', 'Ukraina', 'Ungern', 'Uruguay', 'USA',
'Uzbekistan', 'Vanuatu', 'Vatikanstaten', 'Venezuela', 'Vietnam',
'Vitryssland', 'Wake', 'Wallis-och Futunaöarna', 'Zambia', 'Zimbabwe',
'Österrike', 'Östtimor'
)
states = (
'Stockholms län', 'Uppsala län', 'Södermanlands län'
'Östergötlands län', 'Jönköpings län', 'Kronobergs län', 'Kalmar län',
'Gotlands län', 'Blekinge län', 'Skåne län', 'Hallands län',
'Västra Götalands län', 'Värmlands län', 'Örebro län',
'Västmanlands län', 'Dalarnas län', 'Gävleborgs län',
'Västernorrlands län', 'Jämtlands län', 'Västerbottens län',
'Norrbottens län'
)
def street_prefix(self):
return self.random_element(self.street_prefixes)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
| 51.567568
| 80
| 0.599581
|
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ('###', '##', '#')
street_name_formats = ('{{street_prefix}}{{street_suffix}}', )
street_address_formats = ('{{street_name}} {{building_number}}',)
street_prefixes = (
'Björk', 'Järnvägs', 'Ring', 'Skol', 'Skogs', 'Ny', 'Gran', 'Idrotts',
'Stor', 'Kyrk', 'Industri', 'Park', 'Strand', 'Skol', 'Trädgårds',
'Industri', 'Ängs', 'Kyrko', 'Park', 'Villa', 'Ek', 'Kvarn', 'Stations',
'Back', 'Furu', 'Gen', 'Fabriks', 'Åker', 'Bäck', 'Asp'
)
street_suffixes = ('gatan', 'gatan', 'vägen', 'vägen',
'stigen', 'gränd', 'torget')
address_formats = ("{{street_address}}\n{{postcode}} {{city}}", )
postcode_formats = ('#####', )
city_formats = ('{{city_name}}', )
cities = (
'Stockholm', 'Göteborg', 'Malmö', 'Uppsala', 'Västerås', 'Örebro',
'Linköping', 'Helsingborg', 'Jönköping', 'Norrköping', 'Lund', 'Umeå',
'Gävle', 'Borås', 'Mölndal', 'Södertälje', 'Eskilstuna', 'Karlstad',
'Halmstad', 'Växjö', 'Sundsvall', 'Luleå', 'Trollhättan', 'Östersund',
'Borlänge', 'Falun', 'Kalmar', 'Skövde', 'Kristianstad', 'Karlskrona',
'Skellefteå', 'Uddevalla', 'Lidingö', 'Motala', 'Landskrona',
'Örnsköldsvik', 'Nyköping', 'Karlskoga', 'Varberg', 'Trelleborg',
'Lidköping', 'Alingsås', 'Piteå', 'Sandviken', 'Ängelholm'
)
countries = (
'Afghanistan', 'Albanien', 'Algeriet', 'Amerikanska Samoa', 'Andorra',
'Angola', 'Anguilla', 'Antarktis', 'Antigua och Barbuda', 'Argentina',
'Armenien', 'Aruba', 'Ascension', 'Australien', 'Azerbajdzjan',
'Bahamas', 'Bahrain', 'Bangladesh', 'Barbados', 'Belgien', 'Belize',
'Benin', 'Bermuda', 'Bhutan', 'Bolivia', 'Bosnien och Hercegovina',
'Botswana', 'Brasilien', 'Brittiska Jungfruöarna', 'Brunei',
'Bulgarien', 'Burkina Faso', 'Burma', 'Burundi', 'Caymanöarna',
'Centralafrikanska republiken', 'Chile', 'Colombia', 'Cooköarna',
'Costa Rica', 'Cypern', 'Danmark', 'Diego Garcia', 'Djibouti',
'Dominica', 'Dominikanska republiken', 'Ecuador', 'Egypten',
'Ekvatorialguinea', 'Elfenbenskusten', 'El Salvador', 'Eritrea',
'Estland', 'Etiopien', 'England', 'Falklandsöarna', 'Fiji',
'Filippinerna', 'Finland', 'Frankrike', 'Franska Guyana',
'Franska Polynesien', 'Färöarna', 'Förenade Arabemiraten', 'Gabon',
'Gambia', 'Georgien', 'Ghana', 'Gibraltar', 'Grekland', 'Grenada',
'Grönland', 'Guadeloupe', 'Guatemala', 'Guinea', 'Guinea-Bissau',
'Guyana', 'Haiti', 'Honduras', 'Hongkong', 'Indien', 'Indonesien',
'Irak', 'Iran', 'Irland', 'Island', 'Israel', 'Italien', 'Jamaica',
'Japan', 'Jemen', 'Jordanien', 'Kambodja', 'Kamerun', 'Kanada',
'Kap Verde', 'Kazakstan', 'Kenya', 'Kina', 'Kirgizistan', 'Kiribati',
'Komorerna', 'Kongo-Brazzaville', 'Kongo-Kinshasa', 'Kosovo',
'Kroatien', 'Kuba', 'Kuwait', 'Laos', 'Lesotho', 'Lettland', 'Libanon',
'Liberia', 'Libyen', 'Liechtenstein', 'Litauen', 'Luxemburg', 'Macao',
'Madagaskar', 'Makedonien', 'Malawi', 'Malaysia', 'Maldiverna', 'Mali',
'Malta', 'Marianerna', 'Marocko', 'Marshallöarna', 'Martinique',
'Mauretanien', 'Mauritius', 'Mayotte', 'Mexiko', 'Midwayöarna',
'Mikronesiens federerade stater', 'Moçambique', 'Moldavien', 'Monaco',
'Mongoliet', 'Montenegro', 'Montserrat', 'Namibia', 'Nauru',
'Nederländerna', 'Nederländska Antillerna', 'Nepal',
'Nicaragua', 'Niger', 'Nigeria', 'Niue', 'Nordkorea', 'Nordmarianerna',
'Norfolkön', 'Norge', 'Nya Kaledonien', 'Nya Zeeland', 'Oman',
'Pakistan', 'Palau', 'Palestina', 'Panama', 'Papua Nya Guinea',
'Paraguay', 'Peru', 'Pitcairnöarna', 'Polen', 'Portugal', 'Qatar',
'Réunion', 'Rumänien', 'Rwanda', 'Ryssland', 'Saint Kitts och Nevis',
'Saint Lucia', 'Saint-Pierre och Miquelon',
'Saint Vincent och Grenadinerna', 'Salomonöarna', 'Samoa',
'Sankta Helena', 'San Marino', 'São Tomé och Príncipe',
'Saudiarabien', 'Schweiz', 'Senegal', 'Serbien', 'Seychellerna',
'SierraLeone', 'Singapore', 'Sint Maarten', 'Slovakien', 'Slovenien',
'Somalia', 'Spanien', 'Sri Lanka', 'Storbritannien', 'Sudan',
'Surinam', 'Sverige', 'Swaziland', 'Sydafrika', 'Sydkorea', 'Sydsudan',
'Syrien', 'Tadzjikistan', 'Taiwan', 'Tanzania', 'Tchad', 'Thailand',
'Tjeckien', 'Togo', 'Tokelauöarna', 'Tonga', 'Trinidad och Tobago',
'Tunisien', 'Turkiet', 'Turkmenistan', 'Turks-och Caicosöarna',
'Tuvalu', 'Tyskland', 'Uganda', 'Ukraina', 'Ungern', 'Uruguay', 'USA',
'Uzbekistan', 'Vanuatu', 'Vatikanstaten', 'Venezuela', 'Vietnam',
'Vitryssland', 'Wake', 'Wallis-och Futunaöarna', 'Zambia', 'Zimbabwe',
'Österrike', 'Östtimor'
)
states = (
'Stockholms län', 'Uppsala län', 'Södermanlands län'
'Östergötlands län', 'Jönköpings län', 'Kronobergs län', 'Kalmar län',
'Gotlands län', 'Blekinge län', 'Skåne län', 'Hallands län',
'Västra Götalands län', 'Värmlands län', 'Örebro län',
'Västmanlands län', 'Dalarnas län', 'Gävleborgs län',
'Västernorrlands län', 'Jämtlands län', 'Västerbottens län',
'Norrbottens län'
)
def street_prefix(self):
return self.random_element(self.street_prefixes)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
| true
| true
|
790d944fc3cfeccd57e1e4f48ccc80a1c326b3c3
| 11,935
|
py
|
Python
|
kuka_driver/src/kuka_driver/kuka_rsi_router.py
|
adamleon/kuka
|
cac2880ff9bf1fb798029280a9baf51450195fc4
|
[
"BSD-3-Clause"
] | null | null | null |
kuka_driver/src/kuka_driver/kuka_rsi_router.py
|
adamleon/kuka
|
cac2880ff9bf1fb798029280a9baf51450195fc4
|
[
"BSD-3-Clause"
] | null | null | null |
kuka_driver/src/kuka_driver/kuka_rsi_router.py
|
adamleon/kuka
|
cac2880ff9bf1fb798029280a9baf51450195fc4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2014, Norwegian University of Science and Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Lars Tingelstad
# Maintainer: Lars Tingelstad <lars.tingelstad@ntnu.no>
import socket
import threading
import time
import numpy as np
import struct
import xml.etree.ElementTree as et
class UDPServerRealTime(threading.Thread):
def __init__(self,name, host, port, handshake=None):
threading.Thread.__init__(self)
self.daemon = True
self.name = name
self._host = host
self._port = port
self._handshake = handshake
self._timeout = None
self._timeout_count = 0
self._is_timed_out = False
self._max_timeout_count = None
self._lock = threading.Lock()
self._recv_data = None
self._send_data = None
self._remote_addr = None
self.is_connected = False
self._stop_flag = threading.Event()
self._disconnect_client_flag = threading.Event()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.settimeout(self._timeout)
self._socket.bind((self._host, self._port))
def set_max_timeout_count(self, timeout_count):
self._max_timeout_count = timeout_count
def timeout(self):
return self._timeout
def set_timeout(self, timeout):
self._timeout = timeout
self._socket.settimeout(self._timeout)
def receive(self):
try:
#self._lock.acquire()
data, addr = self._socket.recvfrom(1024)
self._recv_data = data
#self._lock.release()
## Set connection if handshake mechanism is not used
if self._handshake is None and not self.is_connected:
self._remote_addr = addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
self._timeout_count = 0
return data
except socket.timeout, e:
if self._max_timeout_count is not None:
self._timeout_count += 1
print("{name}: Late package!".format(name=self.name))
if self._timeout_count > self._max_timeout_count:
print("{name}: Maximum timeouts. Disconnecting client: {addr}".format(name=self.name, addr=self._remote_addr))
self._disconnect_client_flag.set()
return None
def send(self, data):
#self._lock.acquire()
self._send_data = data
self._socket.sendto(self._send_data, self._remote_addr)
#self._lock.release()
def connect(self):
''' Create connection from external client '''
if self._handshake is not None:
if not self.is_connected:
self._socket.settimeout(None)
data, remote_addr = self._socket.recvfrom(1024)
if data == self._handshake:
self._remote_addr = remote_addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
else:
print("{name}: Could not accept connection from: {addr}".format(name=self.name, addr=remote_addr))
self._disconnect_client_flag.set()
else:
print("{name}: Can not create connection without handshake!".format(name=self.name))
if self._timeout is not None:
self._socket.settimeout(self._timeout)
def stop(self):
print("{name}: Stopping!".format(name=self.name))
self._stop_flag.set()
def disconnect(self):
#print("{name}: Disconnecting!".format(name=self.name))
self._disconnect_client_flag.set()
def run(self):
while not self._stop_flag.is_set():
print("{name}: Waiting for connection!".format(name=self.name))
if self._handshake is not None:
self.connect()
self._disconnect_client_flag.wait()
print("{name}: Disconnecting client".format(name=self.name))
self.is_connected = False
self._remote_addr = None
self._disconnect_client_flag.clear()
self.join()
class KUKARSIRouter(object):
def __init__(self):
self._lock = threading.Lock()
self._joint_correction = np.zeros(6).astype(np.float32)
self._joint_setpoint_position_init = None
#self._rsi_server = UDPServerRealTime('rsi server','localhost', 49152)
self._rsi_server = UDPServerRealTime('rsi server','192.168.1.67', 49152)
self._rsi_server.set_max_timeout_count(3)
self._ext_control_server = UDPServerRealTime('ext control server', 'localhost', 10000, "RSI")
self._ext_control_server.set_timeout(0.004)
self._ext_control_server.set_max_timeout_count(3)
def _parse_xml_from_robot(self, data):
root = et.fromstring(data)
# Cartesian actual position
RIst = root.find('RIst').attrib
cart_actual_pos = np.array([RIst['X'], RIst['Y'], RIst['Z'],
RIst['A'], RIst['B'], RIst['C']], dtype=np.float64)
# Cartesian setpoint position
RSol = root.find('RSol').attrib
cart_setpoint_pos = np.array([RSol['X'], RSol['Y'], RSol['Z'],
RSol['A'], RSol['B'], RSol['C']], dtype=np.float64)
# Axis actual
AIPos = root.find('AIPos').attrib
axis_actual_pos = np.array([AIPos['A1'], AIPos['A2'],AIPos['A3'],
AIPos['A4'], AIPos['A5'],AIPos['A6']], dtype=np.float64)
# Axis setpoint pos
ASPos = root.find('ASPos').attrib
axis_setpoint_pos = np.array([ASPos['A1'], ASPos['A2'],ASPos['A3'],
ASPos['A4'], ASPos['A5'],ASPos['A6']], dtype=np.float64)
# Number of late packages
Delay = root.find('Delay').attrib
n_late_packages = int(Delay['D'])
# IPOC number
IPOC = int(root.find('IPOC').text)
return axis_actual_pos, axis_setpoint_pos, n_late_packages, IPOC
def _create_xml_to_robot(self, desired_axis_corr, ipoc_cycle_num):
dac = desired_axis_corr
sen = et.Element('Sen', {'Type':'ImFree'})
akorr = et.SubElement(sen, 'AK', {'A1':str(dac[0]),
'A2':str(dac[1]),
'A3':str(dac[2]),
'A4':str(dac[3]),
'A5':str(dac[4]),
'A6':str(dac[5])})
ipoc = et.SubElement(sen, 'IPOC').text = str(ipoc_cycle_num)
return et.tostring(sen)
def _create_joint_pos_packet(self, ipoc, axis_actual_pos):
return struct.pack('Q6d', ipoc, *axis_actual_pos)
def _parse_joint_pos_packet(self, packet):
data = struct.unpack('Q6d', packet)
ipoc = data[0]
q_desired = np.array(data[1:], dtype=np.float64)
return ipoc, q_desired
def run(self):
self._ext_control_server.start()
self._rsi_server.start()
#while not self._stop_flag.is_set():
while True:
## Receive rsi packet from robot. This is a blocking call if no rsi
## is connected. The timeout is set to 4ms when the robot connects,
## and is reset to None when the robot disconnects.
data = self._rsi_server.receive()
if self._rsi_server.is_connected:
## Set timeout of receive for RSI client when robot connects
if self._rsi_server.timeout() is None:
self._rsi_server.set_timeout(0.004)
## Only parse rsi packet if content is not None
if data is not None:
## Parse rsi packet xml document
q_actual, q_setpoint, late_packages, ipoc = self._parse_xml_from_robot(data)
if self._joint_setpoint_position_init is None:
self._joint_setpoint_position_init = q_setpoint
if self._ext_control_server.is_connected:
ipoc_out = ipoc
## Create joint position packet to send to external control client
packet = self._create_joint_pos_packet(ipoc_out, q_actual)
## Send send joint position packet to external control client
self._ext_control_server.send(packet)
## Receive desired joint position packet
data = self._ext_control_server.receive()
if data is not None:
## parse data from client
ipoc_in, q_desired = self._parse_joint_pos_packet(data)
print(q_desired)
## check if the received ipoc timestamp is equal to
## the received ipoc timestamp from the external
## control client
if ipoc_in == ipoc_out:
## The joint correction is equal to the desired joint
# position minus the current joint setpoint.
with self._lock:
#self._joint_correction = q_desired - self._joint_setpoint_position_init
self._joint_correction = q_desired - q_setpoint
with self._lock:
data = self._create_xml_to_robot(self._joint_correction, ipoc)
print(data)
self._rsi_server.send(data)
else:
print("RSI Router: No connection with robot. Disconnecting all external connections!")
self._joint_setpoint_position_init = None
self._joint_correction = np.zeros(6).astype(np.float32)
self._ext_control_server.disconnect()
self._rsi_server.set_timeout(None)
self._ext_control_server.stop()
self._rsi_server.stop;
if __name__ == '__main__':
router = KUKARSIRouter()
router.run()
| 43.242754
| 130
| 0.600335
|
import socket
import threading
import time
import numpy as np
import struct
import xml.etree.ElementTree as et
class UDPServerRealTime(threading.Thread):
def __init__(self,name, host, port, handshake=None):
threading.Thread.__init__(self)
self.daemon = True
self.name = name
self._host = host
self._port = port
self._handshake = handshake
self._timeout = None
self._timeout_count = 0
self._is_timed_out = False
self._max_timeout_count = None
self._lock = threading.Lock()
self._recv_data = None
self._send_data = None
self._remote_addr = None
self.is_connected = False
self._stop_flag = threading.Event()
self._disconnect_client_flag = threading.Event()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.settimeout(self._timeout)
self._socket.bind((self._host, self._port))
def set_max_timeout_count(self, timeout_count):
self._max_timeout_count = timeout_count
def timeout(self):
return self._timeout
def set_timeout(self, timeout):
self._timeout = timeout
self._socket.settimeout(self._timeout)
def receive(self):
try:
data, addr = self._socket.recvfrom(1024)
self._recv_data = data
f.is_connected:
self._remote_addr = addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
self._timeout_count = 0
return data
except socket.timeout, e:
if self._max_timeout_count is not None:
self._timeout_count += 1
print("{name}: Late package!".format(name=self.name))
if self._timeout_count > self._max_timeout_count:
print("{name}: Maximum timeouts. Disconnecting client: {addr}".format(name=self.name, addr=self._remote_addr))
self._disconnect_client_flag.set()
return None
def send(self, data):
self._send_data = data
self._socket.sendto(self._send_data, self._remote_addr)
def connect(self):
''' Create connection from external client '''
if self._handshake is not None:
if not self.is_connected:
self._socket.settimeout(None)
data, remote_addr = self._socket.recvfrom(1024)
if data == self._handshake:
self._remote_addr = remote_addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
else:
print("{name}: Could not accept connection from: {addr}".format(name=self.name, addr=remote_addr))
self._disconnect_client_flag.set()
else:
print("{name}: Can not create connection without handshake!".format(name=self.name))
if self._timeout is not None:
self._socket.settimeout(self._timeout)
def stop(self):
print("{name}: Stopping!".format(name=self.name))
self._stop_flag.set()
def disconnect(self):
self._disconnect_client_flag.set()
def run(self):
while not self._stop_flag.is_set():
print("{name}: Waiting for connection!".format(name=self.name))
if self._handshake is not None:
self.connect()
self._disconnect_client_flag.wait()
print("{name}: Disconnecting client".format(name=self.name))
self.is_connected = False
self._remote_addr = None
self._disconnect_client_flag.clear()
self.join()
class KUKARSIRouter(object):
def __init__(self):
self._lock = threading.Lock()
self._joint_correction = np.zeros(6).astype(np.float32)
self._joint_setpoint_position_init = None
self._rsi_server = UDPServerRealTime('rsi server','192.168.1.67', 49152)
self._rsi_server.set_max_timeout_count(3)
self._ext_control_server = UDPServerRealTime('ext control server', 'localhost', 10000, "RSI")
self._ext_control_server.set_timeout(0.004)
self._ext_control_server.set_max_timeout_count(3)
def _parse_xml_from_robot(self, data):
root = et.fromstring(data)
RIst = root.find('RIst').attrib
cart_actual_pos = np.array([RIst['X'], RIst['Y'], RIst['Z'],
RIst['A'], RIst['B'], RIst['C']], dtype=np.float64)
RSol = root.find('RSol').attrib
cart_setpoint_pos = np.array([RSol['X'], RSol['Y'], RSol['Z'],
RSol['A'], RSol['B'], RSol['C']], dtype=np.float64)
AIPos = root.find('AIPos').attrib
axis_actual_pos = np.array([AIPos['A1'], AIPos['A2'],AIPos['A3'],
AIPos['A4'], AIPos['A5'],AIPos['A6']], dtype=np.float64)
ASPos = root.find('ASPos').attrib
axis_setpoint_pos = np.array([ASPos['A1'], ASPos['A2'],ASPos['A3'],
ASPos['A4'], ASPos['A5'],ASPos['A6']], dtype=np.float64)
Delay = root.find('Delay').attrib
n_late_packages = int(Delay['D'])
IPOC = int(root.find('IPOC').text)
return axis_actual_pos, axis_setpoint_pos, n_late_packages, IPOC
def _create_xml_to_robot(self, desired_axis_corr, ipoc_cycle_num):
dac = desired_axis_corr
sen = et.Element('Sen', {'Type':'ImFree'})
akorr = et.SubElement(sen, 'AK', {'A1':str(dac[0]),
'A2':str(dac[1]),
'A3':str(dac[2]),
'A4':str(dac[3]),
'A5':str(dac[4]),
'A6':str(dac[5])})
ipoc = et.SubElement(sen, 'IPOC').text = str(ipoc_cycle_num)
return et.tostring(sen)
def _create_joint_pos_packet(self, ipoc, axis_actual_pos):
return struct.pack('Q6d', ipoc, *axis_actual_pos)
def _parse_joint_pos_packet(self, packet):
data = struct.unpack('Q6d', packet)
ipoc = data[0]
q_desired = np.array(data[1:], dtype=np.float64)
return ipoc, q_desired
def run(self):
self._ext_control_server.start()
self._rsi_server.start()
while True:
.004)
q_setpoint, late_packages, ipoc = self._parse_xml_from_robot(data)
if self._joint_setpoint_position_init is None:
self._joint_setpoint_position_init = q_setpoint
if self._ext_control_server.is_connected:
ipoc_out = ipoc
poc_out, q_actual)
t)
xt_control_server.receive()
if data is not None:
ipoc_in, q_desired = self._parse_joint_pos_packet(data)
print(q_desired)
with self._lock:
self._joint_correction = q_desired - q_setpoint
with self._lock:
data = self._create_xml_to_robot(self._joint_correction, ipoc)
print(data)
self._rsi_server.send(data)
else:
print("RSI Router: No connection with robot. Disconnecting all external connections!")
self._joint_setpoint_position_init = None
self._joint_correction = np.zeros(6).astype(np.float32)
self._ext_control_server.disconnect()
self._rsi_server.set_timeout(None)
self._ext_control_server.stop()
self._rsi_server.stop;
if __name__ == '__main__':
router = KUKARSIRouter()
router.run()
| false
| true
|
790d946125e1389709ed26de240d8d12a5c217e1
| 1,087
|
py
|
Python
|
ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py
|
cherisyu/ML_in_Action
|
8c1019de911e7fb1bbab973067213f5f62ab9dcd
|
[
"Apache-2.0"
] | 1
|
2019-01-23T01:47:31.000Z
|
2019-01-23T01:47:31.000Z
|
ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py
|
cherisyu/ML_in_Action
|
8c1019de911e7fb1bbab973067213f5f62ab9dcd
|
[
"Apache-2.0"
] | null | null | null |
ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py
|
cherisyu/ML_in_Action
|
8c1019de911e7fb1bbab973067213f5f62ab9dcd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# coding:utf-8
'''
Created on 2017-04-06
Update on 2017-11-17
Author: Peter/ApacheCN-xy/片刻
GitHub: https://github.com/apachecn/MachineLearning
'''
import sys
from numpy import mat, mean, power
'''
这个mapper文件按行读取所有的输入并创建一组对应的浮点数,然后得到数组的长度并创建NumPy矩阵。
再对所有的值进行平方,最后将均值和平方后的均值发送出去。这些值将用来计算全局的均值和方差。
Args:
file 输入数据
Return:
'''
def read_input(file):
for line in file:
yield line.rstrip() # 返回一个 yield 迭代器,每次获取下一个值,节约内存。
input = read_input(sys.stdin) # 创建一个输入的数据行的列表list
input = [float(line) for line in input] # 将得到的数据转化为 float 类型
numInputs = len(input) # 获取数据的个数,即输入文件的数据的行数
input = mat(input) # 将 List 转换为矩阵
sqInput = power(input, 2) # 将矩阵的数据分别求 平方,即 2次方
# 输出 数据的个数,n个数据的均值,n个数据平方之后的均值
# 第一行是标准输出,也就是reducer的输出
# 第二行识标准错误输出,即对主节点作出的响应报告,表明本节点工作正常。
# 【这不就是面试的装逼重点吗?如何设计监听架构细节】注意:一个好的习惯是想标准错误输出发送报告。如果某任务10分钟内没有报告输出,则将被Hadoop中止。
print("%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput))) # 计算均值
print("map report: still alive", file=sys.stderr)
| 26.512195
| 78
| 0.677093
|
import sys
from numpy import mat, mean, power
def read_input(file):
for line in file:
yield line.rstrip()
input = read_input(sys.stdin)
input = [float(line) for line in input]
numInputs = len(input)
input = mat(input)
sqInput = power(input, 2)
print("%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput)))
print("map report: still alive", file=sys.stderr)
| true
| true
|
790d94786a3272ddf89bf0cd9092ab9ab9a52f2a
| 2,337
|
py
|
Python
|
dependencies/generate maps/pythongis/app/tk2/_othermisc/dropdown_works.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 4
|
2015-12-05T14:31:55.000Z
|
2018-02-09T05:54:36.000Z
|
dependencies/generate maps/pythongis/app/tk2/_othermisc/dropdown_works.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 1
|
2022-01-13T02:52:09.000Z
|
2022-01-13T02:52:09.000Z
|
dependencies/generate maps/pythongis/app/tk2/_othermisc/dropdown_works.py
|
karimbahgat/AutoMap
|
eae52f16b7ce71cb2b4b7ae67cf6e4680ea2194f
|
[
"MIT"
] | 1
|
2018-10-24T01:08:11.000Z
|
2018-10-24T01:08:11.000Z
|
import Tkinter as tk
class Combobox(tk.Label):
def __init__(self, master, choices=[], default=None, direction="down", arrowimage="default", **kwargs):
style = {"relief": "groove", "bg":"white"}
style.update(kwargs)
tk.Label.__init__(self, master, **style)
# options
if direction not in ("down","up"):
raise Exception("Direction must be either down or up")
self.direction = direction
self.choices = choices
# entry
self.entry = tk.Entry(self, bg=style["bg"], borderwidth=0)
self.entry.pack(side="left", fill="y")
if default != None:
self.entry.insert(0, default)
# dropdown arrow
if arrowimage == "default":
arrowimage = tk.PhotoImage(file="dropdown.gif")
else: pass # image should be passed as a Photoimage
self.arrow = tk.Label(self, bg=style["bg"], image=arrowimage)
self.arrow.img = arrowimage
self.arrow.pack(side="right")
self.arrow.bind("<Button-1>", self.dropdown)
def dropdown(self, event=None):
self.arrow["relief"] = "sunken"
self.entry.focus_force()
self.entry.select_range(0, tk.END)
menu = tk.Menu(self.entry, tearoff=0, bg="white")
def changeentry(choice):
self.entry.delete(0, tk.END)
self.entry.insert(0, choice)
self.rollup()
if self.direction == "down": choices = self.choices
elif self.direction == "up": choices = list(reversed(self.choices))
for choice in choices:
menu.add_command(label=repr(choice).ljust(30), command=lambda x=choice: changeentry(x))
x = self.entry.winfo_rootx()
if self.direction == "down":
y = self.entry.winfo_rooty() + self.entry.winfo_height()
elif self.direction == "up":
y = self.entry.winfo_rooty() - menu.yposition(0) #menu.winfo_height()
menu.post(x, y)
def rollup(self, event=None):
self.arrow["relief"] = "flat"
if __name__ == "__main__":
win = tk.Tk()
OPTIONS = range(20)
cbox = Combobox(win, choices=OPTIONS, default=12, direction="down")
cbox.pack(side="left")
cbox2 = Combobox(win, choices=OPTIONS, default=24, direction="up")
cbox2.pack(side="left")
win.mainloop()
| 35.953846
| 107
| 0.595208
|
import Tkinter as tk
class Combobox(tk.Label):
def __init__(self, master, choices=[], default=None, direction="down", arrowimage="default", **kwargs):
style = {"relief": "groove", "bg":"white"}
style.update(kwargs)
tk.Label.__init__(self, master, **style)
if direction not in ("down","up"):
raise Exception("Direction must be either down or up")
self.direction = direction
self.choices = choices
self.entry = tk.Entry(self, bg=style["bg"], borderwidth=0)
self.entry.pack(side="left", fill="y")
if default != None:
self.entry.insert(0, default)
if arrowimage == "default":
arrowimage = tk.PhotoImage(file="dropdown.gif")
else: pass
self.arrow = tk.Label(self, bg=style["bg"], image=arrowimage)
self.arrow.img = arrowimage
self.arrow.pack(side="right")
self.arrow.bind("<Button-1>", self.dropdown)
def dropdown(self, event=None):
self.arrow["relief"] = "sunken"
self.entry.focus_force()
self.entry.select_range(0, tk.END)
menu = tk.Menu(self.entry, tearoff=0, bg="white")
def changeentry(choice):
self.entry.delete(0, tk.END)
self.entry.insert(0, choice)
self.rollup()
if self.direction == "down": choices = self.choices
elif self.direction == "up": choices = list(reversed(self.choices))
for choice in choices:
menu.add_command(label=repr(choice).ljust(30), command=lambda x=choice: changeentry(x))
x = self.entry.winfo_rootx()
if self.direction == "down":
y = self.entry.winfo_rooty() + self.entry.winfo_height()
elif self.direction == "up":
y = self.entry.winfo_rooty() - menu.yposition(0)
menu.post(x, y)
def rollup(self, event=None):
self.arrow["relief"] = "flat"
if __name__ == "__main__":
win = tk.Tk()
OPTIONS = range(20)
cbox = Combobox(win, choices=OPTIONS, default=12, direction="down")
cbox.pack(side="left")
cbox2 = Combobox(win, choices=OPTIONS, default=24, direction="up")
cbox2.pack(side="left")
win.mainloop()
| true
| true
|
790d953aa8005e2273ab5dd9fc378123efc23152
| 474
|
py
|
Python
|
sentence_transformers/losses/__init__.py
|
WHU-Peter/sentence-transformers
|
a9acd9e8eb086221c1351ad6489ed29a076ca8f5
|
[
"Apache-2.0"
] | null | null | null |
sentence_transformers/losses/__init__.py
|
WHU-Peter/sentence-transformers
|
a9acd9e8eb086221c1351ad6489ed29a076ca8f5
|
[
"Apache-2.0"
] | null | null | null |
sentence_transformers/losses/__init__.py
|
WHU-Peter/sentence-transformers
|
a9acd9e8eb086221c1351ad6489ed29a076ca8f5
|
[
"Apache-2.0"
] | null | null | null |
from .CosineSimilarityLoss import *
from .SoftmaxLoss import *
from .MultipleNegativesRankingLoss import *
from .TripletLoss import *
from .MSELoss import *
from .ContrastiveLoss import *
from .OnlineContrastiveLoss import *
from .MegaBatchMarginLoss import *
from .DenoisingAutoEncoderLoss import *
# Triplet losses
from .BatchHardTripletLoss import *
from .BatchHardSoftMarginTripletLoss import *
from .BatchSemiHardTripletLoss import *
from .BatchAllTripletLoss import *
| 31.6
| 45
| 0.827004
|
from .CosineSimilarityLoss import *
from .SoftmaxLoss import *
from .MultipleNegativesRankingLoss import *
from .TripletLoss import *
from .MSELoss import *
from .ContrastiveLoss import *
from .OnlineContrastiveLoss import *
from .MegaBatchMarginLoss import *
from .DenoisingAutoEncoderLoss import *
from .BatchHardTripletLoss import *
from .BatchHardSoftMarginTripletLoss import *
from .BatchSemiHardTripletLoss import *
from .BatchAllTripletLoss import *
| true
| true
|
790d973791541e76f2855f405878a763b947f1a2
| 8,733
|
py
|
Python
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 2
|
2017-09-11T15:25:14.000Z
|
2019-09-27T17:08:31.000Z
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 6
|
2019-08-22T06:29:45.000Z
|
2021-09-19T18:59:46.000Z
|
packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py
|
amakropoulos/structural-pipeline-measures
|
70e22f9ad94cc57e72e510576cfc3129da83f7fc
|
[
"Apache-2.0"
] | 1
|
2018-02-12T14:38:33.000Z
|
2018-02-12T14:38:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-02-23 19:25:39
# @Email: code@oscaresteban.es
# @Last Modified by: oesteban
# @Last Modified time: 2016-02-29 11:43:16
"""
Computation of the quality assessment measures on functional MRI
"""
import os.path as op
import numpy as np
import nibabel as nb
from nitime import algorithms as nta
import scipy
def gsr(epi_data, mask, direction="y", ref_file=None, out_file=None):
"""
Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The
procedure is as follows:
#. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.
#. Rotate by :math:`N/2`
#. Remove the intersection with the original mask
#. Generate a non-ghost background
#. Calculate the :abbr:`GSR (ghost to signal ratio)`
.. warning ::
This should be used with EPI images for which the phase
encoding direction is known.
:param str epi_file: path to epi file
:param str mask_file: path to brain mask
:param str direction: the direction of phase encoding (x, y, all)
:return: the computed gsr
"""
direction = direction.lower()
if direction[-1] not in ['x', 'y', 'all']:
raise Exception("Unknown direction %s, should be one of x, -x, y, -y, all"
% direction)
if direction == 'all':
result = []
for newdir in ['x', 'y']:
ofile = None
if out_file is not None:
fname, ext = op.splitext(ofile)
if ext == '.gz':
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
ofile = '%s_%s%s' % (fname, newdir, ext)
result += [gsr(epi_data, mask, newdir,
ref_file=ref_file, out_file=ofile)]
return result
# Step 1
n2_mask = np.zeros_like(mask)
# Step 2
if direction == "x":
n2lim = np.floor(mask.shape[0]/2)
n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim*2), :, :]
n2_mask[n2lim:(n2lim*2), :, :] = mask[:n2lim, :, :]
elif direction == "y":
n2lim = np.floor(mask.shape[1]/2)
n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim*2), :]
n2_mask[:, n2lim:(n2lim*2), :] = mask[:, :n2lim, :]
elif direction == "z":
n2lim = np.floor(mask.shape[2]/2)
n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim*2)]
n2_mask[:, :, n2lim:(n2lim*2)] = mask[:, :, :n2lim]
# Step 3
n2_mask = n2_mask * (1-mask)
# Step 4: non-ghost background region is labeled as 2
n2_mask = n2_mask + 2 * (1 - n2_mask - mask)
# Save mask
if ref_file is not None and out_file is not None:
ref = nb.load(ref_file)
out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
out.to_filename(out_file)
# Step 5: signal is the entire foreground image
ghost = epi_data[n2_mask == 1].mean() - epi_data[n2_mask == 2].mean()
signal = epi_data[n2_mask == 0].mean()
return float(ghost/signal)
def dvars(func, mask, output_all=False, out_file=None):
    """
    Compute the mean :abbr:`DVARS (D referring to temporal
    derivative of timecourses, VARS referring to RMS variance over voxels)`
    [Power2012]_.
    Particularly, the *standardized* :abbr:`DVARS (D referring to temporal
    derivative of timecourses, VARS referring to RMS variance over voxels)`
    [Nichols2013]_ are computed.
    .. note:: Implementation details
      Uses the implementation of the `Yule-Walker equations
      from nitime
      <http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html\
#nitime.algorithms.autoregressive.AR_est_YW>`_
      for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.
    :param numpy.ndarray func: functional data, after head-motion-correction.
    :param numpy.ndarray mask: a 3D mask of the brain
    :param bool output_all: also stack the non-standardized and the
        voxelwise-standardized DVARS into the result (row-wise)
    :param str out_file: a path to which the computed dvars should be saved.
    :return: the standardized DVARS
    """
    if len(func.shape) != 4:
        # BUG FIX: the original interpolated ``func`` into a format string
        # with no placeholder, so the ``%`` operation raised a TypeError
        # before the intended RuntimeError could be seen by the caller.
        raise RuntimeError(
            "Input fMRI dataset should be 4-dimensional, got %d dimensions"
            % len(func.shape))
    # Remove zero-variance voxels across time axis
    zv_mask = zero_variance(func, mask)
    idx = np.where(zv_mask > 0)
    mfunc = func[idx[0], idx[1], idx[2], :]
    # Robust standard deviation: IQR scaled to the normal-equivalent sigma
    func_sd = (np.percentile(mfunc, 75) -
               np.percentile(mfunc, 25)) / 1.349
    # Demean each voxel's time series (fancy indexing above returned a
    # copy, so the in-place subtraction does not touch the caller's data)
    mfunc -= mfunc.mean(axis=1)[..., np.newaxis]
    # AR(1) coefficient per voxel (Yule-Walker estimate via nitime)
    ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
    # Predicted standard deviation of temporal derivative
    func_sd_pd = np.squeeze(np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd)
    diff_sd_mean = func_sd_pd[func_sd_pd > 0].mean()
    # Compute temporal difference time series
    func_diff = np.diff(mfunc, axis=1)
    # DVARS (no standardization)
    dvars_nstd = func_diff.std(axis=0)
    # standardization
    dvars_stdz = dvars_nstd / diff_sd_mean
    # voxelwise standardization
    diff_vx_stdz = func_diff / np.array([func_sd_pd] * func_diff.shape[-1]).T
    dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
    if output_all:
        gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
    else:
        gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
    if out_file is not None:
        np.savetxt(out_file, gendvars, fmt='%.12f')
    return gendvars
def fd_jenkinson(in_file, rmax=80., out_file=None):
    """
    Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_
    on a 4D dataset, after ``3dvolreg`` has been executed
    (generally a file named ``*.affmat12.1D``).
    :param str in_file: path to the affine-matrices file; it should contain
        one 3dvolreg affine matrix per row - NOT the motion parameters
    :param float rmax: the default radius (as in FSL) of a sphere represents
      the brain in which the angular displacements are projected.
    :param str out_file: a path for the output file with the FD
    :return: the path to the output file with the FD time series
        (NOTE: the previous docstring advertised a ``(path, mean FD)``
        tuple, but only the path was ever returned)
    :rtype: str
    """
    import math

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        out_file = op.abspath('%s_fdfile%s' % (fname, ext))
    # if in_file (coordinate_transformation) is actually the rel_mean output
    # of the MCFLIRT command, forward that file
    if 'rel.rms' in in_file:
        return in_file
    pm_ = np.genfromtxt(in_file)
    original_shape = pm_.shape
    # Pad every row to a full 4x4 homogeneous affine: append [0, 0, 0, 1]
    pm = np.zeros((pm_.shape[0], pm_.shape[1] + 4))
    pm[:, :original_shape[1]] = pm_
    pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
    # rigid body transformation matrix of the previous frame
    T_rb_prev = None
    X = [0]  # first timepoint: zero displacement by definition
    for i in range(pm.shape[0]):
        # making use of the fact that the order of aff12 matrix is "row-by-row"
        T_rb = np.matrix(pm[i].reshape(4, 4))
        if T_rb_prev is not None:
            M = np.dot(T_rb, T_rb_prev.I) - np.eye(4)
            A = M[0:3, 0:3]
            b = M[0:3, 3]
            fd_j = math.sqrt(
                (rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b))
            X.append(fd_j)
        T_rb_prev = T_rb
    np.savetxt(out_file, X)
    return out_file
def gcor(func, mask):
    """
    Compute the :abbr:`GCOR (global correlation)`.
    :param numpy.ndarray func: input fMRI dataset, after motion correction
    :param numpy.ndarray mask: 3D brain mask
    :return: the computed GCOR value
    """
    # Keep only voxels whose time series has nonzero variance
    usable = zero_variance(func, mask)
    vox = np.where(usable > 0)
    # z-score each voxel's time series, then average them across voxels
    zscored = scipy.stats.mstats.zscore(func[vox[0], vox[1], vox[2], :], axis=1)
    mean_ts = zscored.mean(axis=0)
    # GCOR = mean square of the average z-scored time series
    return float(mean_ts.dot(mean_ts) / len(mean_ts))
def zero_variance(func, mask):
    """
    Mask out voxels with zero variance across t-axis
    :param numpy.ndarray func: input fMRI dataset, after motion correction
    :param numpy.ndarray mask: 3D brain mask
    :return: the 3D mask of voxels with nonzero variance across :math:`t`.
    :rtype: numpy.ndarray
    """
    voxels = np.where(mask > 0)
    timeseries = func[voxels[0], voxels[1], voxels[2], :]
    # Boolean per in-mask voxel: does its time series vary at all?
    varies = timeseries.var(axis=1) > 0
    refined = np.zeros_like(mask)
    refined[voxels] = varies
    return refined
| 31.078292
| 89
| 0.620749
|
import os.path as op
import numpy as np
import nibabel as nb
from nitime import algorithms as nta
import scipy
def gsr(epi_data, mask, direction="y", ref_file=None, out_file=None):
    """
    Compute the :abbr:`GSR (ghost to signal ratio)` of an EPI image:
    circle-shift the brain mask by N/2 along the phase-encoding axis,
    remove the intersection with the original mask (ghost region), label
    the remaining background, and return
    (ghost mean - background mean) / signal mean.
    :param numpy.ndarray epi_data: the EPI volume
    :param numpy.ndarray mask: 3D brain mask
    :param str direction: phase-encoding direction (x, -x, y, -y, all)
    :param str ref_file: reference image used for the affine/header when
        saving the generated ghost/background mask
    :param str out_file: optional path to save the generated mask
    :return: the computed GSR (a list of two values for direction 'all')
    """
    direction = direction.lower()
    # BUG FIX: 'all' was rejected by the old guard because 'all'[-1] == 'l'
    if direction != 'all' and direction[-1] not in ('x', 'y'):
        raise Exception("Unknown direction %s, should be one of x, -x, y, -y, all"
                        % direction)
    if direction == 'all':
        result = []
        for newdir in ['x', 'y']:
            ofile = None
            if out_file is not None:
                # BUG FIX: was op.splitext(ofile), i.e. splitting None
                fname, ext = op.splitext(out_file)
                if ext == '.gz':
                    fname, ext2 = op.splitext(fname)
                    ext = ext2 + ext
                ofile = '%s_%s%s' % (fname, newdir, ext)
            result += [gsr(epi_data, mask, newdir,
                           ref_file=ref_file, out_file=ofile)]
        return result
    # Step 1/2: ghost mask = brain mask circle-shifted by N/2 along the
    # phase-encoding axis. Integer division (was np.floor -> float, which
    # is not a valid slice index) and direction[-1] so '-x'/'-y' also work.
    n2_mask = np.zeros_like(mask)
    if direction[-1] == "x":
        n2lim = mask.shape[0] // 2
        n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim*2), :, :]
        n2_mask[n2lim:(n2lim*2), :, :] = mask[:n2lim, :, :]
    else:  # 'y'
        n2lim = mask.shape[1] // 2
        n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim*2), :]
        n2_mask[:, n2lim:(n2lim*2), :] = mask[:, :n2lim, :]
    # Step 3: remove the intersection with the original mask
    n2_mask = n2_mask * (1-mask)
    # Step 4: non-ghost background region is labeled as 2
    n2_mask = n2_mask + 2 * (1 - n2_mask - mask)
    # Save the generated mask alongside the reference geometry, if asked
    if ref_file is not None and out_file is not None:
        ref = nb.load(ref_file)
        out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
        out.to_filename(out_file)
    # Step 5: signal is the entire foreground image
    ghost = epi_data[n2_mask == 1].mean() - epi_data[n2_mask == 2].mean()
    signal = epi_data[n2_mask == 0].mean()
    return float(ghost/signal)
def dvars(func, mask, output_all=False, out_file=None):
    # Comment-stripped duplicate copy: standardized DVARS ([Power2012],
    # [Nichols2013]) of a 4D, motion-corrected fMRI dataset.
    if len(func.shape) != 4:
        # NOTE(review): the format string has no placeholder, so the '%'
        # with ``func`` raises TypeError before this RuntimeError can fire.
        raise RuntimeError(
            "Input fMRI dataset should be 4-dimensional" % func)
    # Drop voxels whose time series has zero variance
    zv_mask = zero_variance(func, mask)
    idx = np.where(zv_mask > 0)
    mfunc = func[idx[0], idx[1], idx[2], :]
    # Robust (IQR-based) standard deviation
    func_sd = (np.percentile(mfunc, 75) -
               np.percentile(mfunc, 25)) / 1.349
    # Demean each voxel's time series
    mfunc -= mfunc.mean(axis=1)[..., np.newaxis]
    # AR(1) coefficient per voxel (Yule-Walker, via nitime)
    ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
    # Predicted standard deviation of the temporal derivative
    func_sd_pd = np.squeeze(np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd)
    diff_sd_mean = func_sd_pd[func_sd_pd > 0].mean()
    # Temporal difference time series
    func_diff = np.diff(mfunc, axis=1)
    # DVARS without standardization
    dvars_nstd = func_diff.std(axis=0)
    # Standardized DVARS
    dvars_stdz = dvars_nstd / diff_sd_mean
    # Voxelwise-standardized DVARS
    diff_vx_stdz = func_diff / np.array([func_sd_pd] * func_diff.shape[-1]).T
    dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
    if output_all:
        gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
    else:
        gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
    if out_file is not None:
        np.savetxt(out_file, gendvars, fmt='%.12f')
    return gendvars
def fd_jenkinson(in_file, rmax=80., out_file=None):
    # Comment-stripped duplicate copy: framewise displacement [Jenkinson2002]
    # from a file holding one 3dvolreg affine matrix per row.
    import sys  # NOTE(review): unused import
    import math
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        out_file = op.abspath('%s_fdfile%s' % (fname, ext))
    # MCFLIRT rel.rms files already contain displacements; forward them
    if 'rel.rms' in in_file:
        return in_file
    pm_ = np.genfromtxt(in_file)
    original_shape = pm_.shape
    # Pad each row to a full 4x4 homogeneous affine with [0, 0, 0, 1]
    pm = np.zeros((pm_.shape[0], pm_.shape[1] + 4))
    pm[:, :original_shape[1]] = pm_
    pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
    T_rb_prev = np.matrix(np.eye(4))
    flag = 0
    X = [0]  # first timepoint: zero displacement by definition
    for i in range(0, pm.shape[0]):
        # aff12 matrices are stored row-by-row
        T_rb = np.matrix(pm[i].reshape(4, 4))
        if flag == 0:
            flag = 1  # skip the FD of the very first frame
        else:
            M = np.dot(T_rb, T_rb_prev.I) - np.eye(4)
            A = M[0:3, 0:3]
            b = M[0:3, 3]
            FD_J = math.sqrt(
                (rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b))
            X.append(FD_J)
        T_rb_prev = T_rb
    np.savetxt(out_file, X)
    return out_file
def gcor(func, mask):
    # Comment-stripped duplicate copy: global correlation (GCOR).
    # Keep only voxels with nonzero temporal variance
    tv_mask = zero_variance(func, mask)
    idx = np.where(tv_mask > 0)
    # z-score each voxel time series, average across voxels, mean square
    zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)
    avg_ts = zscores.mean(axis=0)
    return float(avg_ts.transpose().dot(avg_ts) / len(avg_ts))
def zero_variance(func, mask):
    # Comment-stripped duplicate copy: restrict the mask to voxels whose
    # time series actually varies over t.
    idx = np.where(mask > 0)
    func = func[idx[0], idx[1], idx[2], :]
    tvariance = func.var(axis=1)
    tv_mask = np.zeros_like(tvariance)
    tv_mask[tvariance > 0] = 1
    newmask = np.zeros_like(mask)
    newmask[idx] = tv_mask
    return newmask
| true
| true
|
790d9738efeb876e3e52c3f4c9907f9c3bb7fc43
| 18,305
|
py
|
Python
|
app/src/main/python/KinoCode.py
|
susumOyaji/chaquopy-matplotlib-master
|
dda4a8da1391f968023bdd9d4b9c05e63b499390
|
[
"MIT"
] | null | null | null |
app/src/main/python/KinoCode.py
|
susumOyaji/chaquopy-matplotlib-master
|
dda4a8da1391f968023bdd9d4b9c05e63b499390
|
[
"MIT"
] | null | null | null |
app/src/main/python/KinoCode.py
|
susumOyaji/chaquopy-matplotlib-master
|
dda4a8da1391f968023bdd9d4b9c05e63b499390
|
[
"MIT"
] | null | null | null |
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
from sklearn.model_selection import TimeSeriesSplit
from keras.layers import Dropout
from keras.layers import Dense, LSTM
from keras.models import Sequential
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from datetime import timedelta
from datetime import datetime
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
from pandas_datareader import data as pdr
from sklearn.metrics import r2_score, mean_squared_error
# pandasのインポート
# データの読み込み
#df = pd.read_csv('finance_dataset.csv')
# データフレームの表示
#df
code = '6976' # '6976'#6758
#2021年から今日までの1年間のデータを取得しましょう。期日を決めて行きます。
# (2021, 1, 1) # 教師データ(今までのデータ)
#start_train = datetime.date(2022, 1, 1) # 教師データ(今までのデータ)
start_train=datetime.date.today() + relativedelta(days=-700)
#dowstart_train = datetime.date(2022, 1, 5)#start_train + relativedelta(days=+3)
# 昨日分(today-1日)まで取得できる(当日分は変動しているため)
end_train = datetime.date.today() + relativedelta(days=-1)
data = pdr.get_data_yahoo(f'{code}.T', start_train, end_train) # 教師データを読み込む。
Dow_df = pdr.get_data_yahoo('^DJI', start_train, end_train) # 試験データのcsvファイルを読み込む。
Nikkei_df = pdr.get_data_yahoo('^N225', start_train, end_train) # 試験データのcsvファイルを読み込む。
#データの前処理
#欠損データがあるので、欠損値NaNを除外する
#df_NikkeiAll_drop = df_NikkeiAll.dropna()
#df_NikkeiAll_drop.head() # 先頭の5行を表形式で表示
print(data.head())
'''
png
インデックスが0から13966までの連番で、カラムに
日付('Date')、最高値('High')、最安値('Low')、始値('Open')、終値('Close')が設定されたデータフレームである事が確認できます。
日付('Date)は1965年1月5日から2021年10月21日までとなっています。
後に詳しく説明を行いますが、予測モデル作成に対して、目的変数の追加や、週ごとにデータを纏める必要があります。
そのために、曜日情報や初めの週を基準として何週目となるか等の情報と、今回の目的変数である木曜日の終値から翌日金曜日の始値が上がるかどうかの’Up’(上がる場合は'1', 同じ又は下がる場合は'0')を追加していきます。
次に、infoメソッドを用いて、欠損値の有無やカラムのデータ型の確認を行います。
'''
# 各カラムの詳細確認
data.info()
'''
png
各カラム欠損値なしである事がわかります。
日付('Date')が’object'型となっています。今回の様な時系列データを用いる際には、'datetime64'型を用いる方が利便性が高い為、pandasの'to_datetime'メソッドを用いてデータ型の変換を行います。
'''
# 日付インデックスをりセット
data.reset_index(drop=False,inplace=True)
Dow_df.reset_index(drop=False,inplace=True)
Nikkei_df.reset_index(drop=False, inplace=True)
# Dateのデータ型をを'datetime'型へ変更
data['Date'] = pd.to_datetime(data['Date'])
Dow_df['Date'] = pd.to_datetime(Dow_df['Date'])
Nikkei_df['Date'] = pd.to_datetime(Nikkei_df['Date'])
data.info()
'''
png
'Date'のカラムが'object'型から'datetime64'型へ代わっていることが確認できます。
次に曜日情報のカラムを追加します。'datetime64'型は'dt.weekday'メソッドを用いて、曜日情報を取得する事ができます。月曜日を0として連番の数字を設定されます。実行結果をdfに'weekday'カラムを追加して入力し、実行結果を確認します。
'''
# 曜日情報を追加(月曜:0, 火曜:1, 水曜:2, 木曜:3, 金曜:4、土曜:5、日曜:6)
data['weekday'] = data['Date'].dt.weekday
#data['Dowweekday'] = Dow_df['Date'].dt.weekday
#data['DowDate'] = Dow_df['Date']
#data['Nikkeiweekday'] = Nikkei_df['Date'].dt.weekday
print(data)
'''
png
'weekday'のカラムが追加され0から4の数字が入力されている事がわかります。
また、株取引の行われない土曜日: 5と日曜日: 6のデータは存在していない事もわかります。
次に、1965年1月5日の週を基準に何周目となるのかの情報を追加します。
1965年1月5日が火曜日である事がわかるので、その週の頭の月曜日となる1965年1月4日を基準として、何日目となるのかの情報を追加します。
datetimeのライブラリからdatetimeとtimedeltaをインポートします。
基準となる日の1965年1月4日をdatetime関数を使って、変数startに代入します。
dfの'Date'カラムから基準のstartと引き算をすることで、何日目となるのかを計算します。これをtimedelta関数を用いて1週間となる7日周期で割ることで何週目かを計算する事ができます。
timedelta(weeks=1)と設定することで1週間となります。
この計算結果を'weeks'というカラムをdfに追加します。実行することで初めの週は0から始まり最後の2021年10月18日の週は2963となっている事が分かります。
'''
# 初めの月曜日となる1965/1/4を基準に日数を追加
start = start_train+relativedelta(days=-2) # datetime(1965, 1, 4)
start = pd.to_datetime(start)
#data['weeks'] = (data['Date'] - start) // timedelta(weeks=1)
#data['Dowweeks'] = (Dow_df['Date'] - start) // timedelta(weeks=1)
#data['Nikkiweeks'] = (Nikkei_df['Date'] - start) // timedelta(weeks=1)
#print(data)
#data.to_csv('data/stocks_price_data/KinoCode_data.csv') # csv書き出し
'''
png
日付の情報の'Date', 'weekday', 'weeks'のカラムが分かれて表示されているので、見栄えを整理する目的で、一旦カラムの並び替えを行います。
先頭に日付の情報をまとめます。
並び替えたい順序でカラムを記述しdfを置き換えます。
実行する事で、並び替えられている事がわかります。
'''
# Closeの列のデータのみを取り出し
data['NikkiClose'] = Nikkei_df['Close'].values
# カラムの並べ替え
df = data[['Date', 'weekday','High', 'Low', 'Open', 'Close', 'NikkiClose']]
#df_dow = Dow_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]
#df_nikkei = Nikkei_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]
print(df)
df.to_csv('data/stocks_price_data/KinoCode_data.csv') # csv書き出し
'''
png
今回のような時系列データを処理する際には、set_indexメソッドを使ってindexを日付に設定します。念のためにsort_valuesメソッドを使って日付順に並び替えを行います。実行する事で、日付の'Date'がindexに設定されている事がわかります。
'''
# データの並び替え
df.sort_values(by='Date', ascending=True, inplace=True)
# 日付をインデックスにセット
df.set_index(keys='Date', inplace=True)
print(df)
'''
png
次に今回予測したい翌日の終値が本日の終値よりも上がるのかどうかの情報を追加します。shiftメソッドを用いてカラムの情報をずらすdfを作成する事ができるので、それを用いて計算を行います。
shift(-1)とする事で、カラムの情報を1行上にずらしたデータフレームを作成する事ができます。
dfを1行分上にずらしたものをdf_shiftとして作成します。実行する事でカラムの情報が1行分上にシフトしている事がわかります。一番下のカラムは欠損値となります。
'''
#カラム情報を1行上にずらしたデータフレームを作成する
df_shift = df.shift(-1)
df_shift
#png
#このdf_shiftを用いて、翌日の終値と本日の終値を引き算し、その結果を'delta_Close'というカラムを追加しdfに入力します。
#翌日の始値と本日の終値の差分を追加する
df['delta_Close'] = df_shift['Close'] - df['Close']
df
'''
png
この'delta_Close'が上がる場合1、それ以外を0として目的変数となる'Up'のカラムを追加します。同時に'delta_Close'カラムの削除も行います。
'''
#目的変数Upを追加する(翌日の終値が上がる場合1、それ以外は0とする)、'delta_Close'カラムの削除
df['Up'] = 0
df['Up'][df['delta_Close'] > 0] = 1
df = df.drop('delta_Close', axis=1)
df
'''
png
ここまでで、下準備となる週番号、曜日、目的変数の追加が終わりました。
データの全体像をつかむ
時系列データをグラフで表示する事で、株価変動の大まかなイメージを掴みます。
'Open', 'High', 'Low', 'Close'を抜き出しdf_newを作成後に、pyplotを用いてグラフ化行います。
matplotlibのライブラリからpyplotをpltという名前でインポートします。
df_newにplotメソッドを用いて、引数'kind=line'とする事で折れ線グラフが作成されます。pyplotのshowメソッドでグラフを表示します。
初めの1965年から1990年頃までは、上昇傾向となっています。その後は下がる傾向となり、2010頃より再度上昇傾向である事がわかります。
'''
# 'Open', 'High', 'Low', 'Close'グラフ化のためにカラム抽出
df_new = df[['Open', 'High', 'Low', 'Close']]
# matplotlibのインポート
# 時系列折れ線グラフの作成
df_new.plot(kind='line')
plt.show()
'''
png
特徴量を追加する
予測を正しく行えるようにする為の情報量(特徴量)を追加します。現在dfに入っている始値、終値、最高値、最安値の情報だけを用いて予測する事も可能ですが、株価の変動に影響すると言われている一般的な情報を追加していきます。
終値の前日比率と、始値と終値の差分カラムに追加します。
まず終値の前日比率ですが、本日の終値が前日から何%変動したのかを表す値となります。
(今日の終値 - 前日の終値) ÷ 前日の終値
で計算します。
shiftメソッドを用いて、今度は1行したにずらしたデータフレームを作成し、終値の前日比率'Close_ratio'を計算しdfにカラムを追加します。
'''
# 終値の前日比の追加
df_shift = df.shift(1)
df['Close_ratio'] = (df['Close'] - df_shift['Close']) / df_shift['Close']
df
#png
#次に、始値と終値の差分'Body'をdfに追加します。
# 始値と終値の差分を追加
df['Body'] = df['Open'] - df['Close']
df
'''
png
特徴量の追加は以上になります。次に、不要なデータの削除を行います。今回、月曜日から木曜日までの情報を用いて、金曜日の始値が上がるか下がるのかを予測するモデルを作成するために、各週で月曜日から金曜日までのデータが揃っている週だけ使用します。祝日や年末年始など株取引が行われていない日はデータがない為、5日分のデータが揃っていない週が存在しています。
各週毎に何日分のデータが存在しているのかを調べて、5日分揃っている週のデータを持ってきます。
手順としては、週番号'weeks'のリストを作成します。その後リストから取り出した同じ週番号のデータ数をカウントして行き結果をdfに格納し、5日揃っている週だけ残す処理をします。
週番号は0から2963まで連番で有ると考えられ、0から順番に処理すれば良いと考えられますが、万が一抜けている週が存在して居ても処理が行えるように、あえて週番号を抜き出したリスト(list_weeks)を作成します。
'''
'''
# 週番号をリストに格納
list_weeks = []
list_weeks = df['weeks'].unique()
list_weeks
#png
#リストに従い、for文を用いて、週毎の日数をカウントしたカラム'week_days'にカウント数を入力します。
# 各週ごとの日数を入力
df['week_days'] = 0
for i in list_weeks:
df['week_days'][df['weeks'] == i] = len(df[df['weeks'] == i])
df
#png
#5日データの存在する週(week_daysが5)の週のデータを抜き出して、dfに入力します。
# 月曜〜金曜まで5日分データのある週だけデータを取り出す
df = df[df['week_days'] == 5]
df
#png
#予測に使用しない金曜日のデータ(weekdayが4)を削除します。
#金曜日のデータを削除する(weekday:4となるデータ)
df = df[df['weekday'] != 4]
df
'''
#png
#不要カラムの削除と並び替えを行います。
# 不要カラムの削除と並べ替え
df = df[['weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body', 'Up']]
df
'''
png
ここまでで、データの準備は完了です。
学習データと検証データに分割する
さて、ここからは直近の2018年以降のデータを使用します。
2018年から2020年を学習データ、2021年以降を検証データとして分割します。
datetime64型をindexに設定している時系列のデータフレームは、期間を設定してデータを抜き出す事ができます。
2018年1月1日から2020年12月31日までのデータを抜き出し、df_trainに入力します。
'''
# 学習データを2018-01-01〜2020-12-31の期間としdf_trainに入力する
df_train = df['2018-01-01': '2020-12-31']
df_train
#png
#同様に、2021年1月1日以降のデータを抜き出し、df_valに入力します。
# 検証データを2021-01-01以降としてとしてdf_valに入力する
df_val = df['2021-01-01':]
df_val
'''
png
学習データと検証データをそれぞれ、説明変数と目的変数に分けます。
説明変数のカラムは'weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body'を
目的変数のカラムは'Up'になります。
学習データの説明変数をX_train、学習データの目的変数をy_trainとしてカラムを指定して、それぞれを入力します。また、表示することでX_train, y_trainそれぞれに指定した期間内のデータが入力されていることが分かります。
'''
# 学習データを説明変数(X_train)と目的変数(y_train)に分ける
X_train = df_train[['weekday', 'High', 'Low',
'Open', 'Close', 'Close_ratio', 'Body']]
y_train = df_train['Up']
# 学習データの説明変数と目的変数を確認
print(X_train)
print(y_train)
#png
#png
#同様に検証データの説明変数をX_val、目的変数をy_valとしてデータを入力し、確認します。
# 検証データを説明変数(X_val)と目的変数(y_val)に分ける
X_val = df_val[['weekday', 'High', 'Low',
'Open', 'Close', 'Close_ratio', 'Body']]
y_val = df_val['Up']
# 検証データの説明変数と目的変数を確認
print(X_val)
print(y_val)
#png
#png
#学習データと検証データの時系列グラフを作成し2021年前後でデータが分かれていることを目で確認します。2021年以前が学習データで青いグラフ、2021年以降が検証データでオレンジのグラフで示されている事が分かります。
# 学習データと検証データの終値(Close)の折れ線グラフ作成
X_train['Close'].plot(kind='line')
X_val['Close'].plot(kind='line')
# グラフの凡例を設定
plt.legend(['X_train', 'X_val'])
# グラフの表示
plt.show()
'''
png
データを整える
予測モデルに学習をさせるために、データを整えます。
説明変数は各週毎の月曜日から木曜日の4日間をセットとして一つにまとめます。また、目的変数は翌日の金曜日の始値が上がるか下がるかを示す木曜日のデータを抜き出します。機械学習を行うためには説明変数と目的変数の数を揃える必要があります。
png
説明変数を抜き出す期間により、株価の金額や変動量が違っています。
例えば、2020年4月頃は株価が16000円程度であったのに対し、12月頃には25000円を超えていたり、同じ週でも株価の変動が大きい事もあります。
このように抜き出している期間内において、データの大きさや変動幅が大きく異なっている場合、機械学習では予測が正しく行えない事があります。このような場合に標準化という処理を行うことが有ります。
この処理を行うことで、平均が0で±3以内の範囲に収める事が出来るために、機械は計算の処理がし易くなり、また予測精度が向上する事もあります。
png
この4日毎にデータを抜き出して、標準化を行うための処理を、sklearnのpreprocessingというライブラリのStandardScalerという関数を使って、for文の繰り返し処理を用いて次のような関数を定義します。
また今回、機械学習に使用する予測モデルはLSTMというニューラルネットのモデルを使用します。このモデルではnumpy配列という形式のデータを用います。
'''
# 標準化関数(StandardScaler)のインポート
# numpyのインポート
# 4日ごとにデータを抜き出して、標準化ととnumpy配列に変換する関数(std_to_np)の定義
def std_to_np(df):
    """Split the data into consecutive 4-row windows, standardize each
    window independently (zero mean / unit variance per column via
    ``StandardScaler``) and stack the windows into one numpy array.

    A trailing partial window of fewer than 4 rows is dropped.
    """
    arr = np.array(df)
    windows = []
    for start in range(0, len(arr) - 3, 4):
        chunk = arr[start:start + 4]
        windows.append(StandardScaler().fit_transform(chunk))
    return np.array(windows)
#標準化を行うStandardScalaerをsklearn.preprocessingから、numpyをnpとしてインポートします。
# 次に4日毎にデータを抜き出し、標準化を行い、numpy配列で出力する関数(std_to_np)を定義します。
#df_list = [] でまず空のリストを定義します。ここには標準化をおこなった後の、4日毎にまとまったデータを格納して行きます。
#df = np.array(df) で入力されたデータフレームをまずnumpy配列に変換します。
#この配列に対して、for文を用いて4日ずつのデータ抜き出して、df_sに入力(df_s=df[i:i+4])した後に、StandardScalerをインスタンス化し(scl= StandardScaler()) 標準化をおこなった結果をdf_stdに入力(df_std=scl.fit_transform(df_s))し、それをはじめに定義したdf_listにappendメソッドを用いて格納(df_list.append(df_std))して行きます。最後の4日分のデータまでこの繰り返し処理を行います。
#繰り返し処理が終了すると、df_listをnumpy配列で出力(return np.array(df_list))します。
#この関数をX_trainとX_valに適用してデータの型を確認します。
# 学習データと検証データの説明変数に関数(std_to_np)を実行
X_train_np_array = std_to_np(X_train)
X_val_np_array = std_to_np(X_val)
# 学習データと検証データの形の確認
print(X_train_np_array.shape)
print(X_val_np_array.shape)
'''
png
出力結果から、480日分あったX_trainが4分の1の120個のデータとなり、132日分あったX_valが4分の1の33個のデータになっている事がわかります。
それぞれの数に続く'4'は月曜から木曜の4日分のデータ数を表しており、'7'は説明変数('weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body')のカラム数を表しています。
続いて、目的変数の木曜日のデータだけ抜き出します。抜き出す前に一旦、学習データと検証データのデータを確認します。
'''
# 学習データと検証データの目的変数を確認
print(y_train)
print(y_val)
#png
#学習データは480個、検証データは132個有ることがわかります。
#これらのデータに対して、各週の4日目(木曜日)のデータを抜き出して確認します。
# 学習データ、検証データの目的変数の間引き
# 週の4日目(木曜日)のデータだけ抜き出す
y_train_new = y_train[3::4]
y_val_new = y_val[3::4]
# 間引き後の学習データと検証データの目的変数を確認
print(y_train_new)
print(y_val_new)
#学習データと検証データそれぞれ各週の4日目のデータのみになっており、個数は120個と33個となっており、4日毎にまとめた説明変数のデータ数と同じになっています。
#png
#png
#これで、機械学習を行うためのデータは整いました。
'''
予測モデルの作成
ニューラルネットの1種のLSTMを用いて予測モデルの作成と、検証データを用いた予測精度の検証をします。
LSTMを使用するためにkerasのライブラリを使えるようにする必要があります。まずこのためにtensorflowをインストールします。個人の環境で、インストール済みの方は不要ですが、google colabolatoryを使用の方は毎回行う必要があります。インストールは次のコマンドで数秒で完了します。
'''
#!pip install tensorflow
#続いて、kerasから必要な関数をインポートします。
# keras.modelsからSequentialのインポート
# keras.layersからDense、LSTMのインポート
# Dropoutのインポート
#ニューラルネットの構築や、パラメータのチューニング方法の説明は省略させて頂きますが、
# 基本的な入力層、中間層と出力層からなるモデルをこのように構築することができます。
# また、このモデルをlstm_compという関数で定義しましょう。
# LSTM構築とコンパイル関数
def lstm_comp(df):
    """Build and compile a binary-classification LSTM network.

    :param df: 3D array shaped (samples, timesteps, features); only
        ``df.shape[1]`` and ``df.shape[2]`` are used to size the input.
    :return: a compiled keras ``Sequential`` model (binary cross-entropy
        loss, Adam optimizer, accuracy metric).
    """
    # Passing the layer list to Sequential is equivalent to repeated add()
    network = Sequential([
        LSTM(256, activation='relu',
             batch_input_shape=(None, df.shape[1], df.shape[2])),
        Dropout(0.2),
        Dense(256, activation='relu'),
        Dropout(0.2),
        Dense(1, activation='sigmoid'),
    ])
    network.compile(loss='binary_crossentropy',
                    optimizer='adam', metrics=['accuracy'])
    return network
'''
次に、作成したモデルが本当に予測に使用できるのかを確認する方法として、交差検証をしましょう。正解の分かっている学習データを複数に分割して、交差検証を行うのが有効です。
交差検証の手法には複数存在しますが、今回の様な時系列のデータで過去のデータを用いて未来を予測する場合は、時系列分割の交差検証を用いるのが一般的です。
今回は学習データを5分割し、学習データと検証データが図の様なイメージの組み合わせで合計4回の学習、予測と精度検証を繰り返します。これらのスコアの平均値から、モデルが予測に使用できるかの判断を行います。
この手法では検証データよりも過去のデータのみを用いて学習を行ないます。
png
まず、時系列分割交差検証を行うためのTimeSeriesSplitと、予測結果の精度(accuracy)を算出するためにaccuracy_scoreをインポートします。
# 時系列分割のためTimeSeriesSplitのインポート
# accuracy算出のためaccuracy_scoreのインポート
つぎに、4回分の交差検証の結果を代入する空のリストを作成します。そして、TimeSeriesSplitのインスタンス化を行い変数(tscv)に代入します。
'''
valid_scores = []
tscv = TimeSeriesSplit(n_splits=4)
'''
for文を用いて、交差検証を4回繰り返します。
具体的にはこのような検証を実施します。
splitメソッドを用いて学習データを分割し、交差検証用の学習データと検証データを作成
先に定義したlstm_comp関数よりLSTMモデルを作成
交差検証用の学習データより学習
検証データの説明変数を用いて予測
予測結果の2値化
検証データの目的変数(正解データ)を用いて、予測結果の精度算出と表示
予測精度のスコアをリストに格納
'''
for fold, (train_indices, valid_indices) in enumerate(tscv.split(X_train_np_array)):
X_train, X_valid = X_train_np_array[train_indices], X_train_np_array[valid_indices]
y_train, y_valid = y_train_new[train_indices], y_train_new[valid_indices]
# LSTM構築とコンパイル関数にX_trainを渡し、変数modelに代入
model = lstm_comp(X_train)
'''# モデル学習'''
hist = model.fit(X_train, y_train, epochs=10, batch_size=64)
# loss(訓練データに対する判定結果)、val_loss(テストデータに対する判定結果)をプロットする
#loss = hist.history['loss']
#val_loss = hist.history['val_loss']
#epochs = len(loss)
''''''
# 予測
y_valid_pred = model.predict(X_valid)
# 予測結果の2値化
y_valid_pred = np.where(y_valid_pred < 0.5, 0, 1)
# 予測精度の算出と表示
score = accuracy_score(y_valid, y_valid_pred)
print(f'fold {fold} MAE: {score}')
# 予測精度スコアをリストに格納
valid_scores.append(score)
#4回の交差検証が終了したら、予測精度のスコアが格納されたリストの表示し、スコアの平均値の算出と表示もしてみましょう。
#4回のそれぞれのスコアと、平均値はこのようになりました。
print(f'valid_scores: {valid_scores}')
cv_score = np.mean(valid_scores)
print(f'CV score: {cv_score}')
'''
png
1回目:0.541
2回目:0.708
3回目:0.541
4回目:0.333
平均:0.531
今回のような上がるか下がるかの2値予測の場合、一般的にはスコアが0.5以上であればある程度使用できるという目安となります。
算出したスコアと平均値から、このモデルがある程度使用できるものと判断して次に進みましょう。
では、このモデルに対して、2018年から2020年の学習データを用いて学習をします。
流れは先ほどの交差検証と似ています。
まずは標準化した学習データでLSTMモデルを作成します。
'''
# LSTM構築とコンパイル関数にX_train_np_arrayを渡し、変数modelに代入
model = lstm_comp(X_train_np_array)
#作成したモデルで、学習します。
#一瞬で学習が終了しました。
# モデルの学習の実行
result = model.fit(X_train_np_array, y_train_new, epochs=10, batch_size=64)
#今度は学習したモデルを用いて、検証データについて予測を行い、先頭の10個を表示させてみましょう。
# 作成したモデルより検証データを用いて予測を行う
pred = model.predict(X_val_np_array)
pred[:10]
'''
このように予測した結果が表示されます。
png
この数値を、上がるか下がるかの0と1に変換します。numpyのwhereメソッドを用いて0.5を超えるものを1、それ以外を0と修正します。そして再度先頭の10個を表示します。
これで、上がるか下がるかの01どちらかの予測ができました。
'''
# 予測結果を0もしくは1に修正(0.5を境にして、1に近いほど株価が上昇、0に近いほど株価が上昇しない)
pred = np.where(pred < 0.5, 0, 1)
# 修正した予測結果の先頭10件を確認
pred[:10]
'''
png
次に、予測モデルの精度確認を行います。この予測結果を実際の値となる検証データの目的変数と比較し、正解率を計算します。sklearnのaccuracy_scoreという関数を使うことで計算が行えます。
この結果を表示すると57%の正解率で有ることがわかります。今回の様な株価が上がるか下がるかの2値の予測では、直感的に予測を行う場合50%の正解率となります。機械学習を用いる事でそれを超える正解率となりました。
'''
# 実際の結果から予測値の正解率を計算する
print('accuracy = ', accuracy_score(y_true=y_val_new, y_pred=pred))
'''
# モデルの精度を評価する
# 決定係数とRMSEを計算する
# 決定係数は1.0に、RMSEは0.0に近いほど、モデルの精度は高い
r2_score = r2_score(y_test, predictions)
rmse = np.sqrt(mean_squared_error(y_test, predictions))
print(f'r2_score: {r2_score:.4f}')
print(f'rmse: {rmse:.4f}')
'''
'''
png
最後に、予測結果と正解結果を混同行列を用いて確認します。
混同行列とは、このように2行2列の表で、真陽性、真陰性、偽陽性、偽陰性の数を表したものです。今回は、予測が0で結果も0、予測が1で結果も1であれば正解です。0と予測して結果が1、1と予測して結果が0なら不正解ということになります。全体の精度だけではなく、0と1それぞれの正解に対する精度を確認することができます。
jpg
混同行列を生成するために、sklern.mericsからconfusion_matrixとConfusionMatrixDisplayをインポートします。
また、視覚的にわかりやすい様に、ヒートマップで表示しましょう。
このように、正しく予測が行えているのは、右下の真陽性(TP)と左上の真陰性(TN)です。予測結果が、0か1のどちらかに極端に偏っている傾向ではなさそうですが、正しく予測できていないものも存在していることがわかります。予測精度を改善することで、偽陽性(FP)と偽陰性(FN)の数を減らすことができます。
'''
# 混同行列生成のためconfusion_matrixをインポート
# 混同行列を表示
cm = confusion_matrix(y_val_new, pred)
cmp = ConfusionMatrixDisplay(cm)
cmp.plot(cmap=plt.cm.Reds)
# グラフの表示
plt.show()
'''
今回は基本的な特徴量や、機械学習モデルの構築方法で予測を行いました。特徴量を追加することや、学習モデルの改良を行うことで、予測精度を向上させることが可能です。
とはいえ、データの期間が変わるだけでも精度も変わります。必ずいつも予測がうまくいくわけではありませんのでご注意ください。
'''
'''
Graphics parameter
'''
# Closeの列のデータのみを取り出し
TergetData = data['Close'].values
# datetimeの列のデータのみを取り出し
data = data.reset_index(drop=False)
TergetDate = data['Date'].values
#リシェイプ
TergetData = TergetData.reshape(-1, 1) # float64
TergetDate = TergetDate.reshape(-1, 1) # datetime64[ns]
# 読み込んだ日経平均をプロット
k = 700 # 表示する数
i = TergetData.shape[0]-k
j = TergetData.shape[0]
xdata = TergetDate[i:j]
ydata = TergetData[i:j]
#描画するデータの読み込み
fig = plt.figure(figsize=(15, 10), dpi=100)
ax = fig.add_subplot(2, 1, 1)
# 図全体のタイトル
fig.suptitle(
"Long Short-Term Memory (Deep Larning) of Artificial Intelligence[AI]", fontsize=20)
plt.title("Test Graph", {"fontsize": 20})
ax1 = plt.subplot(2, 2, 1) # 2x2の1番目
ax1.plot(xdata, ydata) # 1番目に描画
ax1.legend(loc='best')
ax1.grid()
ax1.set_xlabel('Date') # 1番目にxラベルを追加
ax1.set_ylabel(f'{code}') # 1番目にyラベルを追加
ax2 = plt.subplot(2, 2, 2) # 2x2の1番目
ax2.plot(range(epochs), loss, marker='.',
label='loss(training data)') # 1番目に描画
ax2.plot(range(epochs), val_loss, marker='.',
label='val_loss(evaluation data)') # 1番目に追加描画
ax2.legend(loc='best')
ax2.grid()
ax2.set_xlabel('epoch') # 1番目にxラベルを追加
ax2.set_ylabel('loss') # 1番目にyラベルを追加
ax3 = plt.subplot(2, 2, 3) # 2x2の3番目
ax3.plot(datelabel, predicted_N, marker='.', label='predicted') # 1番目に描画
ax3.plot(datelabel, y_test_price_N, marker='.',
label='y_test_price') # 1番目に追加描画
ax3.legend(loc='best')
ax3.grid()
ax3.set_xlabel('Date')
ax3.set_ylabel(f'{code}')
ax4 = plt.subplot(2, 2, 4) # 2x2の4番目
ax4.plot(range(len(predicted_futureN)), predicted_futureN,
marker='.', label='future predicted') # 1番目に描画
ax4.plot(range(len(predicted_futureN[:10])), predicted_futureN[:10],
marker='.', label='real data', color="0.5") # 1番目に追加描画
ax4.legend(loc='best')
ax4.grid()
ax4.set_xlabel('Date') # 1番目にxラベルを追加
ax4.set_ylabel(f'{code}') # 1番目にyラベルを追加
# グラフを表示する
plt.show()
| 26.414141
| 254
| 0.781043
|
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
from sklearn.model_selection import TimeSeriesSplit
from keras.layers import Dropout
from keras.layers import Dense, LSTM
from keras.models import Sequential
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from datetime import timedelta
from datetime import datetime
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
from pandas_datareader import data as pdr
from sklearn.metrics import r2_score, mean_squared_error
code = '6976' + relativedelta(days=-700)
relativedelta(days=-1)
data = pdr.get_data_yahoo(f'{code}.T', start_train, end_train)
Dow_df = pdr.get_data_yahoo('^DJI', start_train, end_train)
Nikkei_df = pdr.get_data_yahoo('^N225', start_train, end_train)
ad())
data.info()
data.reset_index(drop=False,inplace=True)
Dow_df.reset_index(drop=False,inplace=True)
Nikkei_df.reset_index(drop=False, inplace=True)
data['Date'] = pd.to_datetime(data['Date'])
Dow_df['Date'] = pd.to_datetime(Dow_df['Date'])
Nikkei_df['Date'] = pd.to_datetime(Nikkei_df['Date'])
data.info()
data['weekday'] = data['Date'].dt.weekday
print(data)
start = start_train+relativedelta(days=-2)
start = pd.to_datetime(start)
['NikkiClose'] = Nikkei_df['Close'].values
df = data[['Date', 'weekday','High', 'Low', 'Open', 'Close', 'NikkiClose']]
print(df)
df.to_csv('data/stocks_price_data/KinoCode_data.csv')
df.sort_values(by='Date', ascending=True, inplace=True)
df.set_index(keys='Date', inplace=True)
print(df)
df_shift = df.shift(-1)
df_shift
df['delta_Close'] = df_shift['Close'] - df['Close']
df
df['Up'] = 0
df['Up'][df['delta_Close'] > 0] = 1
df = df.drop('delta_Close', axis=1)
df
df_new = df[['Open', 'High', 'Low', 'Close']]
df_new.plot(kind='line')
plt.show()
df_shift = df.shift(1)
df['Close_ratio'] = (df['Close'] - df_shift['Close']) / df_shift['Close']
df
df['Body'] = df['Open'] - df['Close']
df
df = df[['weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body', 'Up']]
df
df_train = df['2018-01-01': '2020-12-31']
df_train
df_val = df['2021-01-01':]
df_val
X_train = df_train[['weekday', 'High', 'Low',
'Open', 'Close', 'Close_ratio', 'Body']]
y_train = df_train['Up']
print(X_train)
print(y_train)
X_val = df_val[['weekday', 'High', 'Low',
'Open', 'Close', 'Close_ratio', 'Body']]
y_val = df_val['Up']
print(X_val)
print(y_val)
X_train['Close'].plot(kind='line')
X_val['Close'].plot(kind='line')
plt.legend(['X_train', 'X_val'])
plt.show()
def std_to_np(df):
df_list = []
df = np.array(df)
for i in range(0, len(df) - 3, 4):
df_s = df[i:i+4]
scl = StandardScaler()
df_std = scl.fit_transform(df_s)
df_list.append(df_std)
return np.array(df_list)
X_train_np_array = std_to_np(X_train)
X_val_np_array = std_to_np(X_val)
print(X_train_np_array.shape)
print(X_val_np_array.shape)
print(y_train)
print(y_val)
y_train_new = y_train[3::4]
y_val_new = y_val[3::4]
print(y_train_new)
print(y_val_new)
def lstm_comp(df):
    """Build and compile a binary-classification LSTM model.

    The input's window length and feature count are taken from ``df``
    (shape ``(n_windows, window, features)``).  The network is a single
    256-unit LSTM followed by a dense layer, with dropout after each,
    ending in a sigmoid output for the up/down probability.
    """
    window_len, n_features = df.shape[1], df.shape[2]
    network = Sequential()
    for layer in (
        LSTM(256, activation='relu',
             batch_input_shape=(None, window_len, n_features)),
        Dropout(0.2),
        Dense(256, activation='relu'),
        Dropout(0.2),
        Dense(1, activation='sigmoid'),
    ):
        network.add(layer)
    network.compile(loss='binary_crossentropy',
                    optimizer='adam', metrics=['accuracy'])
    return network
# --- Walk-forward cross-validation over the windowed training set ---
valid_scores = []
tscv = TimeSeriesSplit(n_splits=4)
for fold, (train_indices, valid_indices) in enumerate(tscv.split(X_train_np_array)):
    X_train, X_valid = X_train_np_array[train_indices], X_train_np_array[valid_indices]
    y_train, y_valid = y_train_new[train_indices], y_train_new[valid_indices]
    model = lstm_comp(X_train)
    hist = model.fit(X_train, y_train, epochs=10, batch_size=64)
    # Threshold the sigmoid output at 0.5 to get hard 0/1 predictions.
    y_valid_pred = model.predict(X_valid)
    y_valid_pred = np.where(y_valid_pred < 0.5, 0, 1)
    score = accuracy_score(y_valid, y_valid_pred)
    # BUGFIX: the metric computed above is accuracy; the original message
    # mislabeled it as "MAE".
    print(f'fold {fold} accuracy: {score}')
    valid_scores.append(score)
print(f'valid_scores: {valid_scores}')
cv_score = np.mean(valid_scores)
print(f'CV score: {cv_score}')
# --- Retrain on the full training set and evaluate on the hold-out set ---
model = lstm_comp(X_train_np_array)
result = model.fit(X_train_np_array, y_train_new, epochs=10, batch_size=64)
pred = model.predict(X_val_np_array)
pred = np.where(pred < 0.5, 0, 1)
print('accuracy = ', accuracy_score(y_true=y_val_new, y_pred=pred))
cm = confusion_matrix(y_val_new, pred)
# Renamed from `cmp` to avoid shadowing a builtin name and clarify intent.
cm_display = ConfusionMatrixDisplay(cm)
cm_display.plot(cmap=plt.cm.Reds)
plt.show()
# --- Summary figure: 2x2 grid of price, loss and prediction plots ---
# NOTE(review): `epochs`, `loss`, `val_loss`, `datelabel`, `predicted_N`,
# `y_test_price_N`, `predicted_futureN` and `code` are not defined in this
# section; they presumably come from an earlier, out-of-view part of the
# script — verify before running this block in isolation.
TergetData = data['Close'].values
data = data.reset_index(drop=False)
TergetDate = data['Date'].values
TergetData = TergetData.reshape(-1, 1)
TergetDate = TergetDate.reshape(-1, 1)
# Plot only the most recent k samples.
k = 700
i = TergetData.shape[0]-k
j = TergetData.shape[0]
xdata = TergetDate[i:j]
ydata = TergetData[i:j]
fig = plt.figure(figsize=(15, 10), dpi=100)
ax = fig.add_subplot(2, 1, 1)
fig.suptitle(
    "Long Short-Term Memory (Deep Larning) of Artificial Intelligence[AI]", fontsize=20)
plt.title("Test Graph", {"fontsize": 20})
# Top-left: raw close-price history.
ax1 = plt.subplot(2, 2, 1)
ax1.plot(xdata, ydata)
ax1.legend(loc='best')
ax1.grid()
ax1.set_xlabel('Date')
ax1.set_ylabel(f'{code}')
# Top-right: training vs. validation loss curves.
ax2 = plt.subplot(2, 2, 2)
ax2.plot(range(epochs), loss, marker='.',
         label='loss(training data)')
ax2.plot(range(epochs), val_loss, marker='.',
         label='val_loss(evaluation data)')
ax2.legend(loc='best')
ax2.grid()
ax2.set_xlabel('epoch')
ax2.set_ylabel('loss')
# Bottom-left: predicted vs. actual test-set prices.
ax3 = plt.subplot(2, 2, 3)
ax3.plot(datelabel, predicted_N, marker='.', label='predicted')
ax3.plot(datelabel, y_test_price_N, marker='.',
         label='y_test_price')
ax3.legend(loc='best')
ax3.grid()
ax3.set_xlabel('Date')
ax3.set_ylabel(f'{code}')
# Bottom-right: future forecast; the first 10 points are re-plotted in
# grey as the "real data" overlay.
ax4 = plt.subplot(2, 2, 4)
ax4.plot(range(len(predicted_futureN)), predicted_futureN,
         marker='.', label='future predicted')
ax4.plot(range(len(predicted_futureN[:10])), predicted_futureN[:10],
         marker='.', label='real data', color="0.5")
ax4.legend(loc='best')
ax4.grid()
ax4.set_xlabel('Date')
ax4.set_ylabel(f'{code}')
plt.show()
| true
| true
|
790d97e566e99e7453937ad785d99d09ad9d43a7
| 220
|
py
|
Python
|
src/tikkie2/v2/__init__.py
|
new10com/tikkie-api
|
9dfa96f46eb5150fb22a9d65b7c90cd2133da442
|
[
"MIT"
] | null | null | null |
src/tikkie2/v2/__init__.py
|
new10com/tikkie-api
|
9dfa96f46eb5150fb22a9d65b7c90cd2133da442
|
[
"MIT"
] | null | null | null |
src/tikkie2/v2/__init__.py
|
new10com/tikkie-api
|
9dfa96f46eb5150fb22a9d65b7c90cd2133da442
|
[
"MIT"
] | null | null | null |
from . import ( # noqa
ideal_qr,
ideal_qr_notification,
payment,
payment_request,
payment_request_notification,
refund,
sandbox_app,
transaction_bundle,
transactions_notifications,
)
| 18.333333
| 33
| 0.7
|
from . import (
ideal_qr,
ideal_qr_notification,
payment,
payment_request,
payment_request_notification,
refund,
sandbox_app,
transaction_bundle,
transactions_notifications,
)
| true
| true
|
790d985f21e29559ae25c3c407dbec1a5e270d4b
| 4,351
|
py
|
Python
|
iqs_client/models/twc_repository_info_response.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
iqs_client/models/twc_repository_info_response.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
iqs_client/models/twc_repository_info_response.py
|
thomas-bc/mms-autocref
|
1db6697f929a1c782c902923209389e337ec6961
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
IncQuery Server
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.12.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class TWCRepositoryInfoResponse(object):
    """OpenAPI model for a Teamwork Cloud repository-info response.

    Auto-generated model style: declared attribute types live in
    ``openapi_types`` and the attribute -> JSON key mapping in
    ``attribute_map``.  Both ``repository_structure`` and
    ``last_updated`` are required (``None`` is rejected).
    """

    # Attribute name -> declared OpenAPI type.
    openapi_types = {
        'repository_structure': 'TWCRepositoryStructure',
        'last_updated': 'str'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'repository_structure': 'repositoryStructure',
        'last_updated': 'lastUpdated'
    }

    def __init__(self, repository_structure=None, last_updated=None):
        """Create the model; both fields are validated via their setters."""
        self._repository_structure = None
        self._last_updated = None
        self.discriminator = None
        # Assign through the properties so None values raise ValueError.
        self.repository_structure = repository_structure
        self.last_updated = last_updated

    @property
    def repository_structure(self):
        """The repository structure of this response.

        :rtype: TWCRepositoryStructure
        """
        return self._repository_structure

    @repository_structure.setter
    def repository_structure(self, repository_structure):
        """Set the repository structure; ``None`` is not allowed."""
        if repository_structure is None:
            raise ValueError("Invalid value for `repository_structure`, must not be `None`")  # noqa: E501
        self._repository_structure = repository_structure

    @property
    def last_updated(self):
        """The last-updated timestamp of this response.

        :rtype: str
        """
        return self._last_updated

    @last_updated.setter
    def last_updated(self, last_updated):
        """Set the last-updated timestamp; ``None`` is not allowed."""
        if last_updated is None:
            raise ValueError("Invalid value for `last_updated`, must not be `None`")  # noqa: E501
        self._last_updated = last_updated

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models (anything exposing ``to_dict``) are serialised
        recursively, one level deep inside lists and dict values,
        mirroring the generated-client convention.
        """
        serialized = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                serialized[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                serialized[name] = value.to_dict()
            elif isinstance(value, dict):
                serialized[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                serialized[name] = value
        return serialized

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal when all attributes match."""
        if not isinstance(other, TWCRepositoryInfoResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not (self == other)
| 30.858156
| 124
| 0.619168
|
import pprint
import re
import six
class TWCRepositoryInfoResponse(object):
openapi_types = {
'repository_structure': 'TWCRepositoryStructure',
'last_updated': 'str'
}
attribute_map = {
'repository_structure': 'repositoryStructure',
'last_updated': 'lastUpdated'
}
def __init__(self, repository_structure=None, last_updated=None):
self._repository_structure = None
self._last_updated = None
self.discriminator = None
self.repository_structure = repository_structure
self.last_updated = last_updated
@property
def repository_structure(self):
return self._repository_structure
@repository_structure.setter
def repository_structure(self, repository_structure):
if repository_structure is None:
raise ValueError("Invalid value for `repository_structure`, must not be `None`")
self._repository_structure = repository_structure
@property
def last_updated(self):
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
if last_updated is None:
raise ValueError("Invalid value for `last_updated`, must not be `None`")
self._last_updated = last_updated
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, TWCRepositoryInfoResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790d98a72fe8f84a93bdacf95cb7a6a30c124775
| 3,462
|
py
|
Python
|
toDoList/toDoList/settings.py
|
ruoyunruyan/toDoList
|
99c06b67f3c153ae66871725b44cde907c972a86
|
[
"MIT"
] | null | null | null |
toDoList/toDoList/settings.py
|
ruoyunruyan/toDoList
|
99c06b67f3c153ae66871725b44cde907c972a86
|
[
"MIT"
] | null | null | null |
toDoList/toDoList/settings.py
|
ruoyunruyan/toDoList
|
99c06b67f3c153ae66871725b44cde907c972a86
|
[
"MIT"
] | null | null | null |
"""
Django settings for toDoList project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r=cr&4z(#t-&vbyp_71-sy&edioe73mt48%)1ur^g1&@p$m69e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'toDoList.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': ['django.templatetags.static']
},
},
]
WSGI_APPLICATION = 'toDoList.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'things_to_do',
'USER': 'root',
'PASSWORD': '123456',
'PORT': 3306,
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
| 27.046875
| 92
| 0.659445
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'r=cr&4z(#t-&vbyp_71-sy&edioe73mt48%)1ur^g1&@p$m69e'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'toDoList.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'builtins': ['django.templatetags.static']
},
},
]
WSGI_APPLICATION = 'toDoList.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'things_to_do',
'USER': 'root',
'PASSWORD': '123456',
'PORT': 3306,
'HOST': '127.0.0.1'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
| true
| true
|
790d99a94d22472b3df9e52c3412f8a2f82ec3ce
| 3,270
|
py
|
Python
|
trading_calendars/exchange_calendar_cmes.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 1
|
2020-07-25T06:18:30.000Z
|
2020-07-25T06:18:30.000Z
|
trading_calendars/exchange_calendar_cmes.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 13
|
2021-04-13T06:49:23.000Z
|
2022-03-31T00:08:10.000Z
|
trading_calendars/exchange_calendar_cmes.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 3
|
2020-03-05T23:38:14.000Z
|
2021-12-12T00:31:36.000Z
|
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import HolidayCalendar, TradingCalendar
from .us_holidays import (
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USIndependenceDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USNationalDaysofMourning,
USNewYearsDay,
)
# Useful resources for making changes to this file:
# http://www.cmegroup.com/tools-information/holiday-calendar.html
class CMESExchangeCalendar(TradingCalendar):
    """
    Exchange calendar for the Chicago Mercantile Exchange (CMES).
    Open Time: 5:00 PM, America/Chicago
    Close Time: 5:00 PM, America/Chicago
    Regularly-Observed Holidays:
    - New Years Day
    - Good Friday
    - Christmas
    """

    name = "CME"
    country_code = "US"
    tz = timezone("America/Chicago")
    open_times = ((None, time(17, 1)),)
    close_times = ((None, time(16)),)

    @property
    def open_offset(self):
        # Sessions are labeled with the day they close on, so the open
        # falls on the previous calendar day.
        return -1

    @property
    def regular_holidays(self):
        # The CME applies different holiday rules per product group (see
        # http://www.cmegroup.com/tools-information/holiday-calendar.html);
        # e.g. on July 4 some groups close at noon while others are fully
        # closed.  We model the CME as a single calendar with the most
        # conservative hours, so July 4 is handled as an early close
        # below rather than a full holiday here.
        full_closures = [
            USNewYearsDay,
            GoodFriday,
            Christmas,
        ]
        return HolidayCalendar(full_closures)

    @property
    def adhoc_holidays(self):
        # One-off closures such as national days of mourning.
        return USNationalDaysofMourning

    @property
    def special_closes(self):
        # Days on which trading halts early, at 12:00 local time.
        noon_close_rules = HolidayCalendar([
            USMartinLutherKingJrAfter1998,
            USPresidentsDay,
            USMemorialDay,
            USLaborDay,
            USIndependenceDay,
            USThanksgivingDay,
            USBlackFridayInOrAfter1993,
            ChristmasEveBefore1993,
            ChristmasEveInOrAfter1993,
        ])
        return [(time(12), noon_close_rules)]
| 29.196429
| 144
| 0.624159
|
from datetime import time
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import HolidayCalendar, TradingCalendar
from .us_holidays import (
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USIndependenceDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USNationalDaysofMourning,
USNewYearsDay,
)
class CMESExchangeCalendar(TradingCalendar):
name = "CME"
country_code = "US"
tz = timezone("America/Chicago")
open_times = ((None, time(17, 1)),)
close_times = ((None, time(16)),)
@property
def open_offset(self):
return -1
@property
def regular_holidays(self):
return HolidayCalendar(
[
USNewYearsDay,
GoodFriday,
Christmas,
]
)
@property
def adhoc_holidays(self):
return USNationalDaysofMourning
@property
def special_closes(self):
return [
(
time(12),
HolidayCalendar(
[
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
]
),
)
]
| true
| true
|
790d9a42da8e6c811ed3b154e78306f6f7e56b9e
| 144
|
py
|
Python
|
AlteMatrix/ipanalyzer/__init__.py
|
Ir0n-c0d3X/AlteMatrix
|
9479ddeec9839b88d8f7079d00fd62f3ee47157d
|
[
"MIT"
] | 10
|
2021-09-19T13:55:58.000Z
|
2022-01-16T02:15:28.000Z
|
AlteMatrix/ipanalyzer/__init__.py
|
samuelajala01/AlteMatrix
|
e22fe443241fb646a218100bdcb19e0e4cc85635
|
[
"MIT"
] | null | null | null |
AlteMatrix/ipanalyzer/__init__.py
|
samuelajala01/AlteMatrix
|
e22fe443241fb646a218100bdcb19e0e4cc85635
|
[
"MIT"
] | 2
|
2021-09-19T23:51:51.000Z
|
2022-01-16T02:15:42.000Z
|
# BUGFIX: the docstring originally appeared after `__license__`, where it is
# just an unused string expression; placed first it becomes the real module
# docstring (module.__doc__).
"""This is the IP Analyzer program for the AlteMatrix module."""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
| 28.8
| 64
| 0.736111
|
__license__ = "MIT"
| true
| true
|
790d9b66017834e0dfa129ac19fd3acb39af0d07
| 7,097
|
py
|
Python
|
sb/stable_baselines_ex/common/wrappers_ex.py
|
artberryx/SAR
|
e6c79ea271f1033d5ea3c11556aff173adf6d941
|
[
"MIT"
] | 4
|
2021-11-12T05:24:21.000Z
|
2021-12-13T01:18:08.000Z
|
sb/stable_baselines_ex/common/wrappers_ex.py
|
artberryx/SAR
|
e6c79ea271f1033d5ea3c11556aff173adf6d941
|
[
"MIT"
] | null | null | null |
sb/stable_baselines_ex/common/wrappers_ex.py
|
artberryx/SAR
|
e6c79ea271f1033d5ea3c11556aff173adf6d941
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
from gym import spaces
from stable_baselines.common.running_mean_std import RunningMeanStd
class ScaleRewardEnv(gym.RewardWrapper):
    """Reward wrapper that multiplies every reward by a constant factor."""

    def __init__(self, env: gym.Env, scale):
        # Keep the explicit base-class call style used elsewhere in this file.
        gym.RewardWrapper.__init__(self, env)
        self.scale = scale

    def reward(self, reward: float) -> float:
        """Return the scaled reward."""
        return self.scale * reward
class RepeatGoalEnv(gym.Wrapper):
    """Wrapper that repeats an action until a learned time/distance budget
    is spent, optionally injecting action noise or external forces.

    When ``max_d`` and/or ``max_t`` is set, the agent's action gains one
    extra scalar ``u`` in [-1, 1] (rescaled to [0, 1]) that controls how
    long the underlying action is repeated — measured in simulated time
    (``max_t``), in normalised observation distance (``max_d``), or a
    ``lambda_dt``-weighted mix of both.  Rewards accumulated during the
    repeat are discounted by ``gamma`` per inner step.
    """

    def __init__(
            self,
            env: gym.Env,
            gamma,
            max_d,
            max_t,
            lambda_dt,
            anoise_type=None,
            anoise_prob=0.,
            anoise_std=0.,
    ):
        gym.Wrapper.__init__(self, env)
        # Small constant to avoid division by zero when normalising obs.
        self.epsilon_std = 1e-3
        self.gamma = gamma
        self.max_d = max_d
        self.max_t = max_t
        self.lambda_dt = lambda_dt
        # Noise config: 'action' perturbs the action vector; 'ext*' types
        # apply external forces/torques to a body in the MuJoCo sim.
        self.anoise_type = anoise_type
        self.anoise_prob = anoise_prob
        self.anoise_std = anoise_std
        # Find the main robot body that external noise forces target
        # (body names differ across MuJoCo models).
        self.body_key = None
        part_keys = set(self.env.sim.model._body_name2id.keys())
        target_keys = ['torso', 'cart', 'body1']
        for target_key in target_keys:
            if target_key in part_keys:
                self.body_key = target_key
                break
        if self.anoise_type in ['ext_fpc']:
            # 'ext_fpc' appends the current clipped force vector to the
            # observation, so widen the observation space by 3 dims.
            low = np.concatenate([self.observation_space.low, [-np.inf] * 3])
            high = np.concatenate([self.observation_space.high, [np.inf] * 3])
            self.observation_space = spaces.Box(
                low=low, high=high,
                shape=(self.observation_space.shape[0] + 3,), dtype=self.observation_space.dtype,
            )
            # NOTE(review): observation_space.shape[0] already includes the
            # +3 widening above, so obs_dim exceeds the actual observation
            # length by 3; the [:obs_dim] slice in _update_obs_estimate
            # makes this harmless, but confirm the intent.
            self.obs_dim = self.observation_space.shape[0] + 3
            self.cur_force = np.zeros(3)
        else:
            self.obs_dim = self.observation_space.shape[0]
        action_dim = self.env.action_space.shape[0]
        self.ori_action_dim = action_dim
        low = self.env.action_space.low
        high = self.env.action_space.high
        if self.max_d is not None or self.max_t is not None:
            # Extra action dim: the repeat-budget control u in [-1, 1].
            action_dim += 1
            low = np.r_[low, -1.]
            high = np.r_[high, 1.]
        self.action_space = spaces.Box(
            low=low, high=high, shape=(action_dim,), dtype=env.action_space.dtype
        )
        self.cur_obs = None
        # Running mean/std used to normalise observations for the
        # distance-based stopping criterion.
        self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
        self.reset_update_obs_estimate = False
        self.num_steps = 0
        self.eval_mode = False

    def _update_obs_estimate(self, obs):
        # Only track statistics during training; frozen in eval mode.
        if not self.eval_mode:
            self.obs_rms.update(obs[:, :self.obs_dim])

    def step(self, aug_action):
        """Apply the (possibly augmented) action, repeating it until the
        episode ends or the time/distance budget encoded in the last
        action component is exhausted.  Returns the usual (obs, reward,
        done, info) tuple with diagnostics added to ``info``.
        """
        cur_idx = self.ori_action_dim
        # Strip the budget component; the env only sees the raw action.
        action = aug_action[:self.ori_action_dim]
        if self.anoise_type == 'action':
            # Gaussian action noise, applied with probability anoise_prob.
            if np.random.rand() < self.anoise_prob:
                action = action + np.random.randn(*action.shape) * self.anoise_std
                action = np.clip(action, self.action_space.low[:len(action)], self.action_space.high[:len(action)])
        elif self.anoise_type is not None and 'ext' in self.anoise_type:
            # External perturbation: torque for Reacher (its bodies do not
            # respond usefully to linear force), linear force otherwise.
            if np.random.rand() < self.anoise_prob:
                if self.env.spec.id == 'Reacher-v2':
                    force = np.zeros(3)
                    torque = np.random.randn(3) * self.anoise_std
                    cur_info = torque
                else:
                    force = np.random.randn(3) * self.anoise_std
                    torque = np.zeros(3)
                    cur_info = force
                if self.anoise_type == 'ext_fpc':
                    # Expose the clipped perturbation via the observation.
                    self.cur_force = np.clip(cur_info, -1, 1)
                self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = np.r_[
                    force, torque]
            else:
                # Clear any previously applied perturbation.
                self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = [0] * 6
        if self.max_d is not None or self.max_t is not None:
            # Rescale the budget control from [-1, 1] to [0, 1].
            u = aug_action[cur_idx]
            cur_idx += 1
            norm_u = (u + 1) / 2
            u = norm_u
        else:
            u = None
        lambda_dt = self.lambda_dt
        total_reward = 0.0
        done = None
        cur_gamma = 1.0
        first_obs = self.cur_obs
        # Repeat the action until done or the budget is spent; the bound
        # is effectively "forever" and only the break conditions matter.
        for i in range(100000000):
            obs, reward, done, info = self.env.step(action)
            if self.anoise_type in ['ext_fpc']:
                obs = np.concatenate([obs, self.cur_force])
            if not done:
                self._update_obs_estimate(obs[np.newaxis, ...])
                self.reset_update_obs_estimate = True
            # Discount inner-step rewards so repeated actions are valued
            # consistently with single-step execution.
            total_reward += reward * cur_gamma
            cur_gamma *= self.gamma
            if done:
                break
            if self.max_d is None and self.max_t is None:
                break
            if self.max_t is not None:
                # Simulated time elapsed since the repeat started.
                t_delta = (i + 1) * self.env.dt
            if self.max_d is not None:
                # Mean absolute per-dimension distance between normalised
                # current and initial observations.
                norm_obs = (obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
                norm_first_obs = (first_obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
                d_delta = np.linalg.norm(norm_obs - norm_first_obs, ord=1) / len(obs)
            if self.max_d is not None and self.max_t is not None:
                if lambda_dt is None:
                    # Distance budget scaled by u; time capped at max_t.
                    if d_delta >= u * self.max_d:
                        break
                    if t_delta >= self.max_t:
                        break
                else:
                    # Mixed criterion: lambda_dt weights distance vs time.
                    ori_t_delta = t_delta
                    t_delta = t_delta / self.max_t
                    d_delta = d_delta / self.max_d
                    delta = lambda_dt * d_delta + (1 - lambda_dt) * t_delta
                    if delta >= u:
                        break
                    if ori_t_delta >= self.max_t:
                        break
            elif self.max_t is not None:
                if t_delta >= u * self.max_t:
                    break
            elif self.max_d is not None:
                if d_delta >= u * self.max_d:
                    break
        self.cur_obs = obs
        # Diagnostics: number of inner steps and elapsed simulated time.
        info['w'] = i + 1
        info['t_diff'] = (i + 1) * self.env.dt
        if u is not None:
            if self.max_d is not None and self.max_t is not None:
                pass
            elif self.max_t is not None:
                info['t'] = u * self.max_t
            elif self.max_d is not None:
                info['d'] = u * self.max_d
            info['u'] = u
        if lambda_dt is not None:
            info['lambda_dt'] = lambda_dt
        self.num_steps += 1
        return self.cur_obs, total_reward, done, info

    def reset(self, **kwargs):
        """Reset the wrapped env (and the 'ext_fpc' force state)."""
        obs = self.env.reset(**kwargs)
        if self.anoise_type in ['ext_fpc']:
            self.cur_force = np.zeros(3)
            obs = np.concatenate([obs, self.cur_force])
        # Fold the reset observation into the running stats only if at
        # least one step has updated them since the last reset.
        if self.reset_update_obs_estimate:
            self._update_obs_estimate(obs[np.newaxis, ...])
            self.reset_update_obs_estimate = False
        self.cur_obs = obs
        return self.cur_obs
| 35.133663
| 115
| 0.533888
|
import gym
import numpy as np
from gym import spaces
from stable_baselines.common.running_mean_std import RunningMeanStd
class ScaleRewardEnv(gym.RewardWrapper):
def __init__(self, env: gym.Env, scale):
gym.RewardWrapper.__init__(self, env)
self.scale = scale
def reward(self, reward: float) -> float:
return reward * self.scale
class RepeatGoalEnv(gym.Wrapper):
def __init__(
self,
env: gym.Env,
gamma,
max_d,
max_t,
lambda_dt,
anoise_type=None,
anoise_prob=0.,
anoise_std=0.,
):
gym.Wrapper.__init__(self, env)
self.epsilon_std = 1e-3
self.gamma = gamma
self.max_d = max_d
self.max_t = max_t
self.lambda_dt = lambda_dt
self.anoise_type = anoise_type
self.anoise_prob = anoise_prob
self.anoise_std = anoise_std
self.body_key = None
part_keys = set(self.env.sim.model._body_name2id.keys())
target_keys = ['torso', 'cart', 'body1']
for target_key in target_keys:
if target_key in part_keys:
self.body_key = target_key
break
if self.anoise_type in ['ext_fpc']:
low = np.concatenate([self.observation_space.low, [-np.inf] * 3])
high = np.concatenate([self.observation_space.high, [np.inf] * 3])
self.observation_space = spaces.Box(
low=low, high=high,
shape=(self.observation_space.shape[0] + 3,), dtype=self.observation_space.dtype,
)
self.obs_dim = self.observation_space.shape[0] + 3
self.cur_force = np.zeros(3)
else:
self.obs_dim = self.observation_space.shape[0]
action_dim = self.env.action_space.shape[0]
self.ori_action_dim = action_dim
low = self.env.action_space.low
high = self.env.action_space.high
if self.max_d is not None or self.max_t is not None:
action_dim += 1
low = np.r_[low, -1.]
high = np.r_[high, 1.]
self.action_space = spaces.Box(
low=low, high=high, shape=(action_dim,), dtype=env.action_space.dtype
)
self.cur_obs = None
self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
self.reset_update_obs_estimate = False
self.num_steps = 0
self.eval_mode = False
def _update_obs_estimate(self, obs):
if not self.eval_mode:
self.obs_rms.update(obs[:, :self.obs_dim])
def step(self, aug_action):
cur_idx = self.ori_action_dim
action = aug_action[:self.ori_action_dim]
if self.anoise_type == 'action':
if np.random.rand() < self.anoise_prob:
action = action + np.random.randn(*action.shape) * self.anoise_std
action = np.clip(action, self.action_space.low[:len(action)], self.action_space.high[:len(action)])
elif self.anoise_type is not None and 'ext' in self.anoise_type:
if np.random.rand() < self.anoise_prob:
if self.env.spec.id == 'Reacher-v2':
force = np.zeros(3)
torque = np.random.randn(3) * self.anoise_std
cur_info = torque
else:
force = np.random.randn(3) * self.anoise_std
torque = np.zeros(3)
cur_info = force
if self.anoise_type == 'ext_fpc':
self.cur_force = np.clip(cur_info, -1, 1)
self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = np.r_[
force, torque]
else:
self.env.sim.data.xfrc_applied[self.env.sim.model._body_name2id[self.body_key], :] = [0] * 6
if self.max_d is not None or self.max_t is not None:
u = aug_action[cur_idx]
cur_idx += 1
norm_u = (u + 1) / 2
u = norm_u
else:
u = None
lambda_dt = self.lambda_dt
total_reward = 0.0
done = None
cur_gamma = 1.0
first_obs = self.cur_obs
for i in range(100000000):
obs, reward, done, info = self.env.step(action)
if self.anoise_type in ['ext_fpc']:
obs = np.concatenate([obs, self.cur_force])
if not done:
self._update_obs_estimate(obs[np.newaxis, ...])
self.reset_update_obs_estimate = True
total_reward += reward * cur_gamma
cur_gamma *= self.gamma
if done:
break
if self.max_d is None and self.max_t is None:
break
if self.max_t is not None:
t_delta = (i + 1) * self.env.dt
if self.max_d is not None:
norm_obs = (obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
norm_first_obs = (first_obs - self.obs_rms.mean) / (np.sqrt(self.obs_rms.var) + self.epsilon_std)
d_delta = np.linalg.norm(norm_obs - norm_first_obs, ord=1) / len(obs)
if self.max_d is not None and self.max_t is not None:
if lambda_dt is None:
if d_delta >= u * self.max_d:
break
if t_delta >= self.max_t:
break
else:
ori_t_delta = t_delta
t_delta = t_delta / self.max_t
d_delta = d_delta / self.max_d
delta = lambda_dt * d_delta + (1 - lambda_dt) * t_delta
if delta >= u:
break
if ori_t_delta >= self.max_t:
break
elif self.max_t is not None:
if t_delta >= u * self.max_t:
break
elif self.max_d is not None:
if d_delta >= u * self.max_d:
break
self.cur_obs = obs
info['w'] = i + 1
info['t_diff'] = (i + 1) * self.env.dt
if u is not None:
if self.max_d is not None and self.max_t is not None:
pass
elif self.max_t is not None:
info['t'] = u * self.max_t
elif self.max_d is not None:
info['d'] = u * self.max_d
info['u'] = u
if lambda_dt is not None:
info['lambda_dt'] = lambda_dt
self.num_steps += 1
return self.cur_obs, total_reward, done, info
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self.anoise_type in ['ext_fpc']:
self.cur_force = np.zeros(3)
obs = np.concatenate([obs, self.cur_force])
if self.reset_update_obs_estimate:
self._update_obs_estimate(obs[np.newaxis, ...])
self.reset_update_obs_estimate = False
self.cur_obs = obs
return self.cur_obs
| true
| true
|
790d9c116845947669e908b57d75437bbfcf16c8
| 4,041
|
py
|
Python
|
conversions/length_conversion.py
|
NavpreetDevpuri/Python
|
7ef5ae66d777e8ed702993c6aa9270e0669cb0c6
|
[
"MIT"
] | 145,614
|
2016-07-21T05:40:05.000Z
|
2022-03-31T22:17:22.000Z
|
conversions/length_conversion.py
|
NavpreetDevpuri/Python
|
7ef5ae66d777e8ed702993c6aa9270e0669cb0c6
|
[
"MIT"
] | 3,987
|
2016-07-28T17:31:25.000Z
|
2022-03-30T23:07:46.000Z
|
conversions/length_conversion.py
|
NavpreetDevpuri/Python
|
7ef5ae66d777e8ed702993c6aa9270e0669cb0c6
|
[
"MIT"
] | 40,014
|
2016-07-26T15:14:41.000Z
|
2022-03-31T22:23:03.000Z
|
"""
Conversion of length units.
Available Units:- Metre,Kilometre,Feet,Inch,Centimeter,Yard,Foot,Mile,Millimeter
USAGE :
-> Import this file into their respective project.
-> Use the function length_conversion() for conversion of length units.
-> Parameters :
-> value : The number of from units you want to convert
-> from_type : From which type you want to convert
-> to_type : To which type you want to convert
REFERENCES :
-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer
-> Wikipedia reference: https://en.wikipedia.org/wiki/Feet
-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch
-> Wikipedia reference: https://en.wikipedia.org/wiki/Centimeter
-> Wikipedia reference: https://en.wikipedia.org/wiki/Yard
-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot
-> Wikipedia reference: https://en.wikipedia.org/wiki/Mile
-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter
"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

# Full unit names (singular, after stripping a trailing 's') -> abbreviation.
TYPE_CONVERSION = {
    "millimeter": "mm",
    "centimeter": "cm",
    "meter": "m",
    "kilometer": "km",
    "inch": "in",
    "inche": "in",  # "inches" with the trailing 's' stripped off
    "feet": "ft",
    "foot": "ft",
    "yard": "yd",
    "mile": "mi",
}

# For each abbreviation: `from_` converts the unit to meters,
# `to` converts meters back to the unit.
METRIC_CONVERSION = {
    "mm": from_to(0.001, 1000),
    "cm": from_to(0.01, 100),
    "m": from_to(1, 1),
    "km": from_to(1000, 0.001),
    "in": from_to(0.0254, 39.3701),
    "ft": from_to(0.3048, 3.28084),
    "yd": from_to(0.9144, 1.09361),
    "mi": from_to(1609.34, 0.000621371),
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Conversion between length units.
    >>> length_conversion(4, "METER", "FEET")
    13.12336
    >>> length_conversion(4, "M", "FT")
    13.12336
    >>> length_conversion(1, "meter", "kilometer")
    0.001
    >>> length_conversion(1, "kilometer", "inch")
    39370.1
    >>> length_conversion(3, "kilometer", "mile")
    1.8641130000000001
    >>> length_conversion(2, "feet", "meter")
    0.6096
    >>> length_conversion(4, "feet", "yard")
    1.333329312
    >>> length_conversion(1, "inch", "meter")
    0.0254
    >>> length_conversion(2, "inch", "mile")
    3.15656468e-05
    >>> length_conversion(2, "centimeter", "millimeter")
    20.0
    >>> length_conversion(2, "centimeter", "yard")
    0.0218722
    >>> length_conversion(4, "yard", "meter")
    3.6576
    >>> length_conversion(4, "yard", "kilometer")
    0.0036576
    >>> length_conversion(3, "foot", "meter")
    0.9144000000000001
    >>> length_conversion(3, "foot", "inch")
    36.00001944
    >>> length_conversion(4, "mile", "kilometer")
    6.43736
    >>> length_conversion(2, "miles", "InChEs")
    126719.753468
    >>> length_conversion(3, "millimeter", "centimeter")
    0.3
    >>> length_conversion(3, "mm", "in")
    0.1181103
    >>> length_conversion(4, "wrongUnit", "inch")
    Traceback (most recent call last):
    ...
    ValueError: Invalid 'from_type' value: 'wrongUnit'.
    Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi
    """
    def _normalize(unit: str) -> str:
        # Accept full names in any case (optionally plural) or abbreviations.
        stripped = unit.lower().rstrip("s")
        return TYPE_CONVERSION.get(stripped, stripped)

    unit_from = _normalize(from_type)
    unit_to = _normalize(to_type)
    for label, requested, unit in (
        ("from_type", from_type, unit_from),
        ("to_type", to_type, unit_to),
    ):
        if unit not in METRIC_CONVERSION:
            raise ValueError(
                f"Invalid {label!r} value: {requested!r}.\n"
                f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
            )
    # Convert the value to meters, then from meters to the target unit.
    return value * METRIC_CONVERSION[unit_from].from_ * METRIC_CONVERSION[unit_to].to
# Run the doctest examples embedded in length_conversion's docstring.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 32.853659
| 84
| 0.621133
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")  # (to-meters factor, from-meters factor)
# Full unit names (singular, after stripping a trailing 's') -> abbreviation.
TYPE_CONVERSION = {
    "millimeter": "mm",
    "centimeter": "cm",
    "meter": "m",
    "kilometer": "km",
    "inch": "in",
    "inche": "in",  # "inches" with the trailing 's' stripped off
    "feet": "ft",
    "foot": "ft",
    "yard": "yd",
    "mile": "mi",
}
# For each abbreviation: from_ converts the unit to meters,
# to converts meters back to the unit.
METRIC_CONVERSION = {
    "mm": from_to(0.001, 1000),
    "cm": from_to(0.01, 100),
    "m": from_to(1, 1),
    "km": from_to(1000, 0.001),
    "in": from_to(0.0254, 39.3701),
    "ft": from_to(0.3048, 3.28084),
    "yd": from_to(0.9144, 1.09361),
    "mi": from_to(1609.34, 0.000621371),
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` from one length unit to another.

    Unit names may be full names in any case (optionally plural) or the
    abbreviations listed in METRIC_CONVERSION.

    Raises:
        ValueError: if either unit name is not recognized.
    """
    new_from = from_type.lower().rstrip("s")
    new_from = TYPE_CONVERSION.get(new_from, new_from)
    new_to = to_type.lower().rstrip("s")
    new_to = TYPE_CONVERSION.get(new_to, new_to)
    if new_from not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if new_to not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    # Convert the value to meters, then from meters to the target unit.
    return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to
# Run any doctests defined in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| true
| true
|
790d9c7645afc33219e39cfaad728a2eff4993a3
| 3,474
|
py
|
Python
|
kinetick/models/position.py
|
aWFtbGVnaW9u/kinetick
|
2562a666ff57e72d1314e053db415d2873b8f71f
|
[
"Apache-2.0"
] | 1
|
2022-01-23T23:00:34.000Z
|
2022-01-23T23:00:34.000Z
|
kinetick/models/position.py
|
aWFtbGVnaW9u/kinetick
|
2562a666ff57e72d1314e053db415d2873b8f71f
|
[
"Apache-2.0"
] | null | null | null |
kinetick/models/position.py
|
aWFtbGVnaW9u/kinetick
|
2562a666ff57e72d1314e053db415d2873b8f71f
|
[
"Apache-2.0"
] | null | null | null |
from mongoengine import StringField, DateTimeField, IntField, FloatField, BooleanField, DynamicDocument
from datetime import datetime
# note: position / order / trade are used interchangeably through the app.
class Position(DynamicDocument):
    """Position data model.

    Holds information relating to either a trade, an order or a position
    (the three terms are used interchangeably throughout the app).
    """
    _tickerId = StringField(max_length=50, required=True, db_field="tickerId")
    _symbol = StringField(max_length=50, required=False, db_field="symbol")
    datetime = DateTimeField(required=True, default=datetime.utcnow)
    algo = StringField(max_length=100)
    _direction = StringField(max_length=20, choices=('LONG', 'SHORT'), db_field="direction")
    _quantity = IntField(default=0, db_field="quantity")
    entry_time = DateTimeField()
    exit_time = DateTimeField()
    exit_reason = StringField()
    order_type = StringField()  # LIMIT/MARKET
    _broker_order_id = StringField(db_field="broker_order_id")
    _variety = StringField(db_field="variety")
    market_price = FloatField()
    target = FloatField(default=0.0)
    stop = FloatField(default=0.0)
    entry_price = FloatField(default=0.0)
    exit_price = FloatField(default=0.0)
    realized_pnl = FloatField(default=0.0)
    _active = BooleanField(default=False, db_field="active")
    opt_ticker = StringField(max_length=50, required=False)
    opt_strike = FloatField(required=False)
    # FIX: both fields below previously ended with a trailing comma, which
    # made the class attribute a 1-tuple instead of a mongoengine field, and
    # opt_type used the invalid keyword `require` instead of `required`.
    opt_type = StringField(required=False)
    opt_expiry = StringField(required=False)
    sec_type = StringField(default='STK')  # TODO add enum
    underlying = StringField(required=False)
    meta = {
        'indexes': [
            {
                'fields': ['_active'],
                'sparse': True
            },
            {
                'fields': ['algo'],
                'sparse': True
            }
        ]
    }
    def open_position(self):
        """Mark this position as open; defaults entry_time to now.

        Raises:
            Exception: if direction or quantity has not been set.
        """
        if self._direction is None:
            raise Exception("no direction provided")
        if self._quantity is None:
            raise Exception("no quantity provided")
        if self.entry_time is None:
            self.entry_time = datetime.now()
        self._active = True
    def close_position(self):
        """Mark this position as closed; defaults exit_time to now.

        Raises:
            Exception: if direction or quantity has not been set.
        """
        if self._direction is None:
            raise Exception("no direction provided")
        if self._quantity is None:
            raise Exception("no quantity provided")
        if self.exit_time is None:
            self.exit_time = datetime.now()
        self._active = False
    def pnl(self):
        """Return profit/loss for the position from entry/exit prices.

        The absolute price move times quantity, negated when the exit is on
        the losing side of the direction (stop-loss hit).
        """
        pnl = abs(self.exit_price - self.entry_price)
        sl_hit = False
        if self.exit_price <= self.entry_price and self._direction == "LONG":
            sl_hit = True
        elif self.exit_price >= self.entry_price and self._direction == "SHORT":
            sl_hit = True
        pnl = -pnl if sl_hit else pnl
        pnl = pnl * self._quantity
        return pnl
    @property
    def active(self):
        # Whether the position is currently open.
        return self._active
    @property
    def ticker_id(self):
        return self._tickerId
    @property
    def symbol(self):
        return self._symbol
    @property
    def direction(self):
        return self._direction
    @property
    def quantity(self):
        return self._quantity
    @property
    def broker_order_id(self):
        return self._broker_order_id
    @property
    def variety(self):
        return self._variety
    @staticmethod
    def find(algo, **query) -> list:
        """Query positions belonging to `algo`, with extra filters in `query`."""
        return Position.objects(algo=algo, **query)
| 31.297297
| 103
| 0.637018
|
from mongoengine import StringField, DateTimeField, IntField, FloatField, BooleanField, DynamicDocument
from datetime import datetime
class Position(DynamicDocument):
    """Position data model.

    Holds information relating to either a trade, an order or a position.
    """
    _tickerId = StringField(max_length=50, required=True, db_field="tickerId")
    _symbol = StringField(max_length=50, required=False, db_field="symbol")
    datetime = DateTimeField(required=True, default=datetime.utcnow)
    algo = StringField(max_length=100)
    _direction = StringField(max_length=20, choices=('LONG', 'SHORT'), db_field="direction")
    _quantity = IntField(default=0, db_field="quantity")
    entry_time = DateTimeField()
    exit_time = DateTimeField()
    exit_reason = StringField()
    order_type = StringField()  # e.g. LIMIT/MARKET — confirm against broker layer
    _broker_order_id = StringField(db_field="broker_order_id")
    _variety = StringField(db_field="variety")
    market_price = FloatField()
    target = FloatField(default=0.0)
    stop = FloatField(default=0.0)
    entry_price = FloatField(default=0.0)
    exit_price = FloatField(default=0.0)
    realized_pnl = FloatField(default=0.0)
    _active = BooleanField(default=False, db_field="active")
    opt_ticker = StringField(max_length=50, required=False)
    opt_strike = FloatField(required=False)
    # NOTE(review): the trailing commas below make opt_type/opt_expiry
    # 1-tuples rather than mongoengine fields, and `require` is not the
    # mongoengine keyword (should be `required`) — confirm and fix.
    opt_type = StringField(require=False),
    opt_expiry = StringField(required=False),
    sec_type = StringField(default='STK')
    underlying = StringField(required=False)
    meta = {
        'indexes': [
            {
                'fields': ['_active'],
                'sparse': True
            },
            {
                'fields': ['algo'],
                'sparse': True
            }
        ]
    }
    def open_position(self):
        """Mark this position as open; defaults entry_time to now."""
        if self._direction is None:
            raise Exception("no direction provided")
        if self._quantity is None:
            raise Exception("no quantity provided")
        if self.entry_time is None:
            self.entry_time = datetime.now()
        self._active = True
    def close_position(self):
        """Mark this position as closed; defaults exit_time to now."""
        if self._direction is None:
            raise Exception("no direction provided")
        if self._quantity is None:
            raise Exception("no quantity provided")
        if self.exit_time is None:
            self.exit_time = datetime.now()
        self._active = False
    def pnl(self):
        """Return profit/loss: abs price move times quantity, negated when
        the exit is on the losing side of the direction."""
        pnl = abs(self.exit_price - self.entry_price)
        sl_hit = False
        if self.exit_price <= self.entry_price and self._direction == "LONG":
            sl_hit = True
        elif self.exit_price >= self.entry_price and self._direction == "SHORT":
            sl_hit = True
        pnl = -pnl if sl_hit else pnl
        pnl = pnl * self._quantity
        return pnl
    @property
    def active(self):
        # Whether the position is currently open.
        return self._active
    @property
    def ticker_id(self):
        return self._tickerId
    @property
    def symbol(self):
        return self._symbol
    @property
    def direction(self):
        return self._direction
    @property
    def quantity(self):
        return self._quantity
    @property
    def broker_order_id(self):
        return self._broker_order_id
    @property
    def variety(self):
        return self._variety
    @staticmethod
    def find(algo, **query) -> list:
        """Query positions belonging to `algo`, with extra filters in `query`."""
        return Position.objects(algo=algo, **query)
| true
| true
|
790d9e6dfa929eb30227ee925a46f34c1ce1594a
| 1,642
|
py
|
Python
|
RK45 - Copy.py
|
Mahdi-Asadi/python_thesis
|
6cb1dbe24fcf9133e971e64c91e1dde234250da9
|
[
"MIT"
] | null | null | null |
RK45 - Copy.py
|
Mahdi-Asadi/python_thesis
|
6cb1dbe24fcf9133e971e64c91e1dde234250da9
|
[
"MIT"
] | null | null | null |
RK45 - Copy.py
|
Mahdi-Asadi/python_thesis
|
6cb1dbe24fcf9133e971e64c91e1dde234250da9
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import RK45
f_out = "E:\\1\\P_rk4.txt" # address file for output
# NOTE(review): f2 is opened for writing but never written to or closed in
# the visible code — confirm whether the commented output code was meant to
# be active.
f2 = open(f_out,"w+")
def du_dx(x, y, wa=1, wp=0.6, g=0.6, n=1):
    """Right-hand side of the 4-state coupled linear ODE system.

    Generalized: the previously hard-coded physical constants are now
    keyword parameters with the original values as defaults, so existing
    callers (e.g. scipy's RK45, which passes only (t, y)) are unaffected.

    Args:
        x: independent variable (time); unused, but required by the solver API.
        y: state vector of length 4.
        wa: atomic frequency (default 1).
        wp: field frequency (default 0.6).
        g: coupling strength (default 0.6).
        n: number of photons (default 1).

    Returns:
        List of the four derivatives [dy1/dx, dy2/dx, dy3/dx, dy4/dx].
    """
    A = n * wp + (wa / 2)
    B = (1 + n) * wp - (wa / 2)
    C = np.sqrt(n + 1)
    dydx_1 = A * y[1] + g * C * y[3]
    dydx_2 = -A * y[0] - g * C * y[2]
    dydx_3 = B * y[3] + g * C * y[1]
    dydx_4 = -B * y[2] - g * C * y[0]
    return [dydx_1, dydx_2, dydx_3, dydx_4]
y_0 = (1/np.sqrt(2),0,1/np.sqrt(2),0) # initial value
# print("y_0 = ",y_0)
m = 1000  # number of time-grid points
ti = 0    # start time
tf = 30   # end time
h = tf/m  # step size
tspan = np.arange(ti,tf,h)
print(h)
# NOTE(review): t0 and t_bound are both set to i, so each RK45 instance has a
# zero-length integration interval and never advances; presumably t_bound
# should be the end of the interval (e.g. i + h) — confirm intent.
for i in tspan:
    print(i)
    v = RK45(du_dx,t0 =i,y0 = y_0,t_bound=i) # 4 answer of dydx_1,...,dydx_4
    print(v.y[0:])
# print(type(v))
# print("v.t[0] = ",v.t[0])
# print(len(v.t))
# print("------------------")
# print(v.y)
# print(len(v.t))
# print("------------------")
# y_1 = v.y[:,0]
# print("y_1 = ",y_1)
# print("------------------")
# y_2 = v.y[0,:]
# print("y_2 = ",y_2)
# print("------------------")
# y_3 = v.y[0,0]
# print("y_3 = ",y_3)
# print("------------------")
# # --------------------------
# # print in file
# count = 0
# while count<1000:
# y_i = v.y[:,count]
# f2.write(str(v.t[count]))
# f2.write(" ")
# for i in y_i:
# i = round(i,4)
# i = str(i)
# f2.write(i)
# f2.write(len(i)*" ")
# f2.write("\n")
# count = count+1
# # y_prime = u_s[:,1]
# # print(y_prime)
# plt.plot(v.t, v.y[0,:],'-', label='r(t)')
# plt.xlabel("x")
# plt.ylabel("y")
# plt.show()
| 23.457143
| 76
| 0.476248
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import RK45
f_out = "E:\\1\\P_rk4.txt"  # output file path
# NOTE(review): f2 is opened for writing but never written to or closed in
# the visible code — confirm intent.
f2 = open(f_out,"w+")
def du_dx(x, y):
    """Return the four state derivatives of the coupled ODE system at (x, y).

    `x` is the independent variable (unused but required by the solver API);
    `y` is the 4-element state vector.
    """
    wa = 1    # atomic frequency
    wp = 0.6  # field frequency
    g = 0.6   # coupling strength
    n = 1     # number of photons
    a_coef = n * wp + (wa / 2)
    b_coef = (1 + n) * wp - (wa / 2)
    root = np.sqrt(n + 1)
    return [
        a_coef * y[1] + g * root * y[3],
        -a_coef * y[0] - g * root * y[2],
        b_coef * y[3] + g * root * y[1],
        -b_coef * y[2] - g * root * y[0],
    ]
y_0 = (1/np.sqrt(2),0,1/np.sqrt(2),0)  # initial state vector
m = 1000  # number of time-grid points
ti = 0    # start time
tf = 30   # end time
h = tf/m  # step size
tspan = np.arange(ti,tf,h)
print(h)
# NOTE(review): t0 == t_bound gives each RK45 instance a zero-length
# integration interval, so it never advances — confirm intent.
for i in tspan:
    print(i)
    v = RK45(du_dx,t0 =i,y0 = y_0,t_bound=i)
    print(v.y[0:])
| true
| true
|
790d9e8d01cd1e437033f450863d4865f44cd735
| 22,915
|
py
|
Python
|
metaflow/datatools/s3.py
|
oliverholworthy/metaflow
|
378e718a0091d1189e92f3027e3a52c659be59bc
|
[
"Apache-2.0"
] | 2
|
2020-06-07T13:52:03.000Z
|
2020-08-17T17:05:06.000Z
|
metaflow/datatools/s3.py
|
oliverholworthy/metaflow
|
378e718a0091d1189e92f3027e3a52c659be59bc
|
[
"Apache-2.0"
] | null | null | null |
metaflow/datatools/s3.py
|
oliverholworthy/metaflow
|
378e718a0091d1189e92f3027e3a52c659be59bc
|
[
"Apache-2.0"
] | 1
|
2020-03-12T11:12:38.000Z
|
2020-03-12T11:12:38.000Z
|
import os
import sys
import time
import shutil
import random
import subprocess
from itertools import starmap
from tempfile import mkdtemp, NamedTemporaryFile
from .. import current, FlowSpec
from ..metaflow_config import DATATOOLS_S3ROOT
from ..util import is_stringish,\
to_bytes,\
to_unicode,\
to_fileobj,\
url_quote,\
url_unquote
from ..exception import MetaflowException
from ..debug import debug
from . import s3op
try:
# python2
from urlparse import urlparse
except:
# python3
from urllib.parse import urlparse
from metaflow.datastore.util.s3util import get_s3_client
from botocore.exceptions import ClientError
# Number of attempts for boto/s3op operations before giving up (with
# exponential backoff + jitter between attempts).
NUM_S3OP_RETRIES = 8
# Exception hierarchy for S3 failures; `headline` is the short display title
# used by MetaflowException.
class MetaflowS3InvalidObject(MetaflowException):
    headline = 'Not a string-like object'
class MetaflowS3URLException(MetaflowException):
    headline = 'Invalid address'
class MetaflowS3Exception(MetaflowException):
    headline = 'S3 access failed'
class MetaflowS3NotFound(MetaflowException):
    headline = 'S3 object not found'
class MetaflowS3AccessDenied(MetaflowException):
    headline = 'S3 access denied'
class S3Object(object):
    """
    This object represents a path or an object in S3,
    with an optional local copy.
    Get or list calls return one or more of S3Objects.
    """
    def __init__(self, prefix, url, path, size=None):
        # all fields of S3Object should return a unicode object
        def ensure_unicode(x):
            return None if x is None else to_unicode(x)
        prefix, url, path = map(ensure_unicode, (prefix, url, path))
        self._size = size
        self._url = url
        self._path = path
        self._key = None
        # A local copy overrides any size given by the caller.
        if path:
            self._size = os.stat(self._path).st_size
        if prefix is None or prefix == url:
            self._key = url
            self._prefix = None
        else:
            # Key is the URL suffix relative to the requested prefix.
            self._key = url[len(prefix.rstrip('/')) + 1:].rstrip('/')
            self._prefix = prefix
    @property
    def exists(self):
        """
        Does this key correspond to an object in S3?
        """
        return self._size is not None
    @property
    def downloaded(self):
        """
        Has this object been downloaded?
        """
        return bool(self._path)
    @property
    def url(self):
        """
        S3 location of the object
        """
        return self._url
    @property
    def prefix(self):
        """
        Prefix requested that matches the object.
        """
        return self._prefix
    @property
    def key(self):
        """
        Key corresponds to the key given to the get call that produced
        this object. This may be a full S3 URL or a suffix based on what
        was requested.
        """
        return self._key
    @property
    def path(self):
        """
        Path to the local file corresponding to the object downloaded.
        This file gets deleted automatically when a S3 scope exits.
        Returns None if this S3Object has not been downloaded.
        """
        return self._path
    @property
    def blob(self):
        """
        Contents of the object as a byte string.
        Returns None if this S3Object has not been downloaded.
        """
        if self._path:
            with open(self._path, 'rb') as f:
                return f.read()
    @property
    def text(self):
        """
        Contents of the object as a Unicode string.
        Returns None if this S3Object has not been downloaded.
        """
        if self._path:
            return self.blob.decode('utf-8', errors='replace')
    @property
    def size(self):
        """
        Size of the object in bytes.
        Returns None if the key does not correspond to an object in S3.
        """
        return self._size
    def __str__(self):
        if self._path:
            return '<S3Object %s (%d bytes, local)>' % (self._url, self._size)
        elif self._size is not None:
            # FIX: compare against None explicitly (matching `exists`) so that
            # zero-byte objects in S3 are not reported as nonexistent.
            return '<S3Object %s (%d bytes, in S3)>' % (self._url, self._size)
        else:
            return '<S3Object %s (object does not exist)>' % self._url
    def __repr__(self):
        return str(self)
class S3(object):
    """S3 client scoped to a temporary directory; transfers are delegated to
    the s3op helper (run as a subprocess) with retries."""
    def __init__(self,
                 tmproot='.',
                 bucket=None,
                 prefix=None,
                 run=None,
                 s3root=None):
        """
        Initialize a new context for S3 operations. This object is based used as
        a context manager for a with statement.
        There are two ways to initialize this object depending whether you want
        to bind paths to a Metaflow run or not.
        1. With a run object:
            run: (required) Either a FlowSpec object (typically 'self') or a
                 Run object corresponding to an existing Metaflow run. These
                 are used to add a version suffix in the S3 path.
            bucket: (optional) S3 bucket.
            prefix: (optional) S3 prefix.
        2. Without a run object:
            s3root: (optional) An S3 root URL for all operations. If this is
                    not specified, all operations require a full S3 URL.
        These options are supported in both the modes:
            tmproot: (optional) Root path for temporary files (default: '.')
        """
        if run:
            # 1. use a (current) run ID with optional customizations
            parsed = urlparse(DATATOOLS_S3ROOT)
            if not bucket:
                bucket = parsed.netloc
            if not prefix:
                prefix = parsed.path
            if isinstance(run, FlowSpec):
                if current.is_running_flow:
                    prefix = os.path.join(prefix,
                                          current.flow_name,
                                          current.run_id)
                else:
                    raise MetaflowS3URLException(\
                        "Initializing S3 with a FlowSpec outside of a running "
                        "flow is not supported.")
            else:
                prefix = os.path.join(prefix, run.parent.id, run.id)
            self._s3root = u's3://%s' % os.path.join(bucket, prefix.strip('/'))
        elif s3root:
            # 2. use an explicit S3 prefix
            parsed = urlparse(to_unicode(s3root))
            if parsed.scheme != 's3':
                # FIX: corrected typo in the user-facing message ("prefxied").
                raise MetaflowS3URLException(\
                    "s3root needs to be an S3 URL prefixed with s3://.")
            self._s3root = s3root.rstrip('/')
        else:
            # 3. use the client only with full URLs
            self._s3root = None
        self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def close(self):
        """
        Delete all temporary files downloaded in this context.
        """
        try:
            if not debug.s3client:
                shutil.rmtree(self._tmpdir)
        # FIX: best-effort cleanup, but don't swallow KeyboardInterrupt /
        # SystemExit as the previous bare `except:` did.
        except Exception:
            pass
    def _url(self, key):
        # NOTE: All URLs are handled as Unicode objects (unicde in py2,
        # string in py3) internally. We expect that all URLs passed to this
        # class as either Unicode or UTF-8 encoded byte strings. All URLs
        # returned are Unicode.
        if self._s3root is None:
            parsed = urlparse(to_unicode(key))
            if parsed.scheme == 's3' and parsed.path:
                return key
            else:
                if current.is_running_flow:
                    raise MetaflowS3URLException(\
                        "Specify S3(run=self) when you use S3 inside a running "
                        "flow. Otherwise you have to use S3 with full "
                        "s3:// urls.")
                else:
                    raise MetaflowS3URLException(\
                        "Initialize S3 with an 's3root' or 'run' if you don't "
                        "want to specify full s3:// urls.")
        elif key:
            if key.startswith('s3://'):
                raise MetaflowS3URLException(\
                    "Don't use absolute S3 URLs when the S3 client is "
                    "initialized with a prefix. URL: %s" % key)
            return os.path.join(self._s3root, key)
        else:
            return self._s3root
    def list_paths(self, keys=None):
        """
        List the next level of paths in S3. If multiple keys are
        specified, listings are done in parallel. The returned
        S3Objects have .exists == False if the url refers to a
        prefix, not an existing S3 object.
        Args:
            keys: (required) a list of suffixes for paths to list.
        Returns:
            a list of S3Objects (not downloaded)
        Example:
        Consider the following paths in S3:
        A/B/C
        D/E
        In this case, list_paths(['A', 'D']), returns ['A/B', 'D/E']. The
        first S3Object has .exists == False, since it does not refer to an
        object in S3. It is just a prefix.
        """
        def _list(keys):
            if keys is None:
                keys = [None]
            urls = (self._url(key).rstrip('/') + '/' for key in keys)
            res = self._read_many_files('list', urls)
            for s3prefix, s3url, size in res:
                if size:
                    yield s3prefix, s3url, None, int(size)
                else:
                    yield s3prefix, s3url, None, None
        return list(starmap(S3Object, _list(keys)))
    def list_recursive(self, keys=None):
        """
        List objects in S3 recursively. If multiple keys are
        specified, listings are done in parallel. The returned
        S3Objects have always .exists == True, since they refer
        to existing objects in S3.
        Args:
            keys: (required) a list of suffixes for paths to list.
        Returns:
            a list of S3Objects (not downloaded)
        Example:
        Consider the following paths in S3:
        A/B/C
        D/E
        In this case, list_recursive(['A', 'D']), returns ['A/B/C', 'D/E'].
        """
        def _list(keys):
            if keys is None:
                keys = [None]
            res = self._read_many_files('list',
                                        map(self._url, keys),
                                        recursive=True)
            for s3prefix, s3url, size in res:
                yield s3prefix, s3url, None, int(size)
        return list(starmap(S3Object, _list(keys)))
    def get(self, key=None, return_missing=False):
        """
        Get a single object from S3.
        Args:
            key: (optional) a suffix identifying the object.
            return_missing: (optional, default False) if set to True, do
                            not raise an exception for a missing key but
                            return it as an S3Object with .exists == False.
        Returns:
            an S3Object corresponding to the object requested.
        """
        url = self._url(key)
        src = urlparse(url)
        def _download(s3, tmp):
            s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
            return url
        try:
            path = self._one_boto_op(_download, url)
        except MetaflowS3NotFound:
            if return_missing:
                path = None
            else:
                raise
        return S3Object(self._s3root, url, path)
    def get_many(self, keys, return_missing=False):
        """
        Get many objects from S3 in parallel.
        Args:
            keys: (required) a list of suffixes identifying the objects.
            return_missing: (optional, default False) if set to True, do
                            not raise an exception for a missing key but
                            return it as an S3Object with .exists == False.
        Returns:
            a list of S3Objects corresponding to the objects requested.
        """
        def _get():
            res = self._read_many_files('get',
                                        map(self._url, keys),
                                        allow_missing=return_missing,
                                        verify=True,
                                        verbose=False,
                                        listing=True)
            for s3prefix, s3url, fname in res:
                if fname:
                    yield self._s3root, s3url, os.path.join(self._tmpdir, fname)
                else:
                    # missing entries per return_missing=True
                    yield self._s3root, s3prefix, None, None
        return list(starmap(S3Object, _get()))
    def get_recursive(self, keys):
        """
        Get many objects from S3 recursively in parallel.
        Args:
            keys: (required) a list of suffixes for paths to download
                  recursively.
        Returns:
            a list of S3Objects corresponding to the objects requested.
        """
        def _get():
            res = self._read_many_files('get',
                                        map(self._url, keys),
                                        recursive=True,
                                        verify=True,
                                        verbose=False,
                                        listing=True)
            for s3prefix, s3url, fname in res:
                yield s3prefix, s3url, os.path.join(self._tmpdir, fname)
        return list(starmap(S3Object, _get()))
    def get_all(self):
        """
        Get all objects from S3 recursively (in parallel). This request
        only works if S3 is initialized with a run or a s3root prefix.
        Returns:
            a list of S3Objects corresponding to the objects requested.
        """
        if self._s3root is None:
            raise MetaflowS3URLException(\
                "Can't get_all() when S3 is initialized without a prefix")
        else:
            return self.get_recursive([None])
    def put(self, key, obj, overwrite=True):
        """
        Put an object to S3.
        Args:
            key: (required) suffix for the object.
            obj: (required) a bytes, string, or a unicode object to
                 be stored in S3.
            overwrite: (optional) overwrites the key with obj, if it exists
        Returns:
            an S3 URL corresponding to the object stored.
        """
        if not is_stringish(obj):
            raise MetaflowS3InvalidObject(\
                "Object corresponding to the key '%s' is not a string "
                "or a bytes object." % key)
        url = self._url(key)
        src = urlparse(url)
        def _upload(s3, tmp):
            # we need to recreate the StringIO object for retries since
            # apparently upload_fileobj will/may close() it
            blob = to_fileobj(obj)
            s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))
        if overwrite:
            self._one_boto_op(_upload, url)
            return url
        else:
            # Upload only if the key does not already exist (HEAD first).
            def _head(s3, tmp):
                s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))
            try:
                self._one_boto_op(_head, url)
            except MetaflowS3NotFound:
                self._one_boto_op(_upload, url)
            return url
    def put_many(self, key_objs, overwrite=True):
        """
        Put objects to S3 in parallel.
        Args:
            key_objs: (required) an iterator of (key, value) tuples. Value must
                      be a string, bytes, or a unicode object.
            overwrite: (optional) overwrites the key with obj, if it exists
        Returns:
            a list of (key, S3 URL) tuples corresponding to the files sent.
        """
        def _store():
            for key, obj in key_objs:
                if is_stringish(obj):
                    with NamedTemporaryFile(dir=self._tmpdir,
                                            delete=False,
                                            mode='wb',
                                            prefix='metaflow.s3.put_many.') as tmp:
                        tmp.write(to_bytes(obj))
                        tmp.close()
                        yield tmp.name, self._url(key), key
                else:
                    raise MetaflowS3InvalidObject(
                        "Object corresponding to the key '%s' is not a string "
                        "or a bytes object." % key)
        return self._put_many_files(_store(), overwrite)
    def put_files(self, key_paths, overwrite=True):
        """
        Put files to S3 in parallel.
        Args:
            key_paths: (required) an iterator of (key, path) tuples.
            overwrite: (optional) overwrites the key with obj, if it exists
        Returns:
            a list of (key, S3 URL) tuples corresponding to the files sent.
        """
        def _check():
            for key, path in key_paths:
                if not os.path.exists(path):
                    raise MetaflowS3NotFound("Local file not found: %s" % path)
                yield path, self._url(key), key
        return self._put_many_files(_check(), overwrite)
    def _one_boto_op(self, op, url):
        # Run a single boto operation with retries; `op` receives the boto
        # client and a fresh temp-file path and may return a value.
        error = ''
        for i in range(NUM_S3OP_RETRIES):
            tmp = NamedTemporaryFile(dir=self._tmpdir,
                                     prefix='metaflow.s3.one_file.',
                                     delete=False)
            try:
                s3, _ = get_s3_client()
                op(s3, tmp.name)
                return tmp.name
            except ClientError as err:
                error_code = s3op.normalize_client_error(err)
                if error_code == 404:
                    raise MetaflowS3NotFound(url)
                elif error_code == 403:
                    raise MetaflowS3AccessDenied(url)
                elif error_code == 'NoSuchBucket':
                    raise MetaflowS3URLException("Specified S3 bucket doesn't exist.")
                error = str(err)
            except Exception as ex:
                # TODO specific error message for out of disk space
                error = str(ex)
            # NOTE(review): the temp file is only unlinked on the retry path;
            # 404/403 raises above leave it behind until close() — confirm.
            os.unlink(tmp.name)
            # add some jitter to make sure retries are not synchronized
            time.sleep(2**i + random.randint(0, 10))
        raise MetaflowS3Exception("S3 operation failed.\n"\
                                  "Key requested: %s\n"\
                                  "Error: %s" % (url, error))
    # NOTE: re: _read_many_files and _put_many_files
    # All file IO is through binary files - we write bytes, we read
    # bytes. All inputs and outputs from these functions are Unicode.
    # Conversion between bytes and unicode is done through url_quote
    # and url_unquote.
    def _read_many_files(self, op, prefixes, **options):
        # FIX: materialize `prefixes` so the `prefixes[0]` reference in the
        # error path works even when callers pass a generator or map object
        # (as list_paths / get_many / get_recursive do). Mirrors the
        # equivalent list() in _put_many_files.
        prefixes = list(prefixes)
        with NamedTemporaryFile(dir=self._tmpdir,
                                mode='wb',
                                delete=not debug.s3client,
                                prefix='metaflow.s3.inputs.') as inputfile:
            inputfile.write(b'\n'.join(map(url_quote, prefixes)))
            inputfile.flush()
            stdout, stderr = self._s3op_with_retries(op,
                                                     inputs=inputfile.name,
                                                     **options)
            if stderr:
                raise MetaflowS3Exception("Getting S3 files failed.\n"\
                                          "First prefix requested: %s\n"\
                                          "Error: %s" % (prefixes[0], stderr))
            else:
                for line in stdout.splitlines():
                    yield tuple(map(url_unquote, line.strip(b'\n').split(b' ')))
    def _put_many_files(self, url_files, overwrite):
        url_files = list(url_files)
        with NamedTemporaryFile(dir=self._tmpdir,
                                mode='wb',
                                delete=not debug.s3client,
                                prefix='metaflow.s3.put_inputs.') as inputfile:
            lines = (b' '.join(map(url_quote, (os.path.realpath(local), url)))
                     for local, url, _ in url_files)
            inputfile.write(b'\n'.join(lines))
            inputfile.flush()
            stdout, stderr = self._s3op_with_retries('put',
                                                     filelist=inputfile.name,
                                                     verbose=False,
                                                     overwrite=overwrite,
                                                     listing=True)
            if stderr:
                raise MetaflowS3Exception("Uploading S3 files failed.\n"\
                                          "First key: %s\n"\
                                          "Error: %s" % (url_files[0][2],
                                                         stderr))
            else:
                urls = set()
                for line in stdout.splitlines():
                    url, _, _ = map(url_unquote, line.strip(b'\n').split(b' '))
                    urls.add(url)
                return [(key, url) for _, url, key in url_files if url in urls]
    def _s3op_with_retries(self, mode, **options):
        # Build the s3op command line; bool options become --flag/--no-flag.
        cmdline = [sys.executable, os.path.abspath(s3op.__file__), mode]
        for key, value in options.items():
            key = key.replace('_', '-')
            if isinstance(value, bool):
                if value:
                    cmdline.append('--%s' % key)
                else:
                    cmdline.append('--no-%s' % key)
            else:
                cmdline.extend(('--%s' % key, value))
        for i in range(NUM_S3OP_RETRIES):
            with NamedTemporaryFile(dir=self._tmpdir,
                                    mode='wb+',
                                    delete=not debug.s3client,
                                    prefix='metaflow.s3op.stderr') as stderr:
                try:
                    debug.s3client_exec(cmdline)
                    stdout = subprocess.check_output(cmdline,
                                                     cwd=self._tmpdir,
                                                     stderr=stderr.file)
                    return stdout, None
                except subprocess.CalledProcessError as ex:
                    stderr.seek(0)
                    err_out = stderr.read().decode('utf-8', errors='replace')
                    stderr.seek(0)
                    if ex.returncode == s3op.ERROR_URL_NOT_FOUND:
                        raise MetaflowS3NotFound(err_out)
                    elif ex.returncode == s3op.ERROR_URL_ACCESS_DENIED:
                        raise MetaflowS3AccessDenied(err_out)
            time.sleep(2**i + random.randint(0, 10))
        return None, err_out
| 35.472136
| 86
| 0.514728
|
import os
import sys
import time
import shutil
import random
import subprocess
from itertools import starmap
from tempfile import mkdtemp, NamedTemporaryFile
from .. import current, FlowSpec
from ..metaflow_config import DATATOOLS_S3ROOT
from ..util import is_stringish,\
to_bytes,\
to_unicode,\
to_fileobj,\
url_quote,\
url_unquote
from ..exception import MetaflowException
from ..debug import debug
from . import s3op
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
from metaflow.datastore.util.s3util import get_s3_client
from botocore.exceptions import ClientError
# Number of attempts for boto/s3op operations before giving up.
NUM_S3OP_RETRIES = 8
# Exception hierarchy for S3 failures; `headline` is the short display title
# used by MetaflowException.
class MetaflowS3InvalidObject(MetaflowException):
    headline = 'Not a string-like object'
class MetaflowS3URLException(MetaflowException):
    headline = 'Invalid address'
class MetaflowS3Exception(MetaflowException):
    headline = 'S3 access failed'
class MetaflowS3NotFound(MetaflowException):
    headline = 'S3 object not found'
class MetaflowS3AccessDenied(MetaflowException):
    headline = 'S3 access denied'
class S3Object(object):
    """Represents a path or an object in S3, with an optional local copy."""
    def __init__(self, prefix, url, path, size=None):
        # all fields of S3Object should return a unicode object
        def ensure_unicode(x):
            return None if x is None else to_unicode(x)
        prefix, url, path = map(ensure_unicode, (prefix, url, path))
        self._size = size
        self._url = url
        self._path = path
        self._key = None
        # A local copy overrides any size given by the caller.
        if path:
            self._size = os.stat(self._path).st_size
        if prefix is None or prefix == url:
            self._key = url
            self._prefix = None
        else:
            # Key is the URL suffix relative to the requested prefix.
            self._key = url[len(prefix.rstrip('/')) + 1:].rstrip('/')
            self._prefix = prefix
    @property
    def exists(self):
        """Does this key correspond to an object in S3?"""
        return self._size is not None
    @property
    def downloaded(self):
        """Has this object been downloaded locally?"""
        return bool(self._path)
    @property
    def url(self):
        """S3 location of the object."""
        return self._url
    @property
    def prefix(self):
        """Prefix requested that matches the object."""
        return self._prefix
    @property
    def key(self):
        """Key as given to the call that produced this object (full URL or suffix)."""
        return self._key
    @property
    def path(self):
        """Local file path of the download, or None if not downloaded."""
        return self._path
    @property
    def blob(self):
        """Contents as bytes, or None if not downloaded."""
        if self._path:
            with open(self._path, 'rb') as f:
                return f.read()
    @property
    def text(self):
        """Contents decoded as UTF-8 (replacing errors), or None if not downloaded."""
        if self._path:
            return self.blob.decode('utf-8', errors='replace')
    @property
    def size(self):
        """Size in bytes, or None if the key has no S3 object."""
        return self._size
    def __str__(self):
        if self._path:
            return '<S3Object %s (%d bytes, local)>' % (self._url, self._size)
        # NOTE(review): a zero-byte object in S3 (size == 0) falls through to
        # the "does not exist" branch here while `exists` reports True —
        # confirm whether this should be `self._size is not None`.
        elif self._size:
            return '<S3Object %s (%d bytes, in S3)>' % (self._url, self._size)
        else:
            return '<S3Object %s (object does not exist)>' % self._url
    def __repr__(self):
        return str(self)
class S3(object):
    """Higher-level S3 client used by Metaflow's datatools.

    The client can be scoped in three ways:

      * ``S3(run=run_or_flow)``: keys are prefixed with the flow name and
        run id under ``DATATOOLS_S3ROOT`` (or the given bucket/prefix).
      * ``S3(s3root='s3://bucket/prefix')``: keys are relative to the root.
      * ``S3()``: no prefix; only full ``s3://`` URLs are accepted.

    Downloaded files are staged in a temporary directory under ``tmproot``
    which is deleted by :meth:`close`; use the instance as a context
    manager to guarantee cleanup.
    """

    def __init__(self,
                 tmproot='.',
                 bucket=None,
                 prefix=None,
                 run=None,
                 s3root=None):
        if run:
            # run scoping: keys live under DATATOOLS_S3ROOT (or the given
            # bucket/prefix overrides), further prefixed by flow/run ids
            parsed = urlparse(DATATOOLS_S3ROOT)
            if not bucket:
                bucket = parsed.netloc
            if not prefix:
                prefix = parsed.path
            if isinstance(run, FlowSpec):
                if current.is_running_flow:
                    prefix = os.path.join(prefix,
                                          current.flow_name,
                                          current.run_id)
                else:
                    raise MetaflowS3URLException(
                        "Initializing S3 with a FlowSpec outside of a running "
                        "flow is not supported.")
            else:
                # a Run object from the client API
                prefix = os.path.join(prefix, run.parent.id, run.id)
            self._s3root = u's3://%s' % os.path.join(bucket, prefix.strip('/'))
        elif s3root:
            # prefix-based scoping with an explicit root URL
            parsed = urlparse(to_unicode(s3root))
            if parsed.scheme != 's3':
                raise MetaflowS3URLException(
                    "s3root needs to be an S3 URL prefixed with s3://.")
            self._s3root = s3root.rstrip('/')
        else:
            # no scoping: every operation requires a full s3:// url
            self._s3root = None
        self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Delete the local temporary directory used for downloads."""
        try:
            if not debug.s3client:
                # keep the tmpdir around when s3client debugging is enabled
                shutil.rmtree(self._tmpdir)
        except Exception:
            # best-effort cleanup: never let teardown mask the real error
            pass

    def _url(self, key):
        """Map a user-supplied key to a full S3 URL, validating the mode.

        With no s3root configured, only absolute s3:// URLs are accepted;
        with an s3root, keys must be relative and are joined to the root.
        Raises MetaflowS3URLException on a mode/key mismatch.
        """
        if self._s3root is None:
            parsed = urlparse(to_unicode(key))
            if parsed.scheme == 's3' and parsed.path:
                return key
            else:
                if current.is_running_flow:
                    raise MetaflowS3URLException(
                        "Specify S3(run=self) when you use S3 inside a running "
                        "flow. Otherwise you have to use S3 with full "
                        "s3:// urls.")
                else:
                    raise MetaflowS3URLException(
                        "Initialize S3 with an 's3root' or 'run' if you don't "
                        "want to specify full s3:// urls.")
        elif key:
            if key.startswith('s3://'):
                raise MetaflowS3URLException(
                    "Don't use absolute S3 URLs when the S3 client is "
                    "initialized with a prefix. URL: %s" % key)
            return os.path.join(self._s3root, key)
        else:
            # no key given: refer to the root itself
            return self._s3root

    def list_paths(self, keys=None):
        """Non-recursive listing of the immediate children of each key
        (default: the configured root). Returns a list of S3Objects;
        leaf objects carry a size, common prefixes have size None."""
        def _list(keys):
            if keys is None:
                keys = [None]
            urls = (self._url(key).rstrip('/') + '/' for key in keys)
            res = self._read_many_files('list', urls)
            for s3prefix, s3url, size in res:
                if size:
                    yield s3prefix, s3url, None, int(size)
                else:
                    # common prefix ("directory"), not a leaf object
                    yield s3prefix, s3url, None, None

        return list(starmap(S3Object, _list(keys)))

    def list_recursive(self, keys=None):
        """Recursive listing under each key (default: the configured
        root). Returns a list of S3Objects, all with sizes."""
        def _list(keys):
            if keys is None:
                keys = [None]
            res = self._read_many_files('list',
                                        map(self._url, keys),
                                        recursive=True)
            for s3prefix, s3url, size in res:
                yield s3prefix, s3url, None, int(size)

        return list(starmap(S3Object, _list(keys)))

    def get(self, key=None, return_missing=False):
        """Download a single object. Returns an S3Object. A missing key
        raises MetaflowS3NotFound unless return_missing=True, in which
        case an S3Object with exists == False is returned."""
        url = self._url(key)
        src = urlparse(url)

        def _download(s3, tmp):
            s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
            return url

        try:
            path = self._one_boto_op(_download, url)
        except MetaflowS3NotFound:
            if return_missing:
                path = None
            else:
                raise
        return S3Object(self._s3root, url, path)

    def get_many(self, keys, return_missing=False):
        """Download many objects via the s3op helper. Returns a list of
        S3Objects; missing keys raise MetaflowS3NotFound unless
        return_missing=True."""
        def _get():
            res = self._read_many_files('get',
                                        map(self._url, keys),
                                        allow_missing=return_missing,
                                        verify=True,
                                        verbose=False,
                                        listing=True)
            for s3prefix, s3url, fname in res:
                if fname:
                    yield self._s3root, s3url, os.path.join(self._tmpdir, fname)
                else:
                    # missing object: represented with path=None, size=None
                    yield self._s3root, s3prefix, None, None

        return list(starmap(S3Object, _get()))

    def get_recursive(self, keys):
        """Download all objects under each key recursively. Returns a
        list of S3Objects."""
        def _get():
            res = self._read_many_files('get',
                                        map(self._url, keys),
                                        recursive=True,
                                        verify=True,
                                        verbose=False,
                                        listing=True)
            for s3prefix, s3url, fname in res:
                yield s3prefix, s3url, os.path.join(self._tmpdir, fname)

        return list(starmap(S3Object, _get()))

    def get_all(self):
        """Download everything under the configured s3root. Requires the
        client to be initialized with a 'run' or 's3root' prefix."""
        if self._s3root is None:
            raise MetaflowS3URLException(
                "Can't get_all() when S3 is initialized without a prefix")
        else:
            return self.get_recursive([None])

    def put(self, key, obj, overwrite=True):
        """Upload a string/bytes object under key. Returns the object's
        URL, or None when overwrite=False and the key already exists."""
        if not is_stringish(obj):
            raise MetaflowS3InvalidObject(
                "Object corresponding to the key '%s' is not a string "
                "or a bytes object." % key)
        url = self._url(key)
        src = urlparse(url)

        def _upload(s3, tmp):
            # we need to recreate the StringIO object for retries since
            # apparently upload_fileobj will/may close() it
            blob = to_fileobj(obj)
            s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))

        if overwrite:
            self._one_boto_op(_upload, url)
            return url
        else:
            def _head(s3, tmp):
                s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))

            try:
                self._one_boto_op(_head, url)
            except MetaflowS3NotFound:
                # key does not exist yet: safe to upload
                self._one_boto_op(_upload, url)
                return url
            # key exists and overwrite=False: do nothing (returns None)

    def put_many(self, key_objs, overwrite=True):
        """Upload many (key, obj) pairs, each obj a string/bytes object.
        Returns [(key, url)] for the objects actually uploaded."""
        def _store():
            for key, obj in key_objs:
                if is_stringish(obj):
                    # stage each object in a temp file for the s3op helper
                    with NamedTemporaryFile(dir=self._tmpdir,
                                            delete=False,
                                            mode='wb',
                                            prefix='metaflow.s3.put_many.') as tmp:
                        tmp.write(to_bytes(obj))
                        tmp.close()
                        yield tmp.name, self._url(key), key
                else:
                    raise MetaflowS3InvalidObject(
                        "Object corresponding to the key '%s' is not a string "
                        "or a bytes object." % key)

        return self._put_many_files(_store(), overwrite)

    def put_files(self, key_paths, overwrite=True):
        """Upload many (key, local_path) pairs. Returns [(key, url)] for
        the files actually uploaded. Raises MetaflowS3NotFound if a local
        file is missing."""
        def _check():
            for key, path in key_paths:
                if not os.path.exists(path):
                    raise MetaflowS3NotFound("Local file not found: %s" % path)
                yield path, self._url(key), key

        return self._put_many_files(_check(), overwrite)

    def _one_boto_op(self, op, url):
        """Run a single boto3 operation with retries and exponential
        backoff plus jitter. Returns the temp file name handed to op.
        Translates 404/403/NoSuchBucket client errors to Metaflow
        exceptions immediately; other errors are retried."""
        error = ''
        for i in range(NUM_S3OP_RETRIES):
            tmp = NamedTemporaryFile(dir=self._tmpdir,
                                     prefix='metaflow.s3.one_file.',
                                     delete=False)
            try:
                s3, _ = get_s3_client()
                op(s3, tmp.name)
                return tmp.name
            except ClientError as err:
                error_code = s3op.normalize_client_error(err)
                if error_code == 404:
                    raise MetaflowS3NotFound(url)
                elif error_code == 403:
                    raise MetaflowS3AccessDenied(url)
                elif error_code == 'NoSuchBucket':
                    raise MetaflowS3URLException("Specified S3 bucket doesn't exist.")
                error = str(err)
            except Exception as ex:
                error = str(ex)
            os.unlink(tmp.name)
            # exponential backoff with jitter before the next attempt
            time.sleep(2**i + random.randint(0, 10))
        raise MetaflowS3Exception("S3 operation failed.\n"
                                  "Key requested: %s\n"
                                  "Error: %s" % (url, error))

    def _read_many_files(self, op, prefixes, **options):
        """Run the s3op helper for 'list'/'get' over many prefixes; yields
        tuples of url-unquoted fields, one per line of s3op's output."""
        # Materialize the iterable: callers pass generators, and we need it
        # both for writing the input file and for the error message below
        # (indexing a consumed generator would raise TypeError).
        prefixes = list(prefixes)
        with NamedTemporaryFile(dir=self._tmpdir,
                                mode='wb',
                                delete=not debug.s3client,
                                prefix='metaflow.s3.inputs.') as inputfile:
            inputfile.write(b'\n'.join(map(url_quote, prefixes)))
            inputfile.flush()
            stdout, stderr = self._s3op_with_retries(op,
                                                     inputs=inputfile.name,
                                                     **options)
            if stderr:
                raise MetaflowS3Exception("Getting S3 files failed.\n"
                                          "First prefix requested: %s\n"
                                          "Error: %s" % (prefixes[0], stderr))
            else:
                for line in stdout.splitlines():
                    yield tuple(map(url_unquote, line.strip(b'\n').split(b' ')))

    def _put_many_files(self, url_files, overwrite):
        """Upload (local_path, url, key) triples via the s3op helper.
        Returns [(key, url)] for the uploads that actually happened (with
        overwrite=False, pre-existing keys are skipped by s3op)."""
        url_files = list(url_files)
        with NamedTemporaryFile(dir=self._tmpdir,
                                mode='wb',
                                delete=not debug.s3client,
                                prefix='metaflow.s3.put_inputs.') as inputfile:
            lines = (b' '.join(map(url_quote, (os.path.realpath(local), url)))
                     for local, url, _ in url_files)
            inputfile.write(b'\n'.join(lines))
            inputfile.flush()
            stdout, stderr = self._s3op_with_retries('put',
                                                     filelist=inputfile.name,
                                                     verbose=False,
                                                     overwrite=overwrite,
                                                     listing=True)
            if stderr:
                raise MetaflowS3Exception("Uploading S3 files failed.\n"
                                          "First key: %s\n"
                                          "Error: %s" % (url_files[0][2],
                                                         stderr))
            else:
                urls = set()
                for line in stdout.splitlines():
                    url, _, _ = map(url_unquote, line.strip(b'\n').split(b' '))
                    urls.add(url)
                return [(key, url) for _, url, key in url_files if url in urls]

    def _s3op_with_retries(self, mode, **options):
        """Invoke the s3op helper script as a subprocess with retries.

        Returns (stdout, None) on success, or (None, stderr_text) after
        all retries are exhausted. 404/403 exit codes are translated to
        exceptions immediately, without retrying.
        """
        cmdline = [sys.executable, os.path.abspath(s3op.__file__), mode]
        for key, value in options.items():
            key = key.replace('_', '-')
            if isinstance(value, bool):
                if value:
                    cmdline.append('--%s' % key)
                else:
                    cmdline.append('--no-%s' % key)
            else:
                cmdline.extend(('--%s' % key, value))

        # Initialize so the final return is well-defined even if
        # NUM_S3OP_RETRIES is configured to zero.
        err_out = None
        for i in range(NUM_S3OP_RETRIES):
            with NamedTemporaryFile(dir=self._tmpdir,
                                    mode='wb+',
                                    delete=not debug.s3client,
                                    prefix='metaflow.s3op.stderr') as stderr:
                try:
                    debug.s3client_exec(cmdline)
                    stdout = subprocess.check_output(cmdline,
                                                     cwd=self._tmpdir,
                                                     stderr=stderr.file)
                    return stdout, None
                except subprocess.CalledProcessError as ex:
                    stderr.seek(0)
                    err_out = stderr.read().decode('utf-8', errors='replace')
                    stderr.seek(0)
                    if ex.returncode == s3op.ERROR_URL_NOT_FOUND:
                        raise MetaflowS3NotFound(err_out)
                    elif ex.returncode == s3op.ERROR_URL_ACCESS_DENIED:
                        raise MetaflowS3AccessDenied(err_out)
            # exponential backoff with jitter before the next attempt
            time.sleep(2**i + random.randint(0, 10))
        return None, err_out
| true
| true
|
790da00e416e6ea53931fd81ed764a45913d098e
| 1,674
|
py
|
Python
|
week4/week4_additionalexercice_5.py
|
harshonyou/SOFT1
|
1bd2b0cc26d39c549bec576389bebd0fd011387d
|
[
"Apache-2.0"
] | null | null | null |
week4/week4_additionalexercice_5.py
|
harshonyou/SOFT1
|
1bd2b0cc26d39c549bec576389bebd0fd011387d
|
[
"Apache-2.0"
] | null | null | null |
week4/week4_additionalexercice_5.py
|
harshonyou/SOFT1
|
1bd2b0cc26d39c549bec576389bebd0fd011387d
|
[
"Apache-2.0"
] | null | null | null |
'''
Exercise 5: Vectors

A vector of dimension n can be represented by a list in Python. For example, a
vector of dimension 3 could represent a point in space, and a vector of
dimension 4 could represent a point in space and time (the fourth dimension
being the time). In mathematical notation, a vector of dimension 3 is written
as (a, b, c); it can be stored in a Python list [a, b, c].

There are two simple operations that can be done on vectors, and the result of
each operation is also a vector:

    Scalar product:  s * (a, b, c) = (s*a, s*b, s*c)
    Addition:        (a, b, c) + (d, e, f) = (a+d, b+e, c+f)

Implement two functions:
1. scalar_product(scalar, vector) where scalar is a float and vector is a list
   of floats. The function returns the scalar product of the two parameters.
2. vector_addition(vector1, vector2) where vector1 and vector2 are lists of
   floats. The function returns the vector addition of the two parameters. If
   vector1 and vector2 don't have the same dimension, you should print an
   error message and return None.
'''
def scalar_product(scalar, vector):
    """Return the scalar product ``scalar * vector`` as a new list.

    Each element of ``vector`` is multiplied by ``scalar``. Unlike the
    original in-place version, the caller's list is left unmodified
    (mutating an argument is a surprising side effect).
    """
    return [component * scalar for component in vector]
def vector_addition(vector1, vector2):
    """Return the element-wise sum of two vectors as a new list.

    Elements are converted with float() so both real floats and numeric
    strings (as produced by input().split(',')) are accepted; the
    original int() conversion crashed on values such as '1.5' and
    truncated floats, although the exercise specifies lists of floats.

    Per the exercise specification, if the vectors have different
    dimensions an error message is printed and None is returned (the
    original returned the string 'Error' and printed nothing).
    """
    if len(vector1) != len(vector2):
        print('Error: vectors must have the same dimension')
        return None
    return [float(a) + float(b) for a, b in zip(vector1, vector2)]
# Console driver: read a scalar and two comma-separated vectors, then
# exercise both functions (prompts appear in the original order).
scalar_value = int(input('Enter Scalar Value: '))
first_vector = input('Enter a Matrix seperated by coma: ').split(',')
print(scalar_product(scalar_value, first_vector))
vector_a = input('Enter first Matrix to add: ').split(',')
vector_b = input('Enter second Matrix to add: ').split(',')
print(vector_addition(vector_a, vector_b))
| 27.442623
| 124
| 0.717443
|
def scalar_product(scalar, vector):
    """Return the scalar product ``scalar * vector`` as a new list.

    Each element of ``vector`` is multiplied by ``scalar``. Unlike the
    original in-place version, the caller's list is left unmodified
    (mutating an argument is a surprising side effect).
    """
    return [component * scalar for component in vector]
def vector_addition(vector1, vector2):
    """Return the element-wise sum of two vectors as a new list.

    Elements are converted with float() so both real floats and numeric
    strings (as produced by input().split(',')) are accepted; the
    original int() conversion crashed on values such as '1.5' and
    truncated floats, although the exercise specifies lists of floats.

    Per the exercise specification, if the vectors have different
    dimensions an error message is printed and None is returned (the
    original returned the string 'Error' and printed nothing).
    """
    if len(vector1) != len(vector2):
        print('Error: vectors must have the same dimension')
        return None
    return [float(a) + float(b) for a, b in zip(vector1, vector2)]
# Console driver: read a scalar and two comma-separated vectors, then
# exercise both functions (prompts appear in the original order).
scalar_value = int(input('Enter Scalar Value: '))
first_vector = input('Enter a Matrix seperated by coma: ').split(',')
print(scalar_product(scalar_value, first_vector))
vector_a = input('Enter first Matrix to add: ').split(',')
vector_b = input('Enter second Matrix to add: ').split(',')
print(vector_addition(vector_a, vector_b))
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.