hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c310a363cd43bc3cf0bf2612db8eb1c88950cce | 23,180 | py | Python | models/nets/cpm_hand.py | Aniket1998Agrawal/robotic-palm | 3062db8da9a32a9606aa19ef0ee632013cd40b30 | [
"Apache-2.0"
] | 828 | 2017-08-28T07:42:53.000Z | 2022-03-24T07:24:22.000Z | models/nets/cpm_hand.py | abcx3261/convolutional-pose-machines-tensorflow | b9a30fbb5a2f1d15faf8f553201203a431cb34cb | [
"Apache-2.0"
] | 81 | 2017-08-27T13:46:54.000Z | 2022-01-20T11:31:44.000Z | models/nets/cpm_hand.py | abcx3261/convolutional-pose-machines-tensorflow | b9a30fbb5a2f1d15faf8f553201203a431cb34cb | [
"Apache-2.0"
] | 312 | 2017-08-29T08:13:02.000Z | 2022-01-16T12:27:21.000Z | import tensorflow as tf
import pickle
from models.nets.CPM import CPM
class CPM_Model(CPM):
    """Convolutional Pose Machine (CPM) for hand keypoint estimation.

    The graph is a VGG-style shared feature trunk ('sub_stages') followed by
    `stages` prediction stages: stage 1 predicts `joints + 1` heatmaps from
    the shared feature map, and every later stage refines the previous
    stage's heatmaps.  All variable-scope / layer names are kept identical
    to the original layer-by-layer construction so that existing checkpoints
    and the pickled weight files used by load_weights_from_file() still load.
    """

    def __init__(self, input_size, heatmap_size, stages, joints, img_type='RGB', is_training=True):
        """Create placeholders and build the inference graph.

        Args:
            input_size: side length of the square input image.
            heatmap_size: side length of the square ground-truth heatmaps.
            stages: number of CPM stages (>= 1).
            joints: number of joints; one extra background channel is added.
            img_type: 'RGB' (3-channel) or 'GRAY' (1-channel) input.
            is_training: unused here; kept for interface compatibility.
        """
        self.stages = stages
        self.stage_heatmap = []
        self.stage_loss = [0 for _ in range(stages)]
        self.total_loss = 0
        self.input_image = None
        self.center_map = None
        self.gt_heatmap = None
        self.init_lr = 0
        self.merged_summary = None
        self.joints = joints
        self.batch_size = 0
        self.inference_type = 'Train'
        # Input placeholder: channel count depends on the image type.  As in
        # the original, an unrecognized img_type leaves input_images unset.
        if img_type == 'RGB':
            channels = 3
        elif img_type == 'GRAY':
            channels = 1
        else:
            channels = None
        if channels is not None:
            self.input_images = tf.placeholder(dtype=tf.float32,
                                               shape=(None, input_size, input_size, channels),
                                               name='input_placeholder')
        self.cmap_placeholder = tf.placeholder(dtype=tf.float32,
                                               shape=(None, input_size, input_size, 1),
                                               name='cmap_placeholder')
        # joints + 1: one heatmap per joint plus a background channel.
        self.gt_hmap_placeholder = tf.placeholder(dtype=tf.float32,
                                                  shape=(None, heatmap_size, heatmap_size, joints + 1),
                                                  name='gt_hmap_placeholder')
        self._build_model()

    @staticmethod
    def _conv(inputs, filters, kernel_size, name, padding='same', activation=tf.nn.relu):
        """Conv layer with the stride-1 / Xavier-init convention used everywhere in this net."""
        return tf.layers.conv2d(inputs=inputs,
                                filters=filters,
                                kernel_size=kernel_size,
                                strides=[1, 1],
                                padding=padding,
                                activation=activation,
                                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                name=name)

    def _build_model(self):
        """Build the pooled center map, the shared VGG trunk, and all stages."""
        with tf.variable_scope('pooled_center_map'):
            # 8x downsampling so the center map matches the trunk's output resolution.
            self.center_map = tf.layers.average_pooling2d(inputs=self.cmap_placeholder,
                                                          pool_size=[9, 9],
                                                          strides=[8, 8],
                                                          padding='same',
                                                          name='center_map')
        with tf.variable_scope('sub_stages'):
            # VGG-style trunk described as (conv count, filters, pool-after?) groups.
            # The flat sub_conv1..14 / sub_pool1..3 numbering reproduces the
            # original hand-written graph exactly, so pretrained weights load.
            trunk = [(2, 64, True), (2, 128, True), (4, 256, True), (6, 512, False)]
            net = self.input_images
            conv_idx = 0
            pool_idx = 0
            for conv_count, filters, pool_after in trunk:
                for _ in range(conv_count):
                    conv_idx += 1
                    net = self._conv(net, filters, [3, 3], 'sub_conv%d' % conv_idx)
                if pool_after:
                    pool_idx += 1
                    net = tf.layers.max_pooling2d(inputs=net,
                                                  pool_size=[2, 2],
                                                  strides=2,
                                                  padding='valid',
                                                  name='sub_pool%d' % pool_idx)
            # Compress the trunk output into the 128-channel shared feature map.
            self.sub_stage_img_feature = self._conv(net, 128, [3, 3], 'sub_stage_img_feature')

        with tf.variable_scope('stage_1'):
            conv1 = self._conv(self.sub_stage_img_feature, 512, [1, 1], 'conv1',
                               padding='valid')
            # Linear (no activation) heatmap logits with a background channel.
            self.stage_heatmap.append(self._conv(conv1, self.joints + 1, [1, 1],
                                                 'stage_heatmap',
                                                 padding='valid',
                                                 activation=None))
        for stage in range(2, self.stages + 1):
            self._middle_conv(stage)

    def _middle_conv(self, stage):
        """Build one refinement stage from the previous heatmaps + shared features."""
        with tf.variable_scope('stage_' + str(stage)):
            # The original also concatenated self.center_map here; that input
            # was disabled upstream and stays disabled.
            self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                                 self.sub_stage_img_feature,
                                                 ],
                                                axis=3)
            net = self.current_featuremap
            # Five 7x7 convs grow the receptive field over the concatenated features.
            for layer in range(1, 6):
                net = self._conv(net, 128, [7, 7], 'mid_conv%d' % layer)
            net = self._conv(net, 128, [1, 1], 'mid_conv6', padding='valid')
            self.current_heatmap = self._conv(net, self.joints + 1, [1, 1], 'mid_conv7',
                                              padding='valid', activation=None)
            self.stage_heatmap.append(self.current_heatmap)

    def build_loss(self, lr, lr_decay_rate, lr_decay_step, optimizer='Adam'):
        """Attach per-stage L2 losses, summaries, and the training op.

        Args:
            lr: initial learning rate.
            lr_decay_rate: exponential decay rate.
            lr_decay_step: decay step count.
            optimizer: optimizer name understood by tf.contrib.layers.optimize_loss.
        """
        self.total_loss = 0
        self.total_loss_eval = 0
        self.init_lr = lr
        self.lr_decay_rate = lr_decay_rate
        self.lr_decay_step = lr_decay_step
        self.optimizer = optimizer
        # Dynamic batch size so the per-sample loss scale is batch-independent.
        self.batch_size = tf.cast(tf.shape(self.input_images)[0], dtype=tf.float32)
        for stage in range(self.stages):
            with tf.variable_scope('stage' + str(stage + 1) + '_loss'):
                self.stage_loss[stage] = tf.nn.l2_loss(self.stage_heatmap[stage] - self.gt_hmap_placeholder,
                                                       name='l2_loss') / self.batch_size
                tf.summary.scalar('stage' + str(stage + 1) + '_loss', self.stage_loss[stage])
        with tf.variable_scope('total_loss'):
            for stage in range(self.stages):
                self.total_loss += self.stage_loss[stage]
            tf.summary.scalar('total loss train', self.total_loss)
        with tf.variable_scope('total_loss_eval'):
            for stage in range(self.stages):
                self.total_loss_eval += self.stage_loss[stage]
            # BUG FIX: the original logged self.total_loss under the eval tag,
            # so the eval curve silently duplicated the train curve.
            tf.summary.scalar('total loss eval', self.total_loss_eval)
        with tf.variable_scope('train'):
            self.global_step = tf.contrib.framework.get_or_create_global_step()
            self.cur_lr = tf.train.exponential_decay(self.init_lr,
                                                     global_step=self.global_step,
                                                     decay_rate=self.lr_decay_rate,
                                                     decay_steps=self.lr_decay_step)
            tf.summary.scalar('global learning rate', self.cur_lr)
            self.train_op = tf.contrib.layers.optimize_loss(loss=self.total_loss,
                                                            global_step=self.global_step,
                                                            learning_rate=self.cur_lr,
                                                            optimizer=self.optimizer)

    @staticmethod
    def _assign_conv(sess, weights, var_scope, weight_key):
        """Copy one conv layer's kernel/bias from the pickled weight dict into the graph."""
        kernel = tf.get_variable(var_scope + '/kernel')
        bias = tf.get_variable(var_scope + '/bias')
        sess.run(tf.assign(kernel, weights[weight_key]))
        sess.run(tf.assign(bias, weights[weight_key + '_b']))

    def load_weights_from_file(self, weight_file_path, sess, finetune=True):
        """Initialize graph variables from a pickled CPM weight file.

        Pickled keys use the Caffe naming ('conv<group>_<n>', 'Mconv<n>_stage<s>',
        with '<key>_b' for biases); graph layers use a flat 'sub_conv<n>' numbering.

        Args:
            weight_file_path: path to the pickle file of pretrained weights.
            sess: an active tf.Session whose variables are already created.
            finetune: when True, the stage-1 heatmap head is left randomly
                initialized (to be retrained); all other layers are loaded.
        """
        with open(weight_file_path, 'rb') as weight_file:
            weights = pickle.load(weight_file)
        with tf.variable_scope('', reuse=True):
            ## Pre-stage VGG trunk: conv groups 1..5 map onto sub_conv1..14.
            sub_conv_idx = 0
            for group, layer_count in [(1, 2), (2, 2), (3, 4), (4, 4), (5, 2)]:
                for layer in range(1, layer_count + 1):
                    sub_conv_idx += 1
                    self._assign_conv(sess, weights,
                                      'sub_stages/sub_conv%d' % sub_conv_idx,
                                      'conv%d_%d' % (group, layer))
            # Shared feature compression layer.
            self._assign_conv(sess, weights, 'sub_stages/sub_stage_img_feature', 'conv5_3_CPM')
            ## Stage 1
            self._assign_conv(sess, weights, 'stage_1/conv1', 'conv6_1_CPM')
            if finetune != True:
                self._assign_conv(sess, weights, 'stage_1/stage_heatmap', 'conv6_2_CPM')
            ## Stage 2 and behind
            for stage in range(2, self.stages + 1):
                for layer in range(1, 8):
                    self._assign_conv(sess, weights,
                                      'stage_%d/mid_conv%d' % (stage, layer),
                                      'Mconv%d_stage%d' % (layer, stage))
| 56.674817 | 116 | 0.40302 | import tensorflow as tf
import pickle
from models.nets.CPM import CPM
class CPM_Model(CPM):
def __init__(self, input_size, heatmap_size, stages, joints, img_type='RGB', is_training=True):
self.stages = stages
self.stage_heatmap = []
self.stage_loss = [0 for _ in range(stages)]
self.total_loss = 0
self.input_image = None
self.center_map = None
self.gt_heatmap = None
self.init_lr = 0
self.merged_summary = None
self.joints = joints
self.batch_size = 0
self.inference_type = 'Train'
if img_type == 'RGB':
self.input_images = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 3),
name='input_placeholder')
elif img_type == 'GRAY':
self.input_images = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 1),
name='input_placeholder')
self.cmap_placeholder = tf.placeholder(dtype=tf.float32,
shape=(None, input_size, input_size, 1),
name='cmap_placeholder')
self.gt_hmap_placeholder = tf.placeholder(dtype=tf.float32,
shape=(None, heatmap_size, heatmap_size, joints + 1),
name='gt_hmap_placeholder')
self._build_model()
def _build_model(self):
with tf.variable_scope('pooled_center_map'):
self.center_map = tf.layers.average_pooling2d(inputs=self.cmap_placeholder,
pool_size=[9, 9],
strides=[8, 8],
padding='same',
name='center_map')
with tf.variable_scope('sub_stages'):
sub_conv1 = tf.layers.conv2d(inputs=self.input_images,
filters=64,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv1')
sub_conv2 = tf.layers.conv2d(inputs=sub_conv1,
filters=64,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv2')
sub_pool1 = tf.layers.max_pooling2d(inputs=sub_conv2,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool1')
sub_conv3 = tf.layers.conv2d(inputs=sub_pool1,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv3')
sub_conv4 = tf.layers.conv2d(inputs=sub_conv3,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv4')
sub_pool2 = tf.layers.max_pooling2d(inputs=sub_conv4,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool2')
sub_conv5 = tf.layers.conv2d(inputs=sub_pool2,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv5')
sub_conv6 = tf.layers.conv2d(inputs=sub_conv5,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv6')
sub_conv7 = tf.layers.conv2d(inputs=sub_conv6,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv7')
sub_conv8 = tf.layers.conv2d(inputs=sub_conv7,
filters=256,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv8')
sub_pool3 = tf.layers.max_pooling2d(inputs=sub_conv8,
pool_size=[2, 2],
strides=2,
padding='valid',
name='sub_pool3')
sub_conv9 = tf.layers.conv2d(inputs=sub_pool3,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv9')
sub_conv10 = tf.layers.conv2d(inputs=sub_conv9,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv10')
sub_conv11 = tf.layers.conv2d(inputs=sub_conv10,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv11')
sub_conv12 = tf.layers.conv2d(inputs=sub_conv11,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv12')
sub_conv13 = tf.layers.conv2d(inputs=sub_conv12,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv13')
sub_conv14 = tf.layers.conv2d(inputs=sub_conv13,
filters=512,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_conv14')
self.sub_stage_img_feature = tf.layers.conv2d(inputs=sub_conv14,
filters=128,
kernel_size=[3, 3],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='sub_stage_img_feature')
with tf.variable_scope('stage_1'):
conv1 = tf.layers.conv2d(inputs=self.sub_stage_img_feature,
filters=512,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='conv1')
self.stage_heatmap.append(tf.layers.conv2d(inputs=conv1,
filters=self.joints+1,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='stage_heatmap'))
for stage in range(2, self.stages + 1):
self._middle_conv(stage)
def _middle_conv(self, stage):
with tf.variable_scope('stage_' + str(stage)):
self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
self.sub_stage_img_feature,
],
axis=3)
mid_conv1 = tf.layers.conv2d(inputs=self.current_featuremap,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv1')
mid_conv2 = tf.layers.conv2d(inputs=mid_conv1,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv2')
mid_conv3 = tf.layers.conv2d(inputs=mid_conv2,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv3')
mid_conv4 = tf.layers.conv2d(inputs=mid_conv3,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv4')
mid_conv5 = tf.layers.conv2d(inputs=mid_conv4,
filters=128,
kernel_size=[7, 7],
strides=[1, 1],
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv5')
mid_conv6 = tf.layers.conv2d(inputs=mid_conv5,
filters=128,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv6')
self.current_heatmap = tf.layers.conv2d(inputs=mid_conv6,
filters=self.joints+1,
kernel_size=[1, 1],
strides=[1, 1],
padding='valid',
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name='mid_conv7')
self.stage_heatmap.append(self.current_heatmap)
def build_loss(self, lr, lr_decay_rate, lr_decay_step, optimizer='Adam'):
self.total_loss = 0
self.total_loss_eval = 0
self.init_lr = lr
self.lr_decay_rate = lr_decay_rate
self.lr_decay_step = lr_decay_step
self.optimizer = optimizer
self.batch_size = tf.cast(tf.shape(self.input_images)[0], dtype=tf.float32)
for stage in range(self.stages):
with tf.variable_scope('stage' + str(stage + 1) + '_loss'):
self.stage_loss[stage] = tf.nn.l2_loss(self.stage_heatmap[stage] - self.gt_hmap_placeholder,
name='l2_loss') / self.batch_size
tf.summary.scalar('stage' + str(stage + 1) + '_loss', self.stage_loss[stage])
with tf.variable_scope('total_loss'):
for stage in range(self.stages):
self.total_loss += self.stage_loss[stage]
tf.summary.scalar('total loss train', self.total_loss)
with tf.variable_scope('total_loss_eval'):
for stage in range(self.stages):
self.total_loss_eval += self.stage_loss[stage]
tf.summary.scalar('total loss eval', self.total_loss)
with tf.variable_scope('train'):
self.global_step = tf.contrib.framework.get_or_create_global_step()
self.cur_lr = tf.train.exponential_decay(self.init_lr,
global_step=self.global_step,
decay_rate=self.lr_decay_rate,
decay_steps=self.lr_decay_step)
tf.summary.scalar('global learning rate', self.cur_lr)
self.train_op = tf.contrib.layers.optimize_loss(loss=self.total_loss,
global_step=self.global_step,
learning_rate=self.cur_lr,
optimizer=self.optimizer)
def load_weights_from_file(self, weight_file_path, sess, finetune=True):
weights = pickle.load(open(weight_file_path, 'rb'))
with tf.variable_scope('', reuse=True):
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer) + '/bias')
loaded_kernel = weights['conv1_' + str(layer)]
loaded_bias = weights['conv1_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 2) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 2) + '/bias')
loaded_kernel = weights['conv2_' + str(layer)]
loaded_bias = weights['conv2_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
for layer in range(1, 5):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 4) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 4) + '/bias')
loaded_kernel = weights['conv3_' + str(layer)]
loaded_bias = weights['conv3_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
for layer in range(1, 5):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 8) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 8) + '/bias')
loaded_kernel = weights['conv4_' + str(layer)]
loaded_bias = weights['conv4_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
for layer in range(1, 3):
conv_kernel = tf.get_variable('sub_stages/sub_conv' + str(layer + 12) + '/kernel')
conv_bias = tf.get_variable('sub_stages/sub_conv' + str(layer + 12) + '/bias')
loaded_kernel = weights['conv5_' + str(layer)]
loaded_bias = weights['conv5_' + str(layer) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
conv_kernel = tf.get_variable('sub_stages/sub_stage_img_feature/kernel')
conv_bias = tf.get_variable('sub_stages/sub_stage_img_feature/bias')
loaded_kernel = weights['conv5_3_CPM']
loaded_bias = weights['conv5_3_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
conv_kernel = tf.get_variable('stage_1/conv1/kernel')
conv_bias = tf.get_variable('stage_1/conv1/bias')
loaded_kernel = weights['conv6_1_CPM']
loaded_bias = weights['conv6_1_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
if finetune != True:
conv_kernel = tf.get_variable('stage_1/stage_heatmap/kernel')
conv_bias = tf.get_variable('stage_1/stage_heatmap/bias')
loaded_kernel = weights['conv6_2_CPM']
loaded_bias = weights['conv6_2_CPM_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
        for stage in range(2, self.stages + 1):
for layer in range(1, 8):
conv_kernel = tf.get_variable('stage_' + str(stage) + '/mid_conv' + str(layer) + '/kernel')
conv_bias = tf.get_variable('stage_' + str(stage) + '/mid_conv' + str(layer) + '/bias')
loaded_kernel = weights['Mconv' + str(layer) + '_stage' + str(stage)]
loaded_bias = weights['Mconv' + str(layer) + '_stage' + str(stage) + '_b']
sess.run(tf.assign(conv_kernel, loaded_kernel))
sess.run(tf.assign(conv_bias, loaded_bias))
| true | true |
1c310aa9cac9b207948edbf0c276725f744ffb7e | 672 | py | Python | Day 09/1.py | Xerisu/Advent-of-Code | bd068a90b26b04a2345f62cb2054566fbbfce631 | [
"MIT"
] | 1 | 2021-12-02T13:58:00.000Z | 2021-12-02T13:58:00.000Z | Day 09/1.py | Xerisu/Advent-of-Code | bd068a90b26b04a2345f62cb2054566fbbfce631 | [
"MIT"
] | null | null | null | Day 09/1.py | Xerisu/Advent-of-Code | bd068a90b26b04a2345f62cb2054566fbbfce631 | [
"MIT"
] | null | null | null | input_file = open("./cave.txt","r")
def read_floor(path="./cave.txt"):
    """Read the heightmap file and return it as a grid of ints padded with 9s.

    A one-cell border of height 9 is added on every side so neighbor lookups
    inside find_low_points never need bounds checks.
    """
    # BUG FIX: the original opened the file without ever closing it.
    with open(path, "r") as cave_file:
        rows = ["9" + line.strip() + "9" for line in cave_file]
    width = len(rows[0])
    rows = ["9" * width] + rows + ["9" * width]
    return [[int(cell) for cell in row] for row in rows]


def find_low_points(floor):
    """Return (risk_sum, low_points) for a 9-padded heightmap.

    A low point is a cell strictly lower than its four orthogonal neighbors;
    its risk level is height + 1.  Points are (y, x) into the padded grid.
    """
    low_points = []
    risk_sum = 0
    # Skip the padding ring: only interior cells are real map cells.
    for y in range(1, len(floor) - 1):
        for x in range(1, len(floor[0]) - 1):
            height = floor[y][x]
            if (height < floor[y + 1][x] and height < floor[y - 1][x]
                    and height < floor[y][x + 1] and height < floor[y][x - 1]):
                risk_sum += height + 1
                low_points.append((y, x))
    return risk_sum, low_points


if __name__ == "__main__":
    total_risk, points = find_low_points(read_floor())
    print(total_risk)
    print(points)
| 24 | 135 | 0.568452 | input_file = open("./cave.txt","r")
floor = input_file.readlines()
input_file.close()
floor = ["9" + elem.strip() + "9" for elem in floor]
floor = ["9" * len(floor[0])] + floor + ["9" * len(floor[0])]
floor = [[int(x) for x in row] for row in floor]
low_points = []
sum_low_points = 0
for y in range(1, len(floor) - 1):
for x in range(1, len(floor[0]) - 1):
if floor[y][x] < floor[y+1][x] and floor[y][x] < floor[y-1][x] and floor[y][x] < floor[y][x+1] and floor[y][x] < floor[y][x-1]:
sum_low_points += floor[y][x] + 1
point = (y , x)
low_points.append(point)
print(sum_low_points)
print(low_points)
| true | true |
1c310b4c33b1a3f0e7e4d9388796098395a17ff7 | 3,888 | py | Python | Python/whatsView/apps/views.py | min9288/Multicampus | 2aaac730b35e530f8f91cb1ba41c08ee18d59142 | [
"MIT"
] | 2 | 2022-01-18T09:27:42.000Z | 2022-03-29T14:59:00.000Z | Python/whatsView/apps/views.py | min9288/Multicampus | 2aaac730b35e530f8f91cb1ba41c08ee18d59142 | [
"MIT"
] | null | null | null | Python/whatsView/apps/views.py | min9288/Multicampus | 2aaac730b35e530f8f91cb1ba41c08ee18d59142 | [
"MIT"
] | null | null | null | import json, os, sys, urllib.request, requests, re
from django.shortcuts import render, redirect
from django.conf import settings
from django.views.generic import FormView
from requests import request
from bs4 import BeautifulSoup
def index(request):
    """Render the site's main landing page (common/main.html)."""
    return render(request, 'common/main.html')
def make_naver_search_api_url(search_text, start_num, disp_num):
    """Build a Naver blog-search API URL for the given query and paging window.

    Args:
        search_text: raw user query; percent-encoded here.
        start_num: 1-based index of the first result to return.
        disp_num: number of results to return.
    """
    encoded_query = urllib.parse.quote(search_text)
    return (
        'https://openapi.naver.com/v1/search/blog.json'
        '?query=' + encoded_query
        + '&start=' + str(start_num)
        + '&display=' + str(disp_num)
    )
def get_request_url(request):
    """Blog-search view: query the Naver blog-search API and render the results.

    Returns the rendered 'searchView/info.html' page on success, or None when
    the API call does not return HTTP 200 (matching the original behavior).
    """
    search_value = request.GET.get('searchValue', "")
    api_url = make_naver_search_api_url(search_value, 1, 10)
    with open(settings.SECRET_DEBUG_FILE) as secret_file:
        config_secret_debug = json.loads(secret_file.read())
    client_id = config_secret_debug['NAVER']['CLIENT_ID']
    client_secret = config_secret_debug['NAVER']['CLIENT_SECRET']
    # BUG FIX: the original rebound the name `request` to the urllib Request,
    # clobbering the Django request object that render() needs below.
    api_request = urllib.request.Request(api_url)
    api_request.add_header("X-Naver-Client-Id", client_id)
    api_request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(api_request)
    if response.getcode() == 200:
        result = json.loads(response.read().decode('utf-8'))
        items = result.get('items')
        # BUG FIX: the original built {'items': items} and then indexed it
        # with context[0], which raises KeyError on a dict.
        return render(request, 'searchView/info.html', {'info_items': items})
    else:
        print("---error---")
        return None
def youtube(request):
    """Video-search view: query the YouTube Data API and render the results page."""
    # NOTE(review): the API key is hard-coded here; consider loading it from
    # settings.SECRET_DEBUG_FILE like the Naver credentials — confirm with owner.
    search_params = {
        'key': 'AIzaSyAWvSovFGym1Wj9116pOIGF4Fcx4wigK3Y',
        'part': 'snippet',
        'type': 'video',
        'maxResults': '10',
        'regionCode': "KR",
        'q': request.GET.get('searchValue', ""),
    }
    api_response = requests.get('https://www.googleapis.com/youtube/v3/search',
                                search_params)
    payload = api_response.json()
    video_items = payload['items']
    return render(request, 'searchView/video.html', {'video_items': video_items})
def all(request):
    """Combined search view: query the Naver blog API and YouTube, render both.

    Only GET requests are handled; any other method returns None, as does a
    non-200 response from the Naver API (matching the original behavior).
    """
    if request.method != 'GET':
        return None
    search_text = request.GET.get('searchValue')
    api_url = make_naver_search_api_url(search_text, 1, 40)
    with open(settings.SECRET_DEBUG_FILE) as secret_file:
        config_secret_debug = json.loads(secret_file.read())
    client_id = config_secret_debug['NAVER']['CLIENT_ID']
    client_secret = config_secret_debug['NAVER']['CLIENT_SECRET']
    naver_request = urllib.request.Request(api_url)
    naver_request.add_header("X-Naver-Client-Id", client_id)
    naver_request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(naver_request)
    if response.getcode() != 200:
        print("---error---")
        return None
    # YouTube search for the same query.
    youtube_params = {
        # BUG FIX: the original used the literal placeholder string '유튜브 키'
        # ("YouTube key") instead of a real API key, so every call failed.
        # NOTE(review): assumes the secret file has a YOUTUBE/API_KEY entry,
        # mirroring the NAVER section — confirm the config schema.
        'key': config_secret_debug['YOUTUBE']['API_KEY'],
        'part': 'snippet',
        'type': 'video',
        'maxResults': '12',
        'regionCode': "KR",
        'q': search_text,
    }
    youtube_response = requests.get('https://www.googleapis.com/youtube/v3/search',
                                    youtube_params)
    response_dict = youtube_response.json()
    naver_result = json.loads(response.read().decode('utf-8'))
    naver_items = naver_result.get('items')
    return render(request, 'searchView/all.html',
                  {'info_items': naver_items,
                   'video_items': response_dict['items']})
return None | 34.714286 | 130 | 0.623971 | import json, os, sys, urllib.request, requests, re
from django.shortcuts import render, redirect
from django.conf import settings
from django.views.generic import FormView
from requests import request
from bs4 import BeautifulSoup
def index(request):
    """Render the site's main landing page."""
    template_name = 'common/main.html'
    return render(request, template_name)
def make_naver_search_api_url(search_text, start_num, disp_num):
    """Build a Naver blog-search API URL for the given query string,
    1-based start offset, and number of results to display."""
    encoded_query = urllib.parse.quote(search_text)
    return (
        'https://openapi.naver.com/v1/search/blog.json'
        f'?query={encoded_query}'
        f'&start={start_num}'
        f'&display={disp_num}'
    )
def get_request_url(request):
    """Search Naver blogs for the 'searchValue' GET parameter and render
    the first 10 results.

    Fixes over the previous version:
      * ``context[0]`` raised ``KeyError`` (``context`` was a dict keyed
        by 'items'); the items list is now passed to the template directly.
      * The Django ``request`` argument is no longer shadowed by the
        urllib request object.
      * The secrets file handle is closed via a context manager.
      * A failed API call now returns an HTTP error response instead of
        ``None`` (which makes Django raise ValueError).
    """
    from django.http import HttpResponse

    search_value = request.GET.get('searchValue', "")
    api_url = make_naver_search_api_url(search_value, 1, 10)
    with open(settings.SECRET_DEBUG_FILE) as secret_file:
        config_secret_debug = json.loads(secret_file.read())
    client_id = config_secret_debug['NAVER']['CLIENT_ID']
    client_secret = config_secret_debug['NAVER']['CLIENT_SECRET']
    naver_request = urllib.request.Request(api_url)
    naver_request.add_header("X-Naver-Client-Id", client_id)
    naver_request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(naver_request)
    if response.getcode() != 200:
        print("---error---")
        return HttpResponse("Naver search API error", status=502)
    result = json.loads(response.read().decode('utf-8'))
    return render(request, 'searchView/info.html', {'info_items': result.get('items')})
def youtube(request):
    """Render videos returned by the YouTube Data API v3 for the
    'searchValue' GET parameter."""
    # NOTE(review): the API key is hard-coded; it should live in settings/secrets.
    search_url = 'https://www.googleapis.com/youtube/v3/search'
    query_params = {
        'key': 'AIzaSyAWvSovFGym1Wj9116pOIGF4Fcx4wigK3Y',
        'part': 'snippet',
        'type': 'video',
        'maxResults': '10',
        'regionCode': "KR",
        'q': request.GET.get('searchValue', ""),
    }
    payload = requests.get(search_url, query_params).json()
    video_items = payload['items']
    return render(request, 'searchView/video.html', {'video_items': video_items})
def all(request):
    """Combined search view: query the Naver blog-search API and the
    YouTube Data API for the 'searchValue' GET parameter and render both
    result sets on one page.

    Fixes over the previous version:
      * Non-GET requests and failed Naver API calls returned ``None``,
        which makes Django raise ValueError; they now return proper
        HTTP error responses.
      * The secrets file handle is closed via a context manager.
    """
    from django.http import HttpResponse, HttpResponseNotAllowed

    if request.method != 'GET':
        # A Django view must always return an HttpResponse.
        return HttpResponseNotAllowed(['GET'])

    # Naver blog search.
    search_text = request.GET.get('searchValue')
    API_URL = make_naver_search_api_url(search_text, 1, 40)
    with open(settings.SECRET_DEBUG_FILE) as secret_file:
        config_secret_debug = json.loads(secret_file.read())
    client_id = config_secret_debug['NAVER']['CLIENT_ID']
    client_secret = config_secret_debug['NAVER']['CLIENT_SECRET']
    naver_request = urllib.request.Request(API_URL)
    naver_request.add_header("X-Naver-Client-Id", client_id)
    naver_request.add_header("X-Naver-Client-Secret", client_secret)
    response = urllib.request.urlopen(naver_request)
    rescode = response.getcode()
    if rescode != 200:
        print("---error---")
        # Surface the upstream failure instead of returning None.
        return HttpResponse("Naver search API error", status=502)

    # YouTube video search.
    url = 'https://www.googleapis.com/youtube/v3/search'
    params = {
        'key': '유튜브 키',  # NOTE(review): placeholder key — move to settings/secrets
        'part': 'snippet',
        'type': 'video',
        'maxResults': '12',
        'regionCode': "KR",
        'q': search_text,
    }
    response1 = requests.get(url, params)
    response_dict = response1.json()
    response_body = response.read()
    naver_result = json.loads(response_body.decode('utf-8'))
    naver_items = naver_result.get('items')
    return render(
        request,
        'searchView/all.html',
        {'info_items': naver_items, 'video_items': response_dict['items']},
    )
1c310c172c06261161c1ed4dfea6d45ac54fb34c | 655 | py | Python | var/spack/repos/builtin/packages/lndir/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/lndir/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/lndir/package.py | carlabguillen/spack | 7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lndir(AutotoolsPackage, XorgPackage):
    """lndir - create a shadow directory of symbolic links to another
    directory tree."""

    # Project homepage and the release tarball's path on the X.org mirrors.
    homepage = "http://cgit.freedesktop.org/xorg/util/lndir"
    xorg_mirror_path = "util/lndir-1.0.3.tar.gz"

    version('1.0.3', sha256='95b2d26fb3cbe702f828146c7a4c7c48001d2da52b062580227b7b68180be902')

    # Both dependencies are needed only at build time.
    depends_on('xproto@7.0.17:', type='build')
    depends_on('pkgconfig', type='build')
| 32.75 | 95 | 0.737405 |
from spack import *
class Lndir(AutotoolsPackage, XorgPackage):
    """lndir - create a shadow directory of symbolic links to another
    directory tree."""

    # Project homepage and the release tarball's path on the X.org mirrors.
    homepage = "http://cgit.freedesktop.org/xorg/util/lndir"
    xorg_mirror_path = "util/lndir-1.0.3.tar.gz"

    version('1.0.3', sha256='95b2d26fb3cbe702f828146c7a4c7c48001d2da52b062580227b7b68180be902')

    # Build-time-only dependencies.
    depends_on('xproto@7.0.17:', type='build')
    depends_on('pkgconfig', type='build')
| true | true |
1c310c178edaf20ebe5d2e344ace27beb2ba5739 | 30,327 | py | Python | tests/sagemaker/test_deployment.py | devlibx/mlflowx | 291c51161ec26450b1e79c8e4a32af960da79591 | [
"Apache-2.0"
] | 1 | 2022-03-15T00:19:10.000Z | 2022-03-15T00:19:10.000Z | tests/sagemaker/test_deployment.py | dengzhihai/mlflow | 1ce3b5eadf6543878a62b070fd06735d471d75d5 | [
"Apache-2.0"
] | null | null | null | tests/sagemaker/test_deployment.py | dengzhihai/mlflow | 1ce3b5eadf6543878a62b070fd06735d471d75d5 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
import time
from collections import namedtuple
from unittest import mock
import boto3
import botocore
import numpy as np
from click.testing import CliRunner
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.pyfunc
import mlflow.sklearn
import mlflow.sagemaker as mfs
import mlflow.sagemaker.cli as mfscli
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.databricks_pb2 import (
ErrorCode,
RESOURCE_DOES_NOT_EXIST,
INVALID_PARAMETER_VALUE,
INTERNAL_ERROR,
)
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.sagemaker.mock import mock_sagemaker, Endpoint, EndpointOperation
# Lightweight record describing a model logged during a test: its artifact
# path, the MLflow run id, and the resolvable "runs:/" URI.
TrainedModel = namedtuple("TrainedModel", ["model_path", "run_id", "model_uri"])
@pytest.fixture
def pretrained_model():
    """Log a small scikit-learn logistic-regression model to MLflow and
    return a TrainedModel record (artifact path, run id, runs:/ URI)."""
    artifact_path = "model"
    with mlflow.start_run():
        features = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
        labels = np.array([0, 0, 1, 1, 1, 0])
        classifier = LogisticRegression(solver="lbfgs")
        classifier.fit(features, labels)
        mlflow.sklearn.log_model(classifier, artifact_path)
        active_run_id = mlflow.active_run().info.run_id
    return TrainedModel(
        artifact_path, active_run_id, "runs:/" + active_run_id + "/" + artifact_path
    )
@pytest.fixture
def sagemaker_client():
    """Return a boto3 SageMaker client pinned to the us-west-2 region."""
    region = "us-west-2"
    return boto3.client("sagemaker", region_name=region)
def get_sagemaker_backend(region_name):
    """Look up the mocked SageMaker backend registered for *region_name*."""
    backend_registry = mock_sagemaker.backends
    return backend_registry[region_name]
def mock_sagemaker_aws_services(fn):
    """Decorator that runs *fn* with ECR, IAM, S3, SageMaker, and STS all
    mocked (moto plus the local SageMaker mock), after pre-creating the
    ECR repository and IAM roles the deployment code expects."""
    from functools import wraps

    from moto import mock_s3, mock_ecr, mock_sts, mock_iam

    # Decorators apply bottom-up, so all five service mocks are active
    # before the wrapper body runs.
    @mock_ecr
    @mock_iam
    @mock_s3
    @mock_sagemaker
    @mock_sts
    @wraps(fn)
    def mock_wrapper(*args, **kwargs):
        # Create an ECR repository for the `mlflow-pyfunc` SageMaker docker image
        ecr_client = boto3.client("ecr", region_name="us-west-2")
        ecr_client.create_repository(repositoryName=mfs.DEFAULT_IMAGE_NAME)

        # Create the moto IAM role
        role_policy = """
        {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": "*",
                    "Resource": "*"
                }
            ]
        }
        """
        iam_client = boto3.client("iam", region_name="us-west-2")
        iam_client.create_role(RoleName="moto", AssumeRolePolicyDocument=role_policy)
        # Create IAM role to be asssumed (could be in another AWS account)
        iam_client.create_role(RoleName="assumed_role", AssumeRolePolicyDocument=role_policy)
        return fn(*args, **kwargs)

    return mock_wrapper
@mock_sagemaker_aws_services
def test_assume_role_and_get_credentials():
    """Assuming the mocked IAM role returns a credentials dict containing
    an access key id, secret access key, and session token.

    The exact lengths and prefixes asserted below presumably match moto's
    canned STS response — verify against the moto version in use.
    """
    assumed_role_credentials = mfs._assume_role_and_get_credentials(
        assume_role_arn="arn:aws:iam::123456789012:role/assumed_role"
    )

    assert "aws_access_key_id" in assumed_role_credentials.keys()
    assert "aws_secret_access_key" in assumed_role_credentials.keys()
    assert "aws_session_token" in assumed_role_credentials.keys()
    assert len(assumed_role_credentials["aws_session_token"]) == 356
    assert assumed_role_credentials["aws_session_token"].startswith("FQoGZXIvYXdzE")
    assert len(assumed_role_credentials["aws_access_key_id"]) == 20
    assert assumed_role_credentials["aws_access_key_id"].startswith("ASIA")
    assert len(assumed_role_credentials["aws_secret_access_key"]) == 40
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deployment_with_non_existent_assume_role_arn_raises_exception(pretrained_model):
    """Deploying with a role ARN that does not exist fails with a botocore
    ClientError whose message names the missing role."""
    match = (
        r"An error occurred \(NoSuchEntity\) when calling the GetRole "
        r"operation: Role non-existent-role-arn not found"
    )
    with pytest.raises(botocore.exceptions.ClientError, match=match):
        mfs.deploy(
            app_name="bad_assume_role_arn",
            model_uri=pretrained_model.model_uri,
            assume_role_arn="arn:aws:iam::123456789012:role/non-existent-role-arn",
        )
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deployment_with_assume_role_arn(pretrained_model, sagemaker_client):
    """Deploying while assuming the mocked IAM role succeeds and creates a
    SageMaker endpoint named after the application."""
    app_name = "deploy_with_assume_role_arn"
    mfs.deploy(
        app_name=app_name,
        model_uri=pretrained_model.model_uri,
        assume_role_arn="arn:aws:iam::123456789012:role/assumed_role",
    )

    assert app_name in [
        endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
    ]
@pytest.mark.large
def test_deployment_with_unsupported_flavor_raises_exception(pretrained_model):
    """An unrecognized deployment flavor is rejected with
    INVALID_PARAMETER_VALUE."""
    unsupported_flavor = "this is not a valid flavor"
    match = "The specified flavor: `this is not a valid flavor` is not supported for deployment"
    with pytest.raises(MlflowException, match=match) as exc:
        mfs.deploy(
            app_name="bad_flavor", model_uri=pretrained_model.model_uri, flavor=unsupported_flavor
        )

    assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
def test_deployment_with_missing_flavor_raises_exception(pretrained_model):
    """Requesting a flavor the logged model does not contain is rejected
    with RESOURCE_DOES_NOT_EXIST."""
    missing_flavor = "mleap"
    match = "The specified model does not contain the specified deployment flavor"
    with pytest.raises(MlflowException, match=match) as exc:
        mfs.deploy(
            app_name="missing-flavor", model_uri=pretrained_model.model_uri, flavor=missing_flavor
        )

    assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model):
    """A model whose configuration lists no deployable flavor is rejected
    with RESOURCE_DOES_NOT_EXIST."""
    logged_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
    model_config_path = os.path.join(logged_model_path, "MLmodel")
    model_config = Model.load(model_config_path)
    # Strip the python_function flavor from the local copy of the model
    # configuration so nothing deployable remains.
    del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
    model_config.save(path=model_config_path)

    match = "The specified model does not contain any of the supported flavors for deployment"
    with pytest.raises(MlflowException, match=match) as exc:
        mfs.deploy(app_name="missing-flavor", model_uri=logged_model_path, flavor=None)

    assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_validate_deployment_flavor_validates_python_function_flavor_successfully(pretrained_model):
    """_validate_deployment_flavor accepts python_function for a model that
    contains that flavor (no exception is raised)."""
    model_config_path = os.path.join(
        _download_artifact_from_uri(pretrained_model.model_uri), "MLmodel"
    )
    model_config = Model.load(model_config_path)
    mfs._validate_deployment_flavor(model_config=model_config, flavor=mlflow.pyfunc.FLAVOR_NAME)
@pytest.mark.large
def test_get_preferred_deployment_flavor_obtains_valid_flavor_from_model(pretrained_model):
    """The auto-selected deployment flavor is both supported by the
    SageMaker module and present in the model's configuration."""
    model_config_path = os.path.join(
        _download_artifact_from_uri(pretrained_model.model_uri), "MLmodel"
    )
    model_config = Model.load(model_config_path)
    selected_flavor = mfs._get_preferred_deployment_flavor(model_config=model_config)

    assert selected_flavor in mfs.SUPPORTED_DEPLOYMENT_FLAVORS
    assert selected_flavor in model_config.flavors
@pytest.mark.large
def test_attempting_to_deploy_in_asynchronous_mode_without_archiving_throws_exception(
    pretrained_model,
):
    """synchronous=False requires archive=True; otherwise deploy fails fast
    with INVALID_PARAMETER_VALUE."""
    with pytest.raises(MlflowException, match="Resources must be archived") as exc:
        mfs.deploy(
            app_name="test-app",
            model_uri=pretrained_model.model_uri,
            mode=mfs.DEPLOYMENT_MODE_CREATE,
            archive=False,
            synchronous=False,
        )

    assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_local(
pretrained_model, sagemaker_client
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_local(
pretrained_model, sagemaker_client
):
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
"deploy",
"-a",
app_name,
"-m",
pretrained_model.model_uri,
"--mode",
mfs.DEPLOYMENT_MODE_CREATE,
],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository("s3://{}".format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = "s3://{bucket_name}/{artifact_path}".format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path
)
app_name = "test-app"
mfs.deploy(app_name=app_name, model_uri=model_s3_uri, mode=mfs.DEPLOYMENT_MODE_CREATE)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
s3_client = boto3.client("s3", region_name=region_name)
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository("s3://{}".format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = "s3://{bucket_name}/{artifact_path}".format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path
)
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
["deploy", "-a", app_name, "-m", model_s3_uri, "--mode", mfs.DEPLOYMENT_MODE_CREATE],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploying_application_with_preexisting_name_in_create_mode_throws_exception(
pretrained_model,
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
with pytest.raises(
MlflowException, match="an application with the same name already exists"
) as exc:
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_synchronous_mode_waits_for_endpoint_creation_to_complete_before_returning(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency
)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) >= endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_create_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency
)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=False,
archive=True,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) < endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_CREATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_replace_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_update_latency
)
app_name = "test-app"
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True,
)
update_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
synchronous=False,
archive=True,
)
update_end_time = time.time()
assert (update_end_time - update_start_time) < endpoint_update_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_UPDATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_create_mode_throws_exception_after_endpoint_creation_fails(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_creation_latency)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_creations(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation is an endpoint creation, create the endpoint and set its status to
``Endpoint.STATUS_FAILED``.
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "CreateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.create_unsuccessful(
latency_seconds=endpoint_creation_latency
),
)
return result
with mock.patch(
"botocore.client.BaseClient._make_api_call", new=fail_endpoint_creations
), pytest.raises(MlflowException, match="deployment operation failed") as exc:
mfs.deploy(
app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
)
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_add_mode_adds_new_model_to_existing_endpoint(pretrained_model, sagemaker_client):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
models_added = 1
for _ in range(11):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False,
)
models_added += 1
endpoint_response = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name = endpoint_response["EndpointConfigName"]
endpoint_config_response = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name
)
production_variants = endpoint_config_response["ProductionVariants"]
assert len(production_variants) == models_added
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_model_removes_preexisting_models_from_endpoint(
pretrained_model, sagemaker_client
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_ADD
)
for _ in range(11):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False,
)
endpoint_response_before_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_before_replacement = endpoint_response_before_replacement[
"EndpointConfigName"
]
endpoint_config_response_before_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_before_replacement
)
production_variants_before_replacement = endpoint_config_response_before_replacement[
"ProductionVariants"
]
deployed_models_before_replacement = [
variant["ModelName"] for variant in production_variants_before_replacement
]
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=False,
)
endpoint_response_after_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_after_replacement = endpoint_response_after_replacement[
"EndpointConfigName"
]
endpoint_config_response_after_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_after_replacement
)
production_variants_after_replacement = endpoint_config_response_after_replacement[
"ProductionVariants"
]
deployed_models_after_replacement = [
variant["ModelName"] for variant in production_variants_after_replacement
]
assert len(deployed_models_after_replacement) == 1
assert all(
[
model_name not in deployed_models_after_replacement
for model_name in deployed_models_before_replacement
]
)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_throws_exception_after_endpoint_update_fails(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 5
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_updates(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation is an endpoint update, update the endpoint and set its status to
``Endpoint.STATUS_FAILED``.
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "UpdateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.update_unsuccessful(
latency_seconds=endpoint_update_latency
),
)
return result
with mock.patch(
"botocore.client.BaseClient._make_api_call", new=fail_endpoint_updates
), pytest.raises(MlflowException, match="deployment operation failed") as exc:
mfs.deploy(
app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
)
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_waits_for_endpoint_update_completion_before_deleting_resources(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
endpoint_config_name_before_replacement = sagemaker_client.describe_endpoint(
EndpointName=app_name
)["EndpointConfigName"]
boto_caller = botocore.client.BaseClient._make_api_call
update_start_time = time.time()
def validate_deletes(self, operation_name, operation_kwargs):
"""
Processes all boto3 client operations according to the following rules:
- If the operation deletes an S3 or SageMaker resource, ensure that the deletion was
initiated after the completion of the endpoint update
- Else, execute the client operation as normal
"""
result = boto_caller(self, operation_name, operation_kwargs)
if "Delete" in operation_name:
# Confirm that a successful endpoint update occurred prior to the invocation of this
# delete operation
endpoint_info = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_info["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
assert endpoint_info["EndpointConfigName"] != endpoint_config_name_before_replacement
assert time.time() - update_start_time >= endpoint_update_latency
return result
with mock.patch("botocore.client.BaseClient._make_api_call", new=validate_deletes):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=False,
)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_with_archiving_does_not_delete_resources(
pretrained_model, sagemaker_client
):
region_name = sagemaker_client.meta.region_name
sagemaker_backend = get_sagemaker_backend(region_name)
sagemaker_backend.set_endpoint_update_latency(5)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
object_names_before_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
endpoint_configs_before_replacement = [
config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
models_before_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=pretrained_model.run_id, artifact_path=pretrained_model.model_path
)
sk_model = mlflow.sklearn.load_model(model_uri=model_uri)
new_artifact_path = "model"
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model=sk_model, artifact_path=new_artifact_path)
new_model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=new_artifact_path
)
mfs.deploy(
app_name=app_name,
model_uri=new_model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=True,
)
object_names_after_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
endpoint_configs_after_replacement = [
config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
models_after_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
assert all(
[
object_name in object_names_after_replacement
for object_name in object_names_before_replacement
]
)
assert all(
[
endpoint_config in endpoint_configs_after_replacement
for endpoint_config in endpoint_configs_before_replacement
]
)
assert all([model in models_after_replacement for model in models_before_replacement])
| 37.861423 | 100 | 0.732845 | import os
import pytest
import time
from collections import namedtuple
from unittest import mock
import boto3
import botocore
import numpy as np
from click.testing import CliRunner
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.pyfunc
import mlflow.sklearn
import mlflow.sagemaker as mfs
import mlflow.sagemaker.cli as mfscli
from mlflow.exceptions import MlflowException
from mlflow.models import Model
from mlflow.protos.databricks_pb2 import (
ErrorCode,
RESOURCE_DOES_NOT_EXIST,
INVALID_PARAMETER_VALUE,
INTERNAL_ERROR,
)
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from tests.helper_functions import set_boto_credentials
from tests.sagemaker.mock import mock_sagemaker, Endpoint, EndpointOperation
TrainedModel = namedtuple("TrainedModel", ["model_path", "run_id", "model_uri"])
@pytest.fixture
def pretrained_model():
model_path = "model"
with mlflow.start_run():
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression(solver="lbfgs")
lr.fit(X, y)
mlflow.sklearn.log_model(lr, model_path)
run_id = mlflow.active_run().info.run_id
model_uri = "runs:/" + run_id + "/" + model_path
return TrainedModel(model_path, run_id, model_uri)
@pytest.fixture
def sagemaker_client():
return boto3.client("sagemaker", region_name="us-west-2")
def get_sagemaker_backend(region_name):
return mock_sagemaker.backends[region_name]
def mock_sagemaker_aws_services(fn):
from functools import wraps
from moto import mock_s3, mock_ecr, mock_sts, mock_iam
@mock_ecr
@mock_iam
@mock_s3
@mock_sagemaker
@mock_sts
@wraps(fn)
def mock_wrapper(*args, **kwargs):
ecr_client = boto3.client("ecr", region_name="us-west-2")
ecr_client.create_repository(repositoryName=mfs.DEFAULT_IMAGE_NAME)
role_policy = """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "*",
"Resource": "*"
}
]
}
"""
iam_client = boto3.client("iam", region_name="us-west-2")
iam_client.create_role(RoleName="moto", AssumeRolePolicyDocument=role_policy)
iam_client.create_role(RoleName="assumed_role", AssumeRolePolicyDocument=role_policy)
return fn(*args, **kwargs)
return mock_wrapper
@mock_sagemaker_aws_services
def test_assume_role_and_get_credentials():
assumed_role_credentials = mfs._assume_role_and_get_credentials(
assume_role_arn="arn:aws:iam::123456789012:role/assumed_role"
)
assert "aws_access_key_id" in assumed_role_credentials.keys()
assert "aws_secret_access_key" in assumed_role_credentials.keys()
assert "aws_session_token" in assumed_role_credentials.keys()
assert len(assumed_role_credentials["aws_session_token"]) == 356
assert assumed_role_credentials["aws_session_token"].startswith("FQoGZXIvYXdzE")
assert len(assumed_role_credentials["aws_access_key_id"]) == 20
assert assumed_role_credentials["aws_access_key_id"].startswith("ASIA")
assert len(assumed_role_credentials["aws_secret_access_key"]) == 40
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deployment_with_non_existent_assume_role_arn_raises_exception(pretrained_model):
match = (
r"An error occurred \(NoSuchEntity\) when calling the GetRole "
r"operation: Role non-existent-role-arn not found"
)
with pytest.raises(botocore.exceptions.ClientError, match=match):
mfs.deploy(
app_name="bad_assume_role_arn",
model_uri=pretrained_model.model_uri,
assume_role_arn="arn:aws:iam::123456789012:role/non-existent-role-arn",
)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deployment_with_assume_role_arn(pretrained_model, sagemaker_client):
app_name = "deploy_with_assume_role_arn"
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
assume_role_arn="arn:aws:iam::123456789012:role/assumed_role",
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
@pytest.mark.large
def test_deployment_with_unsupported_flavor_raises_exception(pretrained_model):
unsupported_flavor = "this is not a valid flavor"
match = "The specified flavor: `this is not a valid flavor` is not supported for deployment"
with pytest.raises(MlflowException, match=match) as exc:
mfs.deploy(
app_name="bad_flavor", model_uri=pretrained_model.model_uri, flavor=unsupported_flavor
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
def test_deployment_with_missing_flavor_raises_exception(pretrained_model):
missing_flavor = "mleap"
match = "The specified model does not contain the specified deployment flavor"
with pytest.raises(MlflowException, match=match) as exc:
mfs.deploy(
app_name="missing-flavor", model_uri=pretrained_model.model_uri, flavor=missing_flavor
)
assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_deployment_of_model_with_no_supported_flavors_raises_exception(pretrained_model):
logged_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
model_config_path = os.path.join(logged_model_path, "MLmodel")
model_config = Model.load(model_config_path)
del model_config.flavors[mlflow.pyfunc.FLAVOR_NAME]
model_config.save(path=model_config_path)
match = "The specified model does not contain any of the supported flavors for deployment"
with pytest.raises(MlflowException, match=match) as exc:
mfs.deploy(app_name="missing-flavor", model_uri=logged_model_path, flavor=None)
assert exc.value.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST)
@pytest.mark.large
def test_validate_deployment_flavor_validates_python_function_flavor_successfully(pretrained_model):
model_config_path = os.path.join(
_download_artifact_from_uri(pretrained_model.model_uri), "MLmodel"
)
model_config = Model.load(model_config_path)
mfs._validate_deployment_flavor(model_config=model_config, flavor=mlflow.pyfunc.FLAVOR_NAME)
@pytest.mark.large
def test_get_preferred_deployment_flavor_obtains_valid_flavor_from_model(pretrained_model):
model_config_path = os.path.join(
_download_artifact_from_uri(pretrained_model.model_uri), "MLmodel"
)
model_config = Model.load(model_config_path)
selected_flavor = mfs._get_preferred_deployment_flavor(model_config=model_config)
assert selected_flavor in mfs.SUPPORTED_DEPLOYMENT_FLAVORS
assert selected_flavor in model_config.flavors
@pytest.mark.large
def test_attempting_to_deploy_in_asynchronous_mode_without_archiving_throws_exception(
pretrained_model,
):
with pytest.raises(MlflowException, match="Resources must be archived") as exc:
mfs.deploy(
app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
archive=False,
synchronous=False,
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_local(
pretrained_model, sagemaker_client
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_local(
pretrained_model, sagemaker_client
):
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
[
"deploy",
"-a",
app_name,
"-m",
pretrained_model.model_uri,
"--mode",
mfs.DEPLOYMENT_MODE_CREATE,
],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository("s3://{}".format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = "s3://{bucket_name}/{artifact_path}".format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path
)
app_name = "test-app"
mfs.deploy(app_name=app_name, model_uri=model_s3_uri, mode=mfs.DEPLOYMENT_MODE_CREATE)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
s3_client = boto3.client("s3", region_name=region_name)
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_cli_creates_sagemaker_and_s3_resources_with_expected_names_and_env_from_s3(
pretrained_model, sagemaker_client
):
local_model_path = _download_artifact_from_uri(pretrained_model.model_uri)
artifact_path = "model"
region_name = sagemaker_client.meta.region_name
default_bucket = mfs._get_default_s3_bucket(region_name)
s3_artifact_repo = S3ArtifactRepository("s3://{}".format(default_bucket))
s3_artifact_repo.log_artifacts(local_model_path, artifact_path=artifact_path)
model_s3_uri = "s3://{bucket_name}/{artifact_path}".format(
bucket_name=default_bucket, artifact_path=pretrained_model.model_path
)
app_name = "test-app"
result = CliRunner(env={"LC_ALL": "en_US.UTF-8", "LANG": "en_US.UTF-8"}).invoke(
mfscli.commands,
["deploy", "-a", app_name, "-m", model_s3_uri, "--mode", mfs.DEPLOYMENT_MODE_CREATE],
)
assert result.exit_code == 0
region_name = sagemaker_client.meta.region_name
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_production_variants = endpoint_description["ProductionVariants"]
assert len(endpoint_production_variants) == 1
model_name = endpoint_production_variants[0]["VariantName"]
assert model_name in [model["ModelName"] for model in sagemaker_client.list_models()["Models"]]
object_names = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
assert any([model_name in object_name for object_name in object_names])
assert any(
[
app_name in config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
)
assert app_name in [
endpoint["EndpointName"] for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
]
model_environment = sagemaker_client.describe_model(ModelName=model_name)["PrimaryContainer"][
"Environment"
]
assert model_environment == {
"MLFLOW_DEPLOYMENT_FLAVOR_NAME": "python_function",
"SERVING_ENVIRONMENT": "SageMaker",
}
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploying_application_with_preexisting_name_in_create_mode_throws_exception(
pretrained_model,
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
with pytest.raises(
MlflowException, match="an application with the same name already exists"
) as exc:
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
assert exc.value.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_synchronous_mode_waits_for_endpoint_creation_to_complete_before_returning(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency
)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) >= endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_create_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_creation_latency
)
app_name = "test-app"
deployment_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=False,
archive=True,
)
deployment_end_time = time.time()
assert (deployment_end_time - deployment_start_time) < endpoint_creation_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_CREATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_replace_in_asynchronous_mode_returns_before_endpoint_creation_completes(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 10
get_sagemaker_backend(sagemaker_client.meta.region_name).set_endpoint_update_latency(
endpoint_update_latency
)
app_name = "test-app"
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
synchronous=True,
)
update_start_time = time.time()
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
synchronous=False,
archive=True,
)
update_end_time = time.time()
assert (update_end_time - update_start_time) < endpoint_update_latency
endpoint_description = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_description["EndpointStatus"] == Endpoint.STATUS_UPDATING
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_create_mode_throws_exception_after_endpoint_creation_fails(
pretrained_model, sagemaker_client
):
endpoint_creation_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_creation_latency)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_creations(self, operation_name, operation_kwargs):
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "CreateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.create_unsuccessful(
latency_seconds=endpoint_creation_latency
),
)
return result
with mock.patch(
"botocore.client.BaseClient._make_api_call", new=fail_endpoint_creations
), pytest.raises(MlflowException, match="deployment operation failed") as exc:
mfs.deploy(
app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_CREATE,
)
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_add_mode_adds_new_model_to_existing_endpoint(pretrained_model, sagemaker_client):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
models_added = 1
for _ in range(11):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False,
)
models_added += 1
endpoint_response = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name = endpoint_response["EndpointConfigName"]
endpoint_config_response = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name
)
production_variants = endpoint_config_response["ProductionVariants"]
assert len(production_variants) == models_added
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_model_removes_preexisting_models_from_endpoint(
pretrained_model, sagemaker_client
):
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_ADD
)
for _ in range(11):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_ADD,
archive=True,
synchronous=False,
)
endpoint_response_before_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_before_replacement = endpoint_response_before_replacement[
"EndpointConfigName"
]
endpoint_config_response_before_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_before_replacement
)
production_variants_before_replacement = endpoint_config_response_before_replacement[
"ProductionVariants"
]
deployed_models_before_replacement = [
variant["ModelName"] for variant in production_variants_before_replacement
]
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=False,
)
endpoint_response_after_replacement = sagemaker_client.describe_endpoint(EndpointName=app_name)
endpoint_config_name_after_replacement = endpoint_response_after_replacement[
"EndpointConfigName"
]
endpoint_config_response_after_replacement = sagemaker_client.describe_endpoint_config(
EndpointConfigName=endpoint_config_name_after_replacement
)
production_variants_after_replacement = endpoint_config_response_after_replacement[
"ProductionVariants"
]
deployed_models_after_replacement = [
variant["ModelName"] for variant in production_variants_after_replacement
]
assert len(deployed_models_after_replacement) == 1
assert all(
[
model_name not in deployed_models_after_replacement
for model_name in deployed_models_before_replacement
]
)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_throws_exception_after_endpoint_update_fails(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 5
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
boto_caller = botocore.client.BaseClient._make_api_call
def fail_endpoint_updates(self, operation_name, operation_kwargs):
result = boto_caller(self, operation_name, operation_kwargs)
if operation_name == "UpdateEndpoint":
endpoint_name = operation_kwargs["EndpointName"]
sagemaker_backend.set_endpoint_latest_operation(
endpoint_name=endpoint_name,
operation=EndpointOperation.update_unsuccessful(
latency_seconds=endpoint_update_latency
),
)
return result
with mock.patch(
"botocore.client.BaseClient._make_api_call", new=fail_endpoint_updates
), pytest.raises(MlflowException, match="deployment operation failed") as exc:
mfs.deploy(
app_name="test-app",
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
)
assert exc.value.error_code == ErrorCode.Name(INTERNAL_ERROR)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_waits_for_endpoint_update_completion_before_deleting_resources(
pretrained_model, sagemaker_client
):
endpoint_update_latency = 10
sagemaker_backend = get_sagemaker_backend(sagemaker_client.meta.region_name)
sagemaker_backend.set_endpoint_update_latency(endpoint_update_latency)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
endpoint_config_name_before_replacement = sagemaker_client.describe_endpoint(
EndpointName=app_name
)["EndpointConfigName"]
boto_caller = botocore.client.BaseClient._make_api_call
update_start_time = time.time()
def validate_deletes(self, operation_name, operation_kwargs):
result = boto_caller(self, operation_name, operation_kwargs)
if "Delete" in operation_name:
endpoint_info = sagemaker_client.describe_endpoint(EndpointName=app_name)
assert endpoint_info["EndpointStatus"] == Endpoint.STATUS_IN_SERVICE
assert endpoint_info["EndpointConfigName"] != endpoint_config_name_before_replacement
assert time.time() - update_start_time >= endpoint_update_latency
return result
with mock.patch("botocore.client.BaseClient._make_api_call", new=validate_deletes):
mfs.deploy(
app_name=app_name,
model_uri=pretrained_model.model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=False,
)
@pytest.mark.large
@mock_sagemaker_aws_services
def test_deploy_in_replace_mode_with_archiving_does_not_delete_resources(
pretrained_model, sagemaker_client
):
region_name = sagemaker_client.meta.region_name
sagemaker_backend = get_sagemaker_backend(region_name)
sagemaker_backend.set_endpoint_update_latency(5)
app_name = "test-app"
mfs.deploy(
app_name=app_name, model_uri=pretrained_model.model_uri, mode=mfs.DEPLOYMENT_MODE_CREATE
)
s3_client = boto3.client("s3", region_name=region_name)
default_bucket = mfs._get_default_s3_bucket(region_name)
object_names_before_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
endpoint_configs_before_replacement = [
config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
models_before_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=pretrained_model.run_id, artifact_path=pretrained_model.model_path
)
sk_model = mlflow.sklearn.load_model(model_uri=model_uri)
new_artifact_path = "model"
with mlflow.start_run():
mlflow.sklearn.log_model(sk_model=sk_model, artifact_path=new_artifact_path)
new_model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=new_artifact_path
)
mfs.deploy(
app_name=app_name,
model_uri=new_model_uri,
mode=mfs.DEPLOYMENT_MODE_REPLACE,
archive=True,
synchronous=True,
)
object_names_after_replacement = [
entry["Key"] for entry in s3_client.list_objects(Bucket=default_bucket)["Contents"]
]
endpoint_configs_after_replacement = [
config["EndpointConfigName"]
for config in sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
]
models_after_replacement = [
model["ModelName"] for model in sagemaker_client.list_models()["Models"]
]
assert all(
[
object_name in object_names_after_replacement
for object_name in object_names_before_replacement
]
)
assert all(
[
endpoint_config in endpoint_configs_after_replacement
for endpoint_config in endpoint_configs_before_replacement
]
)
assert all([model in models_after_replacement for model in models_before_replacement])
| true | true |
1c310c2fa7447febf5131a6eb41de0b79a189580 | 2,587 | py | Python | tools/xf-batch.py | vanlink/xf-traffic-generator | 32d10b1d19af413acbd498f9ffe8c399aa5b3b49 | [
"Apache-2.0"
] | null | null | null | tools/xf-batch.py | vanlink/xf-traffic-generator | 32d10b1d19af413acbd498f9ffe8c399aa5b3b49 | [
"Apache-2.0"
] | null | null | null | tools/xf-batch.py | vanlink/xf-traffic-generator | 32d10b1d19af413acbd498f9ffe8c399aa5b3b49 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import sys
import getopt
import traceback
import json
import time
import copy
import pprint
import requests
from libtools import *
UNIQUE = None
TO_KILL = False
TO_LWIP = False
TO_GENERATOR = False
TO_DISPATCH = False
TO_STREAM = False
TO_INTERFACE = False
# ------------------------------ main ------------------------------
if __name__ != '__main__':
sys.exit(0)
try:
kvs, leftargs = getopt.getopt(sys.argv[1:],
"u:klgdsi", [
"unique=",
"kill",
"lwip",
"generator",
"dispatch",
"stream",
"interface",
]
)
for k, v in kvs:
if k in ("-u", "--unique"):
UNIQUE = v
elif k in ("-k", "--kill"):
TO_KILL = True
elif k in ("-l", "--lwip"):
TO_LWIP = True
elif k in ("-g", "--generator"):
TO_GENERATOR = True
elif k in ("-d", "--dispatch"):
TO_DISPATCH = True
elif k in ("-s", "--stream"):
TO_STREAM = True
elif k in ("-i", "--interface"):
TO_INTERFACE = True
except Exception as e:
print("Invalid args.")
sys.exit(-1)
if TO_KILL:
if not UNIQUE:
print("No unique ID.")
sys.exit(-1)
pids = get_unique_pids(UNIQUE)
if not pids:
print("No unique found.")
sys.exit(-1)
for i in pids:
os.system("kill -9 %s" % (i))
pids = get_unique_pids(UNIQUE)
if pids:
print("Kill fail %s." % (pids))
print("Unique ID [%s] killed." % (UNIQUE))
sys.exit(0)
if TO_LWIP or TO_GENERATOR or TO_DISPATCH or TO_STREAM or TO_INTERFACE:
if TO_LWIP:
url = "get_stat_lwip"
elif TO_GENERATOR:
url = "get_stat_generator"
elif TO_DISPATCH:
url = "get_stat_dispatch"
elif TO_STREAM:
url = "get_stat_stream"
elif TO_INTERFACE:
url = "get_interface"
lport = get_unique_lport(UNIQUE)
if not lport:
print("Unique ID [%s] local port not found." % (UNIQUE))
sys.exit(-1)
cmd = 'curl http://127.0.0.1:%s/%s' % (lport, url)
(ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=3)
obj = json.loads(outstr)
s = json.dumps(NonzeroDict(obj), indent=2)
print(s)
| 26.670103 | 79 | 0.478933 |
import os
import sys
import getopt
import traceback
import json
import time
import copy
import pprint
import requests
from libtools import *
UNIQUE = None
TO_KILL = False
TO_LWIP = False
TO_GENERATOR = False
TO_DISPATCH = False
TO_STREAM = False
TO_INTERFACE = False
if __name__ != '__main__':
sys.exit(0)
try:
kvs, leftargs = getopt.getopt(sys.argv[1:],
"u:klgdsi", [
"unique=",
"kill",
"lwip",
"generator",
"dispatch",
"stream",
"interface",
]
)
for k, v in kvs:
if k in ("-u", "--unique"):
UNIQUE = v
elif k in ("-k", "--kill"):
TO_KILL = True
elif k in ("-l", "--lwip"):
TO_LWIP = True
elif k in ("-g", "--generator"):
TO_GENERATOR = True
elif k in ("-d", "--dispatch"):
TO_DISPATCH = True
elif k in ("-s", "--stream"):
TO_STREAM = True
elif k in ("-i", "--interface"):
TO_INTERFACE = True
except Exception as e:
print("Invalid args.")
sys.exit(-1)
if TO_KILL:
if not UNIQUE:
print("No unique ID.")
sys.exit(-1)
pids = get_unique_pids(UNIQUE)
if not pids:
print("No unique found.")
sys.exit(-1)
for i in pids:
os.system("kill -9 %s" % (i))
pids = get_unique_pids(UNIQUE)
if pids:
print("Kill fail %s." % (pids))
print("Unique ID [%s] killed." % (UNIQUE))
sys.exit(0)
if TO_LWIP or TO_GENERATOR or TO_DISPATCH or TO_STREAM or TO_INTERFACE:
if TO_LWIP:
url = "get_stat_lwip"
elif TO_GENERATOR:
url = "get_stat_generator"
elif TO_DISPATCH:
url = "get_stat_dispatch"
elif TO_STREAM:
url = "get_stat_stream"
elif TO_INTERFACE:
url = "get_interface"
lport = get_unique_lport(UNIQUE)
if not lport:
print("Unique ID [%s] local port not found." % (UNIQUE))
sys.exit(-1)
cmd = 'curl http://127.0.0.1:%s/%s' % (lport, url)
(ret, outstr, errstr) = run_cmd_wrapper(cmd, check_interval=0.1, timeout=3)
obj = json.loads(outstr)
s = json.dumps(NonzeroDict(obj), indent=2)
print(s)
| true | true |
1c310c6664d26c85ae161d02b9015c70e433ed29 | 3,494 | py | Python | coil_model_warmstart.py | havefun28/scenario_runner | d24e9563179b7a345705c53e7da877b42736acf2 | [
"MIT"
] | 1 | 2020-10-09T07:25:36.000Z | 2020-10-09T07:25:36.000Z | coil_model_warmstart.py | RuihanGao/scenario_runner | d24e9563179b7a345705c53e7da877b42736acf2 | [
"MIT"
] | null | null | null | coil_model_warmstart.py | RuihanGao/scenario_runner | d24e9563179b7a345705c53e7da877b42736acf2 | [
"MIT"
] | null | null | null | import os, sys
sys.path.append('/home/ruihan/coiltraine/')
import yaml
import torch
from network.models.coil_icra import CoILICRA
from coilutils import AttributeDict
# from attribute_dict import AttributeDict
# # Sample from PyTorch docs: https://pytorch.org/tutorials/beginner/saving_loading_models.html#warmstarting-model-using-parameters-from-a-different-model
# # save
# torch.save(modelA.state_dict(), PATH)
# # load
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# modelB = TheModelBClass(*args, **kwargs)
# modelB.load_state_dict(torch.load(PATH, map_location = device), strict=False)
# # Sample load a pretrained model
# load part of the pre trained model
# save
# torch.save(pre_model.state_dict(), PATH)
# # load
# pretrained_dict = torch.load(PATH)
# model = TheModelClass(*args, **kwargs)
# model_dict = model.state_dict()
# # 1. filter out unnecessary keys
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# # 2. overwrite entries in the existing state dict
# model_dict.update(pretrained_dict)
# # 3. load the new state dict
# model.load_state_dict(model_dict)
torch.set_default_dtype(torch.float32)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# read yaml file
yaml_filename = 'coil_configs.yaml'
with open(yaml_filename, 'r') as f:
# TODO: combine all know configuraitons into one file and load it into a dict
yaml_file = yaml.load(f, Loader=yaml.FullLoader)
yaml_cfg = AttributeDict(yaml_file)
# # load checkpoint dict
# checkpoint = torch.load(os.path.join('/home/ruihan/scenario_runner/models/CoIL/'+str(180000)+'.pth'))
# # load model
# model = CoILModel(yaml_cfg.MODEL_TYPE, yaml_cfg.MODEL_CONFIGURATION)
# model.cuda()
# checkpoint_iteration = checkpoint['iteration']
# print("Pretrained CoIL loaded ", checkpoint_iteration)
# model.load_state_dict(checkpoint['state_dict'])
# model.eval()
# torch.save(model.state_dict(), '/home/ruihan/scenario_runner/models/CoIL/CoIL_180000.pth' )
print("load empty CoIlModel")
modelB = CoILICRA(yaml_cfg.MODEL_CONFIGURATION)
for param_tensor in modelB.state_dict():
print(param_tensor, "\t", modelB.state_dict()[param_tensor].size())
param_tensor = 'branches.branched_modules.0.layers.0.0.weight'
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
print("try to copy pretrained model to B")
modelB.load_state_dict(torch.load('models/CoIL/CoIL_180000.pth'))
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
modelB.eval()
# TODO: The structure is specified in coil_icra.
# check which module you want to reuse and create your own.
# then load the state_dict with `strict=False`
class FC_coil_cut(nn.Module):
    """Fully-connected head copied from CoIL, adapted as an MLP controller.

    Architecture: Linear(nx, nh) -> Dropout -> ReLU -> Linear(nh, ny) -> Dropout,
    with a final sigmoid squashing every output into (0, 1).
    """
    def __init__(self, nx=106, ny=2, nh=53, p=0.2):
        """
        original coil (512-256-3)

        :param nx: input feature dimension (latent embedding size, dim_z = 106)
        :param ny: output dimension (number of control commands)
        :param nh: hidden layer width
        :param p: dropout probability
        """
        # BUG FIX: the original called super(FC_coil, self).__init__(), but no
        # class named FC_coil exists in this module -> NameError on instantiation.
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(nx, nh),
            # BUG FIX: nn.Dropout (not Dropout2d) — inputs here are flat
            # (batch, features) vectors, so channel-wise 2D dropout is not
            # meaningful; element-wise dropout is the intended behaviour.
            nn.Dropout(p=p),
            nn.ReLU(),
            nn.Linear(nh, ny),
            nn.Dropout(p=p)
        )
        self.sig = nn.Sigmoid()
        # Kept for compatibility with the commented-out per-command heads in forward().
        self.tanh = nn.Tanh()
    def forward(self, x):
        """Flatten the input per sample, run the MLP, and squash outputs to (0, 1)."""
        x = x.view(x.size(0), -1)
        x = self.layers(x)
        # throttle = self.sig(x[:, 0]).view(x.shape[0],-1)
        # steer = self.tanh(x[:, 1]).view(x.shape[0],-1)
        # brake = self.sig(x[:, 2]).view(x.shape[0],-1)
        # return torch.cat([throttle, steer, brake], dim=1)
        return self.sig(x)
sys.path.append('/home/ruihan/coiltraine/')
import yaml
import torch
from network.models.coil_icra import CoILICRA
from coilutils import AttributeDict
modelB.state_dict():
print(param_tensor, "\t", modelB.state_dict()[param_tensor].size())
param_tensor = 'branches.branched_modules.0.layers.0.0.weight'
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
print("try to copy pretrained model to B")
modelB.load_state_dict(torch.load('models/CoIL/CoIL_180000.pth'))
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
modelB.eval()
class FC_coil_cut(nn.Module):
def __init__(self, nx=106, ny=2, nh=53, p=0.2):
super(FC_coil, self).__init__()
self.layers = nn.Sequential(
nn.Linear(nx, nh),
nn.Dropout2d(p=p),
nn.ReLU(),
nn.Linear(nh, ny),
nn.Dropout2d(p=p)
)
self.sig = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.layers(x)
return self.sig(x) | true | true |
1c310cf5f4de81d0af84be7fff6b32545eb07092 | 408 | py | Python | src/mpl_styles/__init__.py | icaros-usc/dqd-rl | 83e3da62df37b45c4b8fc549c07f566797b5f685 | [
"MIT"
] | 6 | 2022-02-09T05:35:37.000Z | 2022-03-12T11:54:59.000Z | src/mpl_styles/__init__.py | icaros-usc/dqd-rl | 83e3da62df37b45c4b8fc549c07f566797b5f685 | [
"MIT"
] | null | null | null | src/mpl_styles/__init__.py | icaros-usc/dqd-rl | 83e3da62df37b45c4b8fc549c07f566797b5f685 | [
"MIT"
] | null | null | null | """Styles for Matplotlib."""
from matplotlib.colors import ListedColormap
# Qualitative colormap that (should) be color-blind friendly. See
# https://personal.sron.nl/~pault/ for more accessible color schemes.
QUALITATIVE_COLORS = (
    '#77AADD',  # light blue
    '#EE8866',  # orange
    '#44BB99',  # mint green
    '#FFAABB',  # pink
    '#99DDFF',  # light cyan
    '#BBCC33',  # pear (yellow-green)
    '#EEDD88',  # light yellow
    '#AAAA00',  # olive
)
# Matplotlib colormap wrapping the palette so it can be passed wherever a cmap is expected.
QualitativeMap = ListedColormap(QUALITATIVE_COLORS)
| 24 | 69 | 0.676471 | from matplotlib.colors import ListedColormap
QUALITATIVE_COLORS = (
'#77AADD',
'#EE8866',
'#44BB99',
'#FFAABB',
'#99DDFF',
'#BBCC33',
'#EEDD88',
'#AAAA00',
)
QualitativeMap = ListedColormap(QUALITATIVE_COLORS)
| true | true |
1c310defa615980cda6cafd978363a331a7e0346 | 4,385 | py | Python | dev-utils/_windows/msys/msys.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | dev-utils/_windows/msys/msys.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | dev-utils/_windows/msys/msys.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | import io
import info
import shells
from CraftOS.osutils import OsUtils
from Package.MaybeVirtualPackageBase import *
class subinfo(info.infoclass):
    """Craft blueprint metadata for msys: download target, dependencies, shim and update logic."""
    def setTargets(self):
        """Register the msys2 base tarball as the single ("base") install target."""
        #as updates are applied with msys and not by craft don't ever change the name of the target, its a bad idea...
        self.targets["base"] = f"https://github.com/msys2/msys2-installer/releases/download/2020-05-22/msys2-base-x86_64-20200522.tar.xz"
        self.targetInstSrc["base"] = "msys64"
        self.targetInstallPath["base"] = "msys"
        self.targetDigests["base"] = (['deec23a772774d874b557bcc5dfb2a8a115224fb6a919f19062af108b6bf4735'], CraftHash.HashAlgorithm.SHA256)
        self.defaultTarget = "base"
    def setDependencies(self):
        """Declare the runtime dependencies required before msys can be used."""
        self.runtimeDependencies["virtual/bin-base"] = None
        self.runtimeDependencies["dev-utils/python3"] = None
    def msysInstallShim(self, installDir):
        """Create a dev-utils/bin/msys.exe shim that launches the Craft bash shell via python3."""
        return utils.createShim(os.path.join(installDir, "dev-utils", "bin", "msys.exe"),
                                os.path.join(installDir, "dev-utils", "bin", "python3.exe"),
                                args=[os.path.join(CraftStandardDirs.craftBin(), "shells.py")])
    def updateMsys(self):
        """Update the unpacked msys via pacman; return False on any failure."""
        msysDir = CraftCore.settings.get("Paths", "Msys", os.path.join(CraftStandardDirs.craftRoot(), "msys"))
        shell = shells.BashShell()
        # Newer pacman replaced --force with --overwrite; probe the -S help output to pick the right flag.
        useOverwrite = CraftCore.cache.checkCommandOutputFor(os.path.join(msysDir, "usr/bin", "pacman.exe"), "--overwrite", "-Sh")
        # force was replace by overwrite
        overwrite = "--overwrite='*'" if useOverwrite else "--force"
        def stopProcesses():
            # msys files can't be replaced while any msys process still holds them open.
            return OsUtils.killProcess("*", msysDir)
        def queryForUpdate():
            # True when pacman -Qu still lists pending upgrades after a database sync.
            out = io.BytesIO()
            if not shell.execute(".", "pacman", f"-Sy --noconfirm {overwrite}"):
                raise Exception()
            # NOTE(review): relies on `subprocess` being imported at module level — not visible here; confirm.
            shell.execute(".", "pacman", "-Qu --noconfirm", stdout=out, stderr=subprocess.PIPE)
            out = out.getvalue()
            return out != b""
        # start and restart msys before first use
        if not (shell.execute(".", "echo", "Init update") and
                stopProcesses() and
                shell.execute(".", "pacman-key", "--init") and
                shell.execute(".", "pacman-key", "--populate")):
            return False
        try:
            # max 10 tries
            for _ in range(10):
                if not queryForUpdate():
                    break
                # might return 1 on core updates...
                shell.execute(".", "pacman", f"-Su --noconfirm {overwrite} --ask 20")
                if not stopProcesses():
                    return False
        except Exception as e:
            CraftCore.log.error(e, exc_info=e)
            return False
        if not (shell.execute(".", "pacman", f"-S base-devel msys/binutils --noconfirm {overwrite} --needed") and
                stopProcesses()):
            return False
        return utils.system("autorebase.bat", cwd=msysDir)
from Package.BinaryPackageBase import *
class MsysPackage(BinaryPackageBase):
    """Installs msys as a regular Craft binary package and updates it after merge."""
    def __init__(self):
        BinaryPackageBase.__init__(self)
    def postInstall(self):
        # Add the msys.exe shim into the freshly created image directory.
        return self.subinfo.msysInstallShim(self.imageDir())
    def postQmerge(self):
        # Run the pacman update cycle once the package is merged into the Craft root.
        return self.subinfo.updateMsys()
class VirtualPackage(VirtualPackageBase):
    """Used when msys already exists (previously installed or externally managed):
    no payload is extracted, only the shim is (re)created and pacman updates run."""
    def __init__(self):
        VirtualPackageBase.__init__(self)
    def install(self):
        if not VirtualPackageBase.install(self):
            return False
        # Still refresh the shim and bring the existing msys up to date.
        return self.subinfo.msysInstallShim(self.imageDir()) and self.subinfo.updateMsys()
    def qmerge(self):
        # Nothing to merge when the package is already present.
        if self.package.isInstalled:
            return True
        return super().qmerge()
class Package(MaybeVirtualPackageBase):
    """Dispatches between a real msys install (MsysPackage) and a virtual one
    (VirtualPackage) depending on configuration and current install state."""
    def __init__(self):
        # NOTE(review): the name reads inverted — True means no [Paths]Msys override
        # is configured in the Craft settings; confirm against MaybeVirtualPackageBase usage.
        useExternalMsys = ("Paths", "Msys") not in CraftCore.settings
        self.skipCondition = useExternalMsys and not CraftPackageObject.get("dev-utils/msys").isInstalled
        MaybeVirtualPackageBase.__init__(self, condition=self.skipCondition, classA=MsysPackage, classB=VirtualPackage)
        if not useExternalMsys:
            # override the install method
            def install():
                CraftCore.log.info(f"Using manually installed msys {CraftStandardDirs.msysDir()}")
                return self.baseClass.install(self)
            setattr(self, "install", install)
| 38.80531 | 140 | 0.620525 | import io
import info
import shells
from CraftOS.osutils import OsUtils
from Package.MaybeVirtualPackageBase import *
class subinfo(info.infoclass):
def setTargets(self):
self.targets["base"] = f"https://github.com/msys2/msys2-installer/releases/download/2020-05-22/msys2-base-x86_64-20200522.tar.xz"
self.targetInstSrc["base"] = "msys64"
self.targetInstallPath["base"] = "msys"
self.targetDigests["base"] = (['deec23a772774d874b557bcc5dfb2a8a115224fb6a919f19062af108b6bf4735'], CraftHash.HashAlgorithm.SHA256)
self.defaultTarget = "base"
def setDependencies(self):
self.runtimeDependencies["virtual/bin-base"] = None
self.runtimeDependencies["dev-utils/python3"] = None
def msysInstallShim(self, installDir):
return utils.createShim(os.path.join(installDir, "dev-utils", "bin", "msys.exe"),
os.path.join(installDir, "dev-utils", "bin", "python3.exe"),
args=[os.path.join(CraftStandardDirs.craftBin(), "shells.py")])
def updateMsys(self):
msysDir = CraftCore.settings.get("Paths", "Msys", os.path.join(CraftStandardDirs.craftRoot(), "msys"))
shell = shells.BashShell()
useOverwrite = CraftCore.cache.checkCommandOutputFor(os.path.join(msysDir, "usr/bin", "pacman.exe"), "--overwrite", "-Sh")
# force was replace by overwrite
overwrite = "--overwrite='*'" if useOverwrite else "--force"
def stopProcesses():
return OsUtils.killProcess("*", msysDir)
def queryForUpdate():
out = io.BytesIO()
if not shell.execute(".", "pacman", f"-Sy --noconfirm {overwrite}"):
raise Exception()
shell.execute(".", "pacman", "-Qu --noconfirm", stdout=out, stderr=subprocess.PIPE)
out = out.getvalue()
return out != b""
# start and restart msys before first use
if not (shell.execute(".", "echo", "Init update") and
stopProcesses() and
shell.execute(".", "pacman-key", "--init") and
shell.execute(".", "pacman-key", "--populate")):
return False
try:
# max 10 tries
for _ in range(10):
if not queryForUpdate():
break
# might return 1 on core updates...
shell.execute(".", "pacman", f"-Su --noconfirm {overwrite} --ask 20")
if not stopProcesses():
return False
except Exception as e:
CraftCore.log.error(e, exc_info=e)
return False
if not (shell.execute(".", "pacman", f"-S base-devel msys/binutils --noconfirm {overwrite} --needed") and
stopProcesses()):
return False
return utils.system("autorebase.bat", cwd=msysDir)
from Package.BinaryPackageBase import *
class MsysPackage(BinaryPackageBase):
def __init__(self):
BinaryPackageBase.__init__(self)
def postInstall(self):
return self.subinfo.msysInstallShim(self.imageDir())
def postQmerge(self):
return self.subinfo.updateMsys()
class VirtualPackage(VirtualPackageBase):
def __init__(self):
VirtualPackageBase.__init__(self)
def install(self):
if not VirtualPackageBase.install(self):
return False
return self.subinfo.msysInstallShim(self.imageDir()) and self.subinfo.updateMsys()
def qmerge(self):
if self.package.isInstalled:
return True
return super().qmerge()
class Package(MaybeVirtualPackageBase):
def __init__(self):
useExternalMsys = ("Paths", "Msys") not in CraftCore.settings
self.skipCondition = useExternalMsys and not CraftPackageObject.get("dev-utils/msys").isInstalled
MaybeVirtualPackageBase.__init__(self, condition=self.skipCondition, classA=MsysPackage, classB=VirtualPackage)
if not useExternalMsys:
# override the install method
def install():
CraftCore.log.info(f"Using manually installed msys {CraftStandardDirs.msysDir()}")
return self.baseClass.install(self)
setattr(self, "install", install)
| true | true |
1c310e8484badd79ac34991597b64d85aecd23a2 | 1,766 | py | Python | python_ast/flake8_tests/test_ast_linter.py | jerryliu55/pyre-check | ca779758cda4468c95dc2cd84f97a896bb983e3c | [
"MIT"
] | 3 | 2019-03-29T22:32:12.000Z | 2019-04-16T08:54:57.000Z | python_ast/flake8_tests/test_ast_linter.py | jerryliu55/pyre-check | ca779758cda4468c95dc2cd84f97a896bb983e3c | [
"MIT"
] | null | null | null | python_ast/flake8_tests/test_ast_linter.py | jerryliu55/pyre-check | ca779758cda4468c95dc2cd84f97a896bb983e3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import ast
import unittest
from pathlib import Path
from typing import List, NamedTuple
from .ast_linter import AstChecker, Error
# Functional NamedTuple form: a (line, message) record describing one error
# the linter is expected to report for a given test file.
ExpectedError = NamedTuple("ExpectedError", [("line", int), ("message", str)])
class AstVisitorBaseCase(unittest.TestCase):
    """Shared fixture: builds an AstChecker over files from the mock repository
    and provides helpers to run it and compare its output with expectations."""

    def load_checker(self, test_file) -> AstChecker:
        """Read *test_file* from the mock repository and wrap it in an AstChecker."""
        repository = "tools/pyre/python_ast/flake8_tests/mock_repository"
        path = Path(repository) / test_file
        source = path.read_text()
        return AstChecker(
            # pyre-fixme[6]: Expected `Module` for 1st param but got `AST`.
            ast.parse(source),
            source.split("\n"),
            repository,
            str(path),
        )

    def assert_errors(self, actual: List[Error], expected: List[ExpectedError]) -> None:
        """Fail unless *actual* matches *expected* pairwise in both line and message."""
        if len(actual) != len(expected):
            listing = "\n".join(str(error) for error in actual)
            self.fail(f"Expected {len(expected)} errors, got {len(actual)}:\n" + listing)
        for expectation, found in zip(expected, actual):
            self.assertEqual(expectation.line, found.line)
            self.assertEqual(expectation.message, found.message)

    def setUp(self) -> None:
        # Subclasses override this to point at a real file in the mock repository.
        self.checker = self.load_checker("")

    def run_checker(self) -> List[Error]:
        """Exhaust the checker's error generator into a list."""
        return list(self.checker.run())
class AstVisitorTestCase(AstVisitorBaseCase):
    """End-to-end check of the AST linter against mock_repository/a.py."""

    def setUp(self) -> None:
        self.checker = self.load_checker("a.py")

    def test_linter(self):
        expected = [ExpectedError(line=9, message="Assigning to expression of type `int`.")]
        self.assert_errors(self.run_checker(), expected)
| 30.982456 | 88 | 0.632503 |
import ast
import unittest
from pathlib import Path
from typing import List, NamedTuple
from .ast_linter import AstChecker, Error
class ExpectedError(NamedTuple):
line: int
message: str
class AstVisitorBaseCase(unittest.TestCase):
def load_checker(self, test_file) -> AstChecker:
test_repository = "tools/pyre/python_ast/flake8_tests/mock_repository"
test_file = Path(test_repository) / test_file
source_code = test_file.read_text()
tree = ast.parse(source_code)
return AstChecker(
tree,
source_code.split("\n"),
test_repository,
str(test_file),
)
def assert_errors(self, actual: List[Error], expected: List[ExpectedError]) -> None:
if len(expected) != len(actual):
self.fail(
f"Expected {len(expected)} errors, got {len(actual)}:\n"
+ "\n".join(str(error) for error in actual)
)
for expected_error, actual_error in zip(expected, actual):
self.assertEqual(expected_error.line, actual_error.line)
self.assertEqual(expected_error.message, actual_error.message)
def setUp(self) -> None:
self.checker = self.load_checker("")
def run_checker(self) -> List[Error]:
return list(self.checker.run())
class AstVisitorTestCase(AstVisitorBaseCase):
def setUp(self) -> None:
self.checker = self.load_checker("a.py")
def test_linter(self):
errors = self.run_checker()
self.assert_errors(
errors,
[ExpectedError(line=9, message="Assigning to expression of type `int`.")],
)
| true | true |
1c310f7bbabe1c66ae36be1262be6c97762c5011 | 13,942 | py | Python | mhs/common/mhs_common/messages/ebxml_request_envelope.py | nhsconnect/integration-adaptor-mhs | fa9006ad8b64b696040d48cd469d60c9fc803b3e | [
"Apache-2.0"
] | 1 | 2020-05-20T12:26:46.000Z | 2020-05-20T12:26:46.000Z | mhs/common/mhs_common/messages/ebxml_request_envelope.py | nhsconnect/integration-adaptor-mhs | fa9006ad8b64b696040d48cd469d60c9fc803b3e | [
"Apache-2.0"
] | 41 | 2020-05-18T12:49:29.000Z | 2022-02-28T13:34:01.000Z | mhs/common/mhs_common/messages/ebxml_request_envelope.py | nhsconnect/integration-adaptor-mhs | fa9006ad8b64b696040d48cd469d60c9fc803b3e | [
"Apache-2.0"
] | 6 | 2020-06-04T18:59:25.000Z | 2021-12-16T16:42:32.000Z | """This module defines the envelope used to wrap asynchronous request messages to be sent to a remote MHS."""
from __future__ import annotations
import base64
import copy
import email
import email.message
import email.policy
from typing import Dict, Tuple, Union, List, Sequence, Generator
from xml.etree.ElementTree import Element
from builder import pystache_message_builder
from defusedxml import ElementTree
from comms.http_headers import HttpHeaders
from utilities import integration_adaptors_logger as log, message_utilities
from mhs_common.messages import ebxml_envelope
logger = log.IntegrationAdaptorsLogger(__name__)
# Name of the Mustache template used to render outgoing ebXML requests.
EBXML_TEMPLATE = "ebxml_request"
# Keys used in the extracted-values / template dictionaries.
MESSAGE = "hl7_message"
EBXML = "ebxml"
DUPLICATE_ELIMINATION = "duplicate_elimination"
ACK_REQUESTED = "ack_requested"
ACK_SOAP_ACTOR = "ack_soap_actor"
SYNC_REPLY = "sync_reply"
ATTACHMENTS = 'attachments'
EXTERNAL_ATTACHMENTS = 'external_attachments'
# Keys used within each attachment dictionary.
ATTACHMENT_CONTENT_ID = 'content_id'
ATTACHMENT_CONTENT_TYPE = 'content_type'
ATTACHMENT_BASE64 = 'is_base64'
ATTACHMENT_CONTENT_TRANSFER_ENCODING = 'content_transfer_encoding'
ATTACHMENT_PAYLOAD = 'payload'
ATTACHMENT_DESCRIPTION = 'description'
# Fixed Content-Type header value for outgoing multipart ebXML requests.
EBXML_CONTENT_TYPE_VALUE = 'multipart/related; boundary="--=_MIME-Boundary"; type=text/xml; ' \
                           'start=ebXMLHeader@spine.nhs.uk'
class EbxmlRequestEnvelope(ebxml_envelope.EbxmlEnvelope):
    """An envelope that contains a request to be sent asynchronously to a remote MHS."""
    def __init__(self, message_dictionary: Dict[str, Union[str, bool, List[Dict[str, Union[str, bool]]]]]):
        """Create a new EbxmlRequestEnvelope that populates the message with the provided dictionary.
        :param message_dictionary: The dictionary of values to use when populating the template.
        Example `message_dictionary`::
            {
                'from_party_id': 'TESTGEN-201324',
                'to_party_id': 'YEA-0000806',
                'cpa_id': 'S1001A1630',
                'conversation_id': '79F49A34-9798-404C-AEC4-FD38DD81C138',
                'service': 'urn:nhs:names:services:pdsquery',
                'action': 'QUPA_IN000006UK02',
                'duplicate_elimination': True,
                'ack_requested': True,
                'ack_soap_actor': 'urn:oasis:names:tc:ebxml-msg:actor:toPartyMSH',
                'sync_reply': True,
                'hl7_message': '<QUPA_IN000006UK02 xmlns="urn:hl7-org:v3"></QUPA_IN000006UK02>',
                'attachments': [ # Optional, defaults to empty list if not set
                    {
                        'content_type': 'text/plain',
                        'payload': 'Some text here',
                        'is_base64': False,
                        'description': 'Attachment description'
                    },
                    {
                        'content_type': 'image/png',
                        'payload': 'base64-encoded content here',
                        'is_base64': True,
                        'description': 'Another attachment description'
                    }
                ],
                'external_attachments': [ # Optional, defaults to empty list if not set
                    {
                        'document_id' : '6a7b4c68-8be8-46ba-8fbc-9b8313569380',
                        'message_id': '4dd554f1-2827-4b98-adf3-7cefab763fff',
                        'description': 'attachment description'
                    }
                ]
            }
        """
        super().__init__(EBXML_TEMPLATE, message_dictionary)
    def serialize(self, _message_dictionary=None) -> Tuple[str, Dict[str, str], str]:
        """Render the outgoing request, returning (message id, HTTP headers, wire message)."""
        # Deep-copy so attachment header generation doesn't mutate the caller's dictionary.
        message_dictionary = copy.deepcopy(self.message_dictionary)
        self._set_headers_for_attachments(message_dictionary)
        message_id, http_headers, message = super().serialize(_message_dictionary=message_dictionary)
        # The multipart/related content type is fixed for ebXML requests.
        http_headers[HttpHeaders.CONTENT_TYPE] = EBXML_CONTENT_TYPE_VALUE
        return message_id, http_headers, message
    @staticmethod
    def _set_headers_for_attachments(message_dictionary):
        """
        Generate a content ID for each attachment and set the content transfer encoding based on whether the
        attachment is Base64-encoded or not.
        :param message_dictionary: message dictionary that has the attachments
        """
        message_dictionary.setdefault(EXTERNAL_ATTACHMENTS, [])
        attachment: dict
        for attachment in message_dictionary.setdefault(ATTACHMENTS, []):
            attachment[ATTACHMENT_CONTENT_ID] = f'{message_utilities.get_uuid()}@spine.nhs.uk'
            try:
                # Note: pops the is_base64 flag — it is replaced by the transfer-encoding header value.
                attachment[ATTACHMENT_CONTENT_TRANSFER_ENCODING] = 'base64' if attachment.pop(ATTACHMENT_BASE64) \
                    else '8bit'
            except KeyError as e:
                logger.error('Failed to find {Key} when generating message from {TemplateFile} . {ErrorMessage}',
                             fparams={
                                 'Key': f'{ATTACHMENTS}[].{ATTACHMENT_BASE64}',
                                 'TemplateFile': EBXML_TEMPLATE,
                                 'ErrorMessage': e
                             })
                raise pystache_message_builder.MessageGenerationError(f'Failed to find '
                                                                      f'key:{ATTACHMENTS}[].{ATTACHMENT_BASE64} when '
                                                                      f'generating message from template '
                                                                      f'file:{EBXML_TEMPLATE}') from e
    @classmethod
    def from_string(cls, headers: Dict[str, str], message: str) -> EbxmlRequestEnvelope:
        """Parse the provided message string and create an instance of an EbxmlRequestEnvelope.
        :param headers A dictionary of headers received with the message.
        :param message: The message to be parsed.
        :return: An instance of an EbxmlAckEnvelope constructed from the message.
        """
        msg = EbxmlRequestEnvelope._parse_mime_message(headers, message)
        ebxml_part, payload_part, attachments = EbxmlRequestEnvelope._extract_message_parts(msg)
        xml_tree: Element = ElementTree.fromstring(ebxml_part)
        extracted_values = super().parse_message(xml_tree)
        cls._extract_more_values_from_xml_tree(xml_tree, extracted_values)
        extracted_values[EBXML] = ebxml_part
        extracted_values[ATTACHMENTS] = attachments
        if payload_part:
            extracted_values[MESSAGE] = payload_part
        return EbxmlRequestEnvelope(extracted_values)
    @classmethod
    def _extract_more_values_from_xml_tree(cls, xml_tree: Element,
                                           extracted_values: Dict[str, Union[str, bool]]):
        """
        Extract more values from XML tree (DuplicateElimination, SyncReply, AckRequested and SOAP actor). Some of the
        values extracted are booleans (ie if the element is present or not).
        :param xml_tree: XML tree to extract values from.
        :param extracted_values: Values extracted so far. The additional extracted values will be added to this dict.
        """
        cls._add_flag(extracted_values, DUPLICATE_ELIMINATION,
                      cls._extract_ebxml_value(xml_tree, "DuplicateElimination"))
        cls._add_flag(extracted_values, SYNC_REPLY, cls._extract_ebxml_value(xml_tree, "SyncReply"))
        cls._add_flag(extracted_values, ACK_REQUESTED, cls._extract_ebxml_value(xml_tree, "AckRequested"))
        cls._extract_attribute(xml_tree, "AckRequested", ebxml_envelope.SOAP_NAMESPACE, "actor", extracted_values,
                               ACK_SOAP_ACTOR)
    @staticmethod
    def _parse_mime_message(headers: Dict[str, str], message: str) -> email.message.EmailMessage:
        """ Take the provided message string (and set of HTTP headers received with it) and parse it to obtain a Message
        object.
        :param headers: The HTTP headers received with the message.
        :param message: The message (as a string) to be parsed.
        :return: a Message that represents the message received.
        """
        # Prepend the Content-Type header so the email parser knows the multipart boundary.
        content_type_header = f'{HttpHeaders.CONTENT_TYPE}: {headers[HttpHeaders.CONTENT_TYPE]}\r\n\r\n'
        msg = email.message_from_string(content_type_header + message, policy=email.policy.HTTP)
        if msg.defects:
            logger.warning('Found defects in MIME message during parsing. {Defects}',
                           fparams={'Defects': msg.defects})
        return msg
    @staticmethod
    def _extract_message_parts(msg: email.message.EmailMessage) -> Tuple[str, Union[str, None], List[Dict[str, Union[str, bool]]]]:
        """Extract the ebXML and payload parts of the message and return them as a tuple.
        :param msg: The message to extract parts from.
        :return: A tuple containing the ebXML and payload (if present, otherwise None) parts of the message provided.
        """
        # EIS section 2.5.4 defines that the first MIME part must contain the ebML SOAP message and the message payload
        # (if present) must be the first additional attachment.
        if not msg.is_multipart():
            logger.error('Non-multipart message received')
            raise ebxml_envelope.EbXmlParsingError("Non-multipart message received")
        message_parts: Sequence[email.message.EmailMessage] = tuple(msg.iter_parts())
        EbxmlRequestEnvelope._report_any_defects_in_message_parts(message_parts)
        # ebXML part is the first part of the message
        ebxml_part = EbxmlRequestEnvelope._extract_ebxml_part(message_parts[0])
        payload_part = None
        attachments = []
        if len(message_parts) > 1:
            # HL7 payload part is the second part of the message
            payload_part = EbxmlRequestEnvelope._extract_hl7_payload_part(message_parts[1])
            # Any additional attachments are from the third part of the message onwards
            attachments.extend(EbxmlRequestEnvelope._extract_additional_attachments_parts(message_parts[2:]))
        return ebxml_part, payload_part, attachments
    @staticmethod
    def _report_any_defects_in_message_parts(message_parts: Sequence[email.message.EmailMessage]):
        """Log (but do not fail on) any parsing defects found in the individual MIME parts."""
        for i, part in enumerate(message_parts):
            if part.defects:
                logger.warning('Found defects in {PartIndex} of MIME message during parsing. {Defects}',
                               fparams={'PartIndex': i, 'Defects': part.defects})
    @staticmethod
    def _extract_ebxml_part(message_part: email.message.EmailMessage) -> str:
        """Return the ebXML SOAP header part as text, raising if it is not decodable as text."""
        ebxml_part, is_base64_ebxml_part = EbxmlRequestEnvelope._convert_message_part_to_str(message_part)
        if is_base64_ebxml_part:
            logger.error('Failed to decode ebXML header part of message as text')
            raise ebxml_envelope.EbXmlParsingError("Failed to decode ebXML header part of message as text")
        return ebxml_part
    @staticmethod
    def _extract_hl7_payload_part(message_part: email.message.EmailMessage) -> str:
        """Return the HL7 payload part as text, raising if it is not decodable as text."""
        payload_part, is_base64_payload = EbxmlRequestEnvelope._convert_message_part_to_str(message_part)
        if is_base64_payload:
            logger.error('Failed to decode HL7 payload part of message as text')
            raise ebxml_envelope.EbXmlParsingError("Failed to decode HL7 payload part of message as text")
        return payload_part
    @staticmethod
    def _extract_additional_attachments_parts(message_parts: Sequence[email.message.EmailMessage]) \
            -> Generator[Dict[str, Union[str, bool]], None, None]:
        """Yield a dict (payload, base64 flag, content id, content type) for each additional attachment."""
        for attachment_message in message_parts:
            payload, is_base64 = EbxmlRequestEnvelope._convert_message_part_to_str(attachment_message)
            attachment = {
                ATTACHMENT_PAYLOAD: payload,
                ATTACHMENT_BASE64: is_base64,
                # The [1:-1] is to remove angle brackets (<>) that surround the content ID
                ATTACHMENT_CONTENT_ID: str(attachment_message['Content-Id'][1:-1]),
                ATTACHMENT_CONTENT_TYPE: attachment_message.get_content_type()
            }
            yield attachment
    @staticmethod
    def _convert_message_part_to_str(message_part: email.message.EmailMessage) -> Tuple[str, bool]:
        """Return the part's content as (text, is_base64): text parts come back verbatim,
        undecodable binary parts come back base64-encoded with is_base64 True."""
        content: Union[str, bytes] = message_part.get_content()
        content_type = message_part.get_content_type()
        content_transfer_encoding = message_part['Content-Transfer-Encoding']
        logger_dict = {'ContentType': content_type, 'ContentTransferEncoding': content_transfer_encoding}
        if isinstance(content, str):
            logger.info('Successfully decoded message part with {ContentType} {ContentTransferEncoding} as string',
                        fparams=logger_dict)
            return content, False
        try:
            # application/xml parts must always be text — try to decode them rather than base64-wrap.
            if content_type == 'application/xml':
                decoded_content = content.decode()
                logger.info('Successfully decoded message part with {ContentType} {ContentTransferEncoding} '
                            'as a string', fparams=logger_dict)
                return decoded_content, False
            decoded_content = base64.b64encode(content).decode()
            logger.info('Successfully encoded binary message part with {ContentType} {ContentTransferEncoding} as '
                        'a base64 string', fparams=logger_dict)
            return decoded_content, True
        except UnicodeDecodeError as e:
            logger.error('Failed to decode ebXML message part with {ContentType} {ContentTransferEncoding}.',
                         fparams=logger_dict)
            raise ebxml_envelope.EbXmlParsingError(f'Failed to decode ebXML message part with '
                                                   f'Content-Type: {content_type} and '
                                                   f'Content-Transfer-Encoding: {content_transfer_encoding}') from e
| 49.091549 | 120 | 0.650624 |
from __future__ import annotations
import base64
import copy
import email
import email.message
import email.policy
from typing import Dict, Tuple, Union, List, Sequence, Generator
from xml.etree.ElementTree import Element
from builder import pystache_message_builder
from defusedxml import ElementTree
from comms.http_headers import HttpHeaders
from utilities import integration_adaptors_logger as log, message_utilities
from mhs_common.messages import ebxml_envelope
logger = log.IntegrationAdaptorsLogger(__name__)
EBXML_TEMPLATE = "ebxml_request"
MESSAGE = "hl7_message"
EBXML = "ebxml"
DUPLICATE_ELIMINATION = "duplicate_elimination"
ACK_REQUESTED = "ack_requested"
ACK_SOAP_ACTOR = "ack_soap_actor"
SYNC_REPLY = "sync_reply"
ATTACHMENTS = 'attachments'
EXTERNAL_ATTACHMENTS = 'external_attachments'
ATTACHMENT_CONTENT_ID = 'content_id'
ATTACHMENT_CONTENT_TYPE = 'content_type'
ATTACHMENT_BASE64 = 'is_base64'
ATTACHMENT_CONTENT_TRANSFER_ENCODING = 'content_transfer_encoding'
ATTACHMENT_PAYLOAD = 'payload'
ATTACHMENT_DESCRIPTION = 'description'
EBXML_CONTENT_TYPE_VALUE = 'multipart/related; boundary="--=_MIME-Boundary"; type=text/xml; ' \
'start=ebXMLHeader@spine.nhs.uk'
class EbxmlRequestEnvelope(ebxml_envelope.EbxmlEnvelope):
def __init__(self, message_dictionary: Dict[str, Union[str, bool, List[Dict[str, Union[str, bool]]]]]):
super().__init__(EBXML_TEMPLATE, message_dictionary)
def serialize(self, _message_dictionary=None) -> Tuple[str, Dict[str, str], str]:
message_dictionary = copy.deepcopy(self.message_dictionary)
self._set_headers_for_attachments(message_dictionary)
message_id, http_headers, message = super().serialize(_message_dictionary=message_dictionary)
http_headers[HttpHeaders.CONTENT_TYPE] = EBXML_CONTENT_TYPE_VALUE
return message_id, http_headers, message
@staticmethod
def _set_headers_for_attachments(message_dictionary):
message_dictionary.setdefault(EXTERNAL_ATTACHMENTS, [])
attachment: dict
for attachment in message_dictionary.setdefault(ATTACHMENTS, []):
attachment[ATTACHMENT_CONTENT_ID] = f'{message_utilities.get_uuid()}@spine.nhs.uk'
try:
attachment[ATTACHMENT_CONTENT_TRANSFER_ENCODING] = 'base64' if attachment.pop(ATTACHMENT_BASE64) \
else '8bit'
except KeyError as e:
logger.error('Failed to find {Key} when generating message from {TemplateFile} . {ErrorMessage}',
fparams={
'Key': f'{ATTACHMENTS}[].{ATTACHMENT_BASE64}',
'TemplateFile': EBXML_TEMPLATE,
'ErrorMessage': e
})
raise pystache_message_builder.MessageGenerationError(f'Failed to find '
f'key:{ATTACHMENTS}[].{ATTACHMENT_BASE64} when '
f'generating message from template '
f'file:{EBXML_TEMPLATE}') from e
@classmethod
def from_string(cls, headers: Dict[str, str], message: str) -> EbxmlRequestEnvelope:
msg = EbxmlRequestEnvelope._parse_mime_message(headers, message)
ebxml_part, payload_part, attachments = EbxmlRequestEnvelope._extract_message_parts(msg)
xml_tree: Element = ElementTree.fromstring(ebxml_part)
extracted_values = super().parse_message(xml_tree)
cls._extract_more_values_from_xml_tree(xml_tree, extracted_values)
extracted_values[EBXML] = ebxml_part
extracted_values[ATTACHMENTS] = attachments
if payload_part:
extracted_values[MESSAGE] = payload_part
return EbxmlRequestEnvelope(extracted_values)
@classmethod
def _extract_more_values_from_xml_tree(cls, xml_tree: Element,
extracted_values: Dict[str, Union[str, bool]]):
cls._add_flag(extracted_values, DUPLICATE_ELIMINATION,
cls._extract_ebxml_value(xml_tree, "DuplicateElimination"))
cls._add_flag(extracted_values, SYNC_REPLY, cls._extract_ebxml_value(xml_tree, "SyncReply"))
cls._add_flag(extracted_values, ACK_REQUESTED, cls._extract_ebxml_value(xml_tree, "AckRequested"))
cls._extract_attribute(xml_tree, "AckRequested", ebxml_envelope.SOAP_NAMESPACE, "actor", extracted_values,
ACK_SOAP_ACTOR)
@staticmethod
def _parse_mime_message(headers: Dict[str, str], message: str) -> email.message.EmailMessage:
content_type_header = f'{HttpHeaders.CONTENT_TYPE}: {headers[HttpHeaders.CONTENT_TYPE]}\r\n\r\n'
msg = email.message_from_string(content_type_header + message, policy=email.policy.HTTP)
if msg.defects:
logger.warning('Found defects in MIME message during parsing. {Defects}',
fparams={'Defects': msg.defects})
return msg
@staticmethod
def _extract_message_parts(msg: email.message.EmailMessage) -> Tuple[str, str, List[Dict[str, Union[str, bool]]]]:
if not msg.is_multipart():
logger.error('Non-multipart message received')
raise ebxml_envelope.EbXmlParsingError("Non-multipart message received")
message_parts: Sequence[email.message.EmailMessage] = tuple(msg.iter_parts())
EbxmlRequestEnvelope._report_any_defects_in_message_parts(message_parts)
ebxml_part = EbxmlRequestEnvelope._extract_ebxml_part(message_parts[0])
payload_part = None
attachments = []
if len(message_parts) > 1:
payload_part = EbxmlRequestEnvelope._extract_hl7_payload_part(message_parts[1])
attachments.extend(EbxmlRequestEnvelope._extract_additional_attachments_parts(message_parts[2:]))
return ebxml_part, payload_part, attachments
@staticmethod
def _report_any_defects_in_message_parts(message_parts: Sequence[email.message.EmailMessage]):
for i, part in enumerate(message_parts):
if part.defects:
logger.warning('Found defects in {PartIndex} of MIME message during parsing. {Defects}',
fparams={'PartIndex': i, 'Defects': part.defects})
@staticmethod
def _extract_ebxml_part(message_part: email.message.EmailMessage) -> str:
ebxml_part, is_base64_ebxml_part = EbxmlRequestEnvelope._convert_message_part_to_str(message_part)
if is_base64_ebxml_part:
logger.error('Failed to decode ebXML header part of message as text')
raise ebxml_envelope.EbXmlParsingError("Failed to decode ebXML header part of message as text")
return ebxml_part
@staticmethod
def _extract_hl7_payload_part(message_part: email.message.EmailMessage) -> str:
payload_part, is_base64_payload = EbxmlRequestEnvelope._convert_message_part_to_str(message_part)
if is_base64_payload:
logger.error('Failed to decode HL7 payload part of message as text')
raise ebxml_envelope.EbXmlParsingError("Failed to decode HL7 payload part of message as text")
return payload_part
@staticmethod
def _extract_additional_attachments_parts(message_parts: Sequence[email.message.EmailMessage]) \
-> Generator[Dict[Union[str, bool]]]:
for attachment_message in message_parts:
payload, is_base64 = EbxmlRequestEnvelope._convert_message_part_to_str(attachment_message)
attachment = {
ATTACHMENT_PAYLOAD: payload,
ATTACHMENT_BASE64: is_base64,
ATTACHMENT_CONTENT_ID: str(attachment_message['Content-Id'][1:-1]),
ATTACHMENT_CONTENT_TYPE: attachment_message.get_content_type()
}
yield attachment
    @staticmethod
    def _convert_message_part_to_str(message_part: email.message.EmailMessage) -> Tuple[str, bool]:
        """Extract a MIME part's content as a string.

        Text content is returned as-is. Binary content is decoded as text when the
        part is declared ``application/xml``; any other binary content is returned
        base64-encoded.

        :param message_part: the MIME part to convert
        :return: a tuple of (content string, True if the string is base64-encoded)
        :raises ebxml_envelope.EbXmlParsingError: if binary XML content cannot be decoded as text
        """
        content: Union[str, bytes] = message_part.get_content()
        content_type = message_part.get_content_type()
        content_transfer_encoding = message_part['Content-Transfer-Encoding']
        logger_dict = {'ContentType': content_type, 'ContentTransferEncoding': content_transfer_encoding}
        # get_content() already yields str for textual parts - nothing to convert.
        if isinstance(content, str):
            logger.info('Successfully decoded message part with {ContentType} {ContentTransferEncoding} as string',
                        fparams=logger_dict)
            return content, False
        try:
            # XML delivered as bytes is still expected to be text; decode it
            # (default UTF-8) rather than base64-encode it.
            if content_type == 'application/xml':
                decoded_content = content.decode()
                logger.info('Successfully decoded message part with {ContentType} {ContentTransferEncoding} '
                            'as a string', fparams=logger_dict)
                return decoded_content, False
            # Any other binary payload is carried as a base64 string.
            decoded_content = base64.b64encode(content).decode()
            logger.info('Successfully encoded binary message part with {ContentType} {ContentTransferEncoding} as '
                        'a base64 string', fparams=logger_dict)
            return decoded_content, True
        except UnicodeDecodeError as e:
            # Only the application/xml decode above can raise this.
            logger.error('Failed to decode ebXML message part with {ContentType} {ContentTransferEncoding}.',
                         fparams=logger_dict)
            raise ebxml_envelope.EbXmlParsingError(f'Failed to decode ebXML message part with '
                                                  f'Content-Type: {content_type} and '
                                                  f'Content-Transfer-Encoding: {content_transfer_encoding}') from e
| true | true |
1c311011386e04a22ecac2c71ba793d750f6de17 | 7,872 | py | Python | hcipy/field/field.py | dskleingeld/hcipy | 85cacfb7a8058506afb288e3acdf3b6059ba2b50 | [
"MIT"
] | 1 | 2020-07-20T23:25:17.000Z | 2020-07-20T23:25:17.000Z | hcipy/field/field.py | dskleingeld/hcipy | 85cacfb7a8058506afb288e3acdf3b6059ba2b50 | [
"MIT"
] | null | null | null | hcipy/field/field.py | dskleingeld/hcipy | 85cacfb7a8058506afb288e3acdf3b6059ba2b50 | [
"MIT"
] | null | null | null | import numpy as np
import string
class Field(np.ndarray):
'''The value of some physical quantity for each point in some coordinate system.
Parameters
----------
arr : array_like
An array of values or tensors for each point in the :class:`Grid`.
grid : Grid
The corresponding :class:`Grid` on which the values are set.
Attributes
----------
grid : Grid
The grid on which the values are defined.
'''
def __new__(cls, arr, grid):
obj = np.asarray(arr).view(cls)
obj.grid = grid
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.grid = getattr(obj, 'grid', None)
@property
def tensor_order(self):
'''The order of the tensor of the field.
'''
return self.ndim - 1
@property
def tensor_shape(self):
'''The shape of the tensor of the field.
'''
return np.array(self.shape)[:-1]
@property
def is_scalar_field(self):
'''True if this field is a scalar field (ie. a tensor order of 0), False otherwise.
'''
return self.tensor_order == 0
@property
def is_vector_field(self):
'''True if this field is a vector field (ie. a tensor order of 1), False otherwise.
'''
return self.tensor_order == 1
@property
def is_valid_field(self):
'''True if the field corresponds with its grid.
'''
return self.shape[-1] == self.grid.size
@property
def shaped(self):
'''The reshaped version of this field.
Raises
------
ValueError
If this field isn't separated, no reshaped version can be made.
'''
if not self.grid.is_separated:
raise ValueError('This field doesn\'t have a shape.')
if self.tensor_order > 0:
new_shape = np.concatenate([np.array(self.shape)[:-1], self.grid.shape])
return self.reshape(new_shape)
return self.reshape(self.grid.shape)
def at(self, p):
'''The value of this field closest to point p.
Parameters
----------
p : array_like
The point at which the closest value should be returned.
Returns
-------
array_like
The value, potentially tensor, closest to point p.
'''
i = self.grid.closest_to(p)
return self[...,i]
def field_einsum(subscripts, *operands, **kwargs):
    '''Evaluates the Einstein summation convention on the operand fields.

    This function uses the same conventions as numpy.einsum(). The input
    subscript is multiplexed over each position in the grid. The grids of each of
    the input field operands don't have to match, but must have the same lengths.

    The subscripts must be written as you would for a single position in the grid.
    The function alters these subscripts to multiplex over the entire grid.

    .. caution::
        Some subscripts may yield no exception, even though they would fail for
        a single point in the grid. The output in these cases can not be trusted.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    operands : list of array_like or `Field`
        These are the arrays or fields for the operation.
    out : {ndarray, None}, optional
        If provided, the calculation is done into this array.
    dtype : {data-type, None}, optional
        If provided, forces the calculation to use the data type specified.
        Note that you may have to also give a more liberal `casting`
        parameter to allow the conversions. Default is None.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the output. 'C' means it should
        be C contiguous. 'F' means it should be Fortran contiguous,
        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout as the inputs as
        is possible, including arbitrarily permuted axes.
        Default is 'K'.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Setting this to
        'unsafe' is not recommended, as it can adversely affect accumulations.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.

        Default is 'safe'.
    optimize : {False, True, 'greedy', 'optimal'}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False and True will default to the 'greedy' algorithm.
        Also accepts an explicit contraction list from the ``np.einsum_path``
        function. See ``np.einsum_path`` for more details. Default is False.

    Returns
    -------
    Field
        The calculated Field based on the Einstein summation convention.

    Raises
    ------
    ValueError
        If all of the fields don't have the same grid size. If the number of
        operands is not equal to the number of subscripts specified.
    '''
    is_field = [isinstance(o, Field) for o in operands]

    # Fast path: no Field operands involved, defer to plain numpy.
    if not np.count_nonzero(is_field):
        return np.einsum(subscripts, *operands, **kwargs)

    field_sizes = [o.grid.size for i, o in enumerate(operands) if is_field[i]]
    if not np.allclose(field_sizes, field_sizes[0]):
        raise ValueError('All fields must be the same size for a field_einsum().')

    # Decompose the subscript into input and output
    splitted_string = subscripts.split('->')
    if len(splitted_string) == 2:
        ss_input, ss_output = splitted_string
    else:
        ss_input = splitted_string[0]
        ss_output = ''

    # split the input operands in separate strings
    ss = ss_input.split(',')
    if len(ss) != len(operands):
        raise ValueError('Number of operands is not equal to number of indexing operands.')

    # Find an indexing letter that can be used for field dimension.
    unused_index = [a for a in string.ascii_lowercase if a not in subscripts][0]

    # Add the field dimension to the input field operands.
    ss = [s + unused_index if is_field[i] else s for i,s in enumerate(ss)]

    # Recombine all operands into the final subscripts
    if len(splitted_string) == 2:
        subscripts_new = ','.join(ss) + '->' + ss_output + unused_index
    else:
        subscripts_new = ','.join(ss)

    res = np.einsum(subscripts_new, *operands, **kwargs)

    # The output field inherits the grid of the first Field operand.
    grid = operands[np.flatnonzero(np.array(is_field))[0]].grid
    # NOTE(review): np.einsum() has already written into `out` at this point;
    # reassigning kwargs['out'] only rewraps the caller's kwargs dict and looks
    # like dead code -- confirm the intended behaviour for the `out=` path.
    if 'out' in kwargs:
        kwargs['out'] = Field(res, grid)
    return Field(res, grid)
def field_dot(a, b, out=None):
    '''Perform a dot product of `a` and `b` multiplexed over the field dimension.

    Parameters
    ----------
    a : Field or array_like
        Left argument of the dot product.
    b : Field or array_like
        Right argument of the dot product.
    out : Field or array_like
        If provided, the calculation is done into this array.

    Returns
    -------
    Field
        The result of the dot product.
    '''
    # Find out if a or b are vectors or higher dimensional tensors
    if hasattr(a, 'tensor_order'):
        amat = a.tensor_order > 1
    elif np.isscalar(a):
        # A scalar operand reduces the dot product to an elementwise scaling.
        if out is None:
            return a * b
        else:
            return np.multiply(a, b, out)
    else:
        amat = a.ndim > 1
    if hasattr(b, 'tensor_order'):
        bmat = b.tensor_order > 1
    elif np.isscalar(b):
        if out is None:
            return a * b
        else:
            return np.multiply(a, b, out)
    else:
        bmat = b.ndim > 1
    # Select correct multiplication behaviour.
    # NOTE(review): the matrix-vector case (amat and not bmat) reuses the
    # vector-vector subscripts '...i,...i->...' and appears to rely on the
    # ellipsis absorbing the leading matrix axis via broadcasting -- confirm
    # this is intended rather than an explicit '...ij,...j->...i'.
    if amat and bmat:
        subscripts = '...ij,...jk->...ik'
    elif amat and not bmat:
        subscripts = '...i,...i->...'
    elif not amat and bmat:
        subscripts = '...i,...ij->...j'
    elif not amat and not bmat:
        subscripts = '...i,...i->...'
    # Perform calculation and return.
    if out is None:
        return field_einsum(subscripts, a, b)
    else:
        return field_einsum(subscripts, a, b, out=out)
def field_trace(a, out=None):
    '''Take the trace of the tensor field `a`, multiplexed over its grid.'''
    if out is not None:
        return field_einsum('ii', a, out=out)
    return field_einsum('ii', a)
def field_inv(a):
    '''Invert the order-2 tensor field `a` point-by-point.

    Plain (non-field) arrays are forwarded to ``np.linalg.inv`` unchanged.
    '''
    if not hasattr(a, 'grid'):
        return np.linalg.inv(a)

    if a.tensor_order != 2:
        raise ValueError("Only tensor fields of order 2 can be inverted.")
    # Move the grid axis to the front so np.linalg.inv sees a stack of
    # matrices, invert, then move the grid axis back to the end.
    inverted = np.rollaxis(np.linalg.inv(np.rollaxis(a, -1)), 0, 3)
    return Field(inverted, a.grid)
import string
class Field(np.ndarray):
def __new__(cls, arr, grid):
obj = np.asarray(arr).view(cls)
obj.grid = grid
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.grid = getattr(obj, 'grid', None)
@property
def tensor_order(self):
return self.ndim - 1
@property
def tensor_shape(self):
return np.array(self.shape)[:-1]
@property
def is_scalar_field(self):
return self.tensor_order == 0
@property
def is_vector_field(self):
return self.tensor_order == 1
@property
def is_valid_field(self):
return self.shape[-1] == self.grid.size
@property
def shaped(self):
if not self.grid.is_separated:
raise ValueError('This field doesn\'t have a shape.')
if self.tensor_order > 0:
new_shape = np.concatenate([np.array(self.shape)[:-1], self.grid.shape])
return self.reshape(new_shape)
return self.reshape(self.grid.shape)
def at(self, p):
i = self.grid.closest_to(p)
return self[...,i]
def field_einsum(subscripts, *operands, **kwargs):
is_field = [isinstance(o, Field) for o in operands]
if not np.count_nonzero(is_field):
return np.einsum(subscripts, *operands, **kwargs)
field_sizes = [o.grid.size for i, o in enumerate(operands) if is_field[i]]
if not np.allclose(field_sizes, field_sizes[0]):
raise ValueError('All fields must be the same size for a field_einsum().')
# Decompose the subscript into input and output
splitted_string = subscripts.split('->')
if len(splitted_string) == 2:
ss_input, ss_output = splitted_string
else:
ss_input = splitted_string[0]
ss_output = ''
# split the input operands in separate strings
ss = ss_input.split(',')
if len(ss) != len(operands):
raise ValueError('Number of operands is not equal to number of indexing operands.')
# Find an indexing letter that can be used for field dimension.
unused_index = [a for a in string.ascii_lowercase if a not in subscripts][0]
# Add the field dimension to the input field operands.
ss = [s + unused_index if is_field[i] else s for i,s in enumerate(ss)]
# Recombine all operands into the final subscripts
if len(splitted_string) == 2:
subscripts_new = ','.join(ss) + '->' + ss_output + unused_index
else:
subscripts_new = ','.join(ss)
res = np.einsum(subscripts_new, *operands, **kwargs)
grid = operands[np.flatnonzero(np.array(is_field))[0]].grid
if 'out' in kwargs:
kwargs['out'] = Field(res, grid)
return Field(res, grid)
def field_dot(a, b, out=None):
# Find out if a or b are vectors or higher dimensional tensors
if hasattr(a, 'tensor_order'):
amat = a.tensor_order > 1
elif np.isscalar(a):
if out is None:
return a * b
else:
return np.multiply(a, b, out)
else:
amat = a.ndim > 1
if hasattr(b, 'tensor_order'):
bmat = b.tensor_order > 1
elif np.isscalar(b):
if out is None:
return a * b
else:
return np.multiply(a, b, out)
else:
bmat = b.ndim > 1
# Select correct multiplication behaviour.
if amat and bmat:
subscripts = '...ij,...jk->...ik'
elif amat and not bmat:
subscripts = '...i,...i->...'
elif not amat and bmat:
subscripts = '...i,...ij->...j'
elif not amat and not bmat:
subscripts = '...i,...i->...'
# Perform calculation and return.
if out is None:
return field_einsum(subscripts, a, b)
else:
return field_einsum(subscripts, a, b, out=out)
def field_trace(a, out=None):
if out is None:
return field_einsum('ii', a)
else:
return field_einsum('ii', a, out=out)
def field_inv(a):
if hasattr(a, 'grid'):
if a.tensor_order != 2:
raise ValueError("Only tensor fields of order 2 can be inverted.")
res = np.rollaxis(np.linalg.inv(np.rollaxis(a, -1)), 0, 3)
return Field(res, a.grid)
else:
return np.linalg.inv(a) | true | true |
1c31111aac8aff95f5f45202a6e11e841a5044da | 6,289 | py | Python | python-basic-template.py | realSnoopy/python-ffmpeg-multi-conversion | 3e9986252fabe229273771e021ea55fd9b208bd4 | [
"MIT"
] | null | null | null | python-basic-template.py | realSnoopy/python-ffmpeg-multi-conversion | 3e9986252fabe229273771e021ea55fd9b208bd4 | [
"MIT"
] | null | null | null | python-basic-template.py | realSnoopy/python-ffmpeg-multi-conversion | 3e9986252fabe229273771e021ea55fd9b208bd4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Beautiful is better than ugly.
# Explicit is better than implicit.
# Simple is better than complex.
# Complex is better than complicated.
# Flat is better than nested.
# Python 3.5 and up
# getestet Python 3.6.5
VERSION = 'ALPHA'
def clear_console():
    '''Wipe the terminal, using the platform-appropriate shell command.'''
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def exit(error_msg=None):
    '''Terminate the script with status 0, printing *error_msg* first when given.

    Deliberately shadows the site builtin ``exit`` within this script.
    '''
    if error_msg:
        banner = '\n[ERROR]\n{}\n[EXIT]'.format(error_msg)
        print(banner)
    sys.exit(0)
try:
import os
import sys
import unicodedata
import logging
from pathlib import Path
from platform import python_version
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
except Exception as error:
exit(error)
def check_python():
    '''Abort via exit() unless the interpreter is at least Python 3.8.

    Fixes two defects in the previous version: the version was compared as a
    string (``'3.10' < '3.8'`` lexicographically, so the check failed on every
    Python >= 3.10), and the error message claimed 3.5 while the check enforced
    3.8.
    '''
    if sys.version_info >= (3, 8):
        return
    error = 'This script requires at least Python 3.8. Please update or use "python3" to invoke.\n'
    error += 'Python {} found.'.format(python_version())
    exit(error)
def get_files(path, ):
    '''Recursively collect all files below *path* as pathlib.Path objects.

    :param path: directory to walk (str, path-like or os.DirEntry)
    :return: list of Path objects for every file found, depth-first

    Fixes: the old Python-3.6 compatibility shim compared version *strings*
    (which breaks on 3.10+) and then referenced an undefined name
    ``directory``, raising NameError whenever it triggered. os.scandir()
    accepts path-like objects since 3.6 and check_python() already enforces
    >= 3.8, so the shim is removed.
    '''
    file_list = []
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            # Recurse into sub-directories and flatten the result.
            file_list.extend(Path(file) for file in get_files(entry))
        if entry.is_file():
            file_list.append(Path(entry))
    return file_list
def get_files_filter(files, file_filter, ):
    '''Return the subset of *files* whose name or (case-insensitive) suffix is in *file_filter*.'''
    lowered_patterns = [pattern.lower() for pattern in file_filter]
    selected = []
    for candidate in files:
        if candidate.name in file_filter or candidate.suffix.lower() in lowered_patterns:
            selected.append(Path(candidate))
    return selected
def get_size(file):
    '''Return the size of *file* in bytes.'''
    return Path(file).stat().st_size
def size_to_human(filesize, base='KB'):
    '''Format a byte count as a human-readable string.

    :param filesize: size in bytes
    :param base: 'KB' or 'MB'
    :return: formatted string such as '1.50 KB'
    :raises ValueError: if *base* is not one of the supported units

    Fix: an unsupported *base* used to fall off the end and silently return
    None; it now fails loudly.
    '''
    if base == 'KB':
        return '{:.2f} KB'.format(filesize / 1024)
    if base == 'MB':
        return '{:.2f} MB'.format(filesize / (1024 * 1024))
    raise ValueError('Unsupported base: {!r} (expected KB or MB)'.format(base))
class GetWork:
    '''Collects the input files for a run and prepares the output folder.

    On construction this creates ``#-OUT-#`` inside the current working
    directory, walks *path* recursively and optionally narrows the file list
    with *file_filter* (exact file names or case-insensitive suffixes).
    '''
    def __init__(self, path, file_filter=None):
        self._path = path
        self._file_filter = file_filter
        self._root = Path.cwd()
        # All results are written below '#-OUT-#' in the working directory.
        self._outpath = Path(self._root) / '#-OUT-#'
        if not self._outpath.exists():
            self._outpath.mkdir(parents=True)
        if not self._path:
            # NOTE(review): at construction time `logging` is still the stdlib
            # module (the script rebinds the name to a Logger only afterwards);
            # both targets accept .info(), but confirm the intended one.
            logging.info('Input-Path not found')
            exit()
        self._files = get_files(self._path, )
        if self._file_filter:
            self._files_filtered = get_files_filter(self._files, self._file_filter)
        else:
            # No filter configured: expose the full file list under both names.
            self._files_filtered = self._files
    @property
    def files(self):
        # Every file found below the input path.
        return self._files
    @property
    def files_filtered(self):
        # The subset matching the configured filter (or all files when unset).
        return self._files_filtered
    @property
    def outpath(self):
        # Path of the '#-OUT-#' output directory (created in __init__).
        return self._outpath
# Byte-order marks paired with the codec name to decode with. Order matters
# for the little-endian pair: the UTF-32-LE BOM begins with the UTF-16-LE BOM
# bytes, so UTF-32-LE must be tested first.
BOMS = (
    (BOM_UTF8, 'UTF-8-SIG'),
    (BOM_UTF32_BE, 'UTF-32-BE'),
    (BOM_UTF32_LE, 'UTF-32-LE'),
    (BOM_UTF16_BE, 'UTF-16-BE'),
    (BOM_UTF16_LE, 'UTF-16-LE'),
)
def check_bom(data):
    '''Return the encoding names whose BOM prefixes *data* (usually zero or one match).'''
    matches = []
    for bom, encoding in BOMS:
        if data.startswith(bom):
            matches.append(encoding)
    return matches
def get_content(file, read, ):
    '''Read *file* as text, honouring a BOM when present and falling back to UTF-8.

    :param file: path of the file to read
    :param read: 'read' for one string, 'lines' for a list of lines
    :return: the decoded file content
    '''
    def read_file(file, encoding, read, mode='r'):
        # Strict error handling so a wrong encoding guess raises instead of
        # silently corrupting the text.
        with open(file=file, encoding=encoding, mode=mode, errors='strict') as file_object:
            if read == 'read':
                file_content = file_object.read()
            elif read == 'lines':
                file_content = file_object.readlines()
        return file_content

    # Sniff a byte-order mark from the first raw line of the file.
    with open(file, mode='rb') as raw:
        encoding = ''.join(check_bom(raw.readline()))
    if encoding:
        logging.info('encoding\t{}'.format(encoding))
    else:
        logging.info('encoding\tNo BOM found')

    # Try the detected BOM encoding first; on failure fall back to UTF-8.
    if encoding:
        try:
            return read_file(file=file, encoding=encoding, read=read, )
        except Exception as error:
            logging.debug('{}'.format(error))

    try:
        return read_file(file=file, encoding='UTF-8', read=read)
    except Exception as error:
        logging.debug('{}'.format(error))
        exit('Krasser Fehler')
def write_to_file(path, content, mode='w'):
    '''Write *content* to *path* as UTF-8, creating parent folders on demand.

    A single string gets one trailing newline appended; any other iterable has
    every item str()-converted and written on its own line.
    '''
    target = Path(path)  # pathlib path object
    if not target.parent.exists():
        target.parent.mkdir(parents=True)

    with open(target, mode=mode, encoding='UTF-8', errors='strict') as output:
        if isinstance(content, str):
            output.writelines(content + '\n')
        else:
            output.writelines(str(entry) + '\n' for entry in content)
### Logging-Stuff
# CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
def create_basic_logger(file_handler):
    '''Configure the root logger with DEBUG console and file handlers.

    :param file_handler: path of the log file
    :return: the configured root logger
    '''
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    console_out = logging.StreamHandler()
    console_out.setLevel(logging.DEBUG)
    console_out.setFormatter(logging.Formatter(fmt='{levelname}\t{message}', style='{', ))

    file_out = logging.FileHandler(file_handler)
    file_out.setLevel(logging.DEBUG)
    file_out.setFormatter(logging.Formatter(fmt='{asctime}\t{levelname}\t{message}', style='{', datefmt='%H:%M:%S'))

    # Console handler first, then file handler (same order as before).
    root_logger.addHandler(console_out)
    root_logger.addHandler(file_out)
    return root_logger
#######
# I am the Alpha and the Omega, the First and the Last, the beginning and the end.
#######
# --- script entry: runs at import time (clears the screen, checks the
# --- interpreter, scans the working directory and sets up logging).
clear_console()
check_python()
settings = {
    'path' : Path.cwd() ,
    'file_filter' : ('.txt', ) ,
}
root = GetWork(**settings)
# create basic logger
# NOTE(review): this rebinds the module-level name `logging` to the configured
# Logger instance, so every later `logging.info(...)` in this file goes through
# it; confirm this shadowing of the stdlib module is intentional.
logging = create_basic_logger(file_handler = Path(root.outpath) / 'debug.log')
if __name__ == '__main__':
    # Template placeholders: the loops do nothing, but `file` keeps the last
    # iterated value, which the info lines below rely on.
    for file in root.files:
        pass
    for file in root.files_filtered:
        pass
    # basic infos for files
    logging.info('filename\t{}'.format(file.name))
    logging.info('filesize\t{}'.format(size_to_human(get_size(file), base='KB')))
    file_content = get_content(file, read='lines')
| 27.583333 | 109 | 0.618699 |
VERSION = 'ALPHA'
def clear_console():
if os.name=='nt':
os.system('cls')
else:
os.system('clear')
def exit(error_msg=None):
if error_msg:
print('\n[ERROR]\n{}\n[EXIT]'.format(error_msg))
sys.exit(0)
try:
import os
import sys
import unicodedata
import logging
from pathlib import Path
from platform import python_version
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
except Exception as error:
exit(error)
def check_python():
try:
assert(python_version() >= '3.8')
except AssertionError:
error = 'This script requires at least Python 3.5. Please update or use "python3" to invoke.\n'
error += 'Python {} found.'.format(python_version())
exit(error)
def get_files(path, ):
file_list = []
try:
assert(python_version() >= '3.6')
except AssertionError:
directory = str(directory)
for entry in os.scandir(path):
if entry.is_dir(follow_symlinks=False):
tmp_files = get_files(entry, )
[file_list.append(Path(file)) for file in tmp_files]
if entry.is_file():
file_list.append(Path(entry))
return file_list
def get_files_filter(files, file_filter, ):
file_list_filter = []
for file in files:
if file.name in file_filter or file.suffix.lower() in [filter.lower() for filter in file_filter]:
file_list_filter.append(Path(file))
return file_list_filter
def get_size(file):
return (Path(file).stat().st_size)
def size_to_human(filesize, base='KB'):
if base == 'KB':
return '{:.2f} KB'.format(filesize/1024)
elif base == 'MB':
return '{:.2f} MB'.format(filesize/(1024*1024))
class GetWork:
def __init__(self, path, file_filter=None):
self._path = path
self._file_filter = file_filter
self._root = Path.cwd()
self._outpath = Path(self._root) / '#-OUT-#'
if not self._outpath.exists():
self._outpath.mkdir(parents=True)
if not self._path:
logging.info('Input-Path not found')
exit()
self._files = get_files(self._path, )
if self._file_filter:
self._files_filtered = get_files_filter(self._files, self._file_filter)
else:
self._files_filtered = self._files
@property
def files(self):
return self._files
@property
def files_filtered(self):
return self._files_filtered
@property
def outpath(self):
return self._outpath
BOMS = (
(BOM_UTF8, 'UTF-8-SIG'),
(BOM_UTF32_BE, 'UTF-32-BE'),
(BOM_UTF32_LE, 'UTF-32-LE'),
(BOM_UTF16_BE, 'UTF-16-BE'),
(BOM_UTF16_LE, 'UTF-16-LE'),
)
def check_bom(data):
return [encoding for bom, encoding in BOMS if data.startswith(bom)]
def get_content(file, read, ):
def read_file(file, encoding, read, mode='r'):
with open(file=file, encoding=encoding, mode=mode, errors='strict') as file_object:
if read == 'read':
file_content = file_object.read()
elif read == 'lines':
file_content = file_object.readlines()
return file_content
error_msg = ''
with open(file, mode='rb') as file_object:
encoding = check_bom(file_object.readline())
encoding = ''.join(encoding)
if encoding != '':
logging.info('encoding\t{}'.format(encoding))
else:
logging.info('encoding\tNo BOM found')
if encoding != '':
try:
file_content = read_file(file=file, encoding=encoding, read=read, )
except Exception as error:
logging.debug('{}'.format(error))
encoding = ''
if encoding == '':
try:
file_content = read_file(file=file, encoding='UTF-8', read=read)
except Exception as error:
logging.debug('{}'.format(error))
exit('Krasser Fehler')
return file_content
def write_to_file(path, content, mode='w'):
path = Path(path)
if not path.parent.exists():
path.parent.mkdir(parents=True)
with open(path, mode=mode, encoding='UTF-8', errors='strict') as output:
if isinstance(content, (str)):
content = content + '\n'
output.writelines(content)
else:
content = [str(entry) + '\n' for entry in content]
output.writelines(content)
_handler):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(file_handler)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch_formatter = logging.Formatter(fmt='{levelname}\t{message}', style='{', )
fh_formatter = logging.Formatter(fmt='{asctime}\t{levelname}\t{message}', style='{', datefmt='%H:%M:%S')
ch.setFormatter(ch_formatter)
fh.setFormatter(fh_formatter)
logger.addHandler(ch)
logger.addHandler(fh)
return logger
gs = {
'path' : Path.cwd() ,
'file_filter' : ('.txt', ) ,
}
root = GetWork(**settings)
logging = create_basic_logger(file_handler = Path(root.outpath) / 'debug.log')
if __name__ == '__main__':
for file in root.files:
pass
for file in root.files_filtered:
pass
logging.info('filename\t{}'.format(file.name))
logging.info('filesize\t{}'.format(size_to_human(get_size(file), base='KB')))
file_content = get_content(file, read='lines')
| true | true |
1c31124a28d1dee46f542f0014529787d8946185 | 222 | py | Python | tests/test_iterator_TCP.py | jcarreira/cirrus-kv | a44099185e02859385997956333b364ae836fee5 | [
"Apache-2.0"
] | 8 | 2018-07-18T22:13:36.000Z | 2021-08-24T12:28:42.000Z | tests/test_iterator_TCP.py | jcarreira/ddc | a44099185e02859385997956333b364ae836fee5 | [
"Apache-2.0"
] | 7 | 2016-11-22T11:07:14.000Z | 2016-12-17T22:49:23.000Z | tests/test_iterator_TCP.py | jcarreira/ddc | a44099185e02859385997956333b364ae836fee5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import subprocess
import time
import test_runner
# Path of the object-store iterator test to execute (relative to repo root).
testPath = "./tests/object_store/test_iterator"
# Hand off to the shared runner; runTestTCP presumably runs the test against
# the TCP-backed store -- confirm in test_runner.
test_runner.runTestTCP(testPath)
| 18.5 | 47 | 0.788288 |
import sys
import subprocess
import time
import test_runner
testPath = "./tests/object_store/test_iterator"
test_runner.runTestTCP(testPath)
| true | true |
1c3112ce7a1bbf5a04753c11f0da9a56ef16ce27 | 22,703 | py | Python | python-profiles/STANDA/8MID12-1-AR.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 3 | 2020-12-08T14:41:48.000Z | 2022-02-23T13:42:42.000Z | python-profiles/STANDA/8MID12-1-AR.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 4 | 2020-12-08T20:15:06.000Z | 2021-12-08T14:15:24.000Z | python-profiles/STANDA/8MID12-1-AR.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 2 | 2020-11-02T02:17:35.000Z | 2021-03-18T14:13:56.000Z | def set_profile_8MID12_1_AR(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_NONE
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 50
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 1020
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 800
move_settings.uSpeed = 0
move_settings.Accel = 3200
move_settings.Decel = 4800
move_settings.AntiplaySpeed = 800
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 50
engine_settings.NomCurrent = 200
engine_settings.NomSpeed = 1600
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 326
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 20
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW | EnderFlags_.ENDER_SW1_ACTIVE_LOW | EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = -971
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 910
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 80
control_settings.MaxSpeed[1] = 800
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0
emf_settings.R = 0
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([83, 116, 97, 110, 100, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([56, 77, 73, 68, 49, 50, 45, 49, 45, 65, 82, 0, 95, 49, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0.25
stage_settings.Units = bytes([109, 109, 0, 114, 101, 101, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 11
stage_settings.SupplyVoltageMin = 5
stage_settings.SupplyVoltageMax = 12
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_STEP | MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 1600
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 1000
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| 35.922468 | 185 | 0.693389 | def set_profile_8MID12_1_AR(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_NONE
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 50
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 1020
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 800
move_settings.uSpeed = 0
move_settings.Accel = 3200
move_settings.Decel = 4800
move_settings.AntiplaySpeed = 800
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 50
engine_settings.NomCurrent = 200
engine_settings.NomSpeed = 1600
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 326
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 20
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW | EnderFlags_.ENDER_SW1_ACTIVE_LOW | EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = -971
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 910
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 80
control_settings.MaxSpeed[1] = 800
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0
emf_settings.R = 0
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([83, 116, 97, 110, 100, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([56, 77, 73, 68, 49, 50, 45, 49, 45, 65, 82, 0, 95, 49, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0.25
stage_settings.Units = bytes([109, 109, 0, 114, 101, 101, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 11
stage_settings.SupplyVoltageMin = 5
stage_settings.SupplyVoltageMax = 12
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_STEP | MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 1600
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 1000
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| true | true |
1c31164741e00323e319a697268a69b6f3d9f9bb | 2,029 | py | Python | tests/test_all_notebooks.py | scottprahl/iadpython | df04f6446c73b5c5c1aabed072e986877f81104b | [
"MIT"
] | 4 | 2017-09-13T14:01:32.000Z | 2021-11-09T04:48:17.000Z | tests/test_all_notebooks.py | scottprahl/iadpython | df04f6446c73b5c5c1aabed072e986877f81104b | [
"MIT"
] | null | null | null | tests/test_all_notebooks.py | scottprahl/iadpython | df04f6446c73b5c5c1aabed072e986877f81104b | [
"MIT"
] | 1 | 2020-06-16T21:09:44.000Z | 2020-06-16T21:09:44.000Z | """
This file is intended to be the target of a pytest run.
It will recursively find all .ipynb files in the current directory, ignoring
directories that start with . and any files matching patterins found in the file
.testignore
List patterns to skip in .testignore file:
under_construction/*
Sample invocations of pytest which make the output nicely readable:
pytest --verbose --durations=5 test_all_notebooks.py
If you install pytest-xdist you can run tests in parallel with
pytest --verbose --durations=5 -n 4 test_all_notebooks.py
Original version is licensed under GPL 3.0 so this one is too.
The original can be located at
https://github.com/alchemyst/Dynamics-and-Control/test_all_notebooks.py
"""
import os.path
import pathlib
import pytest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
# Default search path is the current directory
searchpath = pathlib.Path('./docs/') # all notebooks are in here
# Read ignore patterns from the optional .testignore file.
# Start from an empty list (not '') so `ignores` is always a list of
# pattern strings; the empty case simply matches nothing below.
ignores = []
if os.path.exists('.testignore'):
    # Context manager ensures the file handle is closed promptly.
    with open('.testignore') as ignore_file:
        ignores = [line.strip() for line in ignore_file if line.strip()]
# Collect every notebook under the search path, skipping anything inside a
# hidden directory (any parent folder starting with '.') and anything that
# matches one of the .testignore patterns.
notebooks = [notebook for notebook in searchpath.glob('**/*.ipynb')
             if not (any(parent.startswith('.')
                         for parent in notebook.parent.parts)
                     or any(notebook.match(pattern)
                            for pattern in ignores))]
# Sort for a deterministic test order across runs.
notebooks.sort()
# Human-readable ids (one per notebook path) shown by pytest's parametrize.
ids = [str(n) for n in notebooks]
@pytest.mark.notebooks
@pytest.mark.parametrize("notebook", notebooks, ids=ids)
def test_run_notebook(notebook):
    """Parse one notebook and execute it cell by cell.

    This follows the execution recipe from the nbconvert documentation.
    No explicit error handling is needed: any exception raised while
    executing a cell is reported as a test failure by pytest.
    """
    with open(notebook) as handle:
        parsed = nbformat.read(handle, as_version=4)
    runner = ExecutePreprocessor(timeout=600)
    runner.preprocess(parsed, {'metadata': {'path': notebook.parent}})
| 31.703125 | 80 | 0.712666 | import os.path
import pathlib
import pytest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
searchpath = pathlib.Path('./docs/')
ignores = ''
if os.path.exists('.testignore'):
ignores = [line.strip() for line in open('.testignore') if line.strip()]
notebooks = [notebook for notebook in searchpath.glob('**/*.ipynb')
if not (any(parent.startswith('.')
for parent in notebook.parent.parts)
or any(notebook.match(pattern)
for pattern in ignores))]
notebooks.sort()
ids = [str(n) for n in notebooks]
@pytest.mark.notebooks
@pytest.mark.parametrize("notebook", notebooks, ids=ids)
def test_run_notebook(notebook):
with open(notebook) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600)
ep.preprocess(nb, {'metadata': {'path': notebook.parent}})
| true | true |
1c3116918f0e8ccfa2d25dce8b10ccbb99e8d1a0 | 8,361 | py | Python | src/AE/ae.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 3 | 2021-02-22T19:26:04.000Z | 2022-03-02T22:08:25.000Z | src/AE/ae.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 1 | 2021-03-12T22:22:25.000Z | 2021-03-12T22:22:25.000Z | src/AE/ae.py | goeckslab/MarkerIntensityPredictor | 704e4ea782c6653cabb4b37a7b34fea4cd9fe595 | [
"MIT"
] | 1 | 2021-03-12T20:28:50.000Z | 2021-03-12T20:28:50.000Z | import pickle
import sys
from pathlib import Path
from Shared.data import Data
from Shared.data_loader import DataLoader
import numpy as np
import keras
from keras import layers, regularizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import anndata as ad
import pandas as pd
import umap
import tensorflow as tf
from sklearn.metrics import r2_score
import keract as kt
class AutoEncoder:
    """Plain autoencoder for per-cell marker-intensity data.

    Bundles data loading, normalization, model construction/training and a
    set of evaluation/export helpers. All artifacts are written beneath
    ``results/ae``.
    """

    data: Data
    # The fitted encoder sub-model (inputs -> latent space).
    encoder: any
    # The fitted decoder sub-model (latent space -> reconstruction).
    decoder: any
    # The full autoencoder model.
    ae: any
    # Keras History object returned by ``fit``.
    history: any

    input_dim: int
    encoding_dim: int

    # UMAP embeddings of the raw test inputs and of the latent codes.
    input_umap: any
    latent_umap: any

    # NOTE: these class-level DataFrames are shared across instances; the
    # class is intended to be used as a single instance per run.
    r2_scores = pd.DataFrame(columns=["Marker", "Score"])
    encoded_data = pd.DataFrame()
    reconstructed_data = pd.DataFrame()

    args = None
    results_folder = Path("results", "ae")

    def __init__(self, args):
        """Store parsed CLI arguments and fix the latent dimensionality."""
        self.encoding_dim = 5
        self.args = args

    def normalize(self, data):
        """Log10-transform, standardize, clip to [-5, 5] and min-max scale.

        Args:
            data: 2D numpy array of raw marker intensities.

        Returns:
            Array of the same shape, scaled into [0, 1].
        """
        # Input data contains some zeros which results in NaN (or Inf)
        # values when their log10 is computed. NaN (or Inf) are problematic
        # values for downstream analysis. Therefore, zeros are replaced by
        # a small value; see the following thread for related discussion.
        # https://www.researchgate.net/post/Log_transformation_of_values_that_include_0_zero_for_statistical_analyses2
        data[data == 0] = 1e-32
        data = np.log10(data)

        standard_scaler = StandardScaler()
        data = standard_scaler.fit_transform(data)
        # Clip extreme z-scores before squashing into [0, 1].
        data = data.clip(min=-5, max=5)

        min_max_scaler = MinMaxScaler(feature_range=(0, 1))
        data = min_max_scaler.fit_transform(data)
        return data

    def load_data(self):
        """Load inputs/markers from the CLI-specified file or folder.

        Exits the process if neither ``--file`` nor ``--dir`` was given.
        """
        print("Loading data...")
        if self.args.file:
            inputs, markers = DataLoader.get_data(
                self.args.file)
        elif self.args.dir:
            inputs, markers = DataLoader.load_folder_data(
                self.args.dir)
        else:
            print("Please specify a directory or a file")
            sys.exit()

        self.data = Data(np.array(inputs), markers, self.normalize)

    def build_auto_encoder(self):
        """Build, compile and train the autoencoder with early stopping."""
        activation = tf.keras.layers.LeakyReLU()
        activity_regularizer = regularizers.l1_l2(10e-5)
        input_layer = keras.Input(shape=(self.data.inputs_dim,))

        # Encoder: progressively shrink to the latent dimensionality.
        encoded = layers.Dense(self.data.inputs_dim / 2, activation=activation,
                               activity_regularizer=activity_regularizer)(input_layer)
        encoded = layers.Dense(self.data.inputs_dim / 3, activation=activation,
                               activity_regularizer=activity_regularizer)(encoded)
        encoded = layers.Dense(self.encoding_dim, activation=activation, activity_regularizer=activity_regularizer)(
            encoded)

        # Decoder: mirror the encoder back up to the input dimensionality.
        decoded = layers.Dense(self.data.inputs_dim / 3, activation=activation)(encoded)
        decoded = layers.Dense(self.data.inputs_dim / 2, activation=activation)(decoded)
        decoded = layers.Dense(self.data.inputs_dim, activation=activation)(decoded)

        # Full autoencoder.
        self.ae = keras.Model(input_layer, decoded, name="AE")
        self.ae.summary()

        # Stand-alone encoder model.
        self.encoder = keras.Model(input_layer, encoded, name="encoder")
        self.encoder.summary()

        # Stand-alone decoder model, re-using the trained decoder layers.
        encoded_input = keras.Input(shape=(self.encoding_dim,))
        deco = self.ae.layers[-3](encoded_input)
        deco = self.ae.layers[-2](deco)
        deco = self.ae.layers[-1](deco)
        self.decoder = keras.Model(encoded_input, deco, name="decoder")
        self.decoder.summary()

        self.ae.compile(optimizer="adam", loss=keras.losses.MeanSquaredError(), metrics=['acc', 'mean_squared_error'])

        callback = tf.keras.callbacks.EarlyStopping(monitor="val_loss",
                                                    mode="min", patience=5,
                                                    restore_best_weights=True)
        self.history = self.ae.fit(self.data.X_train, self.data.X_train,
                                   epochs=500,
                                   batch_size=32,
                                   shuffle=True,
                                   callbacks=[callback],
                                   validation_data=(self.data.X_val, self.data.X_val))

    def predict(self):
        """Round-trip one test cell through encoder/decoder and print shapes."""
        cell = self.data.X_test[0]
        cell = cell.reshape(1, cell.shape[0])
        encoded_cell = self.encoder.predict(cell)
        decoded_cell = self.decoder.predict(encoded_cell)

        print(f"Epochs: {len(self.history.history['loss'])}")
        print(f"Input shape:\t{cell.shape}")
        print(f"Encoded shape:\t{encoded_cell.shape}")
        print(f"Decoded shape:\t{decoded_cell.shape}")
        print(f"\nInput:\n{cell[0]}")
        print(f"\nEncoded:\n{encoded_cell[0]}")
        print(f"\nDecoded:\n{decoded_cell[0]}")

    def calculate_r2_score(self):
        """Compute per-marker R^2 between test inputs and reconstructions.

        Stores the result in ``self.r2_scores`` with columns
        ``["Marker", "Score"]``.
        """
        recon_test = self.ae.predict(self.data.X_test)
        recon_test = pd.DataFrame(data=recon_test, columns=self.data.markers)
        input_data = pd.DataFrame(data=self.data.X_test, columns=self.data.markers)

        # Collect all rows first and build the frame once:
        # DataFrame.append is deprecated and removed in pandas >= 2.0.
        rows = [
            {"Marker": marker,
             "Score": r2_score(input_data[marker], recon_test[marker])}
            for marker in self.data.markers
        ]
        self.r2_scores = pd.DataFrame(rows, columns=["Marker", "Score"])

    def create_h5ad_object(self):
        """Embed test data and latent codes with UMAP, then export as h5ad."""
        # Embedding of the raw test inputs.
        fit = umap.UMAP()
        self.input_umap = input_umap = fit.fit_transform(self.data.X_test)

        # Embedding of the latent space.
        fit = umap.UMAP()
        encoded = self.encoder.predict(self.data.X_test)
        self.latent_umap = fit.fit_transform(encoded)

        self.__create_h5ad("latent_markers", self.latent_umap, self.data.markers,
                           pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
        self.__create_h5ad("input", input_umap, self.data.markers,
                           pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
        return

    def __create_h5ad(self, file_name: str, umap, markers, df):
        """Write one AnnData file with the given UMAP stored as X_umap."""
        obs = pd.DataFrame(data=df, index=df.index)
        var = pd.DataFrame(index=markers)
        obsm = {"X_umap": umap}
        uns = dict()
        adata = ad.AnnData(df.to_numpy(), var=var, obs=obs, uns=uns, obsm=obsm)

        adata.write(Path(f'{self.results_folder}/{file_name}.h5ad'))

    def create_test_predictions(self):
        """Encode the test set and decode it again for later export."""
        self.encoded_data = pd.DataFrame(self.encoder.predict(self.data.X_test))
        self.reconstructed_data = pd.DataFrame(columns=self.data.markers, data=self.decoder.predict(self.encoded_data))

    def create_correlation_data(self):
        """Write the marker correlation matrix of the raw inputs to CSV."""
        inputs = pd.DataFrame(columns=self.data.markers, data=self.data.inputs)
        corr = inputs.corr()
        corr.to_csv(Path(f'{self.results_folder}/correlation.csv'), index=False)

    def write_created_data_to_disk(self):
        """Persist history, test data, latent codes, reconstructions, scores."""
        with open(f'{self.results_folder}/ae_history', 'wb') as file_pi:
            pickle.dump(self.history.history, file_pi)

        X_test = pd.DataFrame(columns=self.data.markers, data=self.data.X_test)
        X_test.to_csv(Path(f'{self.results_folder}/test_data.csv'), index=False)
        self.encoded_data.to_csv(Path(f'{self.results_folder}/encoded_data.csv'), index=False)
        self.reconstructed_data.to_csv(Path(f'{self.results_folder}/reconstructed_data.csv'), index=False)
        self.r2_scores.to_csv(Path(f'{self.results_folder}/r2_scores.csv'), index=False)

    def get_activations(self):
        """Save per-layer activation plots for a single test cell."""
        cell = self.data.X_test[0]
        cell = cell.reshape(1, cell.shape[0])
        activations = kt.get_activations(self.ae, cell)
        kt.display_activations(activations, cmap="summer", directory=f'{self.results_folder}', save=True)
| 38.888372 | 119 | 0.631384 | import pickle
import sys
from pathlib import Path
from Shared.data import Data
from Shared.data_loader import DataLoader
import numpy as np
import keras
from keras import layers, regularizers
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import anndata as ad
import pandas as pd
import umap
import tensorflow as tf
from sklearn.metrics import r2_score
import keract as kt
class AutoEncoder:
data: Data
encoder: any
decoder: any
ae: any
history: any
input_dim: int
encoding_dim: int
input_umap: any
latent_umap: any
r2_scores = pd.DataFrame(columns=["Marker", "Score"])
encoded_data = pd.DataFrame()
reconstructed_data = pd.DataFrame()
args = None
results_folder = Path("results", "ae")
def __init__(self, args):
self.encoding_dim = 5
self.args = args
def normalize(self, data):
data[data == 0] = 1e-32
data = np.log10(data)
standard_scaler = StandardScaler()
data = standard_scaler.fit_transform(data)
data = data.clip(min=-5, max=5)
min_max_scaler = MinMaxScaler(feature_range=(0, 1))
data = min_max_scaler.fit_transform(data)
return data
def load_data(self):
print("Loading data...")
if self.args.file:
inputs, markers = DataLoader.get_data(
self.args.file)
elif self.args.dir:
inputs, markers = DataLoader.load_folder_data(
self.args.dir)
else:
print("Please specify a directory or a file")
sys.exit()
self.data = Data(np.array(inputs), markers, self.normalize)
def build_auto_encoder(self):
activation = tf.keras.layers.LeakyReLU()
activity_regularizer = regularizers.l1_l2(10e-5)
input_layer = keras.Input(shape=(self.data.inputs_dim,))
encoded = layers.Dense(self.data.inputs_dim / 2, activation=activation,
activity_regularizer=activity_regularizer)(input_layer)
encoded = layers.Dense(self.data.inputs_dim / 3, activation=activation,
activity_regularizer=activity_regularizer)(encoded)
encoded = layers.Dense(self.encoding_dim, activation=activation, activity_regularizer=activity_regularizer)(
encoded)
decoded = layers.Dense(self.data.inputs_dim / 3, activation=activation)(encoded)
decoded = layers.Dense(self.data.inputs_dim / 2, activation=activation)(decoded)
decoded = layers.Dense(self.data.inputs_dim, activation=activation)(decoded)
self.ae = keras.Model(input_layer, decoded, name="AE")
self.ae.summary()
self.encoder = keras.Model(input_layer, encoded, name="encoder")
self.encoder.summary()
encoded_input = keras.Input(shape=(self.encoding_dim,))
deco = self.ae.layers[-3](encoded_input)
deco = self.ae.layers[-2](deco)
deco = self.ae.layers[-1](deco)
self.decoder = keras.Model(encoded_input, deco, name="decoder")
self.decoder.summary()
self.ae.compile(optimizer="adam", loss=keras.losses.MeanSquaredError(), metrics=['acc', 'mean_squared_error'])
callback = tf.keras.callbacks.EarlyStopping(monitor="val_loss",
mode="min", patience=5,
restore_best_weights=True)
self.history = self.ae.fit(self.data.X_train, self.data.X_train,
epochs=500,
batch_size=32,
shuffle=True,
callbacks=[callback],
validation_data=(self.data.X_val, self.data.X_val))
def predict(self):
cell = self.data.X_test[0]
cell = cell.reshape(1, cell.shape[0])
encoded_cell = self.encoder.predict(cell)
decoded_cell = self.decoder.predict(encoded_cell)
print(f"Epochs: {len(self.history.history['loss'])}")
print(f"Input shape:\t{cell.shape}")
print(f"Encoded shape:\t{encoded_cell.shape}")
print(f"Decoded shape:\t{decoded_cell.shape}")
print(f"\nInput:\n{cell[0]}")
print(f"\nEncoded:\n{encoded_cell[0]}")
print(f"\nDecoded:\n{decoded_cell[0]}")
def calculate_r2_score(self):
recon_test = self.ae.predict(self.data.X_test)
recon_test = pd.DataFrame(data=recon_test, columns=self.data.markers)
input_data = pd.DataFrame(data=self.data.X_test, columns=self.data.markers)
for marker in self.data.markers:
input_marker = input_data[f"{marker}"]
var_marker = recon_test[f"{marker}"]
score = r2_score(input_marker, var_marker)
self.r2_scores = self.r2_scores.append(
{
"Marker": marker,
"Score": score
}, ignore_index=True
)
def create_h5ad_object(self):
fit = umap.UMAP()
self.input_umap = input_umap = fit.fit_transform(self.data.X_test)
fit = umap.UMAP()
encoded = self.encoder.predict(self.data.X_test)
self.latent_umap = fit.fit_transform(encoded)
self.__create_h5ad("latent_markers", self.latent_umap, self.data.markers,
pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
self.__create_h5ad("input", input_umap, self.data.markers,
pd.DataFrame(columns=self.data.markers, data=self.data.X_test))
return
def __create_h5ad(self, file_name: str, umap, markers, df):
obs = pd.DataFrame(data=df, index=df.index)
var = pd.DataFrame(index=markers)
obsm = {"X_umap": umap}
uns = dict()
adata = ad.AnnData(df.to_numpy(), var=var, obs=obs, uns=uns, obsm=obsm)
adata.write(Path(f'{self.results_folder}/{file_name}.h5ad'))
def create_test_predictions(self):
self.encoded_data = pd.DataFrame(self.encoder.predict(self.data.X_test))
self.reconstructed_data = pd.DataFrame(columns=self.data.markers, data=self.decoder.predict(self.encoded_data))
def create_correlation_data(self):
inputs = pd.DataFrame(columns=self.data.markers, data=self.data.inputs)
corr = inputs.corr()
corr.to_csv(Path(f'{self.results_folder}/correlation.csv'), index=False)
def write_created_data_to_disk(self):
with open(f'{self.results_folder}/ae_history', 'wb') as file_pi:
pickle.dump(self.history.history, file_pi)
X_test = pd.DataFrame(columns=self.data.markers, data=self.data.X_test)
X_test.to_csv(Path(f'{self.results_folder}/test_data.csv'), index=False)
self.encoded_data.to_csv(Path(f'{self.results_folder}/encoded_data.csv'), index=False)
self.reconstructed_data.to_csv(Path(f'{self.results_folder}/reconstructed_data.csv'), index=False)
self.r2_scores.to_csv(Path(f'{self.results_folder}/r2_scores.csv'), index=False)
def get_activations(self):
cell = self.data.X_test[0]
cell = cell.reshape(1, cell.shape[0])
activations = kt.get_activations(self.ae, cell)
fig = kt.display_activations(activations, cmap="summer", directory=f'{self.results_folder}', save=True)
| true | true |
1c311714daa3855f26e41a441c7cba090913511a | 2,127 | py | Python | scons-local/SCons/Tool/sgilink.py | bibleuspro/scons | 625d446ae8996ff1b3d660c44e2827fc832cf12b | [
"MIT"
] | 1 | 2017-02-10T00:26:44.000Z | 2017-02-10T00:26:44.000Z | scons-local/SCons/Tool/sgilink.py | bibleuspro/scons | 625d446ae8996ff1b3d660c44e2827fc832cf12b | [
"MIT"
] | null | null | null | scons-local/SCons/Tool/sgilink.py | bibleuspro/scons | 625d446ae8996ff1b3d660c44e2827fc832cf12b | [
"MIT"
] | null | null | null | """SCons.Tool.sgilink
Tool-specific initialization for the SGI MIPSPro linker on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgilink.py 2014/07/05 09:42:21 garyo"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
    """Add Builders and construction variables for MIPSPro to an Environment."""
    link.generate(env)

    # Prefer whichever MIPSPro linker is found on the path; fall back to cc.
    detected_linker = env.Detect(linkers)
    env['LINK'] = detected_linker if detected_linker else 'cc'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')

    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    env['RPATHPREFIX'] = '-rpath '
    env['RPATHSUFFIX'] = ''
    env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
    """Return a truthy value when one of the MIPSPro linkers is detected."""
    return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.761905 | 80 | 0.739069 |
__revision__ = "src/engine/SCons/Tool/sgilink.py 2014/07/05 09:42:21 garyo"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
link.generate(env)
env['LINK'] = env.Detect(linkers) or 'cc'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
env['RPATHPREFIX'] = '-rpath '
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return env.Detect(linkers)
| true | true |
1c31177090badb6048b814d1896ef8b6e2323ee1 | 264 | py | Python | tests/artificial/transf_BoxCox/trend_MovingMedian/cycle_12/ar_/test_artificial_128_BoxCox_MovingMedian_12__0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_BoxCox/trend_MovingMedian/cycle_12/ar_/test_artificial_128_BoxCox_MovingMedian_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_BoxCox/trend_MovingMedian/cycle_12/ar_/test_artificial_128_BoxCox_MovingMedian_12__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 0); | 37.714286 | 164 | 0.731061 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 0); | true | true |
1c31198db568de2df98c2405e815b17b0c759b7f | 1,784 | py | Python | gargantua/apis/query_makers.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 18 | 2015-05-08T02:06:39.000Z | 2022-03-05T21:36:48.000Z | gargantua/apis/query_makers.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 131 | 2015-01-22T14:54:59.000Z | 2022-02-16T15:14:10.000Z | gargantua/apis/query_makers.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 3 | 2016-01-11T13:52:41.000Z | 2019-06-12T08:54:15.000Z | """Constructors to make query to db"""
from abc import ABC, abstractclassmethod, abstractmethod

import pymongo
import tornado
from bson import ObjectId

from gargantua.utils import logger, is_objectid, debug_wrapper
class QueryMakerError(Exception):
    """Raised when a query maker fails to build or update a database query."""
class BaseMaker(ABC):
    """Abstract base class for query makers.

    Subclasses implement ``update_query`` to extend the mongo query and
    projection derived from an incoming request.
    """

    # ``abstractclassmethod`` has been deprecated since Python 3.3; the
    # supported spelling is ``classmethod`` stacked over ``abstractmethod``.
    @classmethod
    @abstractmethod
    async def update_query(cls, app, query, projection):
        """Return the (possibly modified) ``(query, projection)`` pair."""
        return query, projection
class PostCategoiesFilterMaker(BaseMaker):
    """Posts' Categories filter.

    NOTE(review): the class name misspells "Categories"; renaming it would
    break existing imports, so the typo is only documented here.
    """

    @staticmethod
    def get_default_posts_projection():
        """Return the default mongo projection for post queries."""
        return {
            'post_author': 1,
            'post_content': 1,
            'link': 1,
            'post_id': 1,
            'post_name': 1,
            'post_status': 1,
            'post_title': 1,
            'post_type': 1,
            'post_menu': 1,
            'post_modified_gmt': 1,
            'post_created_at': 1,
            'post_tags': 1,
        }

    @classmethod
    async def update_query(cls, app, query, projection):
        """Add a ``category`` constraint to ``query`` from the request.

        Reads the ``category`` request argument, resolving a name to its
        ``_id`` via the ``categories`` collection when it is not already an
        ObjectId string. The literal string ``'null'`` selects posts that
        have no category at all. ``projection`` is passed through unchanged.

        Raises:
            QueryMakerError: if reading the argument or the db lookup fails.
        """
        try:
            category = app.get_argument('category', default=None, strip=True)
            if category and category != 'null':
                if is_objectid(category):
                    category = ObjectId(category)
                else:
                    # Resolve a human-readable category name to its _id.
                    docu = await app.db.categories.find_one({'name': category})
                    if docu:
                        category = docu['_id']
        except Exception as err:
            raise QueryMakerError(err)

        # Only apply the filter when the caller has not set one already.
        if 'category' not in query and category:
            logger.debug('CategoryFilterMaker.update_query for category %s', category)
            if category == 'null':
                # 'null' means "posts without any category".
                query['category'] = {'$exists': False}
            else:
                query['category'] = category

        return query, projection
| 27.446154 | 86 | 0.56222 | from abc import ABC, abstractclassmethod
import pymongo
from bson import ObjectId
import tornado
from gargantua.utils import logger, is_objectid, debug_wrapper
class QueryMakerError(Exception):
pass
class BaseMaker(ABC):
@abstractclassmethod
async def update_query(cls, app, query, projection):
return query, projection
class PostCategoiesFilterMaker(BaseMaker):
@staticmethod
def get_default_posts_projection():
return {
'post_author': 1,
'post_content': 1,
'link': 1,
'post_id': 1,
'post_name': 1,
'post_status': 1,
'post_title': 1,
'post_type': 1,
'post_menu': 1,
'post_modified_gmt': 1,
'post_created_at': 1,
'post_tags': 1,
}
@classmethod
async def update_query(cls, app, query, projection):
try:
category = app.get_argument('category', default=None, strip=True)
if category and category != 'null':
if is_objectid(category):
category = ObjectId(category)
else:
docu = await app.db.categories.find_one({'name': category})
if docu:
category = docu['_id']
except Exception as err:
raise QueryMakerError(err)
if 'category' not in query and category:
logger.debug('CategoryFilterMaker.update_query for category %s', category)
if category == 'null':
query['category'] = {'$exists': False}
else:
query['category'] = category
return query, projection
| true | true |
1c311a9d72424ccf04f8719f6f13a013322ccf33 | 438 | py | Python | products/migrations/0006_album_special_offer_price.py | JuanBrachoDev/Vinyl | b988c93f9919371aa151869fef2eed2f7c705b44 | [
"Net-SNMP",
"Xnet"
] | null | null | null | products/migrations/0006_album_special_offer_price.py | JuanBrachoDev/Vinyl | b988c93f9919371aa151869fef2eed2f7c705b44 | [
"Net-SNMP",
"Xnet"
] | null | null | null | products/migrations/0006_album_special_offer_price.py | JuanBrachoDev/Vinyl | b988c93f9919371aa151869fef2eed2f7c705b44 | [
"Net-SNMP",
"Xnet"
] | 1 | 2021-10-20T21:13:26.000Z | 2021-10-20T21:13:26.000Z | # Generated by Django 3.2 on 2021-10-05 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_auto_20211005_1735'),
]
operations = [
migrations.AddField(
model_name='album',
name='special_offer_price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True),
),
]
| 23.052632 | 93 | 0.627854 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_auto_20211005_1735'),
]
operations = [
migrations.AddField(
model_name='album',
name='special_offer_price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=6, null=True),
),
]
| true | true |
1c311a9ef7a57e39e9cdd6e1cdbd444534088472 | 2,216 | py | Python | hata/discord/guild/flags.py | monoidic/hata | 869fdd116360745cd554799d5df3b3a810f156b2 | [
"0BSD"
] | 1 | 2022-03-02T03:59:57.000Z | 2022-03-02T03:59:57.000Z | hata/discord/guild/flags.py | m0nk3ybraindead/hata | f87ed3d7009eeae31d6ea158772efd33775c7b1c | [
"0BSD"
] | 1 | 2022-02-08T16:54:39.000Z | 2022-02-08T16:54:39.000Z | hata/discord/guild/flags.py | WizzyBots/hata | f6991afc0bebf7dad932888a536f4d010f8663c7 | [
"0BSD"
] | 1 | 2020-09-17T20:10:15.000Z | 2020-09-17T20:10:15.000Z | __all__ = ('SystemChannelFlag',)
from ..bases import ReverseFlagBase
class SystemChannelFlag(ReverseFlagBase):
"""
The flags of a ``Guild``'s system channel.
For Discord these flags tell, what ``MessageType`-s are not sent to the guild's system channel, but the wrapper
reverses this behaviour.
The implemented system channel flags are the following:
+---------------------------+-------------------+
| Respective name | Bitwise position |
+===========================+===================+
| welcome | 0 |
+---------------------------+-------------------+
| boost | 1 |
+---------------------------+-------------------+
| setup_tips | 2 |
+---------------------------+-------------------+
| join_sticker_replies | 3 |
+---------------------------+-------------------+
There are also predefined ``SystemChannelFlag``-s:
+-----------------------+-----------------------+
| Class attribute name | value |
+=======================+=======================+
| NONE | ActivityFlag(0b1111) |
+-----------------------+-----------------------+
| ALL | ActivityFlag(0b0000) |
+-----------------------+-----------------------+
"""
__keys__ = {
'welcome': 0,
'boost': 1,
'setup_tips': 2,
'join_sticker_replies': 3,
}
@property
def none(self):
"""
Whether the flag not allows any system messages at the respective system channel.
Returns
-------
none : `bool`
"""
return (self == self.NONE)
@property
def all(self):
"""
Whether the flag allows all the system messages at the respective system channel.
Returns
-------
none : `bool`
"""
return (self == self.ALL)
NONE = NotImplemented
ALL = NotImplemented
SystemChannelFlag.NONE = SystemChannelFlag(0b1111)
SystemChannelFlag.ALL = SystemChannelFlag(0b0000)
| 31.657143 | 115 | 0.393051 | __all__ = ('SystemChannelFlag',)
from ..bases import ReverseFlagBase
class SystemChannelFlag(ReverseFlagBase):
__keys__ = {
'welcome': 0,
'boost': 1,
'setup_tips': 2,
'join_sticker_replies': 3,
}
@property
def none(self):
return (self == self.NONE)
@property
def all(self):
return (self == self.ALL)
NONE = NotImplemented
ALL = NotImplemented
SystemChannelFlag.NONE = SystemChannelFlag(0b1111)
SystemChannelFlag.ALL = SystemChannelFlag(0b0000)
| true | true |
1c311b580482ffa462e622f3c9d59ce48f1417ba | 21 | py | Python | song/__init__.py | louisgv/song-cli | 10186b26f66c2f07e3cf1a3cd7b5212610c33afb | [
"MIT"
] | 70 | 2017-05-17T15:11:27.000Z | 2021-01-10T01:09:06.000Z | song/__init__.py | louisgv/song-cli | 10186b26f66c2f07e3cf1a3cd7b5212610c33afb | [
"MIT"
] | 9 | 2017-05-12T17:29:46.000Z | 2018-03-16T19:21:50.000Z | song/__init__.py | louisgv/song-cli | 10186b26f66c2f07e3cf1a3cd7b5212610c33afb | [
"MIT"
] | 17 | 2017-05-28T20:27:35.000Z | 2021-07-12T03:41:25.000Z | __version__ = '2.9.1' | 21 | 21 | 0.666667 | __version__ = '2.9.1' | true | true |
1c311bee07b9b229e65fea6e6239bd3237c48315 | 341 | py | Python | app/api/api_v1/api.py | wlsouza/fastapi-todolist | c7c75bd73754dde8687e1486a80c77d903e33b31 | [
"MIT"
] | 4 | 2021-09-09T00:20:21.000Z | 2022-01-12T09:08:07.000Z | app/api/api_v1/api.py | wlsouza/fastapi-todolist | c7c75bd73754dde8687e1486a80c77d903e33b31 | [
"MIT"
] | null | null | null | app/api/api_v1/api.py | wlsouza/fastapi-todolist | c7c75bd73754dde8687e1486a80c77d903e33b31 | [
"MIT"
] | null | null | null | from fastapi import APIRouter
from app.api.api_v1.endpoints import login, users
api_v1_router = APIRouter()
api_v1_router.include_router(login.router, prefix="/login", tags=["login"])
api_v1_router.include_router(users.router, prefix="/users", tags=["users"])
# api_v1_router.include_router(tasks.router, prefix="/tasks", tags=["tasks"])
| 34.1 | 77 | 0.771261 | from fastapi import APIRouter
from app.api.api_v1.endpoints import login, users
api_v1_router = APIRouter()
api_v1_router.include_router(login.router, prefix="/login", tags=["login"])
api_v1_router.include_router(users.router, prefix="/users", tags=["users"])
| true | true |
1c311c26c934dbfc350461f0fa8b24cde322d362 | 9,058 | py | Python | src/spaczz/search/tokensearcher.py | JonasHablitzel/spaczz | 9f79fe9d35a25787f926b4c4955c2650f4600073 | [
"MIT"
] | 153 | 2020-07-07T01:26:25.000Z | 2022-03-31T23:47:00.000Z | src/spaczz/search/tokensearcher.py | JonasHablitzel/spaczz | 9f79fe9d35a25787f926b4c4955c2650f4600073 | [
"MIT"
] | 38 | 2020-07-15T02:29:34.000Z | 2021-08-15T21:32:54.000Z | src/spaczz/search/tokensearcher.py | JonasHablitzel/spaczz | 9f79fe9d35a25787f926b4c4955c2650f4600073 | [
"MIT"
] | 20 | 2020-07-07T15:41:05.000Z | 2022-02-21T19:28:22.000Z | """Module for TokenSearcher: flexible token searching in spaCy `Doc` objects."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
import regex
from spacy.tokens import Doc, Token
from spacy.vocab import Vocab
from ..process import FuzzyFuncs
from ..util import n_wise
class TokenSearcher:
    """Class for flexible token searching in spaCy `Doc` objects.

    Uses individual (and extended) spaCy token matching patterns to find
    match candidates. Candidates are used to generate new patterns to add
    to a spaCy `Matcher`.

    "FUZZY" and "FREGEX" are the two additional spaCy token pattern options.

    For example:
        {"TEXT": {"FREGEX": "(database){e<=1}"}},
        {"LOWER": {"FUZZY": "access", "MIN_R": 85, "FUZZY_FUNC": "quick_lev"}}

    Make sure to use uppercase dictionary keys in patterns.

    Attributes:
        vocab (Vocab): The shared vocabulary.
            Included for consistency and potential future-state.
        _fuzzy_funcs (FuzzyFuncs):
            Container class housing fuzzy matching functions.
            Functions are accessible via the classes `get()` method
            by their given key name. The following rapidfuzz matching
            functions with default settings are available:
            "simple" = `ratio`
            "quick" = `QRatio`
            "quick_lev" = `quick_lev_ratio`
    """

    def __init__(self: TokenSearcher, vocab: Vocab) -> None:
        """Initializes a token searcher.

        Args:
            vocab: A spaCy `Vocab` object.
                Purely for consistency between spaCy
                and spaczz matcher APIs for now.
                spaczz matchers are mostly pure-Python
                currently and do not share vocabulary
                with spaCy pipelines.
        """
        self.vocab = vocab
        self._fuzzy_funcs: FuzzyFuncs = FuzzyFuncs(match_type="token")

    def fuzzy_compare(
        self: TokenSearcher,
        a: str,
        b: str,
        ignore_case: bool = True,
        fuzzy_func: str = "simple",
    ) -> int:
        """Performs fuzzy matching between two strings.

        Applies the given fuzzy matching algorithm (fuzzy_func)
        to two strings and returns the resulting fuzzy ratio.

        Args:
            a: First string for comparison.
            b: Second string for comparison.
            ignore_case: Whether to lower-case a and b
                before comparison or not. Default is `True`.
            fuzzy_func: Key name of fuzzy matching function to use.
                Default is `"simple"`.

        Returns:
            The fuzzy ratio between a and b, rounded to an int.

        Example:
            >>> import spacy
            >>> from spaczz.search import TokenSearcher
            >>> nlp = spacy.blank("en")
            >>> searcher = TokenSearcher(nlp.vocab)
            >>> searcher.fuzzy_compare("spaczz", "spacy")
            73
        """
        if ignore_case:
            a = a.lower()
            b = b.lower()
        return round(self._fuzzy_funcs.get(fuzzy_func)(a, b))

    def match(
        self: TokenSearcher,
        doc: Doc,
        pattern: List[Dict[str, Any]],
        min_r: int = 75,
        fuzzy_func: str = "simple",
    ) -> List[List[Optional[Tuple[str, str]]]]:
        """Finds potential token pattern matches in a `Doc` object.

        Make sure to use uppercase dictionary keys in patterns.

        Args:
            doc: `Doc` object to search over.
            pattern: Individual spaCy token pattern.
            min_r: Minimum match ratio required for fuzzy matching.
                Can be overwritten with token pattern options.
                Default is `75`.
            fuzzy_func: Fuzzy matching function to use.
                Can be overwritten with token pattern options.
                Default is `"simple"`.

        Returns:
            A list of lists with each inner list representing a potential match.
            The inner lists will be populated with key, value tuples of token
            matches and `None` for placeholder tokens representing non-fuzzy
            tokens. Duplicate matches are dropped, keeping first occurrences.

        Raises:
            TypeError: doc must be a `Doc` object.
            TypeError: pattern must be a list.
            ValueError: pattern cannot have zero tokens.

        Example:
            >>> import spacy
            >>> from spaczz.search import TokenSearcher
            >>> nlp = spacy.blank("en")
            >>> searcher = TokenSearcher(nlp.vocab)
            >>> doc = nlp("I was prescribed zithramax and advar")
            >>> pattern = [
                {"TEXT": {"FUZZY": "zithromax"}},
                {"POS": "CCONJ"},
                {"TEXT": {"FREGEX": "(advair){e<=1}"}}
                ]
            >>> searcher.match(doc, pattern)
            [[('TEXT', 'zithramax'), None, ('TEXT', 'advar')]]
        """
        if not isinstance(doc, Doc):
            raise TypeError("doc must be a Doc object.")
        if not isinstance(pattern, list):
            raise TypeError(
                "pattern must be a list",
                "Make sure pattern is wrapped in a list.",
            )
        if len(pattern) == 0:
            raise ValueError("pattern cannot have zero tokens.")
        matches = []
        # Deduplicate while collecting: hashing each candidate (a tuple of
        # hashable entries) is O(1) per window, replacing the previous
        # O(n**2) `i not in matches[:n]` post-filter. First occurrences and
        # their order are preserved, matching the old behavior.
        seen = set()
        for seq in n_wise(doc, len(pattern)):
            seq_matches = self._iter_pattern(seq, pattern, min_r, fuzzy_func)
            if seq_matches:
                key = tuple(seq_matches)
                if key not in seen:
                    seen.add(key)
                    matches.append(seq_matches)
        return matches

    @staticmethod
    def regex_compare(text: str, pattern: str, ignore_case: bool = False) -> bool:
        """Performs fuzzy-regex supporting regex matching between two strings.

        Args:
            text: The string to match against.
            pattern: The regex pattern string.
            ignore_case: Whether to lower-case text
                before comparison or not. Default is `False`.

        Returns:
            `True` if match, `False` if not.

        Example:
            >>> import spacy
            >>> from spaczz.search import TokenSearcher
            >>> nlp = spacy.blank("en")
            >>> searcher = TokenSearcher(nlp.vocab)
            >>> searcher.regex_compare("sequel", "(sql){i<=3}")
            True
        """
        if ignore_case:
            text = text.lower()
        # `regex.match` anchors at the start of `text`; the truthiness of
        # the returned match object is the result.
        return bool(regex.match(pattern, text))

    def _iter_pattern(
        self: TokenSearcher,
        seq: Tuple[Token, ...],
        pattern: List[Dict[str, Any]],
        min_r: int,
        fuzzy_func: str,
    ) -> List[Optional[Tuple[str, str]]]:
        """Evaluates each token in a pattern against a doc token sequence.

        Returns an empty list as soon as any fuzzy/fregex token fails, so
        callers can treat a falsy result as "no match".
        """
        seq_matches: List[Optional[Tuple[str, str]]] = []
        for i, token in enumerate(pattern):
            pattern_dict, case, case_bool = self._parse_case(token)
            if isinstance(pattern_dict, dict):
                pattern_text, pattern_type = self._parse_type(pattern_dict)
                if pattern_text and pattern_type == "FUZZY":
                    # Per-token "MIN_R"/"FUZZY_FUNC" options override the
                    # call-level defaults.
                    if (
                        self.fuzzy_compare(
                            seq[i].text,
                            pattern_text,
                            case_bool,
                            pattern_dict.get("FUZZY_FUNC", fuzzy_func),
                        )
                        >= pattern_dict.get("MIN_R", min_r)
                    ):
                        seq_matches.append((case, seq[i].text))
                    else:
                        return []
                elif pattern_text and pattern_type == "FREGEX":
                    if self.regex_compare(seq[i].text, pattern_text, case_bool):
                        seq_matches.append((case, seq[i].text))
                    else:
                        return []
                else:
                    # Nested dict without FUZZY/FREGEX: plain spaCy token
                    # predicate; placeholder only.
                    seq_matches.append(None)
            else:
                # Non-dict value (or no TEXT/LOWER key): placeholder only.
                seq_matches.append(None)
        return seq_matches

    @staticmethod
    def _parse_case(token: Dict[str, Any]) -> Tuple[Union[str, Dict, None], str, bool]:
        """Parses the case of a token pattern.

        Returns the "TEXT" value (case-sensitive) when present, otherwise
        the "LOWER" value (case-insensitive), along with the key name and
        an ignore-case flag.
        """
        if token.get("TEXT"):
            return token.get("TEXT"), "TEXT", False
        else:
            return token.get("LOWER"), "LOWER", True

    @staticmethod
    def _parse_type(pattern_dict: Dict[str, Any]) -> Tuple[str, str]:
        """Parses the type ("FUZZY" or "FREGEX") of a token pattern."""
        fuzzy_text = pattern_dict.get("FUZZY")
        regex_text = pattern_dict.get("FREGEX")
        if isinstance(fuzzy_text, str):
            return fuzzy_text, "FUZZY"
        elif isinstance(regex_text, str):
            return regex_text, "FREGEX"
        else:
            return "", ""
| 36.672065 | 87 | 0.548465 | from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
import regex
from spacy.tokens import Doc, Token
from spacy.vocab import Vocab
from ..process import FuzzyFuncs
from ..util import n_wise
class TokenSearcher:
    """Flexible token searching in spaCy `Doc` objects.

    Evaluates extended spaCy token patterns — the extra "FUZZY" and
    "FREGEX" predicates — over sliding windows of tokens and reports
    the sequences that satisfy a full pattern.
    """
    def __init__(self: TokenSearcher, vocab: Vocab) -> None:
        """Initializes a token searcher with a shared spaCy vocabulary.

        The vocab is stored only for API parity with spaCy matchers.
        """
        self.vocab = vocab
        self._fuzzy_funcs: FuzzyFuncs = FuzzyFuncs(match_type="token")
    def fuzzy_compare(
        self: TokenSearcher,
        a: str,
        b: str,
        ignore_case: bool = True,
        fuzzy_func: str = "simple",
    ) -> int:
        """Returns the rounded fuzzy ratio between ``a`` and ``b``.

        ``fuzzy_func`` is a key into the ``FuzzyFuncs`` container; when
        ``ignore_case`` is set, both strings are lower-cased first.
        """
        if ignore_case:
            a = a.lower()
            b = b.lower()
        return round(self._fuzzy_funcs.get(fuzzy_func)(a, b))
    def match(
        self: TokenSearcher,
        doc: Doc,
        pattern: List[Dict[str, Any]],
        min_r: int = 75,
        fuzzy_func: str = "simple",
    ) -> List[List[Optional[Tuple[str, str]]]]:
        """Finds potential token pattern matches in ``doc``.

        Slides a window of ``len(pattern)`` tokens over ``doc`` and keeps
        every window satisfying the whole pattern. Each match is a list of
        (attribute, token text) tuples for fuzzy/fregex tokens with `None`
        placeholders for the remaining tokens. Duplicates are filtered,
        keeping first occurrences.

        Raises:
            TypeError: If ``doc`` is not a `Doc` or ``pattern`` not a list.
            ValueError: If ``pattern`` has zero tokens.
        """
        if not isinstance(doc, Doc):
            raise TypeError("doc must be a Doc object.")
        if not isinstance(pattern, list):
            raise TypeError(
                "pattern must be a list",
                "Make sure pattern is wrapped in a list.",
            )
        if len(pattern) == 0:
            raise ValueError("pattern cannot have zero tokens.")
        matches = []
        for seq in n_wise(doc, len(pattern)):
            seq_matches = self._iter_pattern(seq, pattern, min_r, fuzzy_func)
            if seq_matches:
                matches.append(seq_matches)
        if matches:
            # Order-preserving de-duplication: keep the first occurrence of
            # each candidate match.
            filtered_matches = [
                i for n, i in enumerate(matches) if i not in matches[:n]
            ]
            return filtered_matches
        else:
            return matches
    @staticmethod
    def regex_compare(text: str, pattern: str, ignore_case: bool = False) -> bool:
        """Returns whether ``pattern`` matches at the start of ``text``.

        Supports the fuzzy-regex syntax of the third-party `regex` module
        (e.g. ``"(sql){i<=3}"``). ``ignore_case`` lower-cases ``text`` first.
        """
        if ignore_case:
            text = text.lower()
        if regex.match(pattern, text):
            return True
        else:
            return False
    def _iter_pattern(
        self: TokenSearcher,
        seq: Tuple[Token, ...],
        pattern: List[Dict[str, Any]],
        min_r: int,
        fuzzy_func: str,
    ) -> List[Optional[Tuple[str, str]]]:
        """Evaluates each pattern token against the token sequence ``seq``.

        Returns an empty list as soon as any FUZZY/FREGEX predicate fails,
        so callers can treat a falsy result as "no match".
        """
        seq_matches: List[Optional[Tuple[str, str]]] = []
        for i, token in enumerate(pattern):
            pattern_dict, case, case_bool = self._parse_case(token)
            if isinstance(pattern_dict, dict):
                pattern_text, pattern_type = self._parse_type(pattern_dict)
                if pattern_text and pattern_type == "FUZZY":
                    # Per-token "MIN_R"/"FUZZY_FUNC" keys override the
                    # call-level defaults.
                    if (
                        self.fuzzy_compare(
                            seq[i].text,
                            pattern_text,
                            case_bool,
                            pattern_dict.get("FUZZY_FUNC", fuzzy_func),
                        )
                        >= pattern_dict.get("MIN_R", min_r)
                    ):
                        seq_matches.append((case, seq[i].text))
                    else:
                        return []
                elif pattern_text and pattern_type == "FREGEX":
                    if self.regex_compare(seq[i].text, pattern_text, case_bool):
                        seq_matches.append((case, seq[i].text))
                    else:
                        return []
                else:
                    # Nested dict without FUZZY/FREGEX: placeholder only.
                    seq_matches.append(None)
            else:
                # Non-dict value (or missing TEXT/LOWER): placeholder only.
                seq_matches.append(None)
        return seq_matches
    @staticmethod
    def _parse_case(token: Dict[str, Any]) -> Tuple[Union[str, Dict, None], str, bool]:
        """Returns (value, key name, ignore-case flag) for a pattern token.

        "TEXT" (case-sensitive) wins over "LOWER" (case-insensitive).
        """
        if token.get("TEXT"):
            return token.get("TEXT"), "TEXT", False
        else:
            return token.get("LOWER"), "LOWER", True
    @staticmethod
    def _parse_type(pattern_dict: Dict[str, Any]) -> Tuple[str, str]:
        """Returns (pattern text, type) where type is "FUZZY" or "FREGEX"."""
        fuzzy_text = pattern_dict.get("FUZZY")
        regex_text = pattern_dict.get("FREGEX")
        if isinstance(fuzzy_text, str):
            return fuzzy_text, "FUZZY"
        elif isinstance(regex_text, str):
            return regex_text, "FREGEX"
        else:
            return "", ""
| true | true |
1c311c684a007f61d10d19b067c8f928b015dca9 | 1,480 | py | Python | doctr/models/detection/predictor/pytorch.py | elejke/doctr | 1d62d3f2b1e9a60560af0685fe882a69826503a7 | [
"Apache-2.0"
] | null | null | null | doctr/models/detection/predictor/pytorch.py | elejke/doctr | 1d62d3f2b1e9a60560af0685fe882a69826503a7 | [
"Apache-2.0"
] | null | null | null | doctr/models/detection/predictor/pytorch.py | elejke/doctr | 1d62d3f2b1e9a60560af0685fe882a69826503a7 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Any, List, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
__all__ = ['DetectionPredictor']
class DetectionPredictor(nn.Module):
    """Implements an object able to localize text elements in a document

    Args:
        pre_processor: transform inputs for easier batched model inference
        model: core detection architecture
    """
    def __init__(
        self,
        pre_processor: PreProcessor,
        model: nn.Module,
    ) -> None:
        super().__init__()
        self.pre_processor = pre_processor
        # eval() puts the model in inference mode (affects e.g. dropout).
        self.model = model.eval()
    @torch.no_grad()
    def forward(
        self,
        pages: List[Union[np.ndarray, torch.Tensor]],
        **kwargs: Any,
    ) -> List[np.ndarray]:
        """Localizes text elements on each input page.

        Args:
            pages: list of pages, each a 3-dimensional array or tensor
                (multi-channel 2D image).
            **kwargs: forwarded to the detection model call.

        Returns:
            One prediction per input page, flattened back out of the
            pre-processor's batching.

        Raises:
            ValueError: If any page is not 3-dimensional.
        """
        # Dimension check
        if any(page.ndim != 3 for page in pages):
            raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
        processed_batches = self.pre_processor(pages)
        predicted_batches = [
            self.model(batch, return_boxes=True, **kwargs)['preds']  # type:ignore[operator]
            for batch in processed_batches
        ]
        # Flatten the per-batch predictions into one flat per-page list.
        return [pred for batch in predicted_batches for pred in batch]
| 28.461538 | 108 | 0.658784 |
from typing import Any, List, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
__all__ = ['DetectionPredictor']
class DetectionPredictor(nn.Module):
    """Localizes text elements in a document.

    Wraps a pre-processor (input transformation/batching) and a core
    detection model, exposing a simple list-of-pages interface.
    """
    def __init__(
        self,
        pre_processor: PreProcessor,
        model: nn.Module,
    ) -> None:
        super().__init__()
        self.pre_processor = pre_processor
        # eval() puts the model in inference mode (affects e.g. dropout).
        self.model = model.eval()
    @torch.no_grad()
    def forward(
        self,
        pages: List[Union[np.ndarray, torch.Tensor]],
        **kwargs: Any,
    ) -> List[np.ndarray]:
        """Returns one detection prediction per input page.

        Each page must be a 3-dimensional array or tensor (multi-channel
        2D image); extra keyword arguments are forwarded to the model.

        Raises:
            ValueError: If any page is not 3-dimensional.
        """
        if any(page.ndim != 3 for page in pages):
            raise ValueError("incorrect input shape: all pages are expected to be multi-channel 2D images.")
        processed_batches = self.pre_processor(pages)
        predicted_batches = [
            self.model(batch, return_boxes=True, **kwargs)['preds']
            for batch in processed_batches
        ]
        # Flatten the per-batch predictions into one flat per-page list.
        return [pred for batch in predicted_batches for pred in batch]
| true | true |
1c311c9c35666325e1ee647c83484b12a76c0664 | 37,270 | py | Python | sympy/simplify/tests/test_hyperexpand.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | [
"BSD-3-Clause"
] | 1 | 2016-02-22T22:46:50.000Z | 2016-02-22T22:46:50.000Z | sympy/simplify/tests/test_hyperexpand.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | [
"BSD-3-Clause"
] | 7 | 2017-05-01T14:15:32.000Z | 2017-09-06T20:44:24.000Z | sympy/simplify/tests/test_hyperexpand.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T15:20:27.000Z | 2020-09-09T15:20:27.000Z | from random import randrange
from sympy.simplify.hyperexpand import (ShiftA, ShiftB, UnShiftA, UnShiftB,
MeijerShiftA, MeijerShiftB, MeijerShiftC, MeijerShiftD,
MeijerUnShiftA, MeijerUnShiftB, MeijerUnShiftC,
MeijerUnShiftD,
ReduceOrder, reduce_order, apply_operators,
devise_plan, make_derivative_operator, Formula,
hyperexpand, Hyper_Function, G_Function,
reduce_order_meijer,
build_hypergeometric_formula)
from sympy import hyper, I, S, meijerg, Piecewise
from sympy.abc import z, a, b, c
from sympy.utilities.pytest import XFAIL, raises, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.core.compatibility import range
from sympy import (cos, sin, log, exp, asin, lowergamma, atanh, besseli,
gamma, sqrt, pi, erf, exp_polar)
def test_branch_bug():
    # Regression test: the expected results involve lowergamma evaluated on
    # a non-principal branch (note exp_polar(I*pi) in the expansions).
    assert hyperexpand(hyper((-S(1)/3, S(1)/2), (S(2)/3, S(3)/2), -z)) == \
        -z**S('1/3')*lowergamma(exp_polar(I*pi)/3, z)/5 \
        + sqrt(pi)*erf(sqrt(z))/(5*sqrt(z))
    assert hyperexpand(meijerg([S(7)/6, 1], [], [S(2)/3], [S(1)/6, 0], z)) == \
        2*z**S('2/3')*(2*sqrt(pi)*erf(sqrt(z))/sqrt(z) - 2*lowergamma(
            S(2)/3, z)/z**S('2/3'))*gamma(S(2)/3)/gamma(S(5)/3)
def test_hyperexpand():
    """Elementary closed forms of simple hypergeometric series."""
    # Luke, Y. L. (1969), The Special Functions and Their Approximations,
    # Volume 1, section 6.2
    assert hyperexpand(hyper([], [], z)) == exp(z)
    assert hyperexpand(hyper([1, 1], [2], -z)*z) == log(1 + z)
    assert hyperexpand(hyper([], [S.Half], -z**2/4)) == cos(z)
    assert hyperexpand(z*hyper([], [S('3/2')], -z**2/4)) == sin(z)
    assert hyperexpand(hyper([S('1/2'), S('1/2')], [S('3/2')], z**2)*z) \
        == asin(z)
def can_do(ap, bq, numerical=True, div=1, lowerplane=False):
    """Return True if hyper(ap, bq, z) hyperexpands to a non-hyper form.

    When ``numerical`` is set, the closed form is additionally compared
    numerically against the original: free symbols are substituted with
    random complex values scaled down by ``div``, and ``lowerplane``
    selects an evaluation region in the lower half plane.
    """
    from sympy import exp_polar, exp
    r = hyperexpand(hyper(ap, bq, z))
    if r.has(hyper):
        return False
    if not numerical:
        return True
    repl = {}
    for n, a in enumerate(r.free_symbols - set([z])):
        repl[a] = randcplx(n)/div
    # NOTE: a, b, c, d here define the numerical evaluation region passed
    # to tn(), shadowing the symbols imported from sympy.abc.
    [a, b, c, d] = [2, -1, 3, 1]
    if lowerplane:
        [a, b, c, d] = [2, -2, 3, -1]
    return tn(
        hyper(ap, bq, z).subs(repl),
        r.replace(exp_polar, exp).subs(repl),
        z, a=a, b=b, c=c, d=d)
def test_roach():
    """Gallery examples from Roach's paper that should fully expand."""
    # Kelly B. Roach. Meijer G Function Representations.
    # Section "Gallery"
    assert can_do([S(1)/2], [S(9)/2])
    assert can_do([], [1, S(5)/2, 4])
    assert can_do([-S.Half, 1, 2], [3, 4])
    assert can_do([S(1)/3], [-S(2)/3, -S(1)/2, S(1)/2, 1])
    assert can_do([-S(3)/2, -S(1)/2], [-S(5)/2, 1])
    assert can_do([-S(3)/2, ], [-S(1)/2, S(1)/2])  # shine-integral
    assert can_do([-S(3)/2, -S(1)/2], [2])  # elliptic integrals
@XFAIL
def test_roach_fail():
    """Gallery examples hyperexpand() cannot do yet (expected failures)."""
    assert can_do([-S(1)/2, 1], [S(1)/4, S(1)/2, S(3)/4])  # PFDD
    assert can_do([S(3)/2], [S(5)/2, 5])  # struve function
    assert can_do([-S(1)/2, S(1)/2, 1], [S(3)/2, S(5)/2])  # polylog, pfdd
    assert can_do([1, 2, 3], [S(1)/2, 4])  # XXX ?
    assert can_do([S(1)/2], [-S(1)/3, -S(1)/2, -S(2)/3])  # PFDD ?
# For the long table tests, see end of file
def test_polynomial():
    """Degenerate parameter combinations (negative integers) and
    terminating (polynomial) series."""
    from sympy import oo
    # Negative integer lower parameters produce a divergent result here...
    assert hyperexpand(hyper([], [-1], z)) == oo
    assert hyperexpand(hyper([-2], [-1], z)) == oo
    # ...unless an upper parameter of 0 terminates the series immediately.
    assert hyperexpand(hyper([0, 0], [-1], z)) == 1
    assert can_do([-5, -2, randcplx(), randcplx()], [-10, randcplx()])
def test_hyperexpand_bases():
    """Closed forms for basis cases with symbolic parameters a, b."""
    assert hyperexpand(hyper([2], [a], z)) == \
        a + z**(-a + 1)*(-a**2 + 3*a + z*(a - 1) - 2)*exp(z)* \
        lowergamma(a - 1, z) - 1
    # TODO [a+1, a-S.Half], [2*a]
    assert hyperexpand(hyper([1, 2], [3], z)) == -2/z - 2*log(-z + 1)/z**2
    assert hyperexpand(hyper([S.Half, 2], [S(3)/2], z)) == \
        -1/(2*z - 2) + atanh(sqrt(z))/sqrt(z)/2
    assert hyperexpand(hyper([S(1)/2, S(1)/2], [S(5)/2], z)) == \
        (-3*z + 3)/4/(z*sqrt(-z + 1)) \
        + (6*z - 3)*asin(sqrt(z))/(4*z**(S(3)/2))
    assert hyperexpand(hyper([1, 2], [S(3)/2], z)) == -1/(2*z - 2) \
        - asin(sqrt(z))/(sqrt(z)*(2*z - 2)*sqrt(-z + 1))
    assert hyperexpand(hyper([-S.Half - 1, 1, 2], [S.Half, 3], z)) == \
        sqrt(z)*(6*z/7 - S(6)/5)*atanh(sqrt(z)) \
        + (-30*z**2 + 32*z - 6)/35/z - 6*log(-z + 1)/(35*z**2)
    assert hyperexpand(hyper([1 + S.Half, 1, 1], [2, 2], z)) == \
        -4*log(sqrt(-z + 1)/2 + S(1)/2)/z
    # TODO hyperexpand(hyper([a], [2*a + 1], z))
    # TODO [S.Half, a], [S(3)/2, a+1]
    assert hyperexpand(hyper([2], [b, 1], z)) == \
        z**(-b/2 + S(1)/2)*besseli(b - 1, 2*sqrt(z))*gamma(b) \
        + z**(-b/2 + 1)*besseli(b, 2*sqrt(z))*gamma(b)
    # TODO [a], [a - S.Half, 2*a]
def test_hyperexpand_parametric():
    """Closed forms of 2F1 with a free symbolic parameter a."""
    assert hyperexpand(hyper([a, S(1)/2 + a], [S(1)/2], z)) \
        == (1 + sqrt(z))**(-2*a)/2 + (1 - sqrt(z))**(-2*a)/2
    assert hyperexpand(hyper([a, -S(1)/2 + a], [2*a], z)) \
        == 2**(2*a - 1)*((-z + 1)**(S(1)/2) + 1)**(-2*a + 1)
def test_shifted_sum():
    # Exercises hyperexpand on a hyper with a z**4 prefactor; the result
    # combines trigonometric terms with a polynomial correction.
    from sympy import simplify
    assert simplify(hyperexpand(z**4*hyper([2], [3, S('3/2')], -z**2))) \
        == z*sin(2*z) + (-z**2 + S.Half)*cos(2*z) - S.Half
def _randrat():
    """Return a random non-integer rational, S(k)/50 with k in [10, 34]."""
    numerator = 10 + randrange(25)
    return S(numerator)/50
def randcplx(offset=-1):
    """Return a random complex number with nonzero imaginary part.

    Polys is not good with real coefficients. The imaginary part is
    shifted by ``1 + offset`` (zero shift with the default ``offset=-1``).
    """
    return _randrat() + I*_randrat() + I*(1 + offset)
@slow
def test_formulae():
from sympy.simplify.hyperexpand import FormulaCollection
formulae = FormulaCollection().formulae
for formula in formulae:
h = formula.func(formula.z)
rep = {}
for n, sym in enumerate(formula.symbols):
rep[sym] = randcplx(n)
# NOTE hyperexpand returns truly branched functions. We know we are
# on the main sheet, but numerical evaluation can still go wrong
# (e.g. if exp_polar cannot be evalf'd).
# Just replace all exp_polar by exp, this usually works.
# first test if the closed-form is actually correct
h = h.subs(rep)
closed_form = formula.closed_form.subs(rep).rewrite('nonrepsmall')
z = formula.z
assert tn(h, closed_form.replace(exp_polar, exp), z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep).rewrite('nonrepsmall')
assert tn(closed_form.replace(
exp_polar, exp), cl.replace(exp_polar, exp), z)
deriv1 = z*formula.B.applyfunc(lambda t: t.rewrite(
'nonrepsmall')).diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep).replace(exp_polar, exp),
d2.subs(rep).rewrite('nonrepsmall').replace(exp_polar, exp), z)
def test_meijerg_formulae():
from sympy.simplify.hyperexpand import MeijerFormulaCollection
formulae = MeijerFormulaCollection().formulae
for sig in formulae:
for formula in formulae[sig]:
g = meijerg(formula.func.an, formula.func.ap,
formula.func.bm, formula.func.bq,
formula.z)
rep = {}
for sym in formula.symbols:
rep[sym] = randcplx()
# first test if the closed-form is actually correct
g = g.subs(rep)
closed_form = formula.closed_form.subs(rep)
z = formula.z
assert tn(g, closed_form, z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep)
assert tn(closed_form, cl, z)
deriv1 = z*formula.B.diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep), d2.subs(rep), z)
def op(f):
    """Apply the differential operator z*d/dz to f."""
    return z*f.diff(z)
def test_plan():
    """devise_plan() should build operator plans between Hyper_Functions
    and reject targets it cannot reach (ValueError)."""
    assert devise_plan(Hyper_Function([0], ()),
            Hyper_Function([0], ()), z) == []
    with raises(ValueError):
        devise_plan(Hyper_Function([1], ()), Hyper_Function((), ()), z)
    with raises(ValueError):
        devise_plan(Hyper_Function([2], [1]), Hyper_Function([2], [2]), z)
    with raises(ValueError):
        devise_plan(Hyper_Function([2], []), Hyper_Function([S("1/2")], []), z)
    # We cannot use pi/(10000 + n) because polys is insanely slow.
    a1, a2, b1 = (randcplx(n) for n in range(3))
    b1 += 2*I
    h = hyper([a1, a2], [b1], z)
    # Applying the devised plan to h should reproduce the shifted function.
    h2 = hyper((a1 + 1, a2), [b1], z)
    assert tn(apply_operators(h,
        devise_plan(Hyper_Function((a1 + 1, a2), [b1]),
                    Hyper_Function((a1, a2), [b1]), z), op),
        h2, z)
    h2 = hyper((a1 + 1, a2 - 1), [b1], z)
    assert tn(apply_operators(h,
        devise_plan(Hyper_Function((a1 + 1, a2 - 1), [b1]),
                    Hyper_Function((a1, a2), [b1]), z), op),
        h2, z)
def test_plan_derivatives():
    """Plans applied via the derivative operator (C*B matrix form) should
    reproduce the shifted hypergeometric functions."""
    a1, a2, a3 = 1, 2, S('1/2')
    b1, b2 = 3, S('5/2')
    h = Hyper_Function((a1, a2, a3), (b1, b2))
    h2 = Hyper_Function((a1 + 1, a2 + 1, a3 + 2), (b1 + 1, b2 + 1))
    ops = devise_plan(h2, h, z)
    f = Formula(h, z, h(z), [])
    deriv = make_derivative_operator(f.M, z)
    assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
    h2 = Hyper_Function((a1, a2 - 1, a3 - 2), (b1 - 1, b2 - 1))
    ops = devise_plan(h2, h, z)
    assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
def test_reduction_operators():
    """ReduceOrder should cancel matching upper/lower parameters and
    return None when no reduction applies."""
    a1, a2, b1 = (randcplx(n) for n in range(3))
    h = hyper([a1], [b1], z)
    # No valid reduction for these parameter pairs.
    assert ReduceOrder(2, 0) is None
    assert ReduceOrder(2, -1) is None
    assert ReduceOrder(1, S('1/2')) is None
    h2 = hyper((a1, a2), (b1, a2), z)
    assert tn(ReduceOrder(a2, a2).apply(h, op), h2, z)
    h2 = hyper((a1, a2 + 1), (b1, a2), z)
    assert tn(ReduceOrder(a2 + 1, a2).apply(h, op), h2, z)
    h2 = hyper((a2 + 4, a1), (b1, a2), z)
    assert tn(ReduceOrder(a2 + 4, a2).apply(h, op), h2, z)
    # test several step order reduction
    ap = (a2 + 4, a1, b1 + 1)
    bq = (a2, b1, b1)
    func, ops = reduce_order(Hyper_Function(ap, bq))
    assert func.ap == (a1,)
    assert func.bq == (b1,)
    assert tn(apply_operators(h, ops, op), hyper(ap, bq, z), z)
def test_shift_operators():
    """ShiftA/ShiftB should raise on integer 0/1 and otherwise shift an
    upper parameter up (ShiftA) or a lower parameter down (ShiftB)."""
    a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
    h = hyper((a1, a2), (b1, b2, b3), z)
    raises(ValueError, lambda: ShiftA(0))
    raises(ValueError, lambda: ShiftB(1))
    assert tn(ShiftA(a1).apply(h, op), hyper((a1 + 1, a2), (b1, b2, b3), z), z)
    assert tn(ShiftA(a2).apply(h, op), hyper((a1, a2 + 1), (b1, b2, b3), z), z)
    assert tn(ShiftB(b1).apply(h, op), hyper((a1, a2), (b1 - 1, b2, b3), z), z)
    assert tn(ShiftB(b2).apply(h, op), hyper((a1, a2), (b1, b2 - 1, b3), z), z)
    assert tn(ShiftB(b3).apply(h, op), hyper((a1, a2), (b1, b2, b3 - 1), z), z)
def test_ushift_operators():
    """UnShiftA/UnShiftB should raise on invalid parameter lists and
    otherwise shift an upper parameter down (UnShiftA) or a lower
    parameter up (UnShiftB), indexed by position."""
    a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
    h = hyper((a1, a2), (b1, b2, b3), z)
    raises(ValueError, lambda: UnShiftA((1,), (), 0, z))
    raises(ValueError, lambda: UnShiftB((), (-1,), 0, z))
    raises(ValueError, lambda: UnShiftA((1,), (0, -1, 1), 0, z))
    raises(ValueError, lambda: UnShiftB((0, 1), (1,), 0, z))
    s = UnShiftA((a1, a2), (b1, b2, b3), 0, z)
    assert tn(s.apply(h, op), hyper((a1 - 1, a2), (b1, b2, b3), z), z)
    s = UnShiftA((a1, a2), (b1, b2, b3), 1, z)
    assert tn(s.apply(h, op), hyper((a1, a2 - 1), (b1, b2, b3), z), z)
    s = UnShiftB((a1, a2), (b1, b2, b3), 0, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1 + 1, b2, b3), z), z)
    s = UnShiftB((a1, a2), (b1, b2, b3), 1, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2 + 1, b3), z), z)
    s = UnShiftB((a1, a2), (b1, b2, b3), 2, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2, b3 + 1), z), z)
def can_do_meijer(a1, a2, b1, b2, numeric=True):
    """Try to hyperexpand() the Meijer G-function with the given parameters.

    Returns False if the expansion still contains G-functions.
    If ``numeric`` is True, the obtained formula is also tested numerically
    (at random values) and False is returned if that test fails.
    Otherwise returns True.
    """
    from sympy import unpolarify, expand
    r = hyperexpand(meijerg(a1, a2, b1, b2, z))
    if r.has(meijerg):
        return False
    # NOTE hyperexpand() returns a truly branched function, whereas numerical
    # evaluation only works on the main branch. Since we are evaluating on
    # the main branch, this should not be a problem, but expressions like
    # exp_polar(I*pi/2*x)**a are evaluated incorrectly. We thus have to get
    # rid of them. The expand heuristically does this...
    r = unpolarify(expand(r, force=True, power_base=True, power_exp=False,
                          mul=False, log=False, multinomial=False, basic=False))
    if not numeric:
        return True
    repl = {}
    for n, a in enumerate(meijerg(a1, a2, b1, b2, z).free_symbols - set([z])):
        repl[a] = randcplx(n)
    return tn(meijerg(a1, a2, b1, b2, z).subs(repl), r.subs(repl), z)
@slow
def test_meijerg_expand():
from sympy import combsimp, simplify
# from mpmath docs
assert hyperexpand(meijerg([[], []], [[0], []], -z)) == exp(z)
assert hyperexpand(meijerg([[1, 1], []], [[1], [0]], z)) == \
log(z + 1)
assert hyperexpand(meijerg([[1, 1], []], [[1], [1]], z)) == \
z/(z + 1)
assert hyperexpand(meijerg([[], []], [[S(1)/2], [0]], (z/2)**2)) \
== sin(z)/sqrt(pi)
assert hyperexpand(meijerg([[], []], [[0], [S(1)/2]], (z/2)**2)) \
== cos(z)/sqrt(pi)
assert can_do_meijer([], [a], [a - 1, a - S.Half], [])
assert can_do_meijer([], [], [a/2], [-a/2], False) # branches...
assert can_do_meijer([a], [b], [a], [b, a - 1])
# wikipedia
assert hyperexpand(meijerg([1], [], [], [0], z)) == \
Piecewise((0, abs(z) < 1), (1, abs(1/z) < 1),
(meijerg([1], [], [], [0], z), True))
assert hyperexpand(meijerg([], [1], [0], [], z)) == \
Piecewise((1, abs(z) < 1), (0, abs(1/z) < 1),
(meijerg([], [1], [0], [], z), True))
# The Special Functions and their Approximations
assert can_do_meijer([], [], [a + b/2], [a, a - b/2, a + S.Half])
assert can_do_meijer(
[], [], [a], [b], False) # branches only agree for small z
assert can_do_meijer([], [S.Half], [a], [-a])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, a + S.Half], [b, b + S.Half])
assert can_do_meijer([], [], [a, -a], [0, S.Half], False) # dito
assert can_do_meijer([], [], [a, a + S.Half, b, b + S.Half], [])
assert can_do_meijer([S.Half], [], [0], [a, -a])
assert can_do_meijer([S.Half], [], [a], [0, -a], False) # dito
assert can_do_meijer([], [a - S.Half], [a, b], [a - S.Half], False)
assert can_do_meijer([], [a + S.Half], [a + b, a - b, a], [], False)
assert can_do_meijer([a + S.Half], [], [b, 2*a - b, a], [], False)
# This for example is actually zero.
assert can_do_meijer([], [], [], [a, b])
# Testing a bug:
assert hyperexpand(meijerg([0, 2], [], [], [-1, 1], z)) == \
Piecewise((0, abs(z) < 1),
(z/2 - 1/(2*z), abs(1/z) < 1),
(meijerg([0, 2], [], [], [-1, 1], z), True))
# Test that the simplest possible answer is returned:
assert combsimp(simplify(hyperexpand(
meijerg([1], [1 - a], [-a/2, -a/2 + S(1)/2], [], 1/z)))) == \
-2*sqrt(pi)*(sqrt(z + 1) + 1)**a/a
# Test that hyper is returned
assert hyperexpand(meijerg([1], [], [a], [0, 0], z)) == hyper(
(a,), (a + 1, a + 1), z*exp_polar(I*pi))*z**a*gamma(a)/gamma(a + 1)**2
def test_meijerg_lookup():
    """Meijer G-functions that expand to uppergamma/Si/Ci combinations,
    including equal results regardless of parameter ordering."""
    from sympy import uppergamma, Si, Ci
    assert hyperexpand(meijerg([a], [], [b, a], [], z)) == \
        z**b*exp(z)*gamma(-a + b + 1)*uppergamma(a - b, z)
    assert hyperexpand(meijerg([0], [], [0, 0], [], z)) == \
        exp(z)*uppergamma(0, z)
    assert can_do_meijer([a], [], [b, a + 1], [])
    assert can_do_meijer([a], [], [b + 2, a], [])
    assert can_do_meijer([a], [], [b - 2, a], [])
    # The three parameter orderings below must all expand to the same form.
    assert hyperexpand(meijerg([a], [], [a, a, a - S(1)/2], [], z)) == \
        -sqrt(pi)*z**(a - S(1)/2)*(2*cos(2*sqrt(z))*(Si(2*sqrt(z)) - pi/2)
                                   - 2*sin(2*sqrt(z))*Ci(2*sqrt(z))) == \
        hyperexpand(meijerg([a], [], [a, a - S(1)/2, a], [], z)) == \
        hyperexpand(meijerg([a], [], [a - S(1)/2, a, a], [], z))
    assert can_do_meijer([a - 1], [], [a + 2, a - S(3)/2, a + 1], [])
@XFAIL
def test_meijerg_expand_fail():
    """Meijer G expansions not handled yet (expected failures)."""
    # These basically test hyper([], [1/2 - a, 1/2 + 1, 1/2], z),
    # which is *very* messy. But since the meijer g actually yields a
    # sum of bessel functions, things can sometimes be simplified a lot and
    # are then put into tables...
    assert can_do_meijer([], [], [a + S.Half], [a, a - b/2, a + b/2])
    assert can_do_meijer([], [], [0, S.Half], [a, -a])
    assert can_do_meijer([], [], [3*a - S.Half, a, -a - S.Half], [a - S.Half])
    assert can_do_meijer([], [], [0, a - S.Half, -a - S.Half], [S.Half])
    assert can_do_meijer([], [], [a, b + S(1)/2, b], [2*b - a])
    assert can_do_meijer([], [], [a, b + S(1)/2, b, 2*b - a])
    assert can_do_meijer([S.Half], [], [-a, a], [0])
@slow
def test_meijerg():
# carefully set up the parameters.
# NOTE: this used to fail sometimes. I believe it is fixed, but if you
# hit an inexplicable test failure here, please let me know the seed.
a1, a2 = (randcplx(n) - 5*I - n*I for n in range(2))
b1, b2 = (randcplx(n) + 5*I + n*I for n in range(2))
b3, b4, b5, a3, a4, a5 = (randcplx() for n in range(6))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert ReduceOrder.meijer_minus(3, 4) is None
assert ReduceOrder.meijer_plus(4, 3) is None
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2], z)
assert tn(ReduceOrder.meijer_plus(a2, a2).apply(g, op), g2, z)
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2 + 1], z)
assert tn(ReduceOrder.meijer_plus(a2, a2 + 1).apply(g, op), g2, z)
g2 = meijerg([a1, a2 - 1], [a3, a4], [b1], [b3, b4, a2 + 2], z)
assert tn(ReduceOrder.meijer_plus(a2 - 1, a2 + 2).apply(g, op), g2, z)
g2 = meijerg([a1], [a3, a4, b2 - 1], [b1, b2 + 2], [b3, b4], z)
assert tn(ReduceOrder.meijer_minus(
b2 + 2, b2 - 1).apply(g, op), g2, z, tol=1e-6)
# test several-step reduction
an = [a1, a2]
bq = [b3, b4, a2 + 1]
ap = [a3, a4, b2 - 1]
bm = [b1, b2 + 1]
niq, ops = reduce_order_meijer(G_Function(an, ap, bm, bq))
assert niq.an == (a1,)
assert set(niq.ap) == set([a3, a4])
assert niq.bm == (b1,)
assert set(niq.bq) == set([b3, b4])
assert tn(apply_operators(g, ops, op), meijerg(an, ap, bm, bq, z), z)
def test_meijerg_shift_operators():
# carefully set up the parameters. XXX this still fails sometimes
a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = (randcplx(n) for n in range(10))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert tn(MeijerShiftA(b1).apply(g, op),
meijerg([a1], [a3, a4], [b1 + 1], [b3, b4], z), z)
assert tn(MeijerShiftB(a1).apply(g, op),
meijerg([a1 - 1], [a3, a4], [b1], [b3, b4], z), z)
assert tn(MeijerShiftC(b3).apply(g, op),
meijerg([a1], [a3, a4], [b1], [b3 + 1, b4], z), z)
assert tn(MeijerShiftD(a3).apply(g, op),
meijerg([a1], [a3 - 1, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftA([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1 - 1], [b3, b4], z), z)
s = MeijerUnShiftC([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1], [b3 - 1, b4], z), z)
s = MeijerUnShiftB([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1 + 1], [a3, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftD([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3 + 1, a4], [b1], [b3, b4], z), z)
@slow
def test_meijerg_confluence():
def t(m, a, b):
from sympy import sympify, Piecewise
a, b = sympify([a, b])
m_ = m
m = hyperexpand(m)
if not m == Piecewise((a, abs(z) < 1), (b, abs(1/z) < 1), (m_, True)):
return False
if not (m.args[0].args[0] == a and m.args[1].args[0] == b):
return False
z0 = randcplx()/10
if abs(m.subs(z, z0).n() - a.subs(z, z0).n()).n() > 1e-10:
return False
if abs(m.subs(z, 1/z0).n() - b.subs(z, 1/z0).n()).n() > 1e-10:
return False
return True
assert t(meijerg([], [1, 1], [0, 0], [], z), -log(z), 0)
assert t(meijerg(
[], [3, 1], [0, 0], [], z), -z**2/4 + z - log(z)/2 - S(3)/4, 0)
assert t(meijerg([], [3, 1], [-1, 0], [], z),
z**2/12 - z/2 + log(z)/2 + S(1)/4 + 1/(6*z), 0)
assert t(meijerg([], [1, 1, 1, 1], [0, 0, 0, 0], [], z), -log(z)**3/6, 0)
assert t(meijerg([1, 1], [], [], [0, 0], z), 0, -log(1/z))
assert t(meijerg([1, 1], [2, 2], [1, 1], [0, 0], z),
-z*log(z) + 2*z, -log(1/z) + 2)
assert t(meijerg([S(1)/2], [1, 1], [0, 0], [S(3)/2], z), log(z)/2 - 1, 0)
def u(an, ap, bm, bq):
m = meijerg(an, ap, bm, bq, z)
m2 = hyperexpand(m, allow_hyper=True)
if m2.has(meijerg) and not (m2.is_Piecewise and len(m2.args) == 3):
return False
return tn(m, m2, z)
assert u([], [1], [0, 0], [])
assert u([1, 1], [], [], [0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0, 0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0])
def test_lerchphi():
    """hyperexpand should produce lerchphi/polylog closed forms for the
    characteristic parameter patterns [1, a, ..., a], [a + 1, ..., a + 1]."""
    from sympy import combsimp, exp_polar, polylog, log, lerchphi
    assert hyperexpand(hyper([1, a], [a + 1], z)/a) == lerchphi(z, 1, a)
    assert hyperexpand(
        hyper([1, a, a], [a + 1, a + 1], z)/a**2) == lerchphi(z, 2, a)
    assert hyperexpand(hyper([1, a, a, a], [a + 1, a + 1, a + 1], z)/a**3) == \
        lerchphi(z, 3, a)
    assert hyperexpand(hyper([1] + [a]*10, [a + 1]*10, z)/a**10) == \
        lerchphi(z, 10, a)
    # The same identities through the Meijer G representation (note the
    # exp_polar(-I*pi) argument selecting the branch).
    assert combsimp(hyperexpand(meijerg([0, 1 - a], [], [0],
        [-a], exp_polar(-I*pi)*z))) == lerchphi(z, 1, a)
    assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a], [], [0],
        [-a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 2, a)
    assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a, 1 - a], [], [0],
        [-a, -a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 3, a)
    # Special cases: log and polylog.
    assert hyperexpand(z*hyper([1, 1], [2], z)) == -log(1 + -z)
    assert hyperexpand(z*hyper([1, 1, 1], [2, 2], z)) == polylog(2, z)
    assert hyperexpand(z*hyper([1, 1, 1, 1], [2, 2, 2], z)) == polylog(3, z)
    assert hyperexpand(hyper([1, a, 1 + S(1)/2], [a + 1, S(1)/2], z)) == \
        -2*a/(z - 1) + (-2*a**2 + a)*lerchphi(z, 1, a)

    # Now numerical tests. These make sure reductions etc are carried out
    # correctly

    # a rational function (polylog at negative integer order)
    assert can_do([2, 2, 2], [1, 1])

    # NOTE these contain log(1-x) etc ... better make sure we have |z| < 1
    # reduction of order for polylog
    assert can_do([1, 1, 1, b + 5], [2, 2, b], div=10)

    # reduction of order for lerchphi
    # XXX lerchphi in mpmath is flaky
    assert can_do(
        [1, a, a, a, b + 5], [a + 1, a + 1, a + 1, b], numerical=False)

    # test a bug
    from sympy import Abs
    assert hyperexpand(hyper([S(1)/2, S(1)/2, S(1)/2, 1],
        [S(3)/2, S(3)/2, S(3)/2], S(1)/4)) == \
        Abs(-polylog(3, exp_polar(I*pi)/2) + polylog(3, S(1)/2))
def test_partial_simp():
    """Hypergeometric formulae should be internally consistent and the
    expansions only partially simplified (residual hyper terms remain)."""
    # First test that hypergeometric function formulae work.
    a, b, c, d, e = (randcplx() for _ in range(5))
    for func in [Hyper_Function([a, b, c], [d, e]),
            Hyper_Function([], [a, b, c, d, e])]:
        f = build_hypergeometric_formula(func)
        z = f.z
        assert f.closed_form == func(z)
        # The basis B must satisfy the matrix ODE  z*B' == M*B.
        deriv1 = f.B.diff(z)*z
        deriv2 = f.M*f.B
        for func1, func2 in zip(deriv1, deriv2):
            assert tn(func1, func2, z)

    # Now test that formulae are partially simplified.
    from sympy.abc import a, b, z
    assert hyperexpand(hyper([3, a], [1, b], z)) == \
        (-a*b/2 + a*z/2 + 2*a)*hyper([a + 1], [b], z) \
        + (a*b/2 - 2*a + 1)*hyper([a], [b], z)
    assert tn(
        hyperexpand(hyper([3, d], [1, e], z)), hyper([3, d], [1, e], z), z)
    assert hyperexpand(hyper([3], [1, a, b], z)) == \
        hyper((), (a, b), z) \
        + z*hyper((), (a + 1, b), z)/(2*a) \
        - z*(b - 4)*hyper((), (a + 1, b + 1), z)/(2*a*b)
    assert tn(
        hyperexpand(hyper([3], [1, d, e], z)), hyper([3], [1, d, e], z), z)


def test_hyperexpand_special():
    """Evaluation at the special points z = 1, -1 and 0 (Gauss/Kummer
    summation theorems)."""
    assert hyperexpand(hyper([a, b], [c], 1)) == \
        gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
    assert hyperexpand(hyper([a, b], [1 + a - b], -1)) == \
        gamma(1 + a/2)*gamma(1 + a - b)/gamma(1 + a)/gamma(1 + a/2 - b)
    assert hyperexpand(hyper([a, b], [1 + b - a], -1)) == \
        gamma(1 + b/2)*gamma(1 + b - a)/gamma(1 + b)/gamma(1 + b/2 - a)
    assert hyperexpand(meijerg([1 - z - a/2], [1 - z + a/2], [b/2], [-b/2], 1)) == \
        gamma(1 - 2*z)*gamma(z + a/2 + b/2)/gamma(1 - z + a/2 - b/2) \
        /gamma(1 - z - a/2 + b/2)/gamma(1 - z + a/2 + b/2)
    # At z = 0 every hyper equals 1 (but the unevaluated object is not 0).
    assert hyperexpand(hyper([a], [b], 0)) == 1
    assert hyper([a], [b], 0) != 0


def test_Mod1_behavior():
    """Parameters differing by an integer symbol must not send the
    parameter-matching logic into an infinite loop."""
    from sympy import Symbol, simplify, lowergamma
    n = Symbol('n', integer=True)
    # Note: this should not hang.
    assert simplify(hyperexpand(meijerg([1], [], [n + 1], [0], z))) == \
        lowergamma(n + 1, z)
@slow
def test_prudnikov_misc():
    """Miscellaneous parameter patterns (contiguous/half-integer shifts)
    that hyperexpand should handle; see can_do for the check performed."""
    assert can_do([1, (3 + I)/2, (3 - I)/2], [S(3)/2, 2])
    assert can_do([S.Half, a - 1], [S(3)/2, a + 1], lowerplane=True)
    assert can_do([], [b + 1])
    assert can_do([a], [a - 1, b + 1])
    assert can_do([a], [a - S.Half, 2*a])
    assert can_do([a], [a - S.Half, 2*a + 1])
    assert can_do([a], [a - S.Half, 2*a - 1])
    assert can_do([a], [a + S.Half, 2*a])
    assert can_do([a], [a + S.Half, 2*a + 1])
    assert can_do([a], [a + S.Half, 2*a - 1])
    assert can_do([S.Half], [b, 2 - b])
    assert can_do([S.Half], [b, 3 - b])
    assert can_do([1], [2, b])
    assert can_do([a, a + S.Half], [2*a, b, 2*a - b + 1])
    assert can_do([a, a + S.Half], [S.Half, 2*a, 2*a + S.Half])
    assert can_do([a], [a + 1], lowerplane=True)  # lowergamma
@slow
def test_prudnikov_1():
    """2F1 entries from Prudnikov et al., section 7.3.1.

    Fix: the assertion ``can_do([a, 2 - a], [S(3)/2])`` appeared twice in a
    row; the redundant duplicate has been removed.
    """
    # A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
    # Integrals and Series: More Special Functions, Vol. 3,.
    # Gordon and Breach Science Publisher

    # 7.3.1
    assert can_do([a, -a], [S.Half])
    assert can_do([a, 1 - a], [S.Half])
    assert can_do([a, 1 - a], [S(3)/2])
    assert can_do([a, 2 - a], [S.Half])
    assert can_do([a, 2 - a], [S(3)/2])
    assert can_do([a, a + S(1)/2], [2*a - 1])
    assert can_do([a, a + S(1)/2], [2*a])
    assert can_do([a, a + S(1)/2], [2*a + 1])
    assert can_do([a, a + S(1)/2], [S(1)/2])
    assert can_do([a, a + S(1)/2], [S(3)/2])
    assert can_do([a, a/2 + 1], [a/2])
    assert can_do([1, b], [2])
    assert can_do([1, b], [b + 1], numerical=False)  # Lerch Phi
            # NOTE: branches are complicated for |z| > 1

    assert can_do([a], [2*a])
    assert can_do([a], [2*a + 1])
    assert can_do([a], [2*a - 1])
@slow
def test_prudnikov_2():
    """2F1 with half-integer parameters (h = 1/2) — sweep over grids of
    parameter values and check each expands (see can_do)."""
    h = S.Half
    assert can_do([-h, -h], [h])
    assert can_do([-h, h], [3*h])
    assert can_do([-h, h], [5*h])
    assert can_do([-h, h], [7*h])

    assert can_do([-h, 1], [h])

    for p in [-h, h]:
        for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
            for m in [-h, h, 3*h, 5*h, 7*h]:
                assert can_do([p, n], [m])
        for n in [1, 2, 3, 4]:
            for m in [1, 2, 3, 4]:
                assert can_do([p, n], [m])


@slow
def test_prudnikov_3():
    """2F1 with quarter/third-integer and integer parameter grids."""
    h = S.Half
    assert can_do([S(1)/4, S(3)/4], [h])
    assert can_do([S(1)/4, S(3)/4], [3*h])
    assert can_do([S(1)/3, S(2)/3], [3*h])
    assert can_do([S(3)/4, S(5)/4], [h])
    assert can_do([S(3)/4, S(5)/4], [3*h])

    for p in [1, 2, 3, 4]:
        for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4, 9*h]:
            for m in [1, 3*h, 2, 5*h, 3, 7*h, 4]:
                assert can_do([p, m], [n])


@slow
def test_prudnikov_4():
    """2F1 with one half-odd-integer upper parameter, swept over grids."""
    h = S.Half
    for p in [3*h, 5*h, 7*h]:
        for n in [-h, h, 3*h, 5*h, 7*h]:
            for m in [3*h, 2, 5*h, 3, 7*h, 4]:
                assert can_do([p, m], [n])
        for n in [1, 2, 3, 4]:
            for m in [2, 3, 4]:
                assert can_do([p, m], [n])


@slow
def test_prudnikov_5():
    """3F2 grids with a -1/2 upper parameter."""
    h = S.Half

    for p in [1, 2, 3]:
        for q in range(p, 4):
            for r in [1, 2, 3]:
                for s in range(r, 4):
                    assert can_do([-h, p, q], [r, s])

    for p in [h, 1, 3*h, 2, 5*h, 3]:
        for q in [h, 3*h, 5*h]:
            for r in [h, 3*h, 5*h]:
                for s in [h, 3*h, 5*h]:
                    if s <= q and s <= r:
                        assert can_do([-h, p, q], [r, s])

    for p in [h, 1, 3*h, 2, 5*h, 3]:
        for q in [1, 2, 3]:
            for r in [h, 3*h, 5*h]:
                for s in [1, 2, 3]:
                    assert can_do([-h, p, q], [r, s])


@slow
def test_prudnikov_6():
    """3F2 grids with a 1/2 upper parameter."""
    h = S.Half

    for m in [3*h, 5*h]:
        for n in [1, 2, 3]:
            for q in [h, 1, 2]:
                for p in [1, 2, 3]:
                    assert can_do([h, q, p], [m, n])
            for q in [1, 2, 3]:
                for p in [3*h, 5*h]:
                    assert can_do([h, q, p], [m, n])

    for q in [1, 2]:
        for p in [1, 2, 3]:
            for m in [1, 2, 3]:
                for n in [1, 2, 3]:
                    assert can_do([h, q, p], [m, n])

    assert can_do([h, h, 5*h], [3*h, 3*h])
    assert can_do([h, 1, 5*h], [3*h, 3*h])
    assert can_do([h, 2, 2], [1, 3])

    # pages 435 to 457 contain more PFDD and stuff like this


@slow
def test_prudnikov_7():
    """1F1 (confluent hypergeometric) grids over half/whole-integer
    parameters."""
    assert can_do([3], [6])

    h = S.Half
    for n in [h, 3*h, 5*h, 7*h]:
        assert can_do([-h], [n])
    for m in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:  # HERE
        for n in [-h, h, 3*h, 5*h, 7*h, 1, 2, 3, 4]:
            assert can_do([m], [n])


@slow
def test_prudnikov_8():
    """2F2 grids from section 7.12.2."""
    h = S.Half

    # 7.12.2
    for a in [1, 2, 3]:
        for b in [1, 2, 3]:
            for c in range(1, a + 1):
                for d in [h, 1, 3*h, 2, 5*h, 3]:
                    assert can_do([a, b], [c, d])
        for b in [3*h, 5*h]:
            for c in [h, 1, 3*h, 2, 5*h, 3]:
                for d in [1, 2, 3]:
                    assert can_do([a, b], [c, d])

    for a in [-h, h, 3*h, 5*h]:
        for b in [1, 2, 3]:
            for c in [h, 1, 3*h, 2, 5*h, 3]:
                for d in [1, 2, 3]:
                    assert can_do([a, b], [c, d])
        for b in [h, 3*h, 5*h]:
            for c in [h, 3*h, 5*h, 3]:
                for d in [h, 1, 3*h, 2, 5*h, 3]:
                    if c <= b:
                        assert can_do([a, b], [c, d])
def test_prudnikov_9():
    """0F1 with half-integer lower parameter (section 7.13.1)."""
    # 7.13.1 [we have a general formula ... so this is a bit pointless]
    # Positive half-integers 1/2, 1, 3/2, ..., 9/2
    # (same values as (S(i) + 1)/2 for i in range(9)).
    for k in range(1, 10):
        assert can_do([], [S(k)/2])
    # Negative half-odd-integers -1/2, -3/2, ..., -9/2
    # (same values as -(2*S(i) + 1)/2 for i in range(5)).
    for k in range(1, 10, 2):
        assert can_do([], [-S(k)/2])
@slow
def test_prudnikov_10():
    """1F2 grids from section 7.14.2."""
    # 7.14.2
    h = S.Half
    for p in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
        for m in [1, 2, 3, 4]:
            for n in range(m, 5):
                assert can_do([p], [m, n])

    for p in [1, 2, 3, 4]:
        for n in [h, 3*h, 5*h, 7*h]:
            for m in [1, 2, 3, 4]:
                assert can_do([p], [n, m])

    for p in [3*h, 5*h, 7*h]:
        for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
            assert can_do([p], [h, m])
            assert can_do([p], [3*h, m])

    for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
        assert can_do([7*h], [5*h, m])

    assert can_do([-S(1)/2], [S(1)/2, S(1)/2])  # shine-integral shi


def test_prudnikov_11():
    """2F3 entries from section 7.15."""
    # 7.15
    assert can_do([a, a + S.Half], [2*a, b, 2*a - b])
    assert can_do([a, a + S.Half], [S(3)/2, 2*a, 2*a - S(1)/2])

    assert can_do([S(1)/4, S(3)/4], [S(1)/2, S(1)/2, 1])
    assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(1)/2, 2])
    assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(3)/2, 1])
    assert can_do([S(5)/4, S(7)/4], [S(3)/2, S(5)/2, 2])

    assert can_do([1, 1], [S(3)/2, 2, 2])  # cosh-integral chi


@slow
def test_prudnikov_12():
    """0F3 entries from section 7.16."""
    # 7.16
    assert can_do(
        [], [a, a + S.Half, 2*a], False)  # branches only agree for some z!
    assert can_do([], [a, a + S.Half, 2*a + 1], False)  # dito
    assert can_do([], [S.Half, a, a + S.Half])
    assert can_do([], [S(3)/2, a, a + S.Half])

    assert can_do([], [S(1)/4, S(1)/2, S(3)/4])
    assert can_do([], [S(1)/2, S(1)/2, 1])
    assert can_do([], [S(1)/2, S(3)/2, 1])
    assert can_do([], [S(3)/4, S(3)/2, S(5)/4])
    assert can_do([], [1, 1, S(3)/2])
    assert can_do([], [1, 2, S(3)/2])
    assert can_do([], [1, S(3)/2, S(3)/2])
    assert can_do([], [S(5)/4, S(3)/2, S(7)/4])
    assert can_do([], [2, S(3)/2, S(3)/2])


@slow
def test_prudnikov_2F1():
    """2F1 grids that reduce to elliptic integrals."""
    h = S.Half
    # Elliptic integrals
    for p in [-h, h]:
        for m in [h, 3*h, 5*h, 7*h]:
            for n in [1, 2, 3, 4]:
                assert can_do([p, m], [n])
@XFAIL
def test_prudnikov_fail_2F1():
    """2F1 cases hyperexpand is known NOT to handle yet (incomplete beta,
    Legendre functions, PFDD entries)."""
    assert can_do([a, b], [b + 1])  # incomplete beta function
    assert can_do([-1, b], [c])    # Poly. also -2, -3 etc

    # TODO polys

    # Legendre functions:
    assert can_do([a, b], [a + b + S.Half])
    assert can_do([a, b], [a + b - S.Half])
    assert can_do([a, b], [a + b + S(3)/2])
    assert can_do([a, b], [(a + b + 1)/2])
    assert can_do([a, b], [(a + b)/2 + 1])
    assert can_do([a, b], [a - b + 1])
    assert can_do([a, b], [a - b + 2])
    assert can_do([a, b], [2*b])
    assert can_do([a, b], [S.Half])
    assert can_do([a, b], [S(3)/2])
    assert can_do([a, 1 - a], [c])
    assert can_do([a, 2 - a], [c])
    assert can_do([a, 3 - a], [c])
    assert can_do([a, a + S(1)/2], [c])
    assert can_do([1, b], [c])
    assert can_do([1, b], [S(3)/2])

    assert can_do([S(1)/4, S(3)/4], [1])

    # PFDD
    o = S(1)
    assert can_do([o/8, 1], [o/8*9])
    assert can_do([o/6, 1], [o/6*7])
    assert can_do([o/6, 1], [o/6*13])
    assert can_do([o/5, 1], [o/5*6])
    assert can_do([o/5, 1], [o/5*11])
    assert can_do([o/4, 1], [o/4*5])
    assert can_do([o/4, 1], [o/4*9])
    assert can_do([o/3, 1], [o/3*4])
    assert can_do([o/3, 1], [o/3*7])
    assert can_do([o/8*3, 1], [o/8*11])
    assert can_do([o/5*2, 1], [o/5*7])
    assert can_do([o/5*2, 1], [o/5*12])
    assert can_do([o/5*3, 1], [o/5*8])
    assert can_do([o/5*3, 1], [o/5*13])
    assert can_do([o/8*5, 1], [o/8*13])
    assert can_do([o/4*3, 1], [o/4*7])
    assert can_do([o/4*3, 1], [o/4*11])
    assert can_do([o/3*2, 1], [o/3*5])
    assert can_do([o/3*2, 1], [o/3*8])
    assert can_do([o/5*4, 1], [o/5*9])
    assert can_do([o/5*4, 1], [o/5*14])
    assert can_do([o/6*5, 1], [o/6*11])
    assert can_do([o/6*5, 1], [o/6*17])
    assert can_do([o/8*7, 1], [o/8*15])


@XFAIL
def test_prudnikov_fail_3F2():
    """3F2 cases hyperexpand is known NOT to handle yet."""
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(1)/3, S(2)/3])
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(2)/3, S(4)/3])
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(4)/3, S(5)/3])

    # page 421
    assert can_do([a, a + S(1)/3, a + S(2)/3], [3*a/2, (3*a + 1)/2])

    # pages 422 ...
    assert can_do([-S.Half, S.Half, S.Half], [1, 1])  # elliptic integrals
    assert can_do([-S.Half, S.Half, 1], [S(3)/2, S(3)/2])
    # TODO LOTS more

    # PFDD
    assert can_do([S(1)/8, S(3)/8, 1], [S(9)/8, S(11)/8])
    assert can_do([S(1)/8, S(5)/8, 1], [S(9)/8, S(13)/8])
    assert can_do([S(1)/8, S(7)/8, 1], [S(9)/8, S(15)/8])
    assert can_do([S(1)/6, S(1)/3, 1], [S(7)/6, S(4)/3])
    assert can_do([S(1)/6, S(2)/3, 1], [S(7)/6, S(5)/3])
    assert can_do([S(1)/6, S(2)/3, 1], [S(5)/3, S(13)/6])
    assert can_do([S.Half, 1, 1], [S(1)/4, S(3)/4])
    # LOTS more
@XFAIL
def test_prudnikov_fail_other():
    """Other cases hyperexpand is known NOT to handle yet (struve, PFDD).

    Fix: ``S(2/3)``, ``S(4/3)`` sympify the result of Python's ``/`` (a
    float under true division, integer 0 under Python 2), NOT the rational
    2/3; rewritten as ``S(2)/3`` etc. so the intended Rational is used.
    """
    # 7.11.2

    # 7.12.1
    assert can_do([1, a], [b, 1 - 2*a + b])  # ???

    # 7.14.2
    assert can_do([-S(1)/2], [S(1)/2, 1])  # struve
    assert can_do([1], [S(1)/2, S(1)/2])  # struve
    assert can_do([S(1)/4], [S(1)/2, S(5)/4])  # PFDD
    assert can_do([S(3)/4], [S(3)/2, S(7)/4])  # PFDD
    assert can_do([1], [S(1)/4, S(3)/4])  # PFDD
    assert can_do([1], [S(3)/4, S(5)/4])  # PFDD
    assert can_do([1], [S(5)/4, S(7)/4])  # PFDD
    # TODO LOTS more

    # 7.15.2
    assert can_do([S(1)/2, 1], [S(3)/4, S(5)/4, S(3)/2])  # PFDD
    assert can_do([S(1)/2, 1], [S(7)/4, S(5)/4, S(3)/2])  # PFDD

    # 7.16.1
    assert can_do([], [S(1)/3, S(2)/3])  # PFDD
    assert can_do([], [S(2)/3, S(4)/3])  # PFDD
    assert can_do([], [S(5)/3, S(4)/3])  # PFDD

    # XXX this does not *evaluate* right??
    assert can_do([], [a, a + S.Half, 2*a - 1])
def test_bug():
    """Regression test: terminating 2F1([-1, 1], [z], -1) expands to the
    rational function (z + 1)/z."""
    h = hyper([-1, 1], [z], -1)
    assert hyperexpand(h) == (z + 1)/z
from random import randrange
from sympy.simplify.hyperexpand import (ShiftA, ShiftB, UnShiftA, UnShiftB,
MeijerShiftA, MeijerShiftB, MeijerShiftC, MeijerShiftD,
MeijerUnShiftA, MeijerUnShiftB, MeijerUnShiftC,
MeijerUnShiftD,
ReduceOrder, reduce_order, apply_operators,
devise_plan, make_derivative_operator, Formula,
hyperexpand, Hyper_Function, G_Function,
reduce_order_meijer,
build_hypergeometric_formula)
from sympy import hyper, I, S, meijerg, Piecewise
from sympy.abc import z, a, b, c
from sympy.utilities.pytest import XFAIL, raises, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.core.compatibility import range
from sympy import (cos, sin, log, exp, asin, lowergamma, atanh, besseli,
gamma, sqrt, pi, erf, exp_polar)
def test_branch_bug():
    """Regression: branch factors (exp_polar) in lowergamma-based
    expansions must come out right."""
    assert hyperexpand(hyper((-S(1)/3, S(1)/2), (S(2)/3, S(3)/2), -z)) == \
        -z**S('1/3')*lowergamma(exp_polar(I*pi)/3, z)/5 \
        + sqrt(pi)*erf(sqrt(z))/(5*sqrt(z))
    assert hyperexpand(meijerg([S(7)/6, 1], [], [S(2)/3], [S(1)/6, 0], z)) == \
        2*z**S('2/3')*(2*sqrt(pi)*erf(sqrt(z))/sqrt(z) - 2*lowergamma(
            S(2)/3, z)/z**S('2/3'))*gamma(S(2)/3)/gamma(S(5)/3)


def test_hyperexpand():
    """Elementary closed forms: exp, log, cos, sin, asin."""
    assert hyperexpand(hyper([], [], z)) == exp(z)
    assert hyperexpand(hyper([1, 1], [2], -z)*z) == log(1 + z)
    assert hyperexpand(hyper([], [S.Half], -z**2/4)) == cos(z)
    assert hyperexpand(z*hyper([], [S('3/2')], -z**2/4)) == sin(z)
    assert hyperexpand(hyper([S('1/2'), S('1/2')], [S('3/2')], z**2)*z) \
        == asin(z)


def can_do(ap, bq, numerical=True, div=1, lowerplane=False):
    """Return True if hyper(ap, bq, z) expands without a residual hyper.

    If ``numerical``, additionally verify the expansion against the
    original numerically, substituting random rational values (scaled
    down by ``div``) for any free symbols; ``lowerplane`` moves the
    test region into the lower half plane.
    """
    from sympy import exp_polar, exp
    r = hyperexpand(hyper(ap, bq, z))
    if r.has(hyper):
        return False
    if not numerical:
        return True
    repl = {}
    for n, a in enumerate(r.free_symbols - set([z])):
        repl[a] = randcplx(n)/div
    [a, b, c, d] = [2, -1, 3, 1]
    if lowerplane:
        [a, b, c, d] = [2, -2, 3, -1]
    return tn(
        hyper(ap, bq, z).subs(repl),
        r.replace(exp_polar, exp).subs(repl),
        z, a=a, b=b, c=c, d=d)


def test_roach():
    """Assorted pFq cases (presumably from Roach's Meijer G work — name
    suggests; TODO confirm the source)."""
    assert can_do([S(1)/2], [S(9)/2])
    assert can_do([], [1, S(5)/2, 4])
    assert can_do([-S.Half, 1, 2], [3, 4])
    assert can_do([S(1)/3], [-S(2)/3, -S(1)/2, S(1)/2, 1])
    assert can_do([-S(3)/2, -S(1)/2], [-S(5)/2, 1])
    assert can_do([-S(3)/2, ], [-S(1)/2, S(1)/2])
    assert can_do([-S(3)/2, -S(1)/2], [2])


@XFAIL
def test_roach_fail():
    """Companion cases to test_roach that are known to fail."""
    assert can_do([-S(1)/2, 1], [S(1)/4, S(1)/2, S(3)/4])
    assert can_do([S(3)/2], [S(5)/2, 5])
    assert can_do([-S(1)/2, S(1)/2, 1], [S(3)/2, S(5)/2])
    assert can_do([1, 2, 3], [S(1)/2, 4])
    assert can_do([S(1)/2], [-S(1)/3, -S(1)/2, -S(2)/3])


def test_polynomial():
    """Terminating (polynomial) series and divergent parameter choices."""
    from sympy import oo
    # Negative-integer lower parameter without matching upper one: diverges.
    assert hyperexpand(hyper([], [-1], z)) == oo
    assert hyperexpand(hyper([-2], [-1], z)) == oo
    # A zero upper parameter terminates the series at the constant term.
    assert hyperexpand(hyper([0, 0], [-1], z)) == 1
    assert can_do([-5, -2, randcplx(), randcplx()], [-10, randcplx()])
def test_hyperexpand_bases():
    """Expansions in terms of 'basis' functions: lowergamma, log, atanh,
    asin, besseli."""
    assert hyperexpand(hyper([2], [a], z)) == \
        a + z**(-a + 1)*(-a**2 + 3*a + z*(a - 1) - 2)*exp(z)* \
        lowergamma(a - 1, z) - 1
    assert hyperexpand(hyper([1, 2], [3], z)) == -2/z - 2*log(-z + 1)/z**2
    assert hyperexpand(hyper([S.Half, 2], [S(3)/2], z)) == \
        -1/(2*z - 2) + atanh(sqrt(z))/sqrt(z)/2
    assert hyperexpand(hyper([S(1)/2, S(1)/2], [S(5)/2], z)) == \
        (-3*z + 3)/4/(z*sqrt(-z + 1)) \
        + (6*z - 3)*asin(sqrt(z))/(4*z**(S(3)/2))
    assert hyperexpand(hyper([1, 2], [S(3)/2], z)) == -1/(2*z - 2) \
        - asin(sqrt(z))/(sqrt(z)*(2*z - 2)*sqrt(-z + 1))
    assert hyperexpand(hyper([-S.Half - 1, 1, 2], [S.Half, 3], z)) == \
        sqrt(z)*(6*z/7 - S(6)/5)*atanh(sqrt(z)) \
        + (-30*z**2 + 32*z - 6)/35/z - 6*log(-z + 1)/(35*z**2)
    assert hyperexpand(hyper([1 + S.Half, 1, 1], [2, 2], z)) == \
        -4*log(sqrt(-z + 1)/2 + S(1)/2)/z
    assert hyperexpand(hyper([2], [b, 1], z)) == \
        z**(-b/2 + S(1)/2)*besseli(b - 1, 2*sqrt(z))*gamma(b) \
        + z**(-b/2 + 1)*besseli(b, 2*sqrt(z))*gamma(b)


def test_hyperexpand_parametric():
    """Closed forms with a symbolic parameter a (binomial-type results)."""
    assert hyperexpand(hyper([a, S(1)/2 + a], [S(1)/2], z)) \
        == (1 + sqrt(z))**(-2*a)/2 + (1 - sqrt(z))**(-2*a)/2
    assert hyperexpand(hyper([a, -S(1)/2 + a], [2*a], z)) \
        == 2**(2*a - 1)*((-z + 1)**(S(1)/2) + 1)**(-2*a + 1)


def test_shifted_sum():
    """Expansion requiring a shifted-index summation step."""
    from sympy import simplify
    assert simplify(hyperexpand(z**4*hyper([2], [3, S('3/2')], -z**2))) \
        == z*sin(2*z) + (-z**2 + S.Half)*cos(2*z) - S.Half
def _randrat():
    """Return a random Rational k/50 with k in {10, ..., 34} — i.e. a
    "Steep rational", neither too small nor too large."""
    # randrange(10, 35) draws exactly the same integers as randrange(25) + 10.
    return S(randrange(10, 35))/50
def randcplx(offset=-1):
    """Return a random complex number a + I*b with rational real part and
    imaginary part shifted by 1 + offset (zero shift by default)."""
    re_part = _randrat()
    im_part = _randrat() + (1 + offset)
    return re_part + I*im_part
@slow
def test_formulae():
    """Every formula in the hypergeometric FormulaCollection must be
    numerically correct and satisfy its own matrix ODE."""
    from sympy.simplify.hyperexpand import FormulaCollection
    formulae = FormulaCollection().formulae
    for formula in formulae:
        h = formula.func(formula.z)
        rep = {}
        for n, sym in enumerate(formula.symbols):
            rep[sym] = randcplx(n)

        # NOTE hyperexpand returns truly branched functions. We know we are
        #      on the main sheet, but numerical evaluation can still go wrong
        #      (e.g. if exp_polar cannot be evalf'd).
        # Just replace all exp_polar by exp, this usually works.

        # first test if the closed-form is actually correct
        h = h.subs(rep)
        closed_form = formula.closed_form.subs(rep).rewrite('nonrepsmall')
        z = formula.z
        assert tn(h, closed_form.replace(exp_polar, exp), z)

        # now test the computed matrix
        cl = (formula.C * formula.B)[0].subs(rep).rewrite('nonrepsmall')
        assert tn(closed_form.replace(
            exp_polar, exp), cl.replace(exp_polar, exp), z)
        deriv1 = z*formula.B.applyfunc(lambda t: t.rewrite(
            'nonrepsmall')).diff(z)
        deriv2 = formula.M * formula.B
        for d1, d2 in zip(deriv1, deriv2):
            assert tn(d1.subs(rep).replace(exp_polar, exp),
                      d2.subs(rep).rewrite('nonrepsmall').replace(exp_polar, exp), z)


def test_meijerg_formulae():
    """Same consistency checks for the Meijer G formula collection."""
    from sympy.simplify.hyperexpand import MeijerFormulaCollection
    formulae = MeijerFormulaCollection().formulae
    for sig in formulae:
        for formula in formulae[sig]:
            g = meijerg(formula.func.an, formula.func.ap,
                        formula.func.bm, formula.func.bq,
                        formula.z)
            rep = {}
            for sym in formula.symbols:
                rep[sym] = randcplx()

            # first test if the closed-form is actually correct
            g = g.subs(rep)
            closed_form = formula.closed_form.subs(rep)
            z = formula.z
            assert tn(g, closed_form, z)

            # now test the computed matrix
            cl = (formula.C * formula.B)[0].subs(rep)
            assert tn(closed_form, cl, z)
            deriv1 = z*formula.B.diff(z)
            deriv2 = formula.M * formula.B
            for d1, d2 in zip(deriv1, deriv2):
                assert tn(d1.subs(rep), d2.subs(rep), z)
def op(f):
    """Apply the differential operator z*d/dz to the expression ``f``."""
    derivative = f.diff(z)
    return z*derivative
def test_plan():
    """devise_plan must reject impossible targets and its operator plans
    must reproduce the shifted hypergeometric functions."""
    assert devise_plan(Hyper_Function([0], ()),
            Hyper_Function([0], ()), z) == []
    with raises(ValueError):
        devise_plan(Hyper_Function([1], ()), Hyper_Function((), ()), z)
    with raises(ValueError):
        devise_plan(Hyper_Function([2], [1]), Hyper_Function([2], [2]), z)
    with raises(ValueError):
        devise_plan(Hyper_Function([2], []), Hyper_Function([S("1/2")], []), z)

    # We cannot use pi/(10000 + n) because polys is insanely slow.
    a1, a2, b1 = (randcplx(n) for n in range(3))
    b1 += 2*I
    h = hyper([a1, a2], [b1], z)

    h2 = hyper((a1 + 1, a2), [b1], z)
    assert tn(apply_operators(h,
        devise_plan(Hyper_Function((a1 + 1, a2), [b1]),
            Hyper_Function((a1, a2), [b1]), z), op),
        h2, z)

    h2 = hyper((a1 + 1, a2 - 1), [b1], z)
    assert tn(apply_operators(h,
        devise_plan(Hyper_Function((a1 + 1, a2 - 1), [b1]),
            Hyper_Function((a1, a2), [b1]), z), op),
        h2, z)


def test_plan_derivatives():
    """Plans executed via the derivative operator must reproduce shifted
    functions in both directions."""
    a1, a2, a3 = 1, 2, S('1/2')
    b1, b2 = 3, S('5/2')
    h = Hyper_Function((a1, a2, a3), (b1, b2))
    h2 = Hyper_Function((a1 + 1, a2 + 1, a3 + 2), (b1 + 1, b2 + 1))
    ops = devise_plan(h2, h, z)
    f = Formula(h, z, h(z), [])
    deriv = make_derivative_operator(f.M, z)
    assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)

    h2 = Hyper_Function((a1, a2 - 1, a3 - 2), (b1 - 1, b2 - 1))
    ops = devise_plan(h2, h, z)
    assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)


def test_reduction_operators():
    """ReduceOrder must refuse invalid parameter pairs and correctly lower
    the order of a hypergeometric function."""
    a1, a2, b1 = (randcplx(n) for n in range(3))
    h = hyper([a1], [b1], z)

    assert ReduceOrder(2, 0) is None
    assert ReduceOrder(2, -1) is None
    assert ReduceOrder(1, S('1/2')) is None

    h2 = hyper((a1, a2), (b1, a2), z)
    assert tn(ReduceOrder(a2, a2).apply(h, op), h2, z)

    h2 = hyper((a1, a2 + 1), (b1, a2), z)
    assert tn(ReduceOrder(a2 + 1, a2).apply(h, op), h2, z)

    h2 = hyper((a2 + 4, a1), (b1, a2), z)
    assert tn(ReduceOrder(a2 + 4, a2).apply(h, op), h2, z)

    # test several step order reduction
    ap = (a2 + 4, a1, b1 + 1)
    bq = (a2, b1, b1)
    func, ops = reduce_order(Hyper_Function(ap, bq))
    assert func.ap == (a1,)
    assert func.bq == (b1,)
    assert tn(apply_operators(h, ops, op), hyper(ap, bq, z), z)


def test_shift_operators():
    """ShiftA/ShiftB must shift individual upper/lower parameters."""
    a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
    h = hyper((a1, a2), (b1, b2, b3), z)

    raises(ValueError, lambda: ShiftA(0))
    raises(ValueError, lambda: ShiftB(1))

    assert tn(ShiftA(a1).apply(h, op), hyper((a1 + 1, a2), (b1, b2, b3), z), z)
    assert tn(ShiftA(a2).apply(h, op), hyper((a1, a2 + 1), (b1, b2, b3), z), z)
    assert tn(ShiftB(b1).apply(h, op), hyper((a1, a2), (b1 - 1, b2, b3), z), z)
    assert tn(ShiftB(b2).apply(h, op), hyper((a1, a2), (b1, b2 - 1, b3), z), z)
    assert tn(ShiftB(b3).apply(h, op), hyper((a1, a2), (b1, b2, b3 - 1), z), z)


def test_ushift_operators():
    """UnShiftA/UnShiftB must undo parameter shifts and reject invalid
    parameter configurations."""
    a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
    h = hyper((a1, a2), (b1, b2, b3), z)

    raises(ValueError, lambda: UnShiftA((1,), (), 0, z))
    raises(ValueError, lambda: UnShiftB((), (-1,), 0, z))
    raises(ValueError, lambda: UnShiftA((1,), (0, -1, 1), 0, z))
    raises(ValueError, lambda: UnShiftB((0, 1), (1,), 0, z))

    s = UnShiftA((a1, a2), (b1, b2, b3), 0, z)
    assert tn(s.apply(h, op), hyper((a1 - 1, a2), (b1, b2, b3), z), z)
    s = UnShiftA((a1, a2), (b1, b2, b3), 1, z)
    assert tn(s.apply(h, op), hyper((a1, a2 - 1), (b1, b2, b3), z), z)

    s = UnShiftB((a1, a2), (b1, b2, b3), 0, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1 + 1, b2, b3), z), z)
    s = UnShiftB((a1, a2), (b1, b2, b3), 1, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2 + 1, b3), z), z)
    s = UnShiftB((a1, a2), (b1, b2, b3), 2, z)
    assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2, b3 + 1), z), z)
def can_do_meijer(a1, a2, b1, b2, numeric=True):
    """Return True if meijerg(a1, a2, b1, b2, z) expands without residual
    meijerg; if ``numeric``, also verify the expansion numerically with
    random rational values for free symbols."""
    from sympy import unpolarify, expand
    r = hyperexpand(meijerg(a1, a2, b1, b2, z))
    if r.has(meijerg):
        return False
    # NOTE hyperexpand() returns a truly branched function, whereas numerical
    #      evaluation only works on the main branch. Since we are evaluating on
    #      the main branch, this should not be a problem, but expressions like
    #      exp_polar(I*pi/2*x)**a are evaluated incorrectly. We thus have to get
    #      rid of them. The expand heuristically does this...
    r = unpolarify(expand(r, force=True, power_base=True, power_exp=False,
                          mul=False, log=False, multinomial=False, basic=False))

    if not numeric:
        return True

    repl = {}
    for n, a in enumerate(meijerg(a1, a2, b1, b2, z).free_symbols - set([z])):
        repl[a] = randcplx(n)
    return tn(meijerg(a1, a2, b1, b2, z).subs(repl), r.subs(repl), z)
@slow
def test_meijerg_expand():
    """Meijer G expansions against mpmath docs, Wikipedia and Luke's
    'The Special Functions and their Approximations'.

    Fix: the assertion ``can_do_meijer([], [], [a, b], [])`` appeared twice
    in a row; the redundant duplicate has been removed.
    """
    from sympy import combsimp, simplify
    # from mpmath docs
    assert hyperexpand(meijerg([[], []], [[0], []], -z)) == exp(z)

    assert hyperexpand(meijerg([[1, 1], []], [[1], [0]], z)) == \
        log(z + 1)
    assert hyperexpand(meijerg([[1, 1], []], [[1], [1]], z)) == \
        z/(z + 1)
    assert hyperexpand(meijerg([[], []], [[S(1)/2], [0]], (z/2)**2)) \
        == sin(z)/sqrt(pi)
    assert hyperexpand(meijerg([[], []], [[0], [S(1)/2]], (z/2)**2)) \
        == cos(z)/sqrt(pi)
    assert can_do_meijer([], [a], [a - 1, a - S.Half], [])
    assert can_do_meijer([], [], [a/2], [-a/2], False)  # branches...
    assert can_do_meijer([a], [b], [a], [b, a - 1])

    # wikipedia
    assert hyperexpand(meijerg([1], [], [], [0], z)) == \
        Piecewise((0, abs(z) < 1), (1, abs(1/z) < 1),
                  (meijerg([1], [], [], [0], z), True))
    assert hyperexpand(meijerg([], [1], [0], [], z)) == \
        Piecewise((1, abs(z) < 1), (0, abs(1/z) < 1),
                  (meijerg([], [1], [0], [], z), True))

    # The Special Functions and their Approximations
    assert can_do_meijer([], [], [a + b/2], [a, a - b/2, a + S.Half])
    assert can_do_meijer(
        [], [], [a], [b], False)  # branches only agree for small z
    assert can_do_meijer([], [S.Half], [a], [-a])
    assert can_do_meijer([], [], [a, b], [])
    assert can_do_meijer([], [], [a, a + S.Half], [b, b + S.Half])
    assert can_do_meijer([], [], [a, -a], [0, S.Half], False)  # dito
    assert can_do_meijer([], [], [a, a + S.Half, b, b + S.Half], [])
    assert can_do_meijer([S.Half], [], [0], [a, -a])
    assert can_do_meijer([S.Half], [], [a], [0, -a], False)  # dito
    assert can_do_meijer([], [a - S.Half], [a, b], [a - S.Half], False)
    assert can_do_meijer([], [a + S.Half], [a + b, a - b, a], [], False)
    assert can_do_meijer([a + S.Half], [], [b, 2*a - b, a], [], False)

    # This for example is actually zero.
    assert can_do_meijer([], [], [], [a, b])

    # Testing a bug:
    assert hyperexpand(meijerg([0, 2], [], [], [-1, 1], z)) == \
        Piecewise((0, abs(z) < 1),
                  (z/2 - 1/(2*z), abs(1/z) < 1),
                  (meijerg([0, 2], [], [], [-1, 1], z), True))

    # Test that the simplest possible answer is returned:
    assert combsimp(simplify(hyperexpand(
        meijerg([1], [1 - a], [-a/2, -a/2 + S(1)/2], [], 1/z)))) == \
        -2*sqrt(pi)*(sqrt(z + 1) + 1)**a/a

    # Test that hyper is returned
    assert hyperexpand(meijerg([1], [], [a], [0, 0], z)) == hyper(
        (a,), (a + 1, a + 1), z*exp_polar(I*pi))*z**a*gamma(a)/gamma(a + 1)**2
def test_meijerg_lookup():
    """Meijer G table lookups yielding uppergamma and Si/Ci forms."""
    from sympy import uppergamma, Si, Ci
    assert hyperexpand(meijerg([a], [], [b, a], [], z)) == \
        z**b*exp(z)*gamma(-a + b + 1)*uppergamma(a - b, z)
    assert hyperexpand(meijerg([0], [], [0, 0], [], z)) == \
        exp(z)*uppergamma(0, z)
    assert can_do_meijer([a], [], [b, a + 1], [])
    assert can_do_meijer([a], [], [b + 2, a], [])
    assert can_do_meijer([a], [], [b - 2, a], [])

    # The result must not depend on the ordering of the repeated b-parameters.
    assert hyperexpand(meijerg([a], [], [a, a, a - S(1)/2], [], z)) == \
        -sqrt(pi)*z**(a - S(1)/2)*(2*cos(2*sqrt(z))*(Si(2*sqrt(z)) - pi/2)
                                   - 2*sin(2*sqrt(z))*Ci(2*sqrt(z))) == \
        hyperexpand(meijerg([a], [], [a, a - S(1)/2, a], [], z)) == \
        hyperexpand(meijerg([a], [], [a - S(1)/2, a, a], [], z))
    assert can_do_meijer([a - 1], [], [a + 2, a - S(3)/2, a + 1], [])


@XFAIL
def test_meijerg_expand_fail():
    # These basically test hyper([], [1/2 - a, 1/2 + 1, 1/2], z),
    # which is *very* messy. But since the meijer g actually yields a
    # sum of bessel functions, things can sometimes be simplified a lot and
    # are then put into tables...
    assert can_do_meijer([], [], [a + S.Half], [a, a - b/2, a + b/2])
    assert can_do_meijer([], [], [0, S.Half], [a, -a])
    assert can_do_meijer([], [], [3*a - S.Half, a, -a - S.Half], [a - S.Half])
    assert can_do_meijer([], [], [0, a - S.Half, -a - S.Half], [S.Half])
    assert can_do_meijer([], [], [a, b + S(1)/2, b], [2*b - a])
    assert can_do_meijer([], [], [a, b + S(1)/2, b, 2*b - a])
    assert can_do_meijer([S.Half], [], [-a, a], [0])


@slow
def test_meijerg():
    """Order reduction for Meijer G: invalid reductions return None and
    valid ones reproduce the higher-order function."""
    # carefully set up the parameters.
    # NOTE: this used to fail sometimes. I believe it is fixed, but if you
    #       hit an inexplicable test failure here, please let me know the seed.
    a1, a2 = (randcplx(n) - 5*I - n*I for n in range(2))
    b1, b2 = (randcplx(n) + 5*I + n*I for n in range(2))
    b3, b4, b5, a3, a4, a5 = (randcplx() for n in range(6))
    g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)

    assert ReduceOrder.meijer_minus(3, 4) is None
    assert ReduceOrder.meijer_plus(4, 3) is None

    g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2], z)
    assert tn(ReduceOrder.meijer_plus(a2, a2).apply(g, op), g2, z)

    g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2 + 1], z)
    assert tn(ReduceOrder.meijer_plus(a2, a2 + 1).apply(g, op), g2, z)

    g2 = meijerg([a1, a2 - 1], [a3, a4], [b1], [b3, b4, a2 + 2], z)
    assert tn(ReduceOrder.meijer_plus(a2 - 1, a2 + 2).apply(g, op), g2, z)

    g2 = meijerg([a1], [a3, a4, b2 - 1], [b1, b2 + 2], [b3, b4], z)
    assert tn(ReduceOrder.meijer_minus(
        b2 + 2, b2 - 1).apply(g, op), g2, z, tol=1e-6)

    # test several-step reduction
    an = [a1, a2]
    bq = [b3, b4, a2 + 1]
    ap = [a3, a4, b2 - 1]
    bm = [b1, b2 + 1]
    niq, ops = reduce_order_meijer(G_Function(an, ap, bm, bq))
    assert niq.an == (a1,)
    assert set(niq.ap) == set([a3, a4])
    assert niq.bm == (b1,)
    assert set(niq.bq) == set([b3, b4])
    assert tn(apply_operators(g, ops, op), meijerg(an, ap, bm, bq, z), z)


def test_meijerg_shift_operators():
    """The Meijer shift/unshift operators must shift each parameter group
    in the expected direction."""
    # carefully set up the parameters. XXX this still fails sometimes
    a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = (randcplx(n) for n in range(10))
    g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)

    assert tn(MeijerShiftA(b1).apply(g, op),
              meijerg([a1], [a3, a4], [b1 + 1], [b3, b4], z), z)
    assert tn(MeijerShiftB(a1).apply(g, op),
              meijerg([a1 - 1], [a3, a4], [b1], [b3, b4], z), z)
    assert tn(MeijerShiftC(b3).apply(g, op),
              meijerg([a1], [a3, a4], [b1], [b3 + 1, b4], z), z)
    assert tn(MeijerShiftD(a3).apply(g, op),
              meijerg([a1], [a3 - 1, a4], [b1], [b3, b4], z), z)

    s = MeijerUnShiftA([a1], [a3, a4], [b1], [b3, b4], 0, z)
    assert tn(
        s.apply(g, op), meijerg([a1], [a3, a4], [b1 - 1], [b3, b4], z), z)

    s = MeijerUnShiftC([a1], [a3, a4], [b1], [b3, b4], 0, z)
    assert tn(
        s.apply(g, op), meijerg([a1], [a3, a4], [b1], [b3 - 1, b4], z), z)

    s = MeijerUnShiftB([a1], [a3, a4], [b1], [b3, b4], 0, z)
    assert tn(
        s.apply(g, op), meijerg([a1 + 1], [a3, a4], [b1], [b3, b4], z), z)

    s = MeijerUnShiftD([a1], [a3, a4], [b1], [b3, b4], 0, z)
    assert tn(
        s.apply(g, op), meijerg([a1], [a3 + 1, a4], [b1], [b3, b4], z), z)
@slow
def test_meijerg_confluence():
    """Confluent Meijer G cases that expand to Piecewise results with
    distinct branches for |z| < 1 and |z| > 1."""
    def t(m, a, b):
        # Check that hyperexpand(m) is Piecewise((a, |z|<1), (b, |1/z|<1),
        # (m, True)) and that both branches evaluate correctly numerically.
        from sympy import sympify, Piecewise
        a, b = sympify([a, b])
        m_ = m
        m = hyperexpand(m)
        if not m == Piecewise((a, abs(z) < 1), (b, abs(1/z) < 1), (m_, True)):
            return False
        if not (m.args[0].args[0] == a and m.args[1].args[0] == b):
            return False
        z0 = randcplx()/10
        if abs(m.subs(z, z0).n() - a.subs(z, z0).n()).n() > 1e-10:
            return False
        if abs(m.subs(z, 1/z0).n() - b.subs(z, 1/z0).n()).n() > 1e-10:
            return False
        return True
    assert t(meijerg([], [1, 1], [0, 0], [], z), -log(z), 0)
    assert t(meijerg(
        [], [3, 1], [0, 0], [], z), -z**2/4 + z - log(z)/2 - S(3)/4, 0)
    assert t(meijerg([], [3, 1], [-1, 0], [], z),
             z**2/12 - z/2 + log(z)/2 + S(1)/4 + 1/(6*z), 0)
    assert t(meijerg([], [1, 1, 1, 1], [0, 0, 0, 0], [], z), -log(z)**3/6, 0)
    assert t(meijerg([1, 1], [], [], [0, 0], z), 0, -log(1/z))
    assert t(meijerg([1, 1], [2, 2], [1, 1], [0, 0], z),
             -z*log(z) + 2*z, -log(1/z) + 2)
    assert t(meijerg([S(1)/2], [1, 1], [0, 0], [S(3)/2], z), log(z)/2 - 1, 0)

    def u(an, ap, bm, bq):
        # With allow_hyper=True a residual meijerg is acceptable only as
        # part of a three-branch Piecewise; always check numerically.
        m = meijerg(an, ap, bm, bq, z)
        m2 = hyperexpand(m, allow_hyper=True)
        if m2.has(meijerg) and not (m2.is_Piecewise and len(m2.args) == 3):
            return False
        return tn(m, m2, z)
    assert u([], [1], [0, 0], [])
    assert u([1, 1], [], [], [0])
    assert u([1, 1], [2, 2, 5], [1, 1, 6], [0, 0])
    assert u([1, 1], [2, 2, 5], [1, 1, 6], [0])
def test_lerchphi():
from sympy import combsimp, exp_polar, polylog, log, lerchphi
assert hyperexpand(hyper([1, a], [a + 1], z)/a) == lerchphi(z, 1, a)
assert hyperexpand(
hyper([1, a, a], [a + 1, a + 1], z)/a**2) == lerchphi(z, 2, a)
assert hyperexpand(hyper([1, a, a, a], [a + 1, a + 1, a + 1], z)/a**3) == \
lerchphi(z, 3, a)
assert hyperexpand(hyper([1] + [a]*10, [a + 1]*10, z)/a**10) == \
lerchphi(z, 10, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a], [], [0],
[-a], exp_polar(-I*pi)*z))) == lerchphi(z, 1, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a], [], [0],
[-a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 2, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a, 1 - a], [], [0],
[-a, -a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 3, a)
assert hyperexpand(z*hyper([1, 1], [2], z)) == -log(1 + -z)
assert hyperexpand(z*hyper([1, 1, 1], [2, 2], z)) == polylog(2, z)
assert hyperexpand(z*hyper([1, 1, 1, 1], [2, 2, 2], z)) == polylog(3, z)
assert hyperexpand(hyper([1, a, 1 + S(1)/2], [a + 1, S(1)/2], z)) == \
-2*a/(z - 1) + (-2*a**2 + a)*lerchphi(z, 1, a)
# Now numerical tests. These make sure reductions etc are carried out
# correctly
# a rational function (polylog at negative integer order)
assert can_do([2, 2, 2], [1, 1])
# NOTE these contain log(1-x) etc ... better make sure we have |z| < 1
# reduction of order for polylog
assert can_do([1, 1, 1, b + 5], [2, 2, b], div=10)
# reduction of order for lerchphi
# XXX lerchphi in mpmath is flaky
assert can_do(
[1, a, a, a, b + 5], [a + 1, a + 1, a + 1, b], numerical=False)
# test a bug
from sympy import Abs
assert hyperexpand(hyper([S(1)/2, S(1)/2, S(1)/2, 1],
[S(3)/2, S(3)/2, S(3)/2], S(1)/4)) == \
Abs(-polylog(3, exp_polar(I*pi)/2) + polylog(3, S(1)/2))
def test_partial_simp():
    """Check generated hypergeometric formulae, then check that hyperexpand
    only *partially* simplifies when no full closed form is known."""
    # First test that hypergeometric function formulae work.
    a, b, c, d, e = (randcplx() for _ in range(5))
    for func in [Hyper_Function([a, b, c], [d, e]),
                 Hyper_Function([], [a, b, c, d, e])]:
        f = build_hypergeometric_formula(func)
        z = f.z
        assert f.closed_form == func(z)
        # The basis B must satisfy the first-order ODE system z*B' = M*B.
        deriv1 = f.B.diff(z)*z
        deriv2 = f.M*f.B
        for func1, func2 in zip(deriv1, deriv2):
            assert tn(func1, func2, z)

    # Now test that formulae are partially simplified.
    from sympy.abc import a, b, z
    assert hyperexpand(hyper([3, a], [1, b], z)) == \
        (-a*b/2 + a*z/2 + 2*a)*hyper([a + 1], [b], z) \
        + (a*b/2 - 2*a + 1)*hyper([a], [b], z)
    assert tn(
        hyperexpand(hyper([3, d], [1, e], z)), hyper([3, d], [1, e], z), z)
    assert hyperexpand(hyper([3], [1, a, b], z)) == \
        hyper((), (a, b), z) \
        + z*hyper((), (a + 1, b), z)/(2*a) \
        - z*(b - 4)*hyper((), (a + 1, b + 1), z)/(2*a*b)
    assert tn(
        hyperexpand(hyper([3], [1, d, e], z)), hyper([3], [1, d, e], z), z)
def test_hyperexpand_special():
    """Expansion at special argument values (z = 1, -1, 0)."""
    # Gauss summation theorem at z = 1.
    assert hyperexpand(hyper([a, b], [c], 1)) == \
        gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
    # Kummer-type summations at z = -1.
    assert hyperexpand(hyper([a, b], [1 + a - b], -1)) == \
        gamma(1 + a/2)*gamma(1 + a - b)/gamma(1 + a)/gamma(1 + a/2 - b)
    assert hyperexpand(hyper([a, b], [1 + b - a], -1)) == \
        gamma(1 + b/2)*gamma(1 + b - a)/gamma(1 + b)/gamma(1 + b/2 - a)
    assert hyperexpand(meijerg([1 - z - a/2], [1 - z + a/2], [b/2], [-b/2], 1)) == \
        gamma(1 - 2*z)*gamma(z + a/2 + b/2)/gamma(1 - z + a/2 - b/2) \
        /gamma(1 - z - a/2 + b/2)/gamma(1 - z + a/2 + b/2)
    # At z = 0 the series collapses to its constant term 1, but the
    # unexpanded hyper object must not evaluate to 0.
    assert hyperexpand(hyper([a], [b], 0)) == 1
    assert hyper([a], [b], 0) != 0
def test_Mod1_behavior():
    """Regression test: parameters that differ by a symbolic integer
    (mod-1 reasoning) must not make hyperexpand hang."""
    from sympy import Symbol, simplify, lowergamma

    n = Symbol('n', integer=True)
    # This expansion used to hang; it should terminate and reduce to the
    # lower incomplete gamma function.
    expanded = hyperexpand(meijerg([1], [], [n + 1], [0], z))
    assert simplify(expanded) == lowergamma(n + 1, z)
@slow
def test_prudnikov_misc():
    """Assorted reductions from the Prudnikov tables (see test_prudnikov_1
    for the bibliographic reference)."""
    assert can_do([1, (3 + I)/2, (3 - I)/2], [S(3)/2, 2])
    assert can_do([S.Half, a - 1], [S(3)/2, a + 1], lowerplane=True)
    assert can_do([], [b + 1])
    assert can_do([a], [a - 1, b + 1])
    assert can_do([a], [a - S.Half, 2*a])
    assert can_do([a], [a - S.Half, 2*a + 1])
    assert can_do([a], [a - S.Half, 2*a - 1])
    assert can_do([a], [a + S.Half, 2*a])
    assert can_do([a], [a + S.Half, 2*a + 1])
    assert can_do([a], [a + S.Half, 2*a - 1])
    assert can_do([S.Half], [b, 2 - b])
    assert can_do([S.Half], [b, 3 - b])
    assert can_do([1], [2, b])

    assert can_do([a, a + S.Half], [2*a, b, 2*a - b + 1])
    assert can_do([a, a + S.Half], [S.Half, 2*a, 2*a + S.Half])
    assert can_do([a], [a + 1], lowerplane=True)  # lowergamma
@slow
def test_prudnikov_1():
    # A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
    # Integrals and Series: More Special Functions, Vol. 3,.
    # Gordon and Breach Science Publisher

    # 7.3.1
    assert can_do([a, -a], [S.Half])
    assert can_do([a, 1 - a], [S.Half])
    assert can_do([a, 1 - a], [S(3)/2])
    assert can_do([a, 2 - a], [S.Half])
    assert can_do([a, 2 - a], [S(3)/2])
    # NOTE(review): this repeats the previous line verbatim — possibly a
    # copy/paste slip for another table entry; confirm against Prudnikov.
    assert can_do([a, 2 - a], [S(3)/2])
    assert can_do([a, a + S(1)/2], [2*a - 1])
    assert can_do([a, a + S(1)/2], [2*a])
    assert can_do([a, a + S(1)/2], [2*a + 1])
    assert can_do([a, a + S(1)/2], [S(1)/2])
    assert can_do([a, a + S(1)/2], [S(3)/2])
    assert can_do([a, a/2 + 1], [a/2])
    assert can_do([1, b], [2])
    assert can_do([1, b], [b + 1], numerical=False)  # Lerch Phi
    # NOTE: branches are complicated for |z| > 1

    assert can_do([a], [2*a])
    assert can_do([a], [2*a + 1])
    assert can_do([a], [2*a - 1])
@slow
def test_prudnikov_2():
    """Prudnikov-table 2F1 cases with (half-)integer parameters."""
    half = S.Half

    # A few individual half-integer cases first.
    for upper, lower in [
        ([-half, -half], [half]),
        ([-half, half], [3*half]),
        ([-half, half], [5*half]),
        ([-half, half], [7*half]),
        ([-half, 1], [half]),
    ]:
        assert can_do(upper, lower)

    # Sweep +-1/2 against half-integer and pure-integer parameter grids.
    for p in [-half, half]:
        for num in [-half, half, 1, 3*half, 2, 5*half, 3, 7*half, 4]:
            for den in [-half, half, 3*half, 5*half, 7*half]:
                assert can_do([p, num], [den])
        for num in [1, 2, 3, 4]:
            for den in [1, 2, 3, 4]:
                assert can_do([p, num], [den])
@slow
def test_prudnikov_3():
    """Prudnikov-table 2F1 cases with quarter/third parameters plus an
    integer/half-integer sweep."""
    h = S.Half
    assert can_do([S(1)/4, S(3)/4], [h])
    assert can_do([S(1)/4, S(3)/4], [3*h])
    assert can_do([S(1)/3, S(2)/3], [3*h])
    assert can_do([S(3)/4, S(5)/4], [h])
    assert can_do([S(3)/4, S(5)/4], [3*h])

    for p in [1, 2, 3, 4]:
        for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4, 9*h]:
            for m in [1, 3*h, 2, 5*h, 3, 7*h, 4]:
                assert can_do([p, m], [n])
@slow
def test_prudnikov_4():
    """Prudnikov-table 2F1([p, m], [n]) sweeps for half-integer p."""
    half = S.Half

    for p in [3*half, 5*half, 7*half]:
        # Half-integer lower parameter against a mixed upper grid.
        for lower in [-half, half, 3*half, 5*half, 7*half]:
            for upper in [3*half, 2, 5*half, 3, 7*half, 4]:
                assert can_do([p, upper], [lower])
        # Integer-only grid.
        for lower in [1, 2, 3, 4]:
            for upper in [2, 3, 4]:
                assert can_do([p, upper], [lower])
@slow
def test_prudnikov_5():
    """3F2 cases with one upper parameter equal to -1/2."""
    h = S.Half

    # Integer grids (q >= p, s >= r avoids symmetric duplicates).
    for p in [1, 2, 3]:
        for q in range(p, 4):
            for r in [1, 2, 3]:
                for s in range(r, 4):
                    assert can_do([-h, p, q], [r, s])

    # Half-integer grids; restrict s to avoid duplicates by symmetry.
    for p in [h, 1, 3*h, 2, 5*h, 3]:
        for q in [h, 3*h, 5*h]:
            for r in [h, 3*h, 5*h]:
                for s in [h, 3*h, 5*h]:
                    if s <= q and s <= r:
                        assert can_do([-h, p, q], [r, s])

    # Mixed integer / half-integer grids.
    for p in [h, 1, 3*h, 2, 5*h, 3]:
        for q in [1, 2, 3]:
            for r in [h, 3*h, 5*h]:
                for s in [1, 2, 3]:
                    assert can_do([-h, p, q], [r, s])
@slow
def test_prudnikov_6():
    """3F2 cases with one upper parameter equal to 1/2."""
    h = S.Half

    for m in [3*h, 5*h]:
        for n in [1, 2, 3]:
            for q in [h, 1, 2]:
                for p in [1, 2, 3]:
                    assert can_do([h, q, p], [m, n])
            for q in [1, 2, 3]:
                for p in [3*h, 5*h]:
                    assert can_do([h, q, p], [m, n])

    for q in [1, 2]:
        for p in [1, 2, 3]:
            for m in [1, 2, 3]:
                for n in [1, 2, 3]:
                    assert can_do([h, q, p], [m, n])

    assert can_do([h, h, 5*h], [3*h, 3*h])
    assert can_do([h, 1, 5*h], [3*h, 3*h])
    assert can_do([h, 2, 2], [1, 3])

    # pages 435 to 457 contain more PFDD and stuff like this
@slow
def test_prudnikov_7():
    """1F1 reductions over integer / half-integer parameter grids."""
    assert can_do([3], [6])

    h = S.Half
    for n in [h, 3*h, 5*h, 7*h]:
        assert can_do([-h], [n])
    for m in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:  # HERE
        for n in [-h, h, 3*h, 5*h, 7*h, 1, 2, 3, 4]:
            assert can_do([m], [n])
@slow
def test_prudnikov_8():
    """2F2 cases (Prudnikov section 7.12.2)."""
    h = S.Half

    # 7.12.2
    for a in [1, 2, 3]:
        for b in [1, 2, 3]:
            for c in range(1, a + 1):
                for d in [h, 1, 3*h, 2, 5*h, 3]:
                    assert can_do([a, b], [c, d])
        for b in [3*h, 5*h]:
            for c in [h, 1, 3*h, 2, 5*h, 3]:
                for d in [1, 2, 3]:
                    assert can_do([a, b], [c, d])

    for a in [-h, h, 3*h, 5*h]:
        for b in [1, 2, 3]:
            for c in [h, 1, 3*h, 2, 5*h, 3]:
                for d in [1, 2, 3]:
                    assert can_do([a, b], [c, d])
        for b in [h, 3*h, 5*h]:
            for c in [h, 3*h, 5*h, 3]:
                for d in [h, 1, 3*h, 2, 5*h, 3]:
                    # Restrict to c <= b to avoid symmetric duplicates.
                    if c <= b:
                        assert can_do([a, b], [c, d])
def test_prudnikov_9():
    """Prudnikov 7.13.1: 0F1 with a single (half-)integer lower parameter.

    A general formula covers all of these, so this mostly guards the
    reduction machinery.
    """
    # Lower parameters 1/2, 1, 3/2, ..., 9/2.
    for k in range(9):
        assert can_do([], [(S(k) + 1)/2])
    # Negative half-odd lower parameters -1/2, -3/2, ..., -9/2.
    for k in range(5):
        assert can_do([], [-(2*S(k) + 1)/2])
@slow
def test_prudnikov_10():
    """1F2 cases (Prudnikov section 7.14.2)."""
    # 7.14.2
    h = S.Half
    for p in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
        for m in [1, 2, 3, 4]:
            # n >= m avoids duplicates (lower parameters are symmetric).
            for n in range(m, 5):
                assert can_do([p], [m, n])

    for p in [1, 2, 3, 4]:
        for n in [h, 3*h, 5*h, 7*h]:
            for m in [1, 2, 3, 4]:
                assert can_do([p], [n, m])

    for p in [3*h, 5*h, 7*h]:
        for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
            assert can_do([p], [h, m])
            assert can_do([p], [3*h, m])

    for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
        assert can_do([7*h], [5*h, m])

    assert can_do([-S(1)/2], [S(1)/2, S(1)/2])  # shine-integral shi
def test_prudnikov_11():
    """2F3 cases (Prudnikov section 7.15)."""
    # 7.15
    assert can_do([a, a + S.Half], [2*a, b, 2*a - b])
    assert can_do([a, a + S.Half], [S(3)/2, 2*a, 2*a - S(1)/2])

    assert can_do([S(1)/4, S(3)/4], [S(1)/2, S(1)/2, 1])
    assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(1)/2, 2])
    assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(3)/2, 1])
    assert can_do([S(5)/4, S(7)/4], [S(3)/2, S(5)/2, 2])

    assert can_do([1, 1], [S(3)/2, 2, 2])  # cosh-integral chi
@slow
def test_prudnikov_12():
    """0F3 cases (Prudnikov section 7.16)."""
    # 7.16
    assert can_do(
        [], [a, a + S.Half, 2*a], False)  # branches only agree for some z!
    assert can_do([], [a, a + S.Half, 2*a + 1], False)  # ditto
    assert can_do([], [S.Half, a, a + S.Half])
    assert can_do([], [S(3)/2, a, a + S.Half])

    assert can_do([], [S(1)/4, S(1)/2, S(3)/4])
    assert can_do([], [S(1)/2, S(1)/2, 1])
    assert can_do([], [S(1)/2, S(3)/2, 1])
    assert can_do([], [S(3)/4, S(3)/2, S(5)/4])
    assert can_do([], [1, 1, S(3)/2])
    assert can_do([], [1, 2, S(3)/2])
    assert can_do([], [1, S(3)/2, S(3)/2])
    assert can_do([], [S(5)/4, S(3)/2, S(7)/4])
    assert can_do([], [2, S(3)/2, S(3)/2])
@slow
def test_prudnikov_2F1():
    """2F1 cases that reduce to (complete) elliptic integrals."""
    half = S.Half
    for first in (-half, half):
        for second in (half, 3*half, 5*half, 7*half):
            for lower in (1, 2, 3, 4):
                assert can_do([first, second], [lower])
@XFAIL
def test_prudnikov_fail_2F1():
    """2F1 reductions from the tables that hyperexpand cannot do yet
    (expected failures)."""
    assert can_do([a, b], [b + 1])  # incomplete beta function
    assert can_do([-1, b], [c])    # Poly. also -2, -3 etc

    # TODO polys

    # Legendre functions:
    assert can_do([a, b], [a + b + S.Half])
    assert can_do([a, b], [a + b - S.Half])
    assert can_do([a, b], [a + b + S(3)/2])
    assert can_do([a, b], [(a + b + 1)/2])
    assert can_do([a, b], [(a + b)/2 + 1])
    assert can_do([a, b], [a - b + 1])
    assert can_do([a, b], [a - b + 2])
    assert can_do([a, b], [2*b])
    assert can_do([a, b], [S.Half])
    assert can_do([a, b], [S(3)/2])
    assert can_do([a, 1 - a], [c])
    assert can_do([a, 2 - a], [c])
    assert can_do([a, 3 - a], [c])
    assert can_do([a, a + S(1)/2], [c])
    assert can_do([1, b], [c])
    assert can_do([1, b], [S(3)/2])
    assert can_do([S(1)/4, S(3)/4], [1])

    # PFDD
    o = S(1)
    assert can_do([o/8, 1], [o/8*9])
    assert can_do([o/6, 1], [o/6*7])
    assert can_do([o/6, 1], [o/6*13])
    assert can_do([o/5, 1], [o/5*6])
    assert can_do([o/5, 1], [o/5*11])
    assert can_do([o/4, 1], [o/4*5])
    assert can_do([o/4, 1], [o/4*9])
    assert can_do([o/3, 1], [o/3*4])
    assert can_do([o/3, 1], [o/3*7])
    assert can_do([o/8*3, 1], [o/8*11])
    assert can_do([o/5*2, 1], [o/5*7])
    assert can_do([o/5*2, 1], [o/5*12])
    assert can_do([o/5*3, 1], [o/5*8])
    assert can_do([o/5*3, 1], [o/5*13])
    assert can_do([o/8*5, 1], [o/8*13])
    assert can_do([o/4*3, 1], [o/4*7])
    assert can_do([o/4*3, 1], [o/4*11])
    assert can_do([o/3*2, 1], [o/3*5])
    assert can_do([o/3*2, 1], [o/3*8])
    assert can_do([o/5*4, 1], [o/5*9])
    assert can_do([o/5*4, 1], [o/5*14])
    assert can_do([o/6*5, 1], [o/6*11])
    assert can_do([o/6*5, 1], [o/6*17])
    assert can_do([o/8*7, 1], [o/8*15])
@XFAIL
def test_prudnikov_fail_3F2():
    """3F2 reductions from the tables that hyperexpand cannot do yet
    (expected failures)."""
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(1)/3, S(2)/3])
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(2)/3, S(4)/3])
    assert can_do([a, a + S(1)/3, a + S(2)/3], [S(4)/3, S(5)/3])

    # page 421
    assert can_do([a, a + S(1)/3, a + S(2)/3], [3*a/2, (3*a + 1)/2])

    # pages 422 ...
    assert can_do([-S.Half, S.Half, S.Half], [1, 1])  # elliptic integrals
    assert can_do([-S.Half, S.Half, 1], [S(3)/2, S(3)/2])
    # TODO LOTS more

    # PFDD
    assert can_do([S(1)/8, S(3)/8, 1], [S(9)/8, S(11)/8])
    assert can_do([S(1)/8, S(5)/8, 1], [S(9)/8, S(13)/8])
    assert can_do([S(1)/8, S(7)/8, 1], [S(9)/8, S(15)/8])
    assert can_do([S(1)/6, S(1)/3, 1], [S(7)/6, S(4)/3])
    assert can_do([S(1)/6, S(2)/3, 1], [S(7)/6, S(5)/3])
    assert can_do([S(1)/6, S(2)/3, 1], [S(5)/3, S(13)/6])
    assert can_do([S.Half, 1, 1], [S(1)/4, S(3)/4])
    # LOTS more
@XFAIL
def test_prudnikov_fail_other():
    """Miscellaneous table reductions that hyperexpand cannot do yet
    (expected failures)."""
    # 7.11.2

    # 7.12.1
    assert can_do([1, a], [b, 1 - 2*a + b])  # ???

    # 7.14.2
    assert can_do([-S(1)/2], [S(1)/2, 1])  # struve
    assert can_do([1], [S(1)/2, S(1)/2])  # struve
    assert can_do([S(1)/4], [S(1)/2, S(5)/4])  # PFDD
    assert can_do([S(3)/4], [S(3)/2, S(7)/4])  # PFDD
    assert can_do([1], [S(1)/4, S(3)/4])  # PFDD
    assert can_do([1], [S(3)/4, S(5)/4])  # PFDD
    assert can_do([1], [S(5)/4, S(7)/4])  # PFDD
    # TODO LOTS more

    # 7.15.2
    assert can_do([S(1)/2, 1], [S(3)/4, S(5)/4, S(3)/2])  # PFDD
    assert can_do([S(1)/2, 1], [S(7)/4, S(5)/4, S(3)/2])  # PFDD

    # 7.16.1
    # BUG FIX: S(2/3) / S(4/3) performed Python float division *before*
    # sympification, producing Float(0.666...) instead of Rational(2, 3).
    # Use S(n)/m so the division happens on exact sympy Integers.
    assert can_do([], [S(1)/3, S(2)/3])  # PFDD
    assert can_do([], [S(2)/3, S(4)/3])  # PFDD
    assert can_do([], [S(5)/3, S(4)/3])  # PFDD

    # XXX this does not *evaluate* right??
    assert can_do([], [a, a + S.Half, 2*a - 1])
def test_bug():
    """Regression: a terminating 2F1 with upper parameter -1 must expand
    to the rational function (z + 1)/z."""
    expanded = hyperexpand(hyper([-1, 1], [z], -1))
    assert expanded == (z + 1)/z
| true | true |
1c311cc4a86d1fcfc51f40fb745398e3291d2ee1 | 2,144 | py | Python | joladnijo/serializers.py | joladnijo/joladnijo-backend | 89240e3990ce9cdad86a1d212d28062c07a58edb | [
"MIT"
] | null | null | null | joladnijo/serializers.py | joladnijo/joladnijo-backend | 89240e3990ce9cdad86a1d212d28062c07a58edb | [
"MIT"
] | 40 | 2022-03-06T19:46:07.000Z | 2022-03-27T11:50:02.000Z | joladnijo/serializers.py | joladnijo/joladnijo-backend | 89240e3990ce9cdad86a1d212d28062c07a58edb | [
"MIT"
] | 1 | 2022-03-29T08:53:21.000Z | 2022-03-29T08:53:21.000Z | from rest_framework import serializers
from . import models
class ContactSerializer(serializers.ModelSerializer):
    """Serializes a Contact record.

    The owning ``organization`` / ``aid_center`` foreign keys are excluded
    because contacts are only serialized nested inside those objects, where
    the owner is already implied by context.
    """

    class Meta:
        model = models.Contact
        exclude = ['organization', 'aid_center']
class OrganizationSerializer(serializers.ModelSerializer):
    """Full Organization representation with its contact details inlined."""

    # Nested read of the related Contact record.
    contact = ContactSerializer()

    class Meta:
        model = models.Organization
        fields = '__all__'
class AssetCategorySerializer(serializers.ModelSerializer):
    """Read-only name/icon pair describing an asset category."""

    class Meta:
        model = models.AssetCategory
        fields = ['name', 'icon']
        read_only_fields = ['name', 'icon']
class AssetTypeSerializer(serializers.ModelSerializer):
    """Asset type with its parent category nested; all fields read-only."""

    category = AssetCategorySerializer()
    # NOTE(review): explicit CharField overrides the model-derived field —
    # presumably to force a plain string representation; confirm against
    # models.AssetType.icon.
    icon = serializers.CharField()

    class Meta:
        model = models.AssetType
        fields = '__all__'
        read_only_fields = ['name', 'icon', 'category']
class AssetRequestSerializer(serializers.ModelSerializer):
    """An aid center's request for a given asset type.

    The ``aid_center`` foreign key is excluded because requests are always
    serialized nested under their aid center.
    """

    type = AssetTypeSerializer()

    class Meta:
        model = models.AssetRequest
        exclude = ['aid_center']

    def build_standard_field(self, field_name, model_field):
        """Force the ``status`` field to be required.

        ModelSerializer would otherwise infer it as optional — presumably
        because the model field has a default; confirm against
        models.AssetRequest.
        """
        # Modernized from the legacy two-argument super() call.
        field_class, field_kwargs = super().build_standard_field(field_name, model_field)
        if field_name == 'status':
            field_kwargs['required'] = True
        return field_class, field_kwargs
class FeedItemSerializer(serializers.ModelSerializer):
    """Feed entry flattened with the name/slug of its aid center for display.

    ``user`` and the raw ``aid_center`` FK are excluded from the payload.
    """

    aid_center_name = serializers.CharField(source='aid_center.name')
    aid_center_slug = serializers.CharField(source='aid_center.slug')

    class Meta:
        model = models.FeedItem
        exclude = ['user', 'aid_center']
class AidCenterSerializer(serializers.ModelSerializer):
    """Full detail view of an aid center: organization, contact, geo data,
    the asset requests grouped by status, and its activity feed."""

    organization = OrganizationSerializer()
    contact = ContactSerializer()
    geo_location = serializers.JSONField()
    # Grouped request lists, read-only — presumably backed by model
    # properties / related managers of the same names; confirm in models.
    assets_requested = AssetRequestSerializer(many=True, read_only=True)
    assets_urgent = AssetRequestSerializer(many=True, read_only=True)
    assets_fulfilled = AssetRequestSerializer(many=True, read_only=True)
    feed = FeedItemSerializer(many=True, read_only=True)

    class Meta:
        model = models.AidCenter
        fields = '__all__'
| 29.777778 | 117 | 0.71222 | from rest_framework import serializers
from . import models
class ContactSerializer(serializers.ModelSerializer):
class Meta:
model = models.Contact
exclude = ['organization', 'aid_center']
class OrganizationSerializer(serializers.ModelSerializer):
contact = ContactSerializer()
class Meta:
model = models.Organization
fields = '__all__'
class AssetCategorySerializer(serializers.ModelSerializer):
class Meta:
model = models.AssetCategory
fields = ['name', 'icon']
read_only_fields = ['name', 'icon']
class AssetTypeSerializer(serializers.ModelSerializer):
category = AssetCategorySerializer()
icon = serializers.CharField()
class Meta:
model = models.AssetType
fields = '__all__'
read_only_fields = ['name', 'icon', 'category']
class AssetRequestSerializer(serializers.ModelSerializer):
type = AssetTypeSerializer()
class Meta:
model = models.AssetRequest
exclude = ['aid_center']
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AssetRequestSerializer, self).build_standard_field(field_name, model_field)
if field_name == 'status':
field_kwargs['required'] = True
return field_class, field_kwargs
class FeedItemSerializer(serializers.ModelSerializer):
aid_center_name = serializers.CharField(source='aid_center.name')
aid_center_slug = serializers.CharField(source='aid_center.slug')
class Meta:
model = models.FeedItem
exclude = ['user', 'aid_center']
class AidCenterSerializer(serializers.ModelSerializer):
organization = OrganizationSerializer()
contact = ContactSerializer()
geo_location = serializers.JSONField()
assets_requested = AssetRequestSerializer(many=True, read_only=True)
assets_urgent = AssetRequestSerializer(many=True, read_only=True)
assets_fulfilled = AssetRequestSerializer(many=True, read_only=True)
feed = FeedItemSerializer(many=True, read_only=True)
class Meta:
model = models.AidCenter
fields = '__all__'
| true | true |
1c311d91ed8d7ddec962d080c66a6e62be771ba9 | 7,380 | py | Python | trainerpi.py | derillina/trainerpi | 15268c5765ee5e12f217e9585af7b29e57ba59d8 | [
"MIT"
] | 33 | 2019-07-04T19:05:33.000Z | 2022-01-12T19:36:27.000Z | trainerpi.py | derillina/trainerpi | 15268c5765ee5e12f217e9585af7b29e57ba59d8 | [
"MIT"
] | 5 | 2018-07-24T18:36:12.000Z | 2021-01-31T05:17:39.000Z | trainerpi.py | derillina/trainerpi | 15268c5765ee5e12f217e9585af7b29e57ba59d8 | [
"MIT"
] | 11 | 2019-07-21T15:48:38.000Z | 2022-03-29T20:08:42.000Z | import asyncio
import bleCSC
import collections
import numpy
import os
import pygame
import time
# --------------------------------------------------------------------------- #
# SETTINGS #
# --------------------------------------------------------------------------- #
ROLLING_LENGTH = 2096. # mm
POWER_CURVE = numpy.loadtxt("power-4.csv", delimiter=",")
SCREEN_SIZE = WIDTH, HEIGHT = 320, 240
BORDER = 10
FONT_NAME = "DejaVuSans"
FONT_SIZE = 28
SCREEN_UPDATE_DELAY = 0.05 # Display update should be fast for the timer to "look" right
CSC_SENSOR_ADDRESSES = (
"D0:AC:A5:BF:B7:52",
"C6:F9:84:6A:C0:8E"
)
display_column = collections.namedtuple("display_column", ("title", "data"))
display_data = {}
SIGNAL_EXIT = False
class TrainerThread:
    """Base class for the cooperative async workers.

    ``display_row`` is the row of the on-screen grid the worker writes its
    cells into (None when the worker has no dedicated row).
    """

    def __init__(self):
        self.display_row = None
class CSCTrainer(TrainerThread):
    """Worker for one BLE Cycling-Speed-and-Cadence (CSC) sensor.

    Connects to the sensor at ``address``, subscribes to notifications and
    publishes speed/distance/power (wheel sensors) or cadence (crank
    sensors) into the shared ``display_data`` grid on row ``display_row``.
    """

    def __init__(self, address: str, display_row: int):
        super().__init__()
        self.address = address
        self.display_row = display_row
        # Sensor mounting location string; code checks for "Wheel"/"Crank".
        self._location = ""
        self.should_activity_timer_run = False  # Should the activity timer be running?

    def handle_notification(self, wheel_speed: float, crank_speed: float, cumulative_rotations: int) -> None:
        """Notification callback: turn raw readings into display cells.

        Assumes wheel_speed/crank_speed are revolutions per second (None
        when that channel was not reported) and cumulative_rotations is the
        total wheel revolution count — TODO confirm against bleCSC.
        """
        global display_data
        # The activity timer should run whenever either channel shows movement.
        self.should_activity_timer_run = (wheel_speed is not None and wheel_speed > 0) or\
            (crank_speed is not None and crank_speed > 0)
        if "Wheel" in self._location and wheel_speed is not None:
            # rev/s -> km/h via the rolling length in mm.
            speed = wheel_speed * 3600. * ROLLING_LENGTH / 1e+6
            # Interpolate trainer power (W) from the speed/power curve.
            power = numpy.interp(speed, POWER_CURVE[:, 0], POWER_CURVE[:, 1])
            display_data[(self.display_row, 0)] = display_column(
                self._location,
                "{:2.0f} km/h".format(
                    wheel_speed * 3600. * ROLLING_LENGTH / 1e+6
                )
            )
            # Second column: cumulative distance (km) and estimated power.
            display_data[(self.display_row, 1)] = display_column(
                "{:6.2f} km".format(cumulative_rotations * ROLLING_LENGTH / 1e+6),
                "{:3.0f} W".format(power)
            )
        if "Crank" in self._location and crank_speed is not None:
            display_data[(self.display_row, 0)] = display_column(
                self._location,
                "{:3.0f} RPM".format(
                    crank_speed * 60.  # rev/s -> RPM
                )
            )

    async def worker(self):
        """Connect, subscribe, and pump notifications until SIGNAL_EXIT."""
        global SIGNAL_EXIT, display_data
        display_data[(self.display_row, 0)] = display_column("Connecting for Sensor:", self.address)
        sensor = bleCSC.CSCSensor()
        sensor.connect(self.address, self.handle_notification)
        display_data[(self.display_row, 0)] = display_column("Waiting for Loc'n:", self.address)
        await asyncio.sleep(0.0)
        self._location = sensor.get_location()
        display_data[(self.display_row, 0)] = display_column("Waiting for Data:", self.address)
        await asyncio.sleep(0.0)
        sensor.notifications(True)
        while not SIGNAL_EXIT:
            # Yield to the other coroutines before blocking on the sensor.
            await asyncio.sleep(0.0)
            notify_ret = await sensor.wait_for_notifications(1.0)
            if notify_ret:
                continue
            # No notification within the timeout: show "waiting" and make
            # sure the activity timer pauses.
            display_data[(self.display_row, 0)] = display_column("Waiting for Sensor:", self.address)
            self.should_activity_timer_run = False
class ActivityTimer(TrainerThread):
    """Accumulates elapsed time while any monitored sensor reports movement.

    The timer starts when any worker in ``monitor_threads`` sets
    ``should_activity_timer_run``, pauses when all are idle, and renders the
    accumulated HH:MM:SS at ``display_row``.
    """

    def __init__(self, monitor_threads: list, display_row: int):
        super().__init__()
        self.monitor_threads = monitor_threads
        self.prev_accumulated_time = 0  # seconds accumulated over earlier run periods
        self.running = False
        self.start_time = 0  # wall-clock start of the current run period
        self.display_row = display_row

    async def worker(self):
        global SIGNAL_EXIT, display_data
        while not SIGNAL_EXIT:
            if any([t.should_activity_timer_run for t in self.monitor_threads]):  # Timer should be running
                if not self.running:
                    # Transition idle -> running: remember when we started.
                    self.start_time = time.time()
                    self.running = True
                    time_to_display = self.prev_accumulated_time
                else:
                    time_to_display = self.prev_accumulated_time + time.time() - self.start_time
            else:  # Timer should not be running
                if self.running:  # Timer needs to stop
                    # Bank the elapsed period into the accumulator.
                    self.prev_accumulated_time += time.time() - self.start_time
                    self.running = False
                time_to_display = self.prev_accumulated_time
            display_data[(self.display_row, 0)] = display_column(
                "Activity Time",
                time.strftime("%H:%M:%S", time.gmtime(time_to_display))
            )
            await asyncio.sleep(SCREEN_UPDATE_DELAY)
class ScreenUpdateTrainer(TrainerThread):
    """Renders the shared ``display_data`` grid (3 rows x 2 columns) via pygame.

    Falls back to printing the cells to stdout when pygame cannot open a
    display (e.g. no framebuffer available on the development machine).
    """

    def __init__(self, thread_list):
        super().__init__()
        self.thread_list = thread_list
        self.use_pygame = True
        try:
            # Render to the secondary framebuffer — presumably a small TFT
            # HAT on a Raspberry Pi; confirm on the target hardware.
            os.putenv("SDL_FBDEV", "/dev/fb1")
            pygame.init()
            pygame.mouse.set_visible(False)
            self.screen = pygame.display.set_mode(SCREEN_SIZE)
            self.clock = pygame.time.Clock()
            self.font = pygame.font.SysFont(FONT_NAME, FONT_SIZE)
        except pygame.error:
            self.use_pygame = False

    async def worker(self):
        """Redraw every SCREEN_UPDATE_DELAY seconds; window close or ESC
        sets the global SIGNAL_EXIT flag."""
        global SIGNAL_EXIT, display_data
        while not SIGNAL_EXIT:
            if self.use_pygame:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        SIGNAL_EXIT = True
                    if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                        SIGNAL_EXIT = True
                self.screen.fill((0, 0, 0))
                for seg, seg_data in display_data.items():
                    if seg_data is not None:
                        self.draw_segment(seg, seg_data.title, seg_data.data, (255, 255, 255))
                pygame.display.flip()
            else:
                # Headless fallback: dump cells as "(row, col)\ttitle\tdata".
                for seg, seg_data in display_data.items():
                    if seg_data is not None:
                        print("{}\t{}\t{}".format(seg, seg_data.title, seg_data.data))
            await asyncio.sleep(SCREEN_UPDATE_DELAY)

    def draw_segment(self, seg: tuple, title: str, data: str, color: tuple):
        """Draw one (row, col) cell: title at top-left, data at bottom-right."""
        seg_width = WIDTH // 2
        seg_height = HEIGHT // 3
        # Inner rectangle of the cell, inset by BORDER on every side.
        x0 = seg_width * seg[1] + BORDER
        y0 = seg_height * seg[0] + BORDER
        x1 = seg_width * (seg[1] + 1) - BORDER
        y1 = seg_height * (seg[0] + 1) - BORDER
        title_text = self.font.render(title, True, color)
        self.screen.blit(title_text, (x0, y0))
        data_text = self.font.render(data, True, color)
        # Right/bottom-align the data text inside the cell.
        self.screen.blit(data_text, (x1 - data_text.get_width(), y1 - data_text.get_height()))
def run_trainer():
    """Build all workers and drive them on one asyncio loop until exit.

    One CSCTrainer per configured sensor address (screen rows 1..n), the
    activity timer on row 0, plus the screen updater. Returns after every
    worker coroutine has finished (i.e. once SIGNAL_EXIT is set).
    """
    csc_threads = list(
        [CSCTrainer(address, i + 1) for (i, address) in enumerate(CSC_SENSOR_ADDRESSES)]
    )
    all_threads = csc_threads.copy()
    all_threads.append(ActivityTimer(csc_threads, 0))
    all_threads.append(ScreenUpdateTrainer(all_threads))
    io_loop = asyncio.get_event_loop()
    tasks = list(
        [io_loop.create_task(thread.worker()) for thread in all_threads]
    )
    # Wait for *all* tasks to complete (asyncio.wait does not propagate
    # results/exceptions eagerly the way gather would).
    wait_tasks = asyncio.wait(tasks)
    io_loop.run_until_complete(wait_tasks)
    io_loop.close()


if __name__ == "__main__":
    run_trainer()
| 36.9 | 109 | 0.581165 | import asyncio
import bleCSC
import collections
import numpy
import os
import pygame
import time
ROLLING_LENGTH = 2096.
POWER_CURVE = numpy.loadtxt("power-4.csv", delimiter=",")
SCREEN_SIZE = WIDTH, HEIGHT = 320, 240
BORDER = 10
FONT_NAME = "DejaVuSans"
FONT_SIZE = 28
SCREEN_UPDATE_DELAY = 0.05
CSC_SENSOR_ADDRESSES = (
"D0:AC:A5:BF:B7:52",
"C6:F9:84:6A:C0:8E"
)
display_column = collections.namedtuple("display_column", ("title", "data"))
display_data = {}
SIGNAL_EXIT = False
class TrainerThread:
def __init__(self):
self.display_row = None
class CSCTrainer(TrainerThread):
def __init__(self, address: str, display_row: int):
super().__init__()
self.address = address
self.display_row = display_row
self._location = ""
self.should_activity_timer_run = False
def handle_notification(self, wheel_speed: float, crank_speed: float, cumulative_rotations: int) -> None:
global display_data
self.should_activity_timer_run = (wheel_speed is not None and wheel_speed > 0) or\
(crank_speed is not None and crank_speed > 0)
if "Wheel" in self._location and wheel_speed is not None:
speed = wheel_speed * 3600. * ROLLING_LENGTH / 1e+6
power = numpy.interp(speed, POWER_CURVE[:, 0], POWER_CURVE[:, 1])
display_data[(self.display_row, 0)] = display_column(
self._location,
"{:2.0f} km/h".format(
wheel_speed * 3600. * ROLLING_LENGTH / 1e+6
)
)
display_data[(self.display_row, 1)] = display_column(
"{:6.2f} km".format(cumulative_rotations * ROLLING_LENGTH / 1e+6),
"{:3.0f} W".format(power)
)
if "Crank" in self._location and crank_speed is not None:
display_data[(self.display_row, 0)] = display_column(
self._location,
"{:3.0f} RPM".format(
crank_speed * 60.
)
)
async def worker(self):
global SIGNAL_EXIT, display_data
display_data[(self.display_row, 0)] = display_column("Connecting for Sensor:", self.address)
sensor = bleCSC.CSCSensor()
sensor.connect(self.address, self.handle_notification)
display_data[(self.display_row, 0)] = display_column("Waiting for Loc'n:", self.address)
await asyncio.sleep(0.0)
self._location = sensor.get_location()
display_data[(self.display_row, 0)] = display_column("Waiting for Data:", self.address)
await asyncio.sleep(0.0)
sensor.notifications(True)
while not SIGNAL_EXIT:
await asyncio.sleep(0.0)
notify_ret = await sensor.wait_for_notifications(1.0)
if notify_ret:
continue
display_data[(self.display_row, 0)] = display_column("Waiting for Sensor:", self.address)
self.should_activity_timer_run = False
class ActivityTimer(TrainerThread):
def __init__(self, monitor_threads: list, display_row: int):
super().__init__()
self.monitor_threads = monitor_threads
self.prev_accumulated_time = 0
self.running = False
self.start_time = 0
self.display_row = display_row
async def worker(self):
global SIGNAL_EXIT, display_data
while not SIGNAL_EXIT:
if any([t.should_activity_timer_run for t in self.monitor_threads]): # Timer should be running
if not self.running:
self.start_time = time.time()
self.running = True
time_to_display = self.prev_accumulated_time
else:
time_to_display = self.prev_accumulated_time + time.time() - self.start_time
else: # Timer should not be running
if self.running: # Timer needs to stop
self.prev_accumulated_time += time.time() - self.start_time
self.running = False
time_to_display = self.prev_accumulated_time
display_data[(self.display_row, 0)] = display_column(
"Activity Time",
time.strftime("%H:%M:%S", time.gmtime(time_to_display))
)
await asyncio.sleep(SCREEN_UPDATE_DELAY)
class ScreenUpdateTrainer(TrainerThread):
def __init__(self, thread_list):
super().__init__()
self.thread_list = thread_list
self.use_pygame = True
try:
os.putenv("SDL_FBDEV", "/dev/fb1")
pygame.init()
pygame.mouse.set_visible(False)
self.screen = pygame.display.set_mode(SCREEN_SIZE)
self.clock = pygame.time.Clock()
self.font = pygame.font.SysFont(FONT_NAME, FONT_SIZE)
except pygame.error:
self.use_pygame = False
async def worker(self):
global SIGNAL_EXIT, display_data
while not SIGNAL_EXIT:
if self.use_pygame:
for event in pygame.event.get():
if event.type == pygame.QUIT:
SIGNAL_EXIT = True
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
SIGNAL_EXIT = True
self.screen.fill((0, 0, 0))
for seg, seg_data in display_data.items():
if seg_data is not None:
self.draw_segment(seg, seg_data.title, seg_data.data, (255, 255, 255))
pygame.display.flip()
else:
for seg, seg_data in display_data.items():
if seg_data is not None:
print("{}\t{}\t{}".format(seg, seg_data.title, seg_data.data))
await asyncio.sleep(SCREEN_UPDATE_DELAY)
def draw_segment(self, seg: tuple, title: str, data: str, color: tuple):
seg_width = WIDTH // 2
seg_height = HEIGHT // 3
x0 = seg_width * seg[1] + BORDER
y0 = seg_height * seg[0] + BORDER
x1 = seg_width * (seg[1] + 1) - BORDER
y1 = seg_height * (seg[0] + 1) - BORDER
title_text = self.font.render(title, True, color)
self.screen.blit(title_text, (x0, y0))
data_text = self.font.render(data, True, color)
self.screen.blit(data_text, (x1 - data_text.get_width(), y1 - data_text.get_height()))
def run_trainer():
csc_threads = list(
[CSCTrainer(address, i + 1) for (i, address) in enumerate(CSC_SENSOR_ADDRESSES)]
)
all_threads = csc_threads.copy()
all_threads.append(ActivityTimer(csc_threads, 0))
all_threads.append(ScreenUpdateTrainer(all_threads))
io_loop = asyncio.get_event_loop()
tasks = list(
[io_loop.create_task(thread.worker()) for thread in all_threads]
)
wait_tasks = asyncio.wait(tasks)
io_loop.run_until_complete(wait_tasks)
io_loop.close()
if __name__ == "__main__":
run_trainer()
| true | true |
1c311e21d5aa989de05b6b21c1dec8a37917990b | 2,965 | py | Python | test/test_melgan_layers.py | Reaiot/kiswahili_tts | 1bbbff49f7c6cf899e5e3fd4c8cb7d6a7d1b6e79 | [
"Apache-2.0"
] | 1,961 | 2020-07-31T07:31:27.000Z | 2022-03-31T20:39:29.000Z | test/test_melgan_layers.py | Reaiot/kiswahili_tts | 1bbbff49f7c6cf899e5e3fd4c8cb7d6a7d1b6e79 | [
"Apache-2.0"
] | 587 | 2020-07-31T03:24:54.000Z | 2022-03-29T02:31:50.000Z | test/test_melgan_layers.py | Reaiot/kiswahili_tts | 1bbbff49f7c6cf899e5e3fd4c8cb7d6a7d1b6e79 | [
"Apache-2.0"
] | 483 | 2020-07-31T17:48:32.000Z | 2022-03-31T13:55:49.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.models.melgan import (
TFConvTranspose1d,
TFReflectionPad1d,
TFResidualStack,
)
# Hide all GPUs so the layer tests run on CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("padding_size", [(3), (5)])
def test_padding(padding_size):
    """TFReflectionPad1d must add `padding_size` frames on each side of the
    time axis and leave batch/channel dimensions untouched."""
    fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    out = TFReflectionPad1d(padding_size=padding_size)(fake_input_1d)
    assert np.array_equal(
        tf.keras.backend.int_shape(out), [4, 8000 + 2 * padding_size, 256]
    )
@pytest.mark.parametrize(
    "filters,kernel_size,strides,padding,is_weight_norm",
    [(512, 40, 8, "same", False), (768, 15, 8, "same", True)],
)
def test_convtranpose1d(filters, kernel_size, strides, padding, is_weight_norm):
    """TFConvTranspose1d must upsample the time axis by `strides` and emit
    `filters` output channels (with and without weight normalization)."""
    fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    conv1d_transpose = TFConvTranspose1d(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        is_weight_norm=is_weight_norm,
        initializer_seed=42,
    )
    out = conv1d_transpose(fake_input_1d)
    assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000 * strides, filters])
@pytest.mark.parametrize(
    "kernel_size,filters,dilation_rate,use_bias,nonlinear_activation,nonlinear_activation_params,is_weight_norm",
    [
        (3, 256, 1, True, "LeakyReLU", {"alpha": 0.3}, True),
        (3, 256, 3, True, "ReLU", {}, False),
    ],
)
def test_residualblock(
    kernel_size,
    filters,
    dilation_rate,
    use_bias,
    nonlinear_activation,
    nonlinear_activation_params,
    is_weight_norm,
):
    """TFResidualStack must preserve the batch and time dimensions and set
    the channel count to `filters`, across activation/dilation variants."""
    fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    residual_block = TFResidualStack(
        kernel_size=kernel_size,
        filters=filters,
        dilation_rate=dilation_rate,
        use_bias=use_bias,
        nonlinear_activation=nonlinear_activation,
        nonlinear_activation_params=nonlinear_activation_params,
        is_weight_norm=is_weight_norm,
        initializer_seed=42,
    )
    out = residual_block(fake_input_1d)
    assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000, filters])
| 31.88172 | 113 | 0.707251 |
import logging
import os
import numpy as np
import pytest
import tensorflow as tf
from tensorflow_tts.models.melgan import (
TFConvTranspose1d,
TFReflectionPad1d,
TFResidualStack,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@pytest.mark.parametrize("padding_size", [(3), (5)])
def test_padding(padding_size):
fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
out = TFReflectionPad1d(padding_size=padding_size)(fake_input_1d)
assert np.array_equal(
tf.keras.backend.int_shape(out), [4, 8000 + 2 * padding_size, 256]
)
@pytest.mark.parametrize(
"filters,kernel_size,strides,padding,is_weight_norm",
[(512, 40, 8, "same", False), (768, 15, 8, "same", True)],
)
def test_convtranpose1d(filters, kernel_size, strides, padding, is_weight_norm):
fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
conv1d_transpose = TFConvTranspose1d(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
is_weight_norm=is_weight_norm,
initializer_seed=42,
)
out = conv1d_transpose(fake_input_1d)
assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000 * strides, filters])
@pytest.mark.parametrize(
    "kernel_size,filters,dilation_rate,use_bias,nonlinear_activation,nonlinear_activation_params,is_weight_norm",
    [
        (3, 256, 1, True, "LeakyReLU", {"alpha": 0.3}, True),
        (3, 256, 3, True, "ReLU", {}, False),
    ],
)
def test_residualblock(
    kernel_size,
    filters,
    dilation_rate,
    use_bias,
    nonlinear_activation,
    nonlinear_activation_params,
    is_weight_norm,
):
    """TFResidualStack must preserve batch and time dimensions and emit `filters` channels."""
    fake_input_1d = tf.random.normal(shape=[4, 8000, 256], dtype=tf.float32)
    residual_block = TFResidualStack(
        kernel_size=kernel_size,
        filters=filters,
        dilation_rate=dilation_rate,
        use_bias=use_bias,
        nonlinear_activation=nonlinear_activation,
        nonlinear_activation_params=nonlinear_activation_params,
        is_weight_norm=is_weight_norm,
        initializer_seed=42,  # fixed seed for reproducible weight init
    )
    out = residual_block(fake_input_1d)
    # Residual connections require the time axis to be unchanged.
    assert np.array_equal(tf.keras.backend.int_shape(out), [4, 8000, filters])
| true | true |
1c311e4869fadf9f2ff3dee2ed081123eb53101a | 5,290 | py | Python | train.py | endaaman/prostate | e08beb862fc61ab0bcef672ab77d2ff528259094 | [
"BSD-2-Clause"
] | null | null | null | train.py | endaaman/prostate | e08beb862fc61ab0bcef672ab77d2ff528259094 | [
"BSD-2-Clause"
] | 1 | 2020-06-12T07:59:58.000Z | 2020-06-12T07:59:59.000Z | train.py | endaaman/prostate | e08beb862fc61ab0bcef672ab77d2ff528259094 | [
"BSD-2-Clause"
] | null | null | null | import os
import math
import re
import gc
import argparse
from enum import Enum, auto
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, models
from torchvision.transforms import ToTensor, Normalize, Compose
from models import get_model
from datasets import TrainingDataset
from store import Store
from metrics import Metrics, Coef
from formula import *
from utils import now_str, pp, CrossEntropyLoss2d
# Command-line options for the training script.
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--weight')  # checkpoint (.pt) to resume from
parser.add_argument('-b', '--batch-size', type=int, default=32)
parser.add_argument('-e', '--epoch', type=int, default=100)
parser.add_argument('-t', '--tile', type=int, default=224)
parser.add_argument('-m', '--model', default='unet11')
parser.add_argument('-d', '--dest', default='weights')  # base dir for checkpoints
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--cpu', action="store_true")  # force CPU even if CUDA exists
parser.add_argument('--fake', action="store_true")  # dry run: set up, then exit
parser.add_argument('--target', default='train')
args = parser.parse_args()
# Frozen run configuration derived from the CLI arguments.
STARTING_WEIGHT = args.weight
BATCH_SIZE = args.batch_size
NUM_WORKERS = args.num_workers
EPOCH_COUNT = args.epoch
TILE_SIZE = args.tile
MODEL_NAME = args.model
DEST_BASE_DIR = args.dest
TARGET = args.target
FAKE = args.fake
USE_GPU = not args.cpu and torch.cuda.is_available()
USE_MULTI_GPU = USE_GPU and torch.cuda.device_count() > 1
# Checkpoints are written under <dest>/<model name>/.
DEST_DIR = os.path.join(DEST_BASE_DIR, MODEL_NAME)
os.makedirs(DEST_DIR, exist_ok=True)
if not os.path.isdir(DEST_DIR):
    print(f'Invalid dest dir: `{DEST_DIR}`')
    exit(1)
store = Store()
mode = ('multi' if USE_MULTI_GPU else 'single') if USE_GPU else 'cpu'
device = 'cuda' if USE_GPU else 'cpu'

# EPOCH: when resuming, the epoch is read from the trailing number in the
# weight filename (e.g. "unet11_12.pt" resumes at epoch 13).
first_epoch = 1
if STARTING_WEIGHT:
    basename = os.path.splitext(os.path.basename(STARTING_WEIGHT))[0]
    nums = re.findall(r'\d+', basename)
    # BUG FIX: re.findall(r'\d+') only ever yields digit strings, so the old
    # guard `len(nums) > 0 and not nums[-1].isdigit()` could never fire, and a
    # filename with no number crashed on nums[-1]. Reject that case instead.
    if not nums:
        print(f'Invalid pt file')
        exit(1)
    first_epoch = int(nums[-1]) + 1
    store.load(STARTING_WEIGHT)
epoch = first_epoch
print(f'Preparing MODEL:{MODEL_NAME} BATCH:{BATCH_SIZE} EPOCH:{EPOCH_COUNT} MODE:{mode} ({now_str()})')
# MODEL
Model = get_model(MODEL_NAME)
model = Model(num_classes=NUM_CLASSES).to(device)
if store.weights:
    # Resume model parameters from the loaded checkpoint.
    model.load_state_dict(store.weights)
if USE_MULTI_GPU:
    model = torch.nn.DataParallel(model)
# DATA
# Identity matrix used to one-hot encode per-pixel class indices.
I = np.identity(NUM_CLASSES, dtype=np.float32)
def transform_y(arr):
    """Convert a 4-channel label image into a one-hot class tensor.

    Mutates `arr` in place. Assumes arr is (H, W, 4) with binary-ish
    channels -- TODO confirm against TrainingDataset's label format.
    """
    arr[arr > 0] = 1  # to 1bit each color
    arr = np.sum(np.multiply(arr, (1, 2, 4, 8)), axis=2)  # to 4bit each pixel
    arr = arr - 7  # to 3bit + 1
    arr[arr < 0] = 0  # fill overrun
    # INDEX_MAP (from formula) remaps the packed code to a class index.
    return ToTensor()(I[INDEX_MAP[arr]])
data_set = TrainingDataset(
    transform_x=Compose([
        ToTensor(),
        # ImageNet normalization constants.
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ]),
    transform_y=transform_y,
    tile_size=TILE_SIZE,
    target=TARGET)
data_loader = DataLoader(data_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
# TRAIN
def lr_func_exp(step):
    """Exponential learning-rate decay factor for LambdaLR: 0.95 ** step."""
    decay_base = 0.95
    return decay_base ** step
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
if store.optims:
    # Resume optimizer state (momentum buffers) from the checkpoint.
    optimizer.load_state_dict(store.optims)
# When resuming, start the LR schedule at the resumed epoch.
scheduler = LambdaLR(optimizer, lr_lambda=lr_func_exp, last_epoch=epoch if store.optims else -1)
# criterion = nn.BCELoss()
# criterion = nn.BCEWithLogitsLoss()
criterion = CrossEntropyLoss2d()
metrics = Metrics()
if store.metrics:
    metrics.load_state_dict(store.metrics)
if FAKE:
    # --fake: exercise all setup code without actually training.
    print('STOP TRAINING')
    exit(0)
# LOOP
print(f'Starting ({now_str()})')
iter_count = len(data_set) // BATCH_SIZE
while epoch < first_epoch + EPOCH_COUNT:
    iter_metrics = Metrics()  # per-epoch accumulator
    lr = scheduler.get_lr()[0]  # current LR, used for progress display only
    for i, (inputs, labels) in enumerate(data_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs).to(device)
        loss = criterion(outputs, labels)
        coef = Coef.calc(outputs, labels)
        iter_metrics.append_loss(loss.item())
        iter_metrics.append_coef(coef)
        pp('epoch[{ep}]:{i}/{I} iou:{c.pjac:.4f} acc:{c.pdice:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
            ep=epoch, i=i+1, I=iter_count, lr=lr, t=now_str(), loss=loss.item(), c=coef))
        loss.backward()
        optimizer.step()
    # NOTE(review): this summary uses iter_metrics.avg('losses') while the
    # checkpoint below uses avg_loss() -- confirm Metrics supports both.
    pp('epoch[{ep}]:Done. iou:{c.pjac:.4f} acc:{c.pdice:.4f} gsi:{c.gsensi:.4f} gsp:{c.gspec:.4f} tsi:{c.tsensi:.4f} tsp:{c.tspec:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
        ep=epoch, t=now_str(), lr=lr, loss=iter_metrics.avg('losses'), c=iter_metrics.avg_coef()
    ))
    gc.collect()
    print()
    weight_path = os.path.join(DEST_DIR, f'{Model.__name__.lower()}_{epoch}.pt')
    # Move weights to CPU before saving so the checkpoint loads on any device.
    weights = model.module.cpu().state_dict() if USE_MULTI_GPU else model.cpu().state_dict()
    metrics.append_coef(iter_metrics.avg_coef())
    metrics.append_loss(iter_metrics.avg_loss())
    store.set_states(weights, optimizer.state_dict(), metrics.state_dict())
    store.save(weight_path)
    print(f'save weights to {weight_path}')
    model = model.to(device)  # .cpu() above moved the live model; move it back
    scheduler.step()
    epoch += 1
print(f'Finished training\n')
| 33.0625 | 177 | 0.693762 | import os
import math
import re
import gc
import argparse
from enum import Enum, auto
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, models
from torchvision.transforms import ToTensor, Normalize, Compose
from models import get_model
from datasets import TrainingDataset
from store import Store
from metrics import Metrics, Coef
from formula import *
from utils import now_str, pp, CrossEntropyLoss2d
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--weight')
parser.add_argument('-b', '--batch-size', type=int, default=32)
parser.add_argument('-e', '--epoch', type=int, default=100)
parser.add_argument('-t', '--tile', type=int, default=224)
parser.add_argument('-m', '--model', default='unet11')
parser.add_argument('-d', '--dest', default='weights')
parser.add_argument('--num-workers', type=int, default=4)
parser.add_argument('--cpu', action="store_true")
parser.add_argument('--fake', action="store_true")
parser.add_argument('--target', default='train')
args = parser.parse_args()
STARTING_WEIGHT = args.weight
BATCH_SIZE = args.batch_size
NUM_WORKERS = args.num_workers
EPOCH_COUNT = args.epoch
TILE_SIZE = args.tile
MODEL_NAME = args.model
DEST_BASE_DIR = args.dest
TARGET = args.target
FAKE = args.fake
USE_GPU = not args.cpu and torch.cuda.is_available()
USE_MULTI_GPU = USE_GPU and torch.cuda.device_count() > 1
DEST_DIR = os.path.join(DEST_BASE_DIR, MODEL_NAME)
os.makedirs(DEST_DIR, exist_ok=True)
if not os.path.isdir(DEST_DIR):
print(f'Invalid dest dir: `{DEST_DIR}`')
exit(1)
store = Store()
mode = ('multi' if USE_MULTI_GPU else 'single') if USE_GPU else 'cpu'
device = 'cuda' if USE_GPU else 'cpu'
first_epoch = 1
if STARTING_WEIGHT:
basename = os.path.splitext(os.path.basename(STARTING_WEIGHT))[0]
nums = re.findall(r'\d+', basename)
if len(nums) > 0 and not nums[-1].isdigit():
print(f'Invalid pt file')
exit(1)
first_epoch = int(nums[-1]) + 1
store.load(STARTING_WEIGHT)
epoch = first_epoch
print(f'Preparing MODEL:{MODEL_NAME} BATCH:{BATCH_SIZE} EPOCH:{EPOCH_COUNT} MODE:{mode} ({now_str()})')
Model = get_model(MODEL_NAME)
model = Model(num_classes=NUM_CLASSES).to(device)
if store.weights:
model.load_state_dict(store.weights)
if USE_MULTI_GPU:
model = torch.nn.DataParallel(model)
I = np.identity(NUM_CLASSES, dtype=np.float32)
def transform_y(arr):
arr[arr > 0] = 1
arr = np.sum(np.multiply(arr, (1, 2, 4, 8)), axis=2)
arr = arr - 7
arr[arr < 0] = 0
return ToTensor()(I[INDEX_MAP[arr]])
data_set = TrainingDataset(
transform_x=Compose([
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
]),
transform_y=transform_y,
tile_size=TILE_SIZE,
target=TARGET)
data_loader = DataLoader(data_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
def lr_func_exp(step):
return 0.95 ** step
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
if store.optims:
optimizer.load_state_dict(store.optims)
scheduler = LambdaLR(optimizer, lr_lambda=lr_func_exp, last_epoch=epoch if store.optims else -1)
criterion = CrossEntropyLoss2d()
metrics = Metrics()
if store.metrics:
metrics.load_state_dict(store.metrics)
if FAKE:
print('STOP TRAINING')
exit(0)
print(f'Starting ({now_str()})')
iter_count = len(data_set) // BATCH_SIZE
while epoch < first_epoch + EPOCH_COUNT:
iter_metrics = Metrics()
lr = scheduler.get_lr()[0]
for i, (inputs, labels) in enumerate(data_loader):
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(inputs).to(device)
loss = criterion(outputs, labels)
coef = Coef.calc(outputs, labels)
iter_metrics.append_loss(loss.item())
iter_metrics.append_coef(coef)
pp('epoch[{ep}]:{i}/{I} iou:{c.pjac:.4f} acc:{c.pdice:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
ep=epoch, i=i+1, I=iter_count, lr=lr, t=now_str(), loss=loss.item(), c=coef))
loss.backward()
optimizer.step()
pp('epoch[{ep}]:Done. iou:{c.pjac:.4f} acc:{c.pdice:.4f} gsi:{c.gsensi:.4f} gsp:{c.gspec:.4f} tsi:{c.tsensi:.4f} tsp:{c.tspec:.4f} loss:{loss:.4f} lr:{lr:.4f} ({t})'.format(
ep=epoch, t=now_str(), lr=lr, loss=iter_metrics.avg('losses'), c=iter_metrics.avg_coef()
))
gc.collect()
print()
weight_path = os.path.join(DEST_DIR, f'{Model.__name__.lower()}_{epoch}.pt')
weights = model.module.cpu().state_dict() if USE_MULTI_GPU else model.cpu().state_dict()
metrics.append_coef(iter_metrics.avg_coef())
metrics.append_loss(iter_metrics.avg_loss())
store.set_states(weights, optimizer.state_dict(), metrics.state_dict())
store.save(weight_path)
print(f'save weights to {weight_path}')
model = model.to(device)
scheduler.step()
epoch += 1
print(f'Finished training\n')
| true | true |
1c311e9e8f6b393ac57896e04339007088012c4a | 4,835 | py | Python | cli/sawtooth_cli/transaction.py | mealchain/beta | 7dc1a1aea175bfb3f1008939f098a1d58bb455a6 | [
"Apache-2.0"
] | 1 | 2017-08-04T10:31:00.000Z | 2017-08-04T10:31:00.000Z | cli/sawtooth_cli/transaction.py | mealchain/beta | 7dc1a1aea175bfb3f1008939f098a1d58bb455a6 | [
"Apache-2.0"
] | null | null | null | cli/sawtooth_cli/transaction.py | mealchain/beta | 7dc1a1aea175bfb3f1008939f098a1d58bb455a6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
from base64 import b64decode
from sawtooth_cli import format_utils as fmt
from sawtooth_cli.rest_client import RestClient
from sawtooth_cli.exceptions import CliException
from sawtooth_cli.parent_parsers import base_http_parser
from sawtooth_cli.parent_parsers import base_list_parser
from sawtooth_cli.parent_parsers import base_show_parser
def add_transaction_parser(subparsers, parent_parser):
    """Adds argument parsers for the transaction list and show commands

    Args:
        subparsers: Add parsers to this subparser object
        parent_parser: The parent argparse.ArgumentParser object
    """
    txn_parser = subparsers.add_parser('transaction')
    txn_subparsers = txn_parser.add_subparsers(title='grandchildcommands',
                                               dest='subcommand')
    txn_subparsers.required = True
    # 'transaction list' subcommand
    list_epilog = '''details:
    Lists committed transactions from newest to oldest, including their id
    (i.e. header_signature), transaction family and version, and their payload.
    '''
    txn_subparsers.add_parser(
        'list',
        epilog=list_epilog,
        parents=[base_http_parser(), base_list_parser()],
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # 'transaction show' subcommand
    show_epilog = '''details:
    Shows the data for a single transaction, or for a particular property
    within that transaction or its header. Displays data in YAML (default),
    or JSON formats.
    '''
    show_cmd = txn_subparsers.add_parser(
        'show',
        epilog=show_epilog,
        parents=[base_http_parser(), base_show_parser()],
        formatter_class=argparse.RawDescriptionHelpFormatter)
    show_cmd.add_argument(
        'transaction_id',
        type=str,
        help='the id (i.e. header_signature) of the transaction')
def do_transaction(args):
    """Runs the transaction list or show command, printing to the console

    Args:
        args: The parsed arguments sent to the command at runtime
    """
    rest_client = RestClient(args.url, args.user)
    if args.subcommand == 'list':
        transactions = rest_client.list_transactions()
        keys = ('transaction_id', 'family', 'version', 'size', 'payload')
        headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys)
        def parse_txn_row(transaction, decode=True):
            # Flatten one transaction dict into a display row; `decode`
            # controls whether the payload is shown decoded or as base64.
            decoded = b64decode(transaction['payload'])
            return (
                transaction['header_signature'],
                transaction['header']['family_name'],
                transaction['header']['family_version'],
                len(decoded),
                str(decoded) if decode else transaction['payload'])
        if args.format == 'default':
            fmt.print_terminal_table(headers, transactions, parse_txn_row)
        elif args.format == 'csv':
            fmt.print_csv(headers, transactions, parse_txn_row)
        elif args.format == 'json' or args.format == 'yaml':
            data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))}
                    for b in transactions]
            if args.format == 'yaml':
                fmt.print_yaml(data)
            elif args.format == 'json':
                fmt.print_json(data)
            else:
                # Unreachable: the enclosing branch restricts format to
                # json or yaml. Kept as a defensive guard.
                raise AssertionError('Missing handler: {}'.format(args.format))
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
    # NOTE(review): plain `if` rather than `elif` -- fine while the
    # subcommands are mutually exclusive; confirm that is intentional.
    if args.subcommand == 'show':
        output = rest_client.get_transaction(args.transaction_id)
        if args.key:
            # A key narrows output to one field, looked up first at the top
            # level of the transaction, then inside its header.
            if args.key == 'payload':
                output = b64decode(output['payload'])
            elif args.key in output:
                output = output[args.key]
            elif args.key in output['header']:
                output = output['header'][args.key]
            else:
                raise CliException(
                    'Key "{}" not found in transaction or header'.format(
                        args.key))
        if args.format == 'yaml':
            fmt.print_yaml(output)
        elif args.format == 'json':
            fmt.print_json(output)
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
| 38.373016 | 80 | 0.63061 |
import argparse
from base64 import b64decode
from sawtooth_cli import format_utils as fmt
from sawtooth_cli.rest_client import RestClient
from sawtooth_cli.exceptions import CliException
from sawtooth_cli.parent_parsers import base_http_parser
from sawtooth_cli.parent_parsers import base_list_parser
from sawtooth_cli.parent_parsers import base_show_parser
def add_transaction_parser(subparsers, parent_parser):
parser = subparsers.add_parser('transaction')
grand_parsers = parser.add_subparsers(title='grandchildcommands',
dest='subcommand')
grand_parsers.required = True
epilog = '''details:
Lists committed transactions from newest to oldest, including their id
(i.e. header_signature), transaction family and version, and their payload.
'''
grand_parsers.add_parser(
'list', epilog=epilog,
parents=[base_http_parser(), base_list_parser()],
formatter_class=argparse.RawDescriptionHelpFormatter)
epilog = '''details:
Shows the data for a single transaction, or for a particular property
within that transaction or its header. Displays data in YAML (default),
or JSON formats.
'''
show_parser = grand_parsers.add_parser(
'show', epilog=epilog,
parents=[base_http_parser(), base_show_parser()],
formatter_class=argparse.RawDescriptionHelpFormatter)
show_parser.add_argument(
'transaction_id',
type=str,
help='the id (i.e. header_signature) of the transaction')
def do_transaction(args):
rest_client = RestClient(args.url, args.user)
if args.subcommand == 'list':
transactions = rest_client.list_transactions()
keys = ('transaction_id', 'family', 'version', 'size', 'payload')
headers = tuple(k.upper() if k != 'version' else 'VERS' for k in keys)
def parse_txn_row(transaction, decode=True):
decoded = b64decode(transaction['payload'])
return (
transaction['header_signature'],
transaction['header']['family_name'],
transaction['header']['family_version'],
len(decoded),
str(decoded) if decode else transaction['payload'])
if args.format == 'default':
fmt.print_terminal_table(headers, transactions, parse_txn_row)
elif args.format == 'csv':
fmt.print_csv(headers, transactions, parse_txn_row)
elif args.format == 'json' or args.format == 'yaml':
data = [{k: d for k, d in zip(keys, parse_txn_row(b, False))}
for b in transactions]
if args.format == 'yaml':
fmt.print_yaml(data)
elif args.format == 'json':
fmt.print_json(data)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
else:
raise AssertionError('Missing handler: {}'.format(args.format))
if args.subcommand == 'show':
output = rest_client.get_transaction(args.transaction_id)
if args.key:
if args.key == 'payload':
output = b64decode(output['payload'])
elif args.key in output:
output = output[args.key]
elif args.key in output['header']:
output = output['header'][args.key]
else:
raise CliException(
'Key "{}" not found in transaction or header'.format(
args.key))
if args.format == 'yaml':
fmt.print_yaml(output)
elif args.format == 'json':
fmt.print_json(output)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
| true | true |
1c311eaff781636af920b6c76513759dd4b2e600 | 3,235 | py | Python | setup.py | Caleydo/taco_server | be2d4786fbc8ad62ecb5b599572fe09f8c2ea05e | [
"BSD-3-Clause"
] | 2 | 2017-03-30T05:12:54.000Z | 2019-07-11T09:42:06.000Z | setup.py | Caleydo/taco_server | be2d4786fbc8ad62ecb5b599572fe09f8c2ea05e | [
"BSD-3-Clause"
] | 11 | 2016-11-18T17:13:37.000Z | 2021-03-26T11:35:43.000Z | setup.py | Caleydo/taco_server | be2d4786fbc8ad62ecb5b599572fe09f8c2ea05e | [
"BSD-3-Clause"
] | 2 | 2018-01-26T09:56:41.000Z | 2019-10-26T04:41:31.000Z | ###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def read_it(name):
  """Return the UTF-8 text of *name*, resolved relative to this setup script."""
  full_path = path.join(here, name)
  with open(full_path, encoding='utf-8') as handle:
    contents = handle.read()
  return contents
# Load package.json; it is the single source of truth for package metadata
# (name, version, author, license) used throughout setup() below.
with open(path.join(here, 'package.json'), encoding='utf-8') as json_data:
  import json
  pkg = json.load(json_data)
def packaged(*files):
  """Map this package's name (from package.json) to the given data-file list.

  Used to build the setuptools `package_data` mapping. The original
  `global pkg` declaration was unnecessary: `pkg` is only read here,
  never rebound.
  """
  return {pkg['name']: list(files)}
def requirements(file):
  """Parse a pip requirements file into a list of requirement strings.

  Editable git checkouts ('-e git+https://...') are skipped, as before.
  Robustness fix: blank interior lines are now skipped too (previously
  they produced empty-string entries), and the git filter is applied to
  the stripped line so leading whitespace cannot defeat it.
  """
  stripped = (r.strip() for r in read_it(file).splitlines())
  return [r for r in stripped if r and not r.startswith('-e git+https://')]
def to_version(v):
  """Return *v* with any 'SNAPSHOT' marker replaced by a UTC build timestamp."""
  import datetime
  timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
  return v.replace('SNAPSHOT', timestamp)
# All metadata below is driven by package.json (loaded into `pkg` above).
setup(
  name=pkg['name'].lower(),
  version=to_version(pkg['version']),
  url=pkg['homepage'],
  description=pkg['description'],
  long_description=read_it('README.md'),
  long_description_content_type='text/markdown',
  keywords=pkg.get('keywords', ''),
  author=pkg['author']['name'],
  author_email=pkg['author']['email'],
  license=pkg['license'],
  zip_safe=False,
  # Phovea plugin discovery hooks: the package registers itself via its
  # module-level phovea()/phovea_config() functions.
  entry_points={
    'phovea.registry': ['{0} = {0}:phovea'.format(pkg['name'])],
    'phovea.config': ['{0} = {0}:phovea_config'.format(pkg['name'])]
  },
  # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
  classifiers=[
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    # Pick your license as you wish (should match "license" above)
    'License :: OSI Approved :: ' + ('BSD License' if pkg['license'] == 'BSD-3-Clause' else pkg['license']),
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.4'
  ],
  # You can just specify the packages manually here if your project is
  # simple. Or you can use find_packages().
  packages=find_packages(exclude=['docs', 'tests*']),
  # List run-time dependencies here. These will be installed by pip when
  # your project is installed. For an analysis of "install_requires" vs pip's
  # requirements files see:
  # https://packaging.python.org/en/latest/requirements.html
  install_requires=requirements('requirements.txt'),
  tests_require=requirements('requirements_dev.txt'),
  # If there are data files included in your packages that need to be
  # installed, specify them here. If using Python 2.6 or less, then these
  # have to be included in MANIFEST.in as well.
  package_data=packaged('config.json', 'buildInfo.json'),
  # Although 'package_data' is the preferred approach, in some case you may
  # need to place data files outside of your packages. See:
  # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
  # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
  data_files=[]  # [('my_data', ['data/data_file'])],
)
| 34.414894 | 108 | 0.662442 | true | true | |
1c311ef6c77d2d2c696899f5c5518bbfbe901764 | 1,778 | py | Python | tests/integration/preview/acc_security/service/test_verification_check.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | 1 | 2019-12-30T21:46:55.000Z | 2019-12-30T21:46:55.000Z | tests/integration/preview/acc_security/service/test_verification_check.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | null | null | null | tests/integration/preview/acc_security/service/test_verification_check.py | fefi95/twilio-python | b9bfea293b6133fe84d4d8d3ac4e2a75381c3881 | [
"MIT"
] | null | null | null | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class VerificationCheckTestCase(IntegrationTestCase):
    """Holodeck-mocked integration tests for the VerificationCheck resource."""
    def test_create_request(self):
        """create() issues the expected POST and surfaces an HTTP 500 as TwilioException."""
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .verification_checks.create(code="code")
        values = {'Code': "code", }
        # The request must target the service's VerificationCheck endpoint
        # with the code in the form body.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Verification/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/VerificationCheck',
            data=values,
        ))
    def test_verification_checks_response(self):
        """A 201 response with a canned payload must parse into a non-None instance."""
        self.holodeck.mock(Response(
            201,
            '''
            {
                "sid": "VEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "to": "+15017122661",
                "channel": "sms",
                "status": "approved",
                "valid": false,
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z"
            }
            '''
        ))
        actual = self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .verification_checks.create(code="code")
        self.assertIsNotNone(actual)
| 32.925926 | 116 | 0.577615 |
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class VerificationCheckTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.verification_checks.create(code="code")
values = {'Code': "code", }
self.holodeck.assert_has_request(Request(
'post',
'https://preview.twilio.com/Verification/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/VerificationCheck',
data=values,
))
def test_verification_checks_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "VEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"to": "+15017122661",
"channel": "sms",
"status": "approved",
"valid": false,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z"
}
'''
))
actual = self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.verification_checks.create(code="code")
self.assertIsNotNone(actual)
| true | true |
1c311f75778dffa8637a08e97bcd150cd5fab9d0 | 3,184 | py | Python | gpt-example/deps/gpt/src/sample.py | Antaego/gpt-companion | 071d1218661cb8dddfd31d50da91c1af7a9be21b | [
"Unlicense"
] | null | null | null | gpt-example/deps/gpt/src/sample.py | Antaego/gpt-companion | 071d1218661cb8dddfd31d50da91c1af7a9be21b | [
"Unlicense"
] | null | null | null | gpt-example/deps/gpt/src/sample.py | Antaego/gpt-companion | 071d1218661cb8dddfd31d50da91c1af7a9be21b | [
"Unlicense"
] | null | null | null | import tensorflow as tf
from deps.gpt.src import model
def top_k_logits(logits, k):
    """Mask all but the k highest logits per row with -1e10 (top-k filtering).

    k == 0 means no truncation. The eager Python check handles a static
    int k; the tf.cond below presumably covers a tensor-valued k -- TODO
    confirm that callers ever pass a tensor here.
    """
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        # Smallest retained logit per row; new axis kept for broadcasting.
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )
    return tf.cond(
       tf.equal(k, 0),
       lambda: logits,
       lambda: _top_k(),
    )
def top_p_logits(logits, p):
    """Nucleus sampling: mask logits outside the smallest set whose
    cumulative softmax probability covers `p`, pushing them to -1e10."""
    batch, _ = logits.shape.as_list()
    # Sort each row descending and accumulate probability mass.
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch),
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    # Smallest logit still inside the nucleus, per batch row.
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    )
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=1):
    """Autoregressively sample `length` tokens from the language model.

    Exactly one of `start_token` / `context` may be given; `start_token`
    is expanded into a one-token context per batch element. Returns the
    int32 token tensor containing the seed context followed by the
    sampled tokens.
    """
    if start_token is None:
        assert context is not None, 'Specify exactly one of start_token and context!'
    else:
        assert context is None, 'Specify exactly one of start_token and context!'
        context = tf.fill([batch_size, 1], start_token)

    def step(hparams, tokens, past=None):
        # One transformer forward pass; `past` carries cached key/values so
        # each step only processes the newly generated tokens.
        lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
        logits = lm_output['logits'][:, :, :hparams.n_vocab]
        presents = lm_output['present']
        presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
        return {
            'logits': logits,
            'presents': presents,
        }

    with tf.name_scope('sample_sequence'):
        def body(past, prev, output):
            # Sample one token: temperature-scale the last-position logits,
            # apply top-k and top-p filtering, then draw from the result.
            next_outputs = step(hparams, prev, past=past)
            logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
            logits = top_k_logits(logits, k=top_k)
            logits = top_p_logits(logits, p=top_p)
            samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
            return [
                next_outputs['presents'] if past is None else tf.concat([past, next_outputs['presents']], axis=-2),
                samples,
                tf.concat([output, samples], axis=1)
            ]

        # Prime the loop with one body() call so `past` is non-None inside
        # tf.while_loop (its shape invariant requires a concrete tensor).
        past, prev, output = body(None, context, context)

        def cond(*args):
            # Termination is driven entirely by maximum_iterations below.
            return True

        _, _, tokens = tf.while_loop(
            cond=cond, body=body,
            maximum_iterations=length - 1,
            loop_vars=[
                past,
                prev,
                output
            ],
            shape_invariants=[
                tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
                tf.TensorShape([batch_size, None]),
                tf.TensorShape([batch_size, None]),
            ],
            back_prop=False,
        )

        return tokens
| 33.166667 | 122 | 0.576947 | import tensorflow as tf
from deps.gpt.src import model
def top_k_logits(logits, k):
if k == 0:
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
def top_p_logits(logits, p):
batch, _ = logits.shape.as_list()
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=1):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
with tf.name_scope('sample_sequence'):
def body(past, prev, output):
next_outputs = step(hparams, prev, past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
logits = top_k_logits(logits, k=top_k)
logits = top_p_logits(logits, p=top_p)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
return [
next_outputs['presents'] if past is None else tf.concat([past, next_outputs['presents']], axis=-2),
samples,
tf.concat([output, samples], axis=1)
]
past, prev, output = body(None, context, context)
def cond(*args):
return True
_, _, tokens = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length - 1,
loop_vars=[
past,
prev,
output
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size, None]),
tf.TensorShape([batch_size, None]),
],
back_prop=False,
)
return tokens
| true | true |
1c311fbb1c23401c4ba13b9d5683ed866307cd23 | 78,928 | py | Python | openmc/lattice.py | openmsr/openmc | 831c8d1c50cb4441faf8a0268ec59f6f803bb258 | [
"MIT"
] | null | null | null | openmc/lattice.py | openmsr/openmc | 831c8d1c50cb4441faf8a0268ec59f6f803bb258 | [
"MIT"
] | null | null | null | openmc/lattice.py | openmsr/openmc | 831c8d1c50cb4441faf8a0268ec59f6f803bb258 | [
"MIT"
] | null | null | null | from abc import ABC
from collections import OrderedDict
from collections.abc import Iterable
from copy import deepcopy
from math import sqrt, floor
from numbers import Real
import types
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.checkvalue as cv
from ._xml import get_text
from .mixin import IDManagerMixin
class Lattice(IDManagerMixin, ABC):
"""A repeating structure wherein each element is a universe.
Parameters
----------
lattice_id : int, optional
Unique identifier for the lattice. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the lattice. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the lattice
name : str
Name of the lattice
pitch : Iterable of float
Pitch of the lattice in each direction in cm
outer : openmc.Universe
A universe to fill all space outside the lattice
universes : Iterable of Iterable of openmc.Universe
A two-or three-dimensional list/array of universes filling each element
of the lattice
"""
    next_id = 1
    # Reuse the Universe ID registry so lattice IDs are drawn from the same
    # pool as universe IDs (a lattice may not share an ID with a universe).
    used_ids = openmc.Universe.used_ids
    def __init__(self, lattice_id=None, name=''):
        # Initialize Lattice class attributes
        # ``id`` and ``name`` go through property setters for validation and
        # automatic ID assignment; geometry data starts out unset.
        self.id = lattice_id
        self.name = name
        self._pitch = None
        self._outer = None
        self._universes = None
    @property
    def name(self):
        return self._name
    @property
    def pitch(self):
        return self._pitch
    @property
    def outer(self):
        return self._outer
    @property
    def universes(self):
        return self._universes
    @name.setter
    def name(self, name):
        # A name of None is normalized to the empty string
        if name is not None:
            cv.check_type('lattice name', name, str)
            self._name = name
        else:
            self._name = ''
    @outer.setter
    def outer(self, outer):
        # The outer universe fills all space beyond the lattice's extent
        cv.check_type('outer universe', outer, openmc.Universe)
        self._outer = outer
@staticmethod
def from_hdf5(group, universes):
"""Create lattice from HDF5 group
Parameters
----------
group : h5py.Group
Group in HDF5 file
universes : dict
Dictionary mapping universe IDs to instances of
:class:`openmc.Universe`.
Returns
-------
openmc.Lattice
Instance of lattice subclass
"""
lattice_type = group['type'][()].decode()
if lattice_type == 'rectangular':
return openmc.RectLattice.from_hdf5(group, universes)
elif lattice_type == 'hexagonal':
return openmc.HexLattice.from_hdf5(group, universes)
else:
raise ValueError(f'Unknown lattice type: {lattice_type}')
def get_unique_universes(self):
"""Determine all unique universes in the lattice
Returns
-------
universes : collections.OrderedDict
Dictionary whose keys are universe IDs and values are
:class:`openmc.Universe` instances
"""
univs = OrderedDict()
for k in range(len(self._universes)):
for j in range(len(self._universes[k])):
if isinstance(self._universes[k][j], openmc.Universe):
u = self._universes[k][j]
univs[u._id] = u
else:
for i in range(len(self._universes[k][j])):
u = self._universes[k][j][i]
assert isinstance(u, openmc.Universe)
univs[u._id] = u
if self.outer is not None:
univs[self.outer._id] = self.outer
return univs
def get_nuclides(self):
"""Returns all nuclides in the lattice
Returns
-------
nuclides : list of str
List of nuclide names
"""
nuclides = []
# Get all unique Universes contained in each of the lattice cells
unique_universes = self.get_unique_universes()
# Append all Universes containing each cell to the dictionary
for universe in unique_universes.values():
for nuclide in universe.get_nuclides():
if nuclide not in nuclides:
nuclides.append(nuclide)
return nuclides
def get_all_cells(self, memo=None):
"""Return all cells that are contained within the lattice
Returns
-------
cells : collections.OrderedDict
Dictionary whose keys are cell IDs and values are :class:`Cell`
instances
"""
cells = OrderedDict()
if memo and self in memo:
return cells
if memo is not None:
memo.add(self)
unique_universes = self.get_unique_universes()
for universe in unique_universes.values():
cells.update(universe.get_all_cells(memo))
return cells
def get_all_materials(self, memo=None):
"""Return all materials that are contained within the lattice
Returns
-------
materials : collections.OrderedDict
Dictionary whose keys are material IDs and values are
:class:`Material` instances
"""
materials = OrderedDict()
# Append all Cells in each Cell in the Universe to the dictionary
cells = self.get_all_cells(memo)
for cell in cells.values():
materials.update(cell.get_all_materials(memo))
return materials
def get_all_universes(self):
"""Return all universes that are contained within the lattice
Returns
-------
universes : collections.OrderedDict
Dictionary whose keys are universe IDs and values are
:class:`Universe` instances
"""
# Initialize a dictionary of all Universes contained by the Lattice
# in each nested Universe level
all_universes = OrderedDict()
# Get all unique Universes contained in each of the lattice cells
unique_universes = self.get_unique_universes()
# Add the unique Universes filling each Lattice cell
all_universes.update(unique_universes)
# Append all Universes containing each cell to the dictionary
for universe in unique_universes.values():
all_universes.update(universe.get_all_universes())
return all_universes
def get_universe(self, idx):
r"""Return universe corresponding to a lattice element index
Parameters
----------
idx : Iterable of int
Lattice element indices. For a rectangular lattice, the indices are
given in the :math:`(x,y)` or :math:`(x,y,z)` coordinate system. For
hexagonal lattices, they are given in the :math:`x,\alpha` or
:math:`x,\alpha,z` coordinate systems for "y" orientations and
:math:`\alpha,y` or :math:`\alpha,y,z` coordinate systems for "x"
orientations.
Returns
-------
openmc.Universe
Universe with given indices
"""
idx_u = self.get_universe_index(idx)
if self.ndim == 2:
return self.universes[idx_u[0]][idx_u[1]]
else:
return self.universes[idx_u[0]][idx_u[1]][idx_u[2]]
def find(self, point):
"""Find cells/universes/lattices which contain a given point
Parameters
----------
point : 3-tuple of float
Cartesian coordinates of the point
Returns
-------
list
Sequence of universes, cells, and lattices which are traversed to
find the given point
"""
idx, p = self.find_element(point)
if self.is_valid_index(idx):
u = self.get_universe(idx)
else:
if self.outer is not None:
u = self.outer
else:
return []
return [(self, idx)] + u.find(p)
    def clone(self, clone_materials=True, clone_regions=True, memo=None):
        """Create a copy of this lattice with a new unique ID, and clones
        all universes within this lattice.
        Parameters
        ----------
        clone_materials : bool
            Whether to create separate copies of the materials filling cells
            contained in this lattice and its outer universe.
        clone_regions : bool
            Whether to create separate copies of the regions bounding cells
            contained in this lattice and its outer universe.
        memo : dict or None
            A nested dictionary of previously cloned objects. This parameter
            is used internally and should not be specified by the user.
        Returns
        -------
        clone : openmc.Lattice
            The clone of this lattice
        """
        if memo is None:
            memo = {}
        # If no memoize'd clone exists, instantiate one
        if self not in memo:
            # deepcopy also duplicates the universe references; they are
            # replaced below with proper clones so that the memo is honored.
            clone = deepcopy(self)
            clone.id = None
            if self.outer is not None:
                clone.outer = self.outer.clone(clone_materials, clone_regions,
                                               memo)
            # Assign universe clones to the lattice clone
            for i in self.indices:
                # RectLattice stores universes in a NumPy array, which accepts
                # a tuple subscript; HexLattice uses nested (ragged) lists.
                if isinstance(self, RectLattice):
                    clone.universes[i] = self.universes[i].clone(
                        clone_materials, clone_regions, memo)
                else:
                    if self.ndim == 2:
                        clone.universes[i[0]][i[1]] = \
                            self.universes[i[0]][i[1]].clone(clone_materials,
                                clone_regions, memo)
                    else:
                        clone.universes[i[0]][i[1]][i[2]] = \
                            self.universes[i[0]][i[1]][i[2]].clone(
                                clone_materials, clone_regions, memo)
            # Memoize the clone
            memo[self] = clone
        return memo[self]
class RectLattice(Lattice):
"""A lattice consisting of rectangular prisms.
To completely define a rectangular lattice, the
:attr:`RectLattice.lower_left` :attr:`RectLattice.pitch`,
:attr:`RectLattice.outer`, and :attr:`RectLattice.universes` properties need
to be set.
Most methods for this class use a natural indexing scheme wherein elements
are assigned an index corresponding to their position relative to the
(x,y,z) axes in a Cartesian coordinate system, i.e., an index of (0,0,0) in
the lattice gives the element whose x, y, and z coordinates are the
smallest. However, note that when universes are assigned to lattice elements
using the :attr:`RectLattice.universes` property, the array indices do not
correspond to natural indices.
Parameters
----------
lattice_id : int, optional
Unique identifier for the lattice. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the lattice. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the lattice
name : str
Name of the lattice
pitch : Iterable of float
Pitch of the lattice in the x, y, and (if applicable) z directions in
cm.
outer : openmc.Universe
A universe to fill all space outside the lattice
universes : Iterable of Iterable of openmc.Universe
A two- or three-dimensional list/array of universes filling each element
of the lattice. The first dimension corresponds to the z-direction (if
applicable), the second dimension corresponds to the y-direction, and
the third dimension corresponds to the x-direction. Note that for the
y-direction, a higher index corresponds to a lower physical
y-value. Each z-slice in the array can be thought of as a top-down view
of the lattice.
lower_left : Iterable of float
The Cartesian coordinates of the lower-left corner of the lattice. If
the lattice is two-dimensional, only the x- and y-coordinates are
specified.
indices : list of tuple
A list of all possible (z,y,x) or (y,x) lattice element indices. These
indices correspond to indices in the :attr:`RectLattice.universes`
property.
ndim : int
The number of dimensions of the lattice
shape : Iterable of int
An array of two or three integers representing the number of lattice
cells in the x- and y- (and z-) directions, respectively.
"""
    def __init__(self, lattice_id=None, name=''):
        super().__init__(lattice_id, name)
        # Initialize Lattice class attributes
        # Lower-left corner is assigned later through the ``lower_left`` property
        self._lower_left = None
    def __repr__(self):
        """Return a human-readable summary of the lattice and its universe map."""
        string = 'RectLattice\n'
        string += '{: <16}=\t{}\n'.format('\tID', self._id)
        string += '{: <16}=\t{}\n'.format('\tName', self._name)
        string += '{: <16}=\t{}\n'.format('\tShape', self.shape)
        string += '{: <16}=\t{}\n'.format('\tLower Left', self._lower_left)
        string += '{: <16}=\t{}\n'.format('\tPitch', self._pitch)
        string += '{: <16}=\t{}\n'.format(
            '\tOuter', self._outer._id if self._outer is not None else None)
        string += '{: <16}\n'.format('\tUniverses')
        # Lattice nested Universe IDs
        for i, universe in enumerate(np.ravel(self._universes)):
            string += f'{universe._id} '
            # Add a newline character every time we reach end of row of cells
            # (self.shape[0] is the number of elements in the x direction)
            if (i + 1) % self.shape[0] == 0:
                string += '\n'
        string = string.rstrip('\n')
        return string
    @property
    def indices(self):
        # Enumerate every storage index as a tuple: (y, x) for 2D lattices,
        # (z, y, x) for 3D.  Broadcasting the open grids yields the tuples in
        # row-major order, matching the layout of the universes array.
        if self.ndim == 2:
            return list(np.broadcast(*np.ogrid[
                :self.shape[1], :self.shape[0]]))
        else:
            return list(np.broadcast(*np.ogrid[
                :self.shape[2], :self.shape[1], :self.shape[0]]))
    @property
    def _natural_indices(self):
        """Iterate over all possible (x,y) or (x,y,z) lattice element indices.
        This property is used when constructing distributed cell and material
        paths. Most importantly, the iteration order matches that used on the
        Fortran side.
        """
        # x varies fastest, then y, then z
        if self.ndim == 2:
            nx, ny = self.shape
            for iy in range(ny):
                for ix in range(nx):
                    yield (ix, iy)
        else:
            nx, ny, nz = self.shape
            for iz in range(nz):
                for iy in range(ny):
                    for ix in range(nx):
                        yield (ix, iy, iz)
    @property
    def lower_left(self):
        return self._lower_left
    @property
    def ndim(self):
        # Dimensionality is inferred from the pitch: 2 entries -> 2D, 3 -> 3D
        if self.pitch is not None:
            return len(self.pitch)
        else:
            raise ValueError('Number of dimensions cannot be determined until '
                             'the lattice pitch has been set.')
    @property
    def shape(self):
        # Universes are stored (z, y, x) (or (y, x) in 2D); reverse to report
        # the natural (x, y, z) ordering.
        return self._universes.shape[::-1]
    @lower_left.setter
    def lower_left(self, lower_left):
        cv.check_type('lattice lower left corner', lower_left, Iterable, Real)
        cv.check_length('lattice lower left corner', lower_left, 2, 3)
        self._lower_left = lower_left
    @Lattice.pitch.setter
    def pitch(self, pitch):
        # Pitch must be strictly positive in every direction
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 2, 3)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0.0)
        self._pitch = pitch
    @Lattice.universes.setter
    def universes(self, universes):
        cv.check_iterable_type('lattice universes', universes, openmc.UniverseBase,
                               min_depth=2, max_depth=3)
        # Store as a NumPy array so shape/tuple indexing work uniformly
        self._universes = np.asarray(universes)
def find_element(self, point):
"""Determine index of lattice element and local coordinates for a point
Parameters
----------
point : Iterable of float
Cartesian coordinates of point
Returns
-------
2- or 3-tuple of int
A tuple of the corresponding (x,y,z) lattice element indices
3-tuple of float
Carestian coordinates of the point in the corresponding lattice
element coordinate system
"""
ix = floor((point[0] - self.lower_left[0])/self.pitch[0])
iy = floor((point[1] - self.lower_left[1])/self.pitch[1])
if self.ndim == 2:
idx = (ix, iy)
else:
iz = floor((point[2] - self.lower_left[2])/self.pitch[2])
idx = (ix, iy, iz)
return idx, self.get_local_coordinates(point, idx)
def get_local_coordinates(self, point, idx):
"""Determine local coordinates of a point within a lattice element
Parameters
----------
point : Iterable of float
Cartesian coordinates of point
idx : Iterable of int
(x,y,z) indices of lattice element. If the lattice is 2D, the z
index can be omitted.
Returns
-------
3-tuple of float
Cartesian coordinates of point in the lattice element coordinate
system
"""
x = point[0] - (self.lower_left[0] + (idx[0] + 0.5)*self.pitch[0])
y = point[1] - (self.lower_left[1] + (idx[1] + 0.5)*self.pitch[1])
if self.ndim == 2:
z = point[2]
else:
z = point[2] - (self.lower_left[2] + (idx[2] + 0.5)*self.pitch[2])
return (x, y, z)
def get_universe_index(self, idx):
"""Return index in the universes array corresponding
to a lattice element index
Parameters
----------
idx : Iterable of int
Lattice element indices in the :math:`(x,y,z)` coordinate system
Returns
-------
2- or 3-tuple of int
Indices used when setting the :attr:`RectLattice.universes` property
"""
max_y = self.shape[1] - 1
if self.ndim == 2:
x, y = idx
return (max_y - y, x)
else:
x, y, z = idx
return (z, max_y - y, x)
def is_valid_index(self, idx):
"""Determine whether lattice element index is within defined range
Parameters
----------
idx : Iterable of int
Lattice element indices in the :math:`(x,y,z)` coordinate system
Returns
-------
bool
Whether index is valid
"""
if self.ndim == 2:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1])
else:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1] and
0 <= idx[2] < self.shape[2])
def discretize(self, strategy="degenerate",
universes_to_ignore=[],
materials_to_clone=[],
lattice_neighbors=[], key=lambda univ: univ.id):
"""Discretize the lattice with either a degenerate or a local neighbor
symmetry strategy
'Degenerate' clones every universe in the lattice, thus making them all
uniquely defined. This is typically required if depletion or thermal
hydraulics will make every universe's environment unique.
'Local neighbor symmetry' groups universes with similar neighborhoods.
These clusters of cells and materials provide increased convergence
speed to multi-group cross sections tallies. The user can specify
the lattice's neighbors to discriminate between two sides of a
lattice for example.
Parameters
----------
strategy : {'degenerate', 'lns'}
Which strategy to adopt when discretizing the lattice
universes_to_ignore : Iterable of Universe
Lattice universes that need not be discretized
materials_to_clone : Iterable of Material
List of materials that should be cloned when discretizing
lattice_neighbors : Iterable of Universe
List of the lattice's neighbors. By default, if present, the
lattice outer universe will be used. The neighbors should be
ordered as follows [top left, top, top right, left, right,
bottom left, bottom, bottom right]
key : function
Function of argument a universe that is used to extract a
comparison key. This function will be called on each universe's
neighbors in the lattice to form a neighbor pattern. This pattern
is then used to identify unique neighbor symmetries.
"""
# Check routine inputs
if self.ndim != 2:
raise NotImplementedError("LNS discretization is not implemented "
"for 1D and 3D lattices")
cv.check_value('strategy', strategy, ('degenerate', 'lns'))
cv.check_type('universes_to_ignore', universes_to_ignore, Iterable,
openmc.Universe)
cv.check_type('materials_to_clone', materials_to_clone, Iterable,
openmc.Material)
cv.check_type('lattice_neighbors', lattice_neighbors, Iterable,
openmc.Universe)
cv.check_value('number of lattice_neighbors', len(lattice_neighbors),
(0, 8))
cv.check_type('key', key, types.FunctionType)
# Use outer universe if neighbors are missing and outer is defined
if self.outer is not None and len(lattice_neighbors) == 0:
lattice_neighbors = [key(self.outer) for i in range(8)]
elif len(lattice_neighbors) == 8:
lattice_neighbors = [key(universe) for universe in
lattice_neighbors]
# Dictionary that will keep track of where each pattern appears, how
# it was rotated and/or symmetrized
patterns = {}
# Initialize pattern array
pattern = np.empty(shape=(3, 3), dtype=type(key(self.universes[0][0])))
# Define an auxiliary function that returns a universe's neighbors
# that are outside the lattice
def find_edge_neighbors(pattern, i, j):
# If no neighbors have been specified, start with an empty array
if len(lattice_neighbors) == 0:
return
# Left edge
if i == 0:
pattern[:, 0] = lattice_neighbors[3]
if j == 0:
pattern[0, 0] = lattice_neighbors[0]
elif j == self.shape[1] - 1:
pattern[2, 0] = lattice_neighbors[5]
# Bottom edge
if j == 0:
pattern[0, 1] = lattice_neighbors[1]
if i != 0:
pattern[0, 0] = lattice_neighbors[1]
if i != self.shape[0] - 1:
pattern[0, 2] = lattice_neighbors[1]
# Right edge
if i == self.shape[0] - 1:
pattern[:, 2] = lattice_neighbors[4]
if j == 0:
pattern[0, 2] = lattice_neighbors[2]
elif j == self.shape[1] - 1:
pattern[2, 2] = lattice_neighbors[7]
# Top edge
if j == self.shape[1] - 1:
pattern[2, 1] = lattice_neighbors[6]
if i != 0:
pattern[2, 0] = lattice_neighbors[6]
if i != self.shape[0] - 1:
pattern[2, 2] = lattice_neighbors[6]
# Define an auxiliary function that returns a universe's neighbors
# among the universes inside the lattice
def find_lattice_neighbors(pattern, i, j):
# Away from left edge
if i != 0:
if j > 0:
pattern[0, 0] = key(self.universes[j-1][i-1])
pattern[1, 0] = key(self.universes[j][i-1])
if j < self.shape[1] - 1:
pattern[2, 0] = key(self.universes[j+1][i-1])
# Away from bottom edge
if j != 0:
if i > 0:
pattern[0, 0] = key(self.universes[j-1][i-1])
pattern[0, 1] = key(self.universes[j-1][i])
if i < self.shape[0] - 1:
pattern[0, 2] = key(self.universes[j-1][i+1])
# Away from right edge
if i != self.shape[0] - 1:
if j > 0:
pattern[0, 2] = key(self.universes[j-1][i+1])
pattern[1, 2] = key(self.universes[j][i+1])
if j < self.shape[1] - 1:
pattern[2, 2] = key(self.universes[j+1][i+1])
# Away from top edge
if j != self.shape[1] - 1:
if i > 0:
pattern[2, 0] = key(self.universes[j+1][i-1])
pattern[2, 1] = key(self.universes[j+1][i])
if i < self.shape[0] - 1:
pattern[2, 2] = key(self.universes[j+1][i+1])
# Analyze lattice, find unique patterns in groups of universes
for j in range(self.shape[1]):
for i in range(self.shape[0]):
# Skip universes to ignore
if self.universes[j][i] in universes_to_ignore:
continue
# Create a neighborhood pattern based on the universe's
# neighbors in the grid, and lattice's neighbors at the edges
# Degenerate discretization has all universes be different
if strategy == "degenerate":
patterns[(i, j)] = {'locations': [(i, j)]}
continue
# Find neighbors among lattice's neighbors at the edges
find_edge_neighbors(pattern, i, j)
# Find neighbors among the lattice's universes
find_lattice_neighbors(pattern, i, j)
pattern[1, 1] = key(self.universes[j][i])
# Look for pattern in dictionary of patterns found
found = False
for known_pattern, pattern_data in patterns.items():
# Look at all rotations of pattern
for rot in range(4):
if not found and tuple(map(tuple, pattern)) ==\
known_pattern:
found = True
# Save location of the pattern in the lattice
pattern_data['locations'].append((i, j))
# Rotate pattern
pattern = np.rot90(pattern)
# Look at transpose of pattern and its rotations
pattern = np.transpose(pattern)
for rot in range(4):
if not found and tuple(map(tuple, pattern)) ==\
known_pattern:
found = True
# Save location of the pattern in the lattice
pattern_data['locations'].append((i, j))
# Rotate pattern
pattern = np.rot90(pattern)
# Transpose pattern back for the next search
pattern = np.transpose(pattern)
# Create new pattern and add to the patterns dictionary
if not found:
patterns[tuple(map(tuple, pattern))] =\
{'locations': [(i, j)]}
# Discretize lattice
for pattern, pattern_data in patterns.items():
first_pos = pattern_data['locations'][0]
# Create a clone of the universe, without cloning materials
new_universe = self.universes[first_pos[1]][first_pos[0]].clone(
clone_materials=False, clone_regions=False)
# Replace only the materials in materials_to_clone
for material in materials_to_clone:
material_cloned = False
for cell in new_universe.get_all_cells().values():
if cell.fill_type == 'material':
if cell.fill.id == material.id:
# Only a single clone of each material is necessary
if not material_cloned:
material_clone = material.clone()
material_cloned = True
cell.fill = material_clone
elif cell.fill_type == 'distribmat':
raise(ValueError, "Lattice discretization should not "
"be used with distributed materials")
elif len(cell.temperature) > 1 or len(cell.fill) > 1:
raise(ValueError, "Lattice discretization should not "
"be used with distributed cells")
# Rebuild lattice from list of locations with this pattern
for index, location in enumerate(pattern_data['locations']):
self.universes[location[1]][location[0]] = new_universe
    def create_xml_subelement(self, xml_element, memo=None):
        """Add the lattice xml representation to an incoming xml element
        Parameters
        ----------
        xml_element : xml.etree.ElementTree.Element
            XML element to be added to
        memo : set or None
            A set of object id's representing geometry entities already
            written to the xml_element. This parameter is used internally
            and should not be specified by users.
        Returns
        -------
        None
        """
        # If the element already contains the Lattice subelement, then return
        if memo and self in memo:
            return
        if memo is not None:
            memo.add(self)
        lattice_subelement = ET.Element("lattice")
        lattice_subelement.set("id", str(self._id))
        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))
        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))
        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = str(self._outer._id)
            self._outer.create_xml_subelement(xml_element, memo)
        # Export Lattice cell dimensions
        dimension = ET.SubElement(lattice_subelement, "dimension")
        dimension.text = ' '.join(map(str, self.shape))
        # Export Lattice lower left
        lower_left = ET.SubElement(lattice_subelement, "lower_left")
        lower_left.text = ' '.join(map(str, self._lower_left))
        # Export the Lattice nested Universe IDs - column major for Fortran
        # (x varies fastest, so each text line is one row of the lattice)
        universe_ids = '\n'
        # 3D Lattices
        if self.ndim == 3:
            for z in range(self.shape[2]):
                for y in range(self.shape[1]):
                    for x in range(self.shape[0]):
                        universe = self._universes[z][y][x]
                        # Append Universe ID to the Lattice XML subelement
                        universe_ids += f'{universe._id} '
                        # Create XML subelement for this Universe
                        universe.create_xml_subelement(xml_element, memo)
                    # Add newline character when we reach end of row of cells
                    universe_ids += '\n'
                # Add newline character when we reach end of row of cells
                universe_ids += '\n'
        # 2D Lattices
        else:
            for y in range(self.shape[1]):
                for x in range(self.shape[0]):
                    universe = self._universes[y][x]
                    # Append Universe ID to Lattice XML subelement
                    universe_ids += f'{universe._id} '
                    # Create XML subelement for this Universe
                    universe.create_xml_subelement(xml_element, memo)
                # Add newline character when we reach end of row of cells
                universe_ids += '\n'
        # Remove trailing newline character from Universe IDs string
        universe_ids = universe_ids.rstrip('\n')
        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = universe_ids
        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
    @classmethod
    def from_xml_element(cls, elem, get_universe):
        """Generate rectangular lattice from XML element
        Parameters
        ----------
        elem : xml.etree.ElementTree.Element
            `<lattice>` element
        get_universe : function
            Function returning universe (defined in
            :meth:`openmc.Geometry.from_xml`)
        Returns
        -------
        RectLattice
            Rectangular lattice
        """
        lat_id = int(get_text(elem, 'id'))
        name = get_text(elem, 'name')
        lat = cls(lat_id, name)
        lat.lower_left = [float(i)
                          for i in get_text(elem, 'lower_left').split()]
        lat.pitch = [float(i) for i in get_text(elem, 'pitch').split()]
        outer = get_text(elem, 'outer')
        if outer is not None:
            lat.outer = get_universe(int(outer))
        # Get array of universes
        # The <dimension> element lists extents in (x, y[, z]) order; the
        # universes array is stored (z, y, x), hence the reversal.
        dimension = get_text(elem, 'dimension').split()
        shape = np.array(dimension, dtype=int)[::-1]
        uarray = np.array([get_universe(int(i)) for i in
                           get_text(elem, 'universes').split()])
        uarray.shape = shape
        lat.universes = uarray
        return lat
    @classmethod
    def from_hdf5(cls, group, universes):
        """Create rectangular lattice from HDF5 group
        Parameters
        ----------
        group : h5py.Group
            Group in HDF5 file
        universes : dict
            Dictionary mapping universe IDs to instances of
            :class:`openmc.Universe`.
        Returns
        -------
        openmc.RectLattice
            Rectangular lattice
        """
        dimension = group['dimension'][...]
        lower_left = group['lower_left'][...]
        pitch = group['pitch'][...]
        outer = group['outer'][()]
        universe_ids = group['universes'][...]
        # Create the Lattice
        # NOTE(review): ``lstrip('lattice ')`` strips a *character set*, not a
        # prefix; it is safe only while lattice IDs are purely numeric.
        lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
        name = group['name'][()].decode() if 'name' in group else ''
        lattice = cls(lattice_id, name)
        lattice.lower_left = lower_left
        lattice.pitch = pitch
        # If the Universe specified outer the Lattice is not void
        if outer >= 0:
            lattice.outer = universes[outer]
        # Build array of Universe pointers for the Lattice
        uarray = np.empty(universe_ids.shape, dtype=openmc.Universe)
        for z in range(universe_ids.shape[0]):
            for y in range(universe_ids.shape[1]):
                for x in range(universe_ids.shape[2]):
                    uarray[z, y, x] = universes[universe_ids[z, y, x]]
        # Use 2D NumPy array to store lattice universes for 2D lattices
        if len(dimension) == 2:
            uarray = np.squeeze(uarray)
            uarray = np.atleast_2d(uarray)
        # Set the universes for the lattice
        lattice.universes = uarray
        return lattice
class HexLattice(Lattice):
r"""A lattice consisting of hexagonal prisms.
To completely define a hexagonal lattice, the :attr:`HexLattice.center`,
:attr:`HexLattice.pitch`, :attr:`HexLattice.universes`, and
:attr:`HexLattice.outer` properties need to be set.
Most methods for this class use a natural indexing scheme wherein elements
are assigned an index corresponding to their position relative to skewed
:math:`(x,\alpha,z)` or :math:`(\alpha,y,z)` bases, depending on the lattice
orientation, as described fully in :ref:`hexagonal_indexing`. However, note
that when universes are assigned to lattice elements using the
:attr:`HexLattice.universes` property, the array indices do not correspond
to natural indices.
.. versionchanged:: 0.11
The orientation of the lattice can now be changed with the
:attr:`orientation` attribute.
Parameters
----------
lattice_id : int, optional
Unique identifier for the lattice. If not specified, an identifier will
automatically be assigned.
name : str, optional
Name of the lattice. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the lattice
name : str
Name of the lattice
pitch : Iterable of float
Pitch of the lattice in cm. The first item in the iterable specifies the
pitch in the radial direction and, if the lattice is 3D, the second item
in the iterable specifies the pitch in the axial direction.
outer : openmc.Universe
A universe to fill all space outside the lattice
universes : Nested Iterable of openmc.Universe
A two- or three-dimensional list/array of universes filling each element
of the lattice. Each sub-list corresponds to one ring of universes and
should be ordered from outermost ring to innermost ring. The universes
within each sub-list are ordered from the "top" and proceed in a
clockwise fashion. The :meth:`HexLattice.show_indices` method can be
used to help figure out indices for this property.
center : Iterable of float
Coordinates of the center of the lattice. If the lattice does not have
axial sections then only the x- and y-coordinates are specified
indices : list of tuple
A list of all possible (z,r,i) or (r,i) lattice element indices that are
possible, where z is the axial index, r is in the ring index (starting
from the outermost ring), and i is the index with a ring starting from
the top and proceeding clockwise.
orientation : {'x', 'y'}
str by default 'y' orientation of main lattice diagonal another option
- 'x'
num_rings : int
Number of radial ring positions in the xy-plane
num_axial : int
Number of positions along the z-axis.
"""
    def __init__(self, lattice_id=None, name=''):
        super().__init__(lattice_id, name)
        # Initialize Lattice class attributes
        self._num_rings = None
        self._num_axial = None
        self._center = None
        # Default orientation: main lattice diagonal along 'y'
        self._orientation = 'y'
    def __repr__(self):
        """Return a human-readable summary of the lattice and its universe map."""
        string = 'HexLattice\n'
        string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
        string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
        string += '{0: <16}{1}{2}\n'.format('\tOrientation', '=\t',
                                            self._orientation)
        string += '{0: <16}{1}{2}\n'.format('\t# Rings', '=\t', self._num_rings)
        string += '{0: <16}{1}{2}\n'.format('\t# Axial', '=\t', self._num_axial)
        string += '{0: <16}{1}{2}\n'.format('\tCenter', '=\t',
                                            self._center)
        string += '{0: <16}{1}{2}\n'.format('\tPitch', '=\t', self._pitch)
        if self._outer is not None:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer._id)
        else:
            string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
                                                self._outer)
        string += '{0: <16}\n'.format('\tUniverses')
        # Each axial slice is rendered by the _repr_axial_slice helper
        if self._num_axial is not None:
            slices = [self._repr_axial_slice(x) for x in self._universes]
            string += '\n'.join(slices)
        else:
            string += self._repr_axial_slice(self._universes)
        return string
    @property
    def num_rings(self):
        return self._num_rings
    @property
    def orientation(self):
        return self._orientation
    @property
    def num_axial(self):
        return self._num_axial
    @property
    def center(self):
        return self._center
    @property
    def indices(self):
        # Ring r (0 = outermost) holds 6*(num_rings - 1 - r) elements, except
        # the innermost ring, which holds exactly one — hence the max(..., 1).
        if self.num_axial is None:
            return [(r, i) for r in range(self.num_rings)
                    for i in range(max(6*(self.num_rings - 1 - r), 1))]
        else:
            return [(z, r, i) for z in range(self.num_axial)
                    for r in range(self.num_rings)
                    for i in range(max(6*(self.num_rings - 1 - r), 1))]
    @property
    def _natural_indices(self):
        """Iterate over all possible (x,alpha) or (x,alpha,z) lattice element
        indices.
        This property is used when constructing distributed cell and material
        paths. Most importantly, the iteration order matches that used on the
        Fortran side.
        """
        # Skew coordinates run over (-r+1, ..., r-1); is_valid_index filters
        # out combinations that do not correspond to actual lattice elements.
        r = self.num_rings
        if self.num_axial is None:
            for a in range(-r + 1, r):
                for x in range(-r + 1, r):
                    idx = (x, a)
                    if self.is_valid_index(idx):
                        yield idx
        else:
            for z in range(self.num_axial):
                for a in range(-r + 1, r):
                    for x in range(-r + 1, r):
                        idx = (x, a, z)
                        if self.is_valid_index(idx):
                            yield idx
    @property
    def ndim(self):
        # 2D lattices store universes directly in each ring; in 3D there is
        # one extra level of nesting for the axial slices.
        return 2 if isinstance(self.universes[0][0], openmc.Universe) else 3
    @center.setter
    def center(self, center):
        cv.check_type('lattice center', center, Iterable, Real)
        cv.check_length('lattice center', center, 2, 3)
        self._center = center
    @orientation.setter
    def orientation(self, orientation):
        # Orientation is case-insensitive and stored lowercased
        cv.check_value('orientation', orientation.lower(), ('x', 'y'))
        self._orientation = orientation.lower()
    @Lattice.pitch.setter
    def pitch(self, pitch):
        # One entry (radial pitch) for 2D; a second entry (axial) for 3D
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 1, 2)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0)
        self._pitch = pitch
@Lattice.universes.setter
def universes(self, universes):
cv.check_iterable_type('lattice universes', universes, openmc.Universe,
min_depth=2, max_depth=3)
self._universes = universes
# NOTE: This routine assumes that the user creates a "ragged" list of
# lists, where each sub-list corresponds to one ring of Universes.
# The sub-lists are ordered from outermost ring to innermost ring.
# The Universes within each sub-list are ordered from the "top" in a
# clockwise fashion.
# Set the number of axial positions.
if self.ndim == 3:
self._num_axial = len(self._universes)
else:
self._num_axial = None
# Set the number of rings and make sure this number is consistent for
# all axial positions.
if self.ndim == 3:
self._num_rings = len(self._universes[0])
for rings in self._universes:
if len(rings) != self._num_rings:
msg = 'HexLattice ID={0:d} has an inconsistent number of ' \
'rings per axial position'.format(self._id)
raise ValueError(msg)
else:
self._num_rings = len(self._universes)
# Make sure there are the correct number of elements in each ring.
if self.ndim == 3:
for axial_slice in self._universes:
# Check the center ring.
if len(axial_slice[-1]) != 1:
msg = 'HexLattice ID={0:d} has the wrong number of ' \
'elements in the innermost ring. Only 1 element is ' \
'allowed in the innermost ring.'.format(self._id)
raise ValueError(msg)
# Check the outer rings.
for r in range(self._num_rings-1):
if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
msg = 'HexLattice ID={0:d} has the wrong number of ' \
'elements in ring number {1:d} (counting from the '\
'outermost ring). This ring should have {2:d} ' \
'elements.'.format(self._id, r,
6*(self._num_rings - 1 - r))
raise ValueError(msg)
else:
axial_slice = self._universes
# Check the center ring.
if len(axial_slice[-1]) != 1:
msg = 'HexLattice ID={0:d} has the wrong number of ' \
'elements in the innermost ring. Only 1 element is ' \
'allowed in the innermost ring.'.format(self._id)
raise ValueError(msg)
# Check the outer rings.
for r in range(self._num_rings-1):
if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
msg = 'HexLattice ID={0:d} has the wrong number of ' \
'elements in ring number {1:d} (counting from the '\
'outermost ring). This ring should have {2:d} ' \
'elements.'.format(self._id, r,
6*(self._num_rings - 1 - r))
raise ValueError(msg)
    def find_element(self, point):
        r"""Determine index of lattice element and local coordinates for a point

        Parameters
        ----------
        point : Iterable of float
            Cartesian coordinates of point

        Returns
        -------
        3-tuple of int
            Indices of corresponding lattice element in :math:`(x,\alpha,z)`
            or :math:`(\alpha,y,z)` bases
        numpy.ndarray
            Cartesian coordinates of the point in the corresponding lattice
            element coordinate system

        """
        # Convert coordinates to skewed bases
        x = point[0] - self.center[0]
        y = point[1] - self.center[1]
        if self._num_axial is None:
            # 2D lattice: iz is a placeholder; get_local_coordinates and
            # is_valid_index both ignore idx[2] when num_axial is None.
            iz = 1
        else:
            z = point[2] - self.center[2]
            iz = floor(z/self.pitch[1] + 0.5*self.num_axial)
        if self._orientation == 'x':
            alpha = y - x*sqrt(3.)
            i1 = floor(-alpha/(sqrt(3.0) * self.pitch[0]))
            i2 = floor(y/(sqrt(0.75) * self.pitch[0]))
        else:
            alpha = y - x/sqrt(3.)
            i1 = floor(x/(sqrt(0.75) * self.pitch[0]))
            i2 = floor(alpha/self.pitch[0])

        # Check four lattice elements to see which one is closest based on local
        # coordinates
        indices = [(i1, i2, iz), (i1 + 1, i2, iz), (i1, i2 + 1, iz),
                   (i1 + 1, i2 + 1, iz)]
        d_min = np.inf
        for idx in indices:
            p = self.get_local_coordinates(point, idx)
            # Squared distance from the candidate element center (xy-plane
            # only; the axial index is already fixed).
            d = p[0]**2 + p[1]**2
            if d < d_min:
                d_min = d
                idx_min = idx
                p_min = p

        return idx_min, p_min
def get_local_coordinates(self, point, idx):
r"""Determine local coordinates of a point within a lattice element
Parameters
----------
point : Iterable of float
Cartesian coordinates of point
idx : Iterable of int
Indices of lattice element in :math:`(x,\alpha,z)`
or :math:`(\alpha,y,z)` bases
Returns
-------
3-tuple of float
Cartesian coordinates of point in the lattice element coordinate
system
"""
if self._orientation == 'x':
x = point[0] - (self.center[0] + (idx[0] + 0.5*idx[1])*self.pitch[0])
y = point[1] - (self.center[1] + sqrt(0.75)*self.pitch[0]*idx[1])
else:
x = point[0] - (self.center[0] + sqrt(0.75)*self.pitch[0]*idx[0])
y = point[1] - (self.center[1] + (0.5*idx[0] + idx[1])*self.pitch[0])
if self._num_axial is None:
z = point[2]
else:
z = point[2] - (self.center[2] + (idx[2] + 0.5 - 0.5*self.num_axial) *
self.pitch[1])
return (x, y, z)
    def get_universe_index(self, idx):
        r"""Return index in the universes array corresponding
        to a lattice element index

        Parameters
        ----------
        idx : Iterable of int
            Lattice element indices in the :math:`(x,\alpha,z)` coordinate
            system in 'y' orientation case, or indices in the
            :math:`(\alpha,y,z)` coordinate system in 'x' one

        Returns
        -------
        2- or 3-tuple of int
            Indices used when setting the :attr:`HexLattice.universes`
            property

        """
        # First we determine which ring the index corresponds to.
        x = idx[0]
        a = idx[1]
        # Third skewed ("cube") coordinate: the three coordinates sum to
        # zero, and the ring distance from the center is the largest
        # absolute value among them.
        z = -a - x
        g = max(abs(x), abs(a), abs(z))

        # Next we use a clever method to figure out where along the ring we are.
        # Ring g maps to sub-list num_rings - 1 - g (outermost ring first);
        # i_within walks clockwise around the ring of 6*g elements, one
        # 60-degree sector at a time based on the signs of x and a.
        i_ring = self._num_rings - 1 - g
        if x >= 0:
            if a >= 0:
                i_within = x
            else:
                i_within = 2*g + z
        else:
            if a <= 0:
                i_within = 3*g - x
            else:
                i_within = 5*g - z

        if self._orientation == 'x' and g > 0:
            # The 'x' orientation starts the ring one sector earlier:
            # adding 5*g modulo 6*g shifts the position back by g.
            i_within = (i_within + 5*g) % (6*g)
        if self.num_axial is None:
            return (i_ring, i_within)
        else:
            return (idx[2], i_ring, i_within)
def is_valid_index(self, idx):
r"""Determine whether lattice element index is within defined range
Parameters
----------
idx : Iterable of int
Lattice element indices in the both :math:`(x,\alpha,z)`
and :math:`(\alpha,y,z)` coordinate system
Returns
-------
bool
Whether index is valid
"""
x = idx[0]
y = idx[1]
z = 0 - y - x
g = max(abs(x), abs(y), abs(z))
if self.num_axial is None:
return g < self.num_rings
else:
return g < self.num_rings and 0 <= idx[2] < self.num_axial
    def create_xml_subelement(self, xml_element, memo=None):
        """Append a ``<hex_lattice>`` subelement describing this lattice to
        *xml_element*, recursively exporting every contained universe.

        Parameters
        ----------
        xml_element : xml.etree.ElementTree.Element
            Parent XML element the lattice description is appended to
        memo : set or None
            Set of objects already exported, used to avoid writing duplicate
            subelements

        """
        # If this subelement has already been written, return
        if memo and self in memo:
            return
        if memo is not None:
            memo.add(self)

        lattice_subelement = ET.Element("hex_lattice")
        lattice_subelement.set("id", str(self._id))

        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))

        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))

        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = str(self._outer._id)
            self._outer.create_xml_subelement(xml_element, memo)

        lattice_subelement.set("n_rings", str(self._num_rings))

        # If orientation is "x" export it to XML ('y' is the implicit default)
        if self._orientation == 'x':
            lattice_subelement.set("orientation", "x")

        if self._num_axial is not None:
            lattice_subelement.set("n_axial", str(self._num_axial))

        # Export Lattice cell center
        center = ET.SubElement(lattice_subelement, "center")
        center.text = ' '.join(map(str, self._center))

        # Export the Lattice nested Universe IDs.

        # 3D Lattices
        if self._num_axial is not None:
            slices = []
            for z in range(self._num_axial):
                # Initialize the center universe.
                universe = self._universes[z][-1][0]
                universe.create_xml_subelement(xml_element, memo)

                # Initialize the remaining universes.
                for r in range(self._num_rings-1):
                    for theta in range(6*(self._num_rings - 1 - r)):
                        universe = self._universes[z][r][theta]
                        universe.create_xml_subelement(xml_element, memo)

                # Get a string representation of the universe IDs.
                slices.append(self._repr_axial_slice(self._universes[z]))

            # Collapse the list of axial slices into a single string.
            universe_ids = '\n'.join(slices)

        # 2D Lattices
        else:
            # Initialize the center universe.
            universe = self._universes[-1][0]
            universe.create_xml_subelement(xml_element, memo)

            # Initialize the remaining universes.
            for r in range(self._num_rings - 1):
                for theta in range(6*(self._num_rings - 1 - r)):
                    universe = self._universes[r][theta]
                    universe.create_xml_subelement(xml_element, memo)

            # Get a string representation of the universe IDs.
            universe_ids = self._repr_axial_slice(self._universes)

        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = '\n' + universe_ids

        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
    @classmethod
    def from_xml_element(cls, elem, get_universe):
        """Generate hexagonal lattice from XML element

        Parameters
        ----------
        elem : xml.etree.ElementTree.Element
            `<hex_lattice>` element
        get_universe : function
            Function returning universe (defined in
            :meth:`openmc.Geometry.from_xml`)

        Returns
        -------
        HexLattice
            Hexagonal lattice

        """
        lat_id = int(get_text(elem, 'id'))
        name = get_text(elem, 'name')
        lat = cls(lat_id, name)
        lat.center = [float(i) for i in get_text(elem, 'center').split()]
        lat.pitch = [float(i) for i in get_text(elem, 'pitch').split()]
        # 'y' orientation is the default when the attribute is absent.
        lat.orientation = get_text(elem, 'orientation', 'y')
        outer = get_text(elem, 'outer')
        if outer is not None:
            lat.outer = get_universe(int(outer))

        # Get nested lists of universes.  Note that the universes setter
        # recomputes _num_rings/_num_axial from the final nested lists when
        # `lat.universes` is assigned at the end.
        lat._num_rings = n_rings = int(get_text(elem, 'n_rings'))
        lat._num_axial = n_axial = int(get_text(elem, 'n_axial', 1))

        # Create empty nested lists for one axial level
        univs = [[None for _ in range(max(6*(n_rings - 1 - r), 1))]
                 for r in range(n_rings)]
        if n_axial > 1:
            univs = [deepcopy(univs) for i in range(n_axial)]

        # Get flat array of universes
        uarray = np.array([get_universe(int(i)) for i in
                           get_text(elem, 'universes').split()])

        # Fill nested lists
        j = 0
        for z in range(n_axial):
            # Get list for a single axial level
            axial_level = univs[z] if n_axial > 1 else univs

            if lat.orientation == 'y':
                # Start iterating from top
                x, alpha = 0, n_rings - 1
                while True:
                    # Set entry in list based on (x,alpha,z) coordinates
                    _, i_ring, i_within = lat.get_universe_index((x, alpha, z))
                    axial_level[i_ring][i_within] = uarray[j]

                    # Move to the right
                    x += 2
                    alpha -= 1
                    if not lat.is_valid_index((x, alpha, z)):
                        # Move down in y direction
                        alpha += x - 1
                        x = 1 - x
                        if not lat.is_valid_index((x, alpha, z)):
                            # Move to the right
                            x += 2
                            alpha -= 1
                            if not lat.is_valid_index((x, alpha, z)):
                                # Reached the bottom
                                break
                    j += 1
            else:
                # Start iterating from top
                alpha, y = 1 - n_rings, n_rings - 1
                while True:
                    # Set entry in list based on (alpha,y,z) coordinates
                    _, i_ring, i_within = lat.get_universe_index((alpha, y, z))
                    axial_level[i_ring][i_within] = uarray[j]

                    # Move to the right
                    alpha += 1
                    if not lat.is_valid_index((alpha, y, z)):
                        # Move down to next row
                        alpha = 1 - n_rings
                        y -= 1

                        # Check if we've reached the bottom
                        if y == -n_rings:
                            break

                        while not lat.is_valid_index((alpha, y, z)):
                            # Move to the right
                            alpha += 1
                    j += 1

        lat.universes = univs
        return lat
def _repr_axial_slice(self, universes):
"""Return string representation for the given 2D group of universes.
The 'universes' argument should be a list of lists of universes where
each sub-list represents a single ring. The first list should be the
outer ring.
"""
if self._orientation == 'x':
return self._repr_axial_slice_x(universes)
else:
return self._repr_axial_slice_y(universes)
    def _repr_axial_slice_x(self, universes):
        """Return string representation for the given 2D group of universes
        in 'x' orientation case.

        The 'universes' argument should be a list of lists of universes where
        each sub-list represents a single ring. The first list should be the
        outer ring.
        """
        # Find the largest universe ID and count the number of digits so we can
        # properly pad the output string later.
        largest_id = max([max([univ._id for univ in ring])
                          for ring in universes])
        n_digits = len(str(largest_id))
        pad = ' '*n_digits
        id_form = '{: ^' + str(n_digits) + 'd}'

        # Initialize the list for each row (2*num_rings - 1 text rows).
        rows = [[] for i in range(2*self._num_rings - 1)]
        middle = self._num_rings - 1

        # Start with the degenerate first ring.
        universe = universes[-1][0]
        rows[middle] = [id_form.format(universe._id)]

        # Add universes one ring at a time.
        for r in range(1, self._num_rings):
            # r_prime increments down while r increments up.
            r_prime = self._num_rings - 1 - r
            theta = 0
            y = middle

            # Climb down the bottom-right
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                y += 1
                theta += 1

            # Climb left across the bottom
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                theta += 1

            # Climb up the bottom-left
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                y -= 1
                theta += 1

            # Climb up the top-left
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                y -= 1
                theta += 1

            # Climb right across the top
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                theta += 1

            # Climb down the top-right
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                y += 1
                theta += 1

        # Join each row into a single string.  Unlike the 'y' orientation,
        # the rows are already in top-to-bottom order, so no flip is needed.
        rows = [pad.join(x) for x in rows]

        # Pad the beginning of the rows so they line up properly.
        for y in range(self._num_rings - 1):
            rows[y] = (self._num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (self._num_rings - 1 - y)*pad + rows[-1 - y]

        # Join the rows together and return the string.
        universe_ids = '\n'.join(rows)
        return universe_ids
    def _repr_axial_slice_y(self, universes):
        """Return string representation for the given 2D group of universes in
        'y' orientation case.

        The 'universes' argument should be a list of lists of universes where
        each sub-list represents a single ring. The first list should be the
        outer ring.
        """
        # Find the largest universe ID and count the number of digits so we can
        # properly pad the output string later.
        largest_id = max([max([univ._id for univ in ring])
                          for ring in universes])
        n_digits = len(str(largest_id))
        pad = ' '*n_digits
        id_form = '{: ^' + str(n_digits) + 'd}'

        # Initialize the list for each row (1 + 4*(num_rings - 1) text rows;
        # rows are indexed bottom-up here and flipped at the end).
        rows = [[] for i in range(1 + 4 * (self._num_rings-1))]
        middle = 2 * (self._num_rings - 1)

        # Start with the degenerate first ring.
        universe = universes[-1][0]
        rows[middle] = [id_form.format(universe._id)]

        # Add universes one ring at a time.
        for r in range(1, self._num_rings):
            # r_prime increments down while r increments up.
            r_prime = self._num_rings - 1 - r
            theta = 0
            y = middle + 2*r

            # Climb down the top-right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                y -= 1
                theta += 1

            # Climb down the right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                y -= 2
                theta += 1

            # Climb down the bottom-right.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))

                # Translate the indices.
                y -= 1
                theta += 1

            # Climb up the bottom-left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                y += 1
                theta += 1

            # Climb up the left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                y += 2
                theta += 1

            # Climb up the top-left.
            for i in range(r):
                # Add the universe.
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))

                # Translate the indices.
                y += 1
                theta += 1

        # Flip the rows and join each row into a single string.
        rows = [pad.join(x) for x in rows[::-1]]

        # Pad the beginning of the rows so they line up properly.
        for y in range(self._num_rings - 1):
            rows[y] = (self._num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (self._num_rings - 1 - y)*pad + rows[-1 - y]
        for y in range(self._num_rings % 2, self._num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]

        # Join the rows together and return the string.
        universe_ids = '\n'.join(rows)
        return universe_ids
    @staticmethod
    def _show_indices_y(num_rings):
        """Return a diagram of the hexagonal lattice layout with indices.

        This method can be used to show the proper indices to be used when
        setting the :attr:`HexLattice.universes` property. For example, running
        this method with num_rings=3 will return the following diagram::

                      (0, 0)
                (0,11)      (0, 1)
          (0,10)      (1, 0)      (0, 2)
                (1, 5)      (1, 1)
          (0, 9)      (2, 0)      (0, 3)
                (1, 4)      (1, 2)
          (0, 8)      (1, 3)      (0, 4)
                (0, 7)      (0, 5)
                      (0, 6)

        Parameters
        ----------
        num_rings : int
            Number of rings in the hexagonal lattice

        Returns
        -------
        str
            Diagram of the hexagonal lattice showing indices

        """
        # Find the largest string and count the number of digits so we can
        # properly pad the output string later
        largest_index = 6*(num_rings - 1)
        n_digits_index = len(str(largest_index))
        n_digits_ring = len(str(num_rings - 1))
        # Each entry renders as '(ring,position)' with consistent widths.
        str_form = '({{:{}}},{{:{}}})'.format(n_digits_ring, n_digits_index)
        pad = ' '*(n_digits_index + n_digits_ring + 3)

        # Initialize the list for each row.
        rows = [[] for i in range(1 + 4 * (num_rings-1))]
        middle = 2 * (num_rings - 1)

        # Start with the degenerate first ring.
        rows[middle] = [str_form.format(num_rings - 1, 0)]

        # Add universes one ring at a time.
        for r in range(1, num_rings):
            # r_prime increments down while r increments up.
            r_prime = num_rings - 1 - r
            theta = 0
            y = middle + 2*r

            for i in range(r):
                # Climb down the top-right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1

            for i in range(r):
                # Climb down the right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 2
                theta += 1

            for i in range(r):
                # Climb down the bottom-right.
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1

            for i in range(r):
                # Climb up the bottom-left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1

            for i in range(r):
                # Climb up the left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 2
                theta += 1

            for i in range(r):
                # Climb up the top-left.
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1

        # Flip the rows and join each row into a single string.
        rows = [pad.join(x) for x in rows[::-1]]

        # Pad the beginning of the rows so they line up properly.
        for y in range(num_rings - 1):
            rows[y] = (num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (num_rings - 1 - y)*pad + rows[-1 - y]
        for y in range(num_rings % 2, num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]

        # Join the rows together and return the string.
        return '\n'.join(rows)
    @staticmethod
    def _show_indices_x(num_rings):
        """Return a diagram of the hexagonal lattice with x orientation
        layout with indices.

        This method can be used to show the proper indices to be used when
        setting the :attr:`HexLattice.universes` property. For example, running
        this method with num_rings=3 will return the following diagram::

                  (0, 8)  (0, 9)  (0,10)

              (0, 7)  (1, 4)  (1, 5)  (0,11)

          (0, 6)  (1, 3)  (2, 0)  (1, 0)  (0, 0)

              (0, 5)  (1, 2)  (1, 1)  (0, 1)

                  (0, 4)  (0, 3)  (0, 2)

        Parameters
        ----------
        num_rings : int
            Number of rings in the hexagonal lattice

        Returns
        -------
        str
            Diagram of the hexagonal lattice showing indices in OX orientation

        """
        # Find the largest string and count the number of digits so we can
        # properly pad the output string later
        largest_index = 6*(num_rings - 1)
        n_digits_index = len(str(largest_index))
        n_digits_ring = len(str(num_rings - 1))
        str_form = '({{:{}}},{{:{}}})'.format(n_digits_ring, n_digits_index)
        pad = ' '*(n_digits_index + n_digits_ring + 3)

        # Initialize the list for each row.
        rows = [[] for i in range(2*num_rings - 1)]
        middle = num_rings - 1

        # Start with the degenerate first ring.
        rows[middle] = [str_form.format(num_rings - 1, 0)]

        # Add universes one ring at a time.
        for r in range(1, num_rings):
            # r_prime increments down while r increments up.
            r_prime = num_rings - 1 - r
            theta = 0
            y = middle

            for i in range(r):
                # Climb down the bottom-right
                rows[y].append(str_form.format(r_prime, theta))
                y += 1
                theta += 1

            for i in range(r):
                # Climb left across the bottom
                rows[y].insert(0, str_form.format(r_prime, theta))
                theta += 1

            for i in range(r):
                # Climb up the bottom-left
                rows[y].insert(0, str_form.format(r_prime, theta))
                y -= 1
                theta += 1

            for i in range(r):
                # Climb up the top-left
                rows[y].insert(0, str_form.format(r_prime, theta))
                y -= 1
                theta += 1

            for i in range(r):
                # Climb right across the top
                rows[y].append(str_form.format(r_prime, theta))
                theta += 1

            for i in range(r):
                # Climb down the top-right
                rows[y].append(str_form.format(r_prime, theta))
                y += 1
                theta += 1

        # Join each row into a single string (rows are already in
        # top-to-bottom order in the 'x' orientation; no flip is needed).
        rows = [pad.join(x) for x in rows]

        # Pad the beginning of the rows so they line up properly.
        for y in range(num_rings - 1):
            rows[y] = (num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (num_rings - 1 - y)*pad + rows[-1 - y]

        # Join the rows together, separated by blank lines, and return.
        return '\n\n'.join(rows)
@staticmethod
def show_indices(num_rings, orientation="y"):
"""Return a diagram of the hexagonal lattice layout with indices.
Parameters
----------
num_rings : int
Number of rings in the hexagonal lattice
orientation : {"x", "y"}
Orientation of the hexagonal lattice
Returns
-------
str
Diagram of the hexagonal lattice showing indices
"""
if orientation == 'x':
return HexLattice._show_indices_x(num_rings)
else:
return HexLattice._show_indices_y(num_rings)
    @classmethod
    def from_hdf5(cls, group, universes):
        """Create hexagonal lattice from HDF5 group

        Parameters
        ----------
        group : h5py.Group
            Group in HDF5 file
        universes : dict
            Dictionary mapping universe IDs to instances of
            :class:`openmc.Universe`.

        Returns
        -------
        openmc.HexLattice
            Hexagonal lattice

        """
        n_rings = group['n_rings'][()]
        n_axial = group['n_axial'][()]
        center = group['center'][()]
        pitch = group['pitch'][()]
        outer = group['outer'][()]
        if 'orientation' in group:
            orientation = group['orientation'][()].decode()
        else:
            orientation = "y"
        universe_ids = group['universes'][()]

        # Create the Lattice
        lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
        name = group['name'][()].decode() if 'name' in group else ''
        lattice = openmc.HexLattice(lattice_id, name)
        lattice.center = center
        lattice.pitch = pitch
        lattice.orientation = orientation

        # If the Universe specified outer the Lattice is not void
        if outer >= 0:
            lattice.outer = universes[outer]

        if orientation == "y":
            # Build array of Universe pointers for the Lattice. Note that
            # we need to convert between the HDF5's square array of
            # (x, alpha, z) to the Python API's format of a ragged nested
            # list of (z, ring, theta).
            uarray = []
            for z in range(n_axial):
                # Add a list for this axial level.
                uarray.append([])
                x = n_rings - 1
                a = 2*n_rings - 2
                for r in range(n_rings - 1, 0, -1):
                    # Add a list for this ring.
                    uarray[-1].append([])

                    # Climb down the top-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1
                        a -= 1

                    # Climb down the right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a -= 1

                    # Climb down the bottom-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1

                    # Climb up the bottom-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1
                        a += 1

                    # Climb up the left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a += 1

                    # Climb up the top-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1

                    # Move down to the next ring.
                    a -= 1

                    # Convert the ids into Universe objects.
                    uarray[-1][-1] = [universes[u_id]
                                      for u_id in uarray[-1][-1]]

                # Handle the degenerate center ring separately.
                u_id = universe_ids[z, a, x]
                uarray[-1].append([universes[u_id]])
        else:
            # Build array of Universe pointers for the Lattice. Note that
            # we need to convert between the HDF5's square array of
            # (alpha, y, z) to the Python API's format of a ragged nested
            # list of (z, ring, theta).
            uarray = []
            for z in range(n_axial):
                # Add a list for this axial level.
                uarray.append([])
                a = 2*n_rings - 2
                y = n_rings - 1
                for r in range(n_rings - 1, 0, -1):
                    # Add a list for this ring.
                    uarray[-1].append([])

                    # Climb down the bottom-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        y -= 1

                    # Climb across the bottom.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a -= 1

                    # Climb up the bottom-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a -= 1
                        y += 1

                    # Climb up the top-left.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        y += 1

                    # Climb across the top.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a += 1

                    # Climb down the top-right.
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a += 1
                        y -= 1

                    # Move down to the next ring.
                    a -= 1

                    # Convert the ids into Universe objects.
                    uarray[-1][-1] = [universes[u_id]
                                      for u_id in uarray[-1][-1]]

                # Handle the degenerate center ring separately.
                u_id = universe_ids[z, y, a]
                uarray[-1].append([universes[u_id]])

        # Add the universes to the lattice.
        if len(pitch) == 2:
            # Lattice is 3D
            lattice.universes = uarray
        else:
            # Lattice is 2D; extract the only axial level
            lattice.universes = uarray[0]

        return lattice
| 35.843778 | 83 | 0.530205 | from abc import ABC
from collections import OrderedDict
from collections.abc import Iterable
from copy import deepcopy
from math import sqrt, floor
from numbers import Real
import types
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.checkvalue as cv
from ._xml import get_text
from .mixin import IDManagerMixin
class Lattice(IDManagerMixin, ABC):
    """Abstract base class for rectangular and hexagonal lattices.

    Holds the state shared by the concrete lattice classes: an ID and name,
    a pitch, an optional outer universe (filling space beyond the defined
    elements) and the nested universes themselves.
    """

    # Lattices share the universe ID space (see used_ids below).
    next_id = 1
    used_ids = openmc.Universe.used_ids

    def __init__(self, lattice_id=None, name=''):
        self.id = lattice_id
        self.name = name
        self._pitch = None
        self._outer = None
        self._universes = None

    @property
    def name(self):
        """str: Name of the lattice."""
        return self._name

    @property
    def pitch(self):
        """Iterable of float: Pitch of the lattice in each dimension."""
        return self._pitch

    @property
    def outer(self):
        """openmc.Universe: Universe filling space outside defined elements."""
        return self._outer

    @property
    def universes(self):
        """Nested universes making up the lattice elements."""
        return self._universes

    @name.setter
    def name(self, name):
        # A None name is normalized to the empty string.
        if name is not None:
            cv.check_type('lattice name', name, str)
            self._name = name
        else:
            self._name = ''

    @outer.setter
    def outer(self, outer):
        cv.check_type('outer universe', outer, openmc.Universe)
        self._outer = outer

    @staticmethod
    def from_hdf5(group, universes):
        """Dispatch HDF5 deserialization to the concrete lattice class based
        on the 'type' dataset ('rectangular' or 'hexagonal')."""
        lattice_type = group['type'][()].decode()
        if lattice_type == 'rectangular':
            return openmc.RectLattice.from_hdf5(group, universes)
        elif lattice_type == 'hexagonal':
            return openmc.HexLattice.from_hdf5(group, universes)
        else:
            raise ValueError(f'Unknown lattice type: {lattice_type}')

    def get_unique_universes(self):
        """Return an OrderedDict mapping universe ID to universe for every
        unique universe in the lattice, including the outer universe."""
        univs = OrderedDict()
        for k in range(len(self._universes)):
            for j in range(len(self._universes[k])):
                if isinstance(self._universes[k][j], openmc.Universe):
                    u = self._universes[k][j]
                    univs[u._id] = u
                else:
                    # Universes are nested three deep; descend one more level.
                    for i in range(len(self._universes[k][j])):
                        u = self._universes[k][j][i]
                        assert isinstance(u, openmc.Universe)
                        univs[u._id] = u

        if self.outer is not None:
            univs[self.outer._id] = self.outer
        return univs

    def get_nuclides(self):
        """Return a list of unique nuclide names over all contained
        universes, preserving first-seen order."""
        nuclides = []
        unique_universes = self.get_unique_universes()
        for universe in unique_universes.values():
            for nuclide in universe.get_nuclides():
                if nuclide not in nuclides:
                    nuclides.append(nuclide)
        return nuclides

    def get_all_cells(self, memo=None):
        """Return an OrderedDict of all cells in the lattice's universes,
        keyed by cell ID. *memo* avoids revisiting shared objects."""
        cells = OrderedDict()
        if memo and self in memo:
            return cells
        if memo is not None:
            memo.add(self)
        unique_universes = self.get_unique_universes()
        for universe in unique_universes.values():
            cells.update(universe.get_all_cells(memo))
        return cells

    def get_all_materials(self, memo=None):
        """Return an OrderedDict of all materials reachable through the
        lattice's cells, keyed by material ID."""
        materials = OrderedDict()
        cells = self.get_all_cells(memo)
        for cell in cells.values():
            materials.update(cell.get_all_materials(memo))
        return materials

    def get_all_universes(self):
        """Return an OrderedDict of all universes in the lattice, including
        those nested inside contained universes, keyed by universe ID."""
        all_universes = OrderedDict()
        unique_universes = self.get_unique_universes()
        all_universes.update(unique_universes)
        for universe in unique_universes.values():
            all_universes.update(universe.get_all_universes())
        return all_universes

    def get_universe(self, idx):
        """Return the universe stored at lattice element index *idx*."""
        idx_u = self.get_universe_index(idx)
        if self.ndim == 2:
            return self.universes[idx_u[0]][idx_u[1]]
        else:
            return self.universes[idx_u[0]][idx_u[1]][idx_u[2]]

    def find(self, point):
        """Return the (lattice, index) path from this lattice down through
        the universe containing *point*; empty list if the point is outside
        the lattice and no outer universe is defined."""
        idx, p = self.find_element(point)
        if self.is_valid_index(idx):
            u = self.get_universe(idx)
        else:
            # Outside the defined elements: fall back to the outer universe.
            if self.outer is not None:
                u = self.outer
            else:
                return []
        return [(self, idx)] + u.find(p)

    def clone(self, clone_materials=True, clone_regions=True, memo=None):
        """Return a deep copy of this lattice with a new ID, recursively
        cloning the outer universe and all contained universes.

        Parameters
        ----------
        clone_materials : bool
            Passed through to the contained universes' ``clone`` methods
        clone_regions : bool
            Passed through to the contained universes' ``clone`` methods
        memo : dict or None
            Maps already-cloned objects to their clones so shared objects
            are cloned only once

        """
        if memo is None:
            memo = {}
        if self not in memo:
            clone = deepcopy(self)
            clone.id = None
            if self.outer is not None:
                clone.outer = self.outer.clone(clone_materials, clone_regions,
                                               memo)
            # Assign universe clones to the lattice clone
            for i in self.indices:
                if isinstance(self, RectLattice):
                    clone.universes[i] = self.universes[i].clone(
                        clone_materials, clone_regions, memo)
                else:
                    if self.ndim == 2:
                        clone.universes[i[0]][i[1]] = \
                            self.universes[i[0]][i[1]].clone(clone_materials,
                                 clone_regions, memo)
                    else:
                        clone.universes[i[0]][i[1]][i[2]] = \
                            self.universes[i[0]][i[1]][i[2]].clone(
                                clone_materials, clone_regions, memo)
            # Memoize the clone
            memo[self] = clone
        return memo[self]
class RectLattice(Lattice):
    def __init__(self, lattice_id=None, name=''):
        """Initialize a rectangular lattice with an optional ID and name."""
        super().__init__(lattice_id, name)
        # Initialize Lattice class attributes
        self._lower_left = None
    def __repr__(self):
        """Return a multi-line, human-readable summary of the lattice."""
        string = 'RectLattice\n'
        string += '{: <16}=\t{}\n'.format('\tID', self._id)
        string += '{: <16}=\t{}\n'.format('\tName', self._name)
        string += '{: <16}=\t{}\n'.format('\tShape', self.shape)
        string += '{: <16}=\t{}\n'.format('\tLower Left', self._lower_left)
        string += '{: <16}=\t{}\n'.format('\tPitch', self._pitch)
        string += '{: <16}=\t{}\n'.format(
            '\tOuter', self._outer._id if self._outer is not None else None)
        string += '{: <16}\n'.format('\tUniverses')
        # Lattice nested Universe IDs
        for i, universe in enumerate(np.ravel(self._universes)):
            string += f'{universe._id} '
            # Add a newline character every time we reach end of row of cells
            if (i + 1) % self.shape[0] == 0:
                string += '\n'
        string = string.rstrip('\n')
        return string
    @property
    def indices(self):
        """list of tuple: (y, x) or (z, y, x) indices of every lattice
        element, matching the layout of the ``universes`` array."""
        if self.ndim == 2:
            return list(np.broadcast(*np.ogrid[
                :self.shape[1], :self.shape[0]]))
        else:
            return list(np.broadcast(*np.ogrid[
                :self.shape[2], :self.shape[1], :self.shape[0]]))
    @property
    def _natural_indices(self):
        """Iterate over (x, y[, z]) element indices with x varying fastest."""
        if self.ndim == 2:
            nx, ny = self.shape
            for iy in range(ny):
                for ix in range(nx):
                    yield (ix, iy)
        else:
            nx, ny, nz = self.shape
            for iz in range(nz):
                for iy in range(ny):
                    for ix in range(nx):
                        yield (ix, iy, iz)
    @property
    def lower_left(self):
        """Iterable of float: Coordinates of the lattice's lower-left corner."""
        return self._lower_left
@property
def ndim(self):
if self.pitch is not None:
return len(self.pitch)
else:
raise ValueError('Number of dimensions cannot be determined until '
'the lattice pitch has been set.')
    @property
    def shape(self):
        """tuple: (nx, ny[, nz]) — the universes array's axes reversed."""
        return self._universes.shape[::-1]
    @lower_left.setter
    def lower_left(self, lower_left):
        # The corner must be a 2- or 3-vector of real numbers.
        cv.check_type('lattice lower left corner', lower_left, Iterable, Real)
        cv.check_length('lattice lower left corner', lower_left, 2, 3)
        self._lower_left = lower_left
    @Lattice.pitch.setter
    def pitch(self, pitch):
        # Two pitches for a 2D lattice, three for 3D; all strictly positive.
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 2, 3)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0.0)
        self._pitch = pitch
    @Lattice.universes.setter
    def universes(self, universes):
        # Store the nested universe lists as a NumPy (object) array.
        cv.check_iterable_type('lattice universes', universes, openmc.UniverseBase,
                               min_depth=2, max_depth=3)
        self._universes = np.asarray(universes)
def find_element(self, point):
ix = floor((point[0] - self.lower_left[0])/self.pitch[0])
iy = floor((point[1] - self.lower_left[1])/self.pitch[1])
if self.ndim == 2:
idx = (ix, iy)
else:
iz = floor((point[2] - self.lower_left[2])/self.pitch[2])
idx = (ix, iy, iz)
return idx, self.get_local_coordinates(point, idx)
def get_local_coordinates(self, point, idx):
x = point[0] - (self.lower_left[0] + (idx[0] + 0.5)*self.pitch[0])
y = point[1] - (self.lower_left[1] + (idx[1] + 0.5)*self.pitch[1])
if self.ndim == 2:
z = point[2]
else:
z = point[2] - (self.lower_left[2] + (idx[2] + 0.5)*self.pitch[2])
return (x, y, z)
def get_universe_index(self, idx):
max_y = self.shape[1] - 1
if self.ndim == 2:
x, y = idx
return (max_y - y, x)
else:
x, y, z = idx
return (z, max_y - y, x)
def is_valid_index(self, idx):
if self.ndim == 2:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1])
else:
return (0 <= idx[0] < self.shape[0] and
0 <= idx[1] < self.shape[1] and
0 <= idx[2] < self.shape[2])
def discretize(self, strategy="degenerate",
universes_to_ignore=[],
materials_to_clone=[],
lattice_neighbors=[], key=lambda univ: univ.id):
# Check routine inputs
if self.ndim != 2:
raise NotImplementedError("LNS discretization is not implemented "
"for 1D and 3D lattices")
cv.check_value('strategy', strategy, ('degenerate', 'lns'))
cv.check_type('universes_to_ignore', universes_to_ignore, Iterable,
openmc.Universe)
cv.check_type('materials_to_clone', materials_to_clone, Iterable,
openmc.Material)
cv.check_type('lattice_neighbors', lattice_neighbors, Iterable,
openmc.Universe)
cv.check_value('number of lattice_neighbors', len(lattice_neighbors),
(0, 8))
cv.check_type('key', key, types.FunctionType)
# Use outer universe if neighbors are missing and outer is defined
if self.outer is not None and len(lattice_neighbors) == 0:
lattice_neighbors = [key(self.outer) for i in range(8)]
elif len(lattice_neighbors) == 8:
lattice_neighbors = [key(universe) for universe in
lattice_neighbors]
# Dictionary that will keep track of where each pattern appears, how
# it was rotated and/or symmetrized
patterns = {}
# Initialize pattern array
pattern = np.empty(shape=(3, 3), dtype=type(key(self.universes[0][0])))
# Define an auxiliary function that returns a universe's neighbors
def find_edge_neighbors(pattern, i, j):
if len(lattice_neighbors) == 0:
return
if i == 0:
pattern[:, 0] = lattice_neighbors[3]
if j == 0:
pattern[0, 0] = lattice_neighbors[0]
elif j == self.shape[1] - 1:
pattern[2, 0] = lattice_neighbors[5]
if j == 0:
pattern[0, 1] = lattice_neighbors[1]
if i != 0:
pattern[0, 0] = lattice_neighbors[1]
if i != self.shape[0] - 1:
pattern[0, 2] = lattice_neighbors[1]
if i == self.shape[0] - 1:
pattern[:, 2] = lattice_neighbors[4]
if j == 0:
pattern[0, 2] = lattice_neighbors[2]
elif j == self.shape[1] - 1:
pattern[2, 2] = lattice_neighbors[7]
if j == self.shape[1] - 1:
pattern[2, 1] = lattice_neighbors[6]
if i != 0:
pattern[2, 0] = lattice_neighbors[6]
if i != self.shape[0] - 1:
pattern[2, 2] = lattice_neighbors[6]
# among the universes inside the lattice
def find_lattice_neighbors(pattern, i, j):
# Away from left edge
if i != 0:
if j > 0:
pattern[0, 0] = key(self.universes[j-1][i-1])
pattern[1, 0] = key(self.universes[j][i-1])
if j < self.shape[1] - 1:
pattern[2, 0] = key(self.universes[j+1][i-1])
# Away from bottom edge
if j != 0:
if i > 0:
pattern[0, 0] = key(self.universes[j-1][i-1])
pattern[0, 1] = key(self.universes[j-1][i])
if i < self.shape[0] - 1:
pattern[0, 2] = key(self.universes[j-1][i+1])
# Away from right edge
if i != self.shape[0] - 1:
if j > 0:
pattern[0, 2] = key(self.universes[j-1][i+1])
pattern[1, 2] = key(self.universes[j][i+1])
if j < self.shape[1] - 1:
pattern[2, 2] = key(self.universes[j+1][i+1])
# Away from top edge
if j != self.shape[1] - 1:
if i > 0:
pattern[2, 0] = key(self.universes[j+1][i-1])
pattern[2, 1] = key(self.universes[j+1][i])
if i < self.shape[0] - 1:
pattern[2, 2] = key(self.universes[j+1][i+1])
# Analyze lattice, find unique patterns in groups of universes
for j in range(self.shape[1]):
for i in range(self.shape[0]):
# Skip universes to ignore
if self.universes[j][i] in universes_to_ignore:
continue
# Create a neighborhood pattern based on the universe's
# Degenerate discretization has all universes be different
if strategy == "degenerate":
patterns[(i, j)] = {'locations': [(i, j)]}
continue
# Find neighbors among lattice's neighbors at the edges
find_edge_neighbors(pattern, i, j)
find_lattice_neighbors(pattern, i, j)
pattern[1, 1] = key(self.universes[j][i])
# Look for pattern in dictionary of patterns found
found = False
for known_pattern, pattern_data in patterns.items():
# Look at all rotations of pattern
for rot in range(4):
if not found and tuple(map(tuple, pattern)) ==\
known_pattern:
found = True
# Save location of the pattern in the lattice
pattern_data['locations'].append((i, j))
# Rotate pattern
pattern = np.rot90(pattern)
# Look at transpose of pattern and its rotations
pattern = np.transpose(pattern)
for rot in range(4):
if not found and tuple(map(tuple, pattern)) ==\
known_pattern:
found = True
# Save location of the pattern in the lattice
pattern_data['locations'].append((i, j))
# Rotate pattern
pattern = np.rot90(pattern)
# Transpose pattern back for the next search
pattern = np.transpose(pattern)
# Create new pattern and add to the patterns dictionary
if not found:
patterns[tuple(map(tuple, pattern))] =\
{'locations': [(i, j)]}
# Discretize lattice
for pattern, pattern_data in patterns.items():
first_pos = pattern_data['locations'][0]
# Create a clone of the universe, without cloning materials
new_universe = self.universes[first_pos[1]][first_pos[0]].clone(
clone_materials=False, clone_regions=False)
# Replace only the materials in materials_to_clone
for material in materials_to_clone:
material_cloned = False
for cell in new_universe.get_all_cells().values():
if cell.fill_type == 'material':
if cell.fill.id == material.id:
# Only a single clone of each material is necessary
if not material_cloned:
material_clone = material.clone()
material_cloned = True
cell.fill = material_clone
elif cell.fill_type == 'distribmat':
raise(ValueError, "Lattice discretization should not "
"be used with distributed materials")
elif len(cell.temperature) > 1 or len(cell.fill) > 1:
raise(ValueError, "Lattice discretization should not "
"be used with distributed cells")
# Rebuild lattice from list of locations with this pattern
for index, location in enumerate(pattern_data['locations']):
self.universes[location[1]][location[0]] = new_universe
    def create_xml_subelement(self, xml_element, memo=None):
        """Append a ``<lattice>`` XML subelement for this lattice (and,
        recursively, its universes) to *xml_element*.

        *memo* is a set of already-exported objects used to avoid emitting
        the same lattice or universe twice.
        """
        # If the element already contains the Lattice subelement, then return
        if memo and self in memo:
            return
        if memo is not None:
            memo.add(self)
        lattice_subelement = ET.Element("lattice")
        lattice_subelement.set("id", str(self._id))
        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))
        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))
        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = str(self._outer._id)
            self._outer.create_xml_subelement(xml_element, memo)
        # Export Lattice cell dimensions
        dimension = ET.SubElement(lattice_subelement, "dimension")
        dimension.text = ' '.join(map(str, self.shape))
        # Export Lattice lower left
        lower_left = ET.SubElement(lattice_subelement, "lower_left")
        lower_left.text = ' '.join(map(str, self._lower_left))
        # Export the Lattice nested Universe IDs - column major for Fortran
        universe_ids = '\n'
        # 3D Lattices
        if self.ndim == 3:
            for z in range(self.shape[2]):
                for y in range(self.shape[1]):
                    for x in range(self.shape[0]):
                        universe = self._universes[z][y][x]
                        # Append Universe ID to the Lattice XML subelement
                        universe_ids += f'{universe._id} '
                        # Create XML subelement for this Universe
                        universe.create_xml_subelement(xml_element, memo)
                    # Add newline character when we reach end of row of cells
                    universe_ids += '\n'
                # Add newline character when we reach end of row of cells
                universe_ids += '\n'
        # 2D Lattices
        else:
            for y in range(self.shape[1]):
                for x in range(self.shape[0]):
                    universe = self._universes[y][x]
                    # Append Universe ID to Lattice XML subelement
                    universe_ids += f'{universe._id} '
                    # Create XML subelement for this Universe
                    universe.create_xml_subelement(xml_element, memo)
                # Add newline character when we reach end of row of cells
                universe_ids += '\n'
        # Remove trailing newline character from Universe IDs string
        universe_ids = universe_ids.rstrip('\n')
        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = universe_ids
        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
    @classmethod
    def from_xml_element(cls, elem, get_universe):
        """Build a rectangular lattice from a ``<lattice>`` XML element.

        *get_universe* maps a universe ID to a universe object.
        """
        lat_id = int(get_text(elem, 'id'))
        name = get_text(elem, 'name')
        lat = cls(lat_id, name)
        lat.lower_left = [float(i)
                          for i in get_text(elem, 'lower_left').split()]
        lat.pitch = [float(i) for i in get_text(elem, 'pitch').split()]
        outer = get_text(elem, 'outer')
        if outer is not None:
            lat.outer = get_universe(int(outer))
        # Get array of universes
        dimension = get_text(elem, 'dimension').split()
        # Reverse to the internal (z, y, x)/(y, x) storage order.
        shape = np.array(dimension, dtype=int)[::-1]
        uarray = np.array([get_universe(int(i)) for i in
                           get_text(elem, 'universes').split()])
        uarray.shape = shape
        lat.universes = uarray
        return lat
    @classmethod
    def from_hdf5(cls, group, universes):
        """Build a rectangular lattice from an HDF5 group.

        *universes* maps universe IDs to universe objects.
        """
        dimension = group['dimension'][...]
        lower_left = group['lower_left'][...]
        pitch = group['pitch'][...]
        outer = group['outer'][()]
        universe_ids = group['universes'][...]
        # Create the Lattice
        lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
        name = group['name'][()].decode() if 'name' in group else ''
        lattice = cls(lattice_id, name)
        lattice.lower_left = lower_left
        lattice.pitch = pitch
        # If the Universe specified outer the Lattice is not void
        if outer >= 0:
            lattice.outer = universes[outer]
        # Build array of Universe pointers for the Lattice
        uarray = np.empty(universe_ids.shape, dtype=openmc.Universe)
        for z in range(universe_ids.shape[0]):
            for y in range(universe_ids.shape[1]):
                for x in range(universe_ids.shape[2]):
                    uarray[z, y, x] = universes[universe_ids[z, y, x]]
        # Use 2D NumPy array to store lattice universes for 2D lattices
        if len(dimension) == 2:
            uarray = np.squeeze(uarray)
            uarray = np.atleast_2d(uarray)
        # Set the universes for the lattice
        lattice.universes = uarray
        return lattice
class HexLattice(Lattice):
    def __init__(self, lattice_id=None, name=''):
        """Create an (initially empty) hexagonal lattice."""
        super().__init__(lattice_id, name)
        # Initialize Lattice class attributes
        self._num_rings = None      # set when universes are assigned
        self._num_axial = None      # None for 2D lattices
        self._center = None
        self._orientation = 'y'     # 'y' (pointy-top) is the default
def __repr__(self):
string = 'HexLattice\n'
string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
string += '{0: <16}{1}{2}\n'.format('\tOrientation', '=\t',
self._orientation)
string += '{0: <16}{1}{2}\n'.format('\t
string += '{0: <16}{1}{2}\n'.format('\t
string += '{0: <16}{1}{2}\n'.format('\tCenter', '=\t',
self._center)
string += '{0: <16}{1}{2}\n'.format('\tPitch', '=\t', self._pitch)
if self._outer is not None:
string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
self._outer._id)
else:
string += '{0: <16}{1}{2}\n'.format('\tOuter', '=\t',
self._outer)
string += '{0: <16}\n'.format('\tUniverses')
if self._num_axial is not None:
slices = [self._repr_axial_slice(x) for x in self._universes]
string += '\n'.join(slices)
else:
string += self._repr_axial_slice(self._universes)
return string
    @property
    def num_rings(self):
        """Number of rings in the lattice (set when universes are assigned)."""
        return self._num_rings
    @property
    def orientation(self):
        """Hexagon orientation, either 'x' or 'y'."""
        return self._orientation
    @property
    def num_axial(self):
        """Number of axial levels, or None for a 2D lattice."""
        return self._num_axial
    @property
    def center(self):
        """Coordinates of the lattice center."""
        return self._center
@property
def indices(self):
if self.num_axial is None:
return [(r, i) for r in range(self.num_rings)
for i in range(max(6*(self.num_rings - 1 - r), 1))]
else:
return [(z, r, i) for z in range(self.num_axial)
for r in range(self.num_rings)
for i in range(max(6*(self.num_rings - 1 - r), 1))]
@property
def _natural_indices(self):
r = self.num_rings
if self.num_axial is None:
for a in range(-r + 1, r):
for x in range(-r + 1, r):
idx = (x, a)
if self.is_valid_index(idx):
yield idx
else:
for z in range(self.num_axial):
for a in range(-r + 1, r):
for x in range(-r + 1, r):
idx = (x, a, z)
if self.is_valid_index(idx):
yield idx
    @property
    def ndim(self):
        """2 if the top level of the nested universes holds rings directly,
        otherwise 3 (an extra axial level of nesting)."""
        return 2 if isinstance(self.universes[0][0], openmc.Universe) else 3
    @center.setter
    def center(self, center):
        """Set the lattice center; 2 or 3 reals."""
        cv.check_type('lattice center', center, Iterable, Real)
        cv.check_length('lattice center', center, 2, 3)
        self._center = center
    @orientation.setter
    def orientation(self, orientation):
        """Set the hexagon orientation; accepts 'x' or 'y' (case-insensitive)."""
        cv.check_value('orientation', orientation.lower(), ('x', 'y'))
        self._orientation = orientation.lower()
    @Lattice.pitch.setter
    def pitch(self, pitch):
        """Set the pitch: 1 value (2D) or 2 values (2D + axial), positive."""
        cv.check_type('lattice pitch', pitch, Iterable, Real)
        cv.check_length('lattice pitch', pitch, 1, 2)
        for dim in pitch:
            cv.check_greater_than('lattice pitch', dim, 0)
        self._pitch = pitch
    @Lattice.universes.setter
    def universes(self, universes):
        """Set the universes and derive/validate ring and axial counts.

        Raises ValueError if ring counts are inconsistent across axial
        positions or any ring has the wrong number of elements.
        """
        cv.check_iterable_type('lattice universes', universes, openmc.Universe,
                               min_depth=2, max_depth=3)
        self._universes = universes
        # NOTE: This routine assumes that the user creates a "ragged" list of
        # lists, where each sub-list corresponds to one ring of Universes.
        # The sub-lists are ordered from outermost ring to innermost ring.
        # The Universes within each sub-list are ordered from the "top" in a
        # clockwise fashion.
        # Set the number of axial positions.
        if self.ndim == 3:
            self._num_axial = len(self._universes)
        else:
            self._num_axial = None
        # Set the number of rings and make sure this number is consistent for
        # all axial positions.
        if self.ndim == 3:
            self._num_rings = len(self._universes[0])
            for rings in self._universes:
                if len(rings) != self._num_rings:
                    msg = 'HexLattice ID={0:d} has an inconsistent number of ' \
                          'rings per axial position'.format(self._id)
                    raise ValueError(msg)
        else:
            self._num_rings = len(self._universes)
        # Make sure there are the correct number of elements in each ring.
        if self.ndim == 3:
            for axial_slice in self._universes:
                # Check the center ring.
                if len(axial_slice[-1]) != 1:
                    msg = 'HexLattice ID={0:d} has the wrong number of ' \
                          'elements in the innermost ring. Only 1 element is ' \
                          'allowed in the innermost ring.'.format(self._id)
                    raise ValueError(msg)
                # Check the outer rings.
                for r in range(self._num_rings-1):
                    if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
                        msg = 'HexLattice ID={0:d} has the wrong number of ' \
                              'elements in ring number {1:d} (counting from the '\
                              'outermost ring). This ring should have {2:d} ' \
                              'elements.'.format(self._id, r,
                                                 6*(self._num_rings - 1 - r))
                        raise ValueError(msg)
        else:
            axial_slice = self._universes
            # Check the center ring.
            if len(axial_slice[-1]) != 1:
                msg = 'HexLattice ID={0:d} has the wrong number of ' \
                      'elements in the innermost ring. Only 1 element is ' \
                      'allowed in the innermost ring.'.format(self._id)
                raise ValueError(msg)
            # Check the outer rings.
            for r in range(self._num_rings-1):
                if len(axial_slice[r]) != 6*(self._num_rings - 1 - r):
                    msg = 'HexLattice ID={0:d} has the wrong number of ' \
                          'elements in ring number {1:d} (counting from the '\
                          'outermost ring). This ring should have {2:d} ' \
                          'elements.'.format(self._id, r,
                                             6*(self._num_rings - 1 - r))
                    raise ValueError(msg)
    def find_element(self, point):
        """Return (index, local coordinates) of the hex element containing
        *point*, with the index in the internal skewed basis."""
        # Convert coordinates to skewed bases
        x = point[0] - self.center[0]
        y = point[1] - self.center[1]
        if self._num_axial is None:
            iz = 1
        else:
            z = point[2] - self.center[2]
            iz = floor(z/self.pitch[1] + 0.5*self.num_axial)
        if self._orientation == 'x':
            alpha = y - x*sqrt(3.)
            i1 = floor(-alpha/(sqrt(3.0) * self.pitch[0]))
            i2 = floor(y/(sqrt(0.75) * self.pitch[0]))
        else:
            alpha = y - x/sqrt(3.)
            i1 = floor(x/(sqrt(0.75) * self.pitch[0]))
            i2 = floor(alpha/self.pitch[0])
        # Check four lattice elements to see which one is closest based on local
        # coordinates
        indices = [(i1, i2, iz), (i1 + 1, i2, iz), (i1, i2 + 1, iz),
                   (i1 + 1, i2 + 1, iz)]
        d_min = np.inf
        for idx in indices:
            p = self.get_local_coordinates(point, idx)
            # Closest in the x-y plane (squared distance; z is ignored)
            d = p[0]**2 + p[1]**2
            if d < d_min:
                d_min = d
                idx_min = idx
                p_min = p
        return idx_min, p_min
    def get_local_coordinates(self, point, idx):
        """Convert a global *point* into coordinates relative to the center
        of the hex element at skewed index *idx*."""
        if self._orientation == 'x':
            x = point[0] - (self.center[0] + (idx[0] + 0.5*idx[1])*self.pitch[0])
            y = point[1] - (self.center[1] + sqrt(0.75)*self.pitch[0]*idx[1])
        else:
            x = point[0] - (self.center[0] + sqrt(0.75)*self.pitch[0]*idx[0])
            y = point[1] - (self.center[1] + (0.5*idx[0] + idx[1])*self.pitch[0])
        if self._num_axial is None:
            # 2D lattice: z passes through unchanged
            z = point[2]
        else:
            z = point[2] - (self.center[2] + (idx[2] + 0.5 - 0.5*self.num_axial) *
                            self.pitch[1])
        return (x, y, z)
    def get_universe_index(self, idx):
        """Map a skewed (x, alpha[, z]) index to an index into the ragged
        universes structure: (i_ring, i_within) or (z, i_ring, i_within)."""
        # First we determine which ring the index corresponds to.
        x = idx[0]
        a = idx[1]
        z = -a - x
        g = max(abs(x), abs(a), abs(z))
        # Next we use a clever method to figure out where along the ring we are.
        i_ring = self._num_rings - 1 - g
        if x >= 0:
            if a >= 0:
                i_within = x
            else:
                i_within = 2*g + z
        else:
            if a <= 0:
                i_within = 3*g - x
            else:
                i_within = 5*g - z
        # The 'x' orientation starts counting one sextant earlier.
        if self._orientation == 'x' and g > 0:
            i_within = (i_within + 5*g) % (6*g)
        if self.num_axial is None:
            return (i_ring, i_within)
        else:
            return (idx[2], i_ring, i_within)
def is_valid_index(self, idx):
x = idx[0]
y = idx[1]
z = 0 - y - x
g = max(abs(x), abs(y), abs(z))
if self.num_axial is None:
return g < self.num_rings
else:
return g < self.num_rings and 0 <= idx[2] < self.num_axial
    def create_xml_subelement(self, xml_element, memo=None):
        """Append a ``<hex_lattice>`` XML subelement for this lattice (and,
        recursively, its universes) to *xml_element*.

        *memo* is a set of already-exported objects used to avoid emitting
        the same lattice or universe twice.
        """
        # If this subelement has already been written, return
        if memo and self in memo:
            return
        if memo is not None:
            memo.add(self)
        lattice_subelement = ET.Element("hex_lattice")
        lattice_subelement.set("id", str(self._id))
        if len(self._name) > 0:
            lattice_subelement.set("name", str(self._name))
        # Export the Lattice cell pitch
        pitch = ET.SubElement(lattice_subelement, "pitch")
        pitch.text = ' '.join(map(str, self._pitch))
        # Export the Lattice outer Universe (if specified)
        if self._outer is not None:
            outer = ET.SubElement(lattice_subelement, "outer")
            outer.text = str(self._outer._id)
            self._outer.create_xml_subelement(xml_element, memo)
        lattice_subelement.set("n_rings", str(self._num_rings))
        # If orientation is "x" export it to XML
        if self._orientation == 'x':
            lattice_subelement.set("orientation", "x")
        if self._num_axial is not None:
            lattice_subelement.set("n_axial", str(self._num_axial))
        # Export Lattice cell center
        center = ET.SubElement(lattice_subelement, "center")
        center.text = ' '.join(map(str, self._center))
        # Export the Lattice nested Universe IDs.
        # 3D Lattices
        if self._num_axial is not None:
            slices = []
            for z in range(self._num_axial):
                # Initialize the center universe.
                universe = self._universes[z][-1][0]
                universe.create_xml_subelement(xml_element, memo)
                # Initialize the remaining universes.
                for r in range(self._num_rings-1):
                    for theta in range(6*(self._num_rings - 1 - r)):
                        universe = self._universes[z][r][theta]
                        universe.create_xml_subelement(xml_element, memo)
                # Get a string representation of the universe IDs.
                slices.append(self._repr_axial_slice(self._universes[z]))
            # Collapse the list of axial slices into a single string.
            universe_ids = '\n'.join(slices)
        # 2D Lattices
        else:
            # Initialize the center universe.
            universe = self._universes[-1][0]
            universe.create_xml_subelement(xml_element, memo)
            # Initialize the remaining universes.
            for r in range(self._num_rings - 1):
                for theta in range(6*(self._num_rings - 1 - r)):
                    universe = self._universes[r][theta]
                    universe.create_xml_subelement(xml_element, memo)
            # Get a string representation of the universe IDs.
            universe_ids = self._repr_axial_slice(self._universes)
        universes = ET.SubElement(lattice_subelement, "universes")
        universes.text = '\n' + universe_ids
        # Append the XML subelement for this Lattice to the XML element
        xml_element.append(lattice_subelement)
    @classmethod
    def from_xml_element(cls, elem, get_universe):
        """Build a hexagonal lattice from a ``<hex_lattice>`` XML element.

        *get_universe* maps a universe ID to a universe object.  The flat
        universe array in the XML is walked row-by-row and scattered into
        the ragged ring structure via ``get_universe_index``.
        """
        lat_id = int(get_text(elem, 'id'))
        name = get_text(elem, 'name')
        lat = cls(lat_id, name)
        lat.center = [float(i) for i in get_text(elem, 'center').split()]
        lat.pitch = [float(i) for i in get_text(elem, 'pitch').split()]
        lat.orientation = get_text(elem, 'orientation', 'y')
        outer = get_text(elem, 'outer')
        if outer is not None:
            lat.outer = get_universe(int(outer))
        # Get nested lists of universes
        lat._num_rings = n_rings = int(get_text(elem, 'n_rings'))
        lat._num_axial = n_axial = int(get_text(elem, 'n_axial', 1))
        # Create empty nested lists for one axial level
        univs = [[None for _ in range(max(6*(n_rings - 1 - r), 1))]
                 for r in range(n_rings)]
        if n_axial > 1:
            univs = [deepcopy(univs) for i in range(n_axial)]
        # Get flat array of universes
        uarray = np.array([get_universe(int(i)) for i in
                           get_text(elem, 'universes').split()])
        # Fill nested lists
        j = 0
        for z in range(n_axial):
            # Get list for a single axial level
            axial_level = univs[z] if n_axial > 1 else univs
            if lat.orientation == 'y':
                # Start iterating from top
                x, alpha = 0, n_rings - 1
                while True:
                    # Set entry in list based on (x,alpha,z) coordinates
                    _, i_ring, i_within = lat.get_universe_index((x, alpha, z))
                    axial_level[i_ring][i_within] = uarray[j]
                    # Move to the right
                    x += 2
                    alpha -= 1
                    if not lat.is_valid_index((x, alpha, z)):
                        # Move down in y direction
                        alpha += x - 1
                        x = 1 - x
                        if not lat.is_valid_index((x, alpha, z)):
                            # Move to the right
                            x += 2
                            alpha -= 1
                            if not lat.is_valid_index((x, alpha, z)):
                                # Reached the bottom
                                break
                    j += 1
            else:
                # Start iterating from top
                alpha, y = 1 - n_rings, n_rings - 1
                while True:
                    # Set entry in list based on (alpha,y,z) coordinates
                    _, i_ring, i_within = lat.get_universe_index((alpha, y, z))
                    axial_level[i_ring][i_within] = uarray[j]
                    # Move to the right
                    alpha += 1
                    if not lat.is_valid_index((alpha, y, z)):
                        # Move down to next row
                        alpha = 1 - n_rings
                        y -= 1
                        # Check if we've reached the bottom
                        if y == -n_rings:
                            break
                        while not lat.is_valid_index((alpha, y, z)):
                            alpha += 1
                    j += 1
        lat.universes = univs
        return lat
def _repr_axial_slice(self, universes):
if self._orientation == 'x':
return self._repr_axial_slice_x(universes)
else:
return self._repr_axial_slice_y(universes)
    def _repr_axial_slice_x(self, universes):
        """Return an ASCII layout of universe IDs for one axial slice of an
        'x'-oriented lattice."""
        # Pad every ID to the width of the widest one, centered.
        largest_id = max([max([univ._id for univ in ring])
                          for ring in universes])
        n_digits = len(str(largest_id))
        pad = ' '*n_digits
        id_form = '{: ^' + str(n_digits) + 'd}'
        rows = [[] for i in range(2*self._num_rings - 1)]
        middle = self._num_rings - 1
        # Place the center universe on the middle row.
        universe = universes[-1][0]
        rows[middle] = [id_form.format(universe._id)]
        for r in range(1, self._num_rings):
            r_prime = self._num_rings - 1 - r
            theta = 0
            y = middle
            # Walk the six sides of ring r, appending or prepending each
            # universe ID to the appropriate row.
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                y += 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                y -= 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                y -= 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                y += 1
                theta += 1
        rows = [pad.join(x) for x in rows]
        # Indent rows away from the middle to produce the hexagonal outline.
        for y in range(self._num_rings - 1):
            rows[y] = (self._num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (self._num_rings - 1 - y)*pad + rows[-1 - y]
        universe_ids = '\n'.join(rows)
        return universe_ids
    def _repr_axial_slice_y(self, universes):
        """Return an ASCII layout of universe IDs for one axial slice of a
        'y'-oriented lattice."""
        # Pad every ID to the width of the widest one, centered.
        largest_id = max([max([univ._id for univ in ring])
                          for ring in universes])
        n_digits = len(str(largest_id))
        pad = ' '*n_digits
        id_form = '{: ^' + str(n_digits) + 'd}'
        rows = [[] for i in range(1 + 4 * (self._num_rings-1))]
        middle = 2 * (self._num_rings - 1)
        # Place the center universe on the middle row.
        universe = universes[-1][0]
        rows[middle] = [id_form.format(universe._id)]
        for r in range(1, self._num_rings):
            r_prime = self._num_rings - 1 - r
            theta = 0
            y = middle + 2*r
            # Walk the six sides of ring r, appending or prepending each
            # universe ID to the appropriate row.
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                y -= 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                y -= 2
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].append(id_form.format(universe._id))
                y -= 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                y += 1
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                y += 2
                theta += 1
            for i in range(r):
                universe = universes[r_prime][theta]
                rows[y].insert(0, id_form.format(universe._id))
                y += 1
                theta += 1
        rows = [pad.join(x) for x in rows[::-1]]
        # Indent rows away from the middle to produce the hexagonal outline.
        for y in range(self._num_rings - 1):
            rows[y] = (self._num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (self._num_rings - 1 - y)*pad + rows[-1 - y]
        # Offset every other row so columns interleave correctly.
        for y in range(self._num_rings % 2, self._num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]
        universe_ids = '\n'.join(rows)
        return universe_ids
    @staticmethod
    def _show_indices_y(num_rings):
        """Return a diagram of (ring, position) index labels for a
        'y'-oriented lattice with *num_rings* rings."""
        # Width of the widest "(ring,index)" label, used for padding.
        largest_index = 6*(num_rings - 1)
        n_digits_index = len(str(largest_index))
        n_digits_ring = len(str(num_rings - 1))
        str_form = '({{:{}}},{{:{}}})'.format(n_digits_ring, n_digits_index)
        pad = ' '*(n_digits_index + n_digits_ring + 3)
        rows = [[] for i in range(1 + 4 * (num_rings-1))]
        middle = 2 * (num_rings - 1)
        # Center element label.
        rows[middle] = [str_form.format(num_rings - 1, 0)]
        for r in range(1, num_rings):
            r_prime = num_rings - 1 - r
            theta = 0
            y = middle + 2*r
            # Walk the six sides of ring r (same traversal as the
            # _repr_axial_slice_y layout).
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                y -= 2
                theta += 1
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 2
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                y += 1
                theta += 1
        rows = [pad.join(x) for x in rows[::-1]]
        # Indent rows away from the middle to form the hexagonal outline.
        for y in range(num_rings - 1):
            rows[y] = (num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (num_rings - 1 - y)*pad + rows[-1 - y]
        # Offset every other row so columns interleave correctly.
        for y in range(num_rings % 2, num_rings, 2):
            rows[middle + y] = pad + rows[middle + y]
            if y != 0:
                rows[middle - y] = pad + rows[middle - y]
        return '\n'.join(rows)
    @staticmethod
    def _show_indices_x(num_rings):
        """Return a diagram of (ring, position) index labels for an
        'x'-oriented lattice with *num_rings* rings."""
        # Width of the widest "(ring,index)" label, used for padding.
        largest_index = 6*(num_rings - 1)
        n_digits_index = len(str(largest_index))
        n_digits_ring = len(str(num_rings - 1))
        str_form = '({{:{}}},{{:{}}})'.format(n_digits_ring, n_digits_index)
        pad = ' '*(n_digits_index + n_digits_ring + 3)
        rows = [[] for i in range(2*num_rings - 1)]
        middle = num_rings - 1
        # Center element label.
        rows[middle] = [str_form.format(num_rings - 1, 0)]
        for r in range(1, num_rings):
            r_prime = num_rings - 1 - r
            theta = 0
            y = middle
            # Walk the six sides of ring r (same traversal as the
            # _repr_axial_slice_x layout).
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                y += 1
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                rows[y].insert(0, str_form.format(r_prime, theta))
                y -= 1
                theta += 1
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                theta += 1
            for i in range(r):
                rows[y].append(str_form.format(r_prime, theta))
                y += 1
                theta += 1
        rows = [pad.join(x) for x in rows]
        # Indent rows away from the middle to form the hexagonal outline.
        for y in range(num_rings - 1):
            rows[y] = (num_rings - 1 - y)*pad + rows[y]
            rows[-1 - y] = (num_rings - 1 - y)*pad + rows[-1 - y]
        # Rows are separated by a blank line for the flat-top layout.
        return '\n\n'.join(rows)
@staticmethod
def show_indices(num_rings, orientation="y"):
if orientation == 'x':
return HexLattice._show_indices_x(num_rings)
else:
return HexLattice._show_indices_y(num_rings)
    @classmethod
    def from_hdf5(cls, group, universes):
        """Build a hexagonal lattice from an HDF5 group.

        *universes* maps universe IDs to universe objects.  The dense
        ID array in the file is walked ring-by-ring (outermost first) to
        rebuild the ragged nested-list layout used by the Python API.
        """
        n_rings = group['n_rings'][()]
        n_axial = group['n_axial'][()]
        center = group['center'][()]
        pitch = group['pitch'][()]
        outer = group['outer'][()]
        if 'orientation' in group:
            orientation = group['orientation'][()].decode()
        else:
            orientation = "y"
        universe_ids = group['universes'][()]
        lattice_id = int(group.name.split('/')[-1].lstrip('lattice '))
        name = group['name'][()].decode() if 'name' in group else ''
        lattice = openmc.HexLattice(lattice_id, name)
        lattice.center = center
        lattice.pitch = pitch
        lattice.orientation = orientation
        # A negative outer id means no outer universe was specified.
        if outer >= 0:
            lattice.outer = universes[outer]
        if orientation == "y":
            # Convert the array, indexed by skewed
            # (x, alpha, z), to the Python API's format of a ragged nested
            # list of rings (outermost ring first).
            uarray = []
            for z in range(n_axial):
                uarray.append([])
                x = n_rings - 1
                a = 2*n_rings - 2
                # Walk each ring clockwise from the top, six sides per ring.
                for r in range(n_rings - 1, 0, -1):
                    uarray[-1].append([])
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1
                        a -= 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a -= 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x -= 1
                        a += 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        a += 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, a, x])
                        x += 1
                    # Step inward to the start of the next ring.
                    a -= 1
                    # Resolve collected IDs into Universe objects.
                    uarray[-1][-1] = [universes[u_id]
                                      for u_id in uarray[-1][-1]]
                # Center element of this axial level.
                u_id = universe_ids[z, a, x]
                uarray[-1].append([universes[u_id]])
        else:
            # Convert the array, indexed by skewed
            # (alpha, y, z), to the Python API's format of a ragged nested
            # list of rings (outermost ring first).
            uarray = []
            for z in range(n_axial):
                uarray.append([])
                a = 2*n_rings - 2
                y = n_rings - 1
                # Walk each ring clockwise from the top, six sides per ring.
                for r in range(n_rings - 1, 0, -1):
                    uarray[-1].append([])
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        y -= 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a -= 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a -= 1
                        y += 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        y += 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a += 1
                    for i in range(r):
                        uarray[-1][-1].append(universe_ids[z, y, a])
                        a += 1
                        y -= 1
                    # Step inward to the start of the next ring.
                    a -= 1
                    # Resolve collected IDs into Universe objects.
                    uarray[-1][-1] = [universes[u_id]
                                      for u_id in uarray[-1][-1]]
                # Center element of this axial level.
                u_id = universe_ids[z, y, a]
                uarray[-1].append([universes[u_id]])
        # A one-entry pitch means a 2D lattice: drop the axial nesting.
        if len(pitch) == 2:
            lattice.universes = uarray
        else:
            lattice.universes = uarray[0]
        return lattice
| true | true |
1c31205fde9e085f681e8304eadebb64056c8636 | 268 | py | Python | setup.py | onshoremanover/dist | 96a52b23e6e651d6d6b73614c73a5aa0d0c4bd14 | [
"MIT"
] | 1 | 2021-11-04T14:02:57.000Z | 2021-11-04T14:02:57.000Z | setup.py | onshoremanover/dcfe | 65256aac5a3212a98896cbf0d04533af83bb4ce8 | [
"MIT"
] | null | null | null | setup.py | onshoremanover/dcfe | 65256aac5a3212a98896cbf0d04533af83bb4ce8 | [
"MIT"
] | null | null | null |
from setuptools import setup
# Package metadata for 'dcfe'; exposes a single console entry point that
# dispatches to dcfe/__main__.py's main().
setup(
  name = 'dcfe',
  version = '0.1.4',
  packages = ['dcfe'],
  entry_points = {
      'console_scripts': [
          'dcfe = dcfe.__main__:main'
      ]
  })
| 20.615385 | 43 | 0.399254 |
from setuptools import setup
setup(
name = 'dcfe',
version = '0.1.4',
packages = ['dcfe'],
entry_points = {
'console_scripts': [
'dcfe = dcfe.__main__:main'
]
})
| true | true |
1c31217294f3c2bc4855de6abfe75bfa6885b338 | 1,356 | py | Python | app/recipe/serializers.py | garden117/recipe-app-api | ce58a993cac38660ddd25b99ae1e6cffeff537eb | [
"MIT"
] | null | null | null | app/recipe/serializers.py | garden117/recipe-app-api | ce58a993cac38660ddd25b99ae1e6cffeff537eb | [
"MIT"
] | null | null | null | app/recipe/serializers.py | garden117/recipe-app-api | ce58a993cac38660ddd25b99ae1e6cffeff537eb | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
    """Serialize Tag objects; the id is generated server-side and read-only."""
    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
    """Serialize Ingredient objects; the id is generated server-side and
    read-only."""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
    """Serialize Recipe objects for list views.

    Related ingredients and tags are represented by primary keys only;
    the detail serializer below nests the full objects instead.
    """
    ingredients = serializers.PrimaryKeyRelatedField(many=True, queryset=Ingredient.objects.all())
    tags = serializers.PrimaryKeyRelatedField(many=True, queryset=Tag.objects.all())
    class Meta:
        model = Recipe
        fields = ('id', 'title', 'time_minutes', 'price', 'link', 'ingredients', 'tags')
        read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
    """Serialize a recipe detail view: nests full (read-only) ingredient
    and tag objects instead of primary keys."""
    ingredients = IngredientSerializer(many=True, read_only=True)
    tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
    """Serializer used only for uploading an image to a recipe."""
    class Meta:
        model = Recipe
        fields = ('id', 'image')
        read_only_fields = ('id',)
| 28.25 | 98 | 0.676991 | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
ingredients = serializers.PrimaryKeyRelatedField(many=True, queryset=Ingredient.objects.all())
tags = serializers.PrimaryKeyRelatedField(many=True, queryset=Tag.objects.all())
class Meta:
model = Recipe
fields = ('id', 'title', 'time_minutes', 'price', 'link', 'ingredients', 'tags')
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
| true | true |
1c31223d26a10e794076573f547fb1b5b9a01c27 | 13,816 | py | Python | mypy/stubdoc.py | Phlogistique/mypy | eea4c76de4a67a36e3a2293eae9a2e775c636e1d | [
"PSF-2.0"
] | 12,496 | 2016-02-19T13:38:26.000Z | 2022-03-31T23:56:19.000Z | mypy/stubdoc.py | Phlogistique/mypy | eea4c76de4a67a36e3a2293eae9a2e775c636e1d | [
"PSF-2.0"
] | 9,429 | 2016-02-19T13:41:32.000Z | 2022-03-31T23:29:38.000Z | mypy/stubdoc.py | Zeckie/baselinedmypy | 142c896a7ec0a10697375833fd897b293a748699 | [
"PSF-2.0"
] | 2,770 | 2016-02-19T16:18:19.000Z | 2022-03-31T08:12:49.000Z | """Parsing/inferring signatures from documentation.
This module provides several functions to generate better stubs using
docstrings and Sphinx docs (.rst files).
"""
import re
import io
import contextlib
import tokenize
from typing import (
Optional, MutableMapping, MutableSequence, List, Sequence, Tuple, NamedTuple, Any
)
from typing_extensions import Final
# Type alias for signatures strings in format ('func_name', '(arg, opt_arg=False)').
Sig = Tuple[str, str]
# A (possibly dotted) identifier, optionally with bracketed generic arguments.
_TYPE_RE: Final = re.compile(r"^[a-zA-Z_][\w\[\], ]*(\.[a-zA-Z_][\w\[\], ]*)*$")
# An argument name, optionally prefixed by '*' or '**'.
_ARG_NAME_RE: Final = re.compile(r"\**[A-Za-z_][A-Za-z0-9_]*$")
def is_valid_type(s: str) -> bool:
    """Heuristically decide whether *s* could be a valid type annotation."""
    # Boolean literals and the conventional name 'retval' are values, not types.
    if s in ('True', 'False', 'retval'):
        return False
    # A comma without any bracket means a bare tuple of values, not a type.
    if ',' in s and '[' not in s:
        return False
    return bool(_TYPE_RE.match(s))
class ArgSig:
    """Description of one argument in a parsed function signature.

    Attributes:
        name: argument name (may carry a '*' or '**' prefix).
        type: optional annotation as a string; validated on construction.
        default: whether the argument carries a default value.
    """
    def __init__(self, name: str, type: Optional[str] = None, default: bool = False):
        self.name = name
        # Reject annotations that cannot possibly be valid types up front.
        if type and not is_valid_type(type):
            raise ValueError("Invalid type: " + type)
        self.type = type
        self.default = default
    def __repr__(self) -> str:
        return f"ArgSig(name={self.name!r}, type={self.type!r}, default={self.default!r})"
    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, ArgSig):
            return False
        return (self.name, self.type, self.default) == (other.name, other.type, other.default)
# A completely parsed signature: the function name, its arguments in order,
# and the return type as a string ('Any' when no return annotation was found).
FunctionSig = NamedTuple('FunctionSig', [
    ('name', str),
    ('args', List[ArgSig]),
    ('ret_type', str)
])
# States of the docstring parser.
STATE_INIT: Final = 1  # at top level, outside any signature
STATE_FUNCTION_NAME: Final = 2  # just consumed the target function's name
STATE_ARGUMENT_LIST: Final = 3  # inside the parenthesized argument list
STATE_ARGUMENT_TYPE: Final = 4  # after ':' within an argument
STATE_ARGUMENT_DEFAULT: Final = 5  # after '=' within an argument
STATE_RETURN_VALUE: Final = 6  # after '->' following the closing paren
STATE_OPEN_BRACKET: Final = 7  # For generic types.
class DocStringParser:
    """Parse function signatures in documentation.

    Feed tokens (from tokenize) one at a time through add_token(); every
    complete, well-formed signature of the target function is appended to
    self.signatures.  Implemented as a small pushdown automaton whose stack
    lives in self.state.
    """
    def __init__(self, function_name: str) -> None:
        # Only search for signatures of function with this name.
        self.function_name = function_name
        self.state = [STATE_INIT]  # state stack; the top is the current state
        self.accumulator = ""  # raw text gathered since the last delimiter
        self.arg_type: Optional[str] = None  # type of the argument in progress
        self.arg_name = ""  # name of the argument in progress
        self.arg_default: Optional[str] = None  # default expr of that argument
        self.ret_type = "Any"  # return type for the signature in progress
        self.found = False  # True while inside a candidate signature
        self.args: List[ArgSig] = []  # arguments collected for the current signature
        # Valid signatures found so far.
        self.signatures: List[FunctionSig] = []
    def add_token(self, token: tokenize.TokenInfo) -> None:
        """Process next token from the token stream."""
        # Target function name seen at top level: a signature may begin.
        if (token.type == tokenize.NAME and token.string == self.function_name and
                self.state[-1] == STATE_INIT):
            self.state.append(STATE_FUNCTION_NAME)
        # Name immediately followed by '(': the signature really starts.
        elif (token.type == tokenize.OP and token.string == '(' and
              self.state[-1] == STATE_FUNCTION_NAME):
            self.state.pop()
            self.accumulator = ""
            self.found = True
            self.state.append(STATE_ARGUMENT_LIST)
        elif self.state[-1] == STATE_FUNCTION_NAME:
            # Reset state, function name not followed by '('.
            self.state.pop()
        # Nested bracket opens inside a signature: accumulate verbatim.
        elif (token.type == tokenize.OP and token.string in ('[', '(', '{') and
              self.state[-1] != STATE_INIT):
            self.accumulator += token.string
            self.state.append(STATE_OPEN_BRACKET)
        elif (token.type == tokenize.OP and token.string in (']', ')', '}') and
              self.state[-1] == STATE_OPEN_BRACKET):
            self.accumulator += token.string
            self.state.pop()
        # ':' separates an argument name from its type.
        elif (token.type == tokenize.OP and token.string == ':' and
              self.state[-1] == STATE_ARGUMENT_LIST):
            self.arg_name = self.accumulator
            self.accumulator = ""
            self.state.append(STATE_ARGUMENT_TYPE)
        # '=' introduces a default value (with or without a preceding type).
        elif (token.type == tokenize.OP and token.string == '=' and
              self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_TYPE)):
            if self.state[-1] == STATE_ARGUMENT_TYPE:
                self.arg_type = self.accumulator
                self.state.pop()
            else:
                self.arg_name = self.accumulator
            self.accumulator = ""
            self.state.append(STATE_ARGUMENT_DEFAULT)
        # ',' or ')' finishes the argument currently being parsed.
        elif (token.type == tokenize.OP and token.string in (',', ')') and
              self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_DEFAULT,
                                 STATE_ARGUMENT_TYPE)):
            if self.state[-1] == STATE_ARGUMENT_DEFAULT:
                self.arg_default = self.accumulator
                self.state.pop()
            elif self.state[-1] == STATE_ARGUMENT_TYPE:
                self.arg_type = self.accumulator
                self.state.pop()
            elif self.state[-1] == STATE_ARGUMENT_LIST:
                self.arg_name = self.accumulator
                if not (token.string == ')' and self.accumulator.strip() == '') \
                        and not _ARG_NAME_RE.match(self.arg_name):
                    # Invalid argument name.
                    self.reset()
                    return
            if token.string == ')':
                self.state.pop()
            # arg_name is empty when there are no args. e.g. func()
            if self.arg_name:
                try:
                    self.args.append(ArgSig(name=self.arg_name, type=self.arg_type,
                                            default=bool(self.arg_default)))
                except ValueError:
                    # wrong type, use Any
                    self.args.append(ArgSig(name=self.arg_name, type=None,
                                            default=bool(self.arg_default)))
            self.arg_name = ""
            self.arg_type = None
            self.arg_default = None
            self.accumulator = ""
        # '->' after the argument list starts the return type.
        elif token.type == tokenize.OP and token.string == '->' and self.state[-1] == STATE_INIT:
            self.accumulator = ""
            self.state.append(STATE_RETURN_VALUE)
        # ENDMARKER is necessary for python 3.4 and 3.5.
        elif (token.type in (tokenize.NEWLINE, tokenize.ENDMARKER) and
              self.state[-1] in (STATE_INIT, STATE_RETURN_VALUE)):
            if self.state[-1] == STATE_RETURN_VALUE:
                if not is_valid_type(self.accumulator):
                    self.reset()
                    return
                self.ret_type = self.accumulator
                self.accumulator = ""
                self.state.pop()
            if self.found:
                self.signatures.append(FunctionSig(name=self.function_name, args=self.args,
                                                   ret_type=self.ret_type))
                self.found = False
            self.args = []
            self.ret_type = 'Any'
            # Leave state as INIT.
        else:
            # Any other token is part of a name/type/default: keep accumulating.
            self.accumulator += token.string
    def reset(self) -> None:
        # Abandon the signature being parsed and return to the initial state.
        self.state = [STATE_INIT]
        self.args = []
        self.found = False
        self.accumulator = ""
    def get_signatures(self) -> List[FunctionSig]:
        """Return sorted copy of the list of signatures found so far."""
        def has_arg(name: str, signature: FunctionSig) -> bool:
            return any(x.name == name for x in signature.args)
        def args_kwargs(signature: FunctionSig) -> bool:
            return has_arg('*args', signature) and has_arg('**kwargs', signature)
        # Move functions with (*args, **kwargs) in their signature to last place.
        return list(sorted(self.signatures, key=lambda x: 1 if args_kwargs(x) else 0))
def infer_sig_from_docstring(docstr: Optional[str], name: str) -> Optional[List[FunctionSig]]:
    """Convert function signature to list of FunctionSig.

    Look for signatures of the function *name* in *docstr*.  A signature is a
    string of the format <function_name>(<signature>) -> <return type>, or
    possibly without the return type.

    Returns an empty list when no signature is found, one signature in the
    typical case, or multiple signatures if the docstring specifies several
    (e.g. for overloaded functions).  Return None if the docstring is empty.

    Arguments:
        * docstr: docstring
        * name: name of function for which signatures are to be found
    """
    if not docstr:
        return None
    state = DocStringParser(name)
    # Return all found signatures, even if there is a parse error after some are found.
    with contextlib.suppress(tokenize.TokenError):
        try:
            tokens = tokenize.tokenize(io.BytesIO(docstr.encode('utf-8')).readline)
            for token in tokens:
                state.add_token(token)
        except IndentationError:
            return None
    sigs = state.get_signatures()

    def is_unique_args(sig: FunctionSig) -> bool:
        """Return True if the signature's argument names are all distinct."""
        # Set comprehension replaces the redundant set(<genexpr>) form.
        return len(sig.args) == len({arg.name for arg in sig.args})

    # Return only signatures that have unique argument names. Mypy fails on non-unique arg names.
    return [sig for sig in sigs if is_unique_args(sig)]
def infer_arg_sig_from_anon_docstring(docstr: str) -> List[ArgSig]:
    """Convert signature in form of "(self: TestClass, arg0: str='ada')" to List[ArgSig]."""
    # Prefix a dummy function name so the generic signature parser can be reused.
    parsed = infer_sig_from_docstring("stub" + docstr, "stub")
    return parsed[0].args if parsed else []
def infer_ret_type_sig_from_docstring(docstr: str, name: str) -> Optional[str]:
    """Convert signature in form of "func(self: TestClass, arg0) -> int" to its return type."""
    sigs = infer_sig_from_docstring(docstr, name)
    return sigs[0].ret_type if sigs else None
def infer_ret_type_sig_from_anon_docstring(docstr: str) -> Optional[str]:
    """Convert signature in form of "(self: TestClass, arg0) -> int" to its return type."""
    # Give the anonymous signature a dummy name so the named parser applies.
    anon = "stub" + docstr.strip()
    return infer_ret_type_sig_from_docstring(anon, "stub")
def parse_signature(sig: str) -> Optional[Tuple[str, List[str], List[str]]]:
    """Split a function signature into its name, positional and optional arguments.

    The expected format is "func_name(arg, opt_arg=False)".  Return the
    unqualified function name plus the lists of positional and optional
    argument names, or None if *sig* does not look like a signature.
    """
    match = re.match(r'([.a-zA-Z0-9_]+)\(([^)]*)\)', sig)
    if match is None:
        return None
    # Keep only the last component of a dotted name.
    name = match.group(1).split('.')[-1]
    arg_string = match.group(2)
    if not arg_string.strip():
        # No arguments at all.
        return name, [], []
    args = [part.strip() for part in arg_string.split(',')]
    positional: List[str] = []
    optional: List[str] = []
    idx = 0
    # Leading arguments are positional until a '[' or '=' marker appears.
    while idx < len(args):
        cur = args[idx]
        if cur.startswith('[') or '=' in cur:
            break
        positional.append(cur.rstrip('['))
        idx += 1
        if cur.endswith('['):
            break
    # Everything that remains is optional; strip bracket/default syntax.
    while idx < len(args):
        optional.append(args[idx].strip('[]').split('=')[0])
        idx += 1
    return name, positional, optional
def build_signature(positional: Sequence[str],
                    optional: Sequence[str]) -> str:
    """Build a function signature from lists of positional and optional argument names."""
    parts = list(positional)
    # Star-args keep their spelling; other optionals get an elided '=...' default.
    parts.extend(arg if arg.startswith('*') else '%s=...' % arg for arg in optional)
    rendered = '(%s)' % ', '.join(parts)
    # Ad-hoc fix: a bare (self)-only signature collapses to the empty string.
    return rendered.replace('(self)', '')
def parse_all_signatures(lines: Sequence[str]) -> Tuple[List[Sig], List[Sig]]:
    """Parse all signatures in a given reST document.

    Return sorted lists of found signatures for functions and for classes.
    """
    func_sigs: List[Sig] = []
    class_sigs: List[Sig] = []
    directive = re.compile(r'\.\. *(function|method|class) *:: *[a-zA-Z_]')
    for raw_line in lines:
        stripped = raw_line.strip()
        m = directive.match(stripped)
        if not m:
            continue
        parsed = parse_signature(stripped.split('::')[1].strip())
        if not parsed:
            continue
        name, fixed, optional = parsed
        # Class directives go to the class list; function/method to the other.
        target = class_sigs if m.group(1) == 'class' else func_sigs
        target.append((name, build_signature(fixed, optional)))
    return sorted(func_sigs), sorted(class_sigs)
def find_unique_signatures(sigs: Sequence[Sig]) -> List[Sig]:
    """Keep only names whose collected signatures all agree.

    A name seen with two *different* signatures is dropped entirely;
    repeated identical signatures collapse to a single entry.
    """
    grouped: MutableMapping[str, List[str]] = {}
    for name, sig in sigs:
        grouped.setdefault(name, []).append(sig)
    return sorted((name, variants[0])
                  for name, variants in grouped.items()
                  if len(set(variants)) == 1)
def infer_prop_type_from_docstring(docstr: Optional[str]) -> Optional[str]:
    """Check for a Google/NumPy style docstring type annotation of a property.

    The docstring has the format "<type>: <description>".  The type part may
    contain dots (qualified names), brackets (generics such as List[int]) and
    commas/spaces (e.g. Tuple[int, int]).
    """
    if not docstr:
        return None
    match = re.match(r'^([a-zA-Z0-9_, \.\[\]]*): ', docstr)
    if match is None:
        return None
    return match.group(1)
| 37.040214 | 98 | 0.585191 | import re
import io
import contextlib
import tokenize
from typing import (
Optional, MutableMapping, MutableSequence, List, Sequence, Tuple, NamedTuple, Any
)
from typing_extensions import Final
Sig = Tuple[str, str]
_TYPE_RE: Final = re.compile(r"^[a-zA-Z_][\w\[\], ]*(\.[a-zA-Z_][\w\[\], ]*)*$")
_ARG_NAME_RE: Final = re.compile(r"\**[A-Za-z_][A-Za-z0-9_]*$")
def is_valid_type(s: str) -> bool:
if s in ('True', 'False', 'retval'):
return False
if ',' in s and '[' not in s:
return False
return _TYPE_RE.match(s) is not None
class ArgSig:
def __init__(self, name: str, type: Optional[str] = None, default: bool = False):
self.name = name
if type and not is_valid_type(type):
raise ValueError("Invalid type: " + type)
self.type = type
self.default = default
def __repr__(self) -> str:
return "ArgSig(name={}, type={}, default={})".format(repr(self.name), repr(self.type),
repr(self.default))
def __eq__(self, other: Any) -> bool:
if isinstance(other, ArgSig):
return (self.name == other.name and self.type == other.type and
self.default == other.default)
return False
FunctionSig = NamedTuple('FunctionSig', [
('name', str),
('args', List[ArgSig]),
('ret_type', str)
])
STATE_INIT: Final = 1
STATE_FUNCTION_NAME: Final = 2
STATE_ARGUMENT_LIST: Final = 3
STATE_ARGUMENT_TYPE: Final = 4
STATE_ARGUMENT_DEFAULT: Final = 5
STATE_RETURN_VALUE: Final = 6
STATE_OPEN_BRACKET: Final = 7
class DocStringParser:
def __init__(self, function_name: str) -> None:
self.function_name = function_name
self.state = [STATE_INIT]
self.accumulator = ""
self.arg_type: Optional[str] = None
self.arg_name = ""
self.arg_default: Optional[str] = None
self.ret_type = "Any"
self.found = False
self.args: List[ArgSig] = []
self.signatures: List[FunctionSig] = []
def add_token(self, token: tokenize.TokenInfo) -> None:
if (token.type == tokenize.NAME and token.string == self.function_name and
self.state[-1] == STATE_INIT):
self.state.append(STATE_FUNCTION_NAME)
elif (token.type == tokenize.OP and token.string == '(' and
self.state[-1] == STATE_FUNCTION_NAME):
self.state.pop()
self.accumulator = ""
self.found = True
self.state.append(STATE_ARGUMENT_LIST)
elif self.state[-1] == STATE_FUNCTION_NAME:
self.state.pop()
elif (token.type == tokenize.OP and token.string in ('[', '(', '{') and
self.state[-1] != STATE_INIT):
self.accumulator += token.string
self.state.append(STATE_OPEN_BRACKET)
elif (token.type == tokenize.OP and token.string in (']', ')', '}') and
self.state[-1] == STATE_OPEN_BRACKET):
self.accumulator += token.string
self.state.pop()
elif (token.type == tokenize.OP and token.string == ':' and
self.state[-1] == STATE_ARGUMENT_LIST):
self.arg_name = self.accumulator
self.accumulator = ""
self.state.append(STATE_ARGUMENT_TYPE)
elif (token.type == tokenize.OP and token.string == '=' and
self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_TYPE)):
if self.state[-1] == STATE_ARGUMENT_TYPE:
self.arg_type = self.accumulator
self.state.pop()
else:
self.arg_name = self.accumulator
self.accumulator = ""
self.state.append(STATE_ARGUMENT_DEFAULT)
elif (token.type == tokenize.OP and token.string in (',', ')') and
self.state[-1] in (STATE_ARGUMENT_LIST, STATE_ARGUMENT_DEFAULT,
STATE_ARGUMENT_TYPE)):
if self.state[-1] == STATE_ARGUMENT_DEFAULT:
self.arg_default = self.accumulator
self.state.pop()
elif self.state[-1] == STATE_ARGUMENT_TYPE:
self.arg_type = self.accumulator
self.state.pop()
elif self.state[-1] == STATE_ARGUMENT_LIST:
self.arg_name = self.accumulator
if not (token.string == ')' and self.accumulator.strip() == '') \
and not _ARG_NAME_RE.match(self.arg_name):
self.reset()
return
if token.string == ')':
self.state.pop()
if self.arg_name:
try:
self.args.append(ArgSig(name=self.arg_name, type=self.arg_type,
default=bool(self.arg_default)))
except ValueError:
self.args.append(ArgSig(name=self.arg_name, type=None,
default=bool(self.arg_default)))
self.arg_name = ""
self.arg_type = None
self.arg_default = None
self.accumulator = ""
elif token.type == tokenize.OP and token.string == '->' and self.state[-1] == STATE_INIT:
self.accumulator = ""
self.state.append(STATE_RETURN_VALUE)
elif (token.type in (tokenize.NEWLINE, tokenize.ENDMARKER) and
self.state[-1] in (STATE_INIT, STATE_RETURN_VALUE)):
if self.state[-1] == STATE_RETURN_VALUE:
if not is_valid_type(self.accumulator):
self.reset()
return
self.ret_type = self.accumulator
self.accumulator = ""
self.state.pop()
if self.found:
self.signatures.append(FunctionSig(name=self.function_name, args=self.args,
ret_type=self.ret_type))
self.found = False
self.args = []
self.ret_type = 'Any'
else:
self.accumulator += token.string
def reset(self) -> None:
self.state = [STATE_INIT]
self.args = []
self.found = False
self.accumulator = ""
def get_signatures(self) -> List[FunctionSig]:
def has_arg(name: str, signature: FunctionSig) -> bool:
return any(x.name == name for x in signature.args)
def args_kwargs(signature: FunctionSig) -> bool:
return has_arg('*args', signature) and has_arg('**kwargs', signature)
return list(sorted(self.signatures, key=lambda x: 1 if args_kwargs(x) else 0))
def infer_sig_from_docstring(docstr: Optional[str], name: str) -> Optional[List[FunctionSig]]:
if not docstr:
return None
state = DocStringParser(name)
with contextlib.suppress(tokenize.TokenError):
try:
tokens = tokenize.tokenize(io.BytesIO(docstr.encode('utf-8')).readline)
for token in tokens:
state.add_token(token)
except IndentationError:
return None
sigs = state.get_signatures()
def is_unique_args(sig: FunctionSig) -> bool:
return len(sig.args) == len(set((arg.name for arg in sig.args)))
return [sig for sig in sigs if is_unique_args(sig)]
def infer_arg_sig_from_anon_docstring(docstr: str) -> List[ArgSig]:
ret = infer_sig_from_docstring("stub" + docstr, "stub")
if ret:
return ret[0].args
return []
def infer_ret_type_sig_from_docstring(docstr: str, name: str) -> Optional[str]:
ret = infer_sig_from_docstring(docstr, name)
if ret:
return ret[0].ret_type
return None
def infer_ret_type_sig_from_anon_docstring(docstr: str) -> Optional[str]:
return infer_ret_type_sig_from_docstring("stub" + docstr.strip(), "stub")
def parse_signature(sig: str) -> Optional[Tuple[str,
List[str],
List[str]]]:
m = re.match(r'([.a-zA-Z0-9_]+)\(([^)]*)\)', sig)
if not m:
return None
name = m.group(1)
name = name.split('.')[-1]
arg_string = m.group(2)
if not arg_string.strip():
return name, [], []
args = [arg.strip() for arg in arg_string.split(',')]
positional = []
optional = []
i = 0
while i < len(args):
if args[i].startswith('[') or '=' in args[i]:
break
positional.append(args[i].rstrip('['))
i += 1
if args[i - 1].endswith('['):
break
while i < len(args):
arg = args[i]
arg = arg.strip('[]')
arg = arg.split('=')[0]
optional.append(arg)
i += 1
return name, positional, optional
def build_signature(positional: Sequence[str],
optional: Sequence[str]) -> str:
args: MutableSequence[str] = []
args.extend(positional)
for arg in optional:
if arg.startswith('*'):
args.append(arg)
else:
args.append('%s=...' % arg)
sig = '(%s)' % ', '.join(args)
sig = sig.replace('(self)', '')
return sig
def parse_all_signatures(lines: Sequence[str]) -> Tuple[List[Sig],
List[Sig]]:
sigs = []
class_sigs = []
for line in lines:
line = line.strip()
m = re.match(r'\.\. *(function|method|class) *:: *[a-zA-Z_]', line)
if m:
sig = line.split('::')[1].strip()
parsed = parse_signature(sig)
if parsed:
name, fixed, optional = parsed
if m.group(1) != 'class':
sigs.append((name, build_signature(fixed, optional)))
else:
class_sigs.append((name, build_signature(fixed, optional)))
return sorted(sigs), sorted(class_sigs)
def find_unique_signatures(sigs: Sequence[Sig]) -> List[Sig]:
sig_map: MutableMapping[str, List[str]] = {}
for name, sig in sigs:
sig_map.setdefault(name, []).append(sig)
result = []
for name, name_sigs in sig_map.items():
if len(set(name_sigs)) == 1:
result.append((name, name_sigs[0]))
return sorted(result)
def infer_prop_type_from_docstring(docstr: Optional[str]) -> Optional[str]:
if not docstr:
return None
test_str = r'^([a-zA-Z0-9_, \.\[\]]*): '
m = re.match(test_str, docstr)
return m.group(1) if m else None
| true | true |
1c31227aa951cf3036977b7f40365ac03c47458f | 621 | py | Python | src/pythonModules/CsvReader.py | apurva1795/calculator | a72ec7cd961d65da0ebcf3d2c5cea974011ea977 | [
"MIT"
] | null | null | null | src/pythonModules/CsvReader.py | apurva1795/calculator | a72ec7cd961d65da0ebcf3d2c5cea974011ea977 | [
"MIT"
] | null | null | null | src/pythonModules/CsvReader.py | apurva1795/calculator | a72ec7cd961d65da0ebcf3d2c5cea974011ea977 | [
"MIT"
] | null | null | null | import csv
from pprint import pprint
def ClassFactory(class_name, dictionary):
    """Build and return a new class named *class_name* with *dictionary* as its namespace."""
    bases = (object,)
    return type(class_name, bases, dictionary)
class CsvReader:
    """Load a CSV file into a list of row dictionaries.

    Each row is a mapping from the column headers (first line of the file)
    to the corresponding cell values, as produced by csv.DictReader.
    """
    def __init__(self, filepath):
        """Read every row of *filepath* into self.data.

        Fixes over the previous version: the shared mutable class attribute
        ``data = []`` is gone (the instance attribute is always set here),
        and the debug ``pprint(row)`` that printed every row is removed.
        """
        self.data = []
        with open(filepath) as text_data:
            for row in csv.DictReader(text_data, delimiter=','):
                self.data.append(row)
    def return_data_as_objects(self, class_name):
        """Return one dynamically created class per row.

        Each row dict becomes the attribute namespace of a new class named
        *class_name* (built via ClassFactory), so cell values are reachable
        as class attributes.
        """
        objects = []
        for row in self.data:
            objects.append(ClassFactory(class_name, row))
        return objects
from pprint import pprint
def ClassFactory(class_name, dictionary):
return type(class_name, (object,), dictionary)
class CsvReader:
data = []
def __init__(self, filepath):
self.data = []
with open(filepath) as text_data:
csv_data = csv.DictReader(text_data, delimiter=',')
for row in csv_data:
self.data.append(row)
pprint(row)
pass
def return_data_as_objects(self, class_name):
objects = []
for row in self.data:
objects.append(ClassFactory(class_name, row))
return objects | true | true |
1c31229798c28406b90c4cd36c7258c85aafd348 | 3,463 | py | Python | ros2/src/roboy_vision/convertions/convertions.py | HackRoboy/dialogic | 0c0b766409aeb2b717e4c396000d79909658cbb2 | [
"MIT"
] | null | null | null | ros2/src/roboy_vision/convertions/convertions.py | HackRoboy/dialogic | 0c0b766409aeb2b717e4c396000d79909658cbb2 | [
"MIT"
] | 1 | 2018-12-07T09:56:14.000Z | 2018-12-07T09:56:14.000Z | ros2/src/roboy_vision/convertions/convertions.py | ro-boy/ravestate | f67bbb378d327d9e29de21795770fd5e51141608 | [
"MIT"
] | 1 | 2018-11-09T19:05:14.000Z | 2018-11-09T19:05:14.000Z | import sys
from sensor_msgs.msg import Image
import numpy as np
from convertions.registry import converts_to_numpy, converts_from_numpy
# Mapping from ROS image encoding name to (numpy scalar type, channel count).
name_to_dtypes = {
    "rgb8": (np.uint8, 3),
    "rgba8": (np.uint8, 4),
    "rgb16": (np.uint16, 3),
    "rgba16": (np.uint16, 4),
    "bgr8": (np.uint8, 3),
    "bgra8": (np.uint8, 4),
    "bgr16": (np.uint16, 3),
    "bgra16": (np.uint16, 4),
    "mono8": (np.uint8, 1),
    "mono16": (np.uint16, 1),

    # for bayer image (based on cv_bridge.cpp)
    "bayer_rggb8": (np.uint8, 1),
    "bayer_bggr8": (np.uint8, 1),
    "bayer_gbrg8": (np.uint8, 1),
    "bayer_grbg8": (np.uint8, 1),
    "bayer_rggb16": (np.uint16, 1),
    "bayer_bggr16": (np.uint16, 1),
    "bayer_gbrg16": (np.uint16, 1),
    "bayer_grbg16": (np.uint16, 1),

    # OpenCV CvMat types, named <bits><S|U|F>C<channels>.
    # Fix: 16UC* is *unsigned* (np.uint16) and 16SC* is *signed* (np.int16);
    # the two groups were swapped in the original table (compare 8UC/8SC).
    "8UC1": (np.uint8, 1),
    "8UC2": (np.uint8, 2),
    "8UC3": (np.uint8, 3),
    "8UC4": (np.uint8, 4),
    "8SC1": (np.int8, 1),
    "8SC2": (np.int8, 2),
    "8SC3": (np.int8, 3),
    "8SC4": (np.int8, 4),
    "16UC1": (np.uint16, 1),
    "16UC2": (np.uint16, 2),
    "16UC3": (np.uint16, 3),
    "16UC4": (np.uint16, 4),
    "16SC1": (np.int16, 1),
    "16SC2": (np.int16, 2),
    "16SC3": (np.int16, 3),
    "16SC4": (np.int16, 4),
    "32SC1": (np.int32, 1),
    "32SC2": (np.int32, 2),
    "32SC3": (np.int32, 3),
    "32SC4": (np.int32, 4),
    "32FC1": (np.float32, 1),
    "32FC2": (np.float32, 2),
    "32FC3": (np.float32, 3),
    "32FC4": (np.float32, 4),
    "64FC1": (np.float64, 1),
    "64FC2": (np.float64, 2),
    "64FC3": (np.float64, 3),
    "64FC4": (np.float64, 4)
}
@converts_to_numpy(Image)
def image_to_numpy(msg):
    """Convert a sensor_msgs/Image message into a numpy array of its pixels."""
    if not msg.encoding in name_to_dtypes:
        raise TypeError('Unrecognized encoding {}'.format(msg.encoding))
    dtype_class, channels = name_to_dtypes[msg.encoding]
    dtype = np.dtype(dtype_class)
    # Honor the message's declared byte order for multi-byte pixel types.
    dtype = dtype.newbyteorder('>' if msg.is_bigendian else '<')
    shape = (msg.height, msg.width, channels)
    data = np.array(msg.data, dtype=dtype).reshape(shape)
    # Use msg.step as the row stride, since image rows may be padded.
    # NOTE(review): assigning to .strides is deprecated in recent numpy;
    # consider np.lib.stride_tricks.as_strided if this starts warning.
    data.strides = (
        msg.step,
        dtype.itemsize * channels,
        dtype.itemsize
    )
    # Single-channel images are returned as 2-D (height, width).
    if channels == 1:
        data = data[..., 0]
    return data
@converts_from_numpy(Image)
def numpy_to_image(arr, encoding):
    """Convert a numpy array into a sensor_msgs/Image with the given encoding.

    Validates that the array's channel count and scalar type match what the
    encoding requires, then copies the pixel data into the message.
    Raises TypeError on an unknown encoding or a mismatched array.
    """
    if encoding not in name_to_dtypes:
        raise TypeError('Unrecognized encoding {}'.format(encoding))
    im = Image(encoding=encoding)

    # extract width, height, and channels
    dtype_class, exp_channels = name_to_dtypes[encoding]
    dtype = np.dtype(dtype_class)
    if len(arr.shape) == 2:
        # A 2-D array is treated as a single-channel image.
        im.height, im.width, channels = arr.shape + (1,)
    elif len(arr.shape) == 3:
        im.height, im.width, channels = arr.shape
    else:
        raise TypeError("Array must be two or three dimensional")

    # check type and channels
    if exp_channels != channels:
        raise TypeError("Array has {} channels, {} requires {}".format(
            channels, encoding, exp_channels
        ))
    if dtype_class != arr.dtype.type:
        raise TypeError("Array is {}, {} requires {}".format(
            arr.dtype.type, encoding, dtype_class
        ))

    # make the array contiguous in memory, as mostly required by the format
    contig = np.ascontiguousarray(arr)
    # tobytes() replaces the removed ndarray.tostring(); output is identical.
    im.data = contig.tobytes()
    im.step = contig.strides[0]
    # Big-endian if the dtype says so explicitly, or if it is native ('=')
    # on a big-endian host.
    im.is_bigendian = (
        arr.dtype.byteorder == '>' or
        arr.dtype.byteorder == '=' and sys.byteorder == 'big'
    )
    return im
| 28.154472 | 75 | 0.587063 | import sys
from sensor_msgs.msg import Image
import numpy as np
from convertions.registry import converts_to_numpy, converts_from_numpy
name_to_dtypes = {
"rgb8": (np.uint8, 3),
"rgba8": (np.uint8, 4),
"rgb16": (np.uint16, 3),
"rgba16": (np.uint16, 4),
"bgr8": (np.uint8, 3),
"bgra8": (np.uint8, 4),
"bgr16": (np.uint16, 3),
"bgra16": (np.uint16, 4),
"mono8": (np.uint8, 1),
"mono16": (np.uint16, 1),
"bayer_rggb8": (np.uint8, 1),
"bayer_bggr8": (np.uint8, 1),
"bayer_gbrg8": (np.uint8, 1),
"bayer_grbg8": (np.uint8, 1),
"bayer_rggb16": (np.uint16, 1),
"bayer_bggr16": (np.uint16, 1),
"bayer_gbrg16": (np.uint16, 1),
"bayer_grbg16": (np.uint16, 1),
"8UC1": (np.uint8, 1),
"8UC2": (np.uint8, 2),
"8UC3": (np.uint8, 3),
"8UC4": (np.uint8, 4),
"8SC1": (np.int8, 1),
"8SC2": (np.int8, 2),
"8SC3": (np.int8, 3),
"8SC4": (np.int8, 4),
"16UC1": (np.int16, 1),
"16UC2": (np.int16, 2),
"16UC3": (np.int16, 3),
"16UC4": (np.int16, 4),
"16SC1": (np.uint16, 1),
"16SC2": (np.uint16, 2),
"16SC3": (np.uint16, 3),
"16SC4": (np.uint16, 4),
"32SC1": (np.int32, 1),
"32SC2": (np.int32, 2),
"32SC3": (np.int32, 3),
"32SC4": (np.int32, 4),
"32FC1": (np.float32, 1),
"32FC2": (np.float32, 2),
"32FC3": (np.float32, 3),
"32FC4": (np.float32, 4),
"64FC1": (np.float64, 1),
"64FC2": (np.float64, 2),
"64FC3": (np.float64, 3),
"64FC4": (np.float64, 4)
}
@converts_to_numpy(Image)
def image_to_numpy(msg):
if not msg.encoding in name_to_dtypes:
raise TypeError('Unrecognized encoding {}'.format(msg.encoding))
dtype_class, channels = name_to_dtypes[msg.encoding]
dtype = np.dtype(dtype_class)
dtype = dtype.newbyteorder('>' if msg.is_bigendian else '<')
shape = (msg.height, msg.width, channels)
data = np.array(msg.data, dtype=dtype).reshape(shape)
data.strides = (
msg.step,
dtype.itemsize * channels,
dtype.itemsize
)
if channels == 1:
data = data[..., 0]
return data
@converts_from_numpy(Image)
def numpy_to_image(arr, encoding):
if not encoding in name_to_dtypes:
raise TypeError('Unrecognized encoding {}'.format(encoding))
im = Image(encoding=encoding)
dtype_class, exp_channels = name_to_dtypes[encoding]
dtype = np.dtype(dtype_class)
if len(arr.shape) == 2:
im.height, im.width, channels = arr.shape + (1,)
elif len(arr.shape) == 3:
im.height, im.width, channels = arr.shape
else:
raise TypeError("Array must be two or three dimensional")
if exp_channels != channels:
raise TypeError("Array has {} channels, {} requires {}".format(
channels, encoding, exp_channels
))
if dtype_class != arr.dtype.type:
raise TypeError("Array is {}, {} requires {}".format(
arr.dtype.type, encoding, dtype_class
))
contig = np.ascontiguousarray(arr)
im.data = contig.tostring()
im.step = contig.strides[0]
im.is_bigendian = (
arr.dtype.byteorder == '>' or
arr.dtype.byteorder == '=' and sys.byteorder == 'big'
)
return im
| true | true |
1c31252b57e09ecac5110c11b73a11a5e8449f6c | 3,012 | py | Python | server/Pybuilder/env/Lib/site-packages/pybuilder/plugins/python/pylint_plugin.py | abhnvx/DataMetric | adde84ea9b0b7792349ce24eac00b0eee7bbed51 | [
"RSA-MD"
] | 1,419 | 2015-01-02T20:51:04.000Z | 2022-03-23T21:26:00.000Z | server/Pybuilder/env/Lib/site-packages/pybuilder/plugins/python/pylint_plugin.py | abhnvx/DataMetric | adde84ea9b0b7792349ce24eac00b0eee7bbed51 | [
"RSA-MD"
] | 670 | 2015-01-01T10:26:03.000Z | 2022-02-23T16:33:13.000Z | src/main/python/pybuilder/plugins/python/pylint_plugin.py | paolodedios/pybuilder | 12ea2f54e04f97daada375dc3309a3f525f1b5e1 | [
"Apache-2.0"
] | 270 | 2015-01-02T05:01:53.000Z | 2022-01-20T10:22:59.000Z | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pybuilder.core import use_plugin, after, init, task
from pybuilder.errors import BuildFailedException
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
use_plugin("python.core")
use_plugin("analysis")
DEFAULT_PYLINT_OPTIONS = ["--max-line-length=100", "--no-docstring-rgx=.*"]
@init
def init_pylint(project):
    """Declare the pylint dependency and default plugin properties."""
    project.plugin_depends_on("pylint")
    # Defaults only apply if the user's build.py has not already set them.
    project.set_property_if_unset("pylint_options", DEFAULT_PYLINT_OPTIONS)
    project.set_property_if_unset("pylint_break_build", False)
    project.set_property_if_unset("pylint_include_test_sources", False)
    project.set_property_if_unset("pylint_include_scripts", False)
@after("prepare")
def check_pylint_availability(project, logger, reactor):
logger.debug("Checking availability of PyLint")
reactor.pybuilder_venv.verify_can_execute(["pylint"], "pylint", "plugin python.pylint")
@task("analyze")
def execute_pylint(project, logger, reactor):
logger.info("Executing pylint on project sources")
verbose = project.get_property("verbose")
project.set_property_if_unset("pylint_verbose_output", verbose)
command = ExternalCommandBuilder("pylint", project, reactor)
for opt in project.get_property("pylint_options"):
command.use_argument(opt)
include_test_sources = project.get_property("pylint_include_test_sources")
include_scripts = project.get_property("pylint_include_scripts")
result = command.run_on_production_source_files(logger,
include_test_sources=include_test_sources,
include_scripts=include_scripts)
break_build = project.get_property("pylint_break_build")
if result.exit_code == 32 and break_build:
raise BuildFailedException("PyLint failed with exit code %s", result.exit_code)
warnings = [line.rstrip()
for line in result.report_lines
if line.find(".py:") >= 0]
warning_count = len(warnings)
if warning_count:
for warning in warnings:
logger.warn("pylint: %s", warning)
message = "PyLint found {} warning(s).".format(warning_count)
if break_build:
logger.error(message)
raise BuildFailedException(message)
else:
logger.warn(message)
| 36.731707 | 94 | 0.707503 |
from pybuilder.core import use_plugin, after, init, task
from pybuilder.errors import BuildFailedException
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
# Pull in the base plugins required by this one.
use_plugin("python.core")
use_plugin("analysis")
# Fallback pylint command-line options when none are configured.
DEFAULT_PYLINT_OPTIONS = ["--max-line-length=100", "--no-docstring-rgx=.*"]
@init
def init_pylint(project):
    """Declare the pylint dependency and set default plugin properties."""
    project.plugin_depends_on("pylint")
    # set_property_if_unset keeps any value the build script configured.
    project.set_property_if_unset("pylint_options", DEFAULT_PYLINT_OPTIONS)
    project.set_property_if_unset("pylint_break_build", False)
    project.set_property_if_unset("pylint_include_test_sources", False)
    project.set_property_if_unset("pylint_include_scripts", False)
@after("prepare")
def check_pylint_availability(project, logger, reactor):
    """Verify the pylint executable can run in the build venv after "prepare"."""
    logger.debug("Checking availability of PyLint")
    reactor.pybuilder_venv.verify_can_execute(["pylint"], "pylint", "plugin python.pylint")
@task("analyze")
def execute_pylint(project, logger, reactor):
    """Run pylint on production sources; log warnings, optionally fail the build.

    Raises BuildFailedException when ``pylint_break_build`` is set and
    pylint either exits with status 32 or reports any warnings.
    """
    logger.info("Executing pylint on project sources")
    verbose = project.get_property("verbose")
    project.set_property_if_unset("pylint_verbose_output", verbose)
    command = ExternalCommandBuilder("pylint", project, reactor)
    for opt in project.get_property("pylint_options"):
        command.use_argument(opt)
    include_test_sources = project.get_property("pylint_include_test_sources")
    include_scripts = project.get_property("pylint_include_scripts")
    result = command.run_on_production_source_files(logger,
                                                    include_test_sources=include_test_sources,
                                                    include_scripts=include_scripts)
    break_build = project.get_property("pylint_break_build")
    # Exit code 32 is pylint's usage-error status.
    if result.exit_code == 32 and break_build:
        raise BuildFailedException("PyLint failed with exit code %s", result.exit_code)
    # Only report lines that reference a Python source file count as warnings.
    warnings = [line.rstrip()
                for line in result.report_lines
                if line.find(".py:") >= 0]
    warning_count = len(warnings)
    if warning_count:
        for warning in warnings:
            logger.warn("pylint: %s", warning)
        message = "PyLint found {} warning(s).".format(warning_count)
        if break_build:
            logger.error(message)
            raise BuildFailedException(message)
        else:
            logger.warn(message)
| true | true |
1c3125571a27dde6334f07b0b8b3ced20a5bab94 | 21,608 | py | Python | pandas/tests/indexes/period/test_construction.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | 1 | 2019-05-04T03:42:25.000Z | 2019-05-04T03:42:25.000Z | pandas/tests/indexes/period/test_construction.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/period/test_construction.py | developing-coder/pandas | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lmap, lrange
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import (
Index, Period, PeriodIndex, Series, date_range, offsets, period_range)
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
    """Constructor tests for PeriodIndex / period_range.

    Covers construction from Periods, field arrays (year/quarter/month),
    datetime64 data, dtype strings, NaT handling, frequency multiples and
    the deprecated range-based (start/end/periods) constructor form.
    """
    def setup_method(self, method):
        pass
    def test_construction_base_constructor(self):
        # GH 13664
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))
        arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))
        # mixed freqs cannot be held in a PeriodIndex -> falls back to object
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='D')]
        tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.Index(np.array(arr), dtype=object))
    def test_constructor_use_start_freq(self):
        # GH #1118
        p = Period('4/2/2012', freq='B')
        with tm.assert_produces_warning(FutureWarning):
            index = PeriodIndex(start=p, periods=10)
        expected = period_range(start='4/2/2012', periods=10, freq='B')
        tm.assert_index_equal(index, expected)
        index = period_range(start=p, periods=10)
        tm.assert_index_equal(index, expected)
    def test_constructor_field_arrays(self):
        # GH #1264
        years = np.arange(1990, 2010).repeat(4)[2:-2]
        quarters = np.tile(np.arange(1, 5), 20)[2:-2]
        index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
        expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
        tm.assert_index_equal(index, expected)
        index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
        tm.assert_numpy_array_equal(index.asi8, index2.asi8)
        index = PeriodIndex(year=years, quarter=quarters)
        tm.assert_index_equal(index, expected)
        years = [2007, 2007, 2007]
        months = [1, 2]
        msg = "Mismatched Period array lengths"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(year=years, month=months, freq='M')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(year=years, month=months, freq='2M')
        msg = "Can either instantiate from fields or endpoints, but not both"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(year=years, month=months, freq='M',
                        start=Period('2007-01', freq='M'))
        years = [2007, 2007, 2007]
        months = [1, 2, 3]
        idx = PeriodIndex(year=years, month=months, freq='M')
        exp = period_range('2007-01', periods=3, freq='M')
        tm.assert_index_equal(idx, exp)
    def test_constructor_U(self):
        # U was used as undefined period
        with pytest.raises(ValueError, match="Invalid frequency: X"):
            period_range('2007-1-1', periods=500, freq='X')
    def test_constructor_nano(self):
        # nanosecond-frequency periods built from raw ordinals
        idx = period_range(start=Period(ordinal=1, freq='N'),
                           end=Period(ordinal=4, freq='N'), freq='N')
        exp = PeriodIndex([Period(ordinal=1, freq='N'),
                           Period(ordinal=2, freq='N'),
                           Period(ordinal=3, freq='N'),
                           Period(ordinal=4, freq='N')], freq='N')
        tm.assert_index_equal(idx, exp)
    def test_constructor_arrays_negative_year(self):
        years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
        quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
        pindex = PeriodIndex(year=years, quarter=quarters)
        tm.assert_index_equal(pindex.year, pd.Index(years))
        tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
    def test_constructor_invalid_quarters(self):
        msg = "Quarter must be 1 <= q <= 4"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(year=lrange(2000, 2004), quarter=lrange(4),
                        freq='Q-DEC')
    def test_constructor_corner(self):
        msg = "Not enough parameters to construct Period range"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(periods=10, freq='A')
        start = Period('2007', freq='A-JUN')
        end = Period('2010', freq='A-DEC')
        msg = "start and end must have same freq"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start, end=end)
        msg = ("Of the three parameters: start, end, and periods, exactly two"
               " must be specified")
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start)
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(end=end)
        # non-integer periods is truncated
        result = period_range('2007-01', periods=10.5, freq='M')
        exp = period_range('2007-01', periods=10, freq='M')
        tm.assert_index_equal(result, exp)
    def test_constructor_fromarraylike(self):
        idx = period_range('2007-01', periods=20, freq='M')
        # values is an array of Period, thus can retrieve freq
        tm.assert_index_equal(PeriodIndex(idx.values), idx)
        tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
        msg = "freq not specified and cannot be inferred"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(idx._ndarray_values)
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(list(idx._ndarray_values))
        msg = "'Period' object is not iterable"
        with pytest.raises(TypeError, match=msg):
            PeriodIndex(data=Period('2007', freq='A'))
        result = PeriodIndex(iter(idx))
        tm.assert_index_equal(result, idx)
        result = PeriodIndex(idx)
        tm.assert_index_equal(result, idx)
        result = PeriodIndex(idx, freq='M')
        tm.assert_index_equal(result, idx)
        result = PeriodIndex(idx, freq=offsets.MonthEnd())
        tm.assert_index_equal(result, idx)
        assert result.freq == 'M'
        result = PeriodIndex(idx, freq='2M')
        tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'
        result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
        tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'
        result = PeriodIndex(idx, freq='D')
        exp = idx.asfreq('D', 'e')
        tm.assert_index_equal(result, exp)
    def test_constructor_datetime64arr(self):
        vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
        vals = vals.view(np.dtype('M8[us]'))
        msg = r"Wrong dtype: datetime64\[us\]"
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(vals, freq='D')
    @pytest.mark.parametrize('box', [None, 'series', 'index'])
    def test_constructor_datetime64arr_ok(self, box):
        # https://github.com/pandas-dev/pandas/issues/23438
        data = pd.date_range('2017', periods=4, freq="M")
        if box is None:
            data = data._values
        elif box == 'series':
            data = pd.Series(data)
        result = PeriodIndex(data, freq='D')
        expected = PeriodIndex([
            '2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30'
        ], freq="D")
        tm.assert_index_equal(result, expected)
    def test_constructor_dtype(self):
        # a "period[...]" dtype string is equivalent to passing freq
        idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
        exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
        tm.assert_index_equal(idx, exp)
        assert idx.dtype == 'period[M]'
        idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
        exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
        tm.assert_index_equal(idx, exp)
        assert idx.dtype == 'period[3D]'
        # if we already have a freq and its not the same, then asfreq
        # (not changed)
        idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
        res = PeriodIndex(idx, dtype='period[M]')
        exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
        tm.assert_index_equal(res, exp)
        assert res.dtype == 'period[M]'
        res = PeriodIndex(idx, freq='M')
        tm.assert_index_equal(res, exp)
        assert res.dtype == 'period[M]'
        msg = 'specified freq and dtype are different'
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
    def test_constructor_empty(self):
        idx = pd.PeriodIndex([], freq='M')
        assert isinstance(idx, PeriodIndex)
        assert len(idx) == 0
        assert idx.freq == 'M'
        with pytest.raises(ValueError, match='freq not specified'):
            pd.PeriodIndex([])
    def test_constructor_pi_nat(self):
        idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
                           Period('2011-01', freq='M')])
        exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
                                    Period('2011-01', freq='M')]))
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
                           Period('2011-01', freq='M')])
        exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
                                    Period('2011-01', freq='M'),
                                    Period('2011-01', freq='M')]))
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)
        # all-NaT input gives nothing to infer a freq from
        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex([pd.NaT, pd.NaT])
        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(np.array([pd.NaT, pd.NaT]))
        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(['NaT', 'NaT'])
        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(np.array(['NaT', 'NaT']))
    def test_constructor_incompat_freq(self):
        msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
                         Period('2011-01', freq='D')])
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
                                  Period('2011-01', freq='D')]))
        # first element is pd.NaT
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
                         Period('2011-01', freq='D')])
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
                                  Period('2011-01', freq='D')]))
    def test_constructor_mixed(self):
        idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
        exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
        exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)
        idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
                           '2012-01-01'])
        exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
        tm.assert_index_equal(idx, exp)
    def test_constructor_simple_new(self):
        idx = period_range('2007-01', name='p', periods=2, freq='M')
        result = idx._simple_new(idx, name='p', freq=idx.freq)
        tm.assert_index_equal(result, idx)
        result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)
        tm.assert_index_equal(result, idx)
    def test_constructor_simple_new_empty(self):
        # GH13079
        idx = PeriodIndex([], freq='M', name='p')
        result = idx._simple_new(idx, name='p', freq='M')
        tm.assert_index_equal(result, idx)
    @pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])
    def test_constructor_floats(self, floats):
        msg = r"PeriodIndex\._simple_new does not accept floats"
        with pytest.raises(TypeError, match=msg):
            pd.PeriodIndex._simple_new(floats, freq='M')
        msg = "PeriodIndex does not allow floating point in construction"
        with pytest.raises(TypeError, match=msg):
            pd.PeriodIndex(floats, freq='M')
    def test_constructor_nat(self):
        msg = "start and end must not be NaT"
        with pytest.raises(ValueError, match=msg):
            period_range(start='NaT', end='2011-01-01', freq='M')
        with pytest.raises(ValueError, match=msg):
            period_range(start='2011-01-01', end='NaT', freq='M')
    def test_constructor_year_and_quarter(self):
        year = pd.Series([2001, 2002, 2003])
        quarter = year - 2000
        idx = PeriodIndex(year=year, quarter=quarter)
        strs = ['%dQ%d' % t for t in zip(quarter, year)]
        lops = list(map(Period, strs))
        p = PeriodIndex(lops)
        tm.assert_index_equal(p, idx)
    @pytest.mark.parametrize('func, warning', [
        (PeriodIndex, FutureWarning),
        (period_range, None)
    ])
    def test_constructor_freq_mult(self, func, warning):
        # GH #7811
        with tm.assert_produces_warning(warning):
            # must be the same, but for sure...
            pidx = func(start='2014-01', freq='2M', periods=4)
        expected = PeriodIndex(['2014-01', '2014-03',
                                '2014-05', '2014-07'], freq='2M')
        tm.assert_index_equal(pidx, expected)
        with tm.assert_produces_warning(warning):
            pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
        expected = PeriodIndex(['2014-01-02', '2014-01-05',
                                '2014-01-08', '2014-01-11',
                                '2014-01-14'], freq='3D')
        tm.assert_index_equal(pidx, expected)
        with tm.assert_produces_warning(warning):
            pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
        expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
                                '2014-01-01 17:00'], freq='4H')
        tm.assert_index_equal(pidx, expected)
        msg = ('Frequency must be positive, because it'
               ' represents span: -1M')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(['2011-01'], freq='-1M')
        msg = ('Frequency must be positive, because it' ' represents span: 0M')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(['2011-01'], freq='0M')
        msg = ('Frequency must be positive, because it' ' represents span: 0M')
        with pytest.raises(ValueError, match=msg):
            period_range('2011-01', periods=3, freq='0M')
    @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])
    @pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])
    def test_constructor_freq_mult_dti_compat(self, mult, freq):
        freqstr = str(mult) + freq
        pidx = period_range(start='2014-04-01', freq=freqstr, periods=10)
        expected = date_range(start='2014-04-01', freq=freqstr,
                              periods=10).to_period(freqstr)
        tm.assert_index_equal(pidx, expected)
    def test_constructor_freq_combined(self):
        for freq in ['1D1H', '1H1D']:
            # NOTE(review): this loop never asserts; pidx/expected are
            # discarded each iteration — a tm.assert_index_equal seems to be
            # missing, but verify the expected values before adding it.
            pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
                                   freq='25H')
        for freq in ['1D1H', '1H1D']:
            pidx = period_range(start='2016-01-01', periods=2, freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
                                   freq='25H')
            tm.assert_index_equal(pidx, expected)
    def test_constructor_range_based_deprecated(self):
        with tm.assert_produces_warning(FutureWarning):
            pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 9
    def test_constructor_range_based_deprecated_different_freq(self):
        with tm.assert_produces_warning(FutureWarning) as m:
            PeriodIndex(start='2000', periods=2)
        warning, = m
        assert 'freq="A-DEC"' in str(warning.message)
    def test_constructor(self):
        pi = period_range(freq='A', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 9
        pi = period_range(freq='Q', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 4 * 9
        pi = period_range(freq='M', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 12 * 9
        pi = period_range(freq='D', start='1/1/2001', end='12/31/2009')
        assert len(pi) == 365 * 9 + 2
        pi = period_range(freq='B', start='1/1/2001', end='12/31/2009')
        assert len(pi) == 261 * 9
        pi = period_range(freq='H', start='1/1/2001', end='12/31/2001 23:00')
        assert len(pi) == 365 * 24
        pi = period_range(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
        assert len(pi) == 24 * 60
        pi = period_range(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
        assert len(pi) == 24 * 60 * 60
        start = Period('02-Apr-2005', 'B')
        i1 = period_range(start=start, periods=20)
        assert len(i1) == 20
        assert i1.freq == start.freq
        assert i1[0] == start
        end_intv = Period('2006-12-31', 'W')
        i1 = period_range(end=end_intv, periods=10)
        assert len(i1) == 10
        assert i1.freq == end_intv.freq
        assert i1[-1] == end_intv
        end_intv = Period('2006-12-31', '1w')
        i2 = period_range(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq
        end_intv = Period('2006-12-31', ('w', 1))
        i2 = period_range(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq
        end_intv = Period('2005-05-01', 'B')
        # NOTE(review): this i1 is assigned but never asserted against below.
        i1 = period_range(start=start, end=end_intv)
        # infer freq from first element
        i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
        assert len(i2) == 2
        assert i2[0] == end_intv
        i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
        assert len(i2) == 2
        assert i2[0] == end_intv
        # Mixed freq should fail
        vals = [end_intv, Period('2006-12-31', 'w')]
        msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            PeriodIndex(vals)
        vals = np.array(vals)
        with pytest.raises(IncompatibleFrequency, match=msg):
            PeriodIndex(vals)
    def test_constructor_error(self):
        start = Period('02-Apr-2005', 'B')
        end_intv = Period('2006-12-31', ('w', 1))
        msg = 'start and end must have same freq'
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start, end=end_intv)
        msg = ('Of the three parameters: start, end, and periods, '
               'exactly two must be specified')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start)
    @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',
                                      'T', 'S', 'L', 'U', 'N', 'H'])
    def test_recreate_from_data(self, freq):
        org = period_range(start='2001/04/01', freq=freq, periods=1)
        idx = PeriodIndex(org.values, freq=freq)
        tm.assert_index_equal(idx, org)
    def test_map_with_string_constructor(self):
        raw = [2005, 2007, 2009]
        index = PeriodIndex(raw, freq='A')
        expected = Index(lmap(str, raw))
        res = index.map(str)
        # should return an Index
        assert isinstance(res, Index)
        # preserve element types
        assert all(isinstance(resi, str) for resi in res)
        # lastly, values should compare equal
        tm.assert_index_equal(res, expected)
class TestSeriesPeriod:
    """Tests for constructing a Series from period-typed data."""
    def setup_method(self, method):
        # Daily 10-period index wrapped in a Series, shared fixture.
        self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
    def test_constructor_cant_cast_period(self):
        # Casting period data to float must raise, not coerce silently.
        msg = "Cannot cast PeriodArray to dtype float64"
        with pytest.raises(TypeError, match=msg):
            Series(period_range('2000-01-01', periods=10, freq='D'),
                   dtype=float)
    def test_constructor_cast_object(self):
        # An explicit matching PeriodDtype is a no-op for period data.
        expected = Series(period_range('1/1/2000', periods=10))
        result = Series(period_range('1/1/2000', periods=10),
                        dtype=PeriodDtype("D"))
        tm.assert_series_equal(result, expected)
| 39.575092 | 79 | 0.584367 | import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lmap, lrange
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import (
Index, Period, PeriodIndex, Series, date_range, offsets, period_range)
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_constructor_use_start_freq(self):
p = Period('4/2/2012', freq='B')
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq='M')
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq='2M')
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq='M',
start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range('2007-1-1', periods=500, freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=lrange(2000, 2004), quarter=lrange(4),
freq='Q-DEC')
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = ("Of the three parameters: start, end, and periods, exactly two"
" must be specified")
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
assert result.freq == '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq='D')
@pytest.mark.parametrize('box', [None, 'series', 'index'])
def test_constructor_datetime64arr_ok(self, box):
data = pd.date_range('2017', periods=4, freq="M")
if box is None:
data = data._values
elif box == 'series':
data = pd.Series(data)
result = PeriodIndex(data, freq='D')
expected = PeriodIndex([
'2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30'
], freq="D")
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[M]'
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[3D]'
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
msg = 'specified freq and dtype are different'
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == 'M'
with pytest.raises(ValueError, match='freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with pytest.raises(ValueError, match='freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq='M')
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq='M')
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start='NaT', end='2011-01-01', freq='M')
with pytest.raises(ValueError, match=msg):
period_range(start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize('func, warning', [
(PeriodIndex, FutureWarning),
(period_range, None)
])
def test_constructor_freq_mult(self, func, warning):
with tm.assert_produces_warning(warning):
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with pytest.raises(ValueError, match=msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with pytest.raises(ValueError, match=msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with pytest.raises(ValueError, match=msg):
period_range('2011-01', periods=3, freq='0M')
@pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])
@pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
freq='25H')
for freq in ['1D1H', '1H1D']:
pidx = period_range(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start='2000', periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
    def test_constructor(self):
        """period_range covers the standard frequencies with the expected
        lengths, and PeriodIndex accepts Period scalars, arrays and freq
        aliases while rejecting mixed frequencies."""
        # expected element counts for each calendar frequency over 2001-2009
        pi = period_range(freq='A', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 9
        pi = period_range(freq='Q', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 4 * 9
        pi = period_range(freq='M', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 12 * 9
        pi = period_range(freq='D', start='1/1/2001', end='12/31/2009')
        # 365 days * 9 years + 2 leap days (2004 and 2008)
        assert len(pi) == 365 * 9 + 2
        pi = period_range(freq='B', start='1/1/2001', end='12/31/2009')
        # business days only
        assert len(pi) == 261 * 9
        pi = period_range(freq='H', start='1/1/2001', end='12/31/2001 23:00')
        assert len(pi) == 365 * 24
        pi = period_range(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
        assert len(pi) == 24 * 60
        pi = period_range(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
        assert len(pi) == 24 * 60 * 60
        # anchoring on a Period start keeps its freq and first element
        start = Period('02-Apr-2005', 'B')
        i1 = period_range(start=start, periods=20)
        assert len(i1) == 20
        assert i1.freq == start.freq
        assert i1[0] == start
        # anchoring on a Period end keeps its freq and last element
        end_intv = Period('2006-12-31', 'W')
        i1 = period_range(end=end_intv, periods=10)
        assert len(i1) == 10
        assert i1.freq == end_intv.freq
        assert i1[-1] == end_intv
        # '1w' is an equivalent spelling of weekly frequency
        end_intv = Period('2006-12-31', '1w')
        i2 = period_range(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq
        # ... and so is the ('w', 1) tuple form
        end_intv = Period('2006-12-31', ('w', 1))
        i2 = period_range(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq
        # construction from a list / ndarray of Period scalars
        end_intv = Period('2005-05-01', 'B')
        i1 = period_range(start=start, end=end_intv)
        i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
        assert len(i2) == 2
        assert i2[0] == end_intv
        i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
        assert len(i2) == 2
        assert i2[0] == end_intv
        # mixed frequencies in the input are rejected, list or ndarray alike
        vals = [end_intv, Period('2006-12-31', 'w')]
        msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            PeriodIndex(vals)
        vals = np.array(vals)
        with pytest.raises(IncompatibleFrequency, match=msg):
            PeriodIndex(vals)
def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
msg = 'start and end must have same freq'
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end_intv)
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',
'T', 'S', 'L', 'U', 'N', 'H'])
def test_recreate_from_data(self, freq):
org = period_range(start='2001/04/01', freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
expected = Index(lmap(str, raw))
res = index.map(str)
assert isinstance(res, Index)
assert all(isinstance(resi, str) for resi in res)
tm.assert_index_equal(res, expected)
class TestSeriesPeriod:
    """Construction of Series objects holding period-dtype data."""

    def setup_method(self, method):
        # NOTE(review): this fixture is not referenced by the tests below;
        # presumably kept for other tests or subclasses -- confirm.
        self.series = Series(period_range('2000-01-01', periods=10, freq='D'))

    def test_constructor_cant_cast_period(self):
        # a PeriodArray cannot be coerced to a float dtype
        msg = "Cannot cast PeriodArray to dtype float64"
        with pytest.raises(TypeError, match=msg):
            Series(period_range('2000-01-01', periods=10, freq='D'),
                   dtype=float)

    def test_constructor_cast_object(self):
        # passing the matching PeriodDtype explicitly is a no-op cast
        s = Series(period_range('1/1/2000', periods=10),
                   dtype=PeriodDtype("D"))
        exp = Series(period_range('1/1/2000', periods=10))
        tm.assert_series_equal(s, exp)
| true | true |
1c312568dd030c2e506792ee49485e62f3f8f42a | 3,182 | py | Python | Rendering/Volume/Testing/Python/TestBunykRayCastFunction.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | Rendering/Volume/Testing/Python/TestBunykRayCastFunction.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 1 | 2020-12-01T23:21:02.000Z | 2020-12-02T23:44:43.000Z | Rendering/Volume/Testing/Python/TestBunykRayCastFunction.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the standard renderer, render window
# and interactor
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetDesiredUpdateRate(3)
# Create the reader for the data
# This is the data that will be volume rendered
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/ironProt.vtk")
# create a reader for the other data that will
# be contoured and displayed as a polygonal mesh
reader2 = vtk.vtkSLCReader()
reader2.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/neghip.slc")
# convert from vtkImageData to vtkUnstructuredGrid, remove
# any cells where all values are below 80
thresh = vtk.vtkThreshold()
thresh.ThresholdByUpper(80)
thresh.AllScalarsOff()
thresh.SetInputConnection(reader.GetOutputPort())
# the unstructured-grid ray cast mapper below requires tetrahedra,
# so make sure we have only tetrahedra
trifilter = vtk.vtkDataSetTriangleFilter()
trifilter.SetInputConnection(thresh.GetOutputPort())
# Create transfer mapping scalar value to opacity
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(80,0.0)
opacityTransferFunction.AddPoint(120,0.2)
opacityTransferFunction.AddPoint(255,0.2)
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(80.0,0.0,0.0,0.0)
colorTransferFunction.AddRGBPoint(120.0,0.0,0.0,1.0)
colorTransferFunction.AddRGBPoint(160.0,1.0,0.0,0.0)
colorTransferFunction.AddRGBPoint(200.0,0.0,1.0,0.0)
colorTransferFunction.AddRGBPoint(255.0,0.0,1.0,1.0)
# The property describes how the data will look
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.ShadeOff()
volumeProperty.SetInterpolationTypeToLinear()
# The mapper / ray cast function know how to render the data
volumeMapper = vtk.vtkUnstructuredGridVolumeRayCastMapper()
volumeMapper.SetInputConnection(trifilter.GetOutputPort())
# The volume holds the mapper and the property and
# can be used to position/orient the volume
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# contour the second dataset at scalar value 80
contour = vtk.vtkContourFilter()
contour.SetValue(0,80)
contour.SetInputConnection(reader2.GetOutputPort())
# create a mapper for the polygonal data
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(contour.GetOutputPort())
mapper.ScalarVisibilityOff()
# create an actor for the polygonal data
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren1.AddViewProp(actor)
ren1.AddVolume(volume)
renWin.SetSize(300,300)
# frame the scene and render once
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(20.0)
ren1.GetActiveCamera().Elevation(10.0)
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
def TkCheckAbort(__vtk__temp0=0, __vtk__temp1=0):
    """Observer callback: abort the current render when events are pending.

    The two defaulted parameters absorb the (caller, event) arguments that
    VTK passes to observers; they are unused.  The useless temporary and the
    dead trailing ``pass`` of the auto-converted original were removed.
    """
    if renWin.GetEventPending() != 0:
        renWin.SetAbortRender(1)
# let the user interrupt long renders via the abort-check callback
renWin.AddObserver("AbortCheckEvent",TkCheckAbort)
iren.Initialize()
# --- end of script --
| 37 | 66 | 0.803268 |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# standard renderer, render window and interactor
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetDesiredUpdateRate(3)
# reader for the data that will be volume rendered
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/ironProt.vtk")
# reader for the data that will be contoured as a polygonal mesh
reader2 = vtk.vtkSLCReader()
reader2.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/neghip.slc")
# drop cells where all scalar values are below 80
thresh = vtk.vtkThreshold()
thresh.ThresholdByUpper(80)
thresh.AllScalarsOff()
thresh.SetInputConnection(reader.GetOutputPort())
# the unstructured-grid ray cast mapper requires tetrahedra
trifilter = vtk.vtkDataSetTriangleFilter()
trifilter.SetInputConnection(thresh.GetOutputPort())
# scalar value -> opacity transfer function
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(80,0.0)
opacityTransferFunction.AddPoint(120,0.2)
opacityTransferFunction.AddPoint(255,0.2)
# scalar value -> color transfer function
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(80.0,0.0,0.0,0.0)
colorTransferFunction.AddRGBPoint(120.0,0.0,0.0,1.0)
colorTransferFunction.AddRGBPoint(160.0,1.0,0.0,0.0)
colorTransferFunction.AddRGBPoint(200.0,0.0,1.0,0.0)
colorTransferFunction.AddRGBPoint(255.0,0.0,1.0,1.0)
# volume appearance: color, opacity, no shading, linear interpolation
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.ShadeOff()
volumeProperty.SetInterpolationTypeToLinear()
# ray cast mapper over the tetrahedralized grid
volumeMapper = vtk.vtkUnstructuredGridVolumeRayCastMapper()
volumeMapper.SetInputConnection(trifilter.GetOutputPort())
# the volume combines mapper and property for positioning in the scene
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
# contour the second dataset at scalar value 80
contour = vtk.vtkContourFilter()
contour.SetValue(0,80)
contour.SetInputConnection(reader2.GetOutputPort())
# mapper/actor pipeline for the polygonal contour
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(contour.GetOutputPort())
mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren1.AddViewProp(actor)
ren1.AddVolume(volume)
renWin.SetSize(300,300)
# frame the scene and render once
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(20.0)
ren1.GetActiveCamera().Elevation(10.0)
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
def TkCheckAbort (__vtk__temp0=0,__vtk__temp1=0):
    # abort the current render if user events are pending
    foo = renWin.GetEventPending()
    if (foo != 0):
        renWin.SetAbortRender(1)
        pass
renWin.AddObserver("AbortCheckEvent",TkCheckAbort)
iren.Initialize()
| true | true |
1c3126bad5ba59783881d63b1e1d9ee4ad1c4671 | 13,205 | py | Python | lenstronomy_extensions/Itterative/iterative_source.py | Thomas-01/lenstronomy_extensions | fbbfe24dcfd71eae9e7c2dd60865a9b94db67fe8 | [
"MIT"
] | 27 | 2018-02-28T08:54:44.000Z | 2022-03-25T00:13:43.000Z | lenstronomy_extensions/Itterative/iterative_source.py | Thomas-01/lenstronomy_extensions | fbbfe24dcfd71eae9e7c2dd60865a9b94db67fe8 | [
"MIT"
] | 3 | 2019-03-12T13:37:51.000Z | 2020-10-30T03:03:59.000Z | lenstronomy_extensions/Itterative/iterative_source.py | Thomas-01/lenstronomy_extensions | fbbfe24dcfd71eae9e7c2dd60865a9b94db67fe8 | [
"MIT"
] | 33 | 2018-03-19T18:47:38.000Z | 2022-03-27T02:55:04.000Z | __author__ = 'sibirrer'
import numpy as np
import lenstronomy.Util.util as util
from lenstronomy.LightModel.Profiles.shapelets import Shapelets
from lenstronomy.ImSim.image_model import ImageModel
class MakeImageIter(ImageModel):
    """
    class to perform an iterative source reconstruction
    goal: find the floor in the source information (minimal image residuals for a given lens model)
    Steps:
    1: reconstruct source with shapelets
    2: find N local maximas in positive residuals (image-model), indicating not enough peaky positive surface brightness
    3: compute magnification at this position -> minimum scale to be resolved
    4: Add N gaussians with minimal scale at that position
    5: Perform reconstruction of source with shapelets and Gaussians
    6: iterate over
    """

    def find_max_residuals(self, residuals, ra_coords, dec_coords, N):
        """
        select the coordinates of the N strongest local residual peaks

        :param residuals: reduced residual map
        :param ra_coords: RA coordinate of each residual pixel
        :param dec_coords: DEC coordinate of each residual pixel
        :param N: number of maxima to return
        :return: (ra, dec) positions of the selected maxima
        """
        ra_mins, dec_mins, values = util.neighborSelect(residuals, ra_coords, dec_coords)
        # rank candidates by -value so the strongest peaks come first
        ra_pos = util.selectBest(np.array(ra_mins), -np.array(values), N, highest=True)
        dec_pos = util.selectBest(np.array(dec_mins), -np.array(values), N, highest=True)
        return ra_pos, dec_pos

    def check_overlap_in_source(self, x, y, ra_pos, dec_pos, r_min, N):
        """
        check whether different residuals correspond to the same position in
        the source plane (modulo magnification) and keep at most N candidates

        A candidate i is accepted when its source-plane distance to every
        earlier candidate (accepted or not) exceeds its own minimal scale
        r_min[i]; the first candidate is always accepted.

        :param x: source-plane x positions of the candidates
        :param y: source-plane y positions of the candidates
        :param ra_pos: image-plane RA of the candidates
        :param dec_pos: image-plane DEC of the candidates
        :param r_min: minimal resolvable source-plane scale per candidate
        :param N: maximum number of candidates to keep
        :return: selected (x, y, r_min, ra, dec) lists
        """
        n = len(x)
        count = 0
        i = 0
        x_pos_select = []
        y_pos_select = []
        ra_pos_select = []
        dec_pos_select = []
        r_min_select = []
        while count < N and i < n:
            if i == 0:
                # first (strongest) candidate is accepted unconditionally
                x_pos_select.append(x[i])
                y_pos_select.append(y[i])
                ra_pos_select.append(ra_pos[i])
                dec_pos_select.append(dec_pos[i])
                r_min_select.append(r_min[i])
                count += 1
            else:
                r_delta = np.sqrt((x - x[i])**2 + (y - y[i])**2)
                # distance to all earlier candidates, not only accepted ones
                if np.min(r_delta[0:i]) > r_min[i]:
                    x_pos_select.append(x[i])
                    y_pos_select.append(y[i])
                    ra_pos_select.append(ra_pos[i])
                    dec_pos_select.append(dec_pos[i])
                    r_min_select.append(r_min[i])
                    count += 1
            i += 1
        return x_pos_select, y_pos_select, r_min_select, ra_pos_select, dec_pos_select

    def find_clump_param(self, residuals, ra_coords, dec_coords, N, kwargs_lens, kwargs_else, deltaPix, clump_scale):
        """
        derive positions and sizes of up to N non-overlapping source clumps
        from the strongest residual peaks

        :param residuals: reduced residual map
        :param ra_coords: RA coordinate of each residual pixel
        :param dec_coords: DEC coordinate of each residual pixel
        :param N: maximum number of clumps to return
        :param kwargs_lens: lens model keyword arguments
        :param kwargs_else: additional model keyword arguments
        :param deltaPix: pixel size
        :param clump_scale: scaling factor applied to the minimal clump size
        :return: arrays (x, y, sigma, ra, dec) of the selected clumps
        """
        # oversample candidates (5*N) so enough survive the overlap rejection
        ra_pos, dec_pos = self.find_max_residuals(residuals, ra_coords, dec_coords, 5*N)
        n = len(ra_pos)
        x = np.zeros(n)
        y = np.zeros(n)
        r_min = np.zeros(n)
        for i in range(n):
            x[i], y[i], r_min[i] = self.position_size_estimate(ra_pos[i], dec_pos[i], kwargs_lens, kwargs_else, deltaPix, scale=clump_scale)
        x_pos, y_pos, sigma, ra_pos_select, dec_pos_select = self.check_overlap_in_source(x, y, ra_pos, dec_pos, r_min, N)
        return np.array(x_pos), np.array(y_pos), np.array(sigma), np.array(ra_pos_select), np.array(dec_pos_select)

    def clump_response(self, x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=1):
        """
        response matrix of gaussian clumps

        :param x_source: source-plane x coordinates of the image pixels
        :param y_source: source-plane y coordinates of the image pixels
        :param x_pos: clump center x positions
        :param y_pos: clump center y positions
        :param sigma: clump Gaussian widths
        :param deltaPix: pixel size (unused here, kept for interface symmetry)
        :param numPix: number of pixels per image axis
        :param subgrid_res: sub-grid resolution factor
        :param kwargs_psf: PSF keyword arguments
        :param mask: image mask (scalar 1 or array)
        :return: (num_clumps, numPix**2) response matrix
        """
        num_param = len(sigma)
        A = np.zeros((num_param, numPix**2))
        for i in range(num_param):
            image = self.gaussian.function(x_source, y_source, amp=1, sigma_x=sigma[i], sigma_y=sigma[i], center_x=x_pos[i], center_y=y_pos[i])
            image = util.array2image(image)
            # NOTE(review): shapelet_response calls re_size_convolve with
            # (image, numPix, deltaPix, subgrid_res, kwargs_psf); one of the
            # two call signatures is likely outdated -- confirm against ImageModel.
            image = self.re_size_convolve(image, subgrid_res, kwargs_psf)
            response = util.image2array(image*mask)
            A[i, :] = response
        return A

    def shapelet_response(self, x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, num_order=1, mask=1):
        """
        response matrix of shapelet sets centered on the clump positions

        :param x_source: source-plane x coordinates of the image pixels
        :param y_source: source-plane y coordinates of the image pixels
        :param x_pos: clump center x positions
        :param y_pos: clump center y positions
        :param sigma: shapelet scale per clump
        :param deltaPix: pixel size
        :param numPix: number of pixels per image axis
        :param subgrid_res: sub-grid resolution factor
        :param kwargs_psf: PSF keyword arguments
        :param num_order: maximum shapelet order per clump
        :param mask: image mask (scalar 1 or array)
        :return: (num_param, numPix**2) response matrix
        """
        num_clump = len(x_pos)
        # triangular number of shapelet coefficients up to num_order;
        # BUG FIX: use integer division -- the former float result breaks
        # np.zeros() shapes and range() bounds under Python 3
        numShapelets = (num_order+2)*(num_order+1)//2
        num_param = numShapelets*num_clump
        A = np.zeros((num_param, numPix**2))
        k = 0
        for j in range(0, num_clump):
            H_x, H_y = self.shapelets.pre_calc(x_source, y_source, sigma[j], num_order, x_pos[j], y_pos[j])
            n1 = 0
            n2 = 0
            for i in range(0, numShapelets):
                kwargs_source_shapelet = {'center_x': x_pos[j], 'center_y': y_pos[j], 'n1': n1, 'n2': n2, 'beta': sigma[j], 'amp': 1}
                image = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)
                image = util.array2image(image)
                image = self.re_size_convolve(image, numPix, deltaPix, subgrid_res, kwargs_psf)
                response = util.image2array(image*mask)
                A[k, :] = response
                # advance (n1, n2) through the triangular order enumeration
                if n1 == 0:
                    n1 = n2 + 1
                    n2 = 0
                else:
                    n1 -= 1
                    n2 += 1
                k += 1
        return A

    def make_image_iteration(self, x_grid, y_grid, kwargs_lens, kwargs_source, kwargs_psf, kwargs_lens_light, kwargs_else, numPix, deltaPix, subgrid_res, inv_bool=False, no_lens=False):
        """
        perform one source reconstruction, optionally augmenting the linear
        basis with clump profiles placed at the strongest residual peaks

        :param x_grid: image-plane x coordinates
        :param y_grid: image-plane y coordinates
        :param kwargs_lens: lens model keyword arguments
        :param kwargs_source: source light keyword arguments (amplitude updated in place)
        :param kwargs_psf: PSF keyword arguments
        :param kwargs_lens_light: lens light keyword arguments (amplitudes updated in place)
        :param kwargs_else: additional model keyword arguments
        :param numPix: number of pixels per image axis
        :param deltaPix: pixel size
        :param subgrid_res: sub-grid resolution factor
        :param inv_bool: whether to invert the normal matrix in the WLS solve
        :param no_lens: if True, skip the ray shooting (source plane == image plane)
        :return: model image, error map, parameter covariance, parameters,
            and the clump (x, y, sigma, ra, dec) descriptions (None each if no clumps)
        """
        map_error = self.kwargs_options.get('error_map', False)
        num_order = self.kwargs_options.get('shapelet_order', 0)
        data = self.kwargs_data['image_data']
        mask = self.kwargs_options['mask']
        num_clumps = self.kwargs_options.get('num_clumps', 0)
        clump_scale = self.kwargs_options.get('clump_scale', 1)
        if no_lens is True:
            x_source, y_source = x_grid, y_grid
        else:
            x_source, y_source = self.mapping_IS(x_grid, y_grid, kwargs_else, **kwargs_lens)
        A, error_map, _ = self.get_response_matrix(x_grid, y_grid, x_source, y_source, kwargs_lens, kwargs_source, kwargs_psf, kwargs_lens_light, kwargs_else, numPix, deltaPix, subgrid_res, num_order, mask, map_error=map_error, shapelets_off=self.kwargs_options.get('shapelets_off', False))
        d = util.image2array(data*mask)
        # first weighted-least-squares solve with the base response matrix
        param, cov_param, wls_model = self.DeLens.get_param_WLS(A.T, 1/(self.C_D+error_map), d, inv_bool=inv_bool)
        if num_clumps > 0:
            # noise-normalized residuals of the first solve drive clump placement
            residuals = (wls_model-d)/np.sqrt(self.C_D+error_map)
            x_pos, y_pos, sigma, ra_pos, dec_pos = self.find_clump_param(residuals, self.ra_coords, self.dec_coords, num_clumps, kwargs_lens, kwargs_else, deltaPix, clump_scale)
            if self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Gaussian':
                A_clump = self.clump_response(x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=mask)
            elif self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
                A_clump = self.shapelet_response(x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=mask, num_order=self.kwargs_options.get('num_order_clump', 1))
            else:
                raise ValueError("clump_type %s not valid." %(self.kwargs_options['source_clump_type']))
            # re-solve with the clump basis appended
            A = np.append(A, A_clump, axis=0)
            param, cov_param, wls_model = self.DeLens.get_param_WLS(A.T, 1/(self.C_D+error_map), d, inv_bool=inv_bool)
        else:
            x_pos, y_pos, sigma, ra_pos, dec_pos = None, None, None, None, None
        grid_final = util.array2image(wls_model)
        # propagate the fitted linear amplitudes back into the kwargs dicts
        if not self.kwargs_options['source_type'] == 'NONE':
            kwargs_source['I0_sersic'] = param[0]
            i = 1
        else:
            i = 0
        kwargs_lens_light['I0_sersic'] = param[i]
        if self.kwargs_options['lens_light_type'] == 'TRIPLE_SERSIC':
            kwargs_lens_light['I0_3'] = param[i+1]
            kwargs_lens_light['I0_2'] = param[i+2]
        if map_error is True:
            error_map = util.array2image(error_map)
        else:
            error_map = np.zeros_like(grid_final)
        return grid_final, error_map, cov_param, param, x_pos, y_pos, sigma, ra_pos, dec_pos

    def get_source_iter(self, param, num_order, beta, x_grid, y_grid, kwargs_source, x_pos, y_pos, sigma, cov_param=None):
        """
        evaluate the reconstructed source surface brightness on a grid

        :param param: linear parameter vector from the WLS solve
        :param num_order: maximum order of the global shapelet set
        :param beta: scale of the global shapelet set
        :param x_grid: source-plane x coordinates
        :param y_grid: source-plane y coordinates
        :param kwargs_source: source light keyword arguments
        :param x_pos: clump center x positions
        :param y_pos: clump center y positions
        :param sigma: clump sizes / shapelet scales
        :param cov_param: parameter covariance; if given, an error map of the
            shapelet part of the source is propagated
        :return: (source image, error map image)
        """
        if not self.kwargs_options['source_type'] == 'NONE':
            new = {'I0_sersic': param[0], 'center_x': 0, 'center_y': 0}
            kwargs_source_new = kwargs_source.copy()
            kwargs_source_new.update(new)
            source = self.get_surface_brightness(x_grid, y_grid, **kwargs_source_new)
        else:
            source = np.zeros_like(x_grid)
        x_center = kwargs_source['center_x']
        y_center = kwargs_source['center_y']
        num_clumps = self.kwargs_options.get('num_clumps', 0)
        # BUG FIX: integer division; the former float results break range()
        # bounds under Python 3
        num_param_shapelets = (num_order+2)*(num_order+1)//2
        if not self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
            numShapelets_clump = 1
        else:
            num_order_clump = self.kwargs_options.get('num_order_clump', 1)
            numShapelets_clump = (num_order_clump+2)*(num_order_clump+1)//2
        shapelets = Shapelets(interpolation=False, precalc=False)
        error_map_source = np.zeros_like(x_grid)
        n1 = 0
        n2 = 0
        basis_functions = np.zeros((len(param), len(x_grid)))
        # global shapelet coefficients occupy the slice of param just before
        # the clump coefficients
        for i in range(len(param)-num_param_shapelets-num_clumps*numShapelets_clump, len(param)-num_clumps*numShapelets_clump):
            source += shapelets.function(x_grid, y_grid, param[i], beta, n1, n2, center_x=0, center_y=0)
            basis_functions[i, :] = shapelets.function(x_grid, y_grid, 1, beta, n1, n2, center_x=0, center_y=0)
            if n1 == 0:
                n1 = n2 + 1
                n2 = 0
            else:
                n1 -= 1
                n2 += 1
        if self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Gaussian':
            for i in range(num_clumps):
                j = i + len(param) - num_clumps*numShapelets_clump
                source += self.gaussian.function(x_grid, y_grid, amp=param[j], sigma_x=sigma[i], sigma_y=sigma[i], center_x=x_pos[i]-x_center, center_y=y_pos[i]-y_center)
        elif self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
            i = len(param)-num_clumps*numShapelets_clump
            for j in range(0, num_clumps):
                # NOTE(review): pre_calc receives num_order (global order) while
                # the loop count uses num_order_clump -- confirm this is intended.
                H_x, H_y = self.shapelets.pre_calc(x_grid, y_grid, sigma[j], num_order, x_pos[j]-x_center, y_pos[j]-y_center)
                n1 = 0
                n2 = 0
                for k in range(0, numShapelets_clump):
                    kwargs_source_shapelet = {'center_x': x_pos[j], 'center_y': y_pos[j], 'n1': n1, 'n2': n2, 'beta': sigma[j], 'amp': param[i]}
                    source += self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)
                    if n1 == 0:
                        n1 = n2 + 1
                        n2 = 0
                    else:
                        n1 -= 1
                        n2 += 1
                    i += 1
        else:
            raise ValueError("clump_type %s not valid." %(self.kwargs_options['source_clump_type']))
        if cov_param is not None:
            # propagate parameter covariance into a per-pixel source variance
            error_map_source = np.zeros_like(x_grid)
            for i in range(len(error_map_source)):
                error_map_source[i] = basis_functions[:, i].T.dot(cov_param).dot(basis_functions[:,i])
        return util.array2image(source), util.array2image(error_map_source)

    def position_size_estimate(self, ra_pos, dec_pos, kwargs_lens, kwargs_else, delta, scale=1):
        """
        estimate the magnification at the positions and define resolution limit

        Rays from a small circle around the image position are shot to the
        source plane; the geometric mean of the smallest and largest mapped
        radius (halved, times ``scale``) serves as the minimal resolvable
        source scale.

        :param ra_pos: image-plane RA of the position
        :param dec_pos: image-plane DEC of the position
        :param kwargs_lens: lens model keyword arguments
        :param kwargs_else: additional model keyword arguments
        :param delta: radius scale of the probing circle in the image plane
        :param scale: extra multiplicative factor on the resulting size
        :return: source-plane (x, y) and minimal resolvable scale r_min
        """
        x, y = self.LensModel.ray_shooting(ra_pos, dec_pos, kwargs_else, **kwargs_lens)
        d_x, d_y = util.points_on_circle(delta*2, 10)
        x_s, y_s = self.LensModel.ray_shooting(ra_pos + d_x, dec_pos + d_y, kwargs_else, **kwargs_lens)
        x_m = np.mean(x_s)
        y_m = np.mean(y_s)
        r_m = np.sqrt((x_s - x_m) ** 2 + (y_s - y_m) ** 2)
        r_min = np.sqrt(r_m.min(axis=0)*r_m.max(axis=0))/2 * scale
        return x, y, r_min
| 48.193431 | 290 | 0.605225 | __author__ = 'sibirrer'
import numpy as np
import lenstronomy.Util.util as util
from lenstronomy.LightModel.Profiles.shapelets import Shapelets
from lenstronomy.ImSim.image_model import ImageModel
class MakeImageIter(ImageModel):
    """Iterative source reconstruction: fit shapelets, locate the strongest
    residual peaks, add clump profiles (Gaussians or shapelet sets) at the
    corresponding source positions, and re-fit."""

    def find_max_residuals(self, residuals, ra_coords, dec_coords, N):
        """Return (ra, dec) of the N strongest local residual peaks."""
        ra_mins, dec_mins, values = util.neighborSelect(residuals, ra_coords, dec_coords)
        # rank candidates by -value so the strongest peaks come first
        ra_pos = util.selectBest(np.array(ra_mins), -np.array(values), N, highest=True)
        dec_pos = util.selectBest(np.array(dec_mins), -np.array(values), N, highest=True)
        return ra_pos, dec_pos

    def check_overlap_in_source(self, x, y, ra_pos, dec_pos, r_min, N):
        """Greedily keep at most N candidates whose source-plane distance to
        every earlier candidate exceeds their own minimal scale r_min[i]."""
        n = len(x)
        count = 0
        i = 0
        x_pos_select = []
        y_pos_select = []
        ra_pos_select = []
        dec_pos_select = []
        r_min_select = []
        while count < N and i < n:
            if i == 0:
                # first (strongest) candidate is accepted unconditionally
                x_pos_select.append(x[i])
                y_pos_select.append(y[i])
                ra_pos_select.append(ra_pos[i])
                dec_pos_select.append(dec_pos[i])
                r_min_select.append(r_min[i])
                count += 1
            else:
                r_delta = np.sqrt((x - x[i])**2 + (y - y[i])**2)
                # distance to all earlier candidates, not only accepted ones
                if np.min(r_delta[0:i]) > r_min[i]:
                    x_pos_select.append(x[i])
                    y_pos_select.append(y[i])
                    ra_pos_select.append(ra_pos[i])
                    dec_pos_select.append(dec_pos[i])
                    r_min_select.append(r_min[i])
                    count += 1
            i += 1
        return x_pos_select, y_pos_select, r_min_select, ra_pos_select, dec_pos_select

    def find_clump_param(self, residuals, ra_coords, dec_coords, N, kwargs_lens, kwargs_else, deltaPix, clump_scale):
        """Derive positions and sizes of up to N non-overlapping clumps from
        the strongest residual peaks (oversampled by a factor 5)."""
        ra_pos, dec_pos = self.find_max_residuals(residuals, ra_coords, dec_coords, 5*N)
        n = len(ra_pos)
        x = np.zeros(n)
        y = np.zeros(n)
        r_min = np.zeros(n)
        for i in range(n):
            x[i], y[i], r_min[i] = self.position_size_estimate(ra_pos[i], dec_pos[i], kwargs_lens, kwargs_else, deltaPix, scale=clump_scale)
        x_pos, y_pos, sigma, ra_pos_select, dec_pos_select = self.check_overlap_in_source(x, y, ra_pos, dec_pos, r_min, N)
        return np.array(x_pos), np.array(y_pos), np.array(sigma), np.array(ra_pos_select), np.array(dec_pos_select)

    def clump_response(self, x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=1):
        """Build the (num_clumps, numPix**2) response matrix of Gaussian clumps."""
        num_param = len(sigma)
        A = np.zeros((num_param, numPix**2))
        for i in range(num_param):
            image = self.gaussian.function(x_source, y_source, amp=1, sigma_x=sigma[i], sigma_y=sigma[i], center_x=x_pos[i], center_y=y_pos[i])
            image = util.array2image(image)
            # NOTE(review): shapelet_response below calls re_size_convolve with
            # five arguments; one of the two signatures is likely outdated.
            image = self.re_size_convolve(image, subgrid_res, kwargs_psf)
            response = util.image2array(image*mask)
            A[i, :] = response
        return A

    def shapelet_response(self, x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, num_order=1, mask=1):
        """Build the response matrix of per-clump shapelet sets."""
        num_clump = len(x_pos)
        # NOTE(review): under Python 3 this '/' yields a float; np.zeros and
        # range below require an int (should be '//').
        numShapelets = (num_order+2)*(num_order+1)/2
        num_param = numShapelets*num_clump
        A = np.zeros((num_param, numPix**2))
        k = 0
        for j in range(0, num_clump):
            H_x, H_y = self.shapelets.pre_calc(x_source, y_source, sigma[j], num_order, x_pos[j], y_pos[j])
            n1 = 0
            n2 = 0
            for i in range(0, numShapelets):
                kwargs_source_shapelet = {'center_x': x_pos[j], 'center_y': y_pos[j], 'n1': n1, 'n2': n2, 'beta': sigma[j], 'amp': 1}
                image = self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)
                image = util.array2image(image)
                image = self.re_size_convolve(image, numPix, deltaPix, subgrid_res, kwargs_psf)
                response = util.image2array(image*mask)
                A[k, :] = response
                # advance (n1, n2) through the triangular order enumeration
                if n1 == 0:
                    n1 = n2 + 1
                    n2 = 0
                else:
                    n1 -= 1
                    n2 += 1
                k += 1
        return A

    def make_image_iteration(self, x_grid, y_grid, kwargs_lens, kwargs_source, kwargs_psf, kwargs_lens_light, kwargs_else, numPix, deltaPix, subgrid_res, inv_bool=False, no_lens=False):
        """Solve the weighted linear inversion; if clumps are configured,
        place them at the strongest residual peaks and solve again."""
        map_error = self.kwargs_options.get('error_map', False)
        num_order = self.kwargs_options.get('shapelet_order', 0)
        data = self.kwargs_data['image_data']
        mask = self.kwargs_options['mask']
        num_clumps = self.kwargs_options.get('num_clumps', 0)
        clump_scale = self.kwargs_options.get('clump_scale', 1)
        if no_lens is True:
            # no ray shooting: source plane coincides with image plane
            x_source, y_source = x_grid, y_grid
        else:
            x_source, y_source = self.mapping_IS(x_grid, y_grid, kwargs_else, **kwargs_lens)
        A, error_map, _ = self.get_response_matrix(x_grid, y_grid, x_source, y_source, kwargs_lens, kwargs_source, kwargs_psf, kwargs_lens_light, kwargs_else, numPix, deltaPix, subgrid_res, num_order, mask, map_error=map_error, shapelets_off=self.kwargs_options.get('shapelets_off', False))
        d = util.image2array(data*mask)
        # first weighted-least-squares solve with the base response matrix
        param, cov_param, wls_model = self.DeLens.get_param_WLS(A.T, 1/(self.C_D+error_map), d, inv_bool=inv_bool)
        if num_clumps > 0:
            # noise-normalized residuals of the first solve drive clump placement
            residuals = (wls_model-d)/np.sqrt(self.C_D+error_map)
            x_pos, y_pos, sigma, ra_pos, dec_pos = self.find_clump_param(residuals, self.ra_coords, self.dec_coords, num_clumps, kwargs_lens, kwargs_else, deltaPix, clump_scale)
            if self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Gaussian':
                A_clump = self.clump_response(x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=mask)
            elif self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
                A_clump = self.shapelet_response(x_source, y_source, x_pos, y_pos, sigma, deltaPix, numPix, subgrid_res, kwargs_psf, mask=mask, num_order=self.kwargs_options.get('num_order_clump', 1))
            else:
                raise ValueError("clump_type %s not valid." %(self.kwargs_options['source_clump_type']))
            # re-solve with the clump basis appended
            A = np.append(A, A_clump, axis=0)
            param, cov_param, wls_model = self.DeLens.get_param_WLS(A.T, 1/(self.C_D+error_map), d, inv_bool=inv_bool)
        else:
            x_pos, y_pos, sigma, ra_pos, dec_pos = None, None, None, None, None
        grid_final = util.array2image(wls_model)
        # propagate fitted linear amplitudes back into the kwargs dicts
        if not self.kwargs_options['source_type'] == 'NONE':
            kwargs_source['I0_sersic'] = param[0]
            i = 1
        else:
            i = 0
        kwargs_lens_light['I0_sersic'] = param[i]
        if self.kwargs_options['lens_light_type'] == 'TRIPLE_SERSIC':
            kwargs_lens_light['I0_3'] = param[i+1]
            kwargs_lens_light['I0_2'] = param[i+2]
        if map_error is True:
            error_map = util.array2image(error_map)
        else:
            error_map = np.zeros_like(grid_final)
        return grid_final, error_map, cov_param, param, x_pos, y_pos, sigma, ra_pos, dec_pos

    def get_source_iter(self, param, num_order, beta, x_grid, y_grid, kwargs_source, x_pos, y_pos, sigma, cov_param=None):
        """Evaluate the reconstructed source (smooth model + shapelets +
        clumps) on a grid; optionally propagate the parameter covariance."""
        if not self.kwargs_options['source_type'] == 'NONE':
            new = {'I0_sersic': param[0], 'center_x': 0, 'center_y': 0}
            kwargs_source_new = kwargs_source.copy()
            kwargs_source_new.update(new)
            source = self.get_surface_brightness(x_grid, y_grid, **kwargs_source_new)
        else:
            source = np.zeros_like(x_grid)
        x_center = kwargs_source['center_x']
        y_center = kwargs_source['center_y']
        num_clumps = self.kwargs_options.get('num_clumps', 0)
        # NOTE(review): under Python 3 these '/' divisions yield floats; the
        # range() calls below require ints (should be '//').
        num_param_shapelets = (num_order+2)*(num_order+1)/2
        if not self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
            numShapelets_clump = 1
        else:
            num_order_clump = self.kwargs_options.get('num_order_clump', 1)
            numShapelets_clump = (num_order_clump+2)*(num_order_clump+1)/2
        shapelets = Shapelets(interpolation=False, precalc=False)
        error_map_source = np.zeros_like(x_grid)
        n1 = 0
        n2 = 0
        basis_functions = np.zeros((len(param), len(x_grid)))
        # global shapelet coefficients sit in param just before the clump ones
        for i in range(len(param)-num_param_shapelets-num_clumps*numShapelets_clump, len(param)-num_clumps*numShapelets_clump):
            source += shapelets.function(x_grid, y_grid, param[i], beta, n1, n2, center_x=0, center_y=0)
            basis_functions[i, :] = shapelets.function(x_grid, y_grid, 1, beta, n1, n2, center_x=0, center_y=0)
            if n1 == 0:
                n1 = n2 + 1
                n2 = 0
            else:
                n1 -= 1
                n2 += 1
        if self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Gaussian':
            for i in range(num_clumps):
                j = i + len(param) - num_clumps*numShapelets_clump
                source += self.gaussian.function(x_grid, y_grid, amp=param[j], sigma_x=sigma[i], sigma_y=sigma[i], center_x=x_pos[i]-x_center, center_y=y_pos[i]-y_center)
        elif self.kwargs_options.get('source_clump_type', 'Gaussian') == 'Shapelets':
            i = len(param)-num_clumps*numShapelets_clump
            for j in range(0, num_clumps):
                # NOTE(review): pre_calc receives num_order (global order) while
                # the loop count uses numShapelets_clump -- confirm intended.
                H_x, H_y = self.shapelets.pre_calc(x_grid, y_grid, sigma[j], num_order, x_pos[j]-x_center, y_pos[j]-y_center)
                n1 = 0
                n2 = 0
                for k in range(0, numShapelets_clump):
                    kwargs_source_shapelet = {'center_x': x_pos[j], 'center_y': y_pos[j], 'n1': n1, 'n2': n2, 'beta': sigma[j], 'amp': param[i]}
                    source += self.shapelets.function(H_x, H_y, **kwargs_source_shapelet)
                    if n1 == 0:
                        n1 = n2 + 1
                        n2 = 0
                    else:
                        n1 -= 1
                        n2 += 1
                    i += 1
        else:
            raise ValueError("clump_type %s not valid." %(self.kwargs_options['source_clump_type']))
        if cov_param is not None:
            # propagate parameter covariance into a per-pixel source variance
            error_map_source = np.zeros_like(x_grid)
            for i in range(len(error_map_source)):
                error_map_source[i] = basis_functions[:, i].T.dot(cov_param).dot(basis_functions[:,i])
        return util.array2image(source), util.array2image(error_map_source)

    def position_size_estimate(self, ra_pos, dec_pos, kwargs_lens, kwargs_else, delta, scale=1):
        """Estimate the local magnification by ray-shooting a small circle and
        derive the minimal resolvable source-plane scale at (ra_pos, dec_pos)."""
        x, y = self.LensModel.ray_shooting(ra_pos, dec_pos, kwargs_else, **kwargs_lens)
        d_x, d_y = util.points_on_circle(delta*2, 10)
        x_s, y_s = self.LensModel.ray_shooting(ra_pos + d_x, dec_pos + d_y, kwargs_else, **kwargs_lens)
        x_m = np.mean(x_s)
        y_m = np.mean(y_s)
        # geometric mean of the min/max mapped radii, halved and scaled
        r_m = np.sqrt((x_s - x_m) ** 2 + (y_s - y_m) ** 2)
        r_min = np.sqrt(r_m.min(axis=0)*r_m.max(axis=0))/2 * scale
        return x, y, r_min
return x, y, r_min
| true | true |
1c3127322990bde39d34eb74ce48ed98c6892598 | 12,778 | py | Python | src/sagemaker/predictor.py | yifeim/sagemaker-python-sdk | d60f8d3889b4bbada745ff67ce4d0aae2013285a | [
"Apache-2.0"
] | null | null | null | src/sagemaker/predictor.py | yifeim/sagemaker-python-sdk | d60f8d3889b4bbada745ff67ce4d0aae2013285a | [
"Apache-2.0"
] | null | null | null | src/sagemaker/predictor.py | yifeim/sagemaker-python-sdk | d60f8d3889b4bbada745ff67ce4d0aae2013285a | [
"Apache-2.0"
] | 1 | 2020-07-30T13:26:45.000Z | 2020-07-30T13:26:45.000Z | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function, absolute_import
import codecs
import csv
import json
import numpy as np
import six
from six import StringIO, BytesIO
from sagemaker.content_types import CONTENT_TYPE_JSON, CONTENT_TYPE_CSV, CONTENT_TYPE_NPY
from sagemaker.session import Session
class RealTimePredictor(object):
    """Make prediction requests to an Amazon SageMaker endpoint.

    Serialization of input data and deserialization of results are pluggable:
    if no serializer/deserializer is supplied, raw bytes are sent and returned
    unmodified.
    """

    def __init__(self, endpoint, sagemaker_session=None, serializer=None, deserializer=None,
                 content_type=None, accept=None):
        """Initialize a ``RealTimePredictor``.

        Args:
            endpoint (str): Name of the Amazon SageMaker endpoint requests are sent to.
            sagemaker_session (sagemaker.session.Session): Session used for SageMaker
                interactions; a default Session is created when None.
            serializer (callable): Maps input data to a sequence of bytes. May expose
                a ``content_type`` attribute used as the request content type.
            deserializer (callable): Maps (result stream, content type) to the returned
                value. May expose an ``accept`` attribute used as the request Accept.
            content_type (str): Overrides the serializer's ``content_type`` (default: None).
            accept (str): Overrides the deserializer's ``accept`` (default: None).
        """
        self.endpoint = endpoint
        self.sagemaker_session = sagemaker_session or Session()
        self.serializer = serializer
        self.deserializer = deserializer
        # Explicit arguments win; otherwise fall back to the hook's own attribute.
        self.content_type = content_type or getattr(serializer, 'content_type', None)
        self.accept = accept or getattr(deserializer, 'accept', None)

    def predict(self, data, initial_args=None):
        """Return the inference from the endpoint for ``data``.

        Args:
            data (object): Input for the model. Serialized first when a serializer
                was configured; otherwise must already be a sequence of bytes.
            initial_args (dict[str,str]): Optional default arguments for the boto3
                ``invoke_endpoint`` call.

        Returns:
            object: The deserializer's result when one is configured, otherwise
            the raw response bytes.
        """
        request_args = self._create_request_args(data, initial_args)
        raw_response = self.sagemaker_session.sagemaker_runtime_client.invoke_endpoint(
            **request_args)
        return self._handle_response(raw_response)

    def _handle_response(self, response):
        """Extract the payload from an ``invoke_endpoint`` response dict."""
        body = response['Body']
        if self.deserializer is None:
            payload = body.read()
            body.close()
            return payload
        # The deserializer takes ownership of the stream and must close it.
        return self.deserializer(body, response['ContentType'])

    def _create_request_args(self, data, initial_args=None):
        """Build the keyword arguments for ``invoke_endpoint``.

        Keys already present in ``initial_args`` are never overwritten.
        """
        args = dict(initial_args) if initial_args else {}
        if 'EndpointName' not in args:
            args['EndpointName'] = self.endpoint
        # Content headers are only added when configured on this predictor.
        for header, value in (('ContentType', self.content_type),
                              ('Accept', self.accept)):
            if value and header not in args:
                args[header] = value
        if self.serializer is not None:
            data = self.serializer(data)
        args['Body'] = data
        return args

    def delete_endpoint(self):
        """Delete the Amazon SageMaker endpoint backing this predictor."""
        self.sagemaker_session.delete_endpoint(self.endpoint)
class _CsvSerializer(object):
def __init__(self):
self.content_type = CONTENT_TYPE_CSV
def __call__(self, data):
"""Take data of various data formats and serialize them into CSV.
Args:
data (object): Data to be serialized.
Returns:
object: Sequence of bytes to be used for the request body.
"""
# For inputs which represent multiple "rows", the result should be newline-separated CSV rows
if _is_mutable_sequence_like(data) and len(data) > 0 and _is_sequence_like(data[0]):
return '\n'.join([_CsvSerializer._serialize_row(row) for row in data])
return _CsvSerializer._serialize_row(data)
@staticmethod
def _serialize_row(data):
# Don't attempt to re-serialize a string
if isinstance(data, str):
return data
if isinstance(data, np.ndarray):
data = np.ndarray.flatten(data)
if hasattr(data, '__len__'):
if len(data):
return _csv_serialize_python_array(data)
else:
raise ValueError("Cannot serialize empty array")
# files and buffers
if hasattr(data, 'read'):
return _csv_serialize_from_buffer(data)
raise ValueError("Unable to handle input format: ", type(data))
def _csv_serialize_python_array(data):
return _csv_serialize_object(data)
def _csv_serialize_from_buffer(buff):
return buff.read()
def _csv_serialize_object(data):
csv_buffer = StringIO()
csv_writer = csv.writer(csv_buffer, delimiter=',')
csv_writer.writerow(data)
return csv_buffer.getvalue().rstrip('\r\n')
# Ready-to-use module-level CSV serializer instance.
csv_serializer = _CsvSerializer()
def _is_mutable_sequence_like(obj):
return _is_sequence_like(obj) and hasattr(obj, '__setitem__')
def _is_sequence_like(obj):
# Need to explicitly check on str since str lacks the iterable magic methods in Python 2
return (hasattr(obj, '__iter__') and hasattr(obj, '__getitem__')) or isinstance(obj, str)
def _row_to_csv(obj):
if isinstance(obj, str):
return obj
return ','.join(obj)
class BytesDeserializer(object):
    """Return the response as an undecoded sequence of bytes.

    Args:
        accept (str): The Accept header to send to the server (optional).
    """

    def __init__(self, accept=None):
        self.accept = accept

    def __call__(self, stream, content_type):
        # The stream is always closed, even when read() raises.
        try:
            payload = stream.read()
        finally:
            stream.close()
        return payload
class StringDeserializer(object):
    """Return the response decoded into a string.

    Args:
        encoding (str): The string encoding to use (default: utf-8).
        accept (str): The Accept header to send to the server (optional).
    """

    def __init__(self, encoding='utf-8', accept=None):
        self.encoding = encoding
        self.accept = accept

    def __call__(self, stream, content_type):
        # Read everything before decoding; the stream is always closed.
        try:
            raw = stream.read()
        finally:
            stream.close()
        return raw.decode(self.encoding)
class StreamDeserializer(object):
    """Return the raw response stream together with its content type.

    The caller becomes responsible for closing the stream after reading it.

    Args:
        accept (str): The Accept header to send to the server (optional).
    """

    def __init__(self, accept=None):
        self.accept = accept

    def __call__(self, stream, content_type):
        # Intentionally does NOT close the stream -- the receiver owns it.
        return stream, content_type
class _JsonSerializer(object):
    """Serialize dicts, arrays, lists, and buffers into JSON request bodies."""

    def __init__(self):
        self.content_type = CONTENT_TYPE_JSON

    def __call__(self, data):
        """Serialize ``data`` into the JSON request body.

        Dict values that are numpy arrays are converted to lists so the dict
        is JSON-serializable; file-like inputs are assumed to already hold
        JSON and are passed through.
        """
        if isinstance(data, dict):
            serializable = {key: _ndarray_to_list(value)
                            for key, value in six.iteritems(data)}
            return json.dumps(serializable)
        # files and buffers
        if hasattr(data, 'read'):
            return _json_serialize_from_buffer(data)
        return json.dumps(_ndarray_to_list(data))
# Ready-to-use module-level JSON serializer instance.
json_serializer = _JsonSerializer()


def _ndarray_to_list(data):
    # Convert a numpy array to a plain list (JSON-serializable); other
    # values are passed through unchanged.
    return data.tolist() if isinstance(data, np.ndarray) else data


def _json_serialize_from_buffer(buff):
    # The buffer is assumed to already contain JSON-encoded data.
    return buff.read()
class _JsonDeserializer(object):
def __init__(self):
self.accept = CONTENT_TYPE_JSON
def __call__(self, stream, content_type):
"""Decode a JSON object into the corresponding Python object.
Args:
stream (stream): The response stream to be deserialized.
content_type (str): The content type of the response.
Returns:
object: Body of the response deserialized into a JSON object.
"""
try:
return json.load(codecs.getreader('utf-8')(stream))
finally:
stream.close()
# Ready-to-use module-level JSON deserializer instance.
json_deserializer = _JsonDeserializer()
class _NumpyDeserializer(object):
    """Deserialize CSV, JSON, or NPY response bodies into numpy arrays."""

    def __init__(self, accept=CONTENT_TYPE_NPY, dtype=None):
        """
        Args:
            accept (str): Accept header advertised to the endpoint.
            dtype: Optional numpy dtype used when parsing CSV/JSON content.
        """
        self.accept = accept
        self.dtype = dtype

    def __call__(self, stream, content_type=CONTENT_TYPE_NPY):
        """Decode ``stream`` into a numpy array according to ``content_type``.

        The stream is always closed. A content type other than CSV/JSON/NPY
        falls through and yields None (preserving the original behavior).
        """
        try:
            if content_type == CONTENT_TYPE_CSV:
                text = codecs.getreader('utf-8')(stream)
                return np.genfromtxt(text, delimiter=',', dtype=self.dtype)
            if content_type == CONTENT_TYPE_JSON:
                text = codecs.getreader('utf-8')(stream)
                return np.array(json.load(text), dtype=self.dtype)
            if content_type == CONTENT_TYPE_NPY:
                return np.load(BytesIO(stream.read()))
        finally:
            stream.close()
# Ready-to-use module-level numpy deserializer instance.
numpy_deserializer = _NumpyDeserializer()
class _NPYSerializer(object):
def __init__(self):
self.content_type = CONTENT_TYPE_NPY
def __call__(self, data, dtype=None):
"""Serialize data into the request body in NPY format.
Args:
data (object): Data to be serialized. Can be a numpy array, list, file, or buffer.
Returns:
object: NPY serialized data used for the request.
"""
if isinstance(data, np.ndarray):
if not data.size > 0:
raise ValueError("empty array can't be serialized")
return _npy_serialize(data)
if isinstance(data, list):
if not len(data) > 0:
raise ValueError("empty array can't be serialized")
return _npy_serialize(np.array(data, dtype))
# files and buffers. Assumed to hold npy-formatted data.
if hasattr(data, 'read'):
return data.read()
return _npy_serialize(np.array(data))
def _npy_serialize(data):
buffer = BytesIO()
np.save(buffer, data)
return buffer.getvalue()
# Ready-to-use module-level NPY serializer instance.
npy_serializer = _NPYSerializer()
| 35.201102 | 120 | 0.659649 |
from __future__ import print_function, absolute_import
import codecs
import csv
import json
import numpy as np
import six
from six import StringIO, BytesIO
from sagemaker.content_types import CONTENT_TYPE_JSON, CONTENT_TYPE_CSV, CONTENT_TYPE_NPY
from sagemaker.session import Session
class RealTimePredictor(object):
    """Make prediction requests to an Amazon SageMaker endpoint."""
    def __init__(self, endpoint, sagemaker_session=None, serializer=None, deserializer=None,
                 content_type=None, accept=None):
        """Configure the endpoint name, session, and (de)serialization hooks.

        ``content_type``/``accept`` default to the serializer's/deserializer's
        own attributes when not given explicitly.
        """
        self.endpoint = endpoint
        self.sagemaker_session = sagemaker_session or Session()
        self.serializer = serializer
        self.deserializer = deserializer
        self.content_type = content_type or getattr(serializer, 'content_type', None)
        self.accept = accept or getattr(deserializer, 'accept', None)
    def predict(self, data, initial_args=None):
        """Send ``data`` to the endpoint and return the (optionally deserialized) inference."""
        request_args = self._create_request_args(data, initial_args)
        response = self.sagemaker_session.sagemaker_runtime_client.invoke_endpoint(**request_args)
        return self._handle_response(response)
    def _handle_response(self, response):
        """Return the response payload; a configured deserializer owns (and closes) the stream."""
        response_body = response['Body']
        if self.deserializer is not None:
            return self.deserializer(response_body, response['ContentType'])
        data = response_body.read()
        response_body.close()
        return data
    def _create_request_args(self, data, initial_args=None):
        """Build the ``invoke_endpoint`` keyword arguments; keys in ``initial_args`` win."""
        args = dict(initial_args) if initial_args else {}
        if 'EndpointName' not in args:
            args['EndpointName'] = self.endpoint
        if self.content_type and 'ContentType' not in args:
            args['ContentType'] = self.content_type
        if self.accept and 'Accept' not in args:
            args['Accept'] = self.accept
        if self.serializer is not None:
            data = self.serializer(data)
        args['Body'] = data
        return args
    def delete_endpoint(self):
        """Delete the Amazon SageMaker endpoint backing this predictor."""
        self.sagemaker_session.delete_endpoint(self.endpoint)
class _CsvSerializer(object):
    """Serialize Python data (strings, sequences, arrays, buffers) to CSV."""
    def __init__(self):
        self.content_type = CONTENT_TYPE_CSV
    def __call__(self, data):
        """Serialize ``data`` to CSV; batch-like inputs become newline-separated rows."""
        if _is_mutable_sequence_like(data) and len(data) > 0 and _is_sequence_like(data[0]):
            return '\n'.join([_CsvSerializer._serialize_row(row) for row in data])
        return _CsvSerializer._serialize_row(data)
    @staticmethod
    def _serialize_row(data):
        """Serialize one row of input (string, array, sequence, or buffer) to CSV."""
        # A string is assumed to already be CSV text.
        if isinstance(data, str):
            return data
        if isinstance(data, np.ndarray):
            data = np.ndarray.flatten(data)
        if hasattr(data, '__len__'):
            if len(data):
                return _csv_serialize_python_array(data)
            else:
                raise ValueError("Cannot serialize empty array")
        # files and buffers
        if hasattr(data, 'read'):
            return _csv_serialize_from_buffer(data)
        raise ValueError("Unable to handle input format: ", type(data))
def _csv_serialize_python_array(data):
    # Serialize a Python sequence as a single CSV row.
    return _csv_serialize_object(data)
def _csv_serialize_from_buffer(buff):
    # Pass through the contents of a buffer assumed to already hold CSV.
    return buff.read()
def _csv_serialize_object(data):
    # Write ``data`` as one CSV row and strip the trailing newline.
    csv_buffer = StringIO()
    csv_writer = csv.writer(csv_buffer, delimiter=',')
    csv_writer.writerow(data)
    return csv_buffer.getvalue().rstrip('\r\n')
# Ready-to-use module-level CSV serializer instance.
csv_serializer = _CsvSerializer()
def _is_mutable_sequence_like(obj):
    # Sequence-like AND supports item assignment.
    return _is_sequence_like(obj) and hasattr(obj, '__setitem__')
def _is_sequence_like(obj):
    # Need to explicitly check on str since str lacks the iterable magic methods in Python 2
    return (hasattr(obj, '__iter__') and hasattr(obj, '__getitem__')) or isinstance(obj, str)
def _row_to_csv(obj):
    # Join a sequence into a CSV row; pass strings through unchanged.
    if isinstance(obj, str):
        return obj
    return ','.join(obj)
class BytesDeserializer(object):
    """Return the response as an undecoded sequence of bytes."""
    def __init__(self, accept=None):
        # accept (str): optional Accept header sent to the server.
        self.accept = accept
    def __call__(self, stream, content_type):
        # Always close the stream, even when read() raises.
        try:
            return stream.read()
        finally:
            stream.close()
class StringDeserializer(object):
    """Return the response decoded into a string."""
    def __init__(self, encoding='utf-8', accept=None):
        # encoding (str): codec used to decode the body (default utf-8).
        # accept (str): optional Accept header sent to the server.
        self.encoding = encoding
        self.accept = accept
    def __call__(self, stream, content_type):
        # Always close the stream, even when read()/decode() raises.
        try:
            return stream.read().decode(self.encoding)
        finally:
            stream.close()
class StreamDeserializer(object):
    """Return the raw stream and content type; the caller must close the stream."""
    def __init__(self, accept=None):
        # accept (str): optional Accept header sent to the server.
        self.accept = accept
    def __call__(self, stream, content_type):
        # The stream is intentionally NOT closed here -- the receiver owns it.
        return (stream, content_type)
class _JsonSerializer(object):
    """Serialize dicts, arrays, lists, and buffers into JSON request bodies."""
    def __init__(self):
        self.content_type = CONTENT_TYPE_JSON
    def __call__(self, data):
        """Serialize ``data`` to JSON; buffers are assumed to already hold JSON."""
        if isinstance(data, dict):
            # convert each value in dict from a numpy array to a list if necessary, so they can be json serialized
            return json.dumps({k: _ndarray_to_list(v) for k, v in six.iteritems(data)})
        # files and buffers
        if hasattr(data, 'read'):
            return _json_serialize_from_buffer(data)
        return json.dumps(_ndarray_to_list(data))
# Ready-to-use module-level JSON serializer instance.
json_serializer = _JsonSerializer()
def _ndarray_to_list(data):
    # Convert numpy arrays to plain lists (JSON-serializable); pass others through.
    return data.tolist() if isinstance(data, np.ndarray) else data
def _json_serialize_from_buffer(buff):
    # The buffer is assumed to already contain JSON-encoded data.
    return buff.read()
class _JsonDeserializer(object):
    """Deserialize a JSON response body into the corresponding Python object."""
    def __init__(self):
        self.accept = CONTENT_TYPE_JSON
    def __call__(self, stream, content_type):
        # Decode the UTF-8 JSON content; the stream is always closed.
        try:
            return json.load(codecs.getreader('utf-8')(stream))
        finally:
            stream.close()
# Ready-to-use module-level JSON deserializer instance.
json_deserializer = _JsonDeserializer()
class _NumpyDeserializer(object):
    """Deserialize CSV, JSON, or NPY response bodies into numpy arrays."""
    def __init__(self, accept=CONTENT_TYPE_NPY, dtype=None):
        # accept (str): Accept header advertised to the endpoint.
        # dtype: optional numpy dtype used when parsing CSV/JSON content.
        self.accept = accept
        self.dtype = dtype
    def __call__(self, stream, content_type=CONTENT_TYPE_NPY):
        # Decode according to content_type; the stream is always closed.
        # Note: an unrecognized content type falls through and returns None.
        try:
            if content_type == CONTENT_TYPE_CSV:
                return np.genfromtxt(codecs.getreader('utf-8')(stream), delimiter=',', dtype=self.dtype)
            elif content_type == CONTENT_TYPE_JSON:
                return np.array(json.load(codecs.getreader('utf-8')(stream)), dtype=self.dtype)
            elif content_type == CONTENT_TYPE_NPY:
                return np.load(BytesIO(stream.read()))
        finally:
            stream.close()
# Ready-to-use module-level numpy deserializer instance.
numpy_deserializer = _NumpyDeserializer()
class _NPYSerializer(object):
    """Serialize arrays, lists, scalars, and buffers into NPY request bodies."""
    def __init__(self):
        self.content_type = CONTENT_TYPE_NPY
    def __call__(self, data, dtype=None):
        # Serialize ``data`` in NPY format; raises ValueError on empty
        # arrays/lists. ``dtype`` applies only when converting a list.
        if isinstance(data, np.ndarray):
            if not data.size > 0:
                raise ValueError("empty array can't be serialized")
            return _npy_serialize(data)
        if isinstance(data, list):
            if not len(data) > 0:
                raise ValueError("empty array can't be serialized")
            return _npy_serialize(np.array(data, dtype))
        # files and buffers. Assumed to hold npy-formatted data.
        if hasattr(data, 'read'):
            return data.read()
        return _npy_serialize(np.array(data))
def _npy_serialize(data):
    # Dump ``data`` into bytes using numpy's .npy binary format.
    buffer = BytesIO()
    np.save(buffer, data)
    return buffer.getvalue()
# Ready-to-use module-level NPY serializer instance.
npy_serializer = _NPYSerializer()
| true | true |
1c3127e03f7f9fb78734fc9dd2b6659ba51bc514 | 20,553 | py | Python | tests/test_indefinite_freeze_attack.py | KainaatSingh/tuf | 08f48d52df95aaaa44ab3f3143c3f148cd65f3aa | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_indefinite_freeze_attack.py | KainaatSingh/tuf | 08f48d52df95aaaa44ab3f3143c3f148cd65f3aa | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_indefinite_freeze_attack.py | KainaatSingh/tuf | 08f48d52df95aaaa44ab3f3143c3f148cd65f3aa | [
"Apache-2.0",
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2012 - 2017, New York University and the TUF contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""
<Program Name>
test_indefinite_freeze_attack.py
<Author>
Konstantin Andrianov.
<Started>
March 10, 2012.
April 1, 2014.
Refactored to use the 'unittest' module (test conditions in code, rather
than verifying text output), use pre-generated repository files, and
discontinue use of the old repository tools. -vladimir.v.diaz
March 9, 2016.
Additional test added relating to issue:
https://github.com/theupdateframework/tuf/issues/322
If a metadata file is not updated (no indication of a new version
available), the expiration of the pre-existing, locally trusted metadata
must still be detected. This additional test complains if such does not
occur, and accompanies code in tuf.client.updater:refresh() to detect it.
-sebastien.awwad
<Copyright>
See LICENSE-MIT OR LICENSE for licensing information.
<Purpose>
Simulate an indefinite freeze attack. In an indefinite freeze attack,
attacker is able to respond to client's requests with the same, outdated
metadata without the client being aware.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import os
import time
import tempfile
import shutil
import json
import logging
import unittest
import sys
from urllib import request
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock
import tuf.formats
import tuf.log
import tuf.client.updater as updater
import tuf.repository_tool as repo_tool
import tuf.unittest_toolbox as unittest_toolbox
import tuf.roledb
import tuf.keydb
import tuf.exceptions
from tests import utils
import securesystemslib
# The repository tool is imported and logs console messages by default. Disable
# console log messages generated by this unit test.
repo_tool.disable_console_log_messages()
logger = logging.getLogger(__name__)
class TestIndefiniteFreezeAttack(unittest_toolbox.Modified_TestCase):
  @classmethod
  def setUpClass(cls):
    """Create the class-level temporary directory and start the test HTTP server."""
    # Create a temporary directory to store the repository, metadata, and target
    # files.  'temporary_directory' must be deleted in TearDownModule() so that
    # temporary files are always removed, even when exceptions occur.
    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())

    # Launch a SimpleHTTPServer (serves files in the current directory).
    # Test cases will request metadata and target files that have been
    # pre-generated in 'tuf/tests/repository_data', which will be served by the
    # SimpleHTTPServer launched here.  The test cases of this unit test assume
    # the pre-generated metadata files have a specific structure, such
    # as a delegated role 'targets/role1', three target files, five key files,
    # etc.
    cls.server_process_handler = utils.TestServerProcess(log=logger)
  @classmethod
  def tearDownClass(cls):
    """Stop the test server and remove the class-level temporary directory."""
    # Cleans the resources and flush the logged lines (if any).
    cls.server_process_handler.clean()

    # Remove the temporary repository directory, which should contain all the
    # metadata, targets, and key files generated of all the test cases.
    shutil.rmtree(cls.temporary_directory)
  def setUp(self):
    """Copy the pre-generated repository/client/keystore into a fresh temp
    directory and construct the client updater each test case will use."""
    # We are inheriting from custom class.
    unittest_toolbox.Modified_TestCase.setUp(self)
    self.repository_name = 'test_repository1'

    # Copy the original repository files provided in the test folder so that
    # any modifications made to repository files are restricted to the copies.
    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
    temporary_repository_root = \
      self.make_temp_directory(directory=self.temporary_directory)

    # The original repository, keystore, and client directories will be copied
    # for each test case.
    original_repository = os.path.join(original_repository_files, 'repository')
    original_client = os.path.join(original_repository_files, 'client')
    original_keystore = os.path.join(original_repository_files, 'keystore')

    # Save references to the often-needed client repository directories.
    # Test cases need these references to access metadata and target files.
    self.repository_directory = \
      os.path.join(temporary_repository_root, 'repository')
    self.client_directory = os.path.join(temporary_repository_root, 'client')
    self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')

    # Copy the original 'repository', 'client', and 'keystore' directories
    # to the temporary repository the test cases can use.
    shutil.copytree(original_repository, self.repository_directory)
    shutil.copytree(original_client, self.client_directory)
    shutil.copytree(original_keystore, self.keystore_directory)

    # Set the url prefix required by the 'tuf/client/updater.py' updater.
    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
    repository_basepath = self.repository_directory[len(os.getcwd()):]
    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
        + str(self.server_process_handler.port) + repository_basepath

    # Setting 'tuf.settings.repository_directory' with the temporary client
    # directory copied from the original repository files.
    tuf.settings.repositories_directory = self.client_directory
    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
                                           'metadata_path': 'metadata',
                                           'targets_path': 'targets'}}

    # Create the repository instance.  The test cases will use this client
    # updater to refresh metadata, fetch target files, etc.
    self.repository_updater = updater.Updater(self.repository_name,
                                              self.repository_mirrors)
  def tearDown(self):
    """Remove per-test temp files, clear TUF databases, and flush server logs."""
    # Modified_TestCase.tearDown() automatically deletes temporary files and
    # directories that may have been created during each test case.
    unittest_toolbox.Modified_TestCase.tearDown(self)
    tuf.roledb.clear_roledb(clear_all=True)
    tuf.keydb.clear_keydb(clear_all=True)

    # Logs stdout and stderr from the sever subprocess.
    self.server_process_handler.flush_log()
  def test_without_tuf(self):
    """Demonstrate that a non-TUF client happily re-downloads an expired
    'timestamp.json' (i.e. an indefinite freeze attack succeeds without TUF)."""
    # Without TUF, Test 1 and Test 2 are functionally equivalent, so we skip
    # Test 1 and only perform Test 2.
    #
    # Test 1: If we find that the timestamp acquired from a mirror indicates
    #         that there is no new snapshot file, and our current snapshot
    #         file is expired, is it recognized as such?
    # Test 2: If an expired timestamp is downloaded, is it recognized as such?


    # Test 2 Begin:
    #
    # 'timestamp.json' specifies the latest version of the repository files.  A
    # client should only accept the same version of this file up to a certain
    # point, or else it cannot detect that new files are available for
    # download.  Modify the repository's timestamp.json' so that it expires
    # soon, copy it over to the client, and attempt to re-fetch the same
    # expired version.
    #
    # A non-TUF client (without a way to detect when metadata has expired) is
    # expected to download the same version, and thus the same outdated files.
    # Verify that the downloaded 'timestamp.json' contains the same file size
    # and hash as the one available locally.

    timestamp_path = os.path.join(self.repository_directory, 'metadata',
                                  'timestamp.json')

    timestamp_metadata = securesystemslib.util.load_json_file(timestamp_path)
    expiry_time = time.time() - 10
    expires = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
    expires = expires.isoformat() + 'Z'
    timestamp_metadata['signed']['expires'] = expires
    tuf.formats.check_signable_object_format(timestamp_metadata)

    with open(timestamp_path, 'wb') as file_object:
      # Explicitly specify the JSON separators for Python 2 + 3 consistency.
      timestamp_content = \
        json.dumps(timestamp_metadata, indent=1, separators=(',', ': '),
                   sort_keys=True).encode('utf-8')
      file_object.write(timestamp_content)

    client_timestamp_path = os.path.join(self.client_directory, 'timestamp.json')
    shutil.copy(timestamp_path, client_timestamp_path)

    length, hashes = securesystemslib.util.get_file_details(timestamp_path)
    fileinfo = tuf.formats.make_targets_fileinfo(length, hashes)

    url_prefix = self.repository_mirrors['mirror1']['url_prefix']
    url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json')

    request.urlretrieve(url_file.replace('\\', '/'), client_timestamp_path)

    length, hashes = securesystemslib.util.get_file_details(client_timestamp_path)
    download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes)

    # Verify 'download_fileinfo' is equal to the current local file.
    self.assertEqual(download_fileinfo, fileinfo)
  def test_with_tuf(self):
    """Verify that a TUF client detects expired snapshot and timestamp metadata
    (freeze attacks) and can recover once valid metadata is re-issued."""
    # Three tests are conducted here.
    #
    # Test 1: If we find that the timestamp acquired from a mirror indicates
    #         that there is no new snapshot file, and our current snapshot
    #         file is expired, is it recognized as such?
    # Test 2: If an expired timestamp is downloaded, is it recognized as such?
    # Test 3: If an expired Snapshot is downloaded, is it (1) rejected? (2) the
    # local Snapshot file deleted? (3) and is the client able to recover when
    # given a new, valid Snapshot?


    # Test 1 Begin:
    #
    # Addresses this issue: https://github.com/theupdateframework/tuf/issues/322
    #
    # If time has passed and our snapshot or targets role is expired, and
    # the mirror whose timestamp we fetched doesn't indicate the existence of a
    # new snapshot version, we still need to check that it's expired and notify
    # the software update system / application / user.  This test creates that
    # scenario.  The correct behavior is to raise an exception.
    #
    # Background: Expiration checks (updater._ensure_not_expired) were
    # previously conducted when the metadata file was downloaded.  If no new
    # metadata file was downloaded, no expiry check would occur.  In particular,
    # while root was checked for expiration at the beginning of each
    # updater.refresh() cycle, and timestamp was always checked because it was
    # always fetched, snapshot and targets were never checked if the user did
    # not receive evidence that they had changed.  This bug allowed a class of
    # freeze attacks.
    # That bug was fixed and this test tests that fix going forward.

    # Modify the timestamp file on the remote repository.  'timestamp.json'
    # must be properly updated and signed with 'repository_tool.py', otherwise
    # the client will reject it as invalid metadata.

    # Load the repository
    repository = repo_tool.load_repository(self.repository_directory)

    # Load the snapshot and timestamp keys
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                  'password')
    repository.timestamp.load_signing_key(timestamp_private)
    key_file = os.path.join(self.keystore_directory, 'snapshot_key')
    snapshot_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                  'password')
    repository.snapshot.load_signing_key(snapshot_private)

    # sign snapshot with expiry in near future (earlier than e.g. timestamp)
    expiry = int(time.time() + 60*60)
    repository.snapshot.expiration = tuf.formats.unix_timestamp_to_datetime(
        expiry)
    repository.mark_dirty(['snapshot', 'timestamp'])
    repository.writeall()

    # And move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Refresh metadata on the client. For this refresh, all data is not expired.
    logger.info('Test: Refreshing #1 - Initial metadata refresh occurring.')
    self.repository_updater.refresh()

    logger.info('Test: Refreshing #2 - refresh after local snapshot expiry.')

    # mock current time to one second after snapshot expiry
    mock_time = mock.Mock()
    mock_time.return_value = expiry + 1
    with mock.patch('time.time', mock_time):
      try:
        self.repository_updater.refresh() # We expect this to fail!

      except tuf.exceptions.ExpiredMetadataError:
        logger.info('Test: Refresh #2 - failed as expected.  Expired local'
            ' snapshot case generated a tuf.exceptions.ExpiredMetadataError'
            ' exception as expected.  Test pass.')

      else:
        self.fail('TUF failed to detect expired stale snapshot metadata.  Freeze'
          ' attack successful.')

    # Test 2 Begin:
    #
    # 'timestamp.json' specifies the latest version of the repository files.
    # A client should only accept the same version of this file up to a certain
    # point, or else it cannot detect that new files are available for download.
    # Modify the repository's 'timestamp.json' so that it is about to expire,
    # copy it over the to client, wait a moment until it expires, and attempt to
    # re-fetch the same expired version.

    # The same scenario as in test_without_tuf() is followed here, except with
    # a TUF client.  The TUF client performs a refresh of top-level metadata,
    # which includes 'timestamp.json', and should detect a freeze attack if
    # the repository serves an outdated 'timestamp.json'.

    # Modify the timestamp file on the remote repository.  'timestamp.json'
    # must be properly updated and signed with 'repository_tool.py', otherwise
    # the client will reject it as invalid metadata.  The resulting
    # 'timestamp.json' should be valid metadata, but expired (as intended).
    repository = repo_tool.load_repository(self.repository_directory)

    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                  'password')

    repository.timestamp.load_signing_key(timestamp_private)

    # Set timestamp metadata to expire soon.
    # We cannot set the timestamp expiration with
    # 'repository.timestamp.expiration = ...' with already-expired timestamp
    # metadata because of consistency checks that occur during that assignment.
    expiry_time = time.time() + 60*60
    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))

    repository.timestamp.expiration = datetime_object
    repository.writeall()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # mock current time to one second after timestamp expiry
    mock_time = mock.Mock()
    mock_time.return_value = expiry_time + 1
    with mock.patch('time.time', mock_time):
      try:
        self.repository_updater.refresh() # We expect NoWorkingMirrorError.

      except tuf.exceptions.NoWorkingMirrorError as e:
        # Make sure the contained error is ExpiredMetadataError
        for mirror_url, mirror_error in e.mirror_errors.items():
          self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError))

      else:
        self.fail('TUF failed to detect expired, stale timestamp metadata.'
          ' Freeze attack successful.')

    # Test 3 Begin:
    #
    # Serve the client expired Snapshot.  The client should reject the given,
    # expired Snapshot and the locally trusted one, which should now be out of
    # date.
    # After the attack, attempt to re-issue a valid Snapshot to verify that
    # the client is still able to update. A bug previously caused snapshot
    # expiration or replay to result in an indefinite freeze; see
    # github.com/theupdateframework/tuf/issues/736
    repository = repo_tool.load_repository(self.repository_directory)

    ts_key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    snapshot_key_file = os.path.join(self.keystore_directory, 'snapshot_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(
        ts_key_file, 'password')
    snapshot_private = repo_tool.import_ed25519_privatekey_from_file(
        snapshot_key_file, 'password')

    repository.timestamp.load_signing_key(timestamp_private)
    repository.snapshot.load_signing_key(snapshot_private)

    # Set ts to expire in 1 month.
    ts_expiry_time = time.time() + 2630000

    # Set snapshot to expire in 1 hour.
    snapshot_expiry_time = time.time() + 60*60

    ts_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(ts_expiry_time))
    snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(snapshot_expiry_time))
    repository.timestamp.expiration = ts_datetime_object
    repository.snapshot.expiration = snapshot_datetime_object
    repository.writeall()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # mock current time to one second after snapshot expiry
    mock_time = mock.Mock()
    mock_time.return_value = snapshot_expiry_time + 1
    with mock.patch('time.time', mock_time):
      try:
        # We expect the following refresh() to raise a NoWorkingMirrorError.
        self.repository_updater.refresh()

      except tuf.exceptions.NoWorkingMirrorError as e:
        # Make sure the contained error is ExpiredMetadataError
        for mirror_url, mirror_error in e.mirror_errors.items():
          self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError))
          self.assertTrue(mirror_url.endswith('snapshot.json'))

      else:
        self.fail('TUF failed to detect expired, stale Snapshot metadata.'
          ' Freeze attack successful.')

    # The client should have rejected the malicious Snapshot metadata, and
    # distrusted the local snapshot file that is no longer valid.
    self.assertTrue('snapshot' not in self.repository_updater.metadata['current'])
    self.assertEqual(sorted(['root', 'targets', 'timestamp']),
        sorted(self.repository_updater.metadata['current']))

    # Verify that the client is able to recover from the malicious Snapshot.
    # Re-sign a valid Snapshot file that the client should accept.
    repository = repo_tool.load_repository(self.repository_directory)

    repository.timestamp.load_signing_key(timestamp_private)
    repository.snapshot.load_signing_key(snapshot_private)

    # Set snapshot to expire in 1 month.
    snapshot_expiry_time = time.time() + 2630000

    snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(snapshot_expiry_time))
    repository.snapshot.expiration = snapshot_datetime_object
    repository.writeall()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Verify that the client accepts the valid metadata file.
    self.repository_updater.refresh()
    self.assertTrue('snapshot' in self.repository_updater.metadata['current'])
    self.assertEqual(sorted(['root', 'targets', 'timestamp', 'snapshot']),
        sorted(self.repository_updater.metadata['current']))
# Script entry point: configure test logging from argv, then run the tests.
if __name__ == '__main__':
  utils.configure_test_logging(sys.argv)
  unittest.main()
| 43.269474 | 88 | 0.724225 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import os
import time
import tempfile
import shutil
import json
import logging
import unittest
import sys
from urllib import request
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock
import tuf.formats
import tuf.log
import tuf.client.updater as updater
import tuf.repository_tool as repo_tool
import tuf.unittest_toolbox as unittest_toolbox
import tuf.roledb
import tuf.keydb
import tuf.exceptions
from tests import utils
import securesystemslib
# Silence the console log messages that repository_tool emits by default so
# they do not clutter the test output.
repo_tool.disable_console_log_messages()
# Module-level logger shared by the test cases and the test HTTP server.
logger = logging.getLogger(__name__)
class TestIndefiniteFreezeAttack(unittest_toolbox.Modified_TestCase):
  """Regression tests for indefinite freeze attacks on a TUF client.

  In an indefinite freeze attack an attacker (or a stale mirror) keeps
  serving metadata that has expired, or withholds newer metadata, so that
  the client never learns about updates.  The client must reject expired
  Timestamp/Snapshot metadata, whether it was just downloaded or is
  already trusted locally.
  """

  @classmethod
  def setUpClass(cls):
    # Create a temporary directory for the per-test repository copies and
    # start a simple HTTP server that serves them.  Both are shared by every
    # test case in this class and cleaned up in tearDownClass().
    cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
    cls.server_process_handler = utils.TestServerProcess(log=logger)

  @classmethod
  def tearDownClass(cls):
    # Stop the class-level HTTP server and delete the shared temporary
    # directory along with all per-test repository copies inside it.
    cls.server_process_handler.clean()
    shutil.rmtree(cls.temporary_directory)

  def setUp(self):
    """Give each test a fresh copy of the repository, client, and keystore."""
    unittest_toolbox.Modified_TestCase.setUp(self)
    self.repository_name = 'test_repository1'
    # Copy the pristine 'repository_data' fixtures into a fresh temporary
    # directory so each test may modify them freely.
    original_repository_files = os.path.join(os.getcwd(), 'repository_data')
    temporary_repository_root = \
      self.make_temp_directory(directory=self.temporary_directory)
    original_repository = os.path.join(original_repository_files, 'repository')
    original_client = os.path.join(original_repository_files, 'client')
    original_keystore = os.path.join(original_repository_files, 'keystore')
    self.repository_directory = \
      os.path.join(temporary_repository_root, 'repository')
    self.client_directory = os.path.join(temporary_repository_root, 'client')
    self.keystore_directory = os.path.join(temporary_repository_root, 'keystore')
    shutil.copytree(original_repository, self.repository_directory)
    shutil.copytree(original_client, self.client_directory)
    shutil.copytree(original_keystore, self.keystore_directory)
    # Derive the URL under which the class-level HTTP server exposes this
    # test's repository copy (the server's document root is the CWD).
    repository_basepath = self.repository_directory[len(os.getcwd()):]
    url_prefix = 'http://' + utils.TEST_HOST_ADDRESS + ':' \
        + str(self.server_process_handler.port) + repository_basepath
    # Point the updater at the per-test client directory and single mirror.
    tuf.settings.repositories_directory = self.client_directory
    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
                                           'metadata_path': 'metadata',
                                           'targets_path': 'targets'}}
    self.repository_updater = updater.Updater(self.repository_name,
                                              self.repository_mirrors)

  def tearDown(self):
    unittest_toolbox.Modified_TestCase.tearDown(self)
    # Clear global role/key databases so state does not leak between tests.
    tuf.roledb.clear_roledb(clear_all=True)
    tuf.keydb.clear_keydb(clear_all=True)
    # Emit the server output accumulated while this test case ran.
    self.server_process_handler.flush_log()

  def test_without_tuf(self):
    """Show that a plain HTTP download accepts expired timestamp metadata.

    Without TUF there is no expiration check: the client happily fetches a
    'timestamp.json' whose 'expires' field lies in the past.  This is the
    baseline that test_with_tuf() is contrasted against.
    """
    # Rewrite the repository's timestamp metadata so that it expired ten
    # seconds ago.  NOTE(review): the file is re-written without re-signing,
    # which is acceptable here because the raw download below does not
    # verify signatures.
    timestamp_path = os.path.join(self.repository_directory, 'metadata',
                                  'timestamp.json')
    timestamp_metadata = securesystemslib.util.load_json_file(timestamp_path)
    expiry_time = time.time() - 10
    expires = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
    expires = expires.isoformat() + 'Z'
    timestamp_metadata['signed']['expires'] = expires
    tuf.formats.check_signable_object_format(timestamp_metadata)
    with open(timestamp_path, 'wb') as file_object:
      # Serialize with the same formatting settings the repository uses.
      timestamp_content = \
        json.dumps(timestamp_metadata, indent=1, separators=(',', ': '),
                   sort_keys=True).encode('utf-8')
      file_object.write(timestamp_content)
    client_timestamp_path = os.path.join(self.client_directory, 'timestamp.json')
    shutil.copy(timestamp_path, client_timestamp_path)
    # Record the file details of the (expired) server-side timestamp ...
    length, hashes = securesystemslib.util.get_file_details(timestamp_path)
    fileinfo = tuf.formats.make_targets_fileinfo(length, hashes)
    # ... download it with a raw urlretrieve(), bypassing TUF entirely ...
    url_prefix = self.repository_mirrors['mirror1']['url_prefix']
    url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json')
    request.urlretrieve(url_file.replace('\\', '/'), client_timestamp_path)
    # ... and confirm the expired file was accepted verbatim.
    length, hashes = securesystemslib.util.get_file_details(client_timestamp_path)
    download_fileinfo = tuf.formats.make_targets_fileinfo(length, hashes)
    self.assertEqual(download_fileinfo, fileinfo)

  def test_with_tuf(self):
    """Verify that a TUF client detects expired metadata and can recover.

    Three scenarios are exercised in sequence:
      1. The locally trusted Snapshot expires; refresh() must raise
         ExpiredMetadataError even though nothing new was served.
      2. The repository serves expired Timestamp metadata; refresh() must
         fail with NoWorkingMirrorError wrapping ExpiredMetadataError.
      3. The repository serves expired Snapshot metadata; the client must
         reject it AND distrust its stale local snapshot, then accept a
         subsequently re-issued valid Snapshot (regression test for
         github.com/theupdateframework/tuf/issues/736).
    """
    # Test 1 Begin:
    #
    # Sign a Snapshot that expires in one hour, let the client trust it,
    # then advance the (mocked) clock past that expiry.  Even without a new
    # snapshot version the client must notice the expiration and complain.
    repository = repo_tool.load_repository(self.repository_directory)
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                     'password')
    repository.timestamp.load_signing_key(timestamp_private)
    key_file = os.path.join(self.keystore_directory, 'snapshot_key')
    snapshot_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                    'password')
    repository.snapshot.load_signing_key(snapshot_private)
    expiry = int(time.time() + 60*60)
    repository.snapshot.expiration = tuf.formats.unix_timestamp_to_datetime(
        expiry)
    repository.mark_dirty(['snapshot', 'timestamp'])
    repository.writeall()
    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))
    logger.info('Test: Refreshing #1 - Initial metadata refresh occurring.')
    self.repository_updater.refresh()
    logger.info('Test: Refreshing #2 - refresh after local snapshot expiry.')
    # Mock the current time to one second after the snapshot expiry.
    mock_time = mock.Mock()
    mock_time.return_value = expiry + 1
    with mock.patch('time.time', mock_time):
      try:
        self.repository_updater.refresh()
      except tuf.exceptions.ExpiredMetadataError:
        logger.info('Test: Refresh #2 - failed as expected. Expired local'
          ' snapshot case generated a tuf.exceptions.ExpiredMetadataError'
          ' exception as expected. Test pass.')
      else:
        self.fail('TUF failed to detect expired stale snapshot metadata. Freeze'
          ' attack successful.')
    # Test 2 Begin:
    #
    # The same scenario as in test_without_tuf() is followed here, except
    # with a TUF client.  The TUF client performs a refresh of top-level
    # metadata, which includes 'timestamp.json', and should detect a freeze
    # attack if the repository serves an outdated 'timestamp.json'.
    #
    # Modify the timestamp file on the remote repository.  'timestamp.json'
    # must be properly updated and signed with 'repository_tool.py',
    # otherwise the client will reject it as invalid metadata.  The
    # resulting 'timestamp.json' should be valid metadata, but expired
    # (as intended, via the mocked clock below).
    repository = repo_tool.load_repository(self.repository_directory)
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(key_file,
                                                                     'password')
    repository.timestamp.load_signing_key(timestamp_private)
    # Set timestamp metadata to expire in one hour.  We cannot assign an
    # already-expired datetime directly with
    # 'repository.timestamp.expiration = ...' because of consistency checks
    # that occur during that assignment; instead the clock is mocked past
    # the expiry below.
    expiry_time = time.time() + 60*60
    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
    repository.timestamp.expiration = datetime_object
    repository.writeall()
    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))
    # Mock the current time to one second after the timestamp expiry.
    mock_time = mock.Mock()
    mock_time.return_value = expiry_time + 1
    with mock.patch('time.time', mock_time):
      try:
        self.repository_updater.refresh() # We expect NoWorkingMirrorError.
      except tuf.exceptions.NoWorkingMirrorError as e:
        # Make sure the contained error is ExpiredMetadataError.
        for mirror_url, mirror_error in e.mirror_errors.items():
          self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError))
      else:
        self.fail('TUF failed to detect expired, stale timestamp metadata.'
          ' Freeze attack successful.')
    # Test 3 Begin:
    #
    # Serve the client expired Snapshot.  The client should reject the
    # given, expired Snapshot and the locally trusted one, which should now
    # be out of date.
    # After the attack, attempt to re-issue a valid Snapshot to verify that
    # the client is still able to update.  A bug previously caused snapshot
    # expiration or replay to result in an indefinite freeze; see
    # github.com/theupdateframework/tuf/issues/736
    repository = repo_tool.load_repository(self.repository_directory)
    ts_key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    snapshot_key_file = os.path.join(self.keystore_directory, 'snapshot_key')
    timestamp_private = repo_tool.import_ed25519_privatekey_from_file(
        ts_key_file, 'password')
    snapshot_private = repo_tool.import_ed25519_privatekey_from_file(
        snapshot_key_file, 'password')
    repository.timestamp.load_signing_key(timestamp_private)
    repository.snapshot.load_signing_key(snapshot_private)
    # Set timestamp to expire in 1 month so that only snapshot is stale.
    ts_expiry_time = time.time() + 2630000
    # Set snapshot to expire in 1 hour.
    snapshot_expiry_time = time.time() + 60*60
    ts_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(ts_expiry_time))
    snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(snapshot_expiry_time))
    repository.timestamp.expiration = ts_datetime_object
    repository.snapshot.expiration = snapshot_datetime_object
    repository.writeall()
    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))
    # Mock the current time to one second after the snapshot expiry.
    mock_time = mock.Mock()
    mock_time.return_value = snapshot_expiry_time + 1
    with mock.patch('time.time', mock_time):
      try:
        # We expect the following refresh() to raise a NoWorkingMirrorError.
        self.repository_updater.refresh()
      except tuf.exceptions.NoWorkingMirrorError as e:
        # Make sure the contained error is ExpiredMetadataError and that it
        # was raised for the snapshot role specifically.
        for mirror_url, mirror_error in e.mirror_errors.items():
          self.assertTrue(isinstance(mirror_error, tuf.exceptions.ExpiredMetadataError))
          self.assertTrue(mirror_url.endswith('snapshot.json'))
      else:
        self.fail('TUF failed to detect expired, stale Snapshot metadata.'
          ' Freeze attack successful.')
    # The client should have rejected the malicious Snapshot metadata, and
    # distrusted the local snapshot file that is no longer valid.
    self.assertTrue('snapshot' not in self.repository_updater.metadata['current'])
    self.assertEqual(sorted(['root', 'targets', 'timestamp']),
        sorted(self.repository_updater.metadata['current']))
    # Verify that the client is able to recover from the malicious Snapshot.
    # Re-sign a valid Snapshot file that the client should accept.
    repository = repo_tool.load_repository(self.repository_directory)
    repository.timestamp.load_signing_key(timestamp_private)
    repository.snapshot.load_signing_key(snapshot_private)
    # Set snapshot to expire in 1 month.
    snapshot_expiry_time = time.time() + 2630000
    snapshot_datetime_object = tuf.formats.unix_timestamp_to_datetime(
        int(snapshot_expiry_time))
    repository.snapshot.expiration = snapshot_datetime_object
    repository.writeall()
    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))
    # Verify that the client accepts the valid metadata file.
    self.repository_updater.refresh()
    self.assertTrue('snapshot' in self.repository_updater.metadata['current'])
    self.assertEqual(sorted(['root', 'targets', 'timestamp', 'snapshot']),
        sorted(self.repository_updater.metadata['current']))
# Run the test cases in this module when executed directly
# (e.g. `python test_indefinite_freeze_attack.py`).
if __name__ == '__main__':
  utils.configure_test_logging(sys.argv)
  unittest.main()
| true | true |
1c3128678f21598c7caa8347da40ac2a26954faf | 32,068 | py | Python | test/functional/fundrawtransaction.py | taowen1990/merit | d5cd9ff6c2c77caccf6a6b936884e58f2b88faed | [
"MIT"
] | 229 | 2018-01-01T09:43:38.000Z | 2022-03-21T23:11:20.000Z | test/functional/fundrawtransaction.py | taowen1990/merit | d5cd9ff6c2c77caccf6a6b936884e58f2b88faed | [
"MIT"
] | 109 | 2018-01-01T17:23:02.000Z | 2020-10-31T00:06:19.000Z | test/functional/fundrawtransaction.py | taowen1990/merit | d5cd9ff6c2c77caccf6a6b936884e58f2b88faed | [
"MIT"
] | 26 | 2018-01-02T22:05:19.000Z | 2020-10-30T21:10:55.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import MeritTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first UTXO in *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError when no matching unspent output exists, so a test
    that depends on a specific coin fails loudly rather than continuing with
    the wrong input.
    """
    matching = (utxo for utxo in listunspent if utxo['amount'] == amount)
    found = next(matching, None)
    if found is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return found
class RawTransactionsTest(MeritTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid merit address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 MRT to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_jsonrpc(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| 43.868673 | 223 | 0.569103 |
from test_framework.test_framework import MeritTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(MeritTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
ransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| true | true |
1c312882f690d45be0acf85dd13c25cd0fdf9bbe | 1,154 | py | Python | tests/data/source_code/python/default/asciidoxy/default_values.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | 14 | 2020-04-28T08:51:43.000Z | 2022-02-12T13:40:34.000Z | tests/data/source_code/python/default/asciidoxy/default_values.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | 47 | 2020-05-18T14:19:31.000Z | 2022-03-04T13:46:46.000Z | tests/data/source_code/python/default/asciidoxy/default_values.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | 8 | 2020-05-17T20:52:42.000Z | 2022-02-25T16:16:01.000Z | # Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Point:
"""Class representing a simple point."""
def __init__(self, x: int = 0, y: int = 1):
"""Construct a point.
Args:
x: The X coordinate.
y: The Y coordinate.
"""
...
def increment(self, x: int = 2, y: int = 3) -> "Point":
"""Create a new incremented point.
Args:
x: Value to increment the X coordinate with.
y: Value to increment the Y coordinate with.
Returns:
A new incremented Point.
"""
...
| 30.368421 | 74 | 0.627383 |
class Point:
def __init__(self, x: int = 0, y: int = 1):
...
def increment(self, x: int = 2, y: int = 3) -> "Point":
...
| true | true |
1c3128bf0ed7b0c9cf485afafd436c3b34c03a80 | 973 | py | Python | taskify/todo/models.py | tricelex/taskify | cf967bbccc39aef65efd13c429d48455f38e1fb6 | [
"MIT"
] | null | null | null | taskify/todo/models.py | tricelex/taskify | cf967bbccc39aef65efd13c429d48455f38e1fb6 | [
"MIT"
] | 1 | 2022-03-01T11:20:20.000Z | 2022-03-01T11:20:20.000Z | taskify/todo/models.py | tricelex/taskify | cf967bbccc39aef65efd13c429d48455f38e1fb6 | [
"MIT"
] | null | null | null | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse
from taskify.utils.models import BaseAbstractModel
class TaskList(BaseAbstractModel):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True
)
title = models.CharField(max_length=250)
description = models.CharField(max_length=125, blank=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
on_delete=models.CASCADE,
related_name="user",
)
def __str__(self):
return f"{self.title} - {self.user}"
def get_absolute_url(self):
return reverse("todos:task_list_detail", kwargs={"uuid": self.id})
class Task(BaseAbstractModel):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True
)
title = models.CharField(max_length=250)
status = models.BooleanField(default=False)
| 27.027778 | 74 | 0.700925 | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse
from taskify.utils.models import BaseAbstractModel
class TaskList(BaseAbstractModel):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True
)
title = models.CharField(max_length=250)
description = models.CharField(max_length=125, blank=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
on_delete=models.CASCADE,
related_name="user",
)
def __str__(self):
return f"{self.title} - {self.user}"
def get_absolute_url(self):
return reverse("todos:task_list_detail", kwargs={"uuid": self.id})
class Task(BaseAbstractModel):
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False, unique=True
)
title = models.CharField(max_length=250)
status = models.BooleanField(default=False)
| true | true |
1c31290a48f8cfbfa4d86d7e15fa5b566ee63b58 | 8,954 | py | Python | src/pyOpenMS/pyTOPP/MapAlignerPoseClustering.py | liangoaix/OpenMS | cccbc5d872320f197091596db275f35b4d0458cd | [
"Zlib",
"Apache-2.0"
] | null | null | null | src/pyOpenMS/pyTOPP/MapAlignerPoseClustering.py | liangoaix/OpenMS | cccbc5d872320f197091596db275f35b4d0458cd | [
"Zlib",
"Apache-2.0"
] | null | null | null | src/pyOpenMS/pyTOPP/MapAlignerPoseClustering.py | liangoaix/OpenMS | cccbc5d872320f197091596db275f35b4d0458cd | [
"Zlib",
"Apache-2.0"
] | null | null | null | import argparse
import pyopenms as pms
from common import addDataProcessing, writeParamsIfRequested, updateDefaults
def align(in_files, out_files, out_trafos, reference_index,
reference_file, params):
in_types = set(pms.FileHandler.getType(in_) for in_ in in_files)
if in_types <= set((pms.Type.MZML, pms.Type.MZXML, pms.Type.MZDATA)):
align_features = False
elif in_types == set((pms.Type.FEATUREXML,)):
align_features = True
else:
raise Exception("different kinds of input files")
algorithm = pms.MapAlignmentAlgorithmPoseClustering()
alignment_params = params.copy("algorithm:", True)
algorithm.setParameters(alignment_params)
algorithm.setLogType(pms.LogType.CMD)
plog = pms.ProgressLogger()
plog.setLogType(pms.LogType.CMD)
if reference_file:
file_ = reference_file
elif reference_index > 0:
file_ = in_files[reference_index-1]
else:
sizes = []
if align_features:
fh = pms.FeatureXMLFile()
plog.startProgress(0, len(in_files), "Determine Reference map")
for i, in_f in enumerate(in_files):
sizes.append((fh.loadSize(in_f), in_f))
plog.setProgress(i)
else:
fh = pms.MzMLFile()
mse = pms.MSExperiment()
plog.startProgress(0, len(in_files), "Determine Reference map")
for i, in_f in enumerate(in_files):
fh.load(in_f, mse)
mse.updateRanges()
sizes.append((mse.getSize(), in_f))
plog.setProgress(i)
plog.endProgress()
__, file_ = max(sizes)
f_fmxl = pms.FeatureXMLFile()
if not out_files:
options = f_fmxl.getOptions()
options.setLoadConvexHull(False)
options.setLoadSubordinates(False)
f_fmxl.setOptions(options)
if align_features:
map_ref = pms.FeatureMap()
f_fxml_tmp = pms.FeatureXMLFile()
options = f_fmxl.getOptions()
options.setLoadConvexHull(False)
options.setLoadSubordinates(False)
f_fxml_tmp.setOptions(options)
f_fxml_tmp.load(file_, map_ref)
algorithm.setReference(map_ref)
else:
map_ref = pms.MSExperiment()
pms.MzMLFile().load(file_, map_ref)
algorithm.setReference(map_ref)
plog.startProgress(0, len(in_files), "Align input maps")
for i, in_file in enumerate(in_files):
trafo = pms.TransformationDescription()
if align_features:
map_ = pms.FeatureMap()
f_fxml_tmp = pms.FeatureXMLFile()
f_fxml_tmp.setOptions(f_fmxl.getOptions())
f_fxml_tmp.load(in_file, map_)
if in_file == file_:
trafo.fitModel("identity")
else:
algorithm.align(map_, trafo)
if out_files:
pms.MapAlignmentTransformer.transformSingleFeatureMap(map_, trafo)
addDataProcessing(map_, params, pms.ProcessingAction.ALIGNMENT)
f_fxml_tmp.store(out_files[i], map_)
else:
map_ = pms.MSExperiment()
pms.MzMLFile().load(in_file, map_)
if in_file == file_:
trafo.fitModel("identity")
else:
algorithm.align(map_, trafo)
if out_files:
pms.MapAlignmentTransformer.transformSinglePeakMap(map_, trafo)
addDataProcessing(map_, params, pms.ProcessingAction.ALIGNMENT)
pms.MzMLFile().store(out_files[i], map_)
if out_trafos:
pms.TransformationXMLFile().store(out_trafos[i], trafo)
plog.setProgress(i+1)
plog.endProgress()
def getModelDefaults(default_model):
params = pms.Param()
params.setValue("type", default_model, "Type of model")
model_types = [ "linear", "interpolated"]
if default_model not in model_types:
model_types.insert(0, default_model)
params.setValidStrings("type", model_types)
model_params = pms.Param()
pms.TransformationModelLinear.getDefaultParameters(model_params)
params.insert("linear:", model_params)
params.setSectionDescription("linear", "Parameters for 'linear' model")
pms.TransformationModelInterpolated.getDefaultParameters(model_params)
entry = model_params.getEntry("interpolation_type")
interpolation_types = entry.valid_strings
model_params.setValidStrings("interpolation_type", interpolation_types)
params.insert("interpolated:", model_params)
params.setSectionDescription("interpolated", "Parameters for 'interpolated' model")
return params
def getDefaultParameters():
model_param = getModelDefaults("linear")
algo_param = pms.MapAlignmentAlgorithmPoseClustering().getParameters()
default = pms.Param()
default.insert("model:", model_param)
default.insert("algorithm:", algo_param)
return default
def main():
parser = argparse.ArgumentParser(description="PeakPickerHiRes")
parser.add_argument("-in",
action="append",
type=str,
dest="in_",
metavar="input_file",
)
parser.add_argument("-seeds",
action="store",
type=str,
metavar="seeds_file",
)
parser.add_argument("-out",
action="append",
type=str,
metavar="output_file",
)
parser.add_argument("-trafo_out",
action="append",
type=str,
metavar="output_file",
)
parser.add_argument("-ini",
action="store",
type=str,
metavar="ini_file",
)
parser.add_argument("-dict_ini",
action="store",
type=str,
metavar="python_dict_ini_file",
)
parser.add_argument("-write_ini",
action="store",
type=str,
metavar="ini_file",
)
parser.add_argument("-write_dict_ini",
action="store",
type=str,
metavar="python_dict_ini_file",
)
parser.add_argument("-reference:file",
action="store",
type=str,
metavar="reference_file",
dest="reference_file",
)
parser.add_argument("-reference:index",
action="store",
type=int,
metavar="reference_index",
dest="reference_index",
)
args = parser.parse_args()
def collect(args):
return [f.strip() for arg in args or [] for f in arg.split(",")]
in_files = collect(args.in_)
out_files = collect(args.out)
trafo_out_files = collect(args.trafo_out)
run_mode = (in_files and (out_files or trafo_out_files))\
and (args.ini is not None or args.dict_ini is not None)
write_mode = args.write_ini is not None or args.write_dict_ini is not None
ok = run_mode or write_mode
if not ok:
parser.error("either specify -in, -(trafo_)out and -(dict)ini for running "
"the map aligner\nor -write(dict)ini for creating std "
"ini file")
defaults = getDefaultParameters()
write_requested = writeParamsIfRequested(args, defaults)
if not write_requested:
updateDefaults(args, defaults)
if not out_files and not trafo_out_files:
parser.error("need -out or -trafo_out files")
if out_files and len(out_files) != len(in_files):
parser.error("need as many -out files as -in files")
if trafo_out_files and len(trafo_out_files) != len(in_files):
parser.error("need as many -trafo_out files as -in files")
if args.reference_index is not None and args.reference_file is not None:
parser.error("can only handle either reference:index or reference:file")
if args.reference_index is not None:
if args.reference_index <0 or args.reference_index >= len(in_files):
parser.error("reference:index invalid")
if args.reference_file is not None:
if args.reference_file not in in_files:
parser.error("reference_file not in input files")
align(in_files, out_files, trafo_out_files, args.reference_index or 0,
args.reference_file or "", defaults)
if __name__ == "__main__":
main()
| 34.976563 | 87 | 0.580634 | import argparse
import pyopenms as pms
from common import addDataProcessing, writeParamsIfRequested, updateDefaults
def align(in_files, out_files, out_trafos, reference_index,
reference_file, params):
in_types = set(pms.FileHandler.getType(in_) for in_ in in_files)
if in_types <= set((pms.Type.MZML, pms.Type.MZXML, pms.Type.MZDATA)):
align_features = False
elif in_types == set((pms.Type.FEATUREXML,)):
align_features = True
else:
raise Exception("different kinds of input files")
algorithm = pms.MapAlignmentAlgorithmPoseClustering()
alignment_params = params.copy("algorithm:", True)
algorithm.setParameters(alignment_params)
algorithm.setLogType(pms.LogType.CMD)
plog = pms.ProgressLogger()
plog.setLogType(pms.LogType.CMD)
if reference_file:
file_ = reference_file
elif reference_index > 0:
file_ = in_files[reference_index-1]
else:
sizes = []
if align_features:
fh = pms.FeatureXMLFile()
plog.startProgress(0, len(in_files), "Determine Reference map")
for i, in_f in enumerate(in_files):
sizes.append((fh.loadSize(in_f), in_f))
plog.setProgress(i)
else:
fh = pms.MzMLFile()
mse = pms.MSExperiment()
plog.startProgress(0, len(in_files), "Determine Reference map")
for i, in_f in enumerate(in_files):
fh.load(in_f, mse)
mse.updateRanges()
sizes.append((mse.getSize(), in_f))
plog.setProgress(i)
plog.endProgress()
__, file_ = max(sizes)
f_fmxl = pms.FeatureXMLFile()
if not out_files:
options = f_fmxl.getOptions()
options.setLoadConvexHull(False)
options.setLoadSubordinates(False)
f_fmxl.setOptions(options)
if align_features:
map_ref = pms.FeatureMap()
f_fxml_tmp = pms.FeatureXMLFile()
options = f_fmxl.getOptions()
options.setLoadConvexHull(False)
options.setLoadSubordinates(False)
f_fxml_tmp.setOptions(options)
f_fxml_tmp.load(file_, map_ref)
algorithm.setReference(map_ref)
else:
map_ref = pms.MSExperiment()
pms.MzMLFile().load(file_, map_ref)
algorithm.setReference(map_ref)
plog.startProgress(0, len(in_files), "Align input maps")
for i, in_file in enumerate(in_files):
trafo = pms.TransformationDescription()
if align_features:
map_ = pms.FeatureMap()
f_fxml_tmp = pms.FeatureXMLFile()
f_fxml_tmp.setOptions(f_fmxl.getOptions())
f_fxml_tmp.load(in_file, map_)
if in_file == file_:
trafo.fitModel("identity")
else:
algorithm.align(map_, trafo)
if out_files:
pms.MapAlignmentTransformer.transformSingleFeatureMap(map_, trafo)
addDataProcessing(map_, params, pms.ProcessingAction.ALIGNMENT)
f_fxml_tmp.store(out_files[i], map_)
else:
map_ = pms.MSExperiment()
pms.MzMLFile().load(in_file, map_)
if in_file == file_:
trafo.fitModel("identity")
else:
algorithm.align(map_, trafo)
if out_files:
pms.MapAlignmentTransformer.transformSinglePeakMap(map_, trafo)
addDataProcessing(map_, params, pms.ProcessingAction.ALIGNMENT)
pms.MzMLFile().store(out_files[i], map_)
if out_trafos:
pms.TransformationXMLFile().store(out_trafos[i], trafo)
plog.setProgress(i+1)
plog.endProgress()
def getModelDefaults(default_model):
params = pms.Param()
params.setValue("type", default_model, "Type of model")
model_types = [ "linear", "interpolated"]
if default_model not in model_types:
model_types.insert(0, default_model)
params.setValidStrings("type", model_types)
model_params = pms.Param()
pms.TransformationModelLinear.getDefaultParameters(model_params)
params.insert("linear:", model_params)
params.setSectionDescription("linear", "Parameters for 'linear' model")
pms.TransformationModelInterpolated.getDefaultParameters(model_params)
entry = model_params.getEntry("interpolation_type")
interpolation_types = entry.valid_strings
model_params.setValidStrings("interpolation_type", interpolation_types)
params.insert("interpolated:", model_params)
params.setSectionDescription("interpolated", "Parameters for 'interpolated' model")
return params
def getDefaultParameters():
model_param = getModelDefaults("linear")
algo_param = pms.MapAlignmentAlgorithmPoseClustering().getParameters()
default = pms.Param()
default.insert("model:", model_param)
default.insert("algorithm:", algo_param)
return default
def main():
parser = argparse.ArgumentParser(description="PeakPickerHiRes")
parser.add_argument("-in",
action="append",
type=str,
dest="in_",
metavar="input_file",
)
parser.add_argument("-seeds",
action="store",
type=str,
metavar="seeds_file",
)
parser.add_argument("-out",
action="append",
type=str,
metavar="output_file",
)
parser.add_argument("-trafo_out",
action="append",
type=str,
metavar="output_file",
)
parser.add_argument("-ini",
action="store",
type=str,
metavar="ini_file",
)
parser.add_argument("-dict_ini",
action="store",
type=str,
metavar="python_dict_ini_file",
)
parser.add_argument("-write_ini",
action="store",
type=str,
metavar="ini_file",
)
parser.add_argument("-write_dict_ini",
action="store",
type=str,
metavar="python_dict_ini_file",
)
parser.add_argument("-reference:file",
action="store",
type=str,
metavar="reference_file",
dest="reference_file",
)
parser.add_argument("-reference:index",
action="store",
type=int,
metavar="reference_index",
dest="reference_index",
)
args = parser.parse_args()
def collect(args):
return [f.strip() for arg in args or [] for f in arg.split(",")]
in_files = collect(args.in_)
out_files = collect(args.out)
trafo_out_files = collect(args.trafo_out)
run_mode = (in_files and (out_files or trafo_out_files))\
and (args.ini is not None or args.dict_ini is not None)
write_mode = args.write_ini is not None or args.write_dict_ini is not None
ok = run_mode or write_mode
if not ok:
parser.error("either specify -in, -(trafo_)out and -(dict)ini for running "
"the map aligner\nor -write(dict)ini for creating std "
"ini file")
defaults = getDefaultParameters()
write_requested = writeParamsIfRequested(args, defaults)
if not write_requested:
updateDefaults(args, defaults)
if not out_files and not trafo_out_files:
parser.error("need -out or -trafo_out files")
if out_files and len(out_files) != len(in_files):
parser.error("need as many -out files as -in files")
if trafo_out_files and len(trafo_out_files) != len(in_files):
parser.error("need as many -trafo_out files as -in files")
if args.reference_index is not None and args.reference_file is not None:
parser.error("can only handle either reference:index or reference:file")
if args.reference_index is not None:
if args.reference_index <0 or args.reference_index >= len(in_files):
parser.error("reference:index invalid")
if args.reference_file is not None:
if args.reference_file not in in_files:
parser.error("reference_file not in input files")
align(in_files, out_files, trafo_out_files, args.reference_index or 0,
args.reference_file or "", defaults)
if __name__ == "__main__":
main()
| true | true |
1c312aba90f987553c7c73457f580416290b3f39 | 2,727 | py | Python | collections/ansible_collections/redhat/satellite/plugins/modules/hardware_model.py | hindman-redhat/automated-smart-management-2 | 5450ccd71f2a4ba568a7f11b03466e1554ae0087 | [
"MIT"
] | null | null | null | collections/ansible_collections/redhat/satellite/plugins/modules/hardware_model.py | hindman-redhat/automated-smart-management-2 | 5450ccd71f2a4ba568a7f11b03466e1554ae0087 | [
"MIT"
] | null | null | null | collections/ansible_collections/redhat/satellite/plugins/modules/hardware_model.py | hindman-redhat/automated-smart-management-2 | 5450ccd71f2a4ba568a7f11b03466e1554ae0087 | [
"MIT"
] | 2 | 2021-03-30T14:26:02.000Z | 2021-04-01T18:17:29.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2020, Evgeni Golov <evgeni@golov.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: hardware_model
version_added: 1.0.0
short_description: Manage Hardware Models
description:
- Manage hardware models
author:
- "Evgeni Golov (@evgeni)"
options:
name:
description:
- Name of the hardware model
required: true
type: str
info:
description:
- General description of the hardware model
type: str
vendor_class:
description:
- The class of the machine as reported by the OpenBoot PROM.
- This is primarily used by Solaris SPARC builds and can be left blank for other architectures.
type: str
hardware_model:
description:
- The class of CPU supplied in this machine.
- This is primarily used by Sparc Solaris builds and can be left blank for other architectures.
type: str
extends_documentation_fragment:
- redhat.satellite.foreman
- redhat.satellite.foreman.entity_state
'''
EXAMPLES = '''
- name: "Create ACME Laptop model"
redhat.satellite.hardware_model:
username: "admin"
password: "changeme"
server_url: "https://satellite.example.com"
name: "acme laptop"
info: "this is the acme laptop"
state: present
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
hardware_models:
description: List of hardware models.
type: list
elements: dict
'''
from ansible_collections.redhat.satellite.plugins.module_utils.foreman_helper import ForemanEntityAnsibleModule
class ForemanModelModule(ForemanEntityAnsibleModule):
pass
def main():
module = ForemanModelModule(
foreman_spec=dict(
name=dict(required=True),
info=dict(),
vendor_class=dict(),
hardware_model=dict(),
),
)
with module.api_connection():
module.run()
if __name__ == '__main__':
main()
| 26.475728 | 111 | 0.703337 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: hardware_model
version_added: 1.0.0
short_description: Manage Hardware Models
description:
- Manage hardware models
author:
- "Evgeni Golov (@evgeni)"
options:
name:
description:
- Name of the hardware model
required: true
type: str
info:
description:
- General description of the hardware model
type: str
vendor_class:
description:
- The class of the machine as reported by the OpenBoot PROM.
- This is primarily used by Solaris SPARC builds and can be left blank for other architectures.
type: str
hardware_model:
description:
- The class of CPU supplied in this machine.
- This is primarily used by Sparc Solaris builds and can be left blank for other architectures.
type: str
extends_documentation_fragment:
- redhat.satellite.foreman
- redhat.satellite.foreman.entity_state
'''
EXAMPLES = '''
- name: "Create ACME Laptop model"
redhat.satellite.hardware_model:
username: "admin"
password: "changeme"
server_url: "https://satellite.example.com"
name: "acme laptop"
info: "this is the acme laptop"
state: present
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
hardware_models:
description: List of hardware models.
type: list
elements: dict
'''
from ansible_collections.redhat.satellite.plugins.module_utils.foreman_helper import ForemanEntityAnsibleModule
class ForemanModelModule(ForemanEntityAnsibleModule):
pass
def main():
module = ForemanModelModule(
foreman_spec=dict(
name=dict(required=True),
info=dict(),
vendor_class=dict(),
hardware_model=dict(),
),
)
with module.api_connection():
module.run()
if __name__ == '__main__':
main()
| true | true |
1c312b2883b1becc78033db9aad4d87b74a36a8d | 498 | py | Python | lib/salus/scanners/report_python_modules.py | greaterninja/salus | 2f5f56aa8abe252dea9bfbe8a17e086ae3eae6fa | [
"Apache-2.0"
] | null | null | null | lib/salus/scanners/report_python_modules.py | greaterninja/salus | 2f5f56aa8abe252dea9bfbe8a17e086ae3eae6fa | [
"Apache-2.0"
] | null | null | null | lib/salus/scanners/report_python_modules.py | greaterninja/salus | 2f5f56aa8abe252dea9bfbe8a17e086ae3eae6fa | [
"Apache-2.0"
] | null | null | null | import json
# Report the project's pinned requirements as JSON for the scanner to consume.
try: # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
    from pip.req import parse_requirements
# NOTE(review): parse_requirements is a private pip API; its return objects'
# attributes differ across pip versions -- verify against the pinned pip.
deps = parse_requirements('requirements.txt', session="_")
dependencies = {}
for dependency in deps:
    # Older pip requirement objects expose .key; newer ones expose .name.
    if hasattr(dependency.req, 'key'):
        dependencies[dependency.req.key] = str(dependency.req.specifier)
    else:
        dependencies[dependency.req.name] = str(dependency.req.specifier)
# Emit the {package_name: version_specifier} mapping on stdout.
print(json.dumps(dependencies))
| 26.210526 | 71 | 0.726908 | import json
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
deps = parse_requirements('requirements.txt', session="_")
dependencies = {}
for dependency in deps:
if hasattr(dependency.req, 'key'):
dependencies[dependency.req.key] = str(dependency.req.specifier)
else:
dependencies[dependency.req.name] = str(dependency.req.specifier)
print(json.dumps(dependencies))
| true | true |
1c312b83d918655938a1ccbc2c6b326c2c5b74d4 | 1,637 | py | Python | Vue_api_test/Myapp/views.py | archerckk/Vue_api_test | 94d37f2430ff6ead0aa64459079b96429298f8cc | [
"MIT"
] | null | null | null | Vue_api_test/Myapp/views.py | archerckk/Vue_api_test | 94d37f2430ff6ead0aa64459079b96429298f8cc | [
"MIT"
] | null | null | null | Vue_api_test/Myapp/views.py | archerckk/Vue_api_test | 94d37f2430ff6ead0aa64459079b96429298f8cc | [
"MIT"
] | null | null | null | import json
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
# Create your views here.
def index(request):
    # Render the landing page.
    return render(request, 'index.html')
# Corpus searched by search_key() below.
value_list = ['apple', 'pear', 'banana']
def load_login(request):
    # Render the login form page.
    return render(request, 'login.html')
def search_key(request):
    """Return, as JSON, the entries of ``value_list`` containing the POSTed key.

    Only POST is accepted; any other method yields a 404 response. A POST
    body without a "key" field yields an empty JSON array.
    """
    if request.method != 'POST':
        return HttpResponse(status=404)
    body = json.loads(request.body)
    if "key" not in body:
        return JsonResponse([], safe=False)
    key = body['key']
    matches = [item for item in value_list if key in item]
    return JsonResponse(matches, safe=False)
fruites = ['apple', 'pear', 'banana', 'orange']  # sic: "fruites" is the original spelling
def get_fruits(request):
    # Return the whole fruit list as a JSON array (safe=False allows a non-dict root).
    return JsonResponse(fruites, safe=False)
def login(request):
    """Validate POSTed credentials against a hard-coded user table.

    Responds with ``{"success": true}`` on a match, ``{"success": false}``
    on a mismatch or malformed body, and 404 for non-POST requests.
    """
    if request.method != 'POST':
        return HttpResponse(status=404)
    users = [
        {'user': 'user1', 'psw': 'user1'},
        {'user': 'user2', 'psw': 'user2'}
    ]
    body = json.loads(request.body)
    if "name" not in body or "psw" not in body:
        return JsonResponse({'success': False}, safe=False)
    matched = any(
        u['user'] == body['name'] and u['psw'] == body['psw'] for u in users
    )
    return JsonResponse({'success': matched}, safe=False)
def style_demo(request):
    # Render the static style demonstration page.
    return render(request,'style_demo.html')
def component_info(request):
return render(request,'component_info.html') | 21.539474 | 75 | 0.597434 | import json
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
def index(request):
return render(request, 'index.html')
value_list = ['apple', 'pear', 'banana']
def load_login(request):
return render(request, 'login.html')
def search_key(request):
method = request.method
if method == 'POST':
body = json.loads(request.body)
if "key" not in body:
return JsonResponse([], safe=False)
key = body['key']
ret = []
for i in value_list:
if key in i:
ret.append(i)
return JsonResponse(ret, safe=False)
else:
return HttpResponse(status=404)
fruites = ['apple', 'pear', 'banana', 'orange']
def get_fruits(request):
return JsonResponse(fruites, safe=False)
def login(request):
users = [
{'user': 'user1', 'psw': 'user1'},
{'user': 'user2', 'psw': 'user2'}
]
method = request.method
if method == 'POST':
body = json.loads(request.body)
if "name" not in body or "psw" not in body:
return JsonResponse({'success': False}, safe=False)
for user in users:
if user['user'] == body['name'] and user['psw'] == body['psw']:
return JsonResponse({'success': True}, safe=False)
else:
return JsonResponse({'success': False}, safe=False)
else:
return HttpResponse(status=404)
def style_demo(request):
return render(request,'style_demo.html')
def component_info(request):
return render(request,'component_info.html') | true | true |
1c312b94a31e9d253cc05a00ec83f6f345642192 | 3,506 | py | Python | flight/state_settings.py | pieperm/IARC-2020 | a90bfe830ea2ceced59e8f2e7b54862dda42f5a3 | [
"MIT"
] | 12 | 2019-10-10T22:17:45.000Z | 2021-09-14T23:54:02.000Z | flight/state_settings.py | pieperm/IARC-2020 | a90bfe830ea2ceced59e8f2e7b54862dda42f5a3 | [
"MIT"
] | 178 | 2019-10-29T16:28:02.000Z | 2021-07-26T17:15:31.000Z | flight/state_settings.py | pieperm/IARC-2020 | a90bfe830ea2ceced59e8f2e7b54862dda42f5a3 | [
"MIT"
] | 6 | 2019-10-09T00:20:27.000Z | 2021-09-28T00:24:00.000Z | """Class to contain setters and getters for settings in various flight states"""
DEFAULT_EARLY_LAPS: int = 2
DEFAULT_RETURN_LAPS: int = 2
DEFAULT_VISION_TEST: str = "module"
DEFAULT_RUN_TITLE: str = "N/A"
DEFAULT_RUN_DESCRIPTION: str = "N/A"
class StateSettings:
    """Bundle of configuration flags consumed by the flight state machine.

    Every attribute starts at its module-level default; the setter methods
    below mutate the corresponding attribute in place.
    """

    def __init__(self):
        """Initialize every setting to its default value."""
        # -- Takeoff --
        self.simple_takeoff: bool = False
        # -- EarlyLaps --
        self.do_early_laps: bool = True
        self.num_early_laps: int = DEFAULT_EARLY_LAPS
        # -- ToMast --
        self.go_to_mast: bool = False
        # -- DetectModule --
        self.detect_module: bool = False
        self.detect_mast_text: bool = False
        self.vision_test_type: str = DEFAULT_VISION_TEST
        # -- ReturnLaps --
        self.do_return_laps: bool = False
        self.num_return_laps: int = DEFAULT_RETURN_LAPS
        # -- Run metadata for logging --
        self.run_title: str = DEFAULT_RUN_TITLE
        self.run_description: str = DEFAULT_RUN_DESCRIPTION

    # -- Takeoff --
    def enable_simple_takeoff(self, simple_takeoff: bool) -> None:
        """Choose straight-up takeoff (True) over the normal takeoff routine."""
        self.simple_takeoff = simple_takeoff

    # -- EarlyLaps --
    def enable_early_laps(self, do_early_laps: bool) -> None:
        """Enable or disable the early-laps phase."""
        self.do_early_laps = do_early_laps

    def set_number_of_early_laps(self, num_laps: int) -> None:
        """Set how many early laps to fly."""
        self.num_early_laps = num_laps

    # -- ToMast --
    def enable_to_mast(self, go_to_mast: bool) -> None:
        """Enable or disable flying to the mast."""
        self.go_to_mast = go_to_mast

    # -- DetectModule --
    def enable_module_detection(self, detect_module: bool) -> None:
        """Enable or disable module detection."""
        self.detect_module = detect_module

    def enable_text_detection(self, detect_text: bool) -> None:
        """Enable or disable mast-text detection."""
        self.detect_mast_text = detect_text

    def set_vision_test(self, test_type: str) -> None:
        """Select which vision test to run.

        Generally only used together with simple takeoff.

        Args:
            test_type: 'module' for module detection, 'text' for mast text.

        Raises:
            ValueError: if ``test_type`` is neither 'module' nor 'text'.
        """
        if test_type not in ("module", "text"):
            raise ValueError(f"test_type must be 'module' or 'text', got {test_type}")
        self.vision_test_type = test_type

    # -- ReturnLaps --
    def enable_return_laps(self, do_return_laps: bool) -> None:
        """Enable or disable the return-laps phase."""
        self.do_return_laps = do_return_laps

    def set_number_of_return_laps(self, num_laps: int) -> None:
        """Set how many return laps to fly."""
        self.num_return_laps = num_laps

    # -- Other --
    def set_run_title(self, title: str) -> None:
        """Set the title for the run/test, echoed in logging output."""
        self.run_title = title

    def set_run_description(self, description: str) -> None:
        """Set the description for the run/test, echoed in logging output."""
        self.run_description = description
| 34.372549 | 92 | 0.648032 |
DEFAULT_EARLY_LAPS: int = 2
DEFAULT_RETURN_LAPS: int = 2
DEFAULT_VISION_TEST: str = "module"
DEFAULT_RUN_TITLE: str = "N/A"
DEFAULT_RUN_DESCRIPTION: str = "N/A"
class StateSettings:
def __init__(self):
self.simple_takeoff: bool = False
self.do_early_laps: bool = True
self.num_early_laps: int = DEFAULT_EARLY_LAPS
self.go_to_mast: bool = False
self.detect_module: bool = False
self.detect_mast_text: bool = False
self.vision_test_type: str = DEFAULT_VISION_TEST
self.do_return_laps: bool = False
self.num_return_laps: int = DEFAULT_RETURN_LAPS
self.run_title: str = DEFAULT_RUN_TITLE
self.run_description: str = DEFAULT_RUN_DESCRIPTION
def enable_simple_takeoff(self, simple_takeoff: bool) -> None:
self.simple_takeoff = simple_takeoff
def enable_early_laps(self, do_early_laps: bool) -> None:
self.do_early_laps = do_early_laps
def set_number_of_early_laps(self, num_laps: int) -> None:
self.num_early_laps = num_laps
def enable_to_mast(self, go_to_mast: bool) -> None:
self.go_to_mast = go_to_mast
def enable_module_detection(self, detect_module: bool) -> None:
self.detect_module = detect_module
def enable_text_detection(self, detect_text: bool) -> None:
self.detect_mast_text = detect_text
def set_vision_test(self, test_type: str) -> None:
if test_type == "module" or test_type == "text":
self.vision_test_type = test_type
else:
raise ValueError(f"test_type must be 'module' or 'text', got {test_type}")
def enable_return_laps(self, do_return_laps: bool) -> None:
self.do_return_laps = do_return_laps
def set_number_of_return_laps(self, num_laps: int) -> None:
self.num_return_laps = num_laps
def set_run_title(self, title: str) -> None:
self.run_title = title
def set_run_description(self, description: str) -> None:
self.run_description = description
| true | true |
1c312c16399deb036897c7350ef9d6f9245e655e | 41,208 | py | Python | tests/data_context/test_data_context.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | null | null | null | tests/data_context/test_data_context.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | null | null | null | tests/data_context/test_data_context.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | null | null | null | import json
import os
import shutil
from collections import OrderedDict
import pytest
from ruamel.yaml import YAML
from great_expectations.core import (
ExpectationConfiguration,
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.data_context import (
BaseDataContext,
DataContext,
ExplorerDataContext,
)
from great_expectations.data_context.store import ExpectationsStore
from great_expectations.data_context.types.base import DataContextConfig
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
safe_mmkdir,
)
from great_expectations.dataset import Dataset
from great_expectations.datasource import Datasource
from great_expectations.datasource.types.batch_kwargs import PathBatchKwargs
from great_expectations.exceptions import (
BatchKwargsError,
ConfigNotFoundError,
DataContextError,
)
from great_expectations.util import gen_directory_tree_str
from tests.test_utils import safe_remove
try:
from unittest import mock
except ImportError:
import mock
try:
from unittest import mock
except ImportError:
import mock
yaml = YAML()
@pytest.fixture()
def parameterized_expectation_suite():
    """Load the parameterized expectation suite fixture JSON as a plain dict."""
    fixture_path = file_relative_path(
        __file__,
        "../test_fixtures/expectation_suites/parameterized_expectation_suite_fixture.json",
    )
    with open(fixture_path, "r",) as suite:
        return json.load(suite)
def test_create_duplicate_expectation_suite(titanic_data_context):
    """create_expectation_suite refuses duplicates unless overwrite_existing=True."""
    # create new expectation suite
    assert titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite")
    # attempt to create expectation suite with name that already exists on data asset
    with pytest.raises(DataContextError):
        titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite")
    # create expectation suite with name that already exists on data asset, but pass overwrite_existing=True
    assert titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite", overwrite_existing=True)
def test_get_available_data_asset_names_with_one_datasource_including_a_single_generator(empty_data_context, filesystem_csv):
    """A datasource with a subdir-reader generator lists every file/directory asset."""
    empty_data_context.add_datasource("my_datasource",
                               module_name="great_expectations.datasource",
                               class_name="PandasDatasource",
                               generators={
                                   "subdir_reader": {
                                       "class_name": "SubdirReaderBatchKwargsGenerator",
                                       "base_directory": str(filesystem_csv)
                                   }
                               }
                               )
    available_asset_names = empty_data_context.get_available_data_asset_names()
    # Each entry pairs the asset name with how it appears on disk (file vs directory).
    assert set(available_asset_names["my_datasource"]["subdir_reader"]["names"]) == {('f3', 'directory'), ('f2', 'file'), ('f1', 'file')}
def test_get_available_data_asset_names_with_one_datasource_without_a_generator_returns_empty_dict(
    empty_data_context,
):
    """A datasource configured with no generators yields an empty mapping."""
    empty_data_context.add_datasource(
        "my_datasource",
        module_name="great_expectations.datasource",
        class_name="PandasDatasource",
    )
    obs = empty_data_context.get_available_data_asset_names()
    assert obs == {"my_datasource": {}}
def test_get_available_data_asset_names_with_multiple_datasources_with_and_without_generators(
    empty_data_context
):
    """Test datasources with and without generators.

    Datasources that define a generator report a (possibly empty) asset
    listing; datasources without one contribute an empty dict.
    """
    context = empty_data_context
    connection_kwargs = {"drivername": "sqlite"}
    context.add_datasource(
        "first",
        class_name="SqlAlchemyDatasource",
        generators={"foo": {"class_name": "TableBatchKwargsGenerator", }},
        **connection_kwargs
    )
    context.add_datasource(
        "second",
        class_name="SqlAlchemyDatasource",
        **connection_kwargs
    )
    context.add_datasource(
        "third",
        class_name="SqlAlchemyDatasource",
        generators={"bar": {"class_name": "TableBatchKwargsGenerator", }},
        **connection_kwargs
    )
    obs = context.get_available_data_asset_names()
    assert isinstance(obs, dict)
    assert set(obs.keys()) == {"first", "second", "third"}
    # An in-memory sqlite DB has no tables, so listings are complete but empty.
    assert obs == {
        "first": {"foo": {"is_complete_list": True, "names": []}},
        "second": {},
        "third": {"bar": {"is_complete_list": True, "names": []}},
    }
def test_list_expectation_suite_keys(data_context):
    """The base fixture context ships exactly one expectation suite."""
    assert data_context.list_expectation_suites() == [
        ExpectationSuiteIdentifier(
            expectation_suite_name="my_dag_node.default"
        )
    ]
def test_get_existing_expectation_suite(data_context):
    """Fetching a known suite returns its name and stored expectations."""
    expectation_suite = data_context.get_expectation_suite('my_dag_node.default')
    assert expectation_suite.expectation_suite_name == 'my_dag_node.default'
    assert len(expectation_suite.expectations) == 2
def test_get_new_expectation_suite(data_context):
    """Creating a suite for a new asset yields an empty suite with that name."""
    expectation_suite = data_context.create_expectation_suite('this_data_asset_does_not_exist.default')
    assert expectation_suite.expectation_suite_name == 'this_data_asset_does_not_exist.default'
    assert len(expectation_suite.expectations) == 0
def test_save_expectation_suite(data_context):
    """A saved suite round-trips: re-fetching returns identical expectations."""
    expectation_suite = data_context.create_expectation_suite('this_data_asset_config_does_not_exist.default')
    expectation_suite.expectations.append(ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal",
        kwargs={
            "value": 10
        }))
    data_context.save_expectation_suite(expectation_suite)
    expectation_suite_saved = data_context.get_expectation_suite('this_data_asset_config_does_not_exist.default')
    assert expectation_suite.expectations == expectation_suite_saved.expectations
def test_compile_evaluation_parameter_dependencies(data_context):
    """Compiling dependencies populates the per-suite metric dependency map."""
    # Nothing is compiled until explicitly requested.
    assert data_context._evaluation_parameter_dependencies == {}
    data_context._compile_evaluation_parameter_dependencies()
    assert data_context._evaluation_parameter_dependencies == {
        'source_diabetes_data.default': [{
            "metric_kwargs_id": {
                "column=patient_nbr": ["expect_column_unique_value_count_to_be_between.result.observed_value"]
            }
        }],
        'source_patient_data.default': ["expect_table_row_count_to_equal.result.observed_value"]
    }
def test_list_datasources(data_context):
    """list_datasources reports each configured datasource's name and class.

    The original assertions built ``OrderedDict`` objects from lists of
    two-key dicts. Constructing a mapping from an iterable treats each
    element as a key/value pair, so each ``{'name': ..., 'class_name': ...}``
    collapsed into the single pair ``'name' -> 'class_name'`` on BOTH sides
    of the comparison -- the actual names and classes were never checked.
    Compare the extracted (name, class_name) tuples instead.
    """
    datasources = data_context.list_datasources()
    assert [(d["name"], d["class_name"]) for d in datasources] == [
        ("mydatasource", "PandasDatasource"),
    ]
    data_context.add_datasource("second_pandas_source",
                           module_name="great_expectations.datasource",
                           class_name="PandasDatasource",
                           )
    datasources = data_context.list_datasources()
    assert [(d["name"], d["class_name"]) for d in datasources] == [
        ("mydatasource", "PandasDatasource"),
        ("second_pandas_source", "PandasDatasource"),
    ]
def test_data_context_get_validation_result(titanic_data_context):
    """
    Test that validation results can be correctly fetched from the configured results store
    """
    # Profile for the side effect of writing validation results; the returned
    # profiling summary itself is not needed (the original bound it to an
    # unused local).
    titanic_data_context.profile_datasource("mydatasource")
    all_validation_result = titanic_data_context.get_validation_result(
        "mydatasource.mygenerator.Titanic.BasicDatasetProfiler",
        run_id="profiling"
    )
    assert len(all_validation_result.results) == 51
    # failed_only narrows the same stored result set to failing expectations.
    failed_validation_result = titanic_data_context.get_validation_result(
        "mydatasource.mygenerator.Titanic.BasicDatasetProfiler",
        run_id="profiling",
        failed_only=True,
    )
    assert len(failed_validation_result.results) == 8
def test_data_context_get_datasource(titanic_data_context):
    """get_datasource returns a Datasource instance for a configured name.

    The original called ``isinstance`` and discarded the result, so the
    check could never fail; wrap it in an ``assert``.
    """
    assert isinstance(titanic_data_context.get_datasource("mydatasource"), Datasource)
def test_data_context_get_datasource_on_non_existent_one_raises_helpful_error(titanic_data_context):
    """Requesting an unknown datasource raises ValueError, not a silent None."""
    with pytest.raises(ValueError):
        _ = titanic_data_context.get_datasource("fakey_mc_fake")
def test_data_context_profile_datasource_on_non_existent_one_raises_helpful_error(titanic_data_context):
    """Profiling an unknown datasource raises ValueError, not a silent failure."""
    with pytest.raises(ValueError):
        _ = titanic_data_context.profile_datasource("fakey_mc_fake")
@pytest.mark.rendered_output
def test_render_full_static_site_from_empty_project(tmp_path_factory, filesystem_csv_3):
    """End-to-end: scaffold a project, profile two datasources, build data
    docs, and compare the rendered site's full directory tree to literals.
    """
    # TODO : Use a standard test fixture
    # TODO : Have that test fixture copy a directory, rather than building a new one from scratch
    # Lay out a project directory containing two small data folders.
    base_dir = str(tmp_path_factory.mktemp("project_dir"))
    project_dir = os.path.join(base_dir, "project_path")
    os.mkdir(project_dir)
    os.makedirs(os.path.join(project_dir, "data"))
    os.makedirs(os.path.join(project_dir, "data/titanic"))
    shutil.copy(
        file_relative_path(__file__, "../test_sets/Titanic.csv"),
        str(os.path.join(project_dir, "data/titanic/Titanic.csv"))
    )
    os.makedirs(os.path.join(project_dir, "data/random"))
    shutil.copy(
        os.path.join(filesystem_csv_3, "f1.csv"),
        str(os.path.join(project_dir, "data/random/f1.csv"))
    )
    shutil.copy(
        os.path.join(filesystem_csv_3, "f2.csv"),
        str(os.path.join(project_dir, "data/random/f2.csv"))
    )
    assert gen_directory_tree_str(project_dir) == """\
project_path/
    data/
        random/
            f1.csv
            f2.csv
        titanic/
            Titanic.csv
"""
    # Scaffold the context and register both folders as subdir-reader datasources.
    context = DataContext.create(project_dir)
    ge_directory = os.path.join(project_dir, "great_expectations")
    context.add_datasource("titanic",
                           module_name="great_expectations.datasource",
                           class_name="PandasDatasource",
                           generators={
                               "subdir_reader": {
                                   "class_name": "SubdirReaderBatchKwargsGenerator",
                                   "base_directory": os.path.join(project_dir, "data/titanic/")
                               }
                           }
                           )
    context.add_datasource("random",
                           module_name="great_expectations.datasource",
                           class_name="PandasDatasource",
                           generators={
                               "subdir_reader": {
                                   "class_name": "SubdirReaderBatchKwargsGenerator",
                                   "base_directory": os.path.join(project_dir, "data/random/")
                               }
                           }
                           )
    context.profile_datasource("titanic")
    # Replicate the batch id of the batch that will be profiled in order to generate the file path of the
    # validation result
    titanic_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/titanic/Titanic.csv'),
        'datasource': 'titanic'}
    ).to_id()
    tree_str = gen_directory_tree_str(project_dir)
    assert tree_str == """project_path/
    data/
        random/
            f1.csv
            f2.csv
        titanic/
            Titanic.csv
    great_expectations/
        .gitignore
        great_expectations.yml
        expectations/
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler.json
        notebooks/
            pandas/
                validation_playground.ipynb
            spark/
                validation_playground.ipynb
            sql/
                validation_playground.ipynb
        plugins/
            custom_data_docs/
                renderers/
                styles/
                    data_docs_custom_styles.css
                views/
        uncommitted/
            config_variables.yml
            data_docs/
            validations/
                titanic/
                    subdir_reader/
                        Titanic/
                            BasicDatasetProfiler/
                                profiling/
                                    {}.json
""".format(titanic_profiled_batch_id)
    # Profile the second datasource and render the static site.
    context.profile_datasource("random")
    context.build_data_docs()
    f1_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/random/f1.csv'),
        'datasource': 'random'}
    ).to_id()
    f2_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/random/f2.csv'),
        'datasource': 'random'}
    ).to_id()
    data_docs_dir = os.path.join(project_dir, "great_expectations/uncommitted/data_docs")
    observed = gen_directory_tree_str(data_docs_dir)
    assert observed == """\
data_docs/
    local_site/
        index.html
        expectations/
            random/
                subdir_reader/
                    f1/
                        BasicDatasetProfiler.html
                    f2/
                        BasicDatasetProfiler.html
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler.html
        static/
            fonts/
                HKGrotesk/
                    HKGrotesk-Bold.otf
                    HKGrotesk-BoldItalic.otf
                    HKGrotesk-Italic.otf
                    HKGrotesk-Light.otf
                    HKGrotesk-LightItalic.otf
                    HKGrotesk-Medium.otf
                    HKGrotesk-MediumItalic.otf
                    HKGrotesk-Regular.otf
                    HKGrotesk-SemiBold.otf
                    HKGrotesk-SemiBoldItalic.otf
            images/
                favicon.ico
                glossary_scroller.gif
                iterative-dev-loop.png
                logo-long-vector.svg
                logo-long.png
                short-logo-vector.svg
                short-logo.png
                validation_failed_unexpected_values.gif
            styles/
                data_docs_custom_styles_template.css
                data_docs_default_styles.css
        validations/
            random/
                subdir_reader/
                    f1/
                        BasicDatasetProfiler/
                            profiling/
                                {0:s}.html
                    f2/
                        BasicDatasetProfiler/
                            profiling/
                                {1:s}.html
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler/
                            profiling/
                                {2:s}.html
""".format(f1_profiled_batch_id, f2_profiled_batch_id, titanic_profiled_batch_id)
    # save data_docs locally
    safe_mmkdir("./tests/data_context/output")
    safe_mmkdir("./tests/data_context/output/data_docs")
    if os.path.isdir("./tests/data_context/output/data_docs"):
        shutil.rmtree("./tests/data_context/output/data_docs")
    shutil.copytree(
        os.path.join(
            ge_directory,
            "uncommitted/data_docs/"
        ),
        "./tests/data_context/output/data_docs"
    )
def test_add_store(empty_data_context):
    """add_store registers the store in memory, in config, and returns the instance."""
    assert "my_new_store" not in empty_data_context.stores.keys()
    assert "my_new_store" not in empty_data_context.get_config()["stores"]
    new_store = empty_data_context.add_store(
        "my_new_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ExpectationsStore",
        }
    )
    assert "my_new_store" in empty_data_context.stores.keys()
    assert "my_new_store" in empty_data_context.get_config()["stores"]
    assert isinstance(new_store, ExpectationsStore)
@pytest.fixture
def basic_data_context_config():
    """Minimal DataContextConfig: filesystem expectations store plus an
    in-memory evaluation-parameter store and one no-op validation operator."""
    return DataContextConfig(**{
        "commented_map": {},
        "config_version": 1,
        "plugins_directory": "plugins/",
        "evaluation_parameter_store_name": "evaluation_parameter_store",
        "validations_store_name": "does_not_have_to_be_real",
        "expectations_store_name": "expectations_store",
        "config_variables_file_path": "uncommitted/config_variables.yml",
        "datasources": {},
        "stores": {
            "expectations_store": {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "TupleFilesystemStoreBackend",
                    "base_directory": "expectations/",
                },
            },
            "evaluation_parameter_store" : {
                "module_name": "great_expectations.data_context.store",
                "class_name": "EvaluationParameterStore",
            }
        },
        "data_docs_sites": {},
        "validation_operators": {
            "default": {
                "class_name": "ActionListValidationOperator",
                "action_list": []
            }
        }
    })
def test_ExplorerDataContext(titanic_data_context):
    """ExplorerDataContext attaches an expectation-explorer manager on construction."""
    context_root_directory = titanic_data_context.root_directory
    explorer_data_context = ExplorerDataContext(context_root_directory)
    assert explorer_data_context._expectation_explorer_manager
def test_ConfigOnlyDataContext__initialization(tmp_path_factory, basic_data_context_config):
    """BaseDataContext derives root and plugins directories from the supplied path."""
    config_path = str(tmp_path_factory.mktemp('test_ConfigOnlyDataContext__initialization__dir'))
    context = BaseDataContext(
        basic_data_context_config,
        config_path,
    )
    assert context.root_directory.split("/")[-1] == "test_ConfigOnlyDataContext__initialization__dir0"
    assert context.plugins_directory.split("/")[-3:] == ["test_ConfigOnlyDataContext__initialization__dir0", "plugins",""]
def test__normalize_absolute_or_relative_path(tmp_path_factory, basic_data_context_config):
    """Relative paths resolve under the context root; absolute paths pass through."""
    config_path = str(tmp_path_factory.mktemp('test__normalize_absolute_or_relative_path__dir'))
    context = BaseDataContext(
        basic_data_context_config,
        config_path,
    )
    assert str(os.path.join("test__normalize_absolute_or_relative_path__dir0", "yikes")) in context._normalize_absolute_or_relative_path("yikes")
    assert "test__normalize_absolute_or_relative_path__dir" not in context._normalize_absolute_or_relative_path("/yikes")
    assert "/yikes" == context._normalize_absolute_or_relative_path("/yikes")
def test_load_data_context_from_environment_variables(tmp_path_factory):
    """GE_HOME lets find_context_root_dir locate a context outside the CWD.

    The original wrapped the body in ``except Exception: raise`` -- a bare
    re-raise that adds nothing -- so only the ``finally`` cleanup is kept.
    """
    curdir = os.path.abspath(os.getcwd())
    try:
        project_path = str(tmp_path_factory.mktemp('data_context'))
        context_path = os.path.join(project_path, "great_expectations")
        safe_mmkdir(context_path)
        os.chdir(context_path)
        # Without a great_expectations.yml the lookup must fail.
        with pytest.raises(DataContextError) as err:
            DataContext.find_context_root_dir()
        assert isinstance(err.value, ConfigNotFoundError)
        shutil.copy(file_relative_path(__file__, "../test_fixtures/great_expectations_basic.yml"),
                    str(os.path.join(context_path, "great_expectations.yml")))
        os.environ["GE_HOME"] = context_path
        assert DataContext.find_context_root_dir() == context_path
    finally:
        # Make sure we unset the environment variable we're using
        if "GE_HOME" in os.environ:
            del os.environ["GE_HOME"]
        os.chdir(curdir)
def test_data_context_updates_expectation_suite_names(data_context):
    # A data context should update the data_asset_name and expectation_suite_name of expectation suites
    # that it creates when it saves them.
    expectation_suites = data_context.list_expectation_suites()
    # We should have a single expectation suite defined
    assert len(expectation_suites) == 1
    expectation_suite_name = expectation_suites[0].expectation_suite_name
    # We'll get that expectation suite and then update its name and re-save, then verify that everything
    # has been properly updated
    expectation_suite = data_context.get_expectation_suite(expectation_suite_name)
    # Note we codify here the current behavior of having a string data_asset_name though typed ExpectationSuite objects
    # will enable changing that
    assert expectation_suite.expectation_suite_name == expectation_suite_name
    # We will now change the data_asset_name and then save the suite in three ways:
    # 1. Directly using the new name,
    # 2. Using a different name that should be overwritten
    # 3. Using the new name but having the context draw that from the suite
    # 1. Directly using the new name
    expectation_suite.expectation_suite_name = 'a_new_suite_name'
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite,
        expectation_suite_name='a_new_suite_name'
    )
    fetched_expectation_suite = data_context.get_expectation_suite('a_new_suite_name')
    assert fetched_expectation_suite.expectation_suite_name == 'a_new_suite_name'
    # 2. Using a different name that should be overwritten
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite,
        expectation_suite_name='a_new_new_suite_name'
    )
    fetched_expectation_suite = data_context.get_expectation_suite('a_new_new_suite_name')
    assert fetched_expectation_suite.expectation_suite_name == 'a_new_new_suite_name'
    # Check that the saved name difference is actually persisted on disk
    with open(os.path.join(
            data_context.root_directory,
            "expectations",
            "a_new_new_suite_name.json"
            ), 'r') as suite_file:
        loaded_suite = expectationSuiteSchema.load(json.load(suite_file)).data
    assert loaded_suite.expectation_suite_name == 'a_new_new_suite_name'
    # 3. Using the new name but having the context draw that from the suite
    expectation_suite.expectation_suite_name = "a_third_suite_name"
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite
    )
    fetched_expectation_suite = data_context.get_expectation_suite("a_third_suite_name")
    assert fetched_expectation_suite.expectation_suite_name == "a_third_suite_name"
def test_data_context_create_does_not_raise_error_or_warning_if_ge_dir_exists(tmp_path_factory):
    """DataContext.create tolerates an already-existing target directory."""
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
@pytest.fixture()
def empty_context(tmp_path_factory):
    """Scaffold a fresh project on disk and return a DataContext over it."""
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    ge_dir = os.path.join(project_path, "great_expectations")
    # Sanity-check the scaffold before handing it to the tests below.
    assert os.path.isdir(ge_dir)
    assert os.path.isfile(os.path.join(ge_dir, DataContext.GE_YML))
    context = DataContext(ge_dir)
    assert isinstance(context, DataContext)
    return context
def test_data_context_does_ge_yml_exist_returns_true_when_it_does_exist(empty_context):
    """does_config_exist_on_disk is truthy for a freshly scaffolded project."""
    ge_dir = empty_context.root_directory
    # Plain truthiness assert instead of `== True` (flake8 E712).
    assert DataContext.does_config_exist_on_disk(ge_dir)
def test_data_context_does_ge_yml_exist_returns_false_when_it_does_not_exist(
    empty_context,
):
    """Removing great_expectations.yml makes the config check fail."""
    ge_dir = empty_context.root_directory
    # mangle project
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert not DataContext.does_config_exist_on_disk(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_true_when_it_has_a_datasource_configured_in_yml_file_on_disk(
    empty_context,
):
    """A configured datasource is detected in the on-disk yml."""
    ge_dir = empty_context.root_directory
    empty_context.add_datasource("arthur", **{"class_name": "PandasDatasource"})
    assert DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_datasource_configured_in_yml_file_on_disk(
    empty_context,
):
    """A fresh project has no datasource configured."""
    ge_dir = empty_context.root_directory
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_ge_yml_file(
    empty_context,
):
    """A missing great_expectations.yml means no datasource can be found."""
    ge_dir = empty_context.root_directory
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_ge_dir(
    empty_context,
):
    """A missing project directory also reports no datasource."""
    ge_dir = empty_context.root_directory
    # NOTE(review): os.path.join(ge_dir) is just ge_dir, a directory --
    # presumably safe_remove tolerates that; confirm against tests.test_utils.
    safe_remove(os.path.join(ge_dir))
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_the_project_has_an_invalid_config_file(
    empty_context,
):
    """Unparseable yml is treated the same as having no datasource."""
    ge_dir = empty_context.root_directory
    with open(os.path.join(ge_dir, DataContext.GE_YML), "w") as yml:
        yml.write("this file: is not a valid ge config")
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_is_project_initialized_returns_true_when_its_valid_context_has_one_datasource_and_one_suite(
    empty_context,
):
    """A project with a datasource and one suite counts as initialized."""
    context = empty_context
    ge_dir = context.root_directory
    context.add_datasource("arthur", class_name="PandasDatasource")
    context.create_expectation_suite("dent")
    assert len(context.list_expectation_suites()) == 1
    # Plain truthiness asserts instead of `== True` / `== False` (flake8 E712).
    assert DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_true_when_its_valid_context_has_one_datasource_and_no_suites(
    empty_context,
):
    """A datasource alone is not enough: no suites means not initialized."""
    context = empty_context
    ge_dir = context.root_directory
    context.add_datasource("arthur", class_name="PandasDatasource")
    assert len(context.list_expectation_suites()) == 0
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_its_valid_context_has_no_datasource(
    empty_context,
):
    """A fresh scaffold with no datasource is not initialized."""
    ge_dir = empty_context.root_directory
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_config_yml_is_missing(empty_context):
    """Deleting great_expectations.yml invalidates the project."""
    ge_dir = empty_context.root_directory
    # mangle project
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_uncommitted_dir_is_missing(empty_context):
    """Deleting uncommitted/ invalidates the project."""
    ge_dir = empty_context.root_directory
    # mangle project
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR))
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_uncommitted_data_docs_dir_is_missing(empty_context):
    """Deleting uncommitted/data_docs invalidates the project."""
    ge_dir = empty_context.root_directory
    # mangle project
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "data_docs"))
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_uncommitted_validations_dir_is_missing(empty_context):
    """Deleting uncommitted/validations invalidates the project."""
    ge_dir = empty_context.root_directory
    # mangle project
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "validations"))
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_is_project_initialized_returns_false_when_config_variable_yml_is_missing(empty_context):
    """Deleting uncommitted/config_variables.yml invalidates the project."""
    ge_dir = empty_context.root_directory
    # mangle project
    safe_remove(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "config_variables.yml"))
    assert not DataContext.is_project_initialized(ge_dir)
def test_data_context_create_raises_warning_and_leaves_existing_yml_untouched(tmp_path_factory):
    """Re-running create() over an existing project warns and keeps the
    (possibly user-edited) great_expectations.yml intact."""
    root = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(root)
    yml_path = os.path.join(root, "great_expectations/great_expectations.yml")
    # Append a marker so clobbering would be detectable.
    with open(yml_path, "a") as handle:
        handle.write("# LOOK I WAS MODIFIED")
    # A second create must warn instead of overwriting.
    with pytest.warns(UserWarning):
        DataContext.create(root)
    with open(yml_path, "r") as handle:
        contents = handle.read()
    assert "# LOOK I WAS MODIFIED" in contents
def test_data_context_create_makes_uncommitted_dirs_when_all_are_missing(tmp_path_factory):
    """DataContext.create repairs a project whose uncommitted/ tree was deleted."""
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    # mangle the existing setup: delete the entire uncommitted/ subtree
    ge_dir = os.path.join(project_path, "great_expectations")
    uncommitted_dir = os.path.join(ge_dir, "uncommitted")
    shutil.rmtree(uncommitted_dir)
    # re-run create to simulate onboarding
    DataContext.create(project_path)
    obs = gen_directory_tree_str(ge_dir)
    print(obs)
    assert os.path.isdir(uncommitted_dir), "No uncommitted directory created"
    # Full expected scaffold, including the regenerated uncommitted/ subtree.
    assert obs == """\
great_expectations/
    .gitignore
    great_expectations.yml
    expectations/
    notebooks/
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            renderers/
            styles/
                data_docs_custom_styles.css
            views/
    uncommitted/
        config_variables.yml
        data_docs/
        validations/
"""
def test_data_context_create_does_nothing_if_all_uncommitted_dirs_exist(tmp_path_factory):
    """A second create() over an intact project leaves the tree unchanged."""
    expected = """\
great_expectations/
    .gitignore
    great_expectations.yml
    expectations/
    notebooks/
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            renderers/
            styles/
                data_docs_custom_styles.css
            views/
    uncommitted/
        config_variables.yml
        data_docs/
        validations/
"""
    project_path = str(tmp_path_factory.mktemp('stuff'))
    ge_dir = os.path.join(project_path, "great_expectations")
    DataContext.create(project_path)
    fixture = gen_directory_tree_str(ge_dir)
    print(fixture)
    assert fixture == expected
    # re-run create to simulate onboarding
    DataContext.create(project_path)
    obs = gen_directory_tree_str(ge_dir)
    assert obs == expected
def test_data_context_do_all_uncommitted_dirs_exist(tmp_path_factory):
    """all_uncommitted_directories_exist is True only while every expected
    uncommitted subdirectory is present."""
    expected = """\
uncommitted/
    config_variables.yml
    data_docs/
    validations/
"""
    project_path = str(tmp_path_factory.mktemp('stuff'))
    ge_dir = os.path.join(project_path, "great_expectations")
    uncommitted_dir = os.path.join(ge_dir, "uncommitted")
    DataContext.create(project_path)
    fixture = gen_directory_tree_str(uncommitted_dir)
    print(fixture)
    assert fixture == expected
    # Test that all exist
    assert DataContext.all_uncommitted_directories_exist(ge_dir)
    # remove a few
    shutil.rmtree(os.path.join(uncommitted_dir, "data_docs"))
    shutil.rmtree(os.path.join(uncommitted_dir, "validations"))
    # Test that not all exist.
    # BUG FIX: the original passed project_path here, which has no
    # uncommitted/ tree at all and so returns False trivially; the check must
    # run against ge_dir to exercise the directories just removed.
    assert not DataContext.all_uncommitted_directories_exist(ge_dir)
def test_data_context_create_does_not_overwrite_existing_config_variables_yml(tmp_path_factory):
    """Re-running create() warns and preserves an edited config_variables.yml."""
    root = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(root)
    config_vars_path = os.path.join(
        root, "great_expectations", "uncommitted", "config_variables.yml"
    )
    # Append a marker so clobbering would be detectable.
    with open(config_vars_path, "a") as handle:
        handle.write("# LOOK I WAS MODIFIED")
    # Simulate onboarding a second time; this must warn, not overwrite.
    with pytest.warns(UserWarning):
        DataContext.create(root)
    with open(config_vars_path, "r") as handle:
        contents = handle.read()
    print(contents)
    assert "# LOOK I WAS MODIFIED" in contents
def test_scaffold_directories_and_notebooks(tmp_path_factory):
    """scaffold_directories/scaffold_notebooks lay out the expected skeleton."""
    base_dir = str(tmp_path_factory.mktemp("test_scaffold_directories_and_notebooks"))
    DataContext.scaffold_directories(base_dir)
    DataContext.scaffold_notebooks(base_dir)
    top_level = set(os.listdir(base_dir))
    assert top_level == {
        'plugins',
        'expectations',
        '.gitignore',
        'uncommitted',
        'notebooks'
    }
    uncommitted = set(os.listdir(os.path.join(base_dir, "uncommitted")))
    assert uncommitted == {'data_docs', 'validations'}
    # Every notebook subdirectory receives the playground notebook.
    for notebook_dir in DataContext.NOTEBOOK_SUBDIRECTORIES:
        contents = set(os.listdir(os.path.join(base_dir, "notebooks", notebook_dir)))
        assert contents == {"validation_playground.ipynb"}
def test_build_batch_kwargs(titanic_multibatch_data_context):
    """build_batch_kwargs resolves a named partition to its file path, and
    without a partition_id cycles through the available partitions."""
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic", partition_id="Titanic_1912")
    assert os.path.relpath("./data/titanic/Titanic_1912.csv") in batch_kwargs["path"]
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic", partition_id="Titanic_1911")
    assert os.path.relpath("./data/titanic/Titanic_1911.csv") in batch_kwargs["path"]
    paths = []
    # Two consecutive calls without a partition_id should yield both files
    # (order unspecified, hence the set comparison).
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic")
    paths.append(os.path.basename(batch_kwargs["path"]))
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic")
    paths.append(os.path.basename(batch_kwargs["path"]))
    assert set(["Titanic_1912.csv", "Titanic_1911.csv"]) == set(paths)
def test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasources_and_a_site_configured(tmp_path_factory):
    """The default local site yields a docs URL even before any datasource is
    configured and before any docs are built."""
    project_parent = str(tmp_path_factory.mktemp("another_empty_project"))
    DataContext.create(project_parent)
    context = DataContext(os.path.join(project_parent, DataContext.GE_DIR))
    urls = context.get_docs_sites_urls()
    assert len(urls) == 1
    assert urls[0].endswith("great_expectations/uncommitted/data_docs/local_site/index.html")
def test_existing_local_data_docs_urls_returns_single_url_from_customized_local_site(tmp_path_factory):
    """A single customized local site yields exactly one file:// docs URL."""
    empty_directory = str(tmp_path_factory.mktemp("yo_yo"))
    DataContext.create(empty_directory)
    ge_dir = os.path.join(empty_directory, DataContext.GE_DIR)
    context = DataContext(ge_dir)
    # Replace the default site with one writing to a custom base_directory.
    context._project_config["data_docs_sites"] = {
        "my_rad_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/some/local/path/"
            }
        }
    }
    # TODO Workaround project config programmatic config manipulation
    # statefulness issues by writing to disk and re-upping a new context
    context._save_project_config()
    context = DataContext(ge_dir)
    context.build_data_docs()
    expected_path = os.path.join(ge_dir, "uncommitted/data_docs/some/local/path/index.html")
    assert os.path.isfile(expected_path)
    obs = context.get_docs_sites_urls()
    assert obs == ["file://{}".format(expected_path)]
def test_existing_local_data_docs_urls_returns_multiple_urls_from_customized_local_site(tmp_path_factory):
    """Two customized local sites yield one file:// docs URL each."""
    empty_directory = str(tmp_path_factory.mktemp("yo_yo_ma"))
    DataContext.create(empty_directory)
    ge_dir = os.path.join(empty_directory, DataContext.GE_DIR)
    context = DataContext(ge_dir)
    # Two sites writing to distinct base directories.
    context._project_config["data_docs_sites"] = {
        "my_rad_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/some/path/"
            }
        },
        "another_just_amazing_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/another/path/"
            }
        }
    }
    # TODO Workaround project config programmatic config manipulation
    # statefulness issues by writing to disk and re-upping a new context
    context._save_project_config()
    context = DataContext(ge_dir)
    context.build_data_docs()
    data_docs_dir = os.path.join(ge_dir, "uncommitted/data_docs/")
    path_1 = os.path.join(data_docs_dir, "some/path/index.html")
    path_2 = os.path.join(data_docs_dir, "another/path/index.html")
    for expected_path in [path_1, path_2]:
        assert os.path.isfile(expected_path)
    obs = context.get_docs_sites_urls()
    # Order of sites is unspecified, so compare as sets.
    assert set(obs) == set([
        "file://{}".format(path_1),
        "file://{}".format(path_2),
    ])
def test_load_config_variables_file(basic_data_context_config, tmp_path_factory):
    """config_variables_file_path supports ${ENV_VAR} substitution, selecting
    a different variables file per environment."""
    # Setup: two candidate variable files distinguished by their 'env' value.
    base_path = str(tmp_path_factory.mktemp('test_load_config_variables_file'))
    safe_mmkdir(os.path.join(base_path, "uncommitted"))
    with open(os.path.join(base_path, "uncommitted", "dev_variables.yml"), "w") as outfile:
        yaml.dump({'env': 'dev'}, outfile)
    with open(os.path.join(base_path, "uncommitted", "prod_variables.yml"), "w") as outfile:
        yaml.dump({'env': 'prod'}, outfile)
    basic_data_context_config["config_variables_file_path"] = "uncommitted/${TEST_CONFIG_FILE_ENV}_variables.yml"
    try:
        # We should be able to load different files based on an environment variable
        os.environ["TEST_CONFIG_FILE_ENV"] = "dev"
        context = BaseDataContext(basic_data_context_config, context_root_dir=base_path)
        config_vars = context._load_config_variables_file()
        assert config_vars['env'] == 'dev'
        os.environ["TEST_CONFIG_FILE_ENV"] = "prod"
        context = BaseDataContext(basic_data_context_config, context_root_dir=base_path)
        config_vars = context._load_config_variables_file()
        assert config_vars['env'] == 'prod'
    finally:
        # IMPROVEMENT: dropped the no-op `except Exception: raise` clause;
        # try/finally alone already guarantees the variable is unset.
        del os.environ["TEST_CONFIG_FILE_ENV"]
def test_list_expectation_suite_with_no_suites(titanic_data_context):
    """A fresh context reports an empty suite-name list."""
    names = titanic_data_context.list_expectation_suite_names()
    assert isinstance(names, list)
    assert names == []
def test_list_expectation_suite_with_one_suite(titanic_data_context):
    """A single created suite appears in the name list."""
    titanic_data_context.create_expectation_suite('warning')
    names = titanic_data_context.list_expectation_suite_names()
    assert isinstance(names, list)
    assert names == ['warning']
def test_list_expectation_suite_with_multiple_suites(titanic_data_context):
    """All created suites are listed in name order."""
    for suite_name in ('a.warning', 'b.warning', 'c.warning'):
        titanic_data_context.create_expectation_suite(suite_name)
    names = titanic_data_context.list_expectation_suite_names()
    assert isinstance(names, list)
    assert names == ['a.warning', 'b.warning', 'c.warning']
    assert len(names) == 3
def test_get_batch_raises_error_when_passed_a_non_string_type_for_suite_parameter(
    titanic_data_context,
):
    """A non-string expectation-suite argument is rejected."""
    context = titanic_data_context
    with pytest.raises(DataContextError):
        context.get_batch({}, 99)
def test_get_batch_raises_error_when_passed_a_non_dict_or_batch_kwarg_type_for_batch_kwarg_parameter(
    titanic_data_context,
):
    """Batch kwargs that are neither a dict nor BatchKwargs are rejected."""
    context = titanic_data_context
    with pytest.raises(BatchKwargsError):
        context.get_batch(99, "foo")
def test_get_batch_when_passed_a_suite_name(titanic_data_context):
    """get_batch accepts an expectation-suite *name* and attaches the suite."""
    context = titanic_data_context
    kwargs = {
        "datasource": "mydatasource",
        "path": os.path.join(context.root_directory, "..", "data", "Titanic.csv"),
    }
    context.create_expectation_suite("foo")
    assert context.list_expectation_suite_names() == ["foo"]
    batch = context.get_batch(kwargs, "foo")
    assert isinstance(batch, Dataset)
    assert isinstance(batch.get_expectation_suite(), ExpectationSuite)
def test_get_batch_when_passed_a_suite(titanic_data_context):
    """get_batch also accepts an ExpectationSuite object directly."""
    context = titanic_data_context
    kwargs = {
        "datasource": "mydatasource",
        "path": os.path.join(context.root_directory, "..", "data", "Titanic.csv"),
    }
    context.create_expectation_suite("foo")
    assert context.list_expectation_suite_names() == ["foo"]
    batch = context.get_batch(kwargs, context.get_expectation_suite("foo"))
    assert isinstance(batch, Dataset)
    assert isinstance(batch.get_expectation_suite(), ExpectationSuite)
| 36.792857 | 148 | 0.69899 | import json
import os
import shutil
from collections import OrderedDict
import pytest
from ruamel.yaml import YAML
from great_expectations.core import (
ExpectationConfiguration,
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.data_context import (
BaseDataContext,
DataContext,
ExplorerDataContext,
)
from great_expectations.data_context.store import ExpectationsStore
from great_expectations.data_context.types.base import DataContextConfig
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
)
from great_expectations.data_context.util import (
file_relative_path,
safe_mmkdir,
)
from great_expectations.dataset import Dataset
from great_expectations.datasource import Datasource
from great_expectations.datasource.types.batch_kwargs import PathBatchKwargs
from great_expectations.exceptions import (
BatchKwargsError,
ConfigNotFoundError,
DataContextError,
)
from great_expectations.util import gen_directory_tree_str
from tests.test_utils import safe_remove
try:
from unittest import mock
except ImportError:
import mock
try:
from unittest import mock
except ImportError:
import mock
yaml = YAML()
@pytest.fixture()
def parameterized_expectation_suite():
    # Load a suite fixture whose expectation kwargs contain $PARAMETER
    # references, for evaluation-parameter tests.
    fixture_path = file_relative_path(
        __file__,
        "../test_fixtures/expectation_suites/parameterized_expectation_suite_fixture.json",
    )
    with open(fixture_path, "r",) as suite:
        return json.load(suite)
def test_create_duplicate_expectation_suite(titanic_data_context):
    """Creating a suite twice fails unless overwrite_existing=True."""
    assert titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite")
    with pytest.raises(DataContextError):
        titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite")
    assert titanic_data_context.create_expectation_suite(expectation_suite_name="titanic.test_create_expectation_suite", overwrite_existing=True)
def test_get_available_data_asset_names_with_one_datasource_including_a_single_generator(empty_data_context, filesystem_csv):
    """A datasource with a subdir generator reports the on-disk assets."""
    empty_data_context.add_datasource("my_datasource",
                                      module_name="great_expectations.datasource",
                                      class_name="PandasDatasource",
                                      generators={
                                          "subdir_reader": {
                                              "class_name": "SubdirReaderBatchKwargsGenerator",
                                              "base_directory": str(filesystem_csv)
                                          }
                                      }
                                      )
    available_asset_names = empty_data_context.get_available_data_asset_names()
    assert set(available_asset_names["my_datasource"]["subdir_reader"]["names"]) == {('f3', 'directory'), ('f2', 'file'), ('f1', 'file')}
def test_get_available_data_asset_names_with_one_datasource_without_a_generator_returns_empty_dict(
    empty_data_context,
):
    """A datasource without generators yields an empty mapping."""
    empty_data_context.add_datasource(
        "my_datasource",
        module_name="great_expectations.datasource",
        class_name="PandasDatasource",
    )
    obs = empty_data_context.get_available_data_asset_names()
    assert obs == {"my_datasource": {}}
def test_get_available_data_asset_names_with_multiple_datasources_with_and_without_generators(
    empty_data_context
):
    """Datasources with and without generators coexist in the asset listing."""
    context = empty_data_context
    connection_kwargs = {"drivername": "sqlite"}
    context.add_datasource(
        "first",
        class_name="SqlAlchemyDatasource",
        generators={"foo": {"class_name": "TableBatchKwargsGenerator", }},
        **connection_kwargs
    )
    context.add_datasource(
        "second",
        class_name="SqlAlchemyDatasource",
        **connection_kwargs
    )
    context.add_datasource(
        "third",
        class_name="SqlAlchemyDatasource",
        generators={"bar": {"class_name": "TableBatchKwargsGenerator", }},
        **connection_kwargs
    )
    obs = context.get_available_data_asset_names()
    assert isinstance(obs, dict)
    assert set(obs.keys()) == {"first", "second", "third"}
    # Generator-less datasources map to {}; in-memory sqlite has no tables.
    assert obs == {
        "first": {"foo": {"is_complete_list": True, "names": []}},
        "second": {},
        "third": {"bar": {"is_complete_list": True, "names": []}},
    }
def test_list_expectation_suite_keys(data_context):
    """list_expectation_suites returns ExpectationSuiteIdentifier objects."""
    assert data_context.list_expectation_suites() == [
        ExpectationSuiteIdentifier(
            expectation_suite_name="my_dag_node.default"
        )
    ]
def test_get_existing_expectation_suite(data_context):
    """An existing suite loads with its name and expectations intact."""
    expectation_suite = data_context.get_expectation_suite('my_dag_node.default')
    assert expectation_suite.expectation_suite_name == 'my_dag_node.default'
    assert len(expectation_suite.expectations) == 2
def test_get_new_expectation_suite(data_context):
    """create_expectation_suite returns a fresh, empty suite."""
    expectation_suite = data_context.create_expectation_suite('this_data_asset_does_not_exist.default')
    assert expectation_suite.expectation_suite_name == 'this_data_asset_does_not_exist.default'
    assert len(expectation_suite.expectations) == 0
def test_save_expectation_suite(data_context):
    """A saved suite round-trips through the expectations store."""
    expectation_suite = data_context.create_expectation_suite('this_data_asset_config_does_not_exist.default')
    expectation_suite.expectations.append(ExpectationConfiguration(
        expectation_type="expect_table_row_count_to_equal",
        kwargs={
            "value": 10
        }))
    data_context.save_expectation_suite(expectation_suite)
    expectation_suite_saved = data_context.get_expectation_suite('this_data_asset_config_does_not_exist.default')
    assert expectation_suite.expectations == expectation_suite_saved.expectations
def test_compile_evaluation_parameter_dependencies(data_context):
    """Compilation populates the evaluation-parameter dependency map, which
    starts empty."""
    assert data_context._evaluation_parameter_dependencies == {}
    data_context._compile_evaluation_parameter_dependencies()
    assert data_context._evaluation_parameter_dependencies == {
        'source_diabetes_data.default': [{
            "metric_kwargs_id": {
                "column=patient_nbr": ["expect_column_unique_value_count_to_be_between.result.observed_value"]
            }
        }],
        'source_patient_data.default': ["expect_table_row_count_to_equal.result.observed_value"]
    }
def test_list_datasources(data_context):
    """list_datasources reports each configured datasource as a dict.

    BUG FIX: the original wrapped both sides in OrderedDict(...). Feeding a
    list of two-key dicts to OrderedDict silently collapses each element into
    a single {'name': 'class_name'} pair, so both sides always compared equal
    and the assertion could never fail. Compare the lists of dicts directly.
    """
    datasources = data_context.list_datasources()
    assert datasources == [
        {
            'name': 'mydatasource',
            'class_name': 'PandasDatasource'
        }
    ]
    data_context.add_datasource("second_pandas_source",
                                module_name="great_expectations.datasource",
                                class_name="PandasDatasource",
                                )
    datasources = data_context.list_datasources()
    assert datasources == [
        {
            'name': 'mydatasource',
            'class_name': 'PandasDatasource'
        },
        {
            'name': 'second_pandas_source',
            'class_name': 'PandasDatasource'
        }
    ]
def test_data_context_get_validation_result(titanic_data_context):
    """get_validation_result returns all profiling results, or only the
    failures when failed_only=True."""
    # IMPROVEMENT: the return value of profile_datasource was bound to an
    # unused local; only the side effect (stored validation results) matters.
    titanic_data_context.profile_datasource("mydatasource")
    all_validation_result = titanic_data_context.get_validation_result(
        "mydatasource.mygenerator.Titanic.BasicDatasetProfiler",
        run_id="profiling"
    )
    assert len(all_validation_result.results) == 51
    failed_validation_result = titanic_data_context.get_validation_result(
        "mydatasource.mygenerator.Titanic.BasicDatasetProfiler",
        run_id="profiling",
        failed_only=True,
    )
    assert len(failed_validation_result.results) == 8
def test_data_context_get_datasource(titanic_data_context):
    """get_datasource returns a Datasource instance for a configured name.

    BUG FIX: the original called isinstance(...) without asserting its
    result, so the test could never fail.
    """
    assert isinstance(titanic_data_context.get_datasource("mydatasource"), Datasource)
def test_data_context_get_datasource_on_non_existent_one_raises_helpful_error(titanic_data_context):
    """Requesting an unknown datasource raises ValueError."""
    with pytest.raises(ValueError):
        _ = titanic_data_context.get_datasource("fakey_mc_fake")
def test_data_context_profile_datasource_on_non_existent_one_raises_helpful_error(titanic_data_context):
    """Profiling an unknown datasource raises ValueError."""
    with pytest.raises(ValueError):
        _ = titanic_data_context.profile_datasource("fakey_mc_fake")
@pytest.mark.rendered_output
def test_render_full_static_site_from_empty_project(tmp_path_factory, filesystem_csv_3):
    """End-to-end: scaffold a project, profile two datasources, build data
    docs, and verify the exact on-disk trees at each stage."""
    # --- Stage a project directory with two data subfolders. ---
    base_dir = str(tmp_path_factory.mktemp("project_dir"))
    project_dir = os.path.join(base_dir, "project_path")
    os.mkdir(project_dir)
    os.makedirs(os.path.join(project_dir, "data"))
    os.makedirs(os.path.join(project_dir, "data/titanic"))
    shutil.copy(
        file_relative_path(__file__, "../test_sets/Titanic.csv"),
        str(os.path.join(project_dir, "data/titanic/Titanic.csv"))
    )
    os.makedirs(os.path.join(project_dir, "data/random"))
    shutil.copy(
        os.path.join(filesystem_csv_3, "f1.csv"),
        str(os.path.join(project_dir, "data/random/f1.csv"))
    )
    shutil.copy(
        os.path.join(filesystem_csv_3, "f2.csv"),
        str(os.path.join(project_dir, "data/random/f2.csv"))
    )
    assert gen_directory_tree_str(project_dir) == """\
project_path/
    data/
        random/
            f1.csv
            f2.csv
        titanic/
            Titanic.csv
"""
    # --- Create the context and register both datasources. ---
    context = DataContext.create(project_dir)
    ge_directory = os.path.join(project_dir, "great_expectations")
    context.add_datasource("titanic",
                           module_name="great_expectations.datasource",
                           class_name="PandasDatasource",
                           generators={
                               "subdir_reader": {
                                   "class_name": "SubdirReaderBatchKwargsGenerator",
                                   "base_directory": os.path.join(project_dir, "data/titanic/")
                               }
                           }
                           )
    context.add_datasource("random",
                           module_name="great_expectations.datasource",
                           class_name="PandasDatasource",
                           generators={
                               "subdir_reader": {
                                   "class_name": "SubdirReaderBatchKwargsGenerator",
                                   "base_directory": os.path.join(project_dir, "data/random/")
                               }
                           }
                           )
    # --- Profile titanic; its validation result is keyed by batch id. ---
    context.profile_datasource("titanic")
    titanic_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/titanic/Titanic.csv'),
        'datasource': 'titanic'}
    ).to_id()
    tree_str = gen_directory_tree_str(project_dir)
    assert tree_str == """project_path/
    data/
        random/
            f1.csv
            f2.csv
        titanic/
            Titanic.csv
    great_expectations/
        .gitignore
        great_expectations.yml
        expectations/
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler.json
        notebooks/
            pandas/
                validation_playground.ipynb
            spark/
                validation_playground.ipynb
            sql/
                validation_playground.ipynb
        plugins/
            custom_data_docs/
                renderers/
                styles/
                    data_docs_custom_styles.css
                views/
        uncommitted/
            config_variables.yml
            data_docs/
            validations/
                titanic/
                    subdir_reader/
                        Titanic/
                            BasicDatasetProfiler/
                                profiling/
                                    {}.json
""".format(titanic_profiled_batch_id)
    # --- Profile the second datasource and build the static site. ---
    context.profile_datasource("random")
    context.build_data_docs()
    f1_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/random/f1.csv'),
        'datasource': 'random'}
    ).to_id()
    f2_profiled_batch_id = PathBatchKwargs({
        'path': os.path.join(project_dir, 'data/random/f2.csv'),
        'datasource': 'random'}
    ).to_id()
    data_docs_dir = os.path.join(project_dir, "great_expectations/uncommitted/data_docs")
    observed = gen_directory_tree_str(data_docs_dir)
    assert observed == """\
data_docs/
    local_site/
        index.html
        expectations/
            random/
                subdir_reader/
                    f1/
                        BasicDatasetProfiler.html
                    f2/
                        BasicDatasetProfiler.html
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler.html
        static/
            fonts/
                HKGrotesk/
                    HKGrotesk-Bold.otf
                    HKGrotesk-BoldItalic.otf
                    HKGrotesk-Italic.otf
                    HKGrotesk-Light.otf
                    HKGrotesk-LightItalic.otf
                    HKGrotesk-Medium.otf
                    HKGrotesk-MediumItalic.otf
                    HKGrotesk-Regular.otf
                    HKGrotesk-SemiBold.otf
                    HKGrotesk-SemiBoldItalic.otf
            images/
                favicon.ico
                glossary_scroller.gif
                iterative-dev-loop.png
                logo-long-vector.svg
                logo-long.png
                short-logo-vector.svg
                short-logo.png
                validation_failed_unexpected_values.gif
            styles/
                data_docs_custom_styles_template.css
                data_docs_default_styles.css
        validations/
            random/
                subdir_reader/
                    f1/
                        BasicDatasetProfiler/
                            profiling/
                                {0:s}.html
                    f2/
                        BasicDatasetProfiler/
                            profiling/
                                {1:s}.html
            titanic/
                subdir_reader/
                    Titanic/
                        BasicDatasetProfiler/
                            profiling/
                                {2:s}.html
""".format(f1_profiled_batch_id, f2_profiled_batch_id, titanic_profiled_batch_id)
    # Copy the generated site out for manual inspection (rendered_output runs).
    # NOTE(review): the second safe_mmkdir is immediately removed below — it
    # only guarantees the parent exists before copytree recreates the target.
    safe_mmkdir("./tests/data_context/output")
    safe_mmkdir("./tests/data_context/output/data_docs")
    if os.path.isdir("./tests/data_context/output/data_docs"):
        shutil.rmtree("./tests/data_context/output/data_docs")
    shutil.copytree(
        os.path.join(
            ge_directory,
            "uncommitted/data_docs/"
        ),
        "./tests/data_context/output/data_docs"
    )
def test_add_store(empty_data_context):
    """add_store registers the store instance and records it in the config."""
    assert "my_new_store" not in empty_data_context.stores.keys()
    assert "my_new_store" not in empty_data_context.get_config()["stores"]
    new_store = empty_data_context.add_store(
        "my_new_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ExpectationsStore",
        }
    )
    assert "my_new_store" in empty_data_context.stores.keys()
    assert "my_new_store" in empty_data_context.get_config()["stores"]
    assert isinstance(new_store, ExpectationsStore)
@pytest.fixture
def basic_data_context_config():
    # Minimal in-memory DataContextConfig: one expectations store, one
    # evaluation-parameter store, no datasources or docs sites.
    return DataContextConfig(**{
        "commented_map": {},
        "config_version": 1,
        "plugins_directory": "plugins/",
        "evaluation_parameter_store_name": "evaluation_parameter_store",
        "validations_store_name": "does_not_have_to_be_real",
        "expectations_store_name": "expectations_store",
        "config_variables_file_path": "uncommitted/config_variables.yml",
        "datasources": {},
        "stores": {
            "expectations_store": {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "TupleFilesystemStoreBackend",
                    "base_directory": "expectations/",
                },
            },
            "evaluation_parameter_store": {
                "module_name": "great_expectations.data_context.store",
                "class_name": "EvaluationParameterStore",
            }
        },
        "data_docs_sites": {},
        "validation_operators": {
            "default": {
                "class_name": "ActionListValidationOperator",
                "action_list": []
            }
        }
    })
def test_ExplorerDataContext(titanic_data_context):
    """ExplorerDataContext wires up an expectation explorer manager."""
    context_root_directory = titanic_data_context.root_directory
    explorer_data_context = ExplorerDataContext(context_root_directory)
    assert explorer_data_context._expectation_explorer_manager
def test_ConfigOnlyDataContext__initialization(tmp_path_factory, basic_data_context_config):
    """BaseDataContext resolves root and plugins directories from its config."""
    config_path = str(tmp_path_factory.mktemp('test_ConfigOnlyDataContext__initialization__dir'))
    context = BaseDataContext(
        basic_data_context_config,
        config_path,
    )
    # NOTE(review): splitting on "/" assumes a POSIX path separator; this
    # would not hold on Windows — confirm before porting.
    assert context.root_directory.split("/")[-1] == "test_ConfigOnlyDataContext__initialization__dir0"
    assert context.plugins_directory.split("/")[-3:] == ["test_ConfigOnlyDataContext__initialization__dir0", "plugins",""]
def test__normalize_absolute_or_relative_path(tmp_path_factory, basic_data_context_config):
    """Relative paths are rooted at the context dir; absolute paths pass through."""
    config_path = str(tmp_path_factory.mktemp('test__normalize_absolute_or_relative_path__dir'))
    context = BaseDataContext(
        basic_data_context_config,
        config_path,
    )
    assert str(os.path.join("test__normalize_absolute_or_relative_path__dir0", "yikes")) in context._normalize_absolute_or_relative_path("yikes")
    assert "test__normalize_absolute_or_relative_path__dir" not in context._normalize_absolute_or_relative_path("/yikes")
    assert "/yikes" == context._normalize_absolute_or_relative_path("/yikes")
def test_load_data_context_from_environment_variables(tmp_path_factory):
    """DataContext.find_context_root_dir honors the GE_HOME env variable."""
    curdir = os.path.abspath(os.getcwd())
    try:
        project_path = str(tmp_path_factory.mktemp('data_context'))
        context_path = os.path.join(project_path, "great_expectations")
        safe_mmkdir(context_path)
        os.chdir(context_path)
        # Without a config on disk and no GE_HOME, lookup must fail.
        with pytest.raises(DataContextError) as err:
            DataContext.find_context_root_dir()
        assert isinstance(err.value, ConfigNotFoundError)
        shutil.copy(file_relative_path(__file__, "../test_fixtures/great_expectations_basic.yml"),
                    str(os.path.join(context_path, "great_expectations.yml")))
        os.environ["GE_HOME"] = context_path
        assert DataContext.find_context_root_dir() == context_path
    finally:
        # IMPROVEMENT: dropped the no-op `except Exception: raise` clause;
        # try/finally alone guarantees GE_HOME is unset and cwd restored.
        if "GE_HOME" in os.environ:
            del os.environ["GE_HOME"]
        os.chdir(curdir)
def test_data_context_updates_expectation_suite_names(data_context):
    """Saving a suite under a new name updates both the store and the suite's
    own expectation_suite_name."""
    # A data context should update the data_asset_name and expectation_suite_name of expectation suites
    # that it creates when it saves them.
    expectation_suites = data_context.list_expectation_suites()
    # We should have a single expectation suite defined
    assert len(expectation_suites) == 1
    expectation_suite_name = expectation_suites[0].expectation_suite_name
    # We'll get that expectation suite and then update its name and re-save, then verify that everything
    expectation_suite = data_context.get_expectation_suite(expectation_suite_name)
    assert expectation_suite.expectation_suite_name == expectation_suite_name
    expectation_suite.expectation_suite_name = 'a_new_suite_name'
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite,
        expectation_suite_name='a_new_suite_name'
    )
    fetched_expectation_suite = data_context.get_expectation_suite('a_new_suite_name')
    assert fetched_expectation_suite.expectation_suite_name == 'a_new_suite_name'
    # Saving under yet another explicit name must also round-trip.
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite,
        expectation_suite_name='a_new_new_suite_name'
    )
    fetched_expectation_suite = data_context.get_expectation_suite('a_new_new_suite_name')
    assert fetched_expectation_suite.expectation_suite_name == 'a_new_new_suite_name'
    # Verify the on-disk JSON also carries the renamed suite name.
    with open(os.path.join(
        data_context.root_directory,
        "expectations",
        "a_new_new_suite_name.json"
    ), 'r') as suite_file:
        loaded_suite = expectationSuiteSchema.load(json.load(suite_file)).data
        assert loaded_suite.expectation_suite_name == 'a_new_new_suite_name'
    # Saving without an explicit name uses the suite's own current name.
    expectation_suite.expectation_suite_name = "a_third_suite_name"
    data_context.save_expectation_suite(
        expectation_suite=expectation_suite
    )
    fetched_expectation_suite = data_context.get_expectation_suite("a_third_suite_name")
    assert fetched_expectation_suite.expectation_suite_name == "a_third_suite_name"
def test_data_context_create_does_not_raise_error_or_warning_if_ge_dir_exists(tmp_path_factory):
    """create() succeeds quietly in a directory that already exists."""
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
@pytest.fixture()
def empty_context(tmp_path_factory):
    # A freshly scaffolded project context for filesystem-state tests.
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    ge_dir = os.path.join(project_path, "great_expectations")
    assert os.path.isdir(ge_dir)
    assert os.path.isfile(os.path.join(ge_dir, DataContext.GE_YML))
    context = DataContext(ge_dir)
    assert isinstance(context, DataContext)
    return context
def test_data_context_does_ge_yml_exist_returns_true_when_it_does_exist(empty_context):
    """A scaffolded project has great_expectations.yml on disk."""
    ge_dir = empty_context.root_directory
    assert DataContext.does_config_exist_on_disk(ge_dir)
def test_data_context_does_ge_yml_exist_returns_false_when_it_does_not_exist(
    empty_context,
):
    """After removing the yml, the config is reported as missing."""
    ge_dir = empty_context.root_directory
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert not DataContext.does_config_exist_on_disk(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_true_when_it_has_a_datasource_configured_in_yml_file_on_disk(
    empty_context,
):
    """After add_datasource, the on-disk config reports a datasource."""
    ge_dir = empty_context.root_directory
    empty_context.add_datasource("arthur", **{"class_name": "PandasDatasource"})
    assert DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_datasource_configured_in_yml_file_on_disk(
    empty_context,
):
    """A freshly scaffolded project has no datasource in its config file."""
    ge_dir = empty_context.root_directory
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_ge_yml_file(
    empty_context,
):
    """With great_expectations.yml removed, no datasource can be found."""
    ge_dir = empty_context.root_directory
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_it_does_not_have_a_ge_dir(
    empty_context,
):
    """With the whole great_expectations directory gone, the check is False."""
    ge_dir = empty_context.root_directory
    # BUG FIX: safe_remove() cannot delete a directory, so the original left
    # ge_dir in place and never exercised the missing-directory condition.
    shutil.rmtree(ge_dir)
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
def test_data_context_does_project_have_a_datasource_in_config_file_returns_false_when_the_project_has_an_invalid_config_file(
    empty_context,
):
    """An unparseable great_expectations.yml is treated as "no datasource"."""
    ge_dir = empty_context.root_directory
    with open(os.path.join(ge_dir, DataContext.GE_YML), "w") as yml:
        yml.write("this file: is not a valid ge config")
    assert not DataContext.does_project_have_a_datasource_in_config_file(ge_dir)
# --- DataContext.is_project_initialized: requires a valid config, a datasource,
# --- at least one expectation suite, and the full uncommitted/ scaffolding.
def test_data_context_is_project_initialized_returns_true_when_its_valid_context_has_one_datasource_and_one_suite(
    empty_context,
):
    context = empty_context
    ge_dir = context.root_directory
    context.add_datasource("arthur", class_name="PandasDatasource")
    context.create_expectation_suite("dent")
    assert len(context.list_expectation_suites()) == 1
    assert DataContext.is_project_initialized(ge_dir) == True
def test_data_context_is_project_initialized_returns_true_when_its_valid_context_has_one_datasource_and_no_suites(
    empty_context,
):
    # NOTE(review): despite "returns_true" in the test name, the assertion below
    # expects False — a datasource alone is not enough without a suite. The name
    # looks stale; consider renaming.
    context = empty_context
    ge_dir = context.root_directory
    context.add_datasource("arthur", class_name="PandasDatasource")
    assert len(context.list_expectation_suites()) == 0
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_its_valid_context_has_no_datasource(
    empty_context,
):
    ge_dir = empty_context.root_directory
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_config_yml_is_missing(empty_context):
    ge_dir = empty_context.root_directory
    safe_remove(os.path.join(ge_dir, empty_context.GE_YML))
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_uncommitted_dir_is_missing(empty_context):
    ge_dir = empty_context.root_directory
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR))
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_uncommitted_data_docs_dir_is_missing(empty_context):
    ge_dir = empty_context.root_directory
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "data_docs"))
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_uncommitted_validations_dir_is_missing(empty_context):
    ge_dir = empty_context.root_directory
    shutil.rmtree(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "validations"))
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_is_project_initialized_returns_false_when_config_variable_yml_is_missing(empty_context):
    ge_dir = empty_context.root_directory
    safe_remove(os.path.join(ge_dir, empty_context.GE_UNCOMMITTED_DIR, "config_variables.yml"))
    assert DataContext.is_project_initialized(ge_dir) == False
def test_data_context_create_raises_warning_and_leaves_existing_yml_untouched(tmp_path_factory):
    # Re-running DataContext.create over an existing project must warn and must
    # not clobber a user-modified great_expectations.yml.
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    ge_yml = os.path.join(
        project_path,
        "great_expectations/great_expectations.yml"
    )
    # Append a marker so we can detect whether the file was overwritten.
    with open(ge_yml, "a") as ff:
        ff.write("# LOOK I WAS MODIFIED")
    with pytest.warns(UserWarning):
        DataContext.create(project_path)
    with open(ge_yml, "r") as ff:
        obs = ff.read()
    assert "# LOOK I WAS MODIFIED" in obs
def test_data_context_create_makes_uncommitted_dirs_when_all_are_missing(tmp_path_factory):
    # Deleting uncommitted/ wholesale and re-running create() should rebuild
    # the full scaffold tree below.
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    ge_dir = os.path.join(project_path, "great_expectations")
    uncommitted_dir = os.path.join(ge_dir, "uncommitted")
    shutil.rmtree(uncommitted_dir)
    DataContext.create(project_path)
    obs = gen_directory_tree_str(ge_dir)
    print(obs)
    assert os.path.isdir(uncommitted_dir), "No uncommitted directory created"
    assert obs == """\
great_expectations/
    .gitignore
    great_expectations.yml
    expectations/
    notebooks/
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            renderers/
            styles/
                data_docs_custom_styles.css
            views/
    uncommitted/
        config_variables.yml
        data_docs/
        validations/
"""
def test_data_context_create_does_nothing_if_all_uncommitted_dirs_exist(tmp_path_factory):
    # create() must be idempotent: a second run over a complete scaffold leaves
    # the directory tree exactly as the first run produced it.
    expected = """\
great_expectations/
    .gitignore
    great_expectations.yml
    expectations/
    notebooks/
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            renderers/
            styles/
                data_docs_custom_styles.css
            views/
    uncommitted/
        config_variables.yml
        data_docs/
        validations/
"""
    project_path = str(tmp_path_factory.mktemp('stuff'))
    ge_dir = os.path.join(project_path, "great_expectations")
    DataContext.create(project_path)
    fixture = gen_directory_tree_str(ge_dir)
    print(fixture)
    assert fixture == expected
    DataContext.create(project_path)
    obs = gen_directory_tree_str(ge_dir)
    assert obs == expected
def test_data_context_do_all_uncommitted_dirs_exist(tmp_path_factory):
    # all_uncommitted_directories_exist flips to False once any of the
    # scaffolded uncommitted subdirectories is removed.
    expected = """\
uncommitted/
    config_variables.yml
    data_docs/
    validations/
"""
    project_path = str(tmp_path_factory.mktemp('stuff'))
    ge_dir = os.path.join(project_path, "great_expectations")
    uncommitted_dir = os.path.join(ge_dir, "uncommitted")
    DataContext.create(project_path)
    fixture = gen_directory_tree_str(uncommitted_dir)
    print(fixture)
    assert fixture == expected
    assert DataContext.all_uncommitted_directories_exist(ge_dir)
    shutil.rmtree(os.path.join(uncommitted_dir, "data_docs"))
    shutil.rmtree(os.path.join(uncommitted_dir, "validations"))
    assert not DataContext.all_uncommitted_directories_exist(project_path)
def test_data_context_create_does_not_overwrite_existing_config_variables_yml(tmp_path_factory):
    # Same preservation guarantee as for great_expectations.yml, but for the
    # uncommitted config_variables.yml (which may hold user secrets).
    project_path = str(tmp_path_factory.mktemp('data_context'))
    DataContext.create(project_path)
    ge_dir = os.path.join(project_path, "great_expectations")
    uncommitted_dir = os.path.join(ge_dir, "uncommitted")
    config_vars_yml = os.path.join(uncommitted_dir, "config_variables.yml")
    with open(config_vars_yml, "a") as ff:
        ff.write("# LOOK I WAS MODIFIED")
    with pytest.warns(UserWarning):
        DataContext.create(project_path)
    with open(config_vars_yml, "r") as ff:
        obs = ff.read()
    print(obs)
    assert "# LOOK I WAS MODIFIED" in obs
def test_scaffold_directories_and_notebooks(tmp_path_factory):
    # scaffold_directories/scaffold_notebooks operate on a bare directory
    # (no DataContext.create) and must produce exactly these entries.
    empty_directory = str(tmp_path_factory.mktemp("test_scaffold_directories_and_notebooks"))
    DataContext.scaffold_directories(empty_directory)
    DataContext.scaffold_notebooks(empty_directory)
    assert set(os.listdir(empty_directory)) == {
        'plugins',
        'expectations',
        '.gitignore',
        'uncommitted',
        'notebooks'
    }
    assert set(os.listdir(os.path.join(empty_directory, "uncommitted"))) == {
        'data_docs',
        'validations'
    }
    # Each notebook flavor (per NOTEBOOK_SUBDIRECTORIES) gets one playground file.
    for subdir in DataContext.NOTEBOOK_SUBDIRECTORIES:
        subdir_path = os.path.join(empty_directory, "notebooks", subdir)
        assert set(os.listdir(subdir_path)) == {
            "validation_playground.ipynb"
        }
def test_build_batch_kwargs(titanic_multibatch_data_context):
    # With an explicit partition_id the generator resolves a specific file;
    # without one it yields partitions (order unspecified) covering all files.
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic", partition_id="Titanic_1912")
    assert os.path.relpath("./data/titanic/Titanic_1912.csv") in batch_kwargs["path"]
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic", partition_id="Titanic_1911")
    assert os.path.relpath("./data/titanic/Titanic_1911.csv") in batch_kwargs["path"]
    paths = []
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic")
    paths.append(os.path.basename(batch_kwargs["path"]))
    batch_kwargs = titanic_multibatch_data_context.build_batch_kwargs("mydatasource", "mygenerator", name="titanic")
    paths.append(os.path.basename(batch_kwargs["path"]))
    # NOTE(review): this assumes two successive calls cycle through both
    # partitions rather than repeating one — confirm generator semantics.
    assert set(["Titanic_1912.csv", "Titanic_1911.csv"]) == set(paths)
# --- DataContext.get_docs_sites_urls for default and customized local sites.
def test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasources_and_a_site_configured(tmp_path_factory):
    empty_directory = str(tmp_path_factory.mktemp("another_empty_project"))
    DataContext.create(empty_directory)
    context = DataContext(os.path.join(empty_directory, DataContext.GE_DIR))
    obs = context.get_docs_sites_urls()
    assert len(obs) == 1
    assert obs[0].endswith("great_expectations/uncommitted/data_docs/local_site/index.html")
def test_existing_local_data_docs_urls_returns_single_url_from_customized_local_site(tmp_path_factory):
    empty_directory = str(tmp_path_factory.mktemp("yo_yo"))
    DataContext.create(empty_directory)
    ge_dir = os.path.join(empty_directory, DataContext.GE_DIR)
    context = DataContext(ge_dir)
    # Replace the default site config with a single custom filesystem site.
    context._project_config["data_docs_sites"] = {
        "my_rad_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/some/local/path/"
            }
        }
    }
    context._save_project_config()
    # Reload so the context picks up the saved config from disk.
    context = DataContext(ge_dir)
    context.build_data_docs()
    expected_path = os.path.join(ge_dir, "uncommitted/data_docs/some/local/path/index.html")
    assert os.path.isfile(expected_path)
    obs = context.get_docs_sites_urls()
    assert obs == ["file://{}".format(expected_path)]
def test_existing_local_data_docs_urls_returns_multiple_urls_from_customized_local_site(tmp_path_factory):
    empty_directory = str(tmp_path_factory.mktemp("yo_yo_ma"))
    DataContext.create(empty_directory)
    ge_dir = os.path.join(empty_directory, DataContext.GE_DIR)
    context = DataContext(ge_dir)
    # Two custom sites -> two built index pages and two URLs.
    context._project_config["data_docs_sites"] = {
        "my_rad_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/some/path/"
            }
        },
        "another_just_amazing_site": {
            "class_name": "SiteBuilder",
            "store_backend": {
                "class_name": "TupleFilesystemStoreBackend",
                "base_directory": "uncommitted/data_docs/another/path/"
            }
        }
    }
    context._save_project_config()
    context = DataContext(ge_dir)
    context.build_data_docs()
    data_docs_dir = os.path.join(ge_dir, "uncommitted/data_docs/")
    path_1 = os.path.join(data_docs_dir, "some/path/index.html")
    path_2 = os.path.join(data_docs_dir, "another/path/index.html")
    for expected_path in [path_1, path_2]:
        assert os.path.isfile(expected_path)
    obs = context.get_docs_sites_urls()
    # Compare as sets: site ordering is not part of the contract here.
    assert set(obs) == set([
        "file://{}".format(path_1),
        "file://{}".format(path_2),
    ])
def test_load_config_variables_file(basic_data_context_config, tmp_path_factory):
    # The config_variables_file_path may contain ${ENV_VAR} placeholders; this
    # verifies they are substituted from os.environ at load time.
    base_path = str(tmp_path_factory.mktemp('test_load_config_variables_file'))
    safe_mmkdir(os.path.join(base_path, "uncommitted"))
    with open(os.path.join(base_path, "uncommitted", "dev_variables.yml"), "w") as outfile:
        yaml.dump({'env': 'dev'}, outfile)
    with open(os.path.join(base_path, "uncommitted", "prod_variables.yml"), "w") as outfile:
        yaml.dump({'env': 'prod'}, outfile)
    basic_data_context_config["config_variables_file_path"] = "uncommitted/${TEST_CONFIG_FILE_ENV}_variables.yml"
    try:
        os.environ["TEST_CONFIG_FILE_ENV"] = "dev"
        context = BaseDataContext(basic_data_context_config, context_root_dir=base_path)
        config_vars = context._load_config_variables_file()
        assert config_vars['env'] == 'dev'
        os.environ["TEST_CONFIG_FILE_ENV"] = "prod"
        context = BaseDataContext(basic_data_context_config, context_root_dir=base_path)
        config_vars = context._load_config_variables_file()
        assert config_vars['env'] == 'prod'
    except Exception:
        raise
    finally:
        # Always scrub the env var so other tests are unaffected.
        del os.environ["TEST_CONFIG_FILE_ENV"]
def test_list_expectation_suite_with_no_suites(titanic_data_context):
    """A fresh context reports an empty — but list-typed — set of suite names."""
    names = titanic_data_context.list_expectation_suite_names()
    assert isinstance(names, list)
    assert names == []
def test_list_expectation_suite_with_one_suite(titanic_data_context):
    titanic_data_context.create_expectation_suite('warning')
    observed = titanic_data_context.list_expectation_suite_names()
    assert isinstance(observed, list)
    assert observed == ['warning']
def test_list_expectation_suite_with_multiple_suites(titanic_data_context):
    titanic_data_context.create_expectation_suite('a.warning')
    titanic_data_context.create_expectation_suite('b.warning')
    titanic_data_context.create_expectation_suite('c.warning')
    observed = titanic_data_context.list_expectation_suite_names()
    assert isinstance(observed, list)
    # Names come back in a deterministic (sorted) order matching creation here.
    assert observed == ['a.warning', 'b.warning', 'c.warning']
    assert len(observed) == 3
# --- DataContext.get_batch argument validation and happy paths.
def test_get_batch_raises_error_when_passed_a_non_string_type_for_suite_parameter(
    titanic_data_context,
):
    with pytest.raises(DataContextError):
        titanic_data_context.get_batch({}, 99)
def test_get_batch_raises_error_when_passed_a_non_dict_or_batch_kwarg_type_for_batch_kwarg_parameter(
    titanic_data_context,
):
    with pytest.raises(BatchKwargsError):
        titanic_data_context.get_batch(99, "foo")
def test_get_batch_when_passed_a_suite_name(titanic_data_context):
    # get_batch accepts the suite *name*; the suite must already exist.
    context = titanic_data_context
    root_dir = context.root_directory
    batch_kwargs = {
        "datasource": "mydatasource",
        "path": os.path.join(root_dir, "..", "data", "Titanic.csv"),
    }
    context.create_expectation_suite("foo")
    assert context.list_expectation_suite_names() == ["foo"]
    batch = context.get_batch(batch_kwargs, "foo")
    assert isinstance(batch, Dataset)
    assert isinstance(batch.get_expectation_suite(), ExpectationSuite)
def test_get_batch_when_passed_a_suite(titanic_data_context):
    # Same as above but passing the ExpectationSuite object itself.
    context = titanic_data_context
    root_dir = context.root_directory
    batch_kwargs = {
        "datasource": "mydatasource",
        "path": os.path.join(root_dir, "..", "data", "Titanic.csv"),
    }
    context.create_expectation_suite("foo")
    assert context.list_expectation_suite_names() == ["foo"]
    suite = context.get_expectation_suite("foo")
    batch = context.get_batch(batch_kwargs, suite)
    assert isinstance(batch, Dataset)
    assert isinstance(batch.get_expectation_suite(), ExpectationSuite)
| true | true |
1c312caf3764cd4dad921eaa796c13378b4645a6 | 3,030 | py | Python | logging_server/logger.py | Geson-anko/logging_server | 4617e6a971c81fc4df1cad1c35cdae5f09e20382 | [
"MIT"
] | 1 | 2022-03-29T23:00:56.000Z | 2022-03-29T23:00:56.000Z | logging_server/logger.py | Geson-anko/logging_server | 4617e6a971c81fc4df1cad1c35cdae5f09e20382 | [
"MIT"
] | 1 | 2022-03-23T11:54:02.000Z | 2022-03-23T11:54:02.000Z | logging_server/logger.py | Geson-anko/logging_server | 4617e6a971c81fc4df1cad1c35cdae5f09e20382 | [
"MIT"
] | null | null | null | """
Logger class for mutiprocessing logging.
Usage:
from logging_server import SocketLogger
logger = SocketLogger(__name__)
logger.setLevel(0)
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
logger.exception("exception")
Note: this logger class wraps logging.Logger instead of inheriting from it,
so it does not behave exactly like a plain Logger instance.
It provides only the methods needed for logging, and adding arbitrary
handlers to it may not work correctly.
"""
import functools
import logging
import logging.handlers
import os
from typing import *
def _check_pid(func):
    """Decorator for SocketLogger methods that revalidates the wrapped logger.

    Before delegating to ``func`` it ensures the internal logger exists (it is
    dropped on pickling, see ``SocketLogger.__reduce__``) and rebuilds it when
    the current process id differs from the recorded one, i.e. after a fork.
    """
    @functools.wraps(func)
    def check(self, *args, **kwds):
        # Recreate the logger if it was removed for pickling.
        if self.logger is None:
            self.set_logger()
        # A pid change means we are running in a forked child: the socket
        # handler inherited from the parent must not be shared, so rebuild.
        pid = os.getpid()
        if self._pid != pid:
            self._pid = pid
            self.reset_logger()
        # Propagate the wrapped method's return value (the original swallowed it).
        return func(self, *args, **kwds)
    return check
class SocketLogger:
    """Pickle- and fork-safe logger that ships records to a TCP log server.

    Wraps a private :class:`logging.Logger` carrying a single
    :class:`logging.handlers.SocketHandler`.  The wrapped logger is dropped on
    pickling (see :meth:`__reduce__`) and lazily rebuilt — also after a fork —
    by the ``_check_pid`` decorator on the logging methods, which is what makes
    instances safe to hand to ``multiprocessing`` workers.
    """

    # Pid of the process that last (re)built the wrapped logger.
    _pid: Optional[int] = None
    # Name-mangled to _SocketLogger__logger; None while pickled (see __reduce__).
    __logger: Optional[logging.Logger] = None

    def __init__(
        self, name: str, level: int = logging.NOTSET, host="localhost",
        port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,
    ) -> None:
        """Record the logger settings and build the wrapped logger.

        :param name: logger name, as for ``logging.getLogger``.
        :param level: initial logging level.
        :param host: host of the TCP logging server.
        :param port: port of the TCP logging server.
        """
        self._pid = os.getpid()
        self.name = name
        self.level = level
        self.host = host
        self.port = port
        self.set_logger()

    @property
    def logger(self) -> Optional[logging.Logger]:
        """The wrapped logging.Logger (None while pickled)."""
        return self.__logger

    def setLevel(self, level: int) -> None:
        """Set the level on the wrapped logger and remember it.

        Storing the level on ``self`` is required so that a logger rebuilt
        after a fork or unpickling keeps the level set here (the original
        implementation lost it on rebuild).
        """
        self.level = level
        self.logger.setLevel(level)

    def set_logger(self) -> None:
        """Create the wrapped logger with name, level and one socket handler."""
        self.__logger = logging.Logger(self.name)
        self.__logger.setLevel(self.level)
        socket_handler = logging.handlers.SocketHandler(self.host, self.port)
        socket_handler.setLevel(logging.NOTSET)
        self.__logger.addHandler(socket_handler)
        self.__logger.propagate = False  # Because another logger is propagating in server process.

    def remove_handlers(self) -> None:
        """Detach every handler from the wrapped logger.

        Iterates over a snapshot of the handler list: removing entries while
        iterating the live list (as the original code did) skips handlers
        whenever more than one is attached.
        """
        for hdlr in list(self.__logger.handlers):
            self.__logger.removeHandler(hdlr)

    def reset_logger(self) -> None:
        """Tear down all handlers and rebuild the wrapped logger from scratch."""
        self.remove_handlers()
        self.set_logger()

    def __reduce__(self):
        """Pickling helper: drop the internal Logger because it is not picklable.

        The logger is lazily recreated by ``_check_pid`` on the next logging
        call in the unpickling process.
        """
        self.__logger = None
        return super().__reduce__()

    @_check_pid
    def debug(self, *args, **kwds) -> None: self.logger.debug(*args, **kwds)
    @_check_pid
    def info(self, *args, **kwds) -> None: self.logger.info(*args, **kwds)
    @_check_pid
    def warn(self, *args, **kwds) -> None: self.logger.warn(*args, **kwds)
    @_check_pid
    def warning(self, *args, **kwds) -> None: self.logger.warning(*args, **kwds)
    @_check_pid
    def error(self, *args, **kwds) -> None: self.logger.error(*args, **kwds)
    @_check_pid
    def critical(self, *args, **kwds) -> None: self.logger.critical(*args, **kwds)
    @_check_pid
    def exception(self, *args, **kwds) -> None: self.logger.exception(*args, **kwds)
    @_check_pid
    def log(self, *args, **kwds) -> None: self.logger.log(*args, **kwds)
| 29.417476 | 96 | 0.639934 | import os
import logging
import logging.handlers
from typing import *
def _check_pid(func):
    """Wrap a SocketLogger method so the wrapped logger is valid before use.

    Rebuilds the logger when it is missing (dropped for pickling) or when the
    process id has changed (i.e. after a fork).
    """
    def wrapper(self, *args, **kwargs):
        if self.logger is None:
            self.set_logger()
        current = os.getpid()
        if current != self._pid:
            self._pid = current
            self.reset_logger()
        func(self, *args, **kwargs)
    return wrapper
class SocketLogger:
    """Pickle- and fork-safe logger that forwards records to a TCP log server.

    Wraps a private ``logging.Logger`` with a single ``SocketHandler``; the
    wrapped logger is dropped on pickling (``__reduce__``) and rebuilt lazily
    by the ``_check_pid`` decorator on the logging methods.
    """
    # Pid recorded when the wrapped logger was last (re)built.
    _pid:int = None
    # Name-mangled to _SocketLogger__logger; None while the instance is pickled.
    __logger:logging.Logger = None
    def __init__(
        self, name:str, level:int=logging.NOTSET, host="localhost",
        port:int=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
    ) -> None:
        """Store name/level/host/port and build the wrapped logger."""
        self._pid = os.getpid()
        self.name = name
        self.level = level
        self.host = host
        self.port = port
        self.set_logger()
    @property
    def logger(self):
        """The wrapped logging.Logger (None while pickled)."""
        return self.__logger
    def setLevel(self, level:int) -> None:
        # NOTE(review): the level is set only on the live logger, not stored on
        # self.level — it is lost when set_logger() rebuilds after fork/unpickle.
        self.logger.setLevel(level)
    def set_logger(self):
        """Create the wrapped logger with name, level and one socket handler."""
        self.__logger = logging.Logger(self.name)
        self.__logger.setLevel(self.level)
        socket_handler = logging.handlers.SocketHandler(self.host, self.port)
        socket_handler.setLevel(logging.NOTSET)
        self.__logger.addHandler(socket_handler)
        # A server-side logger propagates instead; avoid duplicate emission.
        self.__logger.propagate=False
    def remove_handlers(self):
        """Detach handlers from the wrapped logger."""
        # NOTE(review): removing while iterating the live handler list skips
        # entries when more than one handler is attached; iterate a copy.
        for hdlr in self.__logger.handlers:
            self.__logger.removeHandler(hdlr)
    def reset_logger(self):
        """Tear down handlers and rebuild the wrapped logger."""
        self.remove_handlers()
        self.set_logger()
    def __reduce__(self):
        """Pickling helper: drop the internal Logger (it is not picklable)."""
        self.__logger = None
        return super().__reduce__()
    # Thin delegating wrappers; _check_pid revalidates the logger first.
    @_check_pid
    def debug(self,*args, **kwds) -> None: self.logger.debug(*args, **kwds)
    @_check_pid
    def info(self,*args, **kwds) -> None: self.logger.info(*args, **kwds)
    @_check_pid
    def warn(self,*args, **kwds) -> None: self.logger.warn(*args, **kwds)
    @_check_pid
    def warning(self,*args, **kwds) -> None: self.logger.warning(*args, **kwds)
    @_check_pid
    def error(self,*args, **kwds) -> None: self.logger.error(*args, **kwds)
    @_check_pid
    def critical(self,*args, **kwds) -> None: self.logger.critical(*args, **kwds)
    @_check_pid
    def exception(self,*args, **kwds) -> None: self.logger.exception(*args, **kwds)
    @_check_pid
    def log(self, *args,**kwds) -> None: self.logger.log(*args,**kwds)
| true | true |
1c312d1dd12914eccb845d112a48c2d3462790c7 | 135 | py | Python | frontend/admin.py | ebmdatalab/openpathology-web | e0620a39b174f2789df2cbea4e12bc413c1723ac | [
"MIT"
] | 2 | 2019-10-08T10:13:25.000Z | 2019-10-08T21:55:38.000Z | frontend/admin.py | HDRUK/openpathology-web | e0620a39b174f2789df2cbea4e12bc413c1723ac | [
"MIT"
] | 44 | 2019-09-25T06:36:28.000Z | 2021-08-18T11:59:24.000Z | frontend/admin.py | HDRUK/openpathology-web | e0620a39b174f2789df2cbea4e12bc413c1723ac | [
"MIT"
] | 4 | 2019-08-12T14:02:54.000Z | 2020-06-16T20:33:11.000Z | from django.contrib import admin
from .models import Measure
# Register Measure with the Django admin using stock ModelAdmin behaviour;
# add list_display/search_fields here when customization is needed.
@admin.register(Measure)
class MeasureAdmin(admin.ModelAdmin):
    pass
| 16.875 | 37 | 0.792593 | from django.contrib import admin
from .models import Measure
# Register Measure with the Django admin using default ModelAdmin options.
@admin.register(Measure)
class MeasureAdmin(admin.ModelAdmin):
    pass
| true | true |
1c312e102b9cebf8bdc37dfacda2a7151ffc0173 | 47,881 | py | Python | testing/python/collect.py | cristianMeli/pytest | 1824349f74298112722396be6f84a121bc9d6d63 | [
"MIT"
] | 1 | 2021-11-09T10:45:59.000Z | 2021-11-09T10:45:59.000Z | testing/python/collect.py | cristianMeli/pytest | 1824349f74298112722396be6f84a121bc9d6d63 | [
"MIT"
] | 59 | 2020-10-27T20:30:33.000Z | 2022-03-28T03:02:29.000Z | testing/python/collect.py | symonk/pytest | a53abe93d87083bbd5c183bd654f5787c0376934 | [
"MIT"
] | null | null | null | import os
import sys
import textwrap
from typing import Any
from typing import Dict
import _pytest._code
import pytest
from _pytest.config import ExitCode
from _pytest.main import Session
from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.pytester import Pytester
from _pytest.python import Class
from _pytest.python import Instance
class TestModule:
    """Collection behaviour of pytest.Module: import failures, duplicate
    basenames, import modes, and traceback rendering for import errors."""
    def test_failing_import(self, pytester: Pytester) -> None:
        modcol = pytester.getmodulecol("import alksdjalskdjalkjals")
        pytest.raises(Collector.CollectError, modcol.collect)
    def test_import_duplicate(self, pytester: Pytester) -> None:
        # Two rootdir-relative test files with the same basename must produce
        # the "import file mismatch" error with a HINT.
        a = pytester.mkdir("a")
        b = pytester.mkdir("b")
        p1 = a.joinpath("test_whatever.py")
        p1.touch()
        p2 = b.joinpath("test_whatever.py")
        p2.touch()
        # ensure we don't have it imported already
        sys.modules.pop(p1.stem, None)
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*import*mismatch*",
                "*imported*test_whatever*",
                "*%s*" % p1,
                "*not the same*",
                "*%s*" % p2,
                "*HINT*",
            ]
        )
    def test_import_prepend_append(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        # With two x456.py modules on the path, --import-mode=append imports
        # the sys.path one (root1) and fails; the default prepend mode imports
        # the local one (root2) and passes.
        root1 = pytester.mkdir("root1")
        root2 = pytester.mkdir("root2")
        root1.joinpath("x456.py").touch()
        root2.joinpath("x456.py").touch()
        p = root2.joinpath("test_x456.py")
        monkeypatch.syspath_prepend(str(root1))
        p.write_text(
            textwrap.dedent(
                """\
                import x456
                def test():
                    assert x456.__file__.startswith({!r})
                """.format(
                    str(root2)
                )
            )
        )
        with monkeypatch.context() as mp:
            mp.chdir(root2)
            reprec = pytester.inline_run("--import-mode=append")
            reprec.assertoutcome(passed=0, failed=1)
            reprec = pytester.inline_run()
            reprec.assertoutcome(passed=1)
    def test_syntax_error_in_module(self, pytester: Pytester) -> None:
        modcol = pytester.getmodulecol("this is a syntax error")
        # Collecting twice must raise both times (error is not cached away).
        pytest.raises(modcol.CollectError, modcol.collect)
        pytest.raises(modcol.CollectError, modcol.collect)
    def test_module_considers_pluginmanager_at_import(self, pytester: Pytester) -> None:
        modcol = pytester.getmodulecol("pytest_plugins='xasdlkj',")
        pytest.raises(ImportError, lambda: modcol.obj)
    def test_invalid_test_module_name(self, pytester: Pytester) -> None:
        # A dotted filename is not an importable module name.
        a = pytester.mkdir("a")
        a.joinpath("test_one.part1.py").touch()
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*test_one.part1*",
                "Hint: make sure your test modules/packages have valid Python names.",
            ]
        )
    @pytest.mark.parametrize("verbose", [0, 1, 2])
    def test_show_traceback_import_error(
        self, pytester: Pytester, verbose: int
    ) -> None:
        """Import errors when collecting modules should display the traceback (#1976).
        With low verbosity we omit pytest and internal modules, otherwise show all traceback entries.
        """
        pytester.makepyfile(
            foo_traceback_import_error="""
               from bar_traceback_import_error import NOT_AVAILABLE
           """,
            bar_traceback_import_error="",
        )
        pytester.makepyfile(
            """
            import foo_traceback_import_error
        """
        )
        args = ("-v",) * verbose
        result = pytester.runpytest(*args)
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*",
                "Traceback:",
                "*from bar_traceback_import_error import NOT_AVAILABLE",
                "*cannot import name *NOT_AVAILABLE*",
            ]
        )
        assert result.ret == 2
        stdout = result.stdout.str()
        # Internal frames appear only at maximum verbosity.
        if verbose == 2:
            assert "_pytest" in stdout
        else:
            assert "_pytest" not in stdout
    def test_show_traceback_import_error_unicode(self, pytester: Pytester) -> None:
        """Check test modules collected which raise ImportError with unicode messages
        are handled properly (#2336).
        """
        pytester.makepyfile("raise ImportError('Something bad happened ☺')")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*",
                "Traceback:",
                "*raise ImportError*Something bad happened*",
            ]
        )
        assert result.ret == 2
class TestClass:
    """Collection behaviour of pytest.Class: constructor warnings, staticmethod
    support, classmethod setup/teardown, and non-collectable class shapes."""
    def test_class_with_init_warning(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            class TestClass1(object):
                def __init__(self):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*cannot collect test class 'TestClass1' because it has "
                "a __init__ constructor (from: test_class_with_init_warning.py)"
            ]
        )
    def test_class_with_new_warning(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            class TestClass1(object):
                def __new__(self):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*cannot collect test class 'TestClass1' because it has "
                "a __new__ constructor (from: test_class_with_new_warning.py)"
            ]
        )
    def test_class_subclassobject(self, pytester: Pytester) -> None:
        # Lowercase class names are not collected even if they subclass object.
        pytester.getmodulecol(
            """
            class test(object):
                pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*collected 0*"])
    def test_static_method(self, pytester: Pytester) -> None:
        """Support for collecting staticmethod tests (#2528, #2699)"""
        pytester.getmodulecol(
            """
            import pytest
            class Test(object):
                @staticmethod
                def test_something():
                    pass
                @pytest.fixture
                def fix(self):
                    return 1
                @staticmethod
                def test_fix(fix):
                    assert fix == 1
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"])
    def test_setup_teardown_class_as_classmethod(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            test_mod1="""
            class TestClassMethod(object):
                @classmethod
                def setup_class(cls):
                    pass
                def test_1(self):
                    pass
                @classmethod
                def teardown_class(cls):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_issue1035_obj_has_getattr(self, pytester: Pytester) -> None:
        # A module attribute whose __getattr__ answers everything truthy must
        # not be mistaken for a collectable item.
        modcol = pytester.getmodulecol(
            """
            class Chameleon(object):
                def __getattr__(self, name):
                    return True
            chameleon = Chameleon()
        """
        )
        colitems = modcol.collect()
        assert len(colitems) == 0
    def test_issue1579_namedtuple(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            import collections
            TestCase = collections.namedtuple('TestCase', ['a'])
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            "*cannot collect test class 'TestCase' "
            "because it has a __new__ constructor*"
        )
    def test_issue2234_property(self, pytester: Pytester) -> None:
        # Collection must not trigger the property getter (which raises here).
        pytester.makepyfile(
            """
            class TestCase(object):
                @property
                def prop(self):
                    raise NotImplementedError()
        """
        )
        result = pytester.runpytest()
        assert result.ret == ExitCode.NO_TESTS_COLLECTED
class TestFunction:
    def test_getmodulecollector(self, pytester: Pytester) -> None:
        item = pytester.getitem("def test_func(): pass")
        modcol = item.getparent(pytest.Module)
        assert isinstance(modcol, pytest.Module)
        assert hasattr(modcol.obj, "test_func")
    @pytest.mark.filterwarnings("default")
    def test_function_as_object_instance_ignored(self, pytester: Pytester) -> None:
        # A callable *instance* bound to a test_ name is skipped with a warning.
        pytester.makepyfile(
            """
            class A(object):
                def __call__(self, tmp_path):
                    0/0
            test_a = A()
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "collected 0 items",
                "*test_function_as_object_instance_ignored.py:2: "
                "*cannot collect 'test_a' because it is not a function.",
            ]
        )
    @staticmethod
    def make_function(pytester: Pytester, **kwargs: Any) -> Any:
        # Helper: build a pytest.Function directly under a fresh Session,
        # bypassing normal file-based collection.
        from _pytest.fixtures import FixtureManager
        config = pytester.parseconfigure()
        session = Session.from_config(config)
        session._fixturemanager = FixtureManager(session)
        return pytest.Function.from_parent(parent=session, **kwargs)
    def test_function_equality(self, pytester: Pytester) -> None:
        # Functions compare by identity-relevant attributes, not by name alone.
        def func1():
            pass
        def func2():
            pass
        f1 = self.make_function(pytester, name="name", callobj=func1)
        assert f1 == f1
        f2 = self.make_function(
            pytester, name="name", callobj=func2, originalname="foobar"
        )
        assert f1 != f2
    def test_repr_produces_actual_test_id(self, pytester: Pytester) -> None:
        # repr() must show the test id verbatim, without escaping backslashes.
        f = self.make_function(
            pytester, name=r"test[\xe5]", callobj=self.test_repr_produces_actual_test_id
        )
        assert repr(f) == r"<Function test[\xe5]>"
    def test_issue197_parametrize_emptyset(self, pytester: Pytester) -> None:
        # Parametrizing over an empty set yields one skipped placeholder test.
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize('arg', [])
            def test_function(arg):
                pass
        """
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(skipped=1)
    def test_single_tuple_unwraps_values(self, pytester: Pytester) -> None:
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize(('arg',), [(1,)])
            def test_function(arg):
                assert arg == 1
        """
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)
    def test_issue213_parametrize_value_no_equal(self, pytester: Pytester) -> None:
        # Parametrization must never invoke __eq__ on parameter values.
        pytester.makepyfile(
            """
            import pytest
            class A(object):
                def __eq__(self, other):
                    raise ValueError("not possible")
            @pytest.mark.parametrize('arg', [A()])
            def test_function(arg):
                assert arg.__class__.__name__ == "A"
        """
        )
        reprec = pytester.inline_run("--fulltrace")
        reprec.assertoutcome(passed=1)
    def test_parametrize_with_non_hashable_values(self, pytester: Pytester) -> None:
        """Test parametrization with non-hashable values."""
        pytester.makepyfile(
            """
            archival_mapping = {
                '1.0': {'tag': '1.0'},
                '1.2.2a1': {'tag': 'release-1.2.2a1'},
            }
            import pytest
            @pytest.mark.parametrize('key value'.split(),
                                     archival_mapping.items())
            def test_archival_to_version(key, value):
                assert key in archival_mapping
                assert value == archival_mapping[key]
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)
    def test_parametrize_with_non_hashable_values_indirect(
        self, pytester: Pytester
    ) -> None:
        """Test parametrization with non-hashable values with indirect parametrization."""
        pytester.makepyfile(
            """
            archival_mapping = {
                '1.0': {'tag': '1.0'},
                '1.2.2a1': {'tag': 'release-1.2.2a1'},
            }
            import pytest
            @pytest.fixture
            def key(request):
                return request.param
            @pytest.fixture
            def value(request):
                return request.param
            @pytest.mark.parametrize('key value'.split(),
                                     archival_mapping.items(), indirect=True)
            def test_archival_to_version(key, value):
                assert key in archival_mapping
                assert value == archival_mapping[key]
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)
    def test_parametrize_overrides_fixture(self, pytester: Pytester) -> None:
        """Test parametrization when parameter overrides existing fixture with same name."""
        pytester.makepyfile(
            """
            import pytest
            @pytest.fixture
            def value():
                return 'value'
            @pytest.mark.parametrize('value',
                                     ['overridden'])
            def test_overridden_via_param(value):
                assert value == 'overridden'
            @pytest.mark.parametrize('somevalue', ['overridden'])
            def test_not_overridden(value, somevalue):
                assert value == 'value'
                assert somevalue == 'overridden'
            @pytest.mark.parametrize('other,value', [('foo', 'overridden')])
            def test_overridden_via_multiparam(other, value):
                assert other == 'foo'
                assert value == 'overridden'
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=3)
    def test_parametrize_overrides_parametrized_fixture(
        self, pytester: Pytester
    ) -> None:
        """Test parametrization when parameter overrides existing parametrized fixture with same name."""
        pytester.makepyfile(
            """
            import pytest
            @pytest.fixture(params=[1, 2])
            def value(request):
                return request.param
            @pytest.mark.parametrize('value',
                                     ['overridden'])
            def test_overridden_via_param(value):
                assert value == 'overridden'
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=1)
    def test_parametrize_overrides_indirect_dependency_fixture(
        self, pytester: Pytester
    ) -> None:
        """Test parametrization when parameter overrides a fixture that a test indirectly depends on"""
        pytester.makepyfile(
            """
            import pytest
            fix3_instantiated = False
            @pytest.fixture
            def fix1(fix2):
                return fix2 + '1'
            @pytest.fixture
            def fix2(fix3):
                return fix3 + '2'
            @pytest.fixture
            def fix3():
                global fix3_instantiated
                fix3_instantiated = True
                return '3'
            @pytest.mark.parametrize('fix2', ['2'])
            def test_it(fix1):
                assert fix1 == '21'
                assert not fix3_instantiated
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=1)
def test_parametrize_with_mark(self, pytester: Pytester) -> None:
items = pytester.getitems(
"""
import pytest
@pytest.mark.foo
@pytest.mark.parametrize('arg', [
1,
pytest.param(2, marks=[pytest.mark.baz, pytest.mark.bar])
])
def test_function(arg):
pass
"""
)
keywords = [item.keywords for item in items]
assert (
"foo" in keywords[0]
and "bar" not in keywords[0]
and "baz" not in keywords[0]
)
assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1]
    def test_parametrize_with_empty_string_arguments(self, pytester: Pytester) -> None:
        """Empty and whitespace-only string params still produce distinct test ids."""
        items = pytester.getitems(
            """\
            import pytest
            @pytest.mark.parametrize('v', ('', ' '))
            @pytest.mark.parametrize('w', ('', ' '))
            def test(v, w): ...
            """
        )
        names = {item.name for item in items}
        assert names == {"test[-]", "test[ -]", "test[- ]", "test[ - ]"}
    def test_function_equality_with_callspec(self, pytester: Pytester) -> None:
        """Parametrized Function items with different callspecs compare unequal."""
        items = pytester.getitems(
            """
            import pytest
            @pytest.mark.parametrize('arg', [1,2])
            def test_function(arg):
                pass
        """
        )
        assert items[0] != items[1]
        assert not (items[0] == items[1])
    def test_pyfunc_call(self, pytester: Pytester) -> None:
        """A pytest_pyfunc_call impl returning True stops the call chain.

        MyPlugin2's hook returns True, which should prevent MyPlugin1's
        ValueError from propagating when the hook is invoked below.
        """
        item = pytester.getitem("def test_func(): raise ValueError")
        config = item.config
        class MyPlugin1:
            def pytest_pyfunc_call(self):
                raise ValueError
        class MyPlugin2:
            def pytest_pyfunc_call(self):
                return True
        config.pluginmanager.register(MyPlugin1())
        config.pluginmanager.register(MyPlugin2())
        config.hook.pytest_runtest_setup(item=item)
        config.hook.pytest_pyfunc_call(pyfuncitem=item)
    def test_multiple_parametrize(self, pytester: Pytester) -> None:
        """Stacked @parametrize decorators produce the full cross-product of ids."""
        modcol = pytester.getmodulecol(
            """
            import pytest
            @pytest.mark.parametrize('x', [0, 1])
            @pytest.mark.parametrize('y', [2, 3])
            def test1(x, y):
                pass
        """
        )
        colitems = modcol.collect()
        assert colitems[0].name == "test1[2-0]"
        assert colitems[1].name == "test1[2-1]"
        assert colitems[2].name == "test1[3-0]"
        assert colitems[3].name == "test1[3-1]"
    def test_issue751_multiple_parametrize_with_ids(self, pytester: Pytester) -> None:
        """Explicit ids from stacked @parametrize combine correctly on class methods (#751)."""
        modcol = pytester.getmodulecol(
            """
            import pytest
            @pytest.mark.parametrize('x', [0], ids=['c'])
            @pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])
            class Test(object):
                def test1(self, x, y):
                    pass
                def test2(self, x, y):
                    pass
        """
        )
        colitems = modcol.collect()[0].collect()[0].collect()
        assert colitems[0].name == "test1[a-c]"
        assert colitems[1].name == "test1[b-c]"
        assert colitems[2].name == "test2[a-c]"
        assert colitems[3].name == "test2[b-c]"
    def test_parametrize_skipif(self, pytester: Pytester) -> None:
        """A skipif mark attached via pytest.param skips only that parametrization."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.skipif('True')
            @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
            def test_skip_if(x):
                assert x < 2
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])
    def test_parametrize_skip(self, pytester: Pytester) -> None:
        """An unconditional skip mark attached via pytest.param skips only that case."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.skip('')
            @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
            def test_skip(x):
                assert x < 2
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])
    def test_parametrize_skipif_no_skip(self, pytester: Pytester) -> None:
        """A non-matching skipif lets its parametrization run (and fail here)."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.skipif('False')
            @pytest.mark.parametrize('x', [0, 1, m(2)])
            def test_skipif_no_skip(x):
                assert x < 2
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 1 failed, 2 passed in *"])
    def test_parametrize_xfail(self, pytester: Pytester) -> None:
        """An xfail mark on one param reports that failing case as xfailed."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.xfail('True')
            @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
            def test_xfail(x):
                assert x < 2
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 2 passed, 1 xfailed in *"])
    def test_parametrize_passed(self, pytester: Pytester) -> None:
        """An xfail-marked param that actually passes is reported as xpassed."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.xfail('True')
            @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
            def test_xfail(x):
                pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 2 passed, 1 xpassed in *"])
    def test_parametrize_xfail_passed(self, pytester: Pytester) -> None:
        """A non-matching xfail('False') condition leaves all cases as plain passes."""
        pytester.makepyfile(
            """
            import pytest
            m = pytest.mark.xfail('False')
            @pytest.mark.parametrize('x', [0, 1, m(2)])
            def test_passed(x):
                pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["* 3 passed in *"])
    def test_function_originalname(self, pytester: Pytester) -> None:
        """Function.originalname strips the parametrization suffix from the item name."""
        items = pytester.getitems(
            """
            import pytest
            @pytest.mark.parametrize('arg', [1,2])
            def test_func(arg):
                pass
            def test_no_param():
                pass
        """
        )
        originalnames = []
        for x in items:
            assert isinstance(x, pytest.Function)
            originalnames.append(x.originalname)
        assert originalnames == [
            "test_func",
            "test_func",
            "test_no_param",
        ]
    def test_function_with_square_brackets(self, pytester: Pytester) -> None:
        """Check that functions with square brackets don't cause trouble."""
        p1 = pytester.makepyfile(
            """
            locals()["test_foo[name]"] = lambda: None
            """
        )
        result = pytester.runpytest("-v", str(p1))
        # The literal brackets in the name appear fnmatch-escaped as [[]name[]].
        result.stdout.fnmatch_lines(
            [
                "test_function_with_square_brackets.py::test_foo[[]name[]] PASSED *",
                "*= 1 passed in *",
            ]
        )
class TestSorting:
    """Equality/hashing of collected items and their source-order sorting."""
    def test_check_equality(self, pytester: Pytester) -> None:
        """Same-named Function items are equal and hash alike; everything else differs."""
        modcol = pytester.getmodulecol(
            """
            def test_pass(): pass
            def test_fail(): assert 0
        """
        )
        fn1 = pytester.collect_by_name(modcol, "test_pass")
        assert isinstance(fn1, pytest.Function)
        fn2 = pytester.collect_by_name(modcol, "test_pass")
        assert isinstance(fn2, pytest.Function)
        assert fn1 == fn2
        assert fn1 != modcol
        assert hash(fn1) == hash(fn2)
        fn3 = pytester.collect_by_name(modcol, "test_fail")
        assert isinstance(fn3, pytest.Function)
        assert not (fn1 == fn3)
        assert fn1 != fn3
        # Comparisons against unrelated types must be False, not raise.
        for fn in fn1, fn2, fn3:
            assert fn != 3  # type: ignore[comparison-overlap]
            assert fn != modcol
            assert fn != [1, 2, 3]  # type: ignore[comparison-overlap]
            assert [1, 2, 3] != fn  # type: ignore[comparison-overlap]
            assert modcol != fn
    def test_allow_sane_sorting_for_decorators(self, pytester: Pytester) -> None:
        """Decorated functions sort by the original definition via the `place_as` attribute."""
        modcol = pytester.getmodulecol(
            """
            def dec(f):
                g = lambda: f(2)
                g.place_as = f
                return g
            def test_b(y):
                pass
            test_b = dec(test_b)
            def test_a(y):
                pass
            test_a = dec(test_a)
        """
        )
        colitems = modcol.collect()
        assert len(colitems) == 2
        assert [item.name for item in colitems] == ["test_b", "test_a"]
    def test_ordered_by_definition_order(self, pytester: Pytester) -> None:
        """Methods are collected in definition order even when shared across classes."""
        pytester.makepyfile(
            """\
            class Test1:
                def test_foo(): pass
                def test_bar(): pass
            class Test2:
                def test_foo(): pass
                test_bar = Test1.test_bar
            class Test3(Test2):
                def test_baz(): pass
            """
        )
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(
            [
                "*Class Test1*",
                "*Function test_foo*",
                "*Function test_bar*",
                "*Class Test2*",
                # previously the order was flipped due to Test1.test_bar reference
                "*Function test_foo*",
                "*Function test_bar*",
                "*Class Test3*",
                "*Function test_foo*",
                "*Function test_bar*",
                "*Function test_baz*",
            ]
        )
class TestConftestCustomization:
    """Customizing collection from conftest via pytest_pycollect_* hooks."""
    def test_pytest_pycollect_module(self, pytester: Pytester) -> None:
        """pytest_pycollect_makemodule may return a custom Module subclass per file."""
        pytester.makeconftest(
            """
            import pytest
            class MyModule(pytest.Module):
                pass
            def pytest_pycollect_makemodule(fspath, parent):
                if fspath.name == "test_xyz.py":
                    return MyModule.from_parent(path=fspath, parent=parent)
        """
        )
        pytester.makepyfile("def test_some(): pass")
        pytester.makepyfile(test_xyz="def test_func(): pass")
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*<Module*test_pytest*", "*<MyModule*xyz*"])
    def test_customized_pymakemodule_issue205_subdir(self, pytester: Pytester) -> None:
        """A hookwrapper in a subdirectory conftest can mutate the module object (#205)."""
        b = pytester.path.joinpath("a", "b")
        b.mkdir(parents=True)
        b.joinpath("conftest.py").write_text(
            textwrap.dedent(
                """\
                import pytest
                @pytest.hookimpl(hookwrapper=True)
                def pytest_pycollect_makemodule():
                    outcome = yield
                    mod = outcome.get_result()
                    mod.obj.hello = "world"
                """
            )
        )
        b.joinpath("test_module.py").write_text(
            textwrap.dedent(
                """\
                def test_hello():
                    assert hello == "world"
                """
            )
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)
    def test_customized_pymakeitem(self, pytester: Pytester) -> None:
        """A pytest_pycollect_makeitem hookwrapper can post-process generated items."""
        b = pytester.path.joinpath("a", "b")
        b.mkdir(parents=True)
        b.joinpath("conftest.py").write_text(
            textwrap.dedent(
                """\
                import pytest
                @pytest.hookimpl(hookwrapper=True)
                def pytest_pycollect_makeitem():
                    outcome = yield
                    if outcome.excinfo is None:
                        result = outcome.get_result()
                        if result:
                            for func in result:
                                func._some123 = "world"
                """
            )
        )
        b.joinpath("test_module.py").write_text(
            textwrap.dedent(
                """\
                import pytest
                @pytest.fixture()
                def obj(request):
                    return request.node._some123
                def test_hello(obj):
                    assert obj == "world"
                """
            )
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)
    def test_pytest_pycollect_makeitem(self, pytester: Pytester) -> None:
        """pytest_pycollect_makeitem may return a custom Function subclass for a name."""
        pytester.makeconftest(
            """
            import pytest
            class MyFunction(pytest.Function):
                pass
            def pytest_pycollect_makeitem(collector, name, obj):
                if name == "some":
                    return MyFunction.from_parent(name=name, parent=collector)
        """
        )
        pytester.makepyfile("def some(): pass")
        result = pytester.runpytest("--collect-only")
        result.stdout.fnmatch_lines(["*MyFunction*some*"])
    def test_issue2369_collect_module_fileext(self, pytester: Pytester) -> None:
        """Ensure we can collect files with weird file extensions as Python
        modules (#2369)"""
        # We'll implement a little finder and loader to import files containing
        # Python source code whose file extension is ".narf".
        pytester.makeconftest(
            """
            import sys, os, imp
            from _pytest.python import Module
            class Loader(object):
                def load_module(self, name):
                    return imp.load_source(name, name + ".narf")
            class Finder(object):
                def find_module(self, name, path=None):
                    if os.path.exists(name + ".narf"):
                        return Loader()
            sys.meta_path.append(Finder())
            def pytest_collect_file(fspath, parent):
                if fspath.suffix == ".narf":
                    return Module.from_parent(path=fspath, parent=parent)"""
        )
        pytester.makefile(
            ".narf",
            """\
            def test_something():
                assert 1 + 1 == 2""",
        )
        # Use runpytest_subprocess, since we're futzing with sys.meta_path.
        result = pytester.runpytest_subprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_early_ignored_attributes(self, pytester: Pytester) -> None:
        """Builtin attributes should be ignored early on, even if
        configuration would otherwise allow them.
        This tests a performance optimization, not correctness, really,
        although it tests PytestCollectionWarning is not raised, while
        it would have been raised otherwise.
        """
        pytester.makeini(
            """
            [pytest]
            python_classes=*
            python_functions=*
        """
        )
        pytester.makepyfile(
            """
            class TestEmpty:
                pass
            test_empty = TestEmpty()
            def test_real():
                pass
        """
        )
        items, rec = pytester.inline_genitems()
        assert rec.ret == 0
        assert len(items) == 1
def test_setup_only_available_in_subdir(pytester: Pytester) -> None:
    """Each subdirectory's conftest runtest hooks see only that subdir's items."""
    sub1 = pytester.mkpydir("sub1")
    sub2 = pytester.mkpydir("sub2")
    sub1.joinpath("conftest.py").write_text(
        textwrap.dedent(
            """\
            import pytest
            def pytest_runtest_setup(item):
                assert item.path.stem == "test_in_sub1"
            def pytest_runtest_call(item):
                assert item.path.stem == "test_in_sub1"
            def pytest_runtest_teardown(item):
                assert item.path.stem == "test_in_sub1"
            """
        )
    )
    sub2.joinpath("conftest.py").write_text(
        textwrap.dedent(
            """\
            import pytest
            def pytest_runtest_setup(item):
                assert item.path.stem == "test_in_sub2"
            def pytest_runtest_call(item):
                assert item.path.stem == "test_in_sub2"
            def pytest_runtest_teardown(item):
                assert item.path.stem == "test_in_sub2"
            """
        )
    )
    sub1.joinpath("test_in_sub1.py").write_text("def test_1(): pass")
    sub2.joinpath("test_in_sub2.py").write_text("def test_2(): pass")
    result = pytester.runpytest("-v", "-s")
    result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(pytester: Pytester) -> None:
    """A module collector's nodeid can be re-collected into an equivalent collector."""
    original = pytester.getmodulecol("pass", withinit=False)
    nodeid = original.nodeid
    recollected = original.session.perform_collect([nodeid], genitems=0)[0]
    assert original.name == recollected.name
class TestTracebackCutting:
    """How pytest trims internal frames from reported tracebacks."""
    def test_skip_simple(self):
        """The skip-raising frame is hidden; the calling test frame is not."""
        with pytest.raises(pytest.skip.Exception) as excinfo:
            pytest.skip("xxx")
        assert excinfo.traceback[-1].frame.code.name == "skip"
        assert excinfo.traceback[-1].ishidden()
        assert excinfo.traceback[-2].frame.code.name == "test_skip_simple"
        assert not excinfo.traceback[-2].ishidden()
    def test_traceback_argsetup(self, pytester: Pytester) -> None:
        """Fixture-setup errors show a cut traceback by default, full with --fulltrace."""
        pytester.makeconftest(
            """
            import pytest
            @pytest.fixture
            def hello(request):
                raise ValueError("xyz")
        """
        )
        p = pytester.makepyfile("def test(hello): pass")
        result = pytester.runpytest(p)
        assert result.ret != 0
        out = result.stdout.str()
        assert "xyz" in out
        assert "conftest.py:5: ValueError" in out
        numentries = out.count("_ _ _")  # separator for traceback entries
        assert numentries == 0
        result = pytester.runpytest("--fulltrace", p)
        out = result.stdout.str()
        assert "conftest.py:5: ValueError" in out
        numentries = out.count("_ _ _ _")  # separator for traceback entries
        assert numentries > 3
    def test_traceback_error_during_import(self, pytester: Pytester) -> None:
        """A module-level NameError at import time is trimmed unless --fulltrace."""
        pytester.makepyfile(
            """
            x = 1
            x = 2
            x = 17
            asd
        """
        )
        result = pytester.runpytest()
        assert result.ret != 0
        out = result.stdout.str()
        # The earlier (irrelevant) lines are cut from the default report...
        assert "x = 1" not in out
        assert "x = 2" not in out
        result.stdout.fnmatch_lines([" *asd*", "E*NameError*"])
        result = pytester.runpytest("--fulltrace")
        out = result.stdout.str()
        # ...but shown when --fulltrace is requested.
        assert "x = 1" in out
        assert "x = 2" in out
        result.stdout.fnmatch_lines([">*asd*", "E*NameError*"])
    def test_traceback_filter_error_during_fixture_collection(
        self, pytester: Pytester
    ) -> None:
        """Integration test for issue #995."""
        pytester.makepyfile(
            """
            import pytest
            def fail_me(func):
                ns = {}
                exec('def w(): raise ValueError("fail me")', ns)
                return ns['w']
            @pytest.fixture(scope='class')
            @fail_me
            def fail_fixture():
                pass
            def test_failing_fixture(fail_fixture):
                pass
        """
        )
        result = pytester.runpytest()
        assert result.ret != 0
        out = result.stdout.str()
        # Mishandling the dynamically generated frame used to INTERNALERROR here.
        assert "INTERNALERROR>" not in out
        result.stdout.fnmatch_lines(["*ValueError: fail me*", "* 1 error in *"])
    def test_filter_traceback_generated_code(self) -> None:
        """Test that filter_traceback() works with the fact that
        _pytest._code.code.Code.path attribute might return an str object.
        In this case, one of the entries on the traceback was produced by
        dynamically generated code.
        See: https://bitbucket.org/pytest-dev/py/issues/71
        This fixes #995.
        """
        from _pytest._code import filter_traceback
        tb = None
        try:
            ns: Dict[str, Any] = {}
            exec("def foo(): raise ValueError", ns)
            ns["foo"]()
        except ValueError:
            _, _, tb = sys.exc_info()
        assert tb is not None
        traceback = _pytest._code.Traceback(tb)
        assert isinstance(traceback[-1].path, str)
        # A frame from exec'd code has no real file, so it must be filtered out.
        assert not filter_traceback(traceback[-1])
    def test_filter_traceback_path_no_longer_valid(self, pytester: Pytester) -> None:
        """Test that filter_traceback() works with the fact that
        _pytest._code.code.Code.path attribute might return an str object.
        In this case, one of the files in the traceback no longer exists.
        This fixes #1133.
        """
        from _pytest._code import filter_traceback
        pytester.syspathinsert()
        pytester.makepyfile(
            filter_traceback_entry_as_str="""
            def foo():
                raise ValueError
        """
        )
        tb = None
        try:
            import filter_traceback_entry_as_str
            filter_traceback_entry_as_str.foo()
        except ValueError:
            _, _, tb = sys.exc_info()
        assert tb is not None
        # Delete the source file so its traceback entry points at a missing path.
        pytester.path.joinpath("filter_traceback_entry_as_str.py").unlink()
        traceback = _pytest._code.Traceback(tb)
        assert isinstance(traceback[-1].path, str)
        assert filter_traceback(traceback[-1])
class TestReportInfo:
    """Item.reportinfo()/location behavior, including custom overrides."""
    def test_itemreport_reportinfo(self, pytester: Pytester) -> None:
        """A custom Function subclass can override reportinfo(); location reflects it."""
        pytester.makeconftest(
            """
            import pytest
            class MyFunction(pytest.Function):
                def reportinfo(self):
                    return "ABCDE", 42, "custom"
            def pytest_pycollect_makeitem(collector, name, obj):
                if name == "test_func":
                    return MyFunction.from_parent(name=name, parent=collector)
        """
        )
        item = pytester.getitem("def test_func(): pass")
        item.config.pluginmanager.getplugin("runner")
        assert item.location == ("ABCDE", 42, "custom")
    def test_func_reportinfo(self, pytester: Pytester) -> None:
        """Default function reportinfo is (path, lineno 0, function name)."""
        item = pytester.getitem("def test_func(): pass")
        path, lineno, modpath = item.reportinfo()
        assert os.fspath(path) == str(item.path)
        assert lineno == 0
        assert modpath == "test_func"
    def test_class_reportinfo(self, pytester: Pytester) -> None:
        """Class reportinfo points at the class definition line."""
        modcol = pytester.getmodulecol(
            """
            # lineno 0
            class TestClass(object):
                def test_hello(self): pass
        """
        )
        classcol = pytester.collect_by_name(modcol, "TestClass")
        assert isinstance(classcol, Class)
        path, lineno, msg = classcol.reportinfo()
        assert os.fspath(path) == str(modcol.path)
        assert lineno == 1
        assert msg == "TestClass"
    @pytest.mark.filterwarnings(
        "ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
    )
    def test_reportinfo_with_nasty_getattr(self, pytester: Pytester) -> None:
        """reportinfo() must not blow up on objects with a lying __getattr__ (#1204)."""
        # https://github.com/pytest-dev/pytest/issues/1204
        modcol = pytester.getmodulecol(
            """
            # lineno 0
            class TestClass(object):
                def __getattr__(self, name):
                    return "this is not an int"
                def intest_foo(self):
                    pass
        """
        )
        classcol = pytester.collect_by_name(modcol, "TestClass")
        assert isinstance(classcol, Class)
        instance = list(classcol.collect())[0]
        assert isinstance(instance, Instance)
        # Calling reportinfo() without raising is the assertion here.
        path, lineno, msg = instance.reportinfo()
def test_customized_python_discovery(pytester: Pytester) -> None:
    """python_files/python_classes/python_functions ini options change discovery."""
    pytester.makeini(
        """
        [pytest]
        python_files=check_*.py
        python_classes=Check
        python_functions=check
    """
    )
    p = pytester.makepyfile(
        """
        def check_simple():
            pass
        class CheckMyApp(object):
            def check_meth(self):
                pass
    """
    )
    # Rename the generated test_*.py so it only matches the custom check_* pattern.
    p2 = p.with_name(p.name.replace("test", "check"))
    p.rename(p2)
    result = pytester.runpytest("--collect-only", "-s")
    result.stdout.fnmatch_lines(
        ["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"]
    )
    result = pytester.runpytest()
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*2 passed*"])
def test_customized_python_discovery_functions(pytester: Pytester) -> None:
    """`python_functions` in the ini file changes which function names are collected."""
    pytester.makeini(
        """
        [pytest]
        python_functions=_test
    """
    )
    pytester.makepyfile(
        """
        def _test_underscore():
            pass
    """
    )
    # First confirm the underscore-prefixed function shows up during collection.
    collect_result = pytester.runpytest("--collect-only", "-s")
    collect_result.stdout.fnmatch_lines(["*_test_underscore*"])
    # Then confirm it actually runs and passes.
    run_result = pytester.runpytest()
    assert run_result.ret == 0
    run_result.stdout.fnmatch_lines(["*1 passed*"])
def test_unorderable_types(pytester: Pytester) -> None:
    """Collection must not raise TypeError when sorting unorderable class objects."""
    pytester.makepyfile(
        """
        class TestJoinEmpty(object):
            pass
        def make_test():
            class Test(object):
                pass
            Test.__name__ = "TestFoo"
            return Test
        TestFoo = make_test()
    """
    )
    result = pytester.runpytest()
    result.stdout.no_fnmatch_line("*TypeError*")
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
@pytest.mark.filterwarnings("default::pytest.PytestCollectionWarning")
def test_dont_collect_non_function_callable(pytester: Pytester) -> None:
    """Test for issue https://github.com/pytest-dev/pytest/issues/331
    In this case an INTERNALERROR occurred trying to report the failure of
    a test like this one because pytest failed to get the source lines.
    """
    pytester.makepyfile(
        """
        class Oh(object):
            def __call__(self):
                pass
        test_a = Oh()
        def test_real():
            pass
    """
    )
    result = pytester.runpytest()
    # The callable instance is warned about and skipped; the real test still runs.
    result.stdout.fnmatch_lines(
        [
            "*collected 1 item*",
            "*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
            "*1 passed, 1 warning in *",
        ]
    )
def test_class_injection_does_not_break_collection(pytester: Pytester) -> None:
    """Tests whether injection during collection time will terminate testing.
    In this case the error should not occur if the TestClass itself
    is modified during collection time, and the original method list
    is still used for collection.
    """
    pytester.makeconftest(
        """
        from test_inject import TestClass
        def pytest_generate_tests(metafunc):
            TestClass.changed_var = {}
    """
    )
    pytester.makepyfile(
        test_inject='''
        class TestClass(object):
            def test_injection(self):
                """Test being parametrized."""
                pass
    '''
    )
    result = pytester.runpytest()
    # Mutating the class while iterating its members used to raise this.
    assert (
        "RuntimeError: dictionary changed size during iteration"
        not in result.stdout.str()
    )
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_syntax_error_with_non_ascii_chars(pytester: Pytester) -> None:
    """Fix decoding issue while formatting SyntaxErrors during collection (#578)."""
    pytester.makepyfile("☃")
    run = pytester.runpytest()
    expected_lines = ["*ERROR collecting*", "*SyntaxError*", "*1 error in*"]
    run.stdout.fnmatch_lines(expected_lines)
def test_collect_error_with_fulltrace(pytester: Pytester) -> None:
    """--fulltrace shows the failing source line for a collection-time assertion."""
    pytester.makepyfile("assert 0")
    result = pytester.runpytest("--fulltrace")
    result.stdout.fnmatch_lines(
        [
            "collected 0 items / 1 error",
            "",
            "*= ERRORS =*",
            "*_ ERROR collecting test_collect_error_with_fulltrace.py _*",
            "",
            "> assert 0",
            "E assert 0",
            "",
            "test_collect_error_with_fulltrace.py:1: AssertionError",
            "*! Interrupted: 1 error during collection !*",
        ]
    )
def test_skip_duplicates_by_default(pytester: Pytester) -> None:
    """Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
    Ignore duplicate directories.
    """
    a = pytester.mkdir("a")
    fh = a.joinpath("test_a.py")
    fh.write_text(
        textwrap.dedent(
            """\
            import pytest
            def test_real():
                pass
            """
        )
    )
    # Passing the same directory twice still collects each test only once.
    result = pytester.runpytest(str(a), str(a))
    result.stdout.fnmatch_lines(["*collected 1 item*"])
def test_keep_duplicates(pytester: Pytester) -> None:
    """Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
    Use --keep-duplicates to collect tests from duplicate directories.
    """
    a = pytester.mkdir("a")
    fh = a.joinpath("test_a.py")
    fh.write_text(
        textwrap.dedent(
            """\
            import pytest
            def test_real():
                pass
            """
        )
    )
    # With --keep-duplicates the directory's test is collected once per argument.
    result = pytester.runpytest("--keep-duplicates", str(a), str(a))
    result.stdout.fnmatch_lines(["*collected 2 item*"])
def test_package_collection_infinite_recursion(pytester: Pytester) -> None:
    """Package collection must terminate on the regression example project and pass."""
    pytester.copy_example("collect/package_infinite_recursion")
    result = pytester.runpytest()
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_package_collection_init_given_as_argument(pytester: Pytester) -> None:
    """Regression test for #3749"""
    p = pytester.copy_example("collect/package_init_given_as_arg")
    # Passing the package's __init__.py directly must still collect the package's test.
    result = pytester.runpytest(p / "pkg" / "__init__.py")
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_package_with_modules(pytester: Pytester) -> None:
    """Tests are found under both package and plain-directory subtrees:
    .
    └── root
        ├── __init__.py
        ├── sub1
        │   ├── __init__.py
        │   └── sub1_1
        │       ├── __init__.py
        │       └── test_in_sub1.py
        └── sub2
            └── test
                └── test_in_sub2.py
    """
    root = pytester.mkpydir("root")
    sub1 = root.joinpath("sub1")
    sub1_test = sub1.joinpath("sub1_1")
    sub1_test.mkdir(parents=True)
    for d in (sub1, sub1_test):
        d.joinpath("__init__.py").touch()
    sub2 = root.joinpath("sub2")
    sub2_test = sub2.joinpath("test")
    sub2_test.mkdir(parents=True)
    sub1_test.joinpath("test_in_sub1.py").write_text("def test_1(): pass")
    sub2_test.joinpath("test_in_sub2.py").write_text("def test_2(): pass")
    # Execute from .
    result = pytester.runpytest("-v", "-s")
    result.assert_outcomes(passed=2)
    # Execute from . with one argument "root"
    result = pytester.runpytest("-v", "-s", "root")
    result.assert_outcomes(passed=2)
    # Chdir into package's root and execute with no args
    os.chdir(root)
    result = pytester.runpytest("-v", "-s")
    result.assert_outcomes(passed=2)
def test_package_ordering(pytester: Pytester) -> None:
    """All three tests are collected across this mixed package/plain layout:
    .
    └── root
        ├── Test_root.py
        ├── __init__.py
        ├── sub1
        │   ├── Test_sub1.py
        │   └── __init__.py
        └── sub2
            └── test
                └── test_sub2.py
    """
    pytester.makeini(
        """
        [pytest]
        python_files=*.py
    """
    )
    root = pytester.mkpydir("root")
    sub1 = root.joinpath("sub1")
    sub1.mkdir()
    sub1.joinpath("__init__.py").touch()
    sub2 = root.joinpath("sub2")
    sub2_test = sub2.joinpath("test")
    sub2_test.mkdir(parents=True)
    root.joinpath("Test_root.py").write_text("def test_1(): pass")
    sub1.joinpath("Test_sub1.py").write_text("def test_2(): pass")
    sub2_test.joinpath("test_sub2.py").write_text("def test_3(): pass")
    # Execute from .
    result = pytester.runpytest("-v", "-s")
    result.assert_outcomes(passed=3)
import os
import sys
import textwrap
from typing import Any
from typing import Dict
import _pytest._code
import pytest
from _pytest.config import ExitCode
from _pytest.main import Session
from _pytest.monkeypatch import MonkeyPatch
from _pytest.nodes import Collector
from _pytest.pytester import Pytester
from _pytest.python import Class
from _pytest.python import Instance
class TestModule:
    """Module collection: import failures, duplicate paths, import modes."""
    def test_failing_import(self, pytester: Pytester) -> None:
        """A module whose import fails raises CollectError at collect time."""
        modcol = pytester.getmodulecol("import alksdjalskdjalkjals")
        pytest.raises(Collector.CollectError, modcol.collect)
    def test_import_duplicate(self, pytester: Pytester) -> None:
        """Two same-named test files in different dirs produce the import-mismatch error."""
        a = pytester.mkdir("a")
        b = pytester.mkdir("b")
        p1 = a.joinpath("test_whatever.py")
        p1.touch()
        p2 = b.joinpath("test_whatever.py")
        p2.touch()
        # Drop any stale module from a previous run so the mismatch is detected fresh.
        sys.modules.pop(p1.stem, None)
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*import*mismatch*",
                "*imported*test_whatever*",
                "*%s*" % p1,
                "*not the same*",
                "*%s*" % p2,
                "*HINT*",
            ]
        )
    def test_import_prepend_append(
        self, pytester: Pytester, monkeypatch: MonkeyPatch
    ) -> None:
        """Default (prepend) import mode finds the test dir's x456; append mode does not."""
        root1 = pytester.mkdir("root1")
        root2 = pytester.mkdir("root2")
        root1.joinpath("x456.py").touch()
        root2.joinpath("x456.py").touch()
        p = root2.joinpath("test_x456.py")
        # root1's copy of x456 is already on sys.path before pytest inserts anything.
        monkeypatch.syspath_prepend(str(root1))
        p.write_text(
            textwrap.dedent(
                """\
                import x456
                def test():
                    assert x456.__file__.startswith({!r})
                """.format(
                    str(root2)
                )
            )
        )
        with monkeypatch.context() as mp:
            mp.chdir(root2)
            # With --import-mode=append, root1's x456 wins and the assertion fails.
            reprec = pytester.inline_run("--import-mode=append")
            reprec.assertoutcome(passed=0, failed=1)
            # With the default mode, root2's x456 is imported and the test passes.
            reprec = pytester.inline_run()
            reprec.assertoutcome(passed=1)
    def test_syntax_error_in_module(self, pytester: Pytester) -> None:
        """Collecting a syntactically invalid module raises CollectError, repeatably."""
        modcol = pytester.getmodulecol("this is a syntax error")
        pytest.raises(modcol.CollectError, modcol.collect)
        pytest.raises(modcol.CollectError, modcol.collect)
    def test_module_considers_pluginmanager_at_import(self, pytester: Pytester) -> None:
        """An unresolvable pytest_plugins entry surfaces as ImportError on module access."""
        modcol = pytester.getmodulecol("pytest_plugins='xasdlkj',")
        pytest.raises(ImportError, lambda: modcol.obj)
    def test_invalid_test_module_name(self, pytester: Pytester) -> None:
        """A dotted filename cannot be imported and yields a helpful hint."""
        a = pytester.mkdir("a")
        a.joinpath("test_one.part1.py").touch()
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*test_one.part1*",
                "Hint: make sure your test modules/packages have valid Python names.",
            ]
        )
    @pytest.mark.parametrize("verbose", [0, 1, 2])
    def test_show_traceback_import_error(
        self, pytester: Pytester, verbose: int
    ) -> None:
        """Import-error tracebacks hide pytest-internal frames unless -vv is given."""
        pytester.makepyfile(
            foo_traceback_import_error="""
               from bar_traceback_import_error import NOT_AVAILABLE
           """,
            bar_traceback_import_error="",
        )
        pytester.makepyfile(
            """
            import foo_traceback_import_error
        """
        )
        args = ("-v",) * verbose
        result = pytester.runpytest(*args)
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*",
                "Traceback:",
                "*from bar_traceback_import_error import NOT_AVAILABLE",
                "*cannot import name *NOT_AVAILABLE*",
            ]
        )
        assert result.ret == 2
        stdout = result.stdout.str()
        # Only -vv (verbose == 2) includes the internal _pytest frames.
        if verbose == 2:
            assert "_pytest" in stdout
        else:
            assert "_pytest" not in stdout
    def test_show_traceback_import_error_unicode(self, pytester: Pytester) -> None:
        """Non-ASCII text in an import-time error message must not break the report."""
        pytester.makepyfile("raise ImportError('Something bad happened ☺')")
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "ImportError while importing test module*",
                "Traceback:",
                "*raise ImportError*Something bad happened*",
            ]
        )
        assert result.ret == 2
class TestClass:
    """Collection behavior for test classes (constructors, staticmethods, lookalikes)."""
    def test_class_with_init_warning(self, pytester: Pytester) -> None:
        """Classes defining __init__ are skipped with a collection warning."""
        pytester.makepyfile(
            """
            class TestClass1(object):
                def __init__(self):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*cannot collect test class 'TestClass1' because it has "
                "a __init__ constructor (from: test_class_with_init_warning.py)"
            ]
        )
    def test_class_with_new_warning(self, pytester: Pytester) -> None:
        """Classes defining __new__ are skipped with a collection warning."""
        pytester.makepyfile(
            """
            class TestClass1(object):
                def __new__(self):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*cannot collect test class 'TestClass1' because it has "
                "a __new__ constructor (from: test_class_with_new_warning.py)"
            ]
        )
    def test_class_subclassobject(self, pytester: Pytester) -> None:
        """A lowercase class name is not collected even though it subclasses object."""
        pytester.getmodulecol(
            """
            class test(object):
                pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*collected 0*"])
    def test_static_method(self, pytester: Pytester) -> None:
        """Static methods are collected as tests and can consume fixtures."""
        pytester.getmodulecol(
            """
            import pytest
            class Test(object):
                @staticmethod
                def test_something():
                    pass
                @pytest.fixture
                def fix(self):
                    return 1
                @staticmethod
                def test_fix(fix):
                    assert fix == 1
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"])
    def test_setup_teardown_class_as_classmethod(self, pytester: Pytester) -> None:
        """setup_class/teardown_class may be declared as classmethods."""
        pytester.makepyfile(
            test_mod1="""
            class TestClassMethod(object):
                @classmethod
                def setup_class(cls):
                    pass
                def test_1(self):
                    pass
                @classmethod
                def teardown_class(cls):
                    pass
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(["*1 passed*"])
    def test_issue1035_obj_has_getattr(self, pytester: Pytester) -> None:
        """A module attribute with a permissive __getattr__ yields no items (#1035)."""
        modcol = pytester.getmodulecol(
            """
            class Chameleon(object):
                def __getattr__(self, name):
                    return True
            chameleon = Chameleon()
        """
        )
        colitems = modcol.collect()
        assert len(colitems) == 0
    def test_issue1579_namedtuple(self, pytester: Pytester) -> None:
        """A namedtuple named Test* warns (it has a __new__) instead of collecting (#1579)."""
        pytester.makepyfile(
            """
            import collections
            TestCase = collections.namedtuple('TestCase', ['a'])
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            "*cannot collect test class 'TestCase' "
            "because it has a __new__ constructor*"
        )
    def test_issue2234_property(self, pytester: Pytester) -> None:
        """Properties on a would-be test class must not be evaluated during collection (#2234)."""
        pytester.makepyfile(
            """
            class TestCase(object):
                @property
                def prop(self):
                    raise NotImplementedError()
        """
        )
        result = pytester.runpytest()
        assert result.ret == ExitCode.NO_TESTS_COLLECTED
class TestFunction:
    def test_getmodulecollector(self, pytester: Pytester) -> None:
        """getparent(pytest.Module) walks up to the enclosing module collector."""
        item = pytester.getitem("def test_func(): pass")
        modcol = item.getparent(pytest.Module)
        assert isinstance(modcol, pytest.Module)
        assert hasattr(modcol.obj, "test_func")
    @pytest.mark.filterwarnings("default")
    def test_function_as_object_instance_ignored(self, pytester: Pytester) -> None:
        """A callable class instance named test_* is warned about, not collected."""
        pytester.makepyfile(
            """
            class A(object):
                def __call__(self, tmp_path):
                    0/0
            test_a = A()
        """
        )
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(
            [
                "collected 0 items",
                "*test_function_as_object_instance_ignored.py:2: "
                "*cannot collect 'test_a' because it is not a function.",
            ]
        )
    @staticmethod
    def make_function(pytester: Pytester, **kwargs: Any) -> Any:
        """Build a pytest.Function attached to a fresh Session (helper for the tests below)."""
        from _pytest.fixtures import FixtureManager
        config = pytester.parseconfigure()
        session = Session.from_config(config)
        # A fixture manager is required before items can be constructed.
        session._fixturemanager = FixtureManager(session)
        return pytest.Function.from_parent(parent=session, **kwargs)
    def test_function_equality(self, pytester: Pytester) -> None:
        """A Function item equals itself; distinct items compare unequal."""
        def func1():
            pass
        def func2():
            pass
        f1 = self.make_function(pytester, name="name", callobj=func1)
        assert f1 == f1
        f2 = self.make_function(
            pytester, name="name", callobj=func2, originalname="foobar"
        )
        assert f1 != f2
    def test_repr_produces_actual_test_id(self, pytester: Pytester) -> None:
        """repr() shows the raw test id without re-escaping the backslash."""
        f = self.make_function(
            pytester, name=r"test[\xe5]", callobj=self.test_repr_produces_actual_test_id
        )
        assert repr(f) == r"<Function test[\xe5]>"
    def test_issue197_parametrize_emptyset(self, pytester: Pytester) -> None:
        """Parametrizing with an empty argvalues list reports the test as skipped (#197)."""
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize('arg', [])
            def test_function(arg):
                pass
        """
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(skipped=1)
    def test_single_tuple_unwraps_values(self, pytester: Pytester) -> None:
        """A one-element argnames tuple unwraps single-value tuples into the argument."""
        pytester.makepyfile(
            """
            import pytest
            @pytest.mark.parametrize(('arg',), [(1,)])
            def test_function(arg):
                assert arg == 1
        """
        )
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=1)
    def test_issue213_parametrize_value_no_equal(self, pytester: Pytester) -> None:
        """Parametrize values whose __eq__ raises must still be usable (#213)."""
        pytester.makepyfile(
            """
            import pytest
            class A(object):
                def __eq__(self, other):
                    raise ValueError("not possible")
            @pytest.mark.parametrize('arg', [A()])
            def test_function(arg):
                assert arg.__class__.__name__ == "A"
        """
        )
        reprec = pytester.inline_run("--fulltrace")
        reprec.assertoutcome(passed=1)
    def test_parametrize_with_non_hashable_values(self, pytester: Pytester) -> None:
        """Unhashable (dict) values work as parametrize arguments."""
        pytester.makepyfile(
            """
            archival_mapping = {
                '1.0': {'tag': '1.0'},
                '1.2.2a1': {'tag': 'release-1.2.2a1'},
            }
            import pytest
            @pytest.mark.parametrize('key value'.split(),
                                     archival_mapping.items())
            def test_archival_to_version(key, value):
                assert key in archival_mapping
                assert value == archival_mapping[key]
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)
    def test_parametrize_with_non_hashable_values_indirect(
        self, pytester: Pytester
    ) -> None:
        """Unhashable parametrize values also work when routed through indirect fixtures."""
        pytester.makepyfile(
            """
            archival_mapping = {
                '1.0': {'tag': '1.0'},
                '1.2.2a1': {'tag': 'release-1.2.2a1'},
            }
            import pytest
            @pytest.fixture
            def key(request):
                return request.param
            @pytest.fixture
            def value(request):
                return request.param
            @pytest.mark.parametrize('key value'.split(),
                                     archival_mapping.items(), indirect=True)
            def test_archival_to_version(key, value):
                assert key in archival_mapping
                assert value == archival_mapping[key]
        """
        )
        rec = pytester.inline_run()
        rec.assertoutcome(passed=2)
def test_parametrize_overrides_fixture(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def value():
return 'value'
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
@pytest.mark.parametrize('somevalue', ['overridden'])
def test_not_overridden(value, somevalue):
assert value == 'value'
assert somevalue == 'overridden'
@pytest.mark.parametrize('other,value', [('foo', 'overridden')])
def test_overridden_via_multiparam(other, value):
assert other == 'foo'
assert value == 'overridden'
"""
)
rec = pytester.inline_run()
rec.assertoutcome(passed=3)
def test_parametrize_overrides_parametrized_fixture(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=[1, 2])
def value(request):
return request.param
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
"""
)
rec = pytester.inline_run()
rec.assertoutcome(passed=1)
def test_parametrize_overrides_indirect_dependency_fixture(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
fix3_instantiated = False
@pytest.fixture
def fix1(fix2):
return fix2 + '1'
@pytest.fixture
def fix2(fix3):
return fix3 + '2'
@pytest.fixture
def fix3():
global fix3_instantiated
fix3_instantiated = True
return '3'
@pytest.mark.parametrize('fix2', ['2'])
def test_it(fix1):
assert fix1 == '21'
assert not fix3_instantiated
"""
)
rec = pytester.inline_run()
rec.assertoutcome(passed=1)
def test_parametrize_with_mark(self, pytester: Pytester) -> None:
items = pytester.getitems(
"""
import pytest
@pytest.mark.foo
@pytest.mark.parametrize('arg', [
1,
pytest.param(2, marks=[pytest.mark.baz, pytest.mark.bar])
])
def test_function(arg):
pass
"""
)
keywords = [item.keywords for item in items]
assert (
"foo" in keywords[0]
and "bar" not in keywords[0]
and "baz" not in keywords[0]
)
assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1]
def test_parametrize_with_empty_string_arguments(self, pytester: Pytester) -> None:
items = pytester.getitems(
"""\
import pytest
@pytest.mark.parametrize('v', ('', ' '))
@pytest.mark.parametrize('w', ('', ' '))
def test(v, w): ...
"""
)
names = {item.name for item in items}
assert names == {"test[-]", "test[ -]", "test[- ]", "test[ - ]"}
def test_function_equality_with_callspec(self, pytester: Pytester) -> None:
items = pytester.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_function(arg):
pass
"""
)
assert items[0] != items[1]
assert not (items[0] == items[1])
def test_pyfunc_call(self, pytester: Pytester) -> None:
item = pytester.getitem("def test_func(): raise ValueError")
config = item.config
class MyPlugin1:
def pytest_pyfunc_call(self):
raise ValueError
class MyPlugin2:
def pytest_pyfunc_call(self):
return True
config.pluginmanager.register(MyPlugin1())
config.pluginmanager.register(MyPlugin2())
config.hook.pytest_runtest_setup(item=item)
config.hook.pytest_pyfunc_call(pyfuncitem=item)
def test_multiple_parametrize(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0, 1])
@pytest.mark.parametrize('y', [2, 3])
def test1(x, y):
pass
"""
)
colitems = modcol.collect()
assert colitems[0].name == "test1[2-0]"
assert colitems[1].name == "test1[2-1]"
assert colitems[2].name == "test1[3-0]"
assert colitems[3].name == "test1[3-1]"
def test_issue751_multiple_parametrize_with_ids(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0], ids=['c'])
@pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])
class Test(object):
def test1(self, x, y):
pass
def test2(self, x, y):
pass
"""
)
colitems = modcol.collect()[0].collect()[0].collect()
assert colitems[0].name == "test1[a-c]"
assert colitems[1].name == "test1[b-c]"
assert colitems[2].name == "test2[a-c]"
assert colitems[3].name == "test2[b-c]"
def test_parametrize_skipif(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.skipif('True')
@pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
def test_skip_if(x):
assert x < 2
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])
def test_parametrize_skip(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.skip('')
@pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
def test_skip(x):
assert x < 2
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])
def test_parametrize_skipif_no_skip(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.skipif('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skipif_no_skip(x):
assert x < 2
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 1 failed, 2 passed in *"])
def test_parametrize_xfail(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
def test_xfail(x):
assert x < 2
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 2 passed, 1 xfailed in *"])
def test_parametrize_passed(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])
def test_xfail(x):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 2 passed, 1 xpassed in *"])
def test_parametrize_xfail_passed(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
m = pytest.mark.xfail('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_passed(x):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 3 passed in *"])
def test_function_originalname(self, pytester: Pytester) -> None:
items = pytester.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_func(arg):
pass
def test_no_param():
pass
"""
)
originalnames = []
for x in items:
assert isinstance(x, pytest.Function)
originalnames.append(x.originalname)
assert originalnames == [
"test_func",
"test_func",
"test_no_param",
]
def test_function_with_square_brackets(self, pytester: Pytester) -> None:
p1 = pytester.makepyfile(
"""
locals()["test_foo[name]"] = lambda: None
"""
)
result = pytester.runpytest("-v", str(p1))
result.stdout.fnmatch_lines(
[
"test_function_with_square_brackets.py::test_foo[[]name[]] PASSED *",
"*= 1 passed in *",
]
)
class TestSorting:
def test_check_equality(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = pytester.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
assert hash(fn1) == hash(fn2)
fn3 = pytester.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3 # type: ignore[comparison-overlap]
assert fn != modcol
assert fn != [1, 2, 3] # type: ignore[comparison-overlap]
assert [1, 2, 3] != fn # type: ignore[comparison-overlap]
assert modcol != fn
def test_allow_sane_sorting_for_decorators(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
def dec(f):
g = lambda: f(2)
g.place_as = f
return g
def test_b(y):
pass
test_b = dec(test_b)
def test_a(y):
pass
test_a = dec(test_a)
"""
)
colitems = modcol.collect()
assert len(colitems) == 2
assert [item.name for item in colitems] == ["test_b", "test_a"]
def test_ordered_by_definition_order(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""\
class Test1:
def test_foo(): pass
def test_bar(): pass
class Test2:
def test_foo(): pass
test_bar = Test1.test_bar
class Test3(Test2):
def test_baz(): pass
"""
)
result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(
[
"*Class Test1*",
"*Function test_foo*",
"*Function test_bar*",
"*Class Test2*",
# previously the order was flipped due to Test1.test_bar reference
"*Function test_foo*",
"*Function test_bar*",
"*Class Test3*",
"*Function test_foo*",
"*Function test_bar*",
"*Function test_baz*",
]
)
class TestConftestCustomization:
def test_pytest_pycollect_module(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_pycollect_makemodule(fspath, parent):
if fspath.name == "test_xyz.py":
return MyModule.from_parent(path=fspath, parent=parent)
"""
)
pytester.makepyfile("def test_some(): pass")
pytester.makepyfile(test_xyz="def test_func(): pass")
result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*<Module*test_pytest*", "*<MyModule*xyz*"])
def test_customized_pymakemodule_issue205_subdir(self, pytester: Pytester) -> None:
b = pytester.path.joinpath("a", "b")
b.mkdir(parents=True)
b.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makemodule():
outcome = yield
mod = outcome.get_result()
mod.obj.hello = "world"
"""
)
)
b.joinpath("test_module.py").write_text(
textwrap.dedent(
"""\
def test_hello():
assert hello == "world"
"""
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_customized_pymakeitem(self, pytester: Pytester) -> None:
b = pytester.path.joinpath("a", "b")
b.mkdir(parents=True)
b.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem():
outcome = yield
if outcome.excinfo is None:
result = outcome.get_result()
if result:
for func in result:
func._some123 = "world"
"""
)
)
b.joinpath("test_module.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture()
def obj(request):
return request.node._some123
def test_hello(obj):
assert obj == "world"
"""
)
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_pytest_pycollect_makeitem(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
def pytest_pycollect_makeitem(collector, name, obj):
if name == "some":
return MyFunction.from_parent(name=name, parent=collector)
"""
)
pytester.makepyfile("def some(): pass")
result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyFunction*some*"])
def test_issue2369_collect_module_fileext(self, pytester: Pytester) -> None:
# We'll implement a little finder and loader to import files containing
pytester.makeconftest(
"""
import sys, os, imp
from _pytest.python import Module
class Loader(object):
def load_module(self, name):
return imp.load_source(name, name + ".narf")
class Finder(object):
def find_module(self, name, path=None):
if os.path.exists(name + ".narf"):
return Loader()
sys.meta_path.append(Finder())
def pytest_collect_file(fspath, parent):
if fspath.suffix == ".narf":
return Module.from_parent(path=fspath, parent=parent)"""
)
pytester.makefile(
".narf",
"""\
def test_something():
assert 1 + 1 == 2""",
)
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_early_ignored_attributes(self, pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
python_classes=*
python_functions=*
"""
)
pytester.makepyfile(
"""
class TestEmpty:
pass
test_empty = TestEmpty()
def test_real():
pass
"""
)
items, rec = pytester.inline_genitems()
assert rec.ret == 0
assert len(items) == 1
def test_setup_only_available_in_subdir(pytester: Pytester) -> None:
sub1 = pytester.mkpydir("sub1")
sub2 = pytester.mkpydir("sub2")
sub1.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.path.stem == "test_in_sub1"
def pytest_runtest_call(item):
assert item.path.stem == "test_in_sub1"
def pytest_runtest_teardown(item):
assert item.path.stem == "test_in_sub1"
"""
)
)
sub2.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.path.stem == "test_in_sub2"
def pytest_runtest_call(item):
assert item.path.stem == "test_in_sub2"
def pytest_runtest_teardown(item):
assert item.path.stem == "test_in_sub2"
"""
)
)
sub1.joinpath("test_in_sub1.py").write_text("def test_1(): pass")
sub2.joinpath("test_in_sub2.py").write_text("def test_2(): pass")
result = pytester.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(pytester: Pytester) -> None:
modcol = pytester.getmodulecol("pass", withinit=False)
trail = modcol.nodeid
newcol = modcol.session.perform_collect([trail], genitems=0)[0]
assert modcol.name == newcol.name
class TestTracebackCutting:
def test_skip_simple(self):
with pytest.raises(pytest.skip.Exception) as excinfo:
pytest.skip("xxx")
assert excinfo.traceback[-1].frame.code.name == "skip"
assert excinfo.traceback[-1].ishidden()
assert excinfo.traceback[-2].frame.code.name == "test_skip_simple"
assert not excinfo.traceback[-2].ishidden()
def test_traceback_argsetup(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def hello(request):
raise ValueError("xyz")
"""
)
p = pytester.makepyfile("def test(hello): pass")
result = pytester.runpytest(p)
assert result.ret != 0
out = result.stdout.str()
assert "xyz" in out
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _") # separator for traceback entries
assert numentries == 0
result = pytester.runpytest("--fulltrace", p)
out = result.stdout.str()
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _ _") # separator for traceback entries
assert numentries > 3
def test_traceback_error_during_import(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
x = 1
x = 2
x = 17
asd
"""
)
result = pytester.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "x = 1" not in out
assert "x = 2" not in out
result.stdout.fnmatch_lines([" *asd*", "E*NameError*"])
result = pytester.runpytest("--fulltrace")
out = result.stdout.str()
assert "x = 1" in out
assert "x = 2" in out
result.stdout.fnmatch_lines([">*asd*", "E*NameError*"])
def test_traceback_filter_error_during_fixture_collection(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
def fail_me(func):
ns = {}
exec('def w(): raise ValueError("fail me")', ns)
return ns['w']
@pytest.fixture(scope='class')
@fail_me
def fail_fixture():
pass
def test_failing_fixture(fail_fixture):
pass
"""
)
result = pytester.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "INTERNALERROR>" not in out
result.stdout.fnmatch_lines(["*ValueError: fail me*", "* 1 error in *"])
def test_filter_traceback_generated_code(self) -> None:
from _pytest._code import filter_traceback
tb = None
try:
ns: Dict[str, Any] = {}
exec("def foo(): raise ValueError", ns)
ns["foo"]()
except ValueError:
_, _, tb = sys.exc_info()
assert tb is not None
traceback = _pytest._code.Traceback(tb)
assert isinstance(traceback[-1].path, str)
assert not filter_traceback(traceback[-1])
def test_filter_traceback_path_no_longer_valid(self, pytester: Pytester) -> None:
from _pytest._code import filter_traceback
pytester.syspathinsert()
pytester.makepyfile(
filter_traceback_entry_as_str="""
def foo():
raise ValueError
"""
)
tb = None
try:
import filter_traceback_entry_as_str
filter_traceback_entry_as_str.foo()
except ValueError:
_, _, tb = sys.exc_info()
assert tb is not None
pytester.path.joinpath("filter_traceback_entry_as_str.py").unlink()
traceback = _pytest._code.Traceback(tb)
assert isinstance(traceback[-1].path, str)
assert filter_traceback(traceback[-1])
class TestReportInfo:
def test_itemreport_reportinfo(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
def reportinfo(self):
return "ABCDE", 42, "custom"
def pytest_pycollect_makeitem(collector, name, obj):
if name == "test_func":
return MyFunction.from_parent(name=name, parent=collector)
"""
)
item = pytester.getitem("def test_func(): pass")
item.config.pluginmanager.getplugin("runner")
assert item.location == ("ABCDE", 42, "custom")
def test_func_reportinfo(self, pytester: Pytester) -> None:
item = pytester.getitem("def test_func(): pass")
path, lineno, modpath = item.reportinfo()
assert os.fspath(path) == str(item.path)
assert lineno == 0
assert modpath == "test_func"
def test_class_reportinfo(self, pytester: Pytester) -> None:
modcol = pytester.getmodulecol(
"""
# lineno 0
class TestClass(object):
def test_hello(self): pass
"""
)
classcol = pytester.collect_by_name(modcol, "TestClass")
assert isinstance(classcol, Class)
path, lineno, msg = classcol.reportinfo()
assert os.fspath(path) == str(modcol.path)
assert lineno == 1
assert msg == "TestClass"
@pytest.mark.filterwarnings(
"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
)
def test_reportinfo_with_nasty_getattr(self, pytester: Pytester) -> None:
# https://github.com/pytest-dev/pytest/issues/1204
modcol = pytester.getmodulecol(
"""
# lineno 0
class TestClass(object):
def __getattr__(self, name):
return "this is not an int"
def intest_foo(self):
pass
"""
)
classcol = pytester.collect_by_name(modcol, "TestClass")
assert isinstance(classcol, Class)
instance = list(classcol.collect())[0]
assert isinstance(instance, Instance)
path, lineno, msg = instance.reportinfo()
def test_customized_python_discovery(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
python_files=check_*.py
python_classes=Check
python_functions=check
"""
)
p = pytester.makepyfile(
"""
def check_simple():
pass
class CheckMyApp(object):
def check_meth(self):
pass
"""
)
p2 = p.with_name(p.name.replace("test", "check"))
p.rename(p2)
result = pytester.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(
["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"]
)
result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
def test_customized_python_discovery_functions(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
python_functions=_test
"""
)
pytester.makepyfile(
"""
def _test_underscore():
pass
"""
)
result = pytester.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(["*_test_underscore*"])
result = pytester.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_unorderable_types(pytester: Pytester) -> None:
pytester.makepyfile(
"""
class TestJoinEmpty(object):
pass
def make_test():
class Test(object):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
"""
)
result = pytester.runpytest()
result.stdout.no_fnmatch_line("*TypeError*")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
@pytest.mark.filterwarnings("default::pytest.PytestCollectionWarning")
def test_dont_collect_non_function_callable(pytester: Pytester) -> None:
pytester.makepyfile(
"""
class Oh(object):
def __call__(self):
pass
test_a = Oh()
def test_real():
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*collected 1 item*",
"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
"*1 passed, 1 warning in *",
]
)
def test_class_injection_does_not_break_collection(pytester: Pytester) -> None:
pytester.makeconftest(
"""
from test_inject import TestClass
def pytest_generate_tests(metafunc):
TestClass.changed_var = {}
"""
)
pytester.makepyfile(
test_inject='''
class TestClass(object):
def test_injection(self):
"""Test being parametrized."""
pass
'''
)
result = pytester.runpytest()
assert (
"RuntimeError: dictionary changed size during iteration"
not in result.stdout.str()
)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_syntax_error_with_non_ascii_chars(pytester: Pytester) -> None:
pytester.makepyfile("☃")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*ERROR collecting*", "*SyntaxError*", "*1 error in*"])
def test_collect_error_with_fulltrace(pytester: Pytester) -> None:
pytester.makepyfile("assert 0")
result = pytester.runpytest("--fulltrace")
result.stdout.fnmatch_lines(
[
"collected 0 items / 1 error",
"",
"*= ERRORS =*",
"*_ ERROR collecting test_collect_error_with_fulltrace.py _*",
"",
"> assert 0",
"E assert 0",
"",
"test_collect_error_with_fulltrace.py:1: AssertionError",
"*! Interrupted: 1 error during collection !*",
]
)
def test_skip_duplicates_by_default(pytester: Pytester) -> None:
a = pytester.mkdir("a")
fh = a.joinpath("test_a.py")
fh.write_text(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = pytester.runpytest(str(a), str(a))
result.stdout.fnmatch_lines(["*collected 1 item*"])
def test_keep_duplicates(pytester: Pytester) -> None:
a = pytester.mkdir("a")
fh = a.joinpath("test_a.py")
fh.write_text(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = pytester.runpytest("--keep-duplicates", str(a), str(a))
result.stdout.fnmatch_lines(["*collected 2 item*"])
def test_package_collection_infinite_recursion(pytester: Pytester) -> None:
pytester.copy_example("collect/package_infinite_recursion")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_package_collection_init_given_as_argument(pytester: Pytester) -> None:
p = pytester.copy_example("collect/package_init_given_as_arg")
result = pytester.runpytest(p / "pkg" / "__init__.py")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_package_with_modules(pytester: Pytester) -> None:
root = pytester.mkpydir("root")
sub1 = root.joinpath("sub1")
sub1_test = sub1.joinpath("sub1_1")
sub1_test.mkdir(parents=True)
for d in (sub1, sub1_test):
d.joinpath("__init__.py").touch()
sub2 = root.joinpath("sub2")
sub2_test = sub2.joinpath("test")
sub2_test.mkdir(parents=True)
sub1_test.joinpath("test_in_sub1.py").write_text("def test_1(): pass")
sub2_test.joinpath("test_in_sub2.py").write_text("def test_2(): pass")
# Execute from .
result = pytester.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
# Execute from . with one argument "root"
result = pytester.runpytest("-v", "-s", "root")
result.assert_outcomes(passed=2)
# Chdir into package's root and execute with no args
os.chdir(root)
result = pytester.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_package_ordering(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
python_files=*.py
"""
)
root = pytester.mkpydir("root")
sub1 = root.joinpath("sub1")
sub1.mkdir()
sub1.joinpath("__init__.py").touch()
sub2 = root.joinpath("sub2")
sub2_test = sub2.joinpath("test")
sub2_test.mkdir(parents=True)
root.joinpath("Test_root.py").write_text("def test_1(): pass")
sub1.joinpath("Test_sub1.py").write_text("def test_2(): pass")
sub2_test.joinpath("test_sub2.py").write_text("def test_3(): pass")
result = pytester.runpytest("-v", "-s")
result.assert_outcomes(passed=3)
| true | true |
1c312e8c942f41e88f77ac62566074eccc27a9e4 | 565 | py | Python | src/basics/factorial_dffierent_operator.py | sungheeyun/PythonLectures | 3a748672bf5b39568b2f42e813a0b9402711ad8e | [
"Unlicense"
] | null | null | null | src/basics/factorial_dffierent_operator.py | sungheeyun/PythonLectures | 3a748672bf5b39568b2f42e813a0b9402711ad8e | [
"Unlicense"
] | null | null | null | src/basics/factorial_dffierent_operator.py | sungheeyun/PythonLectures | 3a748672bf5b39568b2f42e813a0b9402711ad8e | [
"Unlicense"
] | null | null | null | """
Below we show that the statement
result = result * x
can be rewritten to
result *= x
Like this, Python provides operators such as
+=
-=
*=
/=
"""
def factorial(n):
"""
Return the factorial of n.
Parameters
----------
n :
an integer of which the factorial is evaluated.
Returns
-------
result :
The factorial of n.
"""
result = 1
for x in range(2, n + 1):
result *= x
return result
if __name__ == "__main__":
m = 10
print(m, "! =", factorial(m))
| 13.139535 | 55 | 0.511504 |
def factorial(n):
result = 1
for x in range(2, n + 1):
result *= x
return result
if __name__ == "__main__":
m = 10
print(m, "! =", factorial(m))
| true | true |
1c3131316358511257cd0a22ed38d1797a92d67b | 4,000 | py | Python | mycnn/alexnet.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | null | null | null | mycnn/alexnet.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | 20 | 2022-01-24T15:28:48.000Z | 2022-02-13T14:56:25.000Z | mycnn/alexnet.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.keras import layers
from .core.base_model import KerasModel
class LRN(layers.Layer):
""" Implement Local Response Normalization """
def __init__(self,
alpha=0.0001,
k=2,
beta=0.75,
n=5,
**kwargs):
super(LRN, self).__init__(**kwargs)
self.alpha = alpha
self.k = k
self.beta = beta
self.n = n
def call(self, x):
return tf.nn.lrn(x,
depth_radius=self.n,
bias=self.k,
alpha=self.alpha,
beta=self.beta)
def get_config(self):
config = {"alpha": self.alpha,
"k": self.k,
"beta": self.beta,
"n": self.n}
base_config = super(LRN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AlexNet(KerasModel):
""" AlexNet+BN (超參數依照論文設置) """
def __init__(self,
input_shape=(227, 227, 3),
classes_num=10,
**kwargs):
self.input_shape = input_shape
self.classes_num = classes_num
super().__init__(**kwargs)
def build(self, **kwargs):
x_in = layers.Input(shape=self.input_shape, name="image")
x = layers.Conv2D(
filters=96,
kernel_size=(11, 11),
strides=(4, 4),
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
padding='valid'
)(x_in)
x = layers.BatchNormalization()(x)
# x = LRN()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2))(x)
x = layers.Conv2D(
filters=256,
kernel_size=(5, 5),
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
# bias_initializer='ones',
padding='same'
)(x)
x = layers.BatchNormalization()(x)
# x = LRN()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2))(x)
x = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
# bias_initializer='ones',
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(
filters=256,
kernel_size=(3, 3),
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
# bias_initializer='ones',
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(
pool_size=(3, 3),
strides=(2, 2)
)(x)
x = layers.Flatten()(x)
x = layers.Dense(
4096,
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
# bias_initializer='ones'
)(x)
x = layers.ReLU()(x)
x = layers.Dropout(0.5)(x)
x = layers.Dense(
4096,
# kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
# bias_initializer='ones'
)(x)
x = layers.ReLU()(x)
x = layers.Dropout(0.5)(x)
x_out = layers.Dense(self.classes_num, activation='softmax')(x)
self.setup_model(x_in, x_out, name="AlexNet", **kwargs) | 30.534351 | 86 | 0.497 |
import tensorflow as tf
from tensorflow.keras import layers
from .core.base_model import KerasModel
class LRN(layers.Layer):
def __init__(self,
alpha=0.0001,
k=2,
beta=0.75,
n=5,
**kwargs):
super(LRN, self).__init__(**kwargs)
self.alpha = alpha
self.k = k
self.beta = beta
self.n = n
def call(self, x):
return tf.nn.lrn(x,
depth_radius=self.n,
bias=self.k,
alpha=self.alpha,
beta=self.beta)
def get_config(self):
config = {"alpha": self.alpha,
"k": self.k,
"beta": self.beta,
"n": self.n}
base_config = super(LRN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AlexNet(KerasModel):
def __init__(self,
input_shape=(227, 227, 3),
classes_num=10,
**kwargs):
self.input_shape = input_shape
self.classes_num = classes_num
super().__init__(**kwargs)
def build(self, **kwargs):
x_in = layers.Input(shape=self.input_shape, name="image")
x = layers.Conv2D(
filters=96,
kernel_size=(11, 11),
strides=(4, 4),
padding='valid'
)(x_in)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2))(x)
x = layers.Conv2D(
filters=256,
kernel_size=(5, 5),
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2))(x)
x = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(
filters=256,
kernel_size=(3, 3),
padding='same'
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.MaxPooling2D(
pool_size=(3, 3),
strides=(2, 2)
)(x)
x = layers.Flatten()(x)
x = layers.Dense(
4096,
)(x)
x = layers.ReLU()(x)
x = layers.Dropout(0.5)(x)
x = layers.Dense(
4096,
)(x)
x = layers.ReLU()(x)
x = layers.Dropout(0.5)(x)
x_out = layers.Dense(self.classes_num, activation='softmax')(x)
self.setup_model(x_in, x_out, name="AlexNet", **kwargs) | true | true |
1c3131353ea8a23e84ed78c4de8ea42304b785ec | 639 | py | Python | venv/lib/python3.5/site-packages/coalib/results/HiddenResult.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/coalib/results/HiddenResult.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | venv/lib/python3.5/site-packages/coalib/results/HiddenResult.py | prashant0598/CoffeeApp | 4fa006aebf06e12ed34766450ddcfa548ee63307 | [
"MIT"
] | null | null | null | from coalib.results.Result import Result
class HiddenResult(Result):
"""
This is a result that is not meant to be shown to the user. It can be used
to transfer any data from a dependent bear to others.
"""
def __init__(self, origin, contents):
"""
Creates a new HiddenResult. The contents can be accessed with
obj.contents later.
:param origin: The originating bear.
:param contents: Any object that is picklable since it will be
transferred across processes.
"""
Result.__init__(self, origin, '')
self.contents = contents
| 29.045455 | 78 | 0.629108 | from coalib.results.Result import Result
class HiddenResult(Result):
def __init__(self, origin, contents):
Result.__init__(self, origin, '')
self.contents = contents
| true | true |
1c31318d50082c5f4a09647c23e41e0b1e41fae2 | 425 | py | Python | numpoly/__init__.py | FredrikMeyer/numpoly | 8584d96370dd817df713034cc89a140708dd00a9 | [
"BSD-2-Clause"
] | null | null | null | numpoly/__init__.py | FredrikMeyer/numpoly | 8584d96370dd817df713034cc89a140708dd00a9 | [
"BSD-2-Clause"
] | null | null | null | numpoly/__init__.py | FredrikMeyer/numpoly | 8584d96370dd817df713034cc89a140708dd00a9 | [
"BSD-2-Clause"
] | null | null | null | # pylint: disable=wildcard-import
"""Numpoly -- Multivariate polynomials as numpy elements."""
from .baseclass import ndpoly
from .align import (
align_polynomials,
align_exponents,
align_indeterminants,
align_shape,
align_dtype,
)
from .construct import (
polynomial,
aspolynomial,
clean_attributes,
)
from .sympy_ import to_sympy
from .array_function import *
from .poly_function import *
| 20.238095 | 60 | 0.738824 |
from .baseclass import ndpoly
from .align import (
align_polynomials,
align_exponents,
align_indeterminants,
align_shape,
align_dtype,
)
from .construct import (
polynomial,
aspolynomial,
clean_attributes,
)
from .sympy_ import to_sympy
from .array_function import *
from .poly_function import *
| true | true |
1c31325282d4dfe6f0247bcb440be73457176259 | 4,317 | py | Python | plugins/pelican-plugins/liquid_tags/pygalcharts.py | dbgriffith01/blog_source | bc5cd3e1ac1ff068de0cbb78b1470a7db743cd53 | [
"MIT"
] | 4 | 2018-09-18T19:16:44.000Z | 2020-04-30T13:13:29.000Z | plugins/pelican-plugins/liquid_tags/pygalcharts.py | dbgriffith01/blog_source | bc5cd3e1ac1ff068de0cbb78b1470a7db743cd53 | [
"MIT"
] | 120 | 2018-09-01T20:27:51.000Z | 2021-06-30T16:43:12.000Z | pelican-plugins/liquid_tags/pygalcharts.py | JN-Blog/jn-blog.com | 669bf9a9c6813f2b7980792fb137f6718077aea1 | [
"MIT"
] | 3 | 2021-03-24T11:58:31.000Z | 2022-01-12T16:03:06.000Z | """
pygal Tag
---------
This implements a Liquid-style pygal tag for Pelican. JSON is used for the data,
and you can pass a bunch of pygal's 'config' items through as-is
[1] http://www.pygal.org/
Syntax
------
{% pygal
{
<graph data>
}
%}
Examples
--------
{%
pygal {
"type": "bar",
"title": "Test Chart",
"x-labels" : {"from": 2002, "to": 2013},
"data" : [
{"title": "Firefox",
"values": [null, null, 0, 16.6, 25, 31, 36.4, 45.5, 46.3, 42.8, 37.1]},
{"title": "Chrome",
"values": [null, null, null, null, null, null, 0, 3.9, 10.8, 23.8, 35.3]},
{"title": "IE",
"values": [85.8, 84.6, 84.7, 74.5, 66, 58.6, 54.7, 44.8, 36.2, 26.6, 20.1]},
{"title": "Others",
"values": [14.2, 15.4, 15.3, 8.9, 9, 10.4, 8.9, 5.8, 6.7, 6.8, 7.5]}
]
}
%}
{%
pygal {
"type": "pie",
"half_pie": true,
"title": "Browser usage in February 2012 (in %)",
"data" : [
{"title": "IE",
"values": 19.5},
{"title": "Firefox",
"values": 36.6},
{"title": "Chrome",
"values": 36.3},
{"title": "Safari",
"values": 4.5},
{"title": "Opera",
"values": 2.3}
]
}
%}
{%
pygal {
"type": "pie",
"config": {
"show_legend": false,
"print_values": true,
"show_y_labels": true
},
"title": "Browser usage in February 2012 (in %)",
"data" : [
{"title": "IE",
"values": 19.5},
{"title": "Firefox",
"values": 36.6},
{"title": "Chrome",
"values": 36.3},
{"title": "Safari",
"values": 4.5},
{"title": "Opera",
"values": 2.3}
]
}
%}
...
Output
------
<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=SVG_MARKUP_EMBEDDED style="max-width:1000px"/></div>
"""
import base64
import re
from json import loads
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% pygal (data) %}'
DOT_BLOCK_RE = re.compile(r'^\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL)
def run_pygal(data, options=None, format='svg'):
    """Render a pygal chart described by ``data`` and return its image data.

    Parameters
    ----------
    data: dict
        Chart description. Recognized keys: ``type`` (``bar``/``line``/
        ``pie``), ``title``, ``config`` (proxied onto ``pygal.Config``),
        ``horizontal``, ``inner_radius``, ``half_pie``, ``x-labels``
        (explicit list, or ``{"from": int, "to": int}`` range) and
        ``data`` (list of ``{"title", "values"}`` series).
    options, format:
        Unused; kept for backward interface compatibility. ``options``
        previously defaulted to a shared mutable list.

    Returns
    -------
    str or None
        The chart rendered as an SVG data URI, or ``None`` when the chart
        type is missing/unknown.
    """
    import pygal

    chart_type = data.get('type', '').lower()

    # Config options are proxied straight through from the JSON dict into
    # the pygal Config object.
    config = pygal.Config()
    for key, value in data.get('config', {}).items():
        setattr(config, key, value)

    # Initialize to None so an unknown chart type falls through cleanly;
    # previously `chart` was unbound here and the check below raised
    # UnboundLocalError instead of returning None.
    chart = None
    if chart_type == 'bar':
        chart = (pygal.HorizontalBar(config)
                 if data.get('horizontal', False) else pygal.Bar(config))
    elif chart_type == 'line':
        chart = pygal.Line(config)
    elif chart_type == 'pie':
        inner_radius = data.get('inner_radius', 0.0)
        half_pie = data.get('half_pie', False)
        chart = pygal.Pie(config, inner_radius=inner_radius, half_pie=half_pie)
    else:
        print('undefined or unknown chart type')

    if chart is not None:
        chart.title = data.get('title', None)

        # X-axis labels: either an explicit list or a numeric range.
        label_data = data.get('x-labels', None)
        if isinstance(label_data, list):
            chart.x_labels = label_data
        elif isinstance(label_data, dict):
            range_from = label_data.get('from', 0)
            range_to = label_data.get('to', 0)
            # Materialize into a list: a lazy map object would be consumed
            # after a single iteration by pygal.
            chart.x_labels = [str(tick) for tick in range(range_from, range_to)]

        # Insert the data series.
        for data_set in data.get('data', []):
            chart.add(data_set.get('title', None), data_set.get('values', None))

        # Render to an SVG data URI suitable for direct embedding.
        result = chart.render_data_uri()
    else:
        result = None
    return result
@LiquidTags.register('pygal')
def pygal_parser(preprocessor, tag, markup):
    """Liquid-tag entry point: parse the JSON payload and embed the chart."""
    data = loads(markup)
    # Anything other than a well-formed {% pygal ... %} tag is a user error.
    if tag != 'pygal' or data is None:
        raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX))
    # Render the chart and hand back the embedded SVG markup.
    svg_uri = run_pygal(data)
    return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=%s style="max-width:1000px"/></div>' % svg_uri
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| 26.163636 | 140 | 0.562428 |
import base64
import re
from json import loads
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% pygal (data) %}'
DOT_BLOCK_RE = re.compile(r'^\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL)
def run_pygal(data, options=[], format='svg'):
import pygal
chart_title = data.get('title', None)
chart_type = data.get('type', '').lower()
config = pygal.Config()
config_dict = data.get('config', {})
for key in config_dict.keys():
setattr(config, key, config_dict[key])
if chart_type == 'bar':
chart = pygal.HorizontalBar(config) if data.get('horizontal', False) else pygal.Bar(config)
elif chart_type == 'line':
chart = pygal.Line(config)
elif chart_type == 'pie':
ir=data.get('inner_radius', 0.0)
hp=data.get('half_pie', False)
chart = pygal.Pie(config, inner_radius=ir, half_pie=hp)
else:
print('undefined or unknown chart type')
if chart is not None:
chart.title = data.get('title', None)
label_data = data.get('x-labels', None)
if isinstance(label_data, list):
chart.x_labels = label_data
elif isinstance(label_data, dict):
range_from = label_data.get('from', 0)
range_to = label_data.get('to', 0)
chart.x_labels = map(str, range(range_from, range_to))
for data_set in data.get('data', []):
title = data_set.get('title', None)
values = data_set.get('values', None)
chart.add(title, values)
result = chart.render_data_uri()
else:
result = None
return result
@LiquidTags.register('pygal')
def pygal_parser(preprocessor, tag, markup):
data = loads(markup)
if tag == 'pygal' and data is not None:
output = run_pygal(data)
return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=%s style="max-width:1000px"/></div>' % output
else:
raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX))
from .liquid_tags import register
| true | true |
1c313315f6d7710923735974c2cc8f9b448ebeca | 7,248 | py | Python | networking_bagpipe/agent/bagpipe_ml2/agent_extension.py | daespinel/networking-bagpipe-1 | 7e96cc651394813c1dc80747186b6cfcaa173f14 | [
"Apache-2.0"
] | 29 | 2015-11-09T21:47:52.000Z | 2022-01-25T16:03:17.000Z | networking_bagpipe/agent/bagpipe_ml2/agent_extension.py | openstack/networking-bagpipe-l2 | d472fb7b5d05b70f9f4e12288eee1a9a01fdc9fd | [
"Apache-2.0"
] | null | null | null | networking_bagpipe/agent/bagpipe_ml2/agent_extension.py | openstack/networking-bagpipe-l2 | d472fb7b5d05b70f9f4e12288eee1a9a01fdc9fd | [
"Apache-2.0"
] | 9 | 2015-11-17T08:24:32.000Z | 2020-10-25T18:59:48.000Z | # Copyright (c) 2015 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import eventlet
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading # noqa pylint: disable=import-error
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active
from oslo_concurrency import lockutils # noqa: E402
from oslo_config import cfg # noqa: E402
from oslo_config import types # noqa: E402
from oslo_log import helpers as log_helpers # noqa: E402
from oslo_log import log as logging # noqa: E402
from networking_bagpipe.agent import agent_base_info # noqa: E402
from networking_bagpipe.agent import bagpipe_bgp_agent # noqa: E402
from networking_bagpipe.bagpipe_bgp import \
constants as bbgp_const # noqa: E402
from neutron.agent.linux import ip_lib # noqa: E402
from neutron.common import config as common_config # noqa: E402
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as lnx_agt # noqa: E402
from neutron_lib.agent import l2_extension # noqa: E402
from neutron_lib import constants as n_const # noqa: E402
LOG = logging.getLogger(__name__)
BAGPIPE_L2_SERVICE = 'bagpipe_l2'
# Configuration options for the bagpipe ML2 agent extension.
opts = [
    cfg.ListOpt('as_number', default=[64512],
                # 4-byte AS numbers range from 1 to 2**32 - 1 (the previous
                # bound of 2**32 accepted one invalid value)
                item_type=types.Integer(min=1, max=2**32 - 1),
                # Fixed missing spaces in the help text ("RTs forE-VPNs",
                # "possible,to allow").
                help=("Autonomous System number used to generate BGP RTs for "
                      "E-VPNs used by bagpipe ML2 (more than one is possible, "
                      "to allow a deployment to do a 2-step transition "
                      "to change the AS number used)")
                )
]

cfg.CONF.register_opts(opts, "ml2_bagpipe_extension")
class BagpipeML2AgentExtension(l2_extension.L2AgentExtension,
                               agent_base_info.BaseInfoManager):
    """L2 agent extension plugging linuxbridge VXLAN ports into bagpipe-bgp.

    For every VXLAN port handled by the linuxbridge agent, this extension
    derives an E-VPN attachment description (bridge name, VNI, BGP route
    targets built from the configured AS numbers) and asks the bagpipe-bgp
    agent to plug or unplug the port accordingly.
    """

    def initialize(self, connection, driver_type):
        # Single shared bagpipe-bgp agent instance for this L2 agent.
        self.bagpipe_bgp_agent = (
            bagpipe_bgp_agent.BaGPipeBGPAgent.get_instance(
                n_const.AGENT_TYPE_LINUXBRIDGE)
        )
        # bagpipe-bgp calls back into this extension whenever it needs
        # attachment details for one of the ports listed in self.ports.
        self.bagpipe_bgp_agent.register_build_callback(
            BAGPIPE_L2_SERVICE,
            self.build_bagpipe_l2_attach_info)
        self.ports = set()
        self.bagpipe_bgp_agent.register_port_list(BAGPIPE_L2_SERVICE,
                                                  self.ports)

    @log_helpers.log_method_call
    def build_bagpipe_l2_attach_info(self, port_id):
        """Build the E-VPN attachment description for ``port_id``.

        Returns an empty dict when the port is unknown to this extension.
        """
        port_info = self.ports_info.get(port_id)
        if not port_info:
            LOG.debug("no info for port %s", port_id)
            return {}
        LOG.debug("segmentation id: %s", port_info.network.segmentation_id)
        as_numbers = cfg.CONF.ml2_bagpipe_extension.as_number
        # One RT per configured AS number, so a deployment can do a 2-step
        # transition between AS numbers.
        bagpipe_rts = [
            "%s:%s" % (as_number, port_info.network.segmentation_id)
            for as_number in as_numbers
        ]
        attach_info = self._base_attach_info(port_info)
        attach_info.update({
            'linuxbr': lnx_agt.LinuxBridgeManager.get_bridge_name(
                port_info.network.id
            ),
            'vni': port_info.network.segmentation_id,
            bbgp_const.RT_IMPORT: bagpipe_rts,
            bbgp_const.RT_EXPORT: bagpipe_rts
        })
        return {
            'network_id': port_info.network.id,
            bbgp_const.EVPN: [
                attach_info
            ]
        }

    def _base_attach_info(self, port_info):
        # Attachment fields common to plug and unplug: MAC address, tap
        # interface name, and (when known) the port IPv4 address.
        info = {
            'mac_address': port_info.mac_address,
            'local_port': {
                'linuxif': lnx_agt.LinuxBridgeManager.get_tap_device_name(
                    port_info.id)
            }
        }
        if port_info.ip_address:
            info.update({'ip_address': port_info.ip_address})
        return info

    @lockutils.synchronized('bagpipe-ml2')
    @log_helpers.log_method_call
    def handle_port(self, context, data):
        """Process a port update notification from the L2 agent."""
        # Only VXLAN networks are relevant for this extension.
        if data.get('network_type') != n_const.TYPE_VXLAN:
            LOG.debug("network is not of type vxlan, not handled by this "
                      "extension")
            return
        port_id = data['port_id']
        tap_device_name = lnx_agt.LinuxBridgeManager.get_tap_device_name(
            port_id)
        if not ip_lib.device_exists(tap_device_name):
            LOG.debug('skip non-existing port %s', port_id)
            return
        net_id = data['network_id']
        net_info, port_info = (
            self._get_network_port_infos(net_id, port_id)
        )

        # Invoked by update_admin_state when the port goes admin-down.
        def delete_hook():
            self._delete_port(context, {'port_id': port_info.id})

        port_info.update_admin_state(data, delete_hook)
        if not port_info.admin_state_up:
            return
        port_info.mac_address = data['mac_address']
        # take the first IPv4 (debug log if none, warning if many)
        ip_address = None
        for alloc in data.get('fixed_ips'):
            if '.' in alloc['ip_address']:
                if not ip_address:
                    ip_address = alloc['ip_address']
                else:
                    LOG.warning("multiple IPv4 addresses for %s, ignoring %s",
                                port_id, alloc['ip_address'])
        if ip_address is None:
            LOG.debug("no IP address for port %s", port_id)
        port_info.ip_address = ip_address
        net_info.segmentation_id = data['segmentation_id']
        self.bagpipe_bgp_agent.do_port_plug(port_id)
        self.ports.add(port_id)

    @lockutils.synchronized('bagpipe-ml2')
    def delete_port(self, context, data):
        self._delete_port(context, data)

    # un-synchronized version, to be called indirectly from handle_port
    @log_helpers.log_method_call
    def _delete_port(self, context, data):
        port_id = data['port_id']
        port_info = self.ports_info.get(port_id)
        if port_info:
            # Ask bagpipe-bgp to detach using the last known attachment info.
            detach_info = {
                'network_id': port_info.network.id,
                bbgp_const.EVPN: self._base_attach_info(port_info)
            }
            self._remove_network_port_infos(port_info.network.id, port_id)
            self.ports.remove(port_id)
            self.bagpipe_bgp_agent.do_port_plug_refresh(port_id,
                                                        detach_info)
def main():
    """Deprecated entry point: warn that this dedicated agent is obsolete."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    # Fixed a missing space in the deprecation message, which previously
    # rendered as "along withnetworks of type VXLAN".
    LOG.warning('This modified agent is not needed anymore. The normal '
                'neutron linuxbridge agent should be used instead, along with '
                'networks of type VXLAN, rather than RT.')
| 35.356098 | 89 | 0.639901 |
import sys
import eventlet
eventlet.monkey_patch()
import __original_module_threading as orig_threading
import threading
orig_threading.current_thread.__globals__['_active'] = threading._active
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import types
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_bagpipe.agent import agent_base_info
from networking_bagpipe.agent import bagpipe_bgp_agent
from networking_bagpipe.bagpipe_bgp import \
constants as bbgp_const
from neutron.agent.linux import ip_lib
from neutron.common import config as common_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as lnx_agt
from neutron_lib.agent import l2_extension
from neutron_lib import constants as n_const
LOG = logging.getLogger(__name__)
BAGPIPE_L2_SERVICE = 'bagpipe_l2'
opts = [
cfg.ListOpt('as_number', default=[64512],
item_type=types.Integer(min=1, max=2**32),
help=("Autonomous System number used to generate BGP RTs for"
"E-VPNs used by bagpipe ML2 (more than one is possible,"
"to allow a deployment to do a 2-step transition "
"to change the AS number used)")
)
]
cfg.CONF.register_opts(opts, "ml2_bagpipe_extension")
class BagpipeML2AgentExtension(l2_extension.L2AgentExtension,
agent_base_info.BaseInfoManager):
def initialize(self, connection, driver_type):
self.bagpipe_bgp_agent = (
bagpipe_bgp_agent.BaGPipeBGPAgent.get_instance(
n_const.AGENT_TYPE_LINUXBRIDGE)
)
self.bagpipe_bgp_agent.register_build_callback(
BAGPIPE_L2_SERVICE,
self.build_bagpipe_l2_attach_info)
self.ports = set()
self.bagpipe_bgp_agent.register_port_list(BAGPIPE_L2_SERVICE,
self.ports)
@log_helpers.log_method_call
def build_bagpipe_l2_attach_info(self, port_id):
port_info = self.ports_info.get(port_id)
if not port_info:
LOG.debug("no info for port %s", port_id)
return {}
LOG.debug("segmentation id: %s", port_info.network.segmentation_id)
as_numbers = cfg.CONF.ml2_bagpipe_extension.as_number
bagpipe_rts = [
"%s:%s" % (as_number, port_info.network.segmentation_id)
for as_number in as_numbers
]
attach_info = self._base_attach_info(port_info)
attach_info.update({
'linuxbr': lnx_agt.LinuxBridgeManager.get_bridge_name(
port_info.network.id
),
'vni': port_info.network.segmentation_id,
bbgp_const.RT_IMPORT: bagpipe_rts,
bbgp_const.RT_EXPORT: bagpipe_rts
})
return {
'network_id': port_info.network.id,
bbgp_const.EVPN: [
attach_info
]
}
def _base_attach_info(self, port_info):
info = {
'mac_address': port_info.mac_address,
'local_port': {
'linuxif': lnx_agt.LinuxBridgeManager.get_tap_device_name(
port_info.id)
}
}
if port_info.ip_address:
info.update({'ip_address': port_info.ip_address})
return info
@lockutils.synchronized('bagpipe-ml2')
@log_helpers.log_method_call
def handle_port(self, context, data):
if data.get('network_type') != n_const.TYPE_VXLAN:
LOG.debug("network is not of type vxlan, not handled by this "
"extension")
return
port_id = data['port_id']
tap_device_name = lnx_agt.LinuxBridgeManager.get_tap_device_name(
port_id)
if not ip_lib.device_exists(tap_device_name):
LOG.debug('skip non-existing port %s', port_id)
return
net_id = data['network_id']
net_info, port_info = (
self._get_network_port_infos(net_id, port_id)
)
def delete_hook():
self._delete_port(context, {'port_id': port_info.id})
port_info.update_admin_state(data, delete_hook)
if not port_info.admin_state_up:
return
port_info.mac_address = data['mac_address']
ip_address = None
for alloc in data.get('fixed_ips'):
if '.' in alloc['ip_address']:
if not ip_address:
ip_address = alloc['ip_address']
else:
LOG.warning("multiple IPv4 addresses for %s, ignoring %s",
port_id, alloc['ip_address'])
if ip_address is None:
LOG.debug("no IP address for port %s", port_id)
port_info.ip_address = ip_address
net_info.segmentation_id = data['segmentation_id']
self.bagpipe_bgp_agent.do_port_plug(port_id)
self.ports.add(port_id)
@lockutils.synchronized('bagpipe-ml2')
def delete_port(self, context, data):
self._delete_port(context, data)
@log_helpers.log_method_call
def _delete_port(self, context, data):
port_id = data['port_id']
port_info = self.ports_info.get(port_id)
if port_info:
detach_info = {
'network_id': port_info.network.id,
bbgp_const.EVPN: self._base_attach_info(port_info)
}
self._remove_network_port_infos(port_info.network.id, port_id)
self.ports.remove(port_id)
self.bagpipe_bgp_agent.do_port_plug_refresh(port_id,
detach_info)
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
LOG.warning('This modified agent is not needed anymore. The normal '
'neutron linuxbridge agent should be used instead, along with'
'networks of type VXLAN, rather than RT.')
| true | true |
1c31346b9eb7cd50c1cd878990e61732e87c10f5 | 343 | py | Python | wandbox/commands/__init__.py | v1nam/wandbox-cli | 8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b | [
"MIT"
] | 7 | 2021-01-21T18:45:29.000Z | 2021-01-27T06:54:17.000Z | wandbox/commands/__init__.py | v1nam/wandbox-cli | 8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b | [
"MIT"
] | null | null | null | wandbox/commands/__init__.py | v1nam/wandbox-cli | 8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b | [
"MIT"
] | null | null | null | from wandbox.commands.frominput import FromInput
from wandbox.commands.fromfile import FromFile
from wandbox.commands.frombuffer import FromBuffer
from wandbox.commands.base import Base
# Dispatch table mapping CLI sub-command names to their handler callables.
commands_dict = {
    "fromfile": FromFile.runfile,
    "frominput": FromInput.askinp,
    "frombuffer": FromBuffer.create_buffer,
    "base": Base.run,
}
| 26.384615 | 50 | 0.77551 | from wandbox.commands.frominput import FromInput
from wandbox.commands.fromfile import FromFile
from wandbox.commands.frombuffer import FromBuffer
from wandbox.commands.base import Base
commands_dict = {
"fromfile": FromFile.runfile,
"frominput": FromInput.askinp,
"frombuffer": FromBuffer.create_buffer,
"base": Base.run,
}
| true | true |
1c3134fd41be915b03b8899512c41b8f42be8099 | 11,064 | py | Python | deepchem/feat/smiles_tokenizer.py | StashOfCode/deepchem | 6c5a5405acea333ee7a65a798ddb5c9df702a0b8 | [
"MIT"
] | 3 | 2019-05-29T19:18:25.000Z | 2021-01-25T05:44:05.000Z | deepchem/feat/smiles_tokenizer.py | StashOfCode/deepchem | 6c5a5405acea333ee7a65a798ddb5c9df702a0b8 | [
"MIT"
] | 10 | 2017-02-23T19:39:22.000Z | 2017-08-31T22:21:18.000Z | deepchem/feat/smiles_tokenizer.py | StashOfCode/deepchem | 6c5a5405acea333ee7a65a798ddb5c9df702a0b8 | [
"MIT"
] | 1 | 2018-09-22T00:53:53.000Z | 2018-09-22T00:53:53.000Z | # Requriments - transformers, tokenizers
# Right now, the SMILES tokenizer uses an existing vocab file from rxnfp that is fairly comprehensive and was built from the USPTO dataset.
# The vocab may be expanded in the near future.
import collections
import os
import re
import pkg_resources
from typing import List
from transformers import BertTokenizer
from logging import getLogger
logger = getLogger(__name__)
"""
SMI_REGEX_PATTERN: str
SMILES regex pattern for tokenization. Designed by Schwaller et. al.
References
.. [1] Philippe Schwaller, Teodoro Laino, Théophile Gaudin, Peter Bolgar, Christopher A. Hunter, Costas Bekas, and Alpha A. Lee
ACS Central Science 2019 5 (9): Molecular Transformer: A Model for Uncertainty-Calibrated Chemical Reaction Prediction
1572-1583 DOI: 10.1021/acscentsci.9b00576
"""
SMI_REGEX_PATTERN = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|
#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
# add vocab_file dict
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
def get_default_tokenizer():
  """Build a SmilesTokenizer backed by DeepChem's bundled default vocab file."""
  vocab_path = pkg_resources.resource_filename("deepchem",
                                               "feat/tests/vocab.txt")
  return SmilesTokenizer(vocab_path)
class SmilesTokenizer(BertTokenizer):
  """
  Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer
  implementation found in Huggingface's transformers library. It runs a WordPiece tokenization
  algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et al.

  Please see https://github.com/huggingface/transformers
  and https://github.com/rxn4chemistry/rxnfp for more details.

  Examples
  --------
  >>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer
  >>> current_dir = os.path.dirname(os.path.realpath(__file__))
  >>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')
  >>> tokenizer = SmilesTokenizer(vocab_path)
  >>> print(tokenizer.encode("CC(=O)OC1=CC=CC=C1C(=O)O"))
  [12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]

  References
  ----------
  .. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;
     Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural
     Networks. ChemRxiv. Preprint. https://doi.org/10.26434/chemrxiv.9897365.v3

  Notes
  ----
  This class requires huggingface's transformers and tokenizers libraries to be installed.
  """
  # Tells the BertTokenizer machinery the expected vocabulary file name.
  vocab_files_names = VOCAB_FILES_NAMES

  def __init__(
      self,
      vocab_file: str = '',
      # unk_token="[UNK]",
      # sep_token="[SEP]",
      # pad_token="[PAD]",
      # cls_token="[CLS]",
      # mask_token="[MASK]",
      **kwargs):
    """Constructs a SmilesTokenizer.

    Parameters
    ----------
    vocab_file: str
      Path to a SMILES character per line vocabulary file.
      Default vocab file is found in deepchem/feat/tests/data/vocab.txt

    Raises
    ------
    ValueError
      If ``vocab_file`` does not point to an existing file.
    """

    super().__init__(vocab_file, **kwargs)
    # take into account special tokens in max length
    # NOTE(review): relies on BertTokenizer.max_len, which newer transformers
    # releases renamed to model_max_length -- presumably this module is pinned
    # to an older transformers version; confirm.
    self.max_len_single_sentence = self.max_len - 2
    self.max_len_sentences_pair = self.max_len - 3

    if not os.path.isfile(vocab_file):
      raise ValueError(
          "Can't find a vocab file at path '{}'.".format(vocab_file))
    self.vocab = load_vocab(vocab_file)
    # Position of the last "[unused...]" placeholder token in the vocab
    # (raises ValueError if the vocab has no such placeholder tokens).
    self.highest_unused_index = max(
        [i for i, v in enumerate(self.vocab.keys()) if v.startswith("[unused")])
    # Reverse mapping: id -> token, preserving vocabulary order.
    self.ids_to_tokens = collections.OrderedDict(
        [(ids, tok) for tok, ids in self.vocab.items()])
    self.basic_tokenizer = BasicSmilesTokenizer()

    self.init_kwargs["max_len"] = self.max_len

  @property
  def vocab_size(self):
    # Number of entries in the loaded vocabulary.
    return len(self.vocab)

  @property
  def vocab_list(self):
    # Vocabulary tokens in insertion (id) order.
    return list(self.vocab.keys())

  def _tokenize(self, text: str):
    """
    Tokenize a string into a list of tokens.

    Parameters
    ----------
    text: str
      Input string sequence to be tokenized.
    """

    split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]
    return split_tokens

  def _convert_token_to_id(self, token):
    """
    Converts a token (str/unicode) in an id using the vocab.
    Unknown tokens map to the id of ``self.unk_token``.

    Parameters
    ----------
    token: str
      String token from a larger sequence to be converted to a numerical id.
    """

    return self.vocab.get(token, self.vocab.get(self.unk_token))

  def _convert_id_to_token(self, index):
    """
    Converts an index (integer) in a token (string/unicode) using the vocab.
    Unknown indices map to ``self.unk_token``.

    Parameters
    ----------
    index: int
      Integer index to be converted back to a string-based token as part of a larger sequence.
    """

    return self.ids_to_tokens.get(index, self.unk_token)

  def convert_tokens_to_string(self, tokens: List[str]):
    """ Converts a sequence of tokens (string) in a single string.

    Parameters
    ----------
    tokens: List[str]
      List of tokens for a given string sequence.

    Returns
    -------
    out_string: str
      Single string from combined tokens.
    """

    # " ##" marks WordPiece continuation pieces; joining then removing it
    # reconstructs the original surface string.
    out_string: str = " ".join(tokens).replace(" ##", "").strip()
    return out_string

  def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):
    """
    Adds special tokens to the a sequence for sequence classification tasks.

    A BERT sequence has the following format: [CLS] X [SEP]

    Parameters
    ----------
    token_ids: list[int]
      list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
    """

    return [self.cls_token_id] + token_ids + [self.sep_token_id]

  def add_special_tokens_single_sequence(self, tokens: List[str]):
    """
    Adds special tokens to the a sequence for sequence classification tasks.

    A BERT sequence has the following format: [CLS] X [SEP]

    Parameters
    ----------
    tokens: List[str]
      List of tokens for a given string sequence.
    """

    return [self.cls_token] + tokens + [self.sep_token]

  def add_special_tokens_ids_sequence_pair(self, token_ids_0: List[int],
                                           token_ids_1: List[int]) -> List[int]:
    """
    Adds special tokens to a sequence pair for sequence classification tasks.

    A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]

    Parameters
    ----------
    token_ids_0: List[int]
      List of ids for the first string sequence in the sequence pair (A).
    token_ids_1: List[int]
      List of tokens for the second string sequence in the sequence pair (B).
    """

    sep = [self.sep_token_id]
    cls = [self.cls_token_id]

    return cls + token_ids_0 + sep + token_ids_1 + sep

  def add_padding_tokens(self,
                         token_ids: List[int],
                         length: int,
                         right: bool = True) -> List[int]:
    """
    Adds padding tokens to return a sequence of length max_length.

    By default padding tokens are added to the right of the sequence.

    Parameters
    ----------
    token_ids: list[int]
      list of tokenized input ids. Can be obtained using the encode or encode_plus methods.
    length: int
      Target length of the padded sequence.
    right: bool (True by default)
      If True, pad on the right; otherwise pad on the left.

    Returns
    -------
    list[int]
      The input ids extended with pad-token ids up to ``length``.
    """

    padding = [self.pad_token_id] * (length - len(token_ids))

    if right:
      return token_ids + padding
    else:
      return padding + token_ids

  def save_vocabulary(
      self, vocab_path: str
  ):  # -> tuple[str]: doctest issue raised with this return type annotation
    """
    Save the tokenizer vocabulary to a file.

    Parameters
    ----------
    vocab_path: obj: str
      The directory in which to save the SMILES character per line vocabulary file.
      Default vocab file is found in deepchem/feat/tests/data/vocab.txt

    Returns
    -------
    vocab_file: :obj:`Tuple(str)`:
      Paths to the files saved.
      tuple with string to a SMILES character per line vocabulary file.
      Default vocab file is found in deepchem/feat/tests/data/vocab.txt
    """

    index = 0
    if os.path.isdir(vocab_path):
      vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
    else:
      vocab_file = vocab_path
    with open(vocab_file, "w", encoding="utf-8") as writer:
      # Write tokens sorted by id; warn when ids are not consecutive, since
      # the line number is the implicit id on reload.
      for token, token_index in sorted(
          self.vocab.items(), key=lambda kv: kv[1]):
        if index != token_index:
          logger.warning(
              "Saving vocabulary to {}: vocabulary indices are not consecutive."
              " Please check that the vocabulary is not corrupted!".format(
                  vocab_file))
          index = token_index
        writer.write(token + "\n")
        index += 1
    return (vocab_file,)
class BasicSmilesTokenizer(object):
  """Regex-based SMILES tokenizer with no transformers dependency.

  Splits a SMILES string into chemically meaningful tokens using the
  regular expression published by Schwaller et al.

  Examples
  --------
  >>> from deepchem.feat.smiles_tokenizer import BasicSmilesTokenizer
  >>> tokenizer = BasicSmilesTokenizer()
  >>> print(tokenizer.tokenize("CC(=O)OC1=CC=CC=C1C(=O)O"))
  ['C', 'C', '(', '=', 'O', ')', 'O', 'C', '1', '=', 'C', 'C', '=', 'C', 'C', '=', 'C', '1', 'C', '(', '=', 'O', ')', 'O']

  References
  ----------
  .. [1] Philippe Schwaller, Teodoro Laino, Théophile Gaudin, Peter Bolgar, Christopher A. Hunter, Costas Bekas, and Alpha A. Lee
     ACS Central Science 2019 5 (9): Molecular Transformer: A Model for Uncertainty-Calibrated Chemical Reaction Prediction
     1572-1583 DOI: 10.1021/acscentsci.9b00576
  """

  def __init__(self, regex_pattern: str = SMI_REGEX_PATTERN):
    """Create a BasicSmilesTokenizer.

    Parameters
    ----------
    regex_pattern: str
      Regular expression used to split SMILES strings into tokens.
    """
    self.regex_pattern = regex_pattern
    # Compile once up front; tokenize() may be called in tight loops.
    self.regex = re.compile(self.regex_pattern)

  def tokenize(self, text):
    """Return the list of SMILES tokens found in ``text``."""
    return list(self.regex.findall(text))
def load_vocab(vocab_file):
  """Read a one-token-per-line vocabulary file into an ordered token->id map."""
  with open(vocab_file, "r", encoding="utf-8") as reader:
    lines = reader.readlines()
  # Strip only the trailing newline; the line number is the token id.
  vocab = collections.OrderedDict(
      (line.rstrip("\n"), idx) for idx, line in enumerate(lines))
  return vocab
| 33.026866 | 132 | 0.62491 |
import collections
import os
import re
import pkg_resources
from typing import List
from transformers import BertTokenizer
from logging import getLogger
logger = getLogger(__name__)
SMI_REGEX_PATTERN = r"""(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|
#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"""
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
def get_default_tokenizer():
default_vocab_path = (pkg_resources.resource_filename("deepchem",
"feat/tests/vocab.txt"))
return SmilesTokenizer(default_vocab_path)
class SmilesTokenizer(BertTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
def __init__(
self,
vocab_file: str = '',
**kwargs):
super().__init__(vocab_file, **kwargs)
self.max_len_single_sentence = self.max_len - 2
self.max_len_sentences_pair = self.max_len - 3
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocab file at path '{}'.".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.highest_unused_index = max(
[i for i, v in enumerate(self.vocab.keys()) if v.startswith("[unused")])
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicSmilesTokenizer()
self.init_kwargs["max_len"] = self.max_len
@property
def vocab_size(self):
return len(self.vocab)
@property
def vocab_list(self):
return list(self.vocab.keys())
def _tokenize(self, text: str):
split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]):
out_string: str = " ".join(tokens).replace(" ##", "").strip()
return out_string
def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):
return [self.cls_token_id] + token_ids + [self.sep_token_id]
def add_special_tokens_single_sequence(self, tokens: List[str]):
return [self.cls_token] + tokens + [self.sep_token]
def add_special_tokens_ids_sequence_pair(self, token_ids_0: List[int],
token_ids_1: List[int]) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def add_padding_tokens(self,
token_ids: List[int],
length: int,
right: bool = True) -> List[int]:
padding = [self.pad_token_id] * (length - len(token_ids))
if right:
return token_ids + padding
else:
return padding + token_ids
def save_vocabulary(
self, vocab_path: str
): # -> tuple[str]: doctest issue raised with this return type annotation
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES["vocab_file"])
else:
vocab_file = vocab_path
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(
self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(
vocab_file))
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicSmilesTokenizer(object):
  """Regex-driven SMILES tokenizer.

  Splits a SMILES string into tokens by running the supplied regular
  expression over it (defaults to the module-level SMI_REGEX_PATTERN).
  """

  def __init__(self, regex_pattern: str = SMI_REGEX_PATTERN):
    self.regex_pattern = regex_pattern
    self.regex = re.compile(regex_pattern)

  def tokenize(self, text):
    """Return the list of regex matches found in ``text``."""
    return self.regex.findall(text)
def load_vocab(vocab_file):
  """Read a one-token-per-line vocab file into an OrderedDict token -> index.

  Only the trailing newline is stripped from each line, mirroring how the
  file is written (token + "\\n"); other whitespace is preserved.
  """
  vocab = collections.OrderedDict()
  with open(vocab_file, "r", encoding="utf-8") as reader:
    lines = reader.readlines()
  for idx, line in enumerate(lines):
    vocab[line.rstrip("\n")] = idx
  return vocab
| true | true |
1c31354c7f061a127eb92c549b1b49593a89649b | 3,235 | py | Python | profiles_project/settings.py | kenbusse1/profiles-rest-api | 5344db9a91667f55bbfec87497eb11617afee314 | [
"MIT"
] | null | null | null | profiles_project/settings.py | kenbusse1/profiles-rest-api | 5344db9a91667f55bbfec87497eb11617afee314 | [
"MIT"
] | 6 | 2020-02-12T03:12:05.000Z | 2021-06-09T18:48:58.000Z | profiles_project/settings.py | kenbusse1/profiles-rest-api | 5344db9a91667f55bbfec87497eb11617afee314 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded value is kept only as a development fallback; set the
# DJANGO_SECRET_KEY environment variable in any real deployment.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY', '-6but+soqw9&)!j1e(cbvmbr+8yfp!+l@9rm$d(fzc^#d0uk#8')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Django REST framework plus token auth for the API endpoints.
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'profiles_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'profiles_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'

# Use the custom user model from the profiles_api app.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.88 | 91 | 0.699227 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '-6but+soqw9&)!j1e(cbvmbr+8yfp!+l@9rm$d(fzc^#d0uk#8'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| true | true |
1c3136359d7c666764adf29db4f5c6f2be46a2e4 | 3,159 | py | Python | Algorithms_Course_1/Assignments/Algorithms_PA_1.py | vivekgoe/stanford_algorithms_courses | 79afa0348dc4ecd8f631537b27e34c330abe773a | [
"MIT"
] | null | null | null | Algorithms_Course_1/Assignments/Algorithms_PA_1.py | vivekgoe/stanford_algorithms_courses | 79afa0348dc4ecd8f631537b27e34c330abe773a | [
"MIT"
] | null | null | null | Algorithms_Course_1/Assignments/Algorithms_PA_1.py | vivekgoe/stanford_algorithms_courses | 79afa0348dc4ecd8f631537b27e34c330abe773a | [
"MIT"
] | null | null | null | import time
import math

# Global counters: number of single-digit base-case multiplications performed
# by each algorithm (used to compare the naive 4-product recursion with
# Karatsuba's 3-product recursion).
count_naive = 0
count = 0


def mul_large_naive(N1, N2):
    """Multiply two equal-length decimal digit strings; return an int.

    Classic 4-product divide and conquer: split each operand into high/low
    halves and recurse.  The inputs must have identical power-of-two
    lengths, because the recursion assumes both halves of every split have
    the same size.  Increments the global ``count_naive`` once per
    single-digit multiply.
    """
    global count_naive
    assert len(N1) == len(N2)
    n = len(N1)
    if n > 1:
        half = n // 2
        a, b = N1[:half], N1[half:]   # high / low halves of N1
        c, d = N2[:half], N2[half:]   # high / low halves of N2
        p1 = mul_large_naive(a, c)
        p2 = mul_large_naive(a, d)
        p3 = mul_large_naive(b, c)
        p4 = mul_large_naive(b, d)
        # N1*N2 = ac*10^n + (ad + bc)*10^(n/2) + bd   (n even)
        return 10 ** n * p1 + 10 ** half * (p2 + p3) + p4
    # Base case: single digits -- the only real multiplication.
    assert len(N1) == 1 and len(N2) == 1
    count_naive += 1
    return int(N1) * int(N2)


def mul_large(N1, N2):
    """Karatsuba multiplication of two equal-length decimal digit strings.

    Uses three recursive products instead of four:

        N1*N2 = ac*10^(2m) + ((a+b)(c+d) - ac - bd)*10^m + bd,  m = n - n//2

    Unlike ``mul_large_naive`` this handles any equal length (odd lengths
    included), because the middle product's operands are zero-padded up to
    a common power-of-two length before recursing.  For even lengths the
    shifts reduce to the original 10^n / 10^(n/2), so behaviour (and the
    recursion count) is unchanged for the original inputs.  Increments the
    global ``count`` once per single-digit multiply.
    """
    global count
    assert len(N1) == len(N2)
    n = len(N1)
    if n > 1:
        half = n // 2       # digits in the high halves
        m = n - half        # digits in the low halves (the shift amount)
        p1 = p2 = p3 = 0
        a, b = N1[:half], N1[half:]
        c, d = N2[:half], N2[half:]
        if int(a) != 0 and int(c) != 0:
            p1 = mul_large(a, c)                            # ac
        t1 = str(int(a) + int(b))
        t2 = str(int(c) + int(d))
        if int(t1) != 0 and int(t2) != 0:
            # Pad both sums to one power-of-two length so the recursive
            # call sees equal-length operands (replaces the two duplicated
            # padding branches of the original).
            tlen = 2 ** math.ceil(math.log2(max(len(t1), len(t2))))
            p2 = mul_large(t1.zfill(tlen), t2.zfill(tlen))  # (a+b)(c+d)
        if int(b) != 0 and int(d) != 0:
            p3 = mul_large(b, d)                            # bd
        return 10 ** (2 * m) * p1 + 10 ** m * (p2 - p1 - p3) + p3
    assert len(N1) == 1 and len(N2) == 1
    count += 1
    return int(N1) * int(N2)


if __name__ == '__main__':
    Input1 = "3141592653589793238462643383279502884197169399375105820974944592"
    Input2 = "2718281828459045235360287471352662497757247093699959574966967627"
    start = time.time()
    output = mul_large(Input1, Input2)
    print("% s seconds" % (time.time() - start))
    start = time.time()
    output_naive = mul_large_naive(Input1, Input2)
    print("% s seconds" % (time.time() - start))
    print(output, count)
    print(output_naive, count_naive)
    # The three differences below should all print 0.
    print(output - output_naive)
    print(output - int(Input1) * int(Input2))
    print(output_naive - int(Input1) * int(Input2))

# Solution to Problem Set#1
# 1 - nlog(n)
# 2 - True
# 3 - Sometimes yes, sometimes no (depending on f & g); yes if f(n) <= g(n) for all sufficiently large n
# 4 - Omega(nk^2)
# 5 - 2^(sqrt(log(n)) < sqrt(n) < n^1.5 < n^(5/3) < 10^n
| 34.714286 | 104 | 0.552073 | import time
import math
count_naive = 0
count = 0
def mul_large_naive(N1, N2):
global count_naive
assert(N1.__len__() == N2.__len__())
len = N1.__len__()
if len > 1:
a = N1[0:int(len/2)]
b = N1[int(len/2):int(len)]
c = N2[0:int(len/2)]
d = N2[int(len/2):int(len)]
p1 = mul_large_naive(a,c)
p2 = mul_large_naive(a,d)
p3 = mul_large_naive(b,c)
p4 = mul_large_naive(b,d)
return 10**int(len)*int(p1) + 10**int(len/2)*(int(p2)+int(p3)) + int(p4)
count_naive = count_naive + 1
assert(N1.__len__() == 1)
assert(N2.__len__() == 1)
return int(N1)*int(N2)
def mul_large(N1, N2):
global count
assert(N1.__len__() == N2.__len__())
len = N1.__len__()
if len > 1:
p1 = p2 = p3 = 0
a = N1[0:int(len/2)]
b = N1[int(len/2):int(len)]
c = N2[0:int(len/2)]
d = N2[int(len/2):int(len)]
if int(a) != 0 and int(c) != 0:
p1 = mul_large(a,c)
temp1 = str(int(a)+int(b))
temp2 = str(int(c)+int(d))
if int(temp1) != 0 and int(temp2) != 0:
if temp1.__len__() > temp2.__len__():
tlen = int(math.ceil(math.log2(int(temp1.__len__()))))
tlen = 2**tlen
temp1 = '0'*(tlen-temp1.__len__()) + temp1
temp2 = '0'*(tlen-temp2.__len__()) + temp2
else:
tlen = int(math.ceil(math.log2(int(temp2.__len__()))))
tlen = 2**tlen
temp1 = '0'*(tlen-temp1.__len__()) + temp1
temp2 = '0'*(tlen-temp2.__len__()) + temp2
p2 = mul_large(temp1,temp2)
if int(b) != 0 and int(d) != 0:
p3 = mul_large(b,d)
return 10**int(len)*int(p1) + 10**int(len/2)*(int(p2)-int(p1)-int(p3)) + int(p3)
count = count + 1
assert(N1.__len__() == 1)
assert(N2.__len__() == 1)
return int(N1)*int(N2)
if __name__ == '__main__':
Input1 = "3141592653589793238462643383279502884197169399375105820974944592"
Input2 = "2718281828459045235360287471352662497757247093699959574966967627"
start = time.time()
output = mul_large(Input1,Input2)
print("% s seconds" % (time.time() - start))
start = time.time()
output_naive = mul_large_naive(Input1,Input2)
print("% s seconds" % (time.time() - start))
print(output, count)
print(output_naive,count_naive)
print(output - output_naive)
print(output - int(Input1)*int(Input2))
print(output_naive - int(Input1)*int(Input2))
| true | true |
1c31365bc2dbe1275ef4e8e056e716303fbecc05 | 13,411 | py | Python | electrum/gui/qt/transaction_dialog.py | traysi/electrum-raven | b2a64a459da32afd2987149460253cfadec03384 | [
"MIT"
] | 5 | 2018-10-31T18:47:54.000Z | 2021-09-20T02:04:42.000Z | electrum/gui/qt/transaction_dialog.py | project-mynt/electrum-mynt | ca1548e008854f2a3eff900a69365307cc20bd57 | [
"MIT"
] | null | null | null | electrum/gui/qt/transaction_dialog.py | project-mynt/electrum-mynt | ca1548e008854f2a3eff900a69365307cc20bd57 | [
"MIT"
] | 11 | 2018-10-31T19:46:05.000Z | 2019-09-25T20:18:37.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import datetime
import json
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import qrcode
from qrcode import exceptions
from electrum.bitcoin import base_encode
from electrum.i18n import _
from electrum.plugin import run_hook
from electrum import simple_config
from electrum.util import bfh
from electrum.transaction import SerializationError, Transaction
from .util import *
# Tooltips for the two states of the Save button (set in TxDialog).
SAVE_BUTTON_ENABLED_TOOLTIP = _("Save transaction offline")
SAVE_BUTTON_DISABLED_TOOLTIP = _("Please sign this transaction in order to save it")
dialogs = []  # Otherwise python randomly garbage collects the dialogs...
def show_transaction(tx, parent, desc=None, prompt_if_unsaved=False):
    """Open a top-level TxDialog for *tx*, keeping a module-level reference.

    Deserialization problems are reported to the user instead of raised.
    """
    try:
        dialog = TxDialog(tx, parent, desc, prompt_if_unsaved)
    except SerializationError as e:
        # sys is expected to be in scope via `from .util import *`.
        traceback.print_exc(file=sys.stderr)
        parent.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
        return
    dialogs.append(dialog)
    dialog.show()
class TxDialog(QDialog, MessageBoxMixin):
    """Top-level window displaying one transaction, with sign / broadcast /
    save / export / QR actions.  Instances are kept alive through the
    module-level ``dialogs`` list."""
    def __init__(self, tx, parent, desc, prompt_if_unsaved):
        '''Transactions in the wallet will show their description.
        Pass desc to give a description for txs not yet in the wallet.
        '''
        # We want to be a top-level window
        QDialog.__init__(self, parent=None)
        # Take a copy; it might get updated in the main window by
        # e.g. the FX plugin. If this happens during or after a long
        # sign operation the signatures are lost.
        self.tx = tx = copy.deepcopy(tx)  # type: Transaction
        try:
            self.tx.deserialize()
        except BaseException as e:
            raise SerializationError(e)
        self.main_window = parent
        self.wallet = parent.wallet
        self.prompt_if_unsaved = prompt_if_unsaved
        self.saved = False
        self.desc = desc
        # if the wallet can populate the inputs with more info, do it now.
        # as a result, e.g. we might learn an imported address tx is segwit,
        # in which case it's ok to display txid
        self.wallet.add_input_info_to_all_inputs(tx)
        self.setMinimumWidth(950)
        self.setWindowTitle(_("Transaction"))
        vbox = QVBoxLayout()
        self.setLayout(vbox)
        vbox.addWidget(QLabel(_("Transaction ID:")))
        self.tx_hash_e = ButtonsLineEdit()
        qr_show = lambda: parent.show_qrcode(str(self.tx_hash_e.text()), 'Transaction ID', parent=self)
        self.tx_hash_e.addButton(":icons/qrcode.png", qr_show, _("Show as QR code"))
        self.tx_hash_e.setReadOnly(True)
        vbox.addWidget(self.tx_hash_e)
        # Labels below are (re)filled by update().
        self.tx_desc = QLabel()
        vbox.addWidget(self.tx_desc)
        self.status_label = QLabel()
        vbox.addWidget(self.status_label)
        self.date_label = QLabel()
        vbox.addWidget(self.date_label)
        self.amount_label = QLabel()
        vbox.addWidget(self.amount_label)
        self.size_label = QLabel()
        vbox.addWidget(self.size_label)
        self.fee_label = QLabel()
        vbox.addWidget(self.fee_label)
        self.add_io(vbox)
        self.sign_button = b = QPushButton(_("Sign"))
        b.clicked.connect(self.sign)
        self.broadcast_button = b = QPushButton(_("Broadcast"))
        b.clicked.connect(self.do_broadcast)
        self.save_button = b = QPushButton(_("Save"))
        save_button_disabled = not tx.is_complete()
        b.setDisabled(save_button_disabled)
        if save_button_disabled:
            b.setToolTip(SAVE_BUTTON_DISABLED_TOOLTIP)
        else:
            b.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
        b.clicked.connect(self.save)
        self.export_button = b = QPushButton(_("Export"))
        b.clicked.connect(self.export)
        self.cancel_button = b = QPushButton(_("Close"))
        b.clicked.connect(self.close)
        b.setDefault(True)
        self.qr_button = b = QPushButton()
        b.setIcon(QIcon(":icons/qrcode.png"))
        b.clicked.connect(self.show_qr)
        self.copy_button = CopyButton(lambda: str(self.tx), parent.app)
        # Action buttons
        self.buttons = [self.sign_button, self.broadcast_button, self.cancel_button]
        # Transaction sharing buttons
        self.sharing_buttons = [self.copy_button, self.qr_button, self.export_button, self.save_button]
        run_hook('transaction_dialog', self)
        hbox = QHBoxLayout()
        hbox.addLayout(Buttons(*self.sharing_buttons))
        hbox.addStretch(1)
        hbox.addLayout(Buttons(*self.buttons))
        vbox.addLayout(hbox)
        self.update()
    def do_broadcast(self):
        """Broadcast the tx via the main window; mark the dialog as saved."""
        self.main_window.push_top_level_window(self)
        try:
            self.main_window.broadcast_transaction(self.tx, self.desc)
        finally:
            self.main_window.pop_top_level_window(self)
        self.saved = True
        self.update()
    def closeEvent(self, event):
        """Optionally prompt before closing an unsaved transaction."""
        if (self.prompt_if_unsaved and not self.saved
                and not self.question(_('This transaction is not saved. Close anyway?'), title=_("Warning"))):
            event.ignore()
        else:
            event.accept()
            try:
                dialogs.remove(self)
            except ValueError:
                pass  # was not in list already
    def reject(self):
        # Override escape-key to close normally (and invoke closeEvent)
        self.close()
    def show_qr(self):
        """Show the raw transaction as a QR code (base43-encoded)."""
        text = bfh(str(self.tx))
        text = base_encode(text, base=43)
        try:
            self.main_window.show_qrcode(text, 'Transaction', parent=self)
        except qrcode.exceptions.DataOverflowError:
            self.show_error(_('Failed to display QR code.') + '\n' +
                            _('Transaction is too large in size.'))
        except Exception as e:
            self.show_error(_('Failed to display QR code.') + '\n' + str(e))
    def sign(self):
        """Ask the main window to sign; re-enable Save once complete."""
        def sign_done(success):
            # note: with segwit we could save partially signed tx, because they have a txid
            if self.tx.is_complete():
                self.prompt_if_unsaved = True
                self.saved = False
                self.save_button.setDisabled(False)
                self.save_button.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
            self.update()
            self.main_window.pop_top_level_window(self)
        self.sign_button.setDisabled(True)
        self.main_window.push_top_level_window(self)
        self.main_window.sign_tx(self.tx, sign_done)
    def save(self):
        """Store the transaction into the wallet (offline save)."""
        self.main_window.push_top_level_window(self)
        if self.main_window.save_transaction_into_wallet(self.tx):
            self.save_button.setDisabled(True)
            self.saved = True
        self.main_window.pop_top_level_window(self)
    def export(self):
        """Write the transaction to a user-chosen .txn file as JSON."""
        name = 'signed_%s.txn' % (self.tx.txid()[0:8]) if self.tx.is_complete() else 'unsigned.txn'
        fileName = self.main_window.getSaveFileName(_("Select where to save your signed transaction"), name, "*.txn")
        if fileName:
            with open(fileName, "w+") as f:
                f.write(json.dumps(self.tx.as_dict(), indent=4) + '\n')
            self.show_message(_("Transaction exported successfully"))
            self.saved = True
    def update(self):
        """Refresh every label and button state from the wallet's view of the tx."""
        desc = self.desc
        base_unit = self.main_window.base_unit()
        format_amount = self.main_window.format_amount
        tx_hash, status, label, can_broadcast, can_rbf, amount, fee, height, conf, timestamp, exp_n = self.wallet.get_tx_info(self.tx)
        size = self.tx.estimated_size()
        self.broadcast_button.setEnabled(can_broadcast)
        can_sign = not self.tx.is_complete() and \
            (self.wallet.can_sign(self.tx) or bool(self.main_window.tx_external_keypairs))
        self.sign_button.setEnabled(can_sign)
        self.tx_hash_e.setText(tx_hash or _('Unknown'))
        if desc is None:
            self.tx_desc.hide()
        else:
            self.tx_desc.setText(_("Description") + ': ' + desc)
            self.tx_desc.show()
        self.status_label.setText(_('Status:') + ' ' + status)
        if timestamp:
            time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
            self.date_label.setText(_("Date: {}").format(time_str))
            self.date_label.show()
        # elif exp_n:
        #     text = '%.2f MB'%(exp_n/1000000)
        #     self.date_label.setText(_('Position in mempool: {} from tip').format(text))
        #     self.date_label.show()
        else:
            self.date_label.hide()
        if amount is None:
            amount_str = _("Transaction unrelated to your wallet")
        elif amount > 0:
            amount_str = _("Amount received:") + ' %s'% format_amount(amount) + ' ' + base_unit
        else:
            amount_str = _("Amount sent:") + ' %s'% format_amount(-amount) + ' ' + base_unit
        size_str = _("Size:") + ' %d bytes'% size
        fee_str = _("Fee") + ': %s' % (format_amount(fee) + ' ' + base_unit if fee is not None else _('unknown'))
        if fee is not None:
            # Fee rate in sat per 1000 bytes; warn above the configured threshold.
            fee_rate = fee/size*1000
            fee_str += ' ( %s ) ' % self.main_window.format_fee_rate(fee_rate)
            confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
            if fee_rate > confirm_rate:
                fee_str += ' - ' + _('Warning') + ': ' + _("high fee") + '!'
        self.amount_label.setText(amount_str)
        self.fee_label.setText(fee_str)
        self.size_label.setText(size_str)
        run_hook('transaction_dialog_update', self)
    def add_io(self, vbox):
        """Append the input/output views (colour-coded addresses) to vbox."""
        if self.tx.locktime > 0:
            vbox.addWidget(QLabel("LockTime: %d\n" % self.tx.locktime))
        vbox.addWidget(QLabel(_("Inputs") + ' (%d)'%len(self.tx.inputs())))
        ext = QTextCharFormat()
        rec = QTextCharFormat()
        rec.setBackground(QBrush(ColorScheme.GREEN.as_color(background=True)))
        rec.setToolTip(_("Wallet receive address"))
        chg = QTextCharFormat()
        chg.setBackground(QBrush(ColorScheme.YELLOW.as_color(background=True)))
        chg.setToolTip(_("Wallet change address"))
        twofactor = QTextCharFormat()
        twofactor.setBackground(QBrush(ColorScheme.BLUE.as_color(background=True)))
        twofactor.setToolTip(_("TrustedCoin (2FA) fee for the next batch of transactions"))
        def text_format(addr):
            # Pick a char format by address ownership:
            # change / receive / billing (2FA) / external.
            if self.wallet.is_mine(addr):
                return chg if self.wallet.is_change(addr) else rec
            elif self.wallet.is_billing_address(addr):
                return twofactor
            return ext
        def format_amount(amt):
            return self.main_window.format_amount(amt, whitespaces=True)
        i_text = QTextEditWithDefaultSize()
        i_text.setFont(QFont(MONOSPACE_FONT))
        i_text.setReadOnly(True)
        cursor = i_text.textCursor()
        for x in self.tx.inputs():
            if x['type'] == 'coinbase':
                cursor.insertText('coinbase')
            else:
                prevout_hash = x.get('prevout_hash')
                prevout_n = x.get('prevout_n')
                cursor.insertText(prevout_hash + ":%-4d " % prevout_n, ext)
                addr = self.wallet.get_txin_address(x)
                if addr is None:
                    addr = ''
                cursor.insertText(addr, text_format(addr))
                if x.get('value'):
                    cursor.insertText(format_amount(x['value']), ext)
            cursor.insertBlock()
        vbox.addWidget(i_text)
        vbox.addWidget(QLabel(_("Outputs") + ' (%d)'%len(self.tx.outputs())))
        o_text = QTextEditWithDefaultSize()
        o_text.setFont(QFont(MONOSPACE_FONT))
        o_text.setReadOnly(True)
        cursor = o_text.textCursor()
        for o in self.tx.get_outputs_for_UI():
            addr, v = o.address, o.value
            cursor.insertText(addr, text_format(addr))
            if v is not None:
                cursor.insertText('\t', ext)
                cursor.insertText(format_amount(v), ext)
            cursor.insertBlock()
        vbox.addWidget(o_text)
class QTextEditWithDefaultSize(QTextEdit):
    """QTextEdit with a modest preferred height (100px) so the input/output
    lists do not dominate the dialog layout."""
    def sizeHint(self):
        return QSize(0, 100)
| 39.560472 | 134 | 0.634628 |
import copy
import datetime
import json
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import qrcode
from qrcode import exceptions
from electrum.bitcoin import base_encode
from electrum.i18n import _
from electrum.plugin import run_hook
from electrum import simple_config
from electrum.util import bfh
from electrum.transaction import SerializationError, Transaction
from .util import *
SAVE_BUTTON_ENABLED_TOOLTIP = _("Save transaction offline")
SAVE_BUTTON_DISABLED_TOOLTIP = _("Please sign this transaction in order to save it")
dialogs = []
def show_transaction(tx, parent, desc=None, prompt_if_unsaved=False):
try:
d = TxDialog(tx, parent, desc, prompt_if_unsaved)
except SerializationError as e:
traceback.print_exc(file=sys.stderr)
parent.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
else:
dialogs.append(d)
d.show()
class TxDialog(QDialog, MessageBoxMixin):
def __init__(self, tx, parent, desc, prompt_if_unsaved):
QDialog.__init__(self, parent=None)
self.tx = tx = copy.deepcopy(tx)
try:
self.tx.deserialize()
except BaseException as e:
raise SerializationError(e)
self.main_window = parent
self.wallet = parent.wallet
self.prompt_if_unsaved = prompt_if_unsaved
self.saved = False
self.desc = desc
self.wallet.add_input_info_to_all_inputs(tx)
self.setMinimumWidth(950)
self.setWindowTitle(_("Transaction"))
vbox = QVBoxLayout()
self.setLayout(vbox)
vbox.addWidget(QLabel(_("Transaction ID:")))
self.tx_hash_e = ButtonsLineEdit()
qr_show = lambda: parent.show_qrcode(str(self.tx_hash_e.text()), 'Transaction ID', parent=self)
self.tx_hash_e.addButton(":icons/qrcode.png", qr_show, _("Show as QR code"))
self.tx_hash_e.setReadOnly(True)
vbox.addWidget(self.tx_hash_e)
self.tx_desc = QLabel()
vbox.addWidget(self.tx_desc)
self.status_label = QLabel()
vbox.addWidget(self.status_label)
self.date_label = QLabel()
vbox.addWidget(self.date_label)
self.amount_label = QLabel()
vbox.addWidget(self.amount_label)
self.size_label = QLabel()
vbox.addWidget(self.size_label)
self.fee_label = QLabel()
vbox.addWidget(self.fee_label)
self.add_io(vbox)
self.sign_button = b = QPushButton(_("Sign"))
b.clicked.connect(self.sign)
self.broadcast_button = b = QPushButton(_("Broadcast"))
b.clicked.connect(self.do_broadcast)
self.save_button = b = QPushButton(_("Save"))
save_button_disabled = not tx.is_complete()
b.setDisabled(save_button_disabled)
if save_button_disabled:
b.setToolTip(SAVE_BUTTON_DISABLED_TOOLTIP)
else:
b.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
b.clicked.connect(self.save)
self.export_button = b = QPushButton(_("Export"))
b.clicked.connect(self.export)
self.cancel_button = b = QPushButton(_("Close"))
b.clicked.connect(self.close)
b.setDefault(True)
self.qr_button = b = QPushButton()
b.setIcon(QIcon(":icons/qrcode.png"))
b.clicked.connect(self.show_qr)
self.copy_button = CopyButton(lambda: str(self.tx), parent.app)
# Action buttons
self.buttons = [self.sign_button, self.broadcast_button, self.cancel_button]
# Transaction sharing buttons
self.sharing_buttons = [self.copy_button, self.qr_button, self.export_button, self.save_button]
run_hook('transaction_dialog', self)
hbox = QHBoxLayout()
hbox.addLayout(Buttons(*self.sharing_buttons))
hbox.addStretch(1)
hbox.addLayout(Buttons(*self.buttons))
vbox.addLayout(hbox)
self.update()
def do_broadcast(self):
self.main_window.push_top_level_window(self)
try:
self.main_window.broadcast_transaction(self.tx, self.desc)
finally:
self.main_window.pop_top_level_window(self)
self.saved = True
self.update()
def closeEvent(self, event):
if (self.prompt_if_unsaved and not self.saved
and not self.question(_('This transaction is not saved. Close anyway?'), title=_("Warning"))):
event.ignore()
else:
event.accept()
try:
dialogs.remove(self)
except ValueError:
pass # was not in list already
def reject(self):
# Override escape-key to close normally (and invoke closeEvent)
self.close()
def show_qr(self):
text = bfh(str(self.tx))
text = base_encode(text, base=43)
try:
self.main_window.show_qrcode(text, 'Transaction', parent=self)
except qrcode.exceptions.DataOverflowError:
self.show_error(_('Failed to display QR code.') + '\n' +
_('Transaction is too large in size.'))
except Exception as e:
self.show_error(_('Failed to display QR code.') + '\n' + str(e))
def sign(self):
def sign_done(success):
# note: with segwit we could save partially signed tx, because they have a txid
if self.tx.is_complete():
self.prompt_if_unsaved = True
self.saved = False
self.save_button.setDisabled(False)
self.save_button.setToolTip(SAVE_BUTTON_ENABLED_TOOLTIP)
self.update()
self.main_window.pop_top_level_window(self)
self.sign_button.setDisabled(True)
self.main_window.push_top_level_window(self)
self.main_window.sign_tx(self.tx, sign_done)
def save(self):
self.main_window.push_top_level_window(self)
if self.main_window.save_transaction_into_wallet(self.tx):
self.save_button.setDisabled(True)
self.saved = True
self.main_window.pop_top_level_window(self)
def export(self):
name = 'signed_%s.txn' % (self.tx.txid()[0:8]) if self.tx.is_complete() else 'unsigned.txn'
fileName = self.main_window.getSaveFileName(_("Select where to save your signed transaction"), name, "*.txn")
if fileName:
with open(fileName, "w+") as f:
f.write(json.dumps(self.tx.as_dict(), indent=4) + '\n')
self.show_message(_("Transaction exported successfully"))
self.saved = True
def update(self):
desc = self.desc
base_unit = self.main_window.base_unit()
format_amount = self.main_window.format_amount
tx_hash, status, label, can_broadcast, can_rbf, amount, fee, height, conf, timestamp, exp_n = self.wallet.get_tx_info(self.tx)
size = self.tx.estimated_size()
self.broadcast_button.setEnabled(can_broadcast)
can_sign = not self.tx.is_complete() and \
(self.wallet.can_sign(self.tx) or bool(self.main_window.tx_external_keypairs))
self.sign_button.setEnabled(can_sign)
self.tx_hash_e.setText(tx_hash or _('Unknown'))
if desc is None:
self.tx_desc.hide()
else:
self.tx_desc.setText(_("Description") + ': ' + desc)
self.tx_desc.show()
self.status_label.setText(_('Status:') + ' ' + status)
if timestamp:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
self.date_label.setText(_("Date: {}").format(time_str))
self.date_label.show()
# elif exp_n:
# text = '%.2f MB'%(exp_n/1000000)
# self.date_label.setText(_('Position in mempool: {} from tip').format(text))
# self.date_label.show()
else:
self.date_label.hide()
if amount is None:
amount_str = _("Transaction unrelated to your wallet")
elif amount > 0:
amount_str = _("Amount received:") + ' %s'% format_amount(amount) + ' ' + base_unit
else:
amount_str = _("Amount sent:") + ' %s'% format_amount(-amount) + ' ' + base_unit
size_str = _("Size:") + ' %d bytes'% size
fee_str = _("Fee") + ': %s' % (format_amount(fee) + ' ' + base_unit if fee is not None else _('unknown'))
if fee is not None:
fee_rate = fee/size*1000
fee_str += ' ( %s ) ' % self.main_window.format_fee_rate(fee_rate)
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee_rate > confirm_rate:
fee_str += ' - ' + _('Warning') + ': ' + _("high fee") + '!'
self.amount_label.setText(amount_str)
self.fee_label.setText(fee_str)
self.size_label.setText(size_str)
run_hook('transaction_dialog_update', self)
def add_io(self, vbox):
if self.tx.locktime > 0:
vbox.addWidget(QLabel("LockTime: %d\n" % self.tx.locktime))
vbox.addWidget(QLabel(_("Inputs") + ' (%d)'%len(self.tx.inputs())))
ext = QTextCharFormat()
rec = QTextCharFormat()
rec.setBackground(QBrush(ColorScheme.GREEN.as_color(background=True)))
rec.setToolTip(_("Wallet receive address"))
chg = QTextCharFormat()
chg.setBackground(QBrush(ColorScheme.YELLOW.as_color(background=True)))
chg.setToolTip(_("Wallet change address"))
twofactor = QTextCharFormat()
twofactor.setBackground(QBrush(ColorScheme.BLUE.as_color(background=True)))
twofactor.setToolTip(_("TrustedCoin (2FA) fee for the next batch of transactions"))
def text_format(addr):
if self.wallet.is_mine(addr):
return chg if self.wallet.is_change(addr) else rec
elif self.wallet.is_billing_address(addr):
return twofactor
return ext
def format_amount(amt):
return self.main_window.format_amount(amt, whitespaces=True)
i_text = QTextEditWithDefaultSize()
i_text.setFont(QFont(MONOSPACE_FONT))
i_text.setReadOnly(True)
cursor = i_text.textCursor()
for x in self.tx.inputs():
if x['type'] == 'coinbase':
cursor.insertText('coinbase')
else:
prevout_hash = x.get('prevout_hash')
prevout_n = x.get('prevout_n')
cursor.insertText(prevout_hash + ":%-4d " % prevout_n, ext)
addr = self.wallet.get_txin_address(x)
if addr is None:
addr = ''
cursor.insertText(addr, text_format(addr))
if x.get('value'):
cursor.insertText(format_amount(x['value']), ext)
cursor.insertBlock()
vbox.addWidget(i_text)
vbox.addWidget(QLabel(_("Outputs") + ' (%d)'%len(self.tx.outputs())))
o_text = QTextEditWithDefaultSize()
o_text.setFont(QFont(MONOSPACE_FONT))
o_text.setReadOnly(True)
cursor = o_text.textCursor()
for o in self.tx.get_outputs_for_UI():
addr, v = o.address, o.value
cursor.insertText(addr, text_format(addr))
if v is not None:
cursor.insertText('\t', ext)
cursor.insertText(format_amount(v), ext)
cursor.insertBlock()
vbox.addWidget(o_text)
class QTextEditWithDefaultSize(QTextEdit):
    """QTextEdit whose preferred height is pinned to 100px to keep the dialog compact."""
    def sizeHint(self):
        # Width 0 lets the layout choose the width; only the height is fixed.
        return QSize(0, 100)
| true | true |
1c3136a13a110e5c45f53c6aeb6f0ed4d8822808 | 358 | py | Python | leetcode/0062_unique-paths.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | leetcode/0062_unique-paths.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | leetcode/0062_unique-paths.py | heyf/cloaked-octo-adventure | 8180684a8a1859efb836edd48556b5f3088be398 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=62 lang=python3
#
# [62] Unique Paths
#
# @lc code=start
class Solution:
    """LeetCode 62: count monotone lattice paths in an m x n grid."""

    def uniquePaths(self, m: int, n: int) -> int:
        """Return the number of right/down paths from the top-left to the
        bottom-right corner of a grid with m rows and n columns.

        Uses a rolling one-dimensional DP row: row[col] holds the number of
        paths that reach column ``col`` of the grid row processed so far.
        """
        row = [1] * n
        for _ in range(m - 1):
            for col in range(1, n):
                row[col] = row[col] + row[col - 1]
        return row[n - 1]
# @lc code=end
# Ad-hoc smoke test: a 7x3 grid has 28 unique paths.
s = Solution()
print(s.uniquePaths(7,3))
| 18.842105 | 49 | 0.52514 |
class Solution:
    # LeetCode 62: count monotone (right/down) lattice paths in an m x n grid.
    def uniquePaths(self, m: int, n: int) -> int:
        # Rolling 1-D DP row: dp[j] = number of paths reaching column j of the
        # current grid row; each pass folds in the row above.
        dp = [ 1 for i in range(n)]
        for _ in range(1,m):
            for j in range(1,n):
                dp[j] += dp[j-1]
        return dp[-1]
# Ad-hoc smoke test: a 7x3 grid has 28 unique paths.
s = Solution()
print(s.uniquePaths(7,3))
| true | true |
1c313710cd0098036ec3618c2755120adfb5e3a8 | 455 | py | Python | util/tools.py | QuietWoods/patent-rewrite | 3bf85a3a5a0dfc7caaf602fc32fe3b727da01944 | [
"Apache-2.0"
] | null | null | null | util/tools.py | QuietWoods/patent-rewrite | 3bf85a3a5a0dfc7caaf602fc32fe3b727da01944 | [
"Apache-2.0"
] | null | null | null | util/tools.py | QuietWoods/patent-rewrite | 3bf85a3a5a0dfc7caaf602fc32fe3b727da01944 | [
"Apache-2.0"
] | 1 | 2020-09-09T14:49:25.000Z | 2020-09-09T14:49:25.000Z | # -*- coding: utf-8 -*-
# @Time : 2018/4/24 16:52
# @Author : Wang Lei
# @FileName: tools.py
# @Software: PyCharm
# @Email :1258481281@qq.com
import os
from PatentRewrite.util.gensim_word2vec import Word2vec, Sentences
from PatentRewrite.util.settings import WORD2VEC, PATENTS, TEMP_PATENTS
def train_word2vec():
    """Train (or retrain) the word2vec model stored under the WORD2VEC directory."""
    model_dir = WORD2VEC
    word2vec = Word2vec(model_dir)
    word2vec.train()
if __name__ == "__main__":
    train_word2vec()
| 20.681818 | 71 | 0.705495 |
import os
from PatentRewrite.util.gensim_word2vec import Word2vec, Sentences
from PatentRewrite.util.settings import WORD2VEC, PATENTS, TEMP_PATENTS
def train_word2vec():
    """Train (or retrain) the word2vec model stored under the WORD2VEC directory."""
    model_dir = WORD2VEC
    word2vec = Word2vec(model_dir)
    word2vec.train()
if __name__ == "__main__":
    train_word2vec()
| true | true |
1c31377812b00907ea0d4ff34371845055ca7a6c | 1,563 | py | Python | tests/conftest.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | tests/conftest.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | tests/conftest.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | import os.path
from pytest import fixture
from tempfile import TemporaryDirectory
from subprocess import Popen, PIPE, run
from time import sleep
class Configuration:
    """Simple attribute bag: every keyword argument becomes an instance attribute."""

    def __init__(self, **opts):
        # Mirror the keyword mapping straight onto the instance dictionary.
        self.__dict__.update(opts)
class DisqueNode:
    """Manage a local ``disque-server`` child process for tests.

    The server listens on the given TCP port and on a unix socket inside
    *dir*; :meth:`start` blocks until the server answers an ``info`` command.
    """

    def __init__(self, port, dir):
        self.port = port
        self.dir = dir
        self.proc = None  # Popen handle while the server is running
        self.socket = os.path.join(dir, 'disque.sock')

    def start(self):
        """Launch the server (if not already running) and wait until it responds."""
        if not self.proc:
            cmd = ["disque-server",
                   "--port", str(self.port),
                   "--dir", self.dir,
                   "--unixsocket", self.socket,
                   "--unixsocketperm", "755"]
            self.proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        cmd = ['disque', '-p', str(self.port), 'info']
        while True:
            sleep(.01)
            # poll() returns None while the child is alive; ANY other value
            # (including exit code 0, which is falsy) means the server died
            # before becoming ready.  The old truthiness test missed a clean
            # exit and would spin forever.
            if self.proc.poll() is not None:
                raise Exception('already stopped!', self.proc.stderr)
            resp = run(cmd, stdout=PIPE, stderr=PIPE)
            if not resp.returncode:
                break

    def stop(self):
        """Kill the server process and forget the handle."""
        self.proc.kill()
        self.proc = None

    @property
    def configuration(self):
        """Snapshot of the connection details (port, data dir, unix socket)."""
        return Configuration(port=self.port, dir=self.dir, socket=self.socket)
@fixture(scope='function')
def node(request):
    """Pytest fixture: start a throwaway disque server for a single test.

    Returns the server's connection Configuration; the server and its
    temporary data directory are torn down when the test finishes.
    """
    tmp_dir = TemporaryDirectory()
    node = DisqueNode(port=7711, dir=tmp_dir.name)
    node.start()
    def teardown():
        # Kill the server first, then remove its data directory.
        node.stop()
        tmp_dir.cleanup()
    request.addfinalizer(teardown)
    return node.configuration
| 26.05 | 78 | 0.568138 | import os.path
from pytest import fixture
from tempfile import TemporaryDirectory
from subprocess import Popen, PIPE, run
from time import sleep
class Configuration:
    """Simple attribute bag: every keyword argument becomes an instance attribute."""
    def __init__(self, **opts):
        for k, v in opts.items():
            setattr(self, k, v)
class DisqueNode:
    """Manage a local disque-server child process for tests."""
    def __init__(self, port, dir):
        self.port = port
        self.dir = dir
        self.proc = None  # Popen handle while the server is running
        self.socket = os.path.join(dir, 'disque.sock')
    def start(self):
        # Launch the server (if not already running) and poll with
        # "disque info" until it answers.
        if not self.proc:
            cmd = ["disque-server",
                   "--port", str(self.port),
                   "--dir", self.dir,
                   "--unixsocket", self.socket,
                   "--unixsocketperm", "755"]
            self.proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        cmd = ['disque', '-p', str(self.port), 'info']
        while True:
            sleep(.01)
            # NOTE(review): poll() returns None while alive; this truthiness
            # test misses a child that exited with code 0 — confirm intent.
            if self.proc.poll():
                raise Exception('already stopped!', self.proc.stderr)
            resp = run(cmd, stdout=PIPE, stderr=PIPE)
            if not resp.returncode:
                break
    def stop(self):
        # Kill the server process and forget the handle.
        self.proc.kill()
        self.proc = None
    @property
    def configuration(self):
        # Snapshot of the connection details (port, data dir, unix socket).
        return Configuration(port=self.port, dir=self.dir, socket=self.socket)
@fixture(scope='function')
def node(request):
    """Pytest fixture: start a throwaway disque server for a single test."""
    tmp_dir = TemporaryDirectory()
    node = DisqueNode(port=7711, dir=tmp_dir.name)
    node.start()
    def teardown():
        # Kill the server first, then remove its data directory.
        node.stop()
        tmp_dir.cleanup()
    request.addfinalizer(teardown)
    return node.configuration
| true | true |
1c3138931e6dc6d58b703a644a6df7d2ebb08023 | 485 | py | Python | TOR/ConnectionsHandler/Models/DNSExitNode.py | AmerJod/Tor_DNS-Servers | bd95ff28bb697a4e0ba7d0276b366e83a6718a13 | [
"MIT"
] | 2 | 2019-05-22T09:42:51.000Z | 2021-06-15T19:05:22.000Z | TOR/ConnectionsHandler/Models/DNSExitNode.py | txrproject/Tor_DNS-Servers | bd95ff28bb697a4e0ba7d0276b366e83a6718a13 | [
"MIT"
] | null | null | null | TOR/ConnectionsHandler/Models/DNSExitNode.py | txrproject/Tor_DNS-Servers | bd95ff28bb697a4e0ba7d0276b366e83a6718a13 | [
"MIT"
] | 1 | 2020-12-23T05:26:32.000Z | 2020-12-23T05:26:32.000Z |
"""
This class is for each exitNode which delong to DNS resolver
"""
class DNSExitNode():
def __init__(self,nodeIP,nodeDomain,nodeModifiedDomainfull):
self.exitNodeIP = nodeIP
self.nodeDomian = nodeDomain
self.nodeModifiedDomian = nodeModifiedDomainfull
self.JSON = self.reprExitNodelistJSON()
def reprExitNodelistJSON(self):
return dict(nodeIP=self.exitNodeIP, nodeDomian=self.nodeDomian, nodeModifiedDomian= self.nodeModifiedDomian) | 34.642857 | 116 | 0.742268 |
class DNSExitNode():
def __init__(self,nodeIP,nodeDomain,nodeModifiedDomainfull):
self.exitNodeIP = nodeIP
self.nodeDomian = nodeDomain
self.nodeModifiedDomian = nodeModifiedDomainfull
self.JSON = self.reprExitNodelistJSON()
def reprExitNodelistJSON(self):
return dict(nodeIP=self.exitNodeIP, nodeDomian=self.nodeDomian, nodeModifiedDomian= self.nodeModifiedDomian) | true | true |
1c313bd1421fd2c0959b8883eeab4a41d2c364c8 | 2,545 | py | Python | code/remove_invariant_sites_from_phylip.py | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | code/remove_invariant_sites_from_phylip.py | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | code/remove_invariant_sites_from_phylip.py | nealplatt/sch_man_nwinvasion | 73f7ce5fa4843cc2352fdb709b134f22af28ad19 | [
"MIT"
] | null | null | null | ### RNPlatt
### 05 Sept 2019
### USAGE: remove_invariant_sites_from_phylip.py <list of invariants> <phylip> <outfile>
###
### This will take a list of invariant sites predetermined by raxml in a single
### site per line file and the original phylip file used in raxml and return
### a phylip file with all the invariant sites removed.
###
### REQUIRES: numpy
###
import sys

import numpy as np

#-------------------- GET CMD LINE OPTIONS--------------------------------------
# argv[1]: file with one 1-based invariant site index per line (from raxml)
# argv[2]: original phylip alignment used in raxml
# argv[3]: output phylip with the invariant sites removed
invariant_file = sys.argv[1]
phylip_file = sys.argv[2]
out_file = sys.argv[3]

#-------------------- GET INVARIANT SITES --------------------------------------
# raxml reports 1-based site numbers; shift each to a 0-based column index.
# Context managers guarantee the files are closed even if parsing fails.
with open(invariant_file, "r") as inv_sites_infile:
    inv_sites = [int(line) - 1 for line in inv_sites_infile]

#-------------------- GET SEQUENCE DATA AND TRIM -------------------------------
with open(phylip_file, "r") as phylip_infile:
    phylip_data = phylip_infile.readlines()

# The phylip header line is "<num_samples> <num_sites>".
num_samples, num_sites = phylip_data[0].rstrip('\n').split(" ")

# Split each record into its sample id and a per-base list of characters:
#   sample_ids is a flat list; sequences is a 2-D list (one char per cell).
sample_ids = []
sequences = []
for entry in phylip_data[1:]:
    sample_id, sequence = entry.rstrip('\n').split()
    sample_ids.append(sample_id)
    sequences.append(list(sequence))

# Delete all invariant alignment columns in a single vectorised call.
sequences = np.array(sequences)
trimmed_seqs = np.delete(sequences, inv_sites, 1)

# Collapse each trimmed row of characters back into a sequence string.
seq_strings = [''.join(list(trimmed_seq)) for trimmed_seq in trimmed_seqs]

#-------------------- CREATING THE OUTPUT FILE ---------------------------------
num_sites_after_trimming = len(seq_strings[0])
with open(out_file, "w") as trimmed_phylip_outfile:
    # Header line, then one "<id> <sequence>" record per sample.
    trimmed_phylip_outfile.write(str(num_samples) + " " + str(num_sites_after_trimming) + "\n")
    for sample_id, seq_string in zip(sample_ids, seq_strings):
        trimmed_phylip_outfile.write(sample_id + " " + seq_string + "\n")
| 29.252874 | 91 | 0.662083 | trings=[]
for trimmed_seq in trimmed_seqs:
as_string=''.join(list(trimmed_seq))
seq_strings.append(as_string)
trimmed_phylip_outfile=open(out_file, "w")
num_sites_after_trimming=len(seq_strings[0])
trimmed_phylip_outfile.write(str(num_samples) + " " + str(num_sites_after_trimming) + "\n")
i=0
for sample_id in sample_ids:
trimmed_phylip_outfile.write(sample_id + " " + seq_strings[i] + "\n")
i=i+1
inv_sites_infile.close()
phylip_infile.close()
trimmed_phylip_outfile.close()
| true | true |
1c313c2826e1f112676e1337859553b9cf492376 | 7,335 | py | Python | src/utils/chatArchiver.py | ayman2598/GabbyGums | b68ab01610ac399aa2b7daa97d5d71dd0d1b19d6 | [
"Apache-2.0"
] | 2 | 2019-12-13T20:06:14.000Z | 2022-01-23T00:34:29.000Z | src/utils/chatArchiver.py | ayman2598/GabbyGums | b68ab01610ac399aa2b7daa97d5d71dd0d1b19d6 | [
"Apache-2.0"
] | 23 | 2019-10-19T16:55:45.000Z | 2020-03-14T16:18:05.000Z | src/utils/chatArchiver.py | amadea-system/GabbyGums | b68ab01610ac399aa2b7daa97d5d71dd0d1b19d6 | [
"Apache-2.0"
] | 6 | 2019-12-13T20:06:17.000Z | 2021-02-12T16:21:04.000Z | """
Methods for generating HTML and TXT archives of a discord chat from a list of discord messages.
Part of the Gabby Gums Discord Logger.
"""
import hmac
import logging
import hashlib
from functools import partial
from datetime import datetime
from io import StringIO, SEEK_END, SEEK_SET
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Match
import regex as re
from jinja2 import Template, Environment, FileSystemLoader
from utils.discordMarkdownParser import markdown
if TYPE_CHECKING:
from events.bulkMessageDelete import CompositeMessage, MessageGroups
import discord
from discord.ext import commands
log = logging.getLogger(__name__)
auth_key_pattern = re.compile(r"^<!--([0-9a-f]+)-->$")
def md(_input):
    """Jinja2 global: render discord-flavoured markdown source to HTML."""
    return markdown.markdown(_input)
# Jinja2 environment used to render the HTML chat-archive template.
file_loader = FileSystemLoader(searchpath="./htmlTemplates/")
env = Environment(loader=file_loader)
env.globals['markdown'] = md  # expose the markdown renderer to templates
env.trim_blocks = True        # drop the newline after a block tag
env.lstrip_blocks = True      # drop leading whitespace before a block tag
template = env.get_template('mainChat.html')
class CouldNotFindAuthenticationCode(Exception):
    """Raised when an archive file has no trailing "<!--hash-->" HMAC line."""
    pass
def generate_txt_archive(messages: List['CompositeMessage'], channel_name) -> StringIO:
    """Render *messages* as a plain-text chat archive.

    Returns a StringIO rewound to position 0, beginning with a header line
    (message count, channel name, UTC timestamp) followed by one entry per
    message: "[timestamp] user (origin):" and the indented message body.
    """
    formatted = []
    for msg in messages:
        body = msg.content if msg.content else "----Message contained no text----"
        if msg.is_pk:
            # PluralKit proxy message: identify it by system/member ids.
            origin = f"System ID: {msg.system_id}, Member ID: {msg.member_id}"
        else:
            sender = msg.author
            origin = sender.id if sender else "None"
        timestamp = msg.created_at.strftime('%Y-%m-%d %H:%M:%S-UTC')
        formatted.append(f"[{timestamp}] {msg.user_name_and_discrim} ({origin}):\n    {body}\n\n")
    archive = StringIO()
    header_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S-UTC')
    archive.write(f"{len(formatted)} messages archived from #{channel_name} @ {header_time}\n\n")
    for entry in formatted:
        archive.write(entry)
    archive.seek(0)
    return archive
async def generate_html_archive(bot: 'commands.bot', channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int) -> StringIO:
    """Render the HTML archive off the event-loop thread.

    Template rendering can be slow for large channels, so the blocking work
    is pushed onto the default executor to keep the bot responsive.
    """
    fn = partial(blocking_generate_html_archive, channel, messages, msg_count)
    archive = await bot.loop.run_in_executor(None, fn)
    return archive
def blocking_generate_html_archive(channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int) -> StringIO:
    """Synchronously render the Jinja2 chat template into an in-memory HTML file."""
    archive = StringIO()
    ctx = {'guild': channel.guild, 'channel': channel}
    output = template.render(ctx=ctx, msg_groups=messages, msg_count=msg_count)
    # writelines on a str iterates its characters; equivalent to write(output).
    archive.writelines(output)
    archive.seek(0)
    return archive
def generate_SHA256_hash(_input: StringIO) -> str:
    """Return the hex SHA-256 digest of the full contents of *_input*.

    The stream is read from the beginning and rewound to position 0 before
    returning, so callers can keep using it.
    """
    _input.seek(0)
    hasher = hashlib.sha256()
    # read() already yields str; the old str() wrapper was redundant.
    hasher.update(_input.read().encode('utf-8'))
    _input.seek(0)
    return hasher.hexdigest()
def get_hmac(_input: StringIO, security_key: bytes) -> str:
    """Return the hex HMAC-SHA3-256 of *_input*'s full contents under *security_key*.

    The stream is read from the start and rewound to position 0 afterwards.
    """
    _input.seek(0)
    # read() already yields str; encode it directly for the HMAC.
    msg = _input.read().encode('utf-8')
    hasher = hmac.new(security_key, msg, hashlib.sha3_256)
    digest = hasher.hexdigest()
    _input.seek(0)
    return digest
def write_hmac(_input: StringIO, security_key: bytes):
    """Append the stream's HMAC as a trailing "<!--hex-->" HTML comment line.

    The authentication code is computed over the current contents with
    get_hmac(), written on a new final line, and the stream is rewound to
    position 0 when done so it is ready for the next reader.
    """
    _input.seek(0)
    auth_code = get_hmac(_input, security_key)
    _input.seek(0, SEEK_END)
    _input.write(f"\n<!--{auth_code}-->")
    _input.seek(0)
def verify_file(file: StringIO, security_key: bytes) -> bool:
    """Check a file's trailing "<!--hex-->" HMAC line against *security_key*.

    Returns True only when the last line is a well-formed auth-code comment
    whose HMAC matches the rest of the contents.  NOTE: this is destructive —
    the auth-code line is truncated off *file* so the remainder can be HMACed.
    """
    file.seek(0, SEEK_END)  # Seek to the end of the file so we can iterate backward.
    pos = file.tell()  # store the position that is the end of the file.
    # log.info(f"Pos: {pos}")
    file.seek(0, SEEK_END)  # Seek back to the end of the file.
    while pos > 0 and file.read(1) != '\n':  # Go backwards through the file until we hit a new line or the start of the file.
        pos -= 1
        file.seek(pos, SEEK_SET)
    # log.info(f"Pos after seeking: {pos}")
    auth_code = None
    if pos > 0:
        file.seek(pos+1, SEEK_SET)  # Go forward one char to the start of the last line. This is where the auth code lies.
        auth_code = file.readline()  # Grab the auth code
        file.seek(pos, SEEK_SET)  # Go back to the new line
        file.truncate()  # And delete everything after it so we can get back to a HMACable file.
    if auth_code is not None:
        auth_code_match: Match = auth_key_pattern.match(auth_code)
        if auth_code_match is not None:
            auth_code = auth_code_match.group(1)
            log.info(f"Got auth code: {auth_code}")
            hash = get_hmac(file, security_key)
            log.info(f"files hmac: {hash}")
            # compare_digest is constant-time, avoiding a timing side channel.
            # if hash == auth_code:
            if hmac.compare_digest(hash, auth_code):
                log.info("File is unmodified.")
                return True
            else:
                log.info("Authentication Code mismatch. File has been modified.")
                return False
    log.info("Could not find authentication code in the archive file!")
    # raise CouldNotFindAuthenticationCode("Could not find authentication code in the archive file!")
    return False
# Unused, for debugging purposes.
def save_html_archive(channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int):
    """This method does the same as generate_html_archive() except instead of returning a StringIO object suitable for passing to Discord, it saves the html for debugging. """
    ctx = {'guild': channel.guild, 'channel': channel}
    output = template.render(ctx=ctx, msg_groups=messages, msg_count=msg_count)
    # Written to the working directory; debug use only.
    with open('archive.html', 'w', encoding="utf-8") as archive:
        archive.writelines(output)
# Unused, for debugging purposes.
def save_htmlDebug_txt_archive(messages: List['CompositeMessage'], channel_name):
    """Debug helper: dump *messages* as text to debug_archive.html.txt.

    NOTE: reverses *messages* IN PLACE, so the caller's list order changes.
    """
    messages.reverse()
    lines = []
    for message in messages:
        if message.content:
            content = message.content
        else:
            content = "----Message contained no text----"
        if message.is_pk:
            # PluralKit proxy message: identify it by system/member ids.
            author_info = f"System ID: {message.system_id}, Member ID: {message.member_id}"
        else:
            author: Union['discord.Member', 'discord.User'] = message.author
            author_info = author.id if author else "None"
        msg = f"[{message.created_at.strftime('%Y-%m-%d %H:%M:%S-UTC')}] {message.user_name_and_discrim} ({author_info}):" \
              f"\n\n    {content}\n\n"
        lines.append(msg)
    # Written to the working directory; debug use only.
    with open('debug_archive.html.txt', 'w', encoding="utf-8") as archive:
        archive.write(f"{len(lines)} messages archived from #{channel_name} @ {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S-UTC')}\n\n")
        for line in lines:
            archive.write(line)
| 36.675 | 175 | 0.667894 |
import hmac
import logging
import hashlib
from functools import partial
from datetime import datetime
from io import StringIO, SEEK_END, SEEK_SET
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Match
import regex as re
from jinja2 import Template, Environment, FileSystemLoader
from utils.discordMarkdownParser import markdown
if TYPE_CHECKING:
from events.bulkMessageDelete import CompositeMessage, MessageGroups
import discord
from discord.ext import commands
log = logging.getLogger(__name__)
auth_key_pattern = re.compile(r"^<!--([0-9a-f]+)-->$")
def md(_input):
    """Jinja2 global: render discord-flavoured markdown source to HTML."""
    out = markdown.markdown(_input)
    return out
# Jinja2 environment used to render the HTML chat-archive template.
file_loader = FileSystemLoader(searchpath="./htmlTemplates/")
env = Environment(loader=file_loader)
env.globals['markdown'] = md  # expose the markdown renderer to templates
env.trim_blocks = True        # drop the newline after a block tag
env.lstrip_blocks = True      # drop leading whitespace before a block tag
template = env.get_template('mainChat.html')
class CouldNotFindAuthenticationCode(Exception):
    """Raised when an archive file has no trailing "<!--hash-->" HMAC line."""
    pass
def generate_txt_archive(messages: List['CompositeMessage'], channel_name) -> StringIO:
    """Render *messages* as a plain-text chat archive.

    Returns a StringIO rewound to position 0, beginning with a header line
    (message count, channel name, UTC timestamp) followed by one entry per
    message: "[timestamp] user (origin):" and the indented message body.
    """
    formatted = []
    for msg in messages:
        body = msg.content if msg.content else "----Message contained no text----"
        if msg.is_pk:
            # PluralKit proxy message: identify it by system/member ids.
            origin = f"System ID: {msg.system_id}, Member ID: {msg.member_id}"
        else:
            sender = msg.author
            origin = sender.id if sender else "None"
        timestamp = msg.created_at.strftime('%Y-%m-%d %H:%M:%S-UTC')
        formatted.append(f"[{timestamp}] {msg.user_name_and_discrim} ({origin}):\n    {body}\n\n")
    archive = StringIO()
    header_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S-UTC')
    archive.write(f"{len(formatted)} messages archived from #{channel_name} @ {header_time}\n\n")
    for entry in formatted:
        archive.write(entry)
    archive.seek(0)
    return archive
async def generate_html_archive(bot: 'commands.bot', channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int) -> StringIO:
    """Render the HTML archive off the event-loop thread via the default executor."""
    fn = partial(blocking_generate_html_archive, channel, messages, msg_count)
    archive = await bot.loop.run_in_executor(None, fn)
    return archive
def blocking_generate_html_archive(channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int) -> StringIO:
    """Synchronously render the Jinja2 chat template into an in-memory HTML file."""
    archive = StringIO()
    ctx = {'guild': channel.guild, 'channel': channel}
    output = template.render(ctx=ctx, msg_groups=messages, msg_count=msg_count)
    # writelines on a str iterates its characters; equivalent to write(output).
    archive.writelines(output)
    archive.seek(0)
    return archive
def generate_SHA256_hash(_input: StringIO) -> str:
    """Return the hex SHA-256 digest of the full contents of *_input*.

    The stream is read from the beginning and rewound to position 0 before
    returning, so callers can keep using it.
    """
    _input.seek(0)
    hasher = hashlib.sha256()
    # read() already yields str; the old str() wrapper was redundant.
    hasher.update(_input.read().encode('utf-8'))
    _input.seek(0)
    return hasher.hexdigest()
def get_hmac(_input: StringIO, security_key: bytes) -> str:
    """Return the hex HMAC-SHA3-256 of *_input*'s full contents under *security_key*.

    The stream is read from the start and rewound to position 0 afterwards.
    """
    _input.seek(0)
    # read() already yields str; encode it directly for the HMAC.
    msg = _input.read().encode('utf-8')
    hasher = hmac.new(security_key, msg, hashlib.sha3_256)
    digest = hasher.hexdigest()
    _input.seek(0)
    return digest
def write_hmac(_input: StringIO, security_key: bytes):
    """Append the stream's HMAC as a trailing "<!--hex-->" comment line and rewind to 0."""
    _input.seek(0)
    hash = get_hmac(_input, security_key)
    _input.seek(0, SEEK_END)
    _input.write(f"\n<!--{hash}-->")
    _input.seek(0)
def verify_file(file: StringIO, security_key: bytes) -> bool:
    """Check a file's trailing "<!--hex-->" HMAC line against *security_key*.

    Returns True only when the last line is a well-formed auth-code comment
    whose HMAC matches the rest of the contents.  NOTE: this is destructive —
    the auth-code line is truncated off *file* so the remainder can be HMACed.
    """
    file.seek(0, SEEK_END)  # Seek to the end of the file. so we can iterate backward.
    pos = file.tell()  # store the position that is the end of the file.
    # log.info(f"Pos: {pos}")
    file.seek(0, SEEK_END)  # Seek back to the end of the file.
    while pos > 0 and file.read(1) != '\n':  # Go backwards through the file until we hit a new line or the start of the file.
        pos -= 1
        file.seek(pos, SEEK_SET)
    # log.info(f"Pos after seeking: {pos}")
    auth_code = None
    if pos > 0:
        file.seek(pos+1, SEEK_SET)  # Go forward one char to get to the start of the last line. This is where the auth code lies.
        auth_code = file.readline()  # Grab the auth code
        file.seek(pos, SEEK_SET)  # Go back to the new line
        file.truncate()  # And delete everything after it so we can get back to a HMACable file.
    if auth_code is not None:
        auth_code_match: Match = auth_key_pattern.match(auth_code)
        if auth_code_match is not None:
            auth_code = auth_code_match.group(1)
            log.info(f"Got auth code: {auth_code}")
            hash = get_hmac(file, security_key)
            log.info(f"files hmac: {hash}")
            # compare_digest is constant-time, avoiding a timing side channel.
            # if hash == auth_code:
            if hmac.compare_digest(hash, auth_code):
                log.info("File is unmodified.")
                return True
            else:
                log.info("Authentication Code mismatch. File has been modified.")
                return False
    log.info("Could not find authentication code in the archive file!")
    # raise CouldNotFindAuthenticationCode("Could not find authentication code in the archive file!")
    return False
# Unused, for debugging purposes.
def save_html_archive(channel: 'discord.TextChannel', messages: 'MessageGroups', msg_count: int):
    """Debug helper: render the HTML archive and save it to archive.html instead of returning it."""
    ctx = {'guild': channel.guild, 'channel': channel}
    output = template.render(ctx=ctx, msg_groups=messages, msg_count=msg_count)
    # Written to the working directory; debug use only.
    with open('archive.html', 'w', encoding="utf-8") as archive:
        archive.writelines(output)
# Unused, for debugging purposes.
def save_htmlDebug_txt_archive(messages: List['CompositeMessage'], channel_name):
    """Debug helper: dump *messages* as text to debug_archive.html.txt.

    NOTE: reverses *messages* IN PLACE, so the caller's list order changes.
    """
    messages.reverse()
    lines = []
    for message in messages:
        if message.content:
            content = message.content
        else:
            content = "----Message contained no text----"
        if message.is_pk:
            # PluralKit proxy message: identify it by system/member ids.
            author_info = f"System ID: {message.system_id}, Member ID: {message.member_id}"
        else:
            author: Union['discord.Member', 'discord.User'] = message.author
            author_info = author.id if author else "None"
        msg = f"[{message.created_at.strftime('%Y-%m-%d %H:%M:%S-UTC')}] {message.user_name_and_discrim} ({author_info}):" \
              f"\n\n    {content}\n\n"
        lines.append(msg)
    # Written to the working directory; debug use only.
    with open('debug_archive.html.txt', 'w', encoding="utf-8") as archive:
        archive.write(f"{len(lines)} messages archived from #{channel_name} @ {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S-UTC')}\n\n")
        for line in lines:
            archive.write(line)
| true | true |
1c313d9466ba24619116f017bf281b737b3abf9e | 1,704 | py | Python | vigobusbot/telegram_bot/services/message_generators/stop_message_text.py | David-Lor/VigoBus-TelegramBot | 2cbba9a6565e7f8c92953e79b9ca4247d53f4b33 | [
"Apache-2.0"
] | 8 | 2019-07-18T21:33:04.000Z | 2022-03-26T15:07:14.000Z | vigobusbot/telegram_bot/services/message_generators/stop_message_text.py | EnforcerZhukov/VigoBus-TelegramBot | 9a0258edf5ff34ecedab6bcf4a8238f07bc318fa | [
"Apache-2.0"
] | 3 | 2021-09-10T19:53:36.000Z | 2021-09-10T19:53:37.000Z | vigobusbot/telegram_bot/services/message_generators/stop_message_text.py | EnforcerZhukov/VigoBus-TelegramBot | 9a0258edf5ff34ecedab6bcf4a8238f07bc318fa | [
"Apache-2.0"
] | 3 | 2019-09-24T15:43:23.000Z | 2020-04-18T17:48:29.000Z | """STOP MESSAGE TEXT
Helper to generate the Stop Message text body
"""
# # Native # #
import datetime
from typing import Optional
# # Project # #
from vigobusbot.persistence_api import saved_stops
from vigobusbot.static_handler import get_messages
from vigobusbot.entities import Stop, BusesResponse
__all__ = ("generate_stop_message_text",)
def generate_stop_message_text(
stop: Stop,
buses_response: BusesResponse,
user_saved_stop: Optional[saved_stops.SavedStop]
) -> str:
messages = get_messages()
buses = buses_response.buses
# Generate Stop Name text
if user_saved_stop and user_saved_stop.stop_name:
stop_name_text = messages.stop.stop_custom_name.format(
stop_custom_name=user_saved_stop.stop_name,
stop_original_name=stop.name
)
else:
stop_name_text = stop.name
# Generate Buses text
if buses:
buses_text_lines = list()
for bus in buses:
if bus.time == 0:
time_text = messages.stop.bus_time_now
else:
time_text = messages.stop.bus_time_remaining.format(minutes=bus.time)
buses_text_lines.append(messages.stop.bus_line.format(
line=bus.line,
route=bus.route,
time=time_text
))
buses_text = "\n".join(buses_text_lines)
else:
buses_text = messages.stop.no_buses_found
last_update_text = datetime.datetime.now().strftime(messages.stop.time_format)
return messages.stop.message.format(
stop_id=stop.stop_id,
stop_name=stop_name_text,
buses=buses_text,
last_update=last_update_text
)
| 28.881356 | 85 | 0.663732 |
from typing import Optional
persistence_api import saved_stops
from vigobusbot.static_handler import get_messages
from vigobusbot.entities import Stop, BusesResponse
__all__ = ("generate_stop_message_text",)
def generate_stop_message_text(
stop: Stop,
buses_response: BusesResponse,
user_saved_stop: Optional[saved_stops.SavedStop]
) -> str:
messages = get_messages()
buses = buses_response.buses
if user_saved_stop and user_saved_stop.stop_name:
stop_name_text = messages.stop.stop_custom_name.format(
stop_custom_name=user_saved_stop.stop_name,
stop_original_name=stop.name
)
else:
stop_name_text = stop.name
if buses:
buses_text_lines = list()
for bus in buses:
if bus.time == 0:
time_text = messages.stop.bus_time_now
else:
time_text = messages.stop.bus_time_remaining.format(minutes=bus.time)
buses_text_lines.append(messages.stop.bus_line.format(
line=bus.line,
route=bus.route,
time=time_text
))
buses_text = "\n".join(buses_text_lines)
else:
buses_text = messages.stop.no_buses_found
last_update_text = datetime.datetime.now().strftime(messages.stop.time_format)
return messages.stop.message.format(
stop_id=stop.stop_id,
stop_name=stop_name_text,
buses=buses_text,
last_update=last_update_text
)
| true | true |
1c313dedbfd5a753fe40dabaad5a4b121ecda8d2 | 11,682 | py | Python | test/IECore/CompoundData.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | test/IECore/CompoundData.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | test/IECore/CompoundData.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"""Unit test for CompoundData binding"""
import os
import math
import unittest
import sys
import subprocess
import IECore
class CompoundDataTest(unittest.TestCase):
	def testConstructors(self):
		"""Test constructors: default construction and construction from a dict."""
		v1 = IECore.CompoundData()
		a = dict()
		a["1"] = IECore.IntData(1)
		v3 = IECore.CompoundData(a)
		self.assertEqual(v3.size(), 1)
def testResize(self):
"""Test resizing"""
v = IECore.CompoundData()
v["0"] = IECore.FloatData(2)
self.assertEqual(v["0"], IECore.FloatData(2))
v["1"] = IECore.FloatData(0)
v["2"] = IECore.FloatData(3)
v["3"] = IECore.FloatData(2)
v["4"] = IECore.FloatData(5)
self.assertEqual(v["4"], IECore.FloatData(5))
self.assertEqual(len(v), 5)
del(v["0"])
self.assertEqual(len(v), 4)
self.assert_(v.has_key("0") == False)
v.clear()
self.assertEqual(len(v), 0)
	def testAssignment(self):
		"""Test assignment and copy semantics."""
		v1 = IECore.CompoundData()
		v1["0"] = IECore.FloatData(1.2)
		v1["1"] = IECore.FloatData(2.3)
		v2 = v1.copy()
		v3 = v1  # aliasing, not a copy
		v4 = v1.copy()
		self.assertEqual(len(v1), 2)
		self.assertEqual(len(v1), len(v2))
		self.assertEqual(v1["0"], v2["0"])
		self.assertEqual(v1["1"], v2["1"])
		self.assertEqual(v1["0"], v4["0"])
		self.assertEqual(v1["1"], v4["1"])
		self.assertRaises( TypeError, v1.__setitem__, "2", None ) # should prevent setting None as value.
def testCopyOnWrite(self):
"""Test copy-on-write behavior"""
v1 = IECore.CompoundData()
v1["0"] = IECore.FloatData(1.2)
v1["1"] = IECore.FloatData(2.3)
v2 = v1.copy()
v3 = v1.copy()
v3["0"] = IECore.UIntData(5)
self.assert_(v3["0"] == IECore.UIntData(5))
self.assert_(v2["0"] == IECore.FloatData(1.2))
v1["2"] = IECore.FloatData(5);
self.assertEqual(len(v1), 3)
self.assertEqual(len(v2), 2)
def testSearch(self):
"""Test search functions"""
v1 = IECore.CompoundData()
v1["0"] = IECore.FloatData(1.2)
v1["1"] = IECore.FloatData(2.3)
v1["2"] = IECore.FloatData(3)
self.assert_("0" in v1)
self.assert_("3" not in v1)
self.assert_(v1.has_key("1"))
self.assert_(not v1.has_key("3"))
self.assert_(v1.get("0") == IECore.FloatData(1.2))
self.assert_(v1.get("0", IECore.IntData(10)) == IECore.FloatData(1.2))
self.assert_(v1.get("xx", IECore.IntData(10)) == IECore.IntData(10))
self.assert_(v1.get("xx") == None)
self.assert_(v1.get("xx", None ) == None)
self.assertEqual(len(v1), 3)
def testUpdate(self):
"""Test update function"""
v1 = IECore.CompoundData()
v1["0"] = IECore.FloatData(1.2)
v1["1"] = IECore.FloatData(2.3)
v1["2"] = IECore.FloatData(3)
v2 = IECore.CompoundData()
v2["0"] = IECore.UIntData(5)
v2["3"] = IECore.UIntData(6)
v2.update(v1)
self.assertEqual(len(v2), 4)
self.assert_(v2["0"] == IECore.FloatData(1.2))
self.assert_(v2["3"] == IECore.UIntData(6))
v3 = dict()
v3["1"] = IECore.CharData("a")
v3["4"] = IECore.UCharData(9)
v2.update(v3)
self.assertEqual(len(v2), 5)
self.assert_(v2["1"] == IECore.CharData("a"))
self.assert_(v2["4"] == IECore.UCharData(9))
def testSetDefault(self):
"""Test setdefault function"""
v1 = IECore.CompoundData()
v1["0"] = IECore.FloatData(1.2)
v1["1"] = IECore.FloatData(2.3)
v1["2"] = IECore.FloatData(3)
v2 = v1.copy()
self.assertEqual(len(v1), 3)
self.assert_(v1.setdefault("2", IECore.UIntData(10)) == IECore.FloatData(3))
self.assertEqual(len(v1), 3)
self.assert_(v1.setdefault("x", IECore.UIntData(10)) == IECore.UIntData(10))
self.assertEqual(len(v1), 4)
def testPop(self):
    """Test pop functions"""
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1.2)
    v1["1"] = IECore.FloatData(2.3)
    v1["2"] = IECore.FloatData(3)
    v1["3"] = IECore.FloatData(4)
    self.assertEqual(len(v1), 4)
    # popitem() removes exactly one (unspecified) entry
    prev = v1.popitem()
    self.assertEqual(len(v1), 3)
    # pop() on a missing key returns the default without modifying the container
    self.assertEqual(v1.pop("x", IECore.UIntData(10)), IECore.UIntData(10))
    self.assertEqual(len(v1), 3)
def testKeyValues(self):
    """Test keys/values listing"""
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1)
    v1["1"] = IECore.FloatData(2)
    v1["2"] = IECore.FloatData(3)
    # compare as sets because iteration order is unspecified
    self.assertEqual( set( v1.keys() ), set( ['0', '1', '2'] ) )
    vals = v1.values()
    self.assertEqual( set( [ x.value for x in vals ] ), set( [ 1, 2, 3 ] ) )
    items = v1.items()
    self.assertEqual( set( [ ( x[0], x[1].value ) for x in items ] ), set( [ ( "0", 1 ), ( "1", 2 ), ( "2", 3 ) ] ) )
def testEquality(self):
    """Test equality function"""
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1.2)
    v1["1"] = IECore.FloatData(2.3)
    v1["2"] = IECore.FloatData(3)
    v2 = IECore.CompoundData()
    v2["0"] = IECore.FloatData(1.2)
    v2["1"] = IECore.FloatData(2.3)
    v2["2"] = IECore.FloatData(3)
    v3 = v2.copy()
    del v3["2"]
    # same contents compare equal; a copy with a removed key does not
    self.assert_(v1 == v2)
    self.assert_(not v1 != v2)
    self.assert_(not v1 == v3)
    self.assert_(not v2 == v3)
    # adding a key breaks equality again
    v2["-1"] = IECore.FloatData(6)
    self.assert_(v1 != v2)
    self.assert_(not v1 == v2)
    # removing the same key from v1 makes it equal to v3
    del(v1["2"])
    self.assert_(v1 == v3)
def testByValueItem(self):
    """Test by value return type"""
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1.2)
    v1["1"] = IECore.FloatData(2.3)
    v1["2"] = IECore.FloatData(3)
    self.assert_(v1["0"] == IECore.FloatData(1.2))
    # rebinding the python name fetched from the container must not
    # change what the container itself holds
    a = v1["0"]
    a = IECore.UIntData(255)
    self.assert_(v1["0"] == IECore.FloatData(1.2))
    self.assert_(a == IECore.UIntData(255))
def testLoadSave(self):
    """Test load/save"""
    # round-trip through an IndexedIO file; the file is removed in tearDown()
    iface = IECore.IndexedIO.create( "test/CompoundData.fio", IECore.IndexedIO.OpenMode.Write )
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1.2)
    v1["1"] = IECore.FloatData(2.3)
    v1["2"] = IECore.FloatData(3)
    # key containing ':' exercises name escaping in the file format
    v1["some:data"] = IECore.FloatData(3)
    self.assert_(v1["0"] == IECore.FloatData(1.2))
    v1.save( iface, "test" )
    v2 = IECore.Object.load( iface, "test" )
    self.assertEqual( v1, v2 )
def testRepr(self):
    """Test repr"""
    # repr() must produce a string that eval()s back to an equal object
    v1 = IECore.CompoundData()
    r1 = repr(v1)
    self.assertEqual( eval(repr(v1)), v1 )
    v1 = IECore.CompoundData()
    v1["0"] = IECore.FloatData(1.2)
    v1["1"] = IECore.FloatData(2.3)
    v1["2"] = IECore.FloatData(3)
    self.assertEqual( eval(repr(v1)), v1 )
    # nested compounds and vector data must round-trip too
    v1 = IECore.CompoundData()
    v1["0"] = IECore.StringData( "test" )
    v1["1"] = IECore.CompoundData(
        { "0" : IECore.StringData( "test" ),
          "1" : IECore.M33fData()
        }
    )
    v1["someMoreData"] = IECore.V3fVectorData()
    v1["A"] = IECore.Color4fVectorData()
    self.assertEqual( eval(repr(v1)), v1 )
def testConstructionFromNestedDict( self ) :
    """Test that the constructor converts nested python dicts and
    plain python values to the corresponding Data types."""
    c = IECore.CompoundData( {
        "a" : 10,
        "b" : IECore.BoolData( True ),
        "c" : {
            "cc" : IECore.IntData( 20 ),
        },
        "d" : IECore.CompoundData( {
            "dd" : IECore.IntData( 5 ),
        } )
    } )

    self.assertEqual( len( c ), 4 )
    # plain python int is converted to IntData
    self.assertEqual( c["a"], IECore.IntData( 10 ) )
    self.assertEqual( c["b"], IECore.BoolData( True ) )
    # nested dict becomes a nested CompoundData
    self.assertEqual( len( c["c"] ), 1 )
    self.assertEqual( c["c"]["cc"], IECore.IntData( 20 ) )
    self.assertEqual( len( c["d"] ), 1 )
    self.assertEqual( c["d"]["dd"], IECore.IntData( 5 ) )
def testUpdateFromNestedDict( self ) :
    """Test that update() accepts nested python dicts, applying the
    same conversions as construction."""
    c = IECore.CompoundData( {
            "a" : IECore.IntData( 30 )
        }
    )

    d = {
        "a" : 10,
        "b" : IECore.BoolData( True ),
        "c" : {
            "cc" : IECore.IntData( 20 ),
        },
        "d" : IECore.CompoundData( {
            "dd" : IECore.IntData( 5 ),
        } )
    }

    c.update( d )

    self.assertEqual( len( c ), 4 )
    # existing key "a" is overwritten by the converted python int
    self.assertEqual( c["a"], IECore.IntData( 10 ) )
    self.assertEqual( c["b"], IECore.BoolData( True ) )
    self.assertEqual( len( c["c"] ), 1 )
    self.assertEqual( c["c"]["cc"], IECore.IntData( 20 ) )
    self.assertEqual( len( c["d"] ), 1 )
    self.assertEqual( c["d"]["dd"], IECore.IntData( 5 ) )
def testHash( self ) :
    """Test that the hash depends on contents, not insertion order."""
    o1 = IECore.CompoundData()
    o2 = IECore.CompoundData()

    # same entries inserted in opposite order must hash identically
    o1["a"] = IECore.StringData( "a" )
    o1["b"] = IECore.StringData( "b" )

    o2["b"] = IECore.StringData( "b" )
    o2["a"] = IECore.StringData( "a" )

    self.assertEqual( o1.hash(), o2.hash() )

    # an extra entry must change the hash
    o2["c"] = IECore.StringData( "c" )

    self.assertNotEqual( o1.hash(), o2.hash() )
def testHashIndependentFromOrderOfConstruction( self ) :

    # CompoundData internally uses a map from InternedString to Data.
    # a naive iteration over this might yield a different order in each
    # process as it's dependent on the addresses of the InternedStrings.
    # we need to keep hashes consistent between processes.
    # Each command interns a different string first, to perturb the
    # InternedString table, before hashing the same CompoundData.
    # NOTE(review): the command strings use Python 2 print statements,
    # so this assumes sys.executable is a Python 2 interpreter.
    commands = [
        "import IECore; IECore.InternedString( 'a' ); print IECore.CompoundData( { 'a' : IECore.IntData( 10 ), 'b' : IECore.IntData( 20 ) } ).hash()",
        "import IECore; IECore.InternedString( 'b' ); print IECore.CompoundData( { 'a' : IECore.IntData( 10 ), 'b' : IECore.IntData( 20 ) } ).hash()",
    ]

    hashes = set()
    for command in commands :
        p = subprocess.Popen( [ sys.executable, "-c", command ], stdout=subprocess.PIPE )
        hash, nothing = p.communicate()
        hashes.add( hash )

    # both subprocesses must print the same hash
    self.assertEqual( len( hashes ), 1 )
def testHashUpdates( self ) :
    """Test that the hash changes exactly when contents change.

    This method was previously also named ``testHash``, which silently
    shadowed the earlier ``testHash`` definition so that the first test
    was never collected or run; renaming it makes both discoverable.
    """
    # each tuple: (key, value to set — or None to delete the key,
    #              whether the hash is expected to change)
    thingsToAdd = [
        ( "a", IECore.IntData( 1 ), True ),
        ( "a", IECore.UIntData( 1 ), True ),
        ( "a", IECore.IntData( 1 ), True ),
        ( "a", IECore.IntData( 1 ), False ),
        ( "b", IECore.StringVectorData( [ "a", "b", "c" ] ), True ),
        ( "b", IECore.StringVectorData( [ "a", "b" ] ), True ),
        ( "b", IECore.StringVectorData( [ "a", "c" ] ), True ),
        ( "b", IECore.StringVectorData( [ "a", "c" ] ), False ),
        ( "d", IECore.StringVectorData( [ "a", "c" ] ), True ),
        ( "d", None, True ),
    ]

    o = IECore.CompoundData()

    for t in thingsToAdd :
        h = o.hash()
        # hashing must be deterministic between repeated calls
        for i in range( 0, 10 ) :
            self.assertEqual( h, o.hash() )
        if t[1] is not None :
            o[t[0]] = t[1]
        else :
            del o[t[0]]
        if t[2] :
            self.assertNotEqual( h, o.hash() )
        else :
            self.assertEqual( h, o.hash() )
        h = o.hash()
def tearDown(self):
    # remove the file written by testLoadSave, if that test ran
    if os.path.isfile("./test/CompoundData.fio") :
        os.remove("./test/CompoundData.fio")
# run the test suite when executed directly
if __name__ == "__main__":
    unittest.main()
| 30.421875 | 145 | 0.621041 | re.Object.load( iface, "test" )
self.assertEqual( v1, v2 )
def testRepr(self):
v1 = IECore.CompoundData()
r1 = repr(v1)
self.assertEqual( eval(repr(v1)), v1 )
v1 = IECore.CompoundData()
v1["0"] = IECore.FloatData(1.2)
v1["1"] = IECore.FloatData(2.3)
v1["2"] = IECore.FloatData(3)
self.assertEqual( eval(repr(v1)), v1 )
v1 = IECore.CompoundData()
v1["0"] = IECore.StringData( "test" )
v1["1"] = IECore.CompoundData(
{ "0" : IECore.StringData( "test" ),
"1" : IECore.M33fData()
}
)
v1["someMoreData"] = IECore.V3fVectorData()
v1["A"] = IECore.Color4fVectorData()
self.assertEqual( eval(repr(v1)), v1 )
def testConstructionFromNestedDict( self ) :
c = IECore.CompoundData( {
"a" : 10,
"b" : IECore.BoolData( True ),
"c" : {
"cc" : IECore.IntData( 20 ),
},
"d" : IECore.CompoundData( {
"dd" : IECore.IntData( 5 ),
} )
} )
self.assertEqual( len( c ), 4 )
self.assertEqual( c["a"], IECore.IntData( 10 ) )
self.assertEqual( c["b"], IECore.BoolData( True ) )
self.assertEqual( len( c["c"] ), 1 )
self.assertEqual( c["c"]["cc"], IECore.IntData( 20 ) )
self.assertEqual( len( c["d"] ), 1 )
self.assertEqual( c["d"]["dd"], IECore.IntData( 5 ) )
def testUpdateFromNestedDict( self ) :
c = IECore.CompoundData( {
"a" : IECore.IntData( 30 )
}
)
d = {
"a" : 10,
"b" : IECore.BoolData( True ),
"c" : {
"cc" : IECore.IntData( 20 ),
},
"d" : IECore.CompoundData( {
"dd" : IECore.IntData( 5 ),
} )
}
c.update( d )
self.assertEqual( len( c ), 4 )
self.assertEqual( c["a"], IECore.IntData( 10 ) )
self.assertEqual( c["b"], IECore.BoolData( True ) )
self.assertEqual( len( c["c"] ), 1 )
self.assertEqual( c["c"]["cc"], IECore.IntData( 20 ) )
self.assertEqual( len( c["d"] ), 1 )
self.assertEqual( c["d"]["dd"], IECore.IntData( 5 ) )
def testHash( self ) :
o1 = IECore.CompoundData()
o2 = IECore.CompoundData()
o1["a"] = IECore.StringData( "a" )
o1["b"] = IECore.StringData( "b" )
o2["b"] = IECore.StringData( "b" )
o2["a"] = IECore.StringData( "a" )
self.assertEqual( o1.hash(), o2.hash() )
o2["c"] = IECore.StringData( "c" )
self.assertNotEqual( o1.hash(), o2.hash() )
def testHashIndependentFromOrderOfConstruction( self ) :
# we need to keep hashes consistent between processes.
commands = [
"import IECore; IECore.InternedString( 'a' ); print IECore.CompoundData( { 'a' : IECore.IntData( 10 ), 'b' : IECore.IntData( 20 ) } ).hash()",
"import IECore; IECore.InternedString( 'b' ); print IECore.CompoundData( { 'a' : IECore.IntData( 10 ), 'b' : IECore.IntData( 20 ) } ).hash()",
]
hashes = set()
for command in commands :
p = subprocess.Popen( [ sys.executable, "-c", command ], stdout=subprocess.PIPE )
hash, nothing = p.communicate()
hashes.add( hash )
self.assertEqual( len( hashes ), 1 )
def testHash( self ) :
thingsToAdd = [
( "a", IECore.IntData( 1 ), True ),
( "a", IECore.UIntData( 1 ), True ),
( "a", IECore.IntData( 1 ), True ),
( "a", IECore.IntData( 1 ), False ),
( "b", IECore.StringVectorData( [ "a", "b", "c" ] ), True ),
( "b", IECore.StringVectorData( [ "a", "b" ] ), True ),
( "b", IECore.StringVectorData( [ "a", "c" ] ), True ),
( "b", IECore.StringVectorData( [ "a", "c" ] ), False ),
( "d", IECore.StringVectorData( [ "a", "c" ] ), True ),
( "d", None, True ),
]
o = IECore.CompoundData()
for t in thingsToAdd :
h = o.hash()
for i in range( 0, 10 ) :
self.assertEqual( h, o.hash() )
if t[1] is not None :
o[t[0]] = t[1]
else :
del o[t[0]]
if t[2] :
self.assertNotEqual( h, o.hash() )
else :
self.assertEqual( h, o.hash() )
h = o.hash()
def tearDown(self):
if os.path.isfile("./test/CompoundData.fio") :
os.remove("./test/CompoundData.fio")
if __name__ == "__main__":
unittest.main()
| true | true |
1c313ecf741a8525f0d69119189313d14407c4bb | 72 | py | Python | oauth_login/login/__init__.py | vinoth3v/In_addon_oauth_login | b7ebfaa8d3a3c455d58300ac7c23da761273aadf | [
"Apache-2.0"
] | 1 | 2015-12-16T03:25:31.000Z | 2015-12-16T03:25:31.000Z | oauth_login/login/__init__.py | vinoth3v/In_addon_oauth_login | b7ebfaa8d3a3c455d58300ac7c23da761273aadf | [
"Apache-2.0"
] | null | null | null | oauth_login/login/__init__.py | vinoth3v/In_addon_oauth_login | b7ebfaa8d3a3c455d58300ac7c23da761273aadf | [
"Apache-2.0"
] | 1 | 2019-09-13T10:12:23.000Z | 2019-09-13T10:12:23.000Z | from .oauth_login import *
from .google import *
from .facebook import * | 24 | 26 | 0.763889 | from .oauth_login import *
from .google import *
from .facebook import * | true | true |
1c313f615a5d3d57e72c3049381ae4c7671ebbf2 | 812 | py | Python | srv/item/getBudget.py | jphacks/KB_1814 | 7ae538272f960a5f21460961ebf1112a6e819e3e | [
"MIT"
] | 5 | 2018-10-19T11:09:35.000Z | 2020-02-14T07:31:52.000Z | srv/item/getBudget.py | jphacks/KB_1814 | 7ae538272f960a5f21460961ebf1112a6e819e3e | [
"MIT"
] | null | null | null | srv/item/getBudget.py | jphacks/KB_1814 | 7ae538272f960a5f21460961ebf1112a6e819e3e | [
"MIT"
] | null | null | null | import requests
import json
def getBudget(a, b, c):
latitude = a
longitude = b
name = c
url = "https://api.gnavi.co.jp/RestSearchAPI/20150630/?keyid=264a257d88e6a732c7195178e8f86f90&format=json&latitude=" + latitude + "&longitude="+ longitude +"&name=" + name
headers = {"content-type": "application/json"}
r = requests.get(url, headers=headers)
data = r.json()
print("aaaa")
# print (json.dumps(data, indent=4))
budget = data['rest']['budget']
lunch = data['rest']['lunch']
if budget != {}:
print ("budget : " + budget + "円")
if lunch != {}:
print ("lunch : " + lunch + "円")
# Bug fix: this previously compared __name__ against the literal string
# "__name__", which is never true, so the demo below could not run.
if __name__ == "__main__":
    latitude = "34.702492"
    longitude = "135.4959658"
    name = "UMEDAI Garden Restaurant"
    getBudget(latitude, longitude, name)
| 28 | 175 | 0.608374 | import requests
import json
def getBudget(a, b, c):
latitude = a
longitude = b
name = c
url = "https://api.gnavi.co.jp/RestSearchAPI/20150630/?keyid=264a257d88e6a732c7195178e8f86f90&format=json&latitude=" + latitude + "&longitude="+ longitude +"&name=" + name
headers = {"content-type": "application/json"}
r = requests.get(url, headers=headers)
data = r.json()
print("aaaa")
budget = data['rest']['budget']
lunch = data['rest']['lunch']
if budget != {}:
print ("budget : " + budget + "円")
if lunch != {}:
print ("lunch : " + lunch + "円")
if __name__ == "__name__":
latitude = "34.702492"
longitude = "135.4959658"
name = "UMEDAI Garden Restaurant"
getBudget(latitude, longitude, name)
| true | true |
1c31409a6f754c3d642a2e3a3784a98d7c74ce04 | 3,200 | py | Python | OneImage2Video/school_badge.py | HypoX64/bilibili | 992029667ad37d7d03131aa2c4c9923da6cca6f2 | [
"MIT"
] | 24 | 2020-05-24T10:39:24.000Z | 2022-03-09T02:38:09.000Z | OneImage2Video/school_badge.py | HypoX64/bilibili | 992029667ad37d7d03131aa2c4c9923da6cca6f2 | [
"MIT"
] | null | null | null | OneImage2Video/school_badge.py | HypoX64/bilibili | 992029667ad37d7d03131aa2c4c9923da6cca6f2 | [
"MIT"
] | 2 | 2021-03-24T13:54:17.000Z | 2021-08-07T12:23:51.000Z | import os
import cv2
import numpy as np
import sys
sys.path.append("..")
from Util import util,ffmpeg
# 用校徽看badapple
imgs_dir = './pixel_imgs/university/base'
highlight_dir = './pixel_imgs/university/highlight'
background_dir = './pixel_imgs/university/background'
cut_size = 79
pixel_resize = 0 # resize pixel_imgs, if 0, do not resize
output_pixel_num = 18 # how many pixels in the output video'width
video_path = '../Video/素材/bad_apple_bbkkbk/BadApple.flv'
change_frame = 2
# ------------------------- Load Blocks -------------------------
pixels = []
img_names = os.listdir(imgs_dir)
img_names.sort()
for name in img_names:
img = cv2.imread(os.path.join(imgs_dir,name))
for h in range(img.shape[0]//cut_size):
for w in range(img.shape[1]//cut_size):
pixel = img[h*cut_size:(h+1)*cut_size,w*cut_size:(w+1)*cut_size]
if pixel_resize != 0:
pixel = cv2.resize(pixel,(pixel_resize,pixel_resize),interpolation=cv2.INTER_AREA)
pixels.append(pixel)
pixel_size = pixels[0].shape[0]
# highlight
img_names = os.listdir(highlight_dir)
img_names.sort()
for name in img_names:
pixel = cv2.imread(os.path.join(highlight_dir,name))
pixel = cv2.resize(pixel,(pixel_size,pixel_size),interpolation=cv2.INTER_AREA)
for i in range(10):
pixels.append(pixel)
pixels = np.array(pixels)
# background
background_name = os.listdir(background_dir)[0]
background = cv2.imread(os.path.join(background_dir,background_name))
background = cv2.resize(background,(pixel_size,pixel_size),interpolation=cv2.INTER_AREA)
# ------------------------- Prcessing Video -------------------------
fps,endtime,height,width = ffmpeg.get_video_infos(video_path)
scale = height/width
util.clean_tempfiles(False)
util.makedirs('./tmp/vid2img')
util.makedirs('./tmp/output_img')
ffmpeg.video2image(video_path, './tmp/vid2img/%05d.png')
ffmpeg.video2voice(video_path, './tmp/tmp.mp3')
# ------------------------- Video2Block -------------------------
print('Video2Block...')
img_names = os.listdir('./tmp/vid2img')
img_names.sort()
frame = 0
for img_name in img_names:
img = cv2.imread(os.path.join('./tmp/vid2img',img_name))
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (output_pixel_num,int(output_pixel_num*scale)),interpolation=cv2.INTER_AREA)
h,w = img.shape
if frame %change_frame == 0:
indexs = np.random.randint(0, pixels.shape[0]-1, (h,w))
out_img = np.zeros((h*pixel_size,w*pixel_size,3), dtype = np.uint8)
for i in range(h):
for j in range(w):
#index = np.clip(img[i,j]//level,0,len(pixels)-1)
if img[i,j] < 64:
out_img[i*pixel_size:(i+1)*pixel_size,j*pixel_size:(j+1)*pixel_size] = pixels[indexs[i,j]]
else:
out_img[i*pixel_size:(i+1)*pixel_size,j*pixel_size:(j+1)*pixel_size] = background
out_img = out_img[:(h*pixel_size//2)*2,:(w*pixel_size//2)*2]
cv2.imwrite(os.path.join('./tmp/output_img',img_name), out_img)
frame += 1
# ------------------------- Block2Video -------------------------
ffmpeg.image2video(fps, './tmp/output_img/%05d.png', './tmp/tmp.mp3', './result.mp4')
| 36.363636 | 106 | 0.647188 | import os
import cv2
import numpy as np
import sys
sys.path.append("..")
from Util import util,ffmpeg
imgs_dir = './pixel_imgs/university/base'
highlight_dir = './pixel_imgs/university/highlight'
background_dir = './pixel_imgs/university/background'
cut_size = 79
pixel_resize = 0
output_pixel_num = 18
video_path = '../Video/素材/bad_apple_bbkkbk/BadApple.flv'
change_frame = 2
# ------------------------- Load Blocks -------------------------
pixels = []
img_names = os.listdir(imgs_dir)
img_names.sort()
for name in img_names:
img = cv2.imread(os.path.join(imgs_dir,name))
for h in range(img.shape[0]//cut_size):
for w in range(img.shape[1]//cut_size):
pixel = img[h*cut_size:(h+1)*cut_size,w*cut_size:(w+1)*cut_size]
if pixel_resize != 0:
pixel = cv2.resize(pixel,(pixel_resize,pixel_resize),interpolation=cv2.INTER_AREA)
pixels.append(pixel)
pixel_size = pixels[0].shape[0]
# highlight
img_names = os.listdir(highlight_dir)
img_names.sort()
for name in img_names:
pixel = cv2.imread(os.path.join(highlight_dir,name))
pixel = cv2.resize(pixel,(pixel_size,pixel_size),interpolation=cv2.INTER_AREA)
for i in range(10):
pixels.append(pixel)
pixels = np.array(pixels)
# background
background_name = os.listdir(background_dir)[0]
background = cv2.imread(os.path.join(background_dir,background_name))
background = cv2.resize(background,(pixel_size,pixel_size),interpolation=cv2.INTER_AREA)
# ------------------------- Prcessing Video -------------------------
fps,endtime,height,width = ffmpeg.get_video_infos(video_path)
scale = height/width
util.clean_tempfiles(False)
util.makedirs('./tmp/vid2img')
util.makedirs('./tmp/output_img')
ffmpeg.video2image(video_path, './tmp/vid2img/%05d.png')
ffmpeg.video2voice(video_path, './tmp/tmp.mp3')
# ------------------------- Video2Block -------------------------
print('Video2Block...')
img_names = os.listdir('./tmp/vid2img')
img_names.sort()
frame = 0
for img_name in img_names:
img = cv2.imread(os.path.join('./tmp/vid2img',img_name))
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (output_pixel_num,int(output_pixel_num*scale)),interpolation=cv2.INTER_AREA)
h,w = img.shape
if frame %change_frame == 0:
indexs = np.random.randint(0, pixels.shape[0]-1, (h,w))
out_img = np.zeros((h*pixel_size,w*pixel_size,3), dtype = np.uint8)
for i in range(h):
for j in range(w):
#index = np.clip(img[i,j]//level,0,len(pixels)-1)
if img[i,j] < 64:
out_img[i*pixel_size:(i+1)*pixel_size,j*pixel_size:(j+1)*pixel_size] = pixels[indexs[i,j]]
else:
out_img[i*pixel_size:(i+1)*pixel_size,j*pixel_size:(j+1)*pixel_size] = background
out_img = out_img[:(h*pixel_size//2)*2,:(w*pixel_size//2)*2]
cv2.imwrite(os.path.join('./tmp/output_img',img_name), out_img)
frame += 1
# ------------------------- Block2Video -------------------------
ffmpeg.image2video(fps, './tmp/output_img/%05d.png', './tmp/tmp.mp3', './result.mp4')
| true | true |
1c31419028d0e3ca587e782497408e42320f2b43 | 18,493 | py | Python | tests/unittests/storage/test_storage.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 4 | 2020-03-25T17:44:40.000Z | 2020-04-10T13:53:13.000Z | tests/unittests/storage/test_storage.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 2 | 2018-06-26T19:17:09.000Z | 2022-02-23T13:40:04.000Z | tests/unittests/storage/test_storage.py | aimar1986bupt/orion | 6d217af1f9002aa671f8a3260a687c540ca5336d | [
"BSD-3-Clause"
] | 2 | 2019-08-26T11:36:47.000Z | 2020-04-07T13:05:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.storage`."""
import copy
import datetime
import json
import tempfile
import pytest
from orion.core.io.database import DuplicateKeyError
from orion.core.utils.tests import OrionState
from orion.core.worker.trial import Trial
from orion.storage.base import FailedUpdate, get_storage, MissingArguments
# Storage backends exercised by the parametrized TestStorage class below.
storage_backends = [
    None,  # defaults to legacy with PickleDB
]

# Template experiment document used by the tests.
base_experiment = {
    'name': 'default_name',
    'version': 0,
    'metadata': {
        'user': 'default_user',
        'user_script': 'abc',
        'datetime': '2017-11-23T02:00:00'
    }
}

# Template trial document; tests derive variants via _generate().
base_trial = {
    'experiment': 'default_name',
    'status': 'new',  # new, reserved, suspended, completed, broken
    'worker': None,
    'submit_time': '2017-11-23T02:00:00',
    'start_time': None,
    'end_time': None,
    'heartbeat': None,
    'results': [
        {'name': 'loss',
         'type': 'objective',  # objective, constraint
         'value': 2}
    ],
    'params': [
        {'name': '/encoding_layer',
         'type': 'categorical',
         'value': 'rnn'},
        {'name': '/decoding_layer',
         'type': 'categorical',
         'value': 'lstm_with_attention'}
    ]
}
def _generate(obj, *args, value):
if obj is None:
return None
obj = copy.deepcopy(obj)
data = obj
for arg in args[:-1]:
data = data[arg]
data[args[-1]] = value
return obj
def make_lost_trial():
    """Return a reserved trial whose heartbeat is stale enough to be
    considered lost, made unique via an extra '/index' parameter."""
    trial = copy.deepcopy(base_trial)
    trial['status'] = 'reserved'
    # two heartbeat periods in the past, so the trial counts as lost
    trial['heartbeat'] = datetime.datetime.utcnow() - datetime.timedelta(seconds=61 * 2)
    trial['params'].append(
        {'name': '/index', 'type': 'categorical', 'value': 'lost_trial'})
    return trial
# every status a trial can take; the default status set for generate_trials()
all_status = ['completed', 'broken', 'reserved', 'interrupted', 'suspended', 'new']
def generate_trials(status=None):
    """Generate one trial per entry of ``status`` (defaults to every
    known status), each made unique by an extra '/index' parameter."""
    statuses = all_status if status is None else status

    trials = [_generate(base_trial, 'status', value=s) for s in statuses]

    # make each trial unique
    for index, trial in enumerate(trials):
        trial['params'].append(
            {'name': '/index', 'type': 'categorical', 'value': index})

    return trials
def generate_experiments():
    """Generate experiments named '0', '1', '2', owned respectively by
    users 'a', 'b' and 'c'."""
    experiments = []
    for index, user in enumerate(['a', 'b', 'c']):
        experiment = _generate(base_experiment, 'metadata', 'user', value=user)
        experiments.append(_generate(experiment, 'name', value=str(index)))
    return experiments
@pytest.mark.parametrize('storage', storage_backends)
class TestStorage:
"""Test all storage backend"""
def test_create_experiment(self, storage):
"""Test create experiment"""
with OrionState(experiments=[], database=storage) as cfg:
storage = cfg.storage()
storage.create_experiment(base_experiment)
experiments = storage.fetch_experiments({})
assert len(experiments) == 1, 'Only one experiment in the database'
experiment = experiments[0]
assert base_experiment == experiment, 'Local experiment and DB should match'
def test_create_experiment_fail(self, storage):
"""Test create experiment"""
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.create_experiment(base_experiment)
def test_fetch_experiments(self, storage, name='0', user='a'):
"""Test fetch experiments"""
with OrionState(experiments=generate_experiments(), database=storage) as cfg:
storage = cfg.storage()
experiments = storage.fetch_experiments({})
assert len(experiments) == len(cfg.experiments)
experiments = storage.fetch_experiments({'name': name, 'metadata.user': user})
assert len(experiments) == 1
experiment = experiments[0]
assert experiment['name'] == name, 'name should match query'
assert experiment['metadata']['user'] == user, 'user name should match query'
experiments = storage.fetch_experiments({'name': '-1', 'metadata.user': user})
assert len(experiments) == 0
def test_update_experiment(self, monkeypatch, storage, name='0', user='a'):
"""Test fetch experiments"""
with OrionState(experiments=generate_experiments(), database=storage) as cfg:
storage = cfg.storage()
class _Dummy():
pass
experiment = cfg.experiments[0]
mocked_experiment = _Dummy()
mocked_experiment._id = experiment['_id']
storage.update_experiment(mocked_experiment, test=True)
assert storage.fetch_experiments({'_id': experiment['_id']})[0]['test']
assert 'test' not in storage.fetch_experiments({'_id': cfg.experiments[1]['_id']})[0]
storage.update_experiment(uid=experiment['_id'], test2=True)
assert storage.fetch_experiments({'_id': experiment['_id']})[0]['test2']
assert 'test2' not in storage.fetch_experiments({'_id': cfg.experiments[1]['_id']})[0]
with pytest.raises(MissingArguments):
storage.update_experiment()
with pytest.raises(AssertionError):
storage.update_experiment(experiment=mocked_experiment, uid='123')
def test_register_trial(self, storage):
"""Test register trial"""
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
trial1 = storage.register_trial(Trial(**base_trial))
trial2 = storage.get_trial(trial1)
assert trial1.to_dict() == trial2.to_dict(), 'Trials should match after insert'
def test_register_duplicate_trial(self, storage):
"""Test register trial"""
with OrionState(
experiments=[base_experiment], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.register_trial(Trial(**base_trial))
def test_register_lie(self, storage):
"""Test register lie"""
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
storage.register_lie(Trial(**base_trial))
def test_register_lie_fail(self, storage):
"""Test register lie"""
with OrionState(experiments=[base_experiment], lies=[base_trial], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.register_lie(Trial(**cfg.lies[0]))
def test_reserve_trial_success(self, storage):
"""Test reserve trial"""
with OrionState(
experiments=[base_experiment], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trial = storage.reserve_trial(experiment)
assert trial is not None
assert trial.status == 'reserved'
def test_reserve_trial_fail(self, storage):
"""Test reserve trial"""
with OrionState(
experiments=[base_experiment],
trials=generate_trials(status=['completed', 'reserved']),
database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trial = storage.reserve_trial(experiment)
assert trial is None
def test_fetch_trials(self, storage):
"""Test fetch experiment trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials1 = storage.fetch_trials(experiment=experiment)
trials2 = storage.fetch_trials(uid=experiment._id)
with pytest.raises(MissingArguments):
storage.fetch_trials()
with pytest.raises(AssertionError):
storage.fetch_trials(experiment=experiment, uid='123')
assert len(trials1) == len(cfg.trials), 'trial count should match'
assert len(trials2) == len(cfg.trials), 'trial count should match'
def test_get_trial(self, storage):
"""Test get trial"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
trial_dict = cfg.trials[0]
trial1 = storage.get_trial(trial=Trial(**trial_dict))
trial2 = storage.get_trial(uid=trial1.id)
with pytest.raises(MissingArguments):
storage.get_trial()
with pytest.raises(AssertionError):
storage.get_trial(trial=trial1, uid='123')
assert trial1.to_dict() == trial_dict
assert trial2.to_dict() == trial_dict
def test_fetch_lost_trials(self, storage):
"""Test update heartbeat"""
with OrionState(experiments=[base_experiment],
trials=generate_trials() + [make_lost_trial()], database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_lost_trials(experiment)
assert len(trials) == 1
def retrieve_result(self, storage, generated_result):
"""Test retrieve result"""
results_file = tempfile.NamedTemporaryFile(
mode='w', prefix='results_', suffix='.log', dir='.', delete=True
)
# Generate fake result
with open(results_file.name, 'w') as file:
json.dump([generated_result], file)
# --
with OrionState(experiments=[], trials=[], database=storage) as cfg:
storage = cfg.storage()
trial = Trial(**base_trial)
trial = storage.retrieve_result(trial, results_file)
results = trial.results
assert len(results) == 1
assert results[0].to_dict() == generated_result
def test_retrieve_result(self, storage):
"""Test retrieve result"""
self.retrieve_result(storage, generated_result={
'name': 'loss',
'type': 'objective',
'value': 2})
def test_retrieve_result_incorrect_value(self, storage):
"""Test retrieve result"""
with pytest.raises(ValueError) as exec:
self.retrieve_result(storage, generated_result={
'name': 'loss',
'type': 'objective_unsupported_type',
'value': 2})
assert exec.match(r'Given type, objective_unsupported_type')
def test_retrieve_result_nofile(self, storage):
"""Test retrieve result"""
results_file = tempfile.NamedTemporaryFile(
mode='w', prefix='results_', suffix='.log', dir='.', delete=True
)
with OrionState(experiments=[], trials=[], database=storage) as cfg:
storage = cfg.storage()
trial = Trial(**base_trial)
with pytest.raises(json.decoder.JSONDecodeError) as exec:
storage.retrieve_result(trial, results_file)
assert exec.match(r'Expecting value: line 1 column 1 \(char 0\)')
def test_push_trial_results(self, storage):
"""Successfully push a completed trial into database."""
with OrionState(experiments=[], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
trial = storage.get_trial(Trial(**base_trial))
results = [
Trial.Result(name='loss', type='objective', value=2)
]
trial.results = results
assert storage.push_trial_results(trial), 'should update successfully'
trial2 = storage.get_trial(trial)
assert trial2.results == results
def test_change_status_success(self, storage, exp_config_file):
"""Change the status of a Trial"""
def check_status_change(new_status):
with OrionState(from_yaml=exp_config_file, database=storage) as cfg:
trial = cfg.get_trial(0)
assert trial is not None, 'was not able to retrieve trial for test'
get_storage().set_trial_status(trial, status=new_status)
assert trial.status == new_status, \
'Trial status should have been updated locally'
trial = get_storage().get_trial(trial)
assert trial.status == new_status, \
'Trial status should have been updated in the storage'
check_status_change('completed')
check_status_change('broken')
check_status_change('reserved')
check_status_change('interrupted')
check_status_change('suspended')
check_status_change('new')
def test_change_status_failed_update(self, storage, exp_config_file):
"""Successfully find new trials in db and reserve one at 'random'."""
def check_status_change(new_status):
with OrionState(from_yaml=exp_config_file, database=storage) as cfg:
trial = cfg.get_trial(1)
assert trial is not None, 'Was not able to retrieve trial for test'
assert trial.status != new_status
with pytest.raises(FailedUpdate):
trial.status = new_status
get_storage().set_trial_status(trial, status=new_status)
check_status_change('completed')
check_status_change('broken')
check_status_change('reserved')
check_status_change('interrupted')
check_status_change('suspended')
def test_fetch_pending_trials(self, storage):
"""Test fetch pending trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_pending_trials(experiment)
count = 0
for trial in cfg.trials:
if trial['status'] in {'new', 'suspended', 'interrupted'}:
count += 1
assert len(trials) == count
for trial in trials:
assert trial.status in {'new', 'suspended', 'interrupted'}
def test_fetch_noncompleted_trials(self, storage):
"""Test fetch non completed trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_noncompleted_trials(experiment)
count = 0
for trial in cfg.trials:
if trial['status'] != 'completed':
count += 1
assert len(trials) == count
for trial in trials:
assert trial.status != 'completed'
def test_fetch_trial_by_status(self, storage):
"""Test fetch completed trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'completed':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_trial_by_status(experiment, 'completed')
assert len(trials) == count
for trial in trials:
assert trial.status == 'completed', trial
def test_count_completed_trials(self, storage):
"""Test count completed trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'completed':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.count_completed_trials(experiment)
assert trials == count
def test_count_broken_trials(self, storage):
"""Test count broken trials"""
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'broken':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.count_broken_trials(experiment)
assert trials == count
    def test_update_heartbeat(self, storage):
        """Test update heartbeat"""
        with OrionState(
                experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
            # Keep the fixture parameter; the local name `storage` is rebound
            # to the backend instance just below.
            storage_name = storage
            storage = cfg.storage()
            exp = cfg.get_experiment(name='default_name')
            trial1 = storage.fetch_trial_by_status(exp, status='reserved')[0]
            storage.update_heartbeat(trial1)
            # The local trial object is untouched; only the stored copy changes.
            trial2 = storage.get_trial(trial1)
            assert trial1.heartbeat is None
            assert trial2.heartbeat is not None
            # this checks that heartbeat is the correct type and that it was updated prior to now
            assert trial2.heartbeat < datetime.datetime.utcnow()
            # A fixture value of None corresponds to the Legacy backend
            # (see storage_backends at module level).
            if storage_name is None:
                trial3 = storage.fetch_trial_by_status(exp, status='completed')[0]
                storage.update_heartbeat(trial3)
                assert trial3.heartbeat is None, \
                    'Legacy does not update trials with a status different from reserved'
| 36.912176 | 99 | 0.607689 |
import copy
import datetime
import json
import tempfile
import pytest
from orion.core.io.database import DuplicateKeyError
from orion.core.utils.tests import OrionState
from orion.core.worker.trial import Trial
from orion.storage.base import FailedUpdate, get_storage, MissingArguments
storage_backends = [
None,
]
base_experiment = {
'name': 'default_name',
'version': 0,
'metadata': {
'user': 'default_user',
'user_script': 'abc',
'datetime': '2017-11-23T02:00:00'
}
}
base_trial = {
'experiment': 'default_name',
'status': 'new',
'worker': None,
'submit_time': '2017-11-23T02:00:00',
'start_time': None,
'end_time': None,
'heartbeat': None,
'results': [
{'name': 'loss',
'type': 'objective',
'value': 2}
],
'params': [
{'name': '/encoding_layer',
'type': 'categorical',
'value': 'rnn'},
{'name': '/decoding_layer',
'type': 'categorical',
'value': 'lstm_with_attention'}
]
}
def _generate(obj, *args, value):
if obj is None:
return None
obj = copy.deepcopy(obj)
data = obj
for arg in args[:-1]:
data = data[arg]
data[args[-1]] = value
return obj
def make_lost_trial():
    """Return a reserved trial whose heartbeat is stale enough to look lost.

    The heartbeat is backdated 2 * 61 seconds, and a distinctive '/index'
    param marks the trial so tests can recognize it.
    """
    lost = copy.deepcopy(base_trial)
    lost['status'] = 'reserved'
    lost['heartbeat'] = datetime.datetime.utcnow() - datetime.timedelta(seconds=61 * 2)
    lost['params'].append(
        {'name': '/index', 'type': 'categorical', 'value': 'lost_trial'})
    return lost
all_status = ['completed', 'broken', 'reserved', 'interrupted', 'suspended', 'new']
def generate_trials(status=None):
    """Build one trial per status, each tagged with a unique '/index' param.

    Defaults to one trial for every status in ``all_status``.
    """
    statuses = all_status if status is None else status
    trials = [_generate(base_trial, 'status', value=s) for s in statuses]
    for index, trial in enumerate(trials):
        trial['params'].append(
            {'name': '/index', 'type': 'categorical', 'value': index})
    return trials
def generate_experiments():
    """Build three experiments named '0'..'2' owned by users 'a'..'c'."""
    per_user = [
        _generate(base_experiment, 'metadata', 'user', value=user)
        for user in ('a', 'b', 'c')
    ]
    return [
        _generate(exp, 'name', value=str(index))
        for index, exp in enumerate(per_user)
    ]
@pytest.mark.parametrize('storage', storage_backends)
class TestStorage:
def test_create_experiment(self, storage):
with OrionState(experiments=[], database=storage) as cfg:
storage = cfg.storage()
storage.create_experiment(base_experiment)
experiments = storage.fetch_experiments({})
assert len(experiments) == 1, 'Only one experiment in the database'
experiment = experiments[0]
assert base_experiment == experiment, 'Local experiment and DB should match'
def test_create_experiment_fail(self, storage):
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.create_experiment(base_experiment)
def test_fetch_experiments(self, storage, name='0', user='a'):
with OrionState(experiments=generate_experiments(), database=storage) as cfg:
storage = cfg.storage()
experiments = storage.fetch_experiments({})
assert len(experiments) == len(cfg.experiments)
experiments = storage.fetch_experiments({'name': name, 'metadata.user': user})
assert len(experiments) == 1
experiment = experiments[0]
assert experiment['name'] == name, 'name should match query'
assert experiment['metadata']['user'] == user, 'user name should match query'
experiments = storage.fetch_experiments({'name': '-1', 'metadata.user': user})
assert len(experiments) == 0
def test_update_experiment(self, monkeypatch, storage, name='0', user='a'):
with OrionState(experiments=generate_experiments(), database=storage) as cfg:
storage = cfg.storage()
class _Dummy():
pass
experiment = cfg.experiments[0]
mocked_experiment = _Dummy()
mocked_experiment._id = experiment['_id']
storage.update_experiment(mocked_experiment, test=True)
assert storage.fetch_experiments({'_id': experiment['_id']})[0]['test']
assert 'test' not in storage.fetch_experiments({'_id': cfg.experiments[1]['_id']})[0]
storage.update_experiment(uid=experiment['_id'], test2=True)
assert storage.fetch_experiments({'_id': experiment['_id']})[0]['test2']
assert 'test2' not in storage.fetch_experiments({'_id': cfg.experiments[1]['_id']})[0]
with pytest.raises(MissingArguments):
storage.update_experiment()
with pytest.raises(AssertionError):
storage.update_experiment(experiment=mocked_experiment, uid='123')
def test_register_trial(self, storage):
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
trial1 = storage.register_trial(Trial(**base_trial))
trial2 = storage.get_trial(trial1)
assert trial1.to_dict() == trial2.to_dict(), 'Trials should match after insert'
def test_register_duplicate_trial(self, storage):
with OrionState(
experiments=[base_experiment], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.register_trial(Trial(**base_trial))
def test_register_lie(self, storage):
with OrionState(experiments=[base_experiment], database=storage) as cfg:
storage = cfg.storage()
storage.register_lie(Trial(**base_trial))
def test_register_lie_fail(self, storage):
with OrionState(experiments=[base_experiment], lies=[base_trial], database=storage) as cfg:
storage = cfg.storage()
with pytest.raises(DuplicateKeyError):
storage.register_lie(Trial(**cfg.lies[0]))
def test_reserve_trial_success(self, storage):
with OrionState(
experiments=[base_experiment], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trial = storage.reserve_trial(experiment)
assert trial is not None
assert trial.status == 'reserved'
def test_reserve_trial_fail(self, storage):
with OrionState(
experiments=[base_experiment],
trials=generate_trials(status=['completed', 'reserved']),
database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trial = storage.reserve_trial(experiment)
assert trial is None
def test_fetch_trials(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials1 = storage.fetch_trials(experiment=experiment)
trials2 = storage.fetch_trials(uid=experiment._id)
with pytest.raises(MissingArguments):
storage.fetch_trials()
with pytest.raises(AssertionError):
storage.fetch_trials(experiment=experiment, uid='123')
assert len(trials1) == len(cfg.trials), 'trial count should match'
assert len(trials2) == len(cfg.trials), 'trial count should match'
def test_get_trial(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
trial_dict = cfg.trials[0]
trial1 = storage.get_trial(trial=Trial(**trial_dict))
trial2 = storage.get_trial(uid=trial1.id)
with pytest.raises(MissingArguments):
storage.get_trial()
with pytest.raises(AssertionError):
storage.get_trial(trial=trial1, uid='123')
assert trial1.to_dict() == trial_dict
assert trial2.to_dict() == trial_dict
def test_fetch_lost_trials(self, storage):
with OrionState(experiments=[base_experiment],
trials=generate_trials() + [make_lost_trial()], database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_lost_trials(experiment)
assert len(trials) == 1
def retrieve_result(self, storage, generated_result):
results_file = tempfile.NamedTemporaryFile(
mode='w', prefix='results_', suffix='.log', dir='.', delete=True
)
with open(results_file.name, 'w') as file:
json.dump([generated_result], file)
with OrionState(experiments=[], trials=[], database=storage) as cfg:
storage = cfg.storage()
trial = Trial(**base_trial)
trial = storage.retrieve_result(trial, results_file)
results = trial.results
assert len(results) == 1
assert results[0].to_dict() == generated_result
def test_retrieve_result(self, storage):
self.retrieve_result(storage, generated_result={
'name': 'loss',
'type': 'objective',
'value': 2})
def test_retrieve_result_incorrect_value(self, storage):
with pytest.raises(ValueError) as exec:
self.retrieve_result(storage, generated_result={
'name': 'loss',
'type': 'objective_unsupported_type',
'value': 2})
assert exec.match(r'Given type, objective_unsupported_type')
def test_retrieve_result_nofile(self, storage):
results_file = tempfile.NamedTemporaryFile(
mode='w', prefix='results_', suffix='.log', dir='.', delete=True
)
with OrionState(experiments=[], trials=[], database=storage) as cfg:
storage = cfg.storage()
trial = Trial(**base_trial)
with pytest.raises(json.decoder.JSONDecodeError) as exec:
storage.retrieve_result(trial, results_file)
assert exec.match(r'Expecting value: line 1 column 1 \(char 0\)')
def test_push_trial_results(self, storage):
with OrionState(experiments=[], trials=[base_trial], database=storage) as cfg:
storage = cfg.storage()
trial = storage.get_trial(Trial(**base_trial))
results = [
Trial.Result(name='loss', type='objective', value=2)
]
trial.results = results
assert storage.push_trial_results(trial), 'should update successfully'
trial2 = storage.get_trial(trial)
assert trial2.results == results
def test_change_status_success(self, storage, exp_config_file):
def check_status_change(new_status):
with OrionState(from_yaml=exp_config_file, database=storage) as cfg:
trial = cfg.get_trial(0)
assert trial is not None, 'was not able to retrieve trial for test'
get_storage().set_trial_status(trial, status=new_status)
assert trial.status == new_status, \
'Trial status should have been updated locally'
trial = get_storage().get_trial(trial)
assert trial.status == new_status, \
'Trial status should have been updated in the storage'
check_status_change('completed')
check_status_change('broken')
check_status_change('reserved')
check_status_change('interrupted')
check_status_change('suspended')
check_status_change('new')
def test_change_status_failed_update(self, storage, exp_config_file):
def check_status_change(new_status):
with OrionState(from_yaml=exp_config_file, database=storage) as cfg:
trial = cfg.get_trial(1)
assert trial is not None, 'Was not able to retrieve trial for test'
assert trial.status != new_status
with pytest.raises(FailedUpdate):
trial.status = new_status
get_storage().set_trial_status(trial, status=new_status)
check_status_change('completed')
check_status_change('broken')
check_status_change('reserved')
check_status_change('interrupted')
check_status_change('suspended')
def test_fetch_pending_trials(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_pending_trials(experiment)
count = 0
for trial in cfg.trials:
if trial['status'] in {'new', 'suspended', 'interrupted'}:
count += 1
assert len(trials) == count
for trial in trials:
assert trial.status in {'new', 'suspended', 'interrupted'}
def test_fetch_noncompleted_trials(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_noncompleted_trials(experiment)
count = 0
for trial in cfg.trials:
if trial['status'] != 'completed':
count += 1
assert len(trials) == count
for trial in trials:
assert trial.status != 'completed'
def test_fetch_trial_by_status(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'completed':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.fetch_trial_by_status(experiment, 'completed')
assert len(trials) == count
for trial in trials:
assert trial.status == 'completed', trial
def test_count_completed_trials(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'completed':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.count_completed_trials(experiment)
assert trials == count
def test_count_broken_trials(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
count = 0
for trial in cfg.trials:
if trial['status'] == 'broken':
count += 1
storage = cfg.storage()
experiment = cfg.get_experiment('default_name', 'default_user', version=None)
trials = storage.count_broken_trials(experiment)
assert trials == count
def test_update_heartbeat(self, storage):
with OrionState(
experiments=[base_experiment], trials=generate_trials(), database=storage) as cfg:
storage_name = storage
storage = cfg.storage()
exp = cfg.get_experiment(name='default_name')
trial1 = storage.fetch_trial_by_status(exp, status='reserved')[0]
storage.update_heartbeat(trial1)
trial2 = storage.get_trial(trial1)
assert trial1.heartbeat is None
assert trial2.heartbeat is not None
assert trial2.heartbeat < datetime.datetime.utcnow()
if storage_name is None:
trial3 = storage.fetch_trial_by_status(exp, status='completed')[0]
storage.update_heartbeat(trial3)
assert trial3.heartbeat is None, \
'Legacy does not update trials with a status different from reserved'
| true | true |
1c31447e628621eb59455f56b1ff028fed8377fa | 7,091 | py | Python | adanet/core/report_accessor_test.py | intruder1912/adanet | dfa2f0acc253d1de193aaa795b5559bc471f9ed8 | [
"Apache-2.0"
] | 1 | 2018-11-02T04:57:02.000Z | 2018-11-02T04:57:02.000Z | adanet/core/report_accessor_test.py | oz99999/adanet | 69354c4e961defca790a1ce0e042251dfbe4f410 | [
"Apache-2.0"
] | null | null | null | adanet/core/report_accessor_test.py | oz99999/adanet | 69354c4e961defca790a1ce0e042251dfbe4f410 | [
"Apache-2.0"
] | 1 | 2021-12-14T08:18:17.000Z | 2021-12-14T08:18:17.000Z | """Tests for run_report_accessor.py.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.core import subnetwork
from adanet.core.report_accessor import _ReportAccessor
import tensorflow as tf
class ReportAccessorTest(tf.test.TestCase):
  """Tests reading and writing iteration reports via _ReportAccessor."""

  @staticmethod
  def _report(iteration_number, name, value, included_in_final_ensemble):
    """Builds a MaterializedReport with deterministic fields.

    All numeric hparam/attribute/metric fields equal `value`; the string and
    boolean fields are fixed, matching the literals the tests previously
    repeated inline.
    """
    return subnetwork.MaterializedReport(
        iteration_number=iteration_number,
        name=name,
        hparams={
            "p1": value,
            "p2": "hoo",
            "p3": True,
        },
        attributes={
            "a1": value,
            "a2": "aoo",
            "a3": True,
        },
        metrics={
            "m1": value,
            "m2": "moo",
            "m3": True,
        },
        included_in_final_ensemble=included_in_final_ensemble,
    )

  def test_read_from_empty_file(self):
    report_accessor = _ReportAccessor(self.get_temp_dir())
    self.assertEqual([], list(report_accessor.read_iteration_reports()))

  def test_add_to_empty_file(self):
    report_accessor = _ReportAccessor(self.get_temp_dir())
    materialized_reports = [self._report(0, "foo", 1, True)]

    report_accessor.write_iteration_report(
        iteration_number=0,
        materialized_reports=materialized_reports,
    )
    actual_iteration_reports = list(report_accessor.read_iteration_reports())

    self.assertEqual(1, len(actual_iteration_reports))
    self.assertEqual(materialized_reports, actual_iteration_reports[0])

  def test_add_to_existing_file(self):
    # Three iterations with two subnetworks each; the numeric value encodes
    # (iteration + 1) * 10 + subnetwork index, matching the original fixture.
    materialized_reports = [
        [
            self._report(0, "foo1", 11, False),
            self._report(0, "foo2", 12, True),
        ],
        [
            self._report(1, "foo1", 21, True),
            self._report(1, "foo2", 22, False),
        ],
        [
            self._report(2, "foo1", 31, False),
            self._report(2, "foo2", 32, True),
        ],
    ]

    report_accessor = _ReportAccessor(self.get_temp_dir())
    for iteration_number, reports in enumerate(materialized_reports):
      report_accessor.write_iteration_report(iteration_number, reports)

    actual_reports = list(report_accessor.read_iteration_reports())
    self.assertEqual(materialized_reports, actual_reports)

  def test_write_iteration_report_encoding(self):
    """Tests GitHub issue #4."""

    report_accessor = _ReportAccessor(self.get_temp_dir())
    # Raw bytes that are not valid UTF-8; serialization must not choke.
    bytes_value = b"\n\x83\x01\n;adanet/iteration_2/ensemble_2_layer_dnn/"
    materialized_reports = [
        subnetwork.MaterializedReport(
            iteration_number=0,
            name="foo",
            hparams={
                "p2": bytes_value,
            },
            attributes={
                "a2": bytes_value,
            },
            metrics={
                "m2": bytes_value,
            },
            included_in_final_ensemble=True,
        ),
    ]
    report_accessor.write_iteration_report(
        iteration_number=0,
        materialized_reports=materialized_reports,
    )
    actual_iteration_reports = list(report_accessor.read_iteration_reports())
    self.assertEqual(1, len(actual_iteration_reports))
if __name__ == "__main__":
tf.test.main()
| 29.919831 | 77 | 0.453674 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adanet.core import subnetwork
from adanet.core.report_accessor import _ReportAccessor
import tensorflow as tf
class ReportAccessorTest(tf.test.TestCase):
def test_read_from_empty_file(self):
report_accessor = _ReportAccessor(self.get_temp_dir())
self.assertEqual([], list(report_accessor.read_iteration_reports()))
def test_add_to_empty_file(self):
report_accessor = _ReportAccessor(self.get_temp_dir())
materialized_reports = [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={
"p1": 1,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 1,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 1,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=True,
),
]
report_accessor.write_iteration_report(
iteration_number=0,
materialized_reports=materialized_reports,
)
actual_iteration_reports = list(report_accessor.read_iteration_reports())
self.assertEqual(1, len(actual_iteration_reports))
self.assertEqual(materialized_reports, actual_iteration_reports[0])
def test_add_to_existing_file(self):
materialized_reports = [
[
subnetwork.MaterializedReport(
iteration_number=0,
name="foo1",
hparams={
"p1": 11,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 11,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 11,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=0,
name="foo2",
hparams={
"p1": 12,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 12,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 12,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=True,
),
],
[
subnetwork.MaterializedReport(
iteration_number=1,
name="foo1",
hparams={
"p1": 21,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 21,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 21,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=True,
),
subnetwork.MaterializedReport(
iteration_number=1,
name="foo2",
hparams={
"p1": 22,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 22,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 22,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=False,
),
],
[
subnetwork.MaterializedReport(
iteration_number=2,
name="foo1",
hparams={
"p1": 31,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 31,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 31,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=False,
),
subnetwork.MaterializedReport(
iteration_number=2,
name="foo2",
hparams={
"p1": 32,
"p2": "hoo",
"p3": True,
},
attributes={
"a1": 32,
"a2": "aoo",
"a3": True,
},
metrics={
"m1": 32,
"m2": "moo",
"m3": True,
},
included_in_final_ensemble=True,
),
],
]
report_accessor = _ReportAccessor(self.get_temp_dir())
report_accessor.write_iteration_report(0, materialized_reports[0])
report_accessor.write_iteration_report(1, materialized_reports[1])
report_accessor.write_iteration_report(2, materialized_reports[2])
actual_reports = list(report_accessor.read_iteration_reports())
self.assertEqual(materialized_reports, actual_reports)
def test_write_iteration_report_encoding(self):
report_accessor = _ReportAccessor(self.get_temp_dir())
bytes_value = b"\n\x83\x01\n;adanet/iteration_2/ensemble_2_layer_dnn/"
materialized_reports = [
subnetwork.MaterializedReport(
iteration_number=0,
name="foo",
hparams={
"p2": bytes_value,
},
attributes={
"a2": bytes_value,
},
metrics={
"m2": bytes_value,
},
included_in_final_ensemble=True,
),
]
report_accessor.write_iteration_report(
iteration_number=0,
materialized_reports=materialized_reports,
)
actual_iteration_reports = list(report_accessor.read_iteration_reports())
self.assertEqual(1, len(actual_iteration_reports))
if __name__ == "__main__":
tf.test.main()
| true | true |
1c3145f61b2b8516ef38e743c7ccb56f655d667a | 2,816 | py | Python | video_statistics_data.py | MrSipahi/Youtube_Statistics_Data | 33a08fd334a9c00139727fe6cfa6b0bc95604eba | [
"MIT"
] | null | null | null | video_statistics_data.py | MrSipahi/Youtube_Statistics_Data | 33a08fd334a9c00139727fe6cfa6b0bc95604eba | [
"MIT"
] | null | null | null | video_statistics_data.py | MrSipahi/Youtube_Statistics_Data | 33a08fd334a9c00139727fe6cfa6b0bc95604eba | [
"MIT"
] | null | null | null |
import urllib
import urllib3
import requests
import json
from datetime import datetime
import locale
import pymysql as MySQLdb
db = MySQLdb.connect("ip","user","password","db_names" )
cursor = db.cursor()
#
keys = ["API_KEYS"]
key_numara = 0
API_KEY = keys[key_numara]
query="SELECT * FROM kanal"
cursor.execute(query)
kanallar = cursor.fetchall()
kanal_list=[]
for i in kanallar:
kanal_list.append(i[0])
locale.setlocale(locale.LC_ALL, "")
moment = datetime.now()
toplam=1
def veri_cek(metadata, toplam, API_KEY):
    """Fetch statistics for one YouTube video and insert them into `data`.

    Parameters
    ----------
    metadata : str
        YouTube video id.
    toplam : int
        Running counter used only for progress output.
    API_KEY : str
        YouTube Data API v3 key.

    Returns
    -------
    int or None
        1 when the API request fails (the caller rotates to the next key),
        otherwise None.

    NOTE(review): this relies on the module-level loop variable `i` for the
    channel id and on the globals `cursor`, `db` and `moment` — confirm
    callers always set those up first.
    """
    try:
        SpecificVideoUrl = ('https://www.googleapis.com/youtube/v3/videos'
                            '?part=snippet%2CcontentDetails%2Cstatistics'
                            '&id=' + metadata + '&key=' + API_KEY)
        response = urllib.request.urlopen(SpecificVideoUrl)
    except Exception as e:
        print(e)
        return 1
    videos = json.load(response)
    for video in videos['items']:
        if video['kind'] != 'youtube#video':
            continue
        try:
            # Keep the legacy apostrophe normalization so stored titles stay
            # consistent with existing rows.
            ad = video['snippet']['title'].replace("'", "-")
            goruntulenme = video['statistics']['viewCount']
            begenme = video['statistics']['likeCount']
            begenmeme = video['statistics']['dislikeCount']
            yorum = video['statistics']['commentCount']
            # publishedAt looks like '2020-01-01T12:34:56(.000)Z'.
            published = video['snippet']['publishedAt']
            date_part, _, time_part = published.partition('T')
            yuklenme_tarihi = date_part
            yuklenme_saati = time_part.split('.')[0].split('Z')[0]
            tarih = moment.strftime('%Y-%m-%d')
            # Parameterized INSERT: the previous f-string interpolation was
            # vulnerable to SQL injection via the video title.
            query = (
                'insert into data(videoID,kanal_ID,ad,goruntulenme,begenme,'
                'begenmeme,yorum,yuklenme_tarihi,yuklenme_saati,tarih) '
                'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
            cursor.execute(query, (metadata, i, ad, goruntulenme, begenme,
                                   begenmeme, yorum, yuklenme_tarihi,
                                   yuklenme_saati, tarih))
            db.commit()
        except Exception as a:
            # Best effort per video: log the error and move on.
            print(a)
            continue
        print(f"Toplam= {toplam}")
        toplam = toplam + 1
# Walk every channel, fetch stats for each of its known videos, and rotate
# API keys when a request fails (quota exhausted / invalid key).
for i in kanal_list:
    videoMetadata = []
    # Parameterized SELECT to keep the channel id out of the SQL string.
    cursor.execute(
        "SELECT DISTINCT videoID FROM videoliste where kanal_ID = %s", (i,))
    for j in cursor.fetchall():
        videoMetadata.append(j[0])
        a = veri_cek(j[0], toplam, keys[key_numara])
        if a == 1:
            # Rotate to the next key, wrapping on the actual key count.
            # (The original hard-coded `== 11` and could index past the
            # end of `keys` when fewer keys are configured.)
            key_numara = (key_numara + 1) % len(keys)
            API_KEY = keys[key_numara]
            veri_cek(j[0], toplam, keys[key_numara])
cursor.close()
db.commit()
db.close()
| 30.945055 | 268 | 0.559304 |
import urllib
import urllib3
import requests
import json
from datetime import datetime
import locale
import pymysql as MySQLdb
db = MySQLdb.connect("ip","user","password","db_names" )
cursor = db.cursor()
keys = ["API_KEYS"]
key_numara = 0
API_KEY = keys[key_numara]
query="SELECT * FROM kanal"
cursor.execute(query)
kanallar = cursor.fetchall()
kanal_list=[]
for i in kanallar:
kanal_list.append(i[0])
locale.setlocale(locale.LC_ALL, "")
moment = datetime.now()
toplam=1
def veri_cek(metadata,toplam,API_KEY):
try:
SpecificVideoID = metadata
SpecificVideoUrl = 'https://www.googleapis.com/youtube/v3/videos?part=snippet%2CcontentDetails%2Cstatistics&id='+SpecificVideoID+'&key='+API_KEY
response = urllib.request.urlopen(SpecificVideoUrl)
except Exception as e:
print(e)
return 1
videos = json.load(response)
for video in videos['items']:
if video['kind'] == 'youtube#video':
try:
ad = video["snippet"]["title"]
ad = ad.replace("'","-")
goruntulenme= video['statistics']['viewCount']
begenme = video["statistics"]["likeCount"]
begenmeme=video["statistics"]["dislikeCount"]
yorum = video['statistics']['commentCount']
a = video['snippet']['publishedAt']
b = a.split("T")
c = b[1].split(".")
d = c[0].split("Z")
yuklenme_tarihi = b[0]
yuklenme_saati = d[0]
tarih = moment.strftime("%Y-%m-%d")
query = f"insert into data(videoID,kanal_ID,ad,goruntulenme,begenme,begenmeme,yorum,yuklenme_tarihi,yuklenme_saati,tarih) values ('{metadata}','{i}','{ad}',{goruntulenme},{begenme},{begenmeme},{yorum},'{yuklenme_tarihi}','{yuklenme_saati}','{tarih}')"
cursor.execute(query)
db.commit()
except Exception as a:
print(a)
continue
print(f"Toplam= {toplam}")
toplam = toplam + 1
for i in kanal_list:
videoMetadata=[]
query=f"SELECT DISTINCT videoID FROM videoliste where kanal_ID= '{i}' "
cursor.execute(query)
for j in cursor.fetchall():
videoid = videoMetadata.append(j[0])
a = veri_cek(j[0],toplam,keys[key_numara])
if a==1:
key_numara += 1
if key_numara == 11:
key_numara = 0
API_KEY = keys[key_numara]
veri_cek(j[0],toplam,keys[key_numara])
cursor.close()
db.commit()
db.close()
| true | true |
1c3146ee467192cfaa82f4f675c16f5fe535c5b7 | 9,364 | py | Python | awx/main/utils/formatters.py | dvaerum/awx | eeab4b90a55864c9c80882e25780a914398b9e51 | [
"Apache-2.0"
] | 1 | 2020-04-10T21:29:52.000Z | 2020-04-10T21:29:52.000Z | awx/main/utils/formatters.py | dvaerum/awx | eeab4b90a55864c9c80882e25780a914398b9e51 | [
"Apache-2.0"
] | null | null | null | awx/main/utils/formatters.py | dvaerum/awx | eeab4b90a55864c9c80882e25780a914398b9e51 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
from copy import copy
import json
import logging
import traceback
import socket
from datetime import datetime
from dateutil.tz import tzutc
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
class TimeFormatter(logging.Formatter):
    """Custom log formatter used for inventory imports.

    Exposes ``relativeSeconds`` on each record — the standard
    ``relativeCreated`` milliseconds expressed in seconds — so format
    strings can reference ``%(relativeSeconds)``.
    """

    def format(self, record):
        # relativeCreated is milliseconds since logging startup.
        record.relativeSeconds = record.relativeCreated / 1000.0
        return super(TimeFormatter, self).format(record)
class LogstashFormatterBase(logging.Formatter):
    """Base class taken from python-logstash=0.4.6
    modified here since that version

    For compliance purposes, this was the license at the point of divergence:

    The MIT License (MIT)

    Copyright (c) 2013, Volodymyr Klochan

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    THE SOFTWARE.
    """

    def __init__(self, message_type='Logstash', fqdn=False):
        self.message_type = message_type
        # Prefer the fully-qualified hostname when requested.
        self.host = socket.getfqdn() if fqdn else socket.gethostname()

    def get_extra_fields(self, record):
        """Return the record's non-standard attributes as a serializable dict."""
        # The list contains all the attributes listed in
        # http://docs.python.org/library/logging.html#logrecord-attributes
        skip_list = (
            'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
            'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
            'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
            'processName', 'relativeCreated', 'thread', 'threadName', 'extra')

        # Values of these types serialize to JSON directly; everything else
        # is captured via repr().
        easy_types = (str, bool, dict, float, int, list, type(None))

        return {
            key: value if isinstance(value, easy_types) else repr(value)
            for key, value in record.__dict__.items()
            if key not in skip_list
        }

    def get_debug_fields(self, record):
        """Return traceback/location details for a record carrying exc_info."""
        return {
            'stack_trace': self.format_exception(record.exc_info),
            'lineno': record.lineno,
            'process': record.process,
            'thread_name': record.threadName,
            'funcName': record.funcName,
            'processName': record.processName,
        }

    @classmethod
    def format_exception(cls, exc_info):
        # Empty string when the record carries no exception info.
        if not exc_info:
            return ''
        return ''.join(traceback.format_exception(*exc_info))

    @classmethod
    def serialize(cls, message):
        return bytes(json.dumps(message, cls=DjangoJSONEncoder), 'utf-8')
class LogstashFormatter(LogstashFormatterBase):
    """Logstash JSON formatter with AWX-specific metadata.

    Attaches the cluster host id and tower uuid to every record and
    reshapes analytics payloads (job events, system tracking, activity
    stream, performance data) before serialization.
    """
    def __init__(self, *args, **kwargs):
        # Capture cluster identity once at construction; attached to every record.
        self.cluster_host_id = settings.CLUSTER_HOST_ID
        self.tower_uuid = None
        uuid = (
            getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
            getattr(settings, 'INSTALL_UUID', None)
        )
        if uuid:
            self.tower_uuid = uuid
        super(LogstashFormatter, self).__init__(*args, **kwargs)
    def reformat_data_for_log(self, raw_data, kind=None):
        '''
        Process dictionaries from various contexts (job events, activity stream
        changes, etc.) to give meaningful information
        Output a dictionary which will be passed in logstash or syslog format
        to the logging receiver
        '''
        if kind == 'activity_stream':
            try:
                raw_data['changes'] = json.loads(raw_data.get('changes', '{}'))
            except Exception:
                pass # best effort here, if it's not valid JSON, then meh
            return raw_data
        elif kind == 'system_tracking':
            data = copy(raw_data['ansible_facts'])
        else:
            data = copy(raw_data)
        if isinstance(data, str):
            data = json.loads(data)
        data_for_log = {}
        if kind == 'job_events':
            job_event = raw_data['python_objects']['job_event']
            # Flatten scalar model fields onto the log payload; relation
            # fields other than foreign keys are skipped (not scalar).
            for field_object in job_event._meta.fields:
                if not field_object.__class__ or not field_object.__class__.__name__:
                    field_class_name = ''
                else:
                    field_class_name = field_object.__class__.__name__
                if field_class_name in ['ManyToOneRel', 'ManyToManyField']:
                    continue
                fd = field_object.name
                key = fd
                if field_class_name == 'ForeignKey':
                    # Log the related row's primary key, not the object itself.
                    fd = '{}_id'.format(field_object.name)
                try:
                    data_for_log[key] = getattr(job_event, fd)
                except Exception as e:
                    data_for_log[key] = 'Exception `{}` producing field'.format(e)
            data_for_log['event_display'] = job_event.get_event_display2()
            if hasattr(job_event, 'workflow_job_id'):
                data_for_log['workflow_job_id'] = job_event.workflow_job_id
        elif kind == 'system_tracking':
            # Strip noisy interpreter-version details from the facts payload.
            data.pop('ansible_python_version', None)
            if 'ansible_python' in data:
                data['ansible_python'].pop('version_info', None)
            data_for_log['ansible_facts'] = data
            data_for_log['ansible_facts_modified'] = raw_data['ansible_facts_modified']
            data_for_log['inventory_id'] = raw_data['inventory_id']
            data_for_log['host_name'] = raw_data['host_name']
            data_for_log['job_id'] = raw_data['job_id']
        elif kind == 'performance':
            def convert_to_type(t, val):
                # Coerce timing/count header strings to the expected type;
                # duration values may carry a trailing 's' unit ("0.33s").
                if t is float:
                    val = val[:-1] if val.endswith('s') else val
                    try:
                        return float(val)
                    except ValueError:
                        return val
                elif t is int:
                    try:
                        return int(val)
                    except ValueError:
                        return val
                elif t is str:
                    return val
            request = raw_data['python_objects']['request']
            response = raw_data['python_objects']['response']
            # Note: All of the below keys may not be in the response "dict"
            # For example, X-API-Query-Time and X-API-Query-Count will only
            # exist if SQL_DEBUG is turned on in settings.
            headers = [
                (float, 'X-API-Time'), # may end with an 's' "0.33s"
                (float, 'X-API-Total-Time'),
                (int, 'X-API-Query-Count'),
                (float, 'X-API-Query-Time'), # may also end with an 's'
                (str, 'X-API-Node'),
            ]
            data_for_log['x_api'] = {k: convert_to_type(t, response[k]) for (t, k) in headers if k in response}
            data_for_log['request'] = {
                'method': request.method,
                'path': request.path,
                'path_info': request.path_info,
                'query_string': request.META['QUERY_STRING'],
            }
            if hasattr(request, 'data'):
                data_for_log['request']['data'] = request.data
        return data_for_log
    def get_extra_fields(self, record):
        """Extend base extra fields with AWX metadata; for ``awx.analytics.*``
        loggers, reshape the payload according to the analytics kind."""
        fields = super(LogstashFormatter, self).get_extra_fields(record)
        if record.name.startswith('awx.analytics'):
            log_kind = record.name[len('awx.analytics.'):]
            fields = self.reformat_data_for_log(fields, kind=log_kind)
        # General AWX metadata
        fields['cluster_host_id'] = self.cluster_host_id
        fields['tower_uuid'] = self.tower_uuid
        return fields
    def format(self, record):
        """Serialize the record to Logstash JSON bytes (UTC timestamp)."""
        stamp = datetime.utcfromtimestamp(record.created)
        stamp = stamp.replace(tzinfo=tzutc())
        message = {
            # Field not included, but exist in related logs
            # 'path': record.pathname
            '@timestamp': stamp,
            'message': record.getMessage(),
            'host': self.host,
            # Extra Fields
            'level': record.levelname,
            'logger_name': record.name,
        }
        # Add extra fields
        message.update(self.get_extra_fields(record))
        # If exception, add debug info
        if record.exc_info:
            message.update(self.get_debug_fields(record))
        return self.serialize(message)
| 37.758065 | 111 | 0.595686 |
from copy import copy
import json
import logging
import traceback
import socket
from datetime import datetime
from dateutil.tz import tzutc
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
class TimeFormatter(logging.Formatter):
    """Formatter that exposes ``relativeSeconds`` on each record.

    ``relativeCreated`` is milliseconds since the logging module was
    loaded; this converts it to seconds before delegating to the base
    formatter.
    """
    def format(self, record):
        record.relativeSeconds = record.relativeCreated / 1000.0
        return super(TimeFormatter, self).format(record)
class LogstashFormatterBase(logging.Formatter):
    """Base formatter that serializes log records to Logstash-style JSON.

    :param message_type: Label describing the message format.
    :param fqdn: If True, report the fully qualified domain name as host.
    """
    def __init__(self, message_type='Logstash', fqdn=False):
        self.message_type = message_type
        if fqdn:
            self.host = socket.getfqdn()
        else:
            self.host = socket.gethostname()
    def get_extra_fields(self, record):
        """Return user-supplied record attributes.

        Standard LogRecord attributes (see
        http://docs.python.org/library/logging.html#logrecord-attributes)
        are excluded; non-JSON-friendly values are stored as their repr().
        """
        # Bug fix: the assignment target had been lost here, leaving a bare
        # tuple expression so ``skip_list`` was undefined and this method
        # raised NameError on every call.
        skip_list = (
            'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
            'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
            'msecs', 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
            'processName', 'relativeCreated', 'thread', 'threadName', 'extra')
        easy_types = (str, bool, dict, float, int, list, type(None))
        fields = {}
        for key, value in record.__dict__.items():
            if key not in skip_list:
                if isinstance(value, easy_types):
                    fields[key] = value
                else:
                    fields[key] = repr(value)
        return fields
    def get_debug_fields(self, record):
        """Return exception/debug metadata for a record carrying exc_info."""
        return {
            'stack_trace': self.format_exception(record.exc_info),
            'lineno': record.lineno,
            'process': record.process,
            'thread_name': record.threadName,
            'funcName': record.funcName,
            'processName': record.processName,
        }
    @classmethod
    def format_exception(cls, exc_info):
        """Render exc_info as a traceback string ('' when absent)."""
        return ''.join(traceback.format_exception(*exc_info)) if exc_info else ''
    @classmethod
    def serialize(cls, message):
        """Encode the message dict as UTF-8 JSON bytes (Django-aware encoder)."""
        return bytes(json.dumps(message, cls=DjangoJSONEncoder), 'utf-8')
class LogstashFormatter(LogstashFormatterBase):
    """Logstash JSON formatter with AWX-specific metadata.

    Attaches the cluster host id and tower uuid to every record and
    reshapes analytics payloads (job events, system tracking, activity
    stream, performance data) before serialization.
    """
    def __init__(self, *args, **kwargs):
        # Capture cluster identity once at construction; attached to every record.
        self.cluster_host_id = settings.CLUSTER_HOST_ID
        self.tower_uuid = None
        uuid = (
            getattr(settings, 'LOG_AGGREGATOR_TOWER_UUID', None) or
            getattr(settings, 'INSTALL_UUID', None)
        )
        if uuid:
            self.tower_uuid = uuid
        super(LogstashFormatter, self).__init__(*args, **kwargs)
    def reformat_data_for_log(self, raw_data, kind=None):
        """Process dictionaries from various contexts (job events, activity
        stream changes, etc.) into a dictionary suitable for the logging
        receiver in logstash or syslog format."""
        if kind == 'activity_stream':
            try:
                raw_data['changes'] = json.loads(raw_data.get('changes', '{}'))
            except Exception:
                pass  # best effort: if 'changes' is not valid JSON, leave it as-is
            return raw_data
        elif kind == 'system_tracking':
            data = copy(raw_data['ansible_facts'])
        else:
            data = copy(raw_data)
        if isinstance(data, str):
            data = json.loads(data)
        data_for_log = {}
        if kind == 'job_events':
            job_event = raw_data['python_objects']['job_event']
            # Flatten scalar model fields onto the log payload; relation
            # fields other than foreign keys are skipped (not scalar).
            for field_object in job_event._meta.fields:
                if not field_object.__class__ or not field_object.__class__.__name__:
                    field_class_name = ''
                else:
                    field_class_name = field_object.__class__.__name__
                if field_class_name in ['ManyToOneRel', 'ManyToManyField']:
                    continue
                fd = field_object.name
                key = fd
                if field_class_name == 'ForeignKey':
                    # Log the related row's primary key, not the object itself.
                    fd = '{}_id'.format(field_object.name)
                try:
                    data_for_log[key] = getattr(job_event, fd)
                except Exception as e:
                    data_for_log[key] = 'Exception `{}` producing field'.format(e)
            data_for_log['event_display'] = job_event.get_event_display2()
            if hasattr(job_event, 'workflow_job_id'):
                data_for_log['workflow_job_id'] = job_event.workflow_job_id
        elif kind == 'system_tracking':
            # Strip noisy interpreter-version details from the facts payload.
            data.pop('ansible_python_version', None)
            if 'ansible_python' in data:
                data['ansible_python'].pop('version_info', None)
            data_for_log['ansible_facts'] = data
            data_for_log['ansible_facts_modified'] = raw_data['ansible_facts_modified']
            data_for_log['inventory_id'] = raw_data['inventory_id']
            data_for_log['host_name'] = raw_data['host_name']
            data_for_log['job_id'] = raw_data['job_id']
        elif kind == 'performance':
            def convert_to_type(t, val):
                # Coerce timing/count header strings to the expected type;
                # duration values may carry a trailing 's' unit ("0.33s").
                if t is float:
                    val = val[:-1] if val.endswith('s') else val
                    try:
                        return float(val)
                    except ValueError:
                        return val
                elif t is int:
                    try:
                        return int(val)
                    except ValueError:
                        return val
                elif t is str:
                    return val
            request = raw_data['python_objects']['request']
            response = raw_data['python_objects']['response']
            # Note: All of the below keys may not be in the response "dict"
            # For example, X-API-Query-Time and X-API-Query-Count will only
            # exist if SQL_DEBUG is turned on in settings.
            headers = [
                (float, 'X-API-Time'),  # may end with an 's' "0.33s"
                (float, 'X-API-Total-Time'),
                (int, 'X-API-Query-Count'),
                (float, 'X-API-Query-Time'),  # may also end with an 's'
                (str, 'X-API-Node'),
            ]
            data_for_log['x_api'] = {k: convert_to_type(t, response[k]) for (t, k) in headers if k in response}
            data_for_log['request'] = {
                'method': request.method,
                'path': request.path,
                'path_info': request.path_info,
                'query_string': request.META['QUERY_STRING'],
            }
            if hasattr(request, 'data'):
                data_for_log['request']['data'] = request.data
        return data_for_log
    def get_extra_fields(self, record):
        """Extend base extra fields with AWX metadata; for ``awx.analytics.*``
        loggers, reshape the payload according to the analytics kind."""
        fields = super(LogstashFormatter, self).get_extra_fields(record)
        if record.name.startswith('awx.analytics'):
            log_kind = record.name[len('awx.analytics.'):]
            fields = self.reformat_data_for_log(fields, kind=log_kind)
        # General AWX metadata
        fields['cluster_host_id'] = self.cluster_host_id
        fields['tower_uuid'] = self.tower_uuid
        return fields
    def format(self, record):
        """Serialize the record to Logstash JSON bytes (UTC timestamp)."""
        stamp = datetime.utcfromtimestamp(record.created)
        stamp = stamp.replace(tzinfo=tzutc())
        message = {
            # Field not included, but exist in related logs
            # 'path': record.pathname
            '@timestamp': stamp,
            'message': record.getMessage(),
            'host': self.host,
            # Extra Fields
            'level': record.levelname,
            'logger_name': record.name,
        }
        # Add extra fields
        message.update(self.get_extra_fields(record))
        # If exception, add debug info
        if record.exc_info:
            message.update(self.get_debug_fields(record))
        return self.serialize(message)
| true | true |
1c3147546d1847c7fd6da0c2a520a28952b357fb | 401 | py | Python | open-codegen/opengen/functions/norm2.py | jgillis/optimization-engine | 2952af47891204d3cd080a8e7f71e616ac022e52 | [
"Apache-2.0",
"MIT"
] | null | null | null | open-codegen/opengen/functions/norm2.py | jgillis/optimization-engine | 2952af47891204d3cd080a8e7f71e616ac022e52 | [
"Apache-2.0",
"MIT"
] | null | null | null | open-codegen/opengen/functions/norm2.py | jgillis/optimization-engine | 2952af47891204d3cd080a8e7f71e616ac022e52 | [
"Apache-2.0",
"MIT"
] | null | null | null | import casadi.casadi as cs
import numpy as np
from .is_numeric import *
from .is_symbolic import *
def norm2(u):
    """Return the Euclidean (2-)norm of ``u``.

    Numeric inputs (a numpy array, or a list whose entries are all
    numeric) are handled by numpy; symbolic (CasADi) inputs by
    ``cs.norm_2``. Anything else raises an Exception.
    """
    if isinstance(u, np.ndarray):
        # numeric numpy vector
        return np.linalg.norm(u)
    if isinstance(u, list) and all(is_numeric(entry) for entry in u):
        # numeric plain-list vector
        return np.linalg.norm(u)
    if is_symbolic(u):
        return cs.norm_2(u)
    raise Exception("Illegal argument")
import numpy as np
from .is_numeric import *
from .is_symbolic import *
def norm2(u):
    """Euclidean norm of a numeric (list/ndarray) or symbolic vector ``u``."""
    u_is_numeric_list = isinstance(u, list) and all(is_numeric(v) for v in u)
    if u_is_numeric_list or isinstance(u, np.ndarray):
        return np.linalg.norm(u)
    elif is_symbolic(u):
        return cs.norm_2(u)
    else:
        raise Exception("Illegal argument")
1c31489dab3dea9499e3bba760b2a7bdeb0f6ada | 1,389 | py | Python | ssseg/cfgs/dmnet/cfgs_ade20k_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | 2 | 2021-10-31T21:52:30.000Z | 2021-12-21T12:35:37.000Z | ssseg/cfgs/dmnet/cfgs_ade20k_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | null | null | null | ssseg/cfgs/dmnet/cfgs_ade20k_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | null | null | null | '''define the config file for ade20k and resnet50os16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
'backbone': {
'type': 'resnet50',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'use_stem': True,
'selected_indices': (2, 3),
},
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'dmnet_resnet50os16_ade20k_train',
'logfilepath': 'dmnet_resnet50os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'dmnet_resnet50os16_ade20k_test',
'logfilepath': 'dmnet_resnet50os16_ade20k_test/test.log',
'resultsavepath': 'dmnet_resnet50os16_ade20k_test/dmnet_resnet50os16_ade20k_results.pkl'
}
) | 25.722222 | 96 | 0.657307 | import os
from .base_cfg import *
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
DATALOADER_CFG = DATALOADER_CFG.copy()
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
LOSSES_CFG = LOSSES_CFG.copy()
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
'backbone': {
'type': 'resnet50',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'use_stem': True,
'selected_indices': (2, 3),
},
}
)
INFERENCE_CFG = INFERENCE_CFG.copy()
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'dmnet_resnet50os16_ade20k_train',
'logfilepath': 'dmnet_resnet50os16_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'dmnet_resnet50os16_ade20k_test',
'logfilepath': 'dmnet_resnet50os16_ade20k_test/test.log',
'resultsavepath': 'dmnet_resnet50os16_ade20k_test/dmnet_resnet50os16_ade20k_results.pkl'
}
) | true | true |
1c3149e38f7b6cc0039c07d18c3679a41298157e | 34,017 | py | Python | python/interpret_community/mimic/mimic_explainer.py | bethz/interpret-community | 3932bfe93aedbc2a6409de1e169e0576cedc8b0d | [
"MIT"
] | 2 | 2020-10-14T01:02:37.000Z | 2022-02-17T01:47:49.000Z | python/interpret_community/mimic/mimic_explainer.py | bethz/interpret-community | 3932bfe93aedbc2a6409de1e169e0576cedc8b0d | [
"MIT"
] | 12 | 2021-03-10T01:29:02.000Z | 2022-02-26T21:11:42.000Z | python/interpret_community/mimic/mimic_explainer.py | bethz/interpret-community | 3932bfe93aedbc2a6409de1e169e0576cedc8b0d | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines the Mimic Explainer for computing explanations on black box models or functions.
The mimic explainer trains an explainable model to reproduce the output of the given black box model.
The explainable model is called a surrogate model and the black box model is called a teacher model.
Once trained to reproduce the output of the teacher model, the surrogate model's explanation can
be used to explain the teacher model.
"""
import numpy as np
from ..common.explanation_utils import _order_imp
from ..common.model_wrapper import _wrap_model
from .._internal.raw_explain.raw_explain_utils import get_datamapper_and_transformed_data, \
transform_with_datamapper
from ..common.blackbox_explainer import BlackBoxExplainer
from .model_distill import _model_distill
from .models import LGBMExplainableModel
from ..explanation.explanation import _create_local_explanation, _create_global_explanation, \
_aggregate_global_from_local_explanation, _aggregate_streamed_local_explanations, \
_create_raw_feats_global_explanation, _create_raw_feats_local_explanation, \
_get_raw_explainer_create_explanation_kwargs
from ..dataset.decorator import tabular_decorator, init_tabular_decorator
from ..dataset.dataset_wrapper import DatasetWrapper
from ..common.constants import ExplainParams, ExplainType, ModelTask, \
ShapValuesOutput, MimicSerializationConstants, ExplainableModelType, \
LightGBMParams, Defaults, Extension
import logging
import json
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning)
from shap.common import DenseData
class MimicExplainer(BlackBoxExplainer):
available_explanations = [Extension.GLOBAL, Extension.LOCAL]
explainer_type = Extension.BLACKBOX
"""The Mimic Explainer for explaining black box models or functions.
:param model: The black box model or function (if is_function is True) to be explained. Also known
as the teacher model.
:type model: model that implements sklearn.predict or sklearn.predict_proba or function that accepts a 2d ndarray
:param initialization_examples: A matrix of feature vector examples (# examples x # features) for
initializing the explainer.
:type initialization_examples: numpy.array or pandas.DataFrame or iml.datatypes.DenseData or
scipy.sparse.csr_matrix
:param explainable_model: The uninitialized surrogate model used to explain the black box model.
Also known as the student model.
:type explainable_model: interpret_community.mimic.models.BaseExplainableModel
:param explainable_model_args: An optional map of arguments to pass to the explainable model
for initialization.
:type explainable_model_args: dict
:param is_function: Default set to false, set to True if passing function instead of model.
:type is_function: bool
:param augment_data: If true, oversamples the initialization examples to improve surrogate
model accuracy to fit teacher model. Useful for high-dimensional data where
the number of rows is less than the number of columns.
:type augment_data: bool
:param max_num_of_augmentations: max number of times we can increase the input data size.
:type max_num_of_augmentations: int
:param explain_subset: List of feature indices. If specified, only selects a subset of the
features in the evaluation dataset for explanation. Note for mimic explainer this will
not affect the execution time of getting the global explanation. This argument is not supported when
transformations are set.
:type explain_subset: list[int]
:param features: A list of feature names.
:type features: list[str]
:param classes: Class names as a list of strings. The order of the class names should match
that of the model output. Only required if explaining classifier.
:type classes: list[str]
:param transformations: sklearn.compose.ColumnTransformer or a list of tuples describing the column name and
transformer. When transformations are provided, explanations are of the features before the transformation.
The format for list of transformations is same as the one here:
https://github.com/scikit-learn-contrib/sklearn-pandas.
If the user is using a transformation that is not in the list of sklearn.preprocessing transformations that
we support then we cannot take a list of more than one column as input for the transformation.
A user can use the following sklearn.preprocessing transformations with a list of columns since these are
already one to many or one to one: Binarizer, KBinsDiscretizer, KernelCenterer, LabelEncoder, MaxAbsScaler,
MinMaxScaler, Normalizer, OneHotEncoder, OrdinalEncoder, PowerTransformer, QuantileTransformer, RobustScaler,
StandardScaler.
Examples for transformations that work::
[
(["col1", "col2"], sklearn_one_hot_encoder),
(["col3"], None) #col3 passes as is
]
[
(["col1"], my_own_transformer),
(["col2"], my_own_transformer),
]
Example of transformations that would raise an error since it cannot be interpreted as one to many::
[
(["col1", "col2"], my_own_transformer)
]
This would not work since it is hard to make out whether my_own_transformer gives a many to many or one to
many mapping when taking a sequence of columns.
:type transformations: sklearn.compose.ColumnTransformer or list[tuple]
:param shap_values_output: The shap values output from the explainer. Only applies to
tree-based models that are in terms of raw feature values instead of probabilities.
Can be default, probability or teacher_probability. If probability or teacher_probability
are specified, we approximate the feature importance values as probabilities instead
of using the default values. If teacher probability is specified, we use the probabilities
from the teacher model as opposed to the surrogate model.
:type shap_values_output: interpret_community.common.constants.ShapValuesOutput
:param categorical_features: Categorical feature names or indexes.
If names are passed, they will be converted into indexes first.
Note if pandas indexes are categorical, you can either pass the name of the index or the index
as if the pandas index was inserted at the end of the input dataframe.
:type categorical_features: Union[list[str], list[int]]
:param allow_all_transformations: Allow many to many and many to one transformations
:type allow_all_transformations: bool
:param model_task: Optional parameter to specify whether the model is a classification or regression model.
In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
outputs a 1 dimensional array.
:type model_task: str
:param reset_index: Uses the pandas DataFrame index column as part of the features when training
the surrogate model.
:type reset_index: bool
"""
    @init_tabular_decorator
    def __init__(self, model, initialization_examples, explainable_model, explainable_model_args=None,
                 is_function=False, augment_data=True, max_num_of_augmentations=10, explain_subset=None,
                 features=None, classes=None, transformations=None, allow_all_transformations=False,
                 shap_values_output=ShapValuesOutput.DEFAULT, categorical_features=None,
                 model_task=ModelTask.Unknown, reset_index=False, **kwargs):
        """Initialize the MimicExplainer.

        Wraps the teacher model, optionally transforms/augments/encodes the
        initialization data, then distills the teacher's predictions into the
        given explainable surrogate model.

        :param model: The black box model or function (if is_function is True) to be explained. Also known
            as the teacher model.
        :type model: model that implements sklearn.predict or sklearn.predict_proba or function that accepts a 2d
            ndarray
        :param initialization_examples: A matrix of feature vector examples (# examples x # features) for
            initializing the explainer.
        :type initialization_examples: numpy.array or pandas.DataFrame or iml.datatypes.DenseData or
            scipy.sparse.csr_matrix
        :param explainable_model: The uninitialized surrogate model used to explain the black box model.
            Also known as the student model.
        :type explainable_model: BaseExplainableModel
        :param explainable_model_args: An optional map of arguments to pass to the explainable model
            for initialization.
        :type explainable_model_args: dict
        :param is_function: Default set to false, set to True if passing function instead of model.
        :type is_function: bool
        :param augment_data: If true, oversamples the initialization examples to improve surrogate
            model accuracy to fit teacher model.  Useful for high-dimensional data where
            the number of rows is less than the number of columns.
        :type augment_data: bool
        :param max_num_of_augmentations: max number of times we can increase the input data size.
        :type max_num_of_augmentations: int
        :param explain_subset: List of feature indices. If specified, only selects a subset of the
            features in the evaluation dataset for explanation. Note for mimic explainer this will
            not affect the execution time of getting the global explanation. This argument is not supported when
            transformations are set.
        :type explain_subset: list[int]
        :param features: A list of feature names.
        :type features: list[str]
        :param classes: Class names as a list of strings. The order of the class names should match
            that of the model output.  Only required if explaining classifier.
        :type classes: list[str]
        :param transformations: sklearn.compose.ColumnTransformer object or a list of tuples describing the column
            name and transformer. When transformations are provided, explanations are of the features before the
            transformation. The format for the list of transformations is same as the one here:
            https://github.com/scikit-learn-contrib/sklearn-pandas.
            If the user is using a transformation that is not in the list of sklearn.preprocessing transformations
            that we support then we cannot take a list of more than one column as input for the transformation.
            A user can use the following sklearn.preprocessing transformations with a list of columns since these
            are already one to many or one to one: Binarizer, KBinsDiscretizer, KernelCenterer, LabelEncoder,
            MaxAbsScaler, MinMaxScaler, Normalizer, OneHotEncoder, OrdinalEncoder, PowerTransformer,
            QuantileTransformer, RobustScaler, StandardScaler.
            Examples for transformations that work::

                [
                    (["col1", "col2"], sklearn_one_hot_encoder),
                    (["col3"], None) #col3 passes as is
                ]
                [
                    (["col1"], my_own_transformer),
                    (["col2"], my_own_transformer),
                ]

            Example of transformations that would raise an error since it cannot be interpreted as one to many::

                [
                    (["col1", "col2"], my_own_transformer)
                ]

            This would not work since it is hard to make out whether my_own_transformer gives a many to many or
            one to many mapping when taking a sequence of columns.
        :type transformations: sklearn.compose.ColumnTransformer or list[tuple]
        :param shap_values_output: The shap values output from the explainer. Only applies to
            tree-based models that are in terms of raw feature values instead of probabilities.
            Can be default, probability or teacher_probability. If probability or teacher_probability
            are specified, we approximate the feature importance values as probabilities instead
            of using the default values. If teacher probability is specified, we use the probabilities
            from the teacher model as opposed to the surrogate model.
        :type shap_values_output: interpret_community.common.constants.ShapValuesOutput
        :param categorical_features: Categorical feature names or indexes.
            If names are passed, they will be converted into indexes first.
            Note if pandas indexes are categorical, you can either pass the name of the index or the index
            as if the pandas index was inserted at the end of the input dataframe.
        :type categorical_features: Union[list[str], list[int]]
        :param allow_all_transformations: Allow many to many and many to one transformations
        :type allow_all_transformations: bool
        :param model_task: Optional parameter to specify whether the model is a classification or regression model.
            In most cases, the type of the model can be inferred based on the shape of the output, where a classifier
            has a predict_proba method and outputs a 2 dimensional array, while a regressor has a predict method and
            outputs a 1 dimensional array.
        :type model_task: str
        :param reset_index: Uses the pandas DataFrame index column as part of the features when training
            the surrogate model.
        :type reset_index: bool
        :raises ValueError: If both transformations and explain_subset are specified.
        """
        # explain_subset indexes raw columns, which transformations invalidate
        if transformations is not None and explain_subset is not None:
            raise ValueError("explain_subset not supported with transformations")
        self.reset_index = reset_index
        if reset_index:
            initialization_examples.reset_index()
        self._datamapper = None
        if transformations is not None:
            # Build the raw->engineered feature map and transform the data
            self._datamapper, initialization_examples = get_datamapper_and_transformed_data(
                examples=initialization_examples, transformations=transformations,
                allow_all_transformations=allow_all_transformations)
        wrapped_model, eval_ml_domain = _wrap_model(model, initialization_examples, model_task, is_function)
        super(MimicExplainer, self).__init__(wrapped_model, is_function=is_function,
                                             model_task=eval_ml_domain, **kwargs)
        if explainable_model_args is None:
            explainable_model_args = {}
        if categorical_features is None:
            categorical_features = []
        self._logger.debug('Initializing MimicExplainer')
        # Get the feature names from the initialization examples
        self._init_features = initialization_examples.get_features(features=features)
        self.features = features
        # augment the data if necessary
        if augment_data:
            initialization_examples.augment_data(max_num_of_augmentations=max_num_of_augmentations)
        original_training_data = initialization_examples.typed_dataset
        # If categorical_features is a list of string column names instead of indexes, make sure to convert to indexes
        if not all(isinstance(categorical_feature, int) for categorical_feature in categorical_features):
            categorical_features = initialization_examples.get_column_indexes(self._init_features,
                                                                              categorical_features)
        # Featurize any timestamp columns
        # TODO: more sophisticated featurization
        self._timestamp_featurizer = initialization_examples.timestamp_featurizer()
        # If model is a linear model or isn't able to handle categoricals, one-hot-encode categoricals
        is_tree_model = explainable_model.explainable_model_type == ExplainableModelType.TREE_EXPLAINABLE_MODEL_TYPE
        if is_tree_model and self._supports_categoricals(explainable_model):
            # Index the categorical string columns for training data
            self._column_indexer = initialization_examples.string_index(columns=categorical_features)
            self._one_hot_encoder = None
            explainable_model_args[LightGBMParams.CATEGORICAL_FEATURE] = categorical_features
        else:
            # One-hot-encode categoricals for models that don't support categoricals natively
            self._column_indexer = initialization_examples.string_index(columns=categorical_features)
            self._one_hot_encoder = initialization_examples.one_hot_encode(columns=categorical_features)
        self.classes = classes
        self.explain_subset = explain_subset
        self.transformations = transformations
        self._shap_values_output = shap_values_output
        # Train the mimic model on the given model
        training_data = initialization_examples.dataset
        self.initialization_examples = initialization_examples
        if isinstance(training_data, DenseData):
            training_data = training_data.data
        explainable_model_args[ExplainParams.CLASSIFICATION] = self.predict_proba_flag
        if self._supports_shap_values_output(explainable_model):
            explainable_model_args[ExplainParams.SHAP_VALUES_OUTPUT] = shap_values_output
        # Distill the teacher's predictions into the surrogate (student) model
        self.surrogate_model = _model_distill(self.function, explainable_model, training_data,
                                              original_training_data, explainable_model_args)
        self._method = self.surrogate_model._method
        self._original_eval_examples = None
        self._allow_all_transformations = allow_all_transformations
def _supports_categoricals(self, explainable_model):
return issubclass(explainable_model, LGBMExplainableModel)
def _supports_shap_values_output(self, explainable_model):
return issubclass(explainable_model, LGBMExplainableModel)
    def _get_explain_global_kwargs(self, evaluation_examples=None, include_local=True,
                                   batch_size=Defaults.DEFAULT_BATCH_SIZE):
        """Get the kwargs for explain_global to create a global explanation.

        :param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which to
            explain the model's output.  If specified, computes feature importances through aggregation.
        :type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
        :param include_local: Include the local explanations in the returned global explanation.
            If evaluation examples are specified and include_local is False, will stream the local
            explanations to aggregate to global.
        :type include_local: bool
        :param batch_size: If include_local is False, specifies the batch size for aggregating
            local explanations to global.
        :type batch_size: int
        :return: Args for explain_global.
        :rtype: dict
        """
        classification = self.predict_proba_flag
        kwargs = {ExplainParams.METHOD: ExplainType.MIMIC}
        if classification:
            kwargs[ExplainParams.CLASSES] = self.classes
        if evaluation_examples is not None:
            # Aggregate local explanation to global, either through computing the local
            # explanation and then aggregating or streaming the local explanation to global
            if include_local:
                # Get local explanation
                local_explanation = self.explain_local(evaluation_examples)
                kwargs[ExplainParams.LOCAL_EXPLANATION] = local_explanation
            else:
                if classification:
                    model_task = ModelTask.Classification
                else:
                    model_task = ModelTask.Regression
                if not isinstance(evaluation_examples, DatasetWrapper):
                    self._logger.debug('Eval examples not wrapped, wrapping')
                    evaluation_examples = DatasetWrapper(evaluation_examples)
                # Stream local explanations in batches and fold them into global kwargs
                kwargs = _aggregate_streamed_local_explanations(self, evaluation_examples, model_task, self.features,
                                                                batch_size, **kwargs)
            return kwargs
        # No evaluation examples: take global importances directly from the surrogate
        global_importance_values = self.surrogate_model.explain_global()
        order = _order_imp(global_importance_values)
        if classification:
            kwargs[ExplainParams.MODEL_TASK] = ExplainType.CLASSIFICATION
        else:
            kwargs[ExplainParams.MODEL_TASK] = ExplainType.REGRESSION
        if self.model is not None:
            kwargs[ExplainParams.MODEL_TYPE] = str(type(self.model))
        else:
            kwargs[ExplainParams.MODEL_TYPE] = ExplainType.FUNCTION
        kwargs[ExplainParams.EXPECTED_VALUES] = None
        kwargs[ExplainParams.CLASSIFICATION] = classification
        kwargs[ExplainParams.GLOBAL_IMPORTANCE_VALUES] = global_importance_values
        kwargs[ExplainParams.GLOBAL_IMPORTANCE_RANK] = order
        kwargs[ExplainParams.FEATURES] = self.features
        return kwargs
def explain_global(self, evaluation_examples=None, include_local=True,
batch_size=Defaults.DEFAULT_BATCH_SIZE):
"""Globally explains the blackbox model using the surrogate model.
If evaluation_examples are unspecified, retrieves global feature importances from explainable
surrogate model. Note this will not include per class feature importances. If evaluation_examples
are specified, aggregates local explanations to global from the given evaluation_examples - which
computes both global and per class feature importances.
:param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which to
explain the model's output. If specified, computes feature importances through aggregation.
:type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
:param include_local: Include the local explanations in the returned global explanation.
If evaluation examples are specified and include_local is False, will stream the local
explanations to aggregate to global.
:type include_local: bool
:param batch_size: If include_local is False, specifies the batch size for aggregating
local explanations to global.
:type batch_size: int
:return: A model explanation object. It is guaranteed to be a GlobalExplanation. If evaluation_examples are
passed in, it will also have the properties of a LocalExplanation. If the model is a classifier (has
predict_proba), it will have the properties of ClassesMixin, and if evaluation_examples were passed in it
will also have the properties of PerClassMixin.
:rtype: DynamicGlobalExplanation
"""
if self._original_eval_examples is None:
if isinstance(evaluation_examples, DatasetWrapper):
self._original_eval_examples = evaluation_examples.original_dataset_with_type
else:
self._original_eval_examples = evaluation_examples
kwargs = self._get_explain_global_kwargs(evaluation_examples=evaluation_examples, include_local=include_local,
batch_size=batch_size)
kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
if evaluation_examples is not None:
kwargs[ExplainParams.EVAL_DATA] = evaluation_examples
ys_dict = self._get_ys_dict(self._original_eval_examples,
transformations=self.transformations,
allow_all_transformations=self._allow_all_transformations)
kwargs.update(ys_dict)
if include_local:
return _aggregate_global_from_local_explanation(**kwargs)
explanation = _create_global_explanation(**kwargs)
# if transformations have been passed, then return raw features explanation
raw_kwargs = _get_raw_explainer_create_explanation_kwargs(kwargs=kwargs)
return explanation if self._datamapper is None else _create_raw_feats_global_explanation(
explanation, feature_maps=[self._datamapper.feature_map], features=self.features, **raw_kwargs)
    def _get_explain_local_kwargs(self, evaluation_examples):
        """Get the kwargs for explain_local to create a local explanation.
        :param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which
            to explain the model's output.
        :type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
        :return: Args for explain_local.
        :rtype: dict
        """
        if self.reset_index:
            evaluation_examples.reset_index()
        kwargs = {}
        # Capture the typed dataset before any in-place transformations below mutate the wrapper.
        original_evaluation_examples = evaluation_examples.typed_dataset
        probabilities = None
        if self._shap_values_output == ShapValuesOutput.TEACHER_PROBABILITY:
            # Outputting shap values in terms of the probabilities of the teacher model
            probabilities = self.function(original_evaluation_examples)
        # Apply the same preprocessing used at fit time (order matters: featurize, index, encode).
        if self._timestamp_featurizer:
            evaluation_examples.apply_timestamp_featurizer(self._timestamp_featurizer)
        if self._column_indexer:
            evaluation_examples.apply_indexer(self._column_indexer, bucket_unknown=True)
        if self._one_hot_encoder:
            evaluation_examples.apply_one_hot_encoder(self._one_hot_encoder)
        dataset = evaluation_examples.dataset
        kwargs[ExplainParams.NUM_FEATURES] = evaluation_examples.num_features
        local_importance_values = self.surrogate_model.explain_local(dataset, probabilities=probabilities)
        # A list result (one array per class) also signals classification.
        classification = isinstance(local_importance_values, list) or self.predict_proba_flag
        expected_values = self.surrogate_model.expected_values
        kwargs[ExplainParams.METHOD] = ExplainType.MIMIC
        self.features = evaluation_examples.get_features(features=self.features)
        kwargs[ExplainParams.FEATURES] = self.features
        if self.predict_proba_flag:
            if self.surrogate_model.multiclass:
                # For multiclass case, convert to array
                local_importance_values = np.array(local_importance_values)
            else:
                # TODO: Eventually move this back inside the surrogate model
                # If binary case, we need to reformat the data to have importances per class
                # and convert the expected values back to the original domain
                local_importance_values = np.stack((-local_importance_values, local_importance_values))
        if classification:
            kwargs[ExplainParams.CLASSES] = self.classes
        # Reformat local_importance_values result if explain_subset specified
        if self.explain_subset:
            self._logger.debug('Getting subset of local_importance_values')
            if classification:
                local_importance_values = local_importance_values[:, :, self.explain_subset]
            else:
                local_importance_values = local_importance_values[:, self.explain_subset]
        if classification:
            kwargs[ExplainParams.MODEL_TASK] = ExplainType.CLASSIFICATION
        else:
            kwargs[ExplainParams.MODEL_TASK] = ExplainType.REGRESSION
        if self.model is not None:
            kwargs[ExplainParams.MODEL_TYPE] = str(type(self.model))
        else:
            kwargs[ExplainParams.MODEL_TYPE] = ExplainType.FUNCTION
        kwargs[ExplainParams.LOCAL_IMPORTANCE_VALUES] = local_importance_values
        kwargs[ExplainParams.EXPECTED_VALUES] = np.array(expected_values)
        kwargs[ExplainParams.CLASSIFICATION] = classification
        kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
        kwargs[ExplainParams.EVAL_DATA] = original_evaluation_examples
        # True y values come from the cached original (pre-transformation) examples.
        ys_dict = self._get_ys_dict(self._original_eval_examples,
                                    transformations=self.transformations,
                                    allow_all_transformations=self._allow_all_transformations)
        kwargs.update(ys_dict)
        return kwargs
@tabular_decorator
def explain_local(self, evaluation_examples):
"""Locally explains the blackbox model using the surrogate model.
:param evaluation_examples: A matrix of feature vector examples (# examples x # features) on which
to explain the model's output.
:type evaluation_examples: numpy.array or pandas.DataFrame or scipy.sparse.csr_matrix
:return: A model explanation object. It is guaranteed to be a LocalExplanation. If the model is a classifier,
it will have the properties of the ClassesMixin.
:rtype: DynamicLocalExplanation
"""
if self._original_eval_examples is None:
if isinstance(evaluation_examples, DatasetWrapper):
self._original_eval_examples = evaluation_examples.original_dataset_with_type
else:
self._original_eval_examples = evaluation_examples
if self._datamapper is not None:
evaluation_examples = transform_with_datamapper(evaluation_examples, self._datamapper)
kwargs = self._get_explain_local_kwargs(evaluation_examples)
kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
kwargs[ExplainParams.EVAL_DATA] = evaluation_examples
explanation = _create_local_explanation(**kwargs)
# if transformations have been passed, then return raw features explanation
raw_kwargs = _get_raw_explainer_create_explanation_kwargs(kwargs=kwargs)
return explanation if self._datamapper is None else _create_raw_feats_local_explanation(
explanation, feature_maps=[self._datamapper.feature_map], features=self.features, **raw_kwargs)
def _save(self):
"""Return a string dictionary representation of the mimic explainer.
Currently only supported scenario is Mimic Explainer with LightGBM surrogate model.
:return: A serialized dictionary representation of the mimic explainer.
:rtype: dict
"""
properties = {}
# save all of the properties
for key, value in self.__dict__.items():
if key in MimicSerializationConstants.nonify_properties:
properties[key] = None
elif key in MimicSerializationConstants.save_properties:
properties[key] = value._save()
else:
properties[key] = json.dumps(value)
# return a dictionary of strings
return properties
    @staticmethod
    def _load(model, properties):
        """Load a MimicExplainer from the given properties.
        Currently only supported scenario is Mimic Explainer with LightGBM surrogate model.
        :param model: The serialized ONNX model with a scikit-learn like API.
        :type model: ONNX model.
        :param properties: A serialized dictionary representation of the mimic explainer.
        :type properties: dict
        :return: The deserialized MimicExplainer.
        :rtype: interpret_community.mimic.MimicExplainer
        """
        # create the MimicExplainer without any properties using the __new__ function, similar to pickle
        mimic = MimicExplainer.__new__(MimicExplainer)
        # load all of the properties
        for key, value in properties.items():
            # Regenerate the nonified properties on the fly from the given model
            if key in MimicSerializationConstants.nonify_properties:
                if key == MimicSerializationConstants.MODEL:
                    mimic.__dict__[key] = model
                elif key == MimicSerializationConstants.LOGGER:
                    # Rebuild the child logger from the serialized identity
                    parent = logging.getLogger(__name__)
                    mimic_identity = json.loads(properties[MimicSerializationConstants.IDENTITY])
                    mimic.__dict__[key] = parent.getChild(mimic_identity)
                elif key == MimicSerializationConstants.INITIALIZATION_EXAMPLES:
                    mimic.__dict__[key] = None
                elif key == MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES:
                    mimic.__dict__[key] = None
                elif key == MimicSerializationConstants.TIMESTAMP_FEATURIZER:
                    mimic.__dict__[key] = None
                elif key == MimicSerializationConstants.FUNCTION:
                    # TODO add third case if is_function was passed to mimic explainer
                    if json.loads(properties[MimicSerializationConstants.PREDICT_PROBA_FLAG]):
                        mimic.__dict__[key] = model.predict_proba
                    else:
                        mimic.__dict__[key] = model.predict
                else:
                    raise Exception("Unknown nonify key on deserialize in MimicExplainer: {}".format(key))
            elif key in MimicSerializationConstants.save_properties:
                # Nested surrogate model deserializes itself
                mimic.__dict__[key] = LGBMExplainableModel._load(value)
            elif key in MimicSerializationConstants.enum_properties:
                # NOTE: If more enums added in future, will need to handle this differently
                mimic.__dict__[key] = ShapValuesOutput(json.loads(value))
            else:
                mimic.__dict__[key] = json.loads(value)
        # Backfill attributes that older serialized versions did not contain
        if MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES not in mimic.__dict__:
            mimic.__dict__[MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES] = None
        if MimicSerializationConstants.TIMESTAMP_FEATURIZER not in mimic.__dict__:
            mimic.__dict__[MimicSerializationConstants.TIMESTAMP_FEATURIZER] = None
        return mimic
| 58.852941 | 119 | 0.704677 |
import numpy as np
from ..common.explanation_utils import _order_imp
from ..common.model_wrapper import _wrap_model
from .._internal.raw_explain.raw_explain_utils import get_datamapper_and_transformed_data, \
transform_with_datamapper
from ..common.blackbox_explainer import BlackBoxExplainer
from .model_distill import _model_distill
from .models import LGBMExplainableModel
from ..explanation.explanation import _create_local_explanation, _create_global_explanation, \
_aggregate_global_from_local_explanation, _aggregate_streamed_local_explanations, \
_create_raw_feats_global_explanation, _create_raw_feats_local_explanation, \
_get_raw_explainer_create_explanation_kwargs
from ..dataset.decorator import tabular_decorator, init_tabular_decorator
from ..dataset.dataset_wrapper import DatasetWrapper
from ..common.constants import ExplainParams, ExplainType, ModelTask, \
ShapValuesOutput, MimicSerializationConstants, ExplainableModelType, \
LightGBMParams, Defaults, Extension
import logging
import json
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning)
from shap.common import DenseData
class MimicExplainer(BlackBoxExplainer):
available_explanations = [Extension.GLOBAL, Extension.LOCAL]
explainer_type = Extension.BLACKBOX
@init_tabular_decorator
def __init__(self, model, initialization_examples, explainable_model, explainable_model_args=None,
is_function=False, augment_data=True, max_num_of_augmentations=10, explain_subset=None,
features=None, classes=None, transformations=None, allow_all_transformations=False,
shap_values_output=ShapValuesOutput.DEFAULT, categorical_features=None,
model_task=ModelTask.Unknown, reset_index=False, **kwargs):
if transformations is not None and explain_subset is not None:
raise ValueError("explain_subset not supported with transformations")
self.reset_index = reset_index
if reset_index:
initialization_examples.reset_index()
self._datamapper = None
if transformations is not None:
self._datamapper, initialization_examples = get_datamapper_and_transformed_data(
examples=initialization_examples, transformations=transformations,
allow_all_transformations=allow_all_transformations)
wrapped_model, eval_ml_domain = _wrap_model(model, initialization_examples, model_task, is_function)
super(MimicExplainer, self).__init__(wrapped_model, is_function=is_function,
model_task=eval_ml_domain, **kwargs)
if explainable_model_args is None:
explainable_model_args = {}
if categorical_features is None:
categorical_features = []
self._logger.debug('Initializing MimicExplainer')
self._init_features = initialization_examples.get_features(features=features)
self.features = features
if augment_data:
initialization_examples.augment_data(max_num_of_augmentations=max_num_of_augmentations)
original_training_data = initialization_examples.typed_dataset
if not all(isinstance(categorical_feature, int) for categorical_feature in categorical_features):
categorical_features = initialization_examples.get_column_indexes(self._init_features,
categorical_features)
self._timestamp_featurizer = initialization_examples.timestamp_featurizer()
is_tree_model = explainable_model.explainable_model_type == ExplainableModelType.TREE_EXPLAINABLE_MODEL_TYPE
if is_tree_model and self._supports_categoricals(explainable_model):
# Index the categorical string columns for training data
self._column_indexer = initialization_examples.string_index(columns=categorical_features)
self._one_hot_encoder = None
explainable_model_args[LightGBMParams.CATEGORICAL_FEATURE] = categorical_features
else:
# One-hot-encode categoricals for models that don't support categoricals natively
self._column_indexer = initialization_examples.string_index(columns=categorical_features)
self._one_hot_encoder = initialization_examples.one_hot_encode(columns=categorical_features)
self.classes = classes
self.explain_subset = explain_subset
self.transformations = transformations
self._shap_values_output = shap_values_output
training_data = initialization_examples.dataset
self.initialization_examples = initialization_examples
if isinstance(training_data, DenseData):
training_data = training_data.data
explainable_model_args[ExplainParams.CLASSIFICATION] = self.predict_proba_flag
if self._supports_shap_values_output(explainable_model):
explainable_model_args[ExplainParams.SHAP_VALUES_OUTPUT] = shap_values_output
self.surrogate_model = _model_distill(self.function, explainable_model, training_data,
original_training_data, explainable_model_args)
self._method = self.surrogate_model._method
self._original_eval_examples = None
self._allow_all_transformations = allow_all_transformations
def _supports_categoricals(self, explainable_model):
return issubclass(explainable_model, LGBMExplainableModel)
def _supports_shap_values_output(self, explainable_model):
return issubclass(explainable_model, LGBMExplainableModel)
def _get_explain_global_kwargs(self, evaluation_examples=None, include_local=True,
batch_size=Defaults.DEFAULT_BATCH_SIZE):
classification = self.predict_proba_flag
kwargs = {ExplainParams.METHOD: ExplainType.MIMIC}
if classification:
kwargs[ExplainParams.CLASSES] = self.classes
if evaluation_examples is not None:
if include_local:
local_explanation = self.explain_local(evaluation_examples)
kwargs[ExplainParams.LOCAL_EXPLANATION] = local_explanation
else:
if classification:
model_task = ModelTask.Classification
else:
model_task = ModelTask.Regression
if not isinstance(evaluation_examples, DatasetWrapper):
self._logger.debug('Eval examples not wrapped, wrapping')
evaluation_examples = DatasetWrapper(evaluation_examples)
kwargs = _aggregate_streamed_local_explanations(self, evaluation_examples, model_task, self.features,
batch_size, **kwargs)
return kwargs
global_importance_values = self.surrogate_model.explain_global()
order = _order_imp(global_importance_values)
if classification:
kwargs[ExplainParams.MODEL_TASK] = ExplainType.CLASSIFICATION
else:
kwargs[ExplainParams.MODEL_TASK] = ExplainType.REGRESSION
if self.model is not None:
kwargs[ExplainParams.MODEL_TYPE] = str(type(self.model))
else:
kwargs[ExplainParams.MODEL_TYPE] = ExplainType.FUNCTION
kwargs[ExplainParams.EXPECTED_VALUES] = None
kwargs[ExplainParams.CLASSIFICATION] = classification
kwargs[ExplainParams.GLOBAL_IMPORTANCE_VALUES] = global_importance_values
kwargs[ExplainParams.GLOBAL_IMPORTANCE_RANK] = order
kwargs[ExplainParams.FEATURES] = self.features
return kwargs
def explain_global(self, evaluation_examples=None, include_local=True,
batch_size=Defaults.DEFAULT_BATCH_SIZE):
if self._original_eval_examples is None:
if isinstance(evaluation_examples, DatasetWrapper):
self._original_eval_examples = evaluation_examples.original_dataset_with_type
else:
self._original_eval_examples = evaluation_examples
kwargs = self._get_explain_global_kwargs(evaluation_examples=evaluation_examples, include_local=include_local,
batch_size=batch_size)
kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
if evaluation_examples is not None:
kwargs[ExplainParams.EVAL_DATA] = evaluation_examples
ys_dict = self._get_ys_dict(self._original_eval_examples,
transformations=self.transformations,
allow_all_transformations=self._allow_all_transformations)
kwargs.update(ys_dict)
if include_local:
return _aggregate_global_from_local_explanation(**kwargs)
explanation = _create_global_explanation(**kwargs)
raw_kwargs = _get_raw_explainer_create_explanation_kwargs(kwargs=kwargs)
return explanation if self._datamapper is None else _create_raw_feats_global_explanation(
explanation, feature_maps=[self._datamapper.feature_map], features=self.features, **raw_kwargs)
def _get_explain_local_kwargs(self, evaluation_examples):
if self.reset_index:
evaluation_examples.reset_index()
kwargs = {}
original_evaluation_examples = evaluation_examples.typed_dataset
probabilities = None
if self._shap_values_output == ShapValuesOutput.TEACHER_PROBABILITY:
probabilities = self.function(original_evaluation_examples)
if self._timestamp_featurizer:
evaluation_examples.apply_timestamp_featurizer(self._timestamp_featurizer)
if self._column_indexer:
evaluation_examples.apply_indexer(self._column_indexer, bucket_unknown=True)
if self._one_hot_encoder:
evaluation_examples.apply_one_hot_encoder(self._one_hot_encoder)
dataset = evaluation_examples.dataset
kwargs[ExplainParams.NUM_FEATURES] = evaluation_examples.num_features
local_importance_values = self.surrogate_model.explain_local(dataset, probabilities=probabilities)
classification = isinstance(local_importance_values, list) or self.predict_proba_flag
expected_values = self.surrogate_model.expected_values
kwargs[ExplainParams.METHOD] = ExplainType.MIMIC
self.features = evaluation_examples.get_features(features=self.features)
kwargs[ExplainParams.FEATURES] = self.features
if self.predict_proba_flag:
if self.surrogate_model.multiclass:
local_importance_values = np.array(local_importance_values)
else:
local_importance_values = np.stack((-local_importance_values, local_importance_values))
if classification:
kwargs[ExplainParams.CLASSES] = self.classes
if self.explain_subset:
self._logger.debug('Getting subset of local_importance_values')
if classification:
local_importance_values = local_importance_values[:, :, self.explain_subset]
else:
local_importance_values = local_importance_values[:, self.explain_subset]
if classification:
kwargs[ExplainParams.MODEL_TASK] = ExplainType.CLASSIFICATION
else:
kwargs[ExplainParams.MODEL_TASK] = ExplainType.REGRESSION
if self.model is not None:
kwargs[ExplainParams.MODEL_TYPE] = str(type(self.model))
else:
kwargs[ExplainParams.MODEL_TYPE] = ExplainType.FUNCTION
kwargs[ExplainParams.LOCAL_IMPORTANCE_VALUES] = local_importance_values
kwargs[ExplainParams.EXPECTED_VALUES] = np.array(expected_values)
kwargs[ExplainParams.CLASSIFICATION] = classification
kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
kwargs[ExplainParams.EVAL_DATA] = original_evaluation_examples
ys_dict = self._get_ys_dict(self._original_eval_examples,
transformations=self.transformations,
allow_all_transformations=self._allow_all_transformations)
kwargs.update(ys_dict)
return kwargs
@tabular_decorator
def explain_local(self, evaluation_examples):
if self._original_eval_examples is None:
if isinstance(evaluation_examples, DatasetWrapper):
self._original_eval_examples = evaluation_examples.original_dataset_with_type
else:
self._original_eval_examples = evaluation_examples
if self._datamapper is not None:
evaluation_examples = transform_with_datamapper(evaluation_examples, self._datamapper)
kwargs = self._get_explain_local_kwargs(evaluation_examples)
kwargs[ExplainParams.INIT_DATA] = self.initialization_examples
kwargs[ExplainParams.EVAL_DATA] = evaluation_examples
explanation = _create_local_explanation(**kwargs)
raw_kwargs = _get_raw_explainer_create_explanation_kwargs(kwargs=kwargs)
return explanation if self._datamapper is None else _create_raw_feats_local_explanation(
explanation, feature_maps=[self._datamapper.feature_map], features=self.features, **raw_kwargs)
def _save(self):
properties = {}
for key, value in self.__dict__.items():
if key in MimicSerializationConstants.nonify_properties:
properties[key] = None
elif key in MimicSerializationConstants.save_properties:
properties[key] = value._save()
else:
properties[key] = json.dumps(value)
return properties
@staticmethod
def _load(model, properties):
mimic = MimicExplainer.__new__(MimicExplainer)
for key, value in properties.items():
if key in MimicSerializationConstants.nonify_properties:
if key == MimicSerializationConstants.MODEL:
mimic.__dict__[key] = model
elif key == MimicSerializationConstants.LOGGER:
parent = logging.getLogger(__name__)
mimic_identity = json.loads(properties[MimicSerializationConstants.IDENTITY])
mimic.__dict__[key] = parent.getChild(mimic_identity)
elif key == MimicSerializationConstants.INITIALIZATION_EXAMPLES:
mimic.__dict__[key] = None
elif key == MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES:
mimic.__dict__[key] = None
elif key == MimicSerializationConstants.TIMESTAMP_FEATURIZER:
mimic.__dict__[key] = None
elif key == MimicSerializationConstants.FUNCTION:
if json.loads(properties[MimicSerializationConstants.PREDICT_PROBA_FLAG]):
mimic.__dict__[key] = model.predict_proba
else:
mimic.__dict__[key] = model.predict
else:
raise Exception("Unknown nonify key on deserialize in MimicExplainer: {}".format(key))
elif key in MimicSerializationConstants.save_properties:
mimic.__dict__[key] = LGBMExplainableModel._load(value)
elif key in MimicSerializationConstants.enum_properties:
mimic.__dict__[key] = ShapValuesOutput(json.loads(value))
else:
mimic.__dict__[key] = json.loads(value)
if MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES not in mimic.__dict__:
mimic.__dict__[MimicSerializationConstants.ORIGINAL_EVAL_EXAMPLES] = None
if MimicSerializationConstants.TIMESTAMP_FEATURIZER not in mimic.__dict__:
mimic.__dict__[MimicSerializationConstants.TIMESTAMP_FEATURIZER] = None
return mimic
| true | true |
1c314a83c757289e9cad510ead448cfc9ded4f58 | 4,987 | py | Python | meidoo/meidoo/apps/orders/serializers.py | amourbrus/meiduo_mall | 965b3d4685d1a8fe18a3177cc864f27eeb516081 | [
"MIT"
] | null | null | null | meidoo/meidoo/apps/orders/serializers.py | amourbrus/meiduo_mall | 965b3d4685d1a8fe18a3177cc864f27eeb516081 | [
"MIT"
] | null | null | null | meidoo/meidoo/apps/orders/serializers.py | amourbrus/meiduo_mall | 965b3d4685d1a8fe18a3177cc864f27eeb516081 | [
"MIT"
] | null | null | null | from decimal import Decimal
from django.db import transaction
from django.utils import timezone
from django_redis import get_redis_connection
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
from meidoo.utils.exceptions import logger
class CartSKUSerializer(serializers.ModelSerializer):
    """Serializer for cart SKU data (cart item with its selected quantity)."""
    # Quantity of this SKU in the cart; not a model field, supplied by the view.
    count = serializers.IntegerField(label='数量')
    class Meta:
        model = SKU
        fields = ('id', 'name', 'default_image_url', 'price', 'count')
class OrderSettlementSerializer(serializers.Serializer):
    """Serializer for the order-settlement page data (freight plus cart SKUs)."""
    # Shipping fee for the order.
    freight = serializers.DecimalField(label='运费', max_digits=10, decimal_places=2)
    # Selected cart items being settled.
    skus = CartSKUSerializer(many=True)
class SaveOrderSerializer(serializers.ModelSerializer):
    """Serializer for placing an order (order submission data)."""
    class Meta:
        model = OrderInfo
        fields = ('order_id', 'address', 'pay_method')
        read_only_fields = ('order_id',)
        extra_kwargs = {
            'address': {
                'write_only': True,
                'required': True
            },
            'pay_method':{
                'write_only': True,
                'required': True,
            }
        }
    def create(self, validated_data):
        """Create the order, its line items, and clear the purchased cart entries.

        Runs inside a DB transaction with a savepoint; rolls back on any failure.
        """
        # Get the user placing the order
        user = self.context['request'].user
        # Build the order id: current timestamp + zero-padded user id
        order_id = timezone.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        address = validated_data['address']
        pay_method = validated_data['pay_method']
        # Create the order inside a transaction
        with transaction.atomic():
            # Create a savepoint so partial work can be rolled back
            save_point = transaction.savepoint()
            try:
                # Create the order header record
                order = OrderInfo.objects.create(
                    order_id = order_id,
                    user = user,
                    address = address,
                    total_count = 0,
                    total_amount = Decimal(0),
                    freight = Decimal(10),
                    pay_method = pay_method,
                    status=OrderInfo.ORDER_STATUS_ENUM['UNSEND'] if pay_method == OrderInfo.PAY_METHODS_ENUM[
                        'CASH'] else OrderInfo.ORDER_STATUS_ENUM['UNPAID']
                )
                # Load the cart hash and the set of selected SKU ids from redis
                redis_conn = get_redis_connection('cart')
                redis_cart = redis_conn.hgetall('cart_%s' % user.id)
                cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
                # Convert the bytes returned by redis to ints
                cart = {}
                for sku_id in cart_selected:
                    cart[int(sku_id)] = int(redis_cart[sku_id])
                sku_id_list = cart.keys()
                # Process each ordered SKU
                for sku_id in sku_id_list:
                    # NOTE(review): this loop always exits after one pass (unconditional
                    # break below); it looks like a leftover optimistic-locking retry
                    # loop — confirm before removing.
                    while True:
                        sku = SKU.objects.get(id=sku_id)
                        sku_count = cart[sku.id]
                        # Check stock before decrementing
                        origin_stock = sku.stock  # original stock
                        origin_sales = sku.sales  # original sales
                        if sku_count > origin_stock:
                            transaction.savepoint_rollback(save_point)
                            raise serializers.ValidationError('商品库存不足')
                        # Enough stock: decrement stock and bump sales
                        new_stock = origin_stock - sku_count
                        new_sales = origin_sales + sku_count
                        sku.stock = new_stock
                        sku.sales = new_sales
                        sku.save()
                        # Accumulate sales on the SPU (goods) record
                        sku.goods.sales += sku_count
                        sku.goods.save()
                        # Accumulate order totals
                        order.total_count += sku_count  # total item count
                        order.total_amount += (sku.price * sku_count)  # total amount
                        # Persist the order line item
                        OrderGoods.objects.create(
                            order = order,
                            sku = sku,
                            count = sku_count,
                            price = sku.price,
                        )
                        break
                # Add freight to the order total and save
                order.total_amount += order.freight
                order.save()
            except ValidationError:
                raise
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_point)
                raise
            # Commit the savepoint
            transaction.savepoint_commit(save_point)
        # Remove the purchased items from the redis cart
        pl = redis_conn.pipeline()
        pl.hdel('cart_%s' % user.id, *cart_selected)
        pl.srem('cart_selected_%s' % user.id, *cart_selected)
        pl.execute()
        return order
| 33.02649 | 109 | 0.49609 | from decimal import Decimal
from django.db import transaction
from django.utils import timezone
from django_redis import get_redis_connection
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from goods.models import SKU
from orders.models import OrderInfo, OrderGoods
from meidoo.utils.exceptions import logger
class CartSKUSerializer(serializers.ModelSerializer):
count = serializers.IntegerField(label='数量')
class Meta:
model = SKU
fields = ('id', 'name', 'default_image_url', 'price', 'count')
class OrderSettlementSerializer(serializers.Serializer):
freight = serializers.DecimalField(label='运费', max_digits=10, decimal_places=2)
skus = CartSKUSerializer(many=True)
class SaveOrderSerializer(serializers.ModelSerializer):
class Meta:
model = OrderInfo
fields = ('order_id', 'address', 'pay_method')
read_only_fields = ('order_id',)
extra_kwargs = {
'address': {
'write_only': True,
'required': True
},
'pay_method':{
'write_only': True,
'required': True,
}
}
def create(self, validated_data):
user = self.context['request'].user
order_id = timezone.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
address = validated_data['address']
pay_method = validated_data['pay_method']
with transaction.atomic():
save_point = transaction.savepoint()
try:
order = OrderInfo.objects.create(
order_id = order_id,
user = user,
address = address,
total_count = 0,
total_amount = Decimal(0),
freight = Decimal(10),
pay_method = pay_method,
status=OrderInfo.ORDER_STATUS_ENUM['UNSEND'] if pay_method == OrderInfo.PAY_METHODS_ENUM[
'CASH'] else OrderInfo.ORDER_STATUS_ENUM['UNPAID']
)
redis_conn = get_redis_connection('cart')
redis_cart = redis_conn.hgetall('cart_%s' % user.id)
cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
cart = {}
for sku_id in cart_selected:
cart[int(sku_id)] = int(redis_cart[sku_id])
sku_id_list = cart.keys()
for sku_id in sku_id_list:
while True:
sku = SKU.objects.get(id=sku_id)
sku_count = cart[sku.id]
origin_stock = sku.stock
origin_sales = sku.sales
if sku_count > origin_stock:
transaction.savepoint_rollback(save_point)
raise serializers.ValidationError('商品库存不足')
new_stock = origin_stock - sku_count
new_sales = origin_sales + sku_count
sku.stock = new_stock
sku.sales = new_sales
sku.save()
sku.goods.sales += sku_count
sku.goods.save()
order.total_count += sku_count
order.total_amount += (sku.price * sku_count)
OrderGoods.objects.create(
order = order,
sku = sku,
count = sku_count,
price = sku.price,
)
break
order.total_amount += order.freight
order.save()
except ValidationError:
raise
except Exception as e:
logger.error(e)
transaction.savepoint_rollback(save_point)
raise
transaction.savepoint_commit(save_point)
pl = redis_conn.pipeline()
pl.hdel('cart_%s' % user.id, *cart_selected)
pl.srem('cart_selected_%s' % user.id, *cart_selected)
pl.execute()
return order
| true | true |
1c314b335cc15f8a988e00f70740c268b55cf132 | 391 | py | Python | profile_api/migrations/0002_auto_20210516_0944.py | manishmittal050/profile-rest-api | 458806f901e42bfd98fbd14e3da37da7240a01d4 | [
"MIT"
] | null | null | null | profile_api/migrations/0002_auto_20210516_0944.py | manishmittal050/profile-rest-api | 458806f901e42bfd98fbd14e3da37da7240a01d4 | [
"MIT"
] | null | null | null | profile_api/migrations/0002_auto_20210516_0944.py | manishmittal050/profile-rest-api | 458806f901e42bfd98fbd14e3da37da7240a01d4 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2021-05-16 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile_api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
| 20.578947 | 53 | 0.606138 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile_api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
| true | true |
1c314be71dc8f37a5d141751c30c55aed4361499 | 6,174 | py | Python | tests/testshop/settings.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 2,160 | 2016-01-24T05:08:59.000Z | 2022-03-31T12:15:30.000Z | tests/testshop/settings.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 455 | 2016-01-29T22:41:33.000Z | 2022-03-23T08:28:01.000Z | tests/testshop/settings.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 818 | 2016-02-01T15:09:07.000Z | 2022-03-28T19:52:26.000Z | from django.urls import reverse_lazy
from django.utils.text import format_lazy
# --- Core test-suite settings: DEBUG on, throwaway secret key. ---
DEBUG = True
ROOT_URLCONF = 'testshop.urls'
SECRET_KEY = 'test'
SITE_ID = 1

# In-memory SQLite keeps each test run's database ephemeral and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

# Two template engines: Django's own for pages, post_office's for e-mail
# templates (second entry).
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'DIRS': [],
    'OPTIONS': {
        'context_processors': [
            'django.contrib.auth.context_processors.auth',
            'django.template.context_processors.debug',
            'django.template.context_processors.i18n',
            'django.template.context_processors.media',
            'django.template.context_processors.static',
            'django.template.context_processors.tz',
            'django.template.context_processors.csrf',
            'django.template.context_processors.request',
            'django.contrib.messages.context_processors.messages',
            'sekizai.context_processors.sekizai',
            'cms.context_processors.cms_settings',
        ]
    }
}, {
    'BACKEND': 'post_office.template.backends.post_office.PostOfficeTemplates',
    'APP_DIRS': True,
    'DIRS': [],
    'OPTIONS': {
        'context_processors': [
            'django.contrib.auth.context_processors.auth',
            'django.template.context_processors.debug',
            'django.template.context_processors.i18n',
            'django.template.context_processors.media',
            'django.template.context_processors.static',
            'django.template.context_processors.tz',
            'django.template.context_processors.request',
        ]
    }
}]

# Order matters: shop's CustomerMiddleware must follow AuthenticationMiddleware.
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'shop.middleware.CustomerMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'cms.middleware.language.LanguageCookieMiddleware',
    'cms.middleware.user.CurrentUserMiddleware',
    'cms.middleware.page.CurrentPageMiddleware',
    'cms.middleware.utils.ApphookReloadMiddleware',
    'cms.middleware.toolbar.ToolbarMiddleware',
]

INSTALLED_APPS = [
    'django.contrib.auth',
    'email_auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.staticfiles',
    'jsonfield',
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'django_fsm',
    'fsm_admin',
    'filer',
    'easy_thumbnails',
    'treebeard',
    'menus',
    'sekizai',
    'cms',
    'adminsortable2',
    'djangocms_text_ckeditor',
    'django_select2',
    'cmsplugin_cascade',
    'cmsplugin_cascade.clipboard',
    'cmsplugin_cascade.extra_fields',
    'cmsplugin_cascade.icon',
    'cmsplugin_cascade.sharable',
    'cmsplugin_cascade.segmentation',
    'post_office',
    'shop',
    'testshop',
]

# Internationalization: single-language (English), timezone-aware, UTC.
USE_I18N = False
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'UTC'
X_FRAME_OPTIONS = 'SAMEORIGIN'
SILENCED_SYSTEM_CHECKS = ['auth.W004']

LANGUAGES = [
    ('en', 'English'),
]
LANGUAGE_CODE = 'en'

# Cache-backed sessions avoid session-table writes during tests.
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

# --- django CMS / cascade configuration ---
CMS_TEMPLATES = [
    ('page.html', "Default Page"),
]

CMS_PLACEHOLDER_CONF = {
    'Main Content': {
        'plugins': ['BootstrapContainerPlugin'],
    },
}

CMSPLUGIN_CASCADE_PLUGINS = [
    'cmsplugin_cascade.bootstrap4',
    'cmsplugin_cascade.segmentation',
    'cmsplugin_cascade.generic',
    'cmsplugin_cascade.icon',
    'cmsplugin_cascade.leaflet',
    'cmsplugin_cascade.link',
    'shop.cascade',
]

CMSPLUGIN_CASCADE = {
    'link_plugin_classes': [
        'shop.cascade.plugin_base.CatalogLinkPluginBase',
        'shop.cascade.plugin_base.CatalogLinkForm',
    ],
    'alien_plugins': ['TextPlugin', 'TextLinkPlugin', 'AcceptConditionPlugin'],
    'bootstrap4': {
        'template_basedir': 'angular-ui',
    },
    'segmentation_mixins': [
        ('shop.cascade.segmentation.EmulateCustomerModelMixin', 'shop.cascade.segmentation.EmulateCustomerAdminMixin'),
    ],
}

# --- easy_thumbnails / filer ---
THUMBNAIL_PROCESSORS = (
    'easy_thumbnails.processors.colorspace',
    'easy_thumbnails.processors.autocrop',
    'filer.thumbnail_processors.scale_and_crop_with_subject_location',
    'easy_thumbnails.processors.filters',
)
# NOTE(review): the trailing comma makes this the tuple (True,), not the bool
# True — likely unintended, though the tuple is still truthy. Confirm intent.
THUMBNAIL_PRESERVE_EXTENSIONS = True,

CKEDITOR_SETTINGS = {
    'language': '{{ language }}',
    'skin': 'moono',
    'toolbar': 'CMS',
    'toolbar_HTMLField': [
        ['Undo', 'Redo'],
        ['cmsplugins', '-', 'ShowBlocks'],
        ['Format', 'Styles'],
        ['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
        ['Maximize', ''],
        '/',
        ['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
        ['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
        ['HorizontalRule'],
        ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
        ['Source']
    ],
    'stylesSet': format_lazy('default:{}', reverse_lazy('admin:cascade_texteditor_config')),
}

# --- django-shop configuration ---
SHOP_APP_LABEL = 'testshop'

SHOP_CART_MODIFIERS = [
    'shop.modifiers.defaults.DefaultCartModifier',
    'shop.modifiers.taxes.CartIncludeTaxModifier',
    'shop.payment.modifiers.PayInAdvanceModifier',
    'testshop.modifiers.ComplexPayInAdvanceModifier',
    'shop.shipping.modifiers.SelfCollectionModifier',
]

SHOP_ORDER_WORKFLOWS = [
    'shop.payment.workflows.ManualPaymentWorkflowMixin',
    'shop.payment.workflows.CancelOrderWorkflowMixin',
    'shop.shipping.workflows.PartialDeliveryWorkflowMixin',
]

# E-mail-based user model and allauth authentication backend.
AUTH_USER_MODEL = 'email_auth.User'

AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]

REST_AUTH_SERIALIZERS = {
    'LOGIN_SERIALIZER': 'shop.serializers.auth.LoginSerializer',
}

POST_OFFICE = {
    'TEMPLATE_ENGINE': 'post_office',
}
| 27.5625 | 119 | 0.671526 | from django.urls import reverse_lazy
from django.utils.text import format_lazy
DEBUG = True
ROOT_URLCONF = 'testshop.urls'
SECRET_KEY = 'test'
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
]
}
}, {
'BACKEND': 'post_office.template.backends.post_office.PostOfficeTemplates',
'APP_DIRS': True,
'DIRS': [],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
]
}
}]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'shop.middleware.CustomerMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.utils.ApphookReloadMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
]
INSTALLED_APPS = [
'django.contrib.auth',
'email_auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
'jsonfield',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django_fsm',
'fsm_admin',
'filer',
'easy_thumbnails',
'treebeard',
'menus',
'sekizai',
'cms',
'adminsortable2',
'djangocms_text_ckeditor',
'django_select2',
'cmsplugin_cascade',
'cmsplugin_cascade.clipboard',
'cmsplugin_cascade.extra_fields',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.sharable',
'cmsplugin_cascade.segmentation',
'post_office',
'shop',
'testshop',
]
USE_I18N = False
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'UTC'
X_FRAME_OPTIONS = 'SAMEORIGIN'
SILENCED_SYSTEM_CHECKS = ['auth.W004']
LANGUAGES = [
('en', 'English'),
]
LANGUAGE_CODE = 'en'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CMS_TEMPLATES = [
('page.html', "Default Page"),
]
CMS_PLACEHOLDER_CONF = {
'Main Content': {
'plugins': ['BootstrapContainerPlugin'],
},
}
CMSPLUGIN_CASCADE_PLUGINS = [
'cmsplugin_cascade.bootstrap4',
'cmsplugin_cascade.segmentation',
'cmsplugin_cascade.generic',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.leaflet',
'cmsplugin_cascade.link',
'shop.cascade',
]
CMSPLUGIN_CASCADE = {
'link_plugin_classes': [
'shop.cascade.plugin_base.CatalogLinkPluginBase',
'shop.cascade.plugin_base.CatalogLinkForm',
],
'alien_plugins': ['TextPlugin', 'TextLinkPlugin', 'AcceptConditionPlugin'],
'bootstrap4': {
'template_basedir': 'angular-ui',
},
'segmentation_mixins': [
('shop.cascade.segmentation.EmulateCustomerModelMixin', 'shop.cascade.segmentation.EmulateCustomerAdminMixin'),
],
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
THUMBNAIL_PRESERVE_EXTENSIONS = True,
CKEDITOR_SETTINGS = {
'language': '{{ language }}',
'skin': 'moono',
'toolbar': 'CMS',
'toolbar_HTMLField': [
['Undo', 'Redo'],
['cmsplugins', '-', 'ShowBlocks'],
['Format', 'Styles'],
['TextColor', 'BGColor', '-', 'PasteText', 'PasteFromWord'],
['Maximize', ''],
'/',
['Bold', 'Italic', 'Underline', '-', 'Subscript', 'Superscript', '-', 'RemoveFormat'],
['JustifyLeft', 'JustifyCenter', 'JustifyRight'],
['HorizontalRule'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Table'],
['Source']
],
'stylesSet': format_lazy('default:{}', reverse_lazy('admin:cascade_texteditor_config')),
}
SHOP_APP_LABEL = 'testshop'
SHOP_CART_MODIFIERS = [
'shop.modifiers.defaults.DefaultCartModifier',
'shop.modifiers.taxes.CartIncludeTaxModifier',
'shop.payment.modifiers.PayInAdvanceModifier',
'testshop.modifiers.ComplexPayInAdvanceModifier',
'shop.shipping.modifiers.SelfCollectionModifier',
]
SHOP_ORDER_WORKFLOWS = [
'shop.payment.workflows.ManualPaymentWorkflowMixin',
'shop.payment.workflows.CancelOrderWorkflowMixin',
'shop.shipping.workflows.PartialDeliveryWorkflowMixin',
]
AUTH_USER_MODEL = 'email_auth.User'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
REST_AUTH_SERIALIZERS = {
'LOGIN_SERIALIZER': 'shop.serializers.auth.LoginSerializer',
}
POST_OFFICE = {
'TEMPLATE_ENGINE': 'post_office',
}
| true | true |
1c314c09b523262c1d016eb7b0d051d3bc9ce51c | 735 | py | Python | image_downloader.py | art-litv/Space-Instagram | 98fd162cc7795bf66ca28fa2b112dc0c837914fa | [
"MIT"
] | null | null | null | image_downloader.py | art-litv/Space-Instagram | 98fd162cc7795bf66ca28fa2b112dc0c837914fa | [
"MIT"
] | null | null | null | image_downloader.py | art-litv/Space-Instagram | 98fd162cc7795bf66ca28fa2b112dc0c837914fa | [
"MIT"
] | null | null | null | import requests
import os
import urllib3
from pathlib import PurePath
def download_image(url: str, path: str, verify=True):
    """Download the image at *url* into the local ``images`` directory.

    Required for fetch_spacex.py and fetch_hubble.py.

    Args:
        url: Direct link to the image; its file extension is reused for the
            saved file.
        path: ``os.sep``-separated path (without extension) below ``images``.
        verify: Whether to verify the server's TLS certificate.

    Raises:
        requests.HTTPError: If the server responds with an error status.
    """
    if not verify:
        # Silence TLS warnings only when verification is deliberately
        # disabled; the original suppressed them globally in every call.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    response = requests.get(url, verify=verify)
    response.raise_for_status()

    # Split "dir1/dir2/name" into the directory prefix and the bare filename.
    filename = path.split(os.sep)[-1]
    directories = path[0:path.find(filename)]
    # exist_ok=True replaces the former try/except FileExistsError dance.
    os.makedirs(f"images{os.sep}{directories}", exist_ok=True)

    # Reuse the source URL's file extension for the saved image.
    path = path + '.' + url.split(".")[-1]
    with open(f"images{os.sep}{path}", 'wb') as file:
        file.write(response.content)
| 27.222222 | 71 | 0.678912 | import requests
import os
import urllib3
from pathlib import PurePath
def download_image(url: str, path: str, verify=True):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response = requests.get(url, verify=verify)
response.raise_for_status()
filename = path.split(os.sep)[-1]
directories = path[0:path.find(filename)]
try:
os.makedirs(f"images{os.sep}{directories}", exist_ok=False)
except FileExistsError:
pass
path = path + '.' + url.split(".")[-1]
with open(f"images{os.sep}{path}", 'wb') as file:
file.write(response.content)
| true | true |
1c314d4280a04da568ad4442058981508effe981 | 2,057 | py | Python | test/test_validators.py | srobo/python-dbus-next | 934df62b29651cfbf513d244ad7ed138faab6fe4 | [
"MIT"
] | 1 | 2021-02-28T15:51:52.000Z | 2021-02-28T15:51:52.000Z | test/test_validators.py | srobo/python-dbus-next | 934df62b29651cfbf513d244ad7ed138faab6fe4 | [
"MIT"
] | null | null | null | test/test_validators.py | srobo/python-dbus-next | 934df62b29651cfbf513d244ad7ed138faab6fe4 | [
"MIT"
] | 1 | 2021-03-08T14:22:27.000Z | 2021-03-08T14:22:27.000Z | from dbus_next import (is_bus_name_valid, is_object_path_valid, is_interface_name_valid,
is_member_name_valid)
def test_object_path_validator():
    """Well-formed D-Bus object paths pass, malformed ones fail."""
    accepted = ('/', '/foo', '/foo/bar', '/foo/bar/bat')
    rejected = (None, {}, '', 'foo', 'foo/bar', '/foo/bar/', '/$/foo/bar',
                '/foo//bar', '/foo$bar/baz')

    for candidate in accepted:
        assert is_object_path_valid(candidate), f'path should be valid: "{candidate}"'

    for candidate in rejected:
        assert not is_object_path_valid(candidate), f'path should be invalid: "{candidate}"'
def test_bus_name_validator():
    """Well-formed D-Bus bus names pass, malformed ones fail."""
    accepted = (
        'foo.bar',
        'foo.bar.bat',
        '_foo._bar',
        'foo.bar69',
        'foo.bar-69',
        'org.mpris.MediaPlayer2.google-play-desktop-player',
    )
    rejected = (None, {}, '', '5foo.bar', 'foo.6bar', '.foo.bar', 'bar..baz',
                '$foo.bar', 'foo$.ba$r')

    for candidate in accepted:
        assert is_bus_name_valid(candidate), f'bus name should be valid: "{candidate}"'

    for candidate in rejected:
        assert not is_bus_name_valid(candidate), f'bus name should be invalid: "{candidate}"'
def test_interface_name_validator():
    """Well-formed interface names pass; hyphens (unlike bus names) fail."""
    accepted = ('foo.bar', 'foo.bar.bat', '_foo._bar', 'foo.bar69')
    rejected = (
        None,
        {},
        '',
        '5foo.bar',
        'foo.6bar',
        '.foo.bar',
        'bar..baz',
        '$foo.bar',
        'foo$.ba$r',
        'org.mpris.MediaPlayer2.google-play-desktop-player',
    )

    for candidate in accepted:
        assert is_interface_name_valid(candidate), f'interface name should be valid: "{candidate}"'

    for candidate in rejected:
        assert not is_interface_name_valid(candidate), f'interface name should be invalid: "{candidate}"'
def test_member_name_validator():
    """Well-formed member names pass; dots and leading digits fail."""
    accepted = ('foo', 'FooBar', 'Bat_Baz69')
    rejected = (None, {}, '', 'foo.bar', '5foo', 'foo$bar')

    for candidate in accepted:
        assert is_member_name_valid(candidate), f'member name should be valid: "{candidate}"'

    for candidate in rejected:
        assert not is_member_name_valid(candidate), f'member name should be invalid: "{candidate}"'
| 38.811321 | 95 | 0.635391 | from dbus_next import (is_bus_name_valid, is_object_path_valid, is_interface_name_valid,
is_member_name_valid)
def test_object_path_validator():
valid_paths = ['/', '/foo', '/foo/bar', '/foo/bar/bat']
invalid_paths = [
None, {}, '', 'foo', 'foo/bar', '/foo/bar/', '/$/foo/bar', '/foo//bar', '/foo$bar/baz'
]
for path in valid_paths:
assert is_object_path_valid(path), f'path should be valid: "{path}"'
for path in invalid_paths:
assert not is_object_path_valid(path), f'path should be invalid: "{path}"'
def test_bus_name_validator():
valid_names = [
'foo.bar', 'foo.bar.bat', '_foo._bar', 'foo.bar69', 'foo.bar-69',
'org.mpris.MediaPlayer2.google-play-desktop-player'
]
invalid_names = [
None, {}, '', '5foo.bar', 'foo.6bar', '.foo.bar', 'bar..baz', '$foo.bar', 'foo$.ba$r'
]
for name in valid_names:
assert is_bus_name_valid(name), f'bus name should be valid: "{name}"'
for name in invalid_names:
assert not is_bus_name_valid(name), f'bus name should be invalid: "{name}"'
def test_interface_name_validator():
valid_names = ['foo.bar', 'foo.bar.bat', '_foo._bar', 'foo.bar69']
invalid_names = [
None, {}, '', '5foo.bar', 'foo.6bar', '.foo.bar', 'bar..baz', '$foo.bar', 'foo$.ba$r',
'org.mpris.MediaPlayer2.google-play-desktop-player'
]
for name in valid_names:
assert is_interface_name_valid(name), f'interface name should be valid: "{name}"'
for name in invalid_names:
assert not is_interface_name_valid(name), f'interface name should be invalid: "{name}"'
def test_member_name_validator():
valid_members = ['foo', 'FooBar', 'Bat_Baz69']
invalid_members = [None, {}, '', 'foo.bar', '5foo', 'foo$bar']
for member in valid_members:
assert is_member_name_valid(member), f'member name should be valid: "{member}"'
for member in invalid_members:
assert not is_member_name_valid(member), f'member name should be invalid: "{member}"'
| true | true |
1c314dfe0048db5f295b09c35b1e27c582e5f4bb | 78 | py | Python | run.py | TianxiaoHu/GomokuAgent | 8cb05025059945692846cbb0541a834e9f985ce2 | [
"MIT"
] | 15 | 2017-06-29T07:47:12.000Z | 2021-11-09T05:33:59.000Z | run.py | TianxiaoHu/GomokuAgent | 8cb05025059945692846cbb0541a834e9f985ce2 | [
"MIT"
] | null | null | null | run.py | TianxiaoHu/GomokuAgent | 8cb05025059945692846cbb0541a834e9f985ce2 | [
"MIT"
] | 1 | 2019-12-01T07:53:48.000Z | 2019-12-01T07:53:48.000Z | #!/usr/bin/env python
from app import app

# Development entry point. debug=True enables the reloader/debugger and must
# not be used in production; threaded=True serves requests in worker threads.
app.run(debug=True, threaded=True)
| 15.6 | 34 | 0.74359 |
from app import app
app.run(debug=True, threaded=True)
| true | true |
1c314fdde8f8337ef25ea5bbed6c290af6543d97 | 5,429 | py | Python | lib/web/ui.py | Juniper/YAPT | b1a54998867c70352001415d5e4b70408480dab9 | [
"BSD-3-Clause"
] | 33 | 2018-05-17T04:16:56.000Z | 2021-11-25T21:21:02.000Z | lib/web/ui.py | Juniper/YAPT | b1a54998867c70352001415d5e4b70408480dab9 | [
"BSD-3-Clause"
] | 4 | 2021-01-10T20:45:31.000Z | 2021-09-23T23:21:16.000Z | lib/web/ui.py | Juniper/YAPT | b1a54998867c70352001415d5e4b70408480dab9 | [
"BSD-3-Clause"
] | 8 | 2018-09-19T12:18:54.000Z | 2021-01-10T03:49:10.000Z | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
# Copyright (c) 2018 Juniper Networks, Inc.
# All rights reserved.
# Use is subject to license terms.
#
# Author: cklewar
import socket
import jsonpickle
import lib.constants as c
from lib.amqp.amqpadapter import AMQPBlockingServerAdapter
from lib.amqp.amqpmessage import AMQPMessage
from lib.web.logviewer import LogViewer
from lib.logmsg import LogCommon
from lib.logmsg import LogUiProcessor as logmsg
from lib.processor import BackendClientProcessor
from lib.tools import Tools
from lib.web.adapter.amqp2ws import Amqp2ws
class UiProcessor(AMQPBlockingServerAdapter):
    """Bridge AMQP device/UI events to the YAPT web UI over a websocket.

    Consumes AMQP messages describing device lifecycle changes (add, update,
    task-state change, reset, reboot, log-viewer updates) and forwards each
    one as a JSON action to the web UI's websocket endpoint.
    """

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
        super(UiProcessor, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
        self._logger.debug(Tools.create_log_msg(self.__class__.__name__, None,
                                                LogCommon.IS_SUBCLASS.format(self.__class__.__name__,
                                                                             issubclass(UiProcessor,
                                                                                        AMQPBlockingServerAdapter))))
        # Websocket endpoint of the web UI this processor pushes updates to.
        self.url = 'ws://{0}:{1}/yapt/ws?clientname={2}'.format(c.conf.YAPT.WebUiAddress,
                                                                str(c.conf.YAPT.WebUiPort), c.conf.YAPT.WebUiPlugin)
        self.amqp2ws = Amqp2ws(name=c.conf.YAPT.WebUiPlugin, url=self.url)
        self.backendp = BackendClientProcessor(exchange='', routing_key=c.AMQP_RPC_BACKEND_QUEUE)
        LogViewer().run_service()

    def receive_message(self, ch, method, properties, body):
        """AMQP consumer callback: ack, decode, and dispatch by message type.

        Every recognized AMQPMessage type is logged and converted to a UI
        action JSON payload which is pushed through :meth:`conn_hdlr`.
        """
        if body is not None:
            ch.basic_ack(delivery_tag=method.delivery_tag)
            body_decoded = jsonpickle.decode(body)

            if isinstance(body_decoded,
                          AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_ADD == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                message = body_decoded.payload.device_to_json(action=c.UI_ACTION_ADD_DEVICE)
                self.conn_hdlr(message=message)

            elif isinstance(body_decoded,
                            AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_UPDATE == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE)
                self.conn_hdlr(message=message)

            elif isinstance(body_decoded,
                            AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_UPDATE_TASK_STATE == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                # Payload is a (serial, task_name, task_state) triple.
                device_serial = body_decoded.payload[0]
                task_name = body_decoded.payload[1]
                task_state = body_decoded.payload[2]
                message = self.amqp2ws.prepare_device_task_data(device_serial=device_serial,
                                                                action=c.UI_ACTION_UPDATE_TASK_STATE,
                                                                task_name=task_name, task_state=task_state)
                self.conn_hdlr(message=message)

            elif isinstance(body_decoded,
                            AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_AND_RESET == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE_AND_RESET_TASK)
                self.conn_hdlr(message=message)

            elif isinstance(body_decoded,
                            AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_AND_REBOOT == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE)
                self.conn_hdlr(message=message)

            elif isinstance(body_decoded,
                            AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_LOG_VIEWER == body_decoded.message_type:
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
                self.conn_hdlr(message=body_decoded.payload)

            else:
                # Unknown message type: log it but do not forward to the UI.
                Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
        else:
            Tools.create_log_msg(self.__class__.__name__, None, logmsg.UIPRO_AMQP_MSG_NOK)

    def send_message(self, message, routing_key):
        """Not used: this adapter only consumes; publishing is a no-op."""
        pass

    def conn_hdlr(self, message=None):
        """Open a fresh websocket connection, push *message*, and close it."""
        amqp2ws = Amqp2ws(name=c.conf.YAPT.WebUiPlugin, url=self.url)

        try:
            amqp2ws.connect()

            if message is not None:
                amqp2ws.send(message)
                amqp2ws.close()
            else:
                Tools.create_log_msg(self.__class__.__name__, None, logmsg.UIPRO_WS_MSG_NOK)

        except socket.error as se:
            # Fix: socket.error (OSError) has no ``message`` attribute on
            # Python 3, so the original handler raised AttributeError while
            # trying to log the failure. Fall back to str(se) instead.
            Tools.create_log_msg(self.__class__.__name__, None,
                                 logmsg.UIPRO_WS_SOCK_ERR.format(getattr(se, 'message', str(se)),
                                                                 getattr(se, 'filename', None),
                                                                 se.strerror, se.args))
| 47.208696 | 117 | 0.640081 |
import socket
import jsonpickle
import lib.constants as c
from lib.amqp.amqpadapter import AMQPBlockingServerAdapter
from lib.amqp.amqpmessage import AMQPMessage
from lib.web.logviewer import LogViewer
from lib.logmsg import LogCommon
from lib.logmsg import LogUiProcessor as logmsg
from lib.processor import BackendClientProcessor
from lib.tools import Tools
from lib.web.adapter.amqp2ws import Amqp2ws
class UiProcessor(AMQPBlockingServerAdapter):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
super(UiProcessor, self).__init__(group=group, target=target, name=name, args=args, kwargs=kwargs)
self._logger.debug(Tools.create_log_msg(self.__class__.__name__, None,
LogCommon.IS_SUBCLASS.format(self.__class__.__name__,
issubclass(UiProcessor,
AMQPBlockingServerAdapter))))
self.url = 'ws://{0}:{1}/yapt/ws?clientname={2}'.format(c.conf.YAPT.WebUiAddress,
str(c.conf.YAPT.WebUiPort), c.conf.YAPT.WebUiPlugin)
self.amqp2ws = Amqp2ws(name=c.conf.YAPT.WebUiPlugin, url=self.url)
self.backendp = BackendClientProcessor(exchange='', routing_key=c.AMQP_RPC_BACKEND_QUEUE)
LogViewer().run_service()
def receive_message(self, ch, method, properties, body):
if body is not None:
ch.basic_ack(delivery_tag=method.delivery_tag)
body_decoded = jsonpickle.decode(body)
if isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_ADD == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
message = body_decoded.payload.device_to_json(action=c.UI_ACTION_ADD_DEVICE)
self.conn_hdlr(message=message)
elif isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_UPDATE == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE)
self.conn_hdlr(message=message)
elif isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_DEVICE_UPDATE_TASK_STATE == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
device_serial = body_decoded.payload[0]
task_name = body_decoded.payload[1]
task_state = body_decoded.payload[2]
message = self.amqp2ws.prepare_device_task_data(device_serial=device_serial,
action=c.UI_ACTION_UPDATE_TASK_STATE,
task_name=task_name, task_state=task_state)
self.conn_hdlr(message=message)
elif isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_AND_RESET == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE_AND_RESET_TASK)
self.conn_hdlr(message=message)
elif isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_AND_REBOOT == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
message = body_decoded.payload.device_to_json(action=c.UI_ACTION_UPDATE_DEVICE)
self.conn_hdlr(message=message)
elif isinstance(body_decoded,
AMQPMessage) and c.AMQP_MSG_TYPE_UI_UPDATE_LOG_VIEWER == body_decoded.message_type:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
self.conn_hdlr(message=body_decoded.payload)
else:
Tools.amqp_receive_to_logger(routing_key=method.routing_key, body_decoded=body_decoded)
else:
Tools.create_log_msg(self.__class__.__name__, None, logmsg.UIPRO_AMQP_MSG_NOK)
def send_message(self, message, routing_key):
pass
def conn_hdlr(self, message=None):
amqp2ws = Amqp2ws(name=c.conf.YAPT.WebUiPlugin, url=self.url)
try:
amqp2ws.connect()
if message is not None:
amqp2ws.send(message)
amqp2ws.close()
else:
Tools.create_log_msg(self.__class__.__name__, None, logmsg.UIPRO_WS_MSG_NOK)
except socket.error as se:
Tools.create_log_msg(self.__class__.__name__, None,
logmsg.UIPRO_WS_SOCK_ERR.format(se.message, se.filename, se.strerror, se.args))
| true | true |
1c3150901e20732eec2c6bd14f28487bfc0b51c5 | 1,684 | py | Python | freezing/model/migrations/versions/f620a24f5f7e_least_variance.py | freezingsaddles/freezing-model | 3bb03739d5bdff418bcf17707a52c9994c45e52f | [
"Apache-2.0"
] | 2 | 2020-01-02T01:23:00.000Z | 2022-01-03T20:57:39.000Z | freezing/model/migrations/versions/f620a24f5f7e_least_variance.py | freezingsaddles/freezing-model | 3bb03739d5bdff418bcf17707a52c9994c45e52f | [
"Apache-2.0"
] | 8 | 2018-01-19T14:36:05.000Z | 2021-11-24T19:22:19.000Z | freezing/model/migrations/versions/f620a24f5f7e_least_variance.py | freezingsaddles/freezing-model | 3bb03739d5bdff418bcf17707a52c9994c45e52f | [
"Apache-2.0"
] | 1 | 2018-10-28T16:09:51.000Z | 2018-10-28T16:09:51.000Z | from alembic import op
import sqlalchemy as sa
"""least-variance
Revision ID: f620a24f5f7e
Revises: b4d003c71167
Create Date: 2020-01-03 23:06:50.491509
"""
# revision identifiers, used by Alembic.
revision = "f620a24f5f7e"
down_revision = "b4d003c71167"
def upgrade():
op.execute(
"""
create or replace view variance_by_day as
select
ds.athlete_id,
sum(case when ds.distance >= 1 then 1 else 0 end) ride_days,
sum(distance) total_miles,
var_pop(case when dayofweek(ds.ride_date)=1
then ds.distance end) sun_var_pop,
var_pop(case when dayofweek(ds.ride_date)=2
then ds.distance end) mon_var_pop,
var_pop(case when dayofweek(ds.ride_date)=3
then ds.distance end) tue_var_pop,
var_pop(case when dayofweek(ds.ride_date)=4
then ds.distance end) wed_var_pop,
var_pop(case when dayofweek(ds.ride_date)=5
then ds.distance end) thu_var_pop,
var_pop(case when dayofweek(ds.ride_date)=6
then ds.distance end) fri_var_pop,
var_pop(case when dayofweek(ds.ride_date)=7
then ds.distance end) sat_var_pop
from
daily_scores ds
group by ds.athlete_id;
"""
)
def downgrade():
op.execute(
"""
drop view variance_by_day
;
"""
)
| 31.773585 | 78 | 0.515439 | from alembic import op
import sqlalchemy as sa
revision = "f620a24f5f7e"
down_revision = "b4d003c71167"
def upgrade():
op.execute(
"""
create or replace view variance_by_day as
select
ds.athlete_id,
sum(case when ds.distance >= 1 then 1 else 0 end) ride_days,
sum(distance) total_miles,
var_pop(case when dayofweek(ds.ride_date)=1
then ds.distance end) sun_var_pop,
var_pop(case when dayofweek(ds.ride_date)=2
then ds.distance end) mon_var_pop,
var_pop(case when dayofweek(ds.ride_date)=3
then ds.distance end) tue_var_pop,
var_pop(case when dayofweek(ds.ride_date)=4
then ds.distance end) wed_var_pop,
var_pop(case when dayofweek(ds.ride_date)=5
then ds.distance end) thu_var_pop,
var_pop(case when dayofweek(ds.ride_date)=6
then ds.distance end) fri_var_pop,
var_pop(case when dayofweek(ds.ride_date)=7
then ds.distance end) sat_var_pop
from
daily_scores ds
group by ds.athlete_id;
"""
)
def downgrade():
op.execute(
"""
drop view variance_by_day
;
"""
)
| true | true |
1c3151b5319a6b48830c7ba8f9a693be51342a4a | 98 | py | Python | config.py | boada/microblog | 84f2d1a71327da3f6283b74a3b3d722e034b2f5f | [
"MIT"
] | 1 | 2020-02-21T16:13:45.000Z | 2020-02-21T16:13:45.000Z | config.py | boada/microblog | 84f2d1a71327da3f6283b74a3b3d722e034b2f5f | [
"MIT"
] | null | null | null | config.py | boada/microblog | 84f2d1a71327da3f6283b74a3b3d722e034b2f5f | [
"MIT"
] | null | null | null | import os
class Config(object):
    """Flask configuration object, populated from environment variables."""

    # Falls back to a hard-coded value when SECRET_KEY is unset in the
    # environment — acceptable for local development only; never rely on
    # the fallback in production.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'my-secret-key'
| 19.6 | 64 | 0.714286 | import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'my-secret-key'
| true | true |
1c3152ca4581bd3fdb80edd4e4c01537da05cec5 | 1,256 | py | Python | qcloudsdkmonitor/GetMonitorDataRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/GetMonitorDataRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/GetMonitorDataRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class GetMonitorDataRequest(Request):
    """Request wrapper for the QCloud monitor ``GetMonitorData`` API action.

    Each get_*/set_* pair reads or writes one query parameter in the base
    :class:`Request` parameter map, following the SDK's accessor convention.
    """

    def __init__(self):
        # action 'GetMonitorData' on module 'monitor' at monitor.api.qcloud.com
        super(GetMonitorDataRequest, self).__init__(
            'monitor', 'qcloudcliV1', 'GetMonitorData', 'monitor.api.qcloud.com')

    def get_dimensions(self):
        return self.get_params().get('dimensions')

    def set_dimensions(self, dimensions):
        self.add_param('dimensions', dimensions)

    def get_endTime(self):
        return self.get_params().get('endTime')

    def set_endTime(self, endTime):
        self.add_param('endTime', endTime)

    def get_metricName(self):
        return self.get_params().get('metricName')

    def set_metricName(self, metricName):
        self.add_param('metricName', metricName)

    def get_namespace(self):
        return self.get_params().get('namespace')

    def set_namespace(self, namespace):
        self.add_param('namespace', namespace)

    def get_period(self):
        return self.get_params().get('period')

    def set_period(self, period):
        self.add_param('period', period)

    def get_startTime(self):
        return self.get_params().get('startTime')

    def set_startTime(self, startTime):
        self.add_param('startTime', startTime)
| 27.304348 | 81 | 0.667197 |
from qcloudsdkcore.request import Request
class GetMonitorDataRequest(Request):
def __init__(self):
super(GetMonitorDataRequest, self).__init__(
'monitor', 'qcloudcliV1', 'GetMonitorData', 'monitor.api.qcloud.com')
def get_dimensions(self):
return self.get_params().get('dimensions')
def set_dimensions(self, dimensions):
self.add_param('dimensions', dimensions)
def get_endTime(self):
return self.get_params().get('endTime')
def set_endTime(self, endTime):
self.add_param('endTime', endTime)
def get_metricName(self):
return self.get_params().get('metricName')
def set_metricName(self, metricName):
self.add_param('metricName', metricName)
def get_namespace(self):
return self.get_params().get('namespace')
def set_namespace(self, namespace):
self.add_param('namespace', namespace)
def get_period(self):
return self.get_params().get('period')
def set_period(self, period):
self.add_param('period', period)
def get_startTime(self):
return self.get_params().get('startTime')
def set_startTime(self, startTime):
self.add_param('startTime', startTime)
| true | true |
1c3153c25f4173c0d69cf19c9085eee9f382434a | 2,768 | py | Python | plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | null | null | null | plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | 1 | 2019-03-20T14:57:22.000Z | 2019-03-20T15:01:55.000Z | plenum/test/txn_author_agreement/test_get_empty_txn_author_agreement.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | null | null | null | import pytest
from plenum.common.constants import REPLY, CONFIG_LEDGER_ID
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import get_utc_epoch
from plenum.test.delayers import req_delay
from plenum.test.stasher import delay_rules
from plenum.test.txn_author_agreement.helper import sdk_get_txn_author_agreement, check_state_proof
whitelist = ['Unexpected combination of request parameters']
TIMESTAMP_NONE = None
@pytest.fixture(scope='module')
def nodeSetWithoutTaaAlwaysResponding(txnPoolNodeSet, looper):
global TIMESTAMP_NONE
# Simulate freshness update
txnPoolNodeSet[0].master_replica._do_send_3pc_batch(ledger_id=CONFIG_LEDGER_ID)
looper.runFor(1) # Make sure we have long enough gap between updates
TIMESTAMP_NONE = get_utc_epoch()
return txnPoolNodeSet
@pytest.fixture(scope='function', params=['all_responding', 'one_responding'])
def nodeSetWithoutTaa(request, nodeSetWithoutTaaAlwaysResponding):
if request.param == 'all_responding':
yield nodeSetWithoutTaaAlwaysResponding
else:
stashers = [node.clientIbStasher for node in nodeSetWithoutTaaAlwaysResponding[1:]]
with delay_rules(stashers, req_delay()):
yield nodeSetWithoutTaaAlwaysResponding
@pytest.mark.parametrize(argnames="params, state_key", argvalues=[
({}, '2:latest'),
({'digest': 'some_digest'}, '2:d:some_digest'),
({'version': 'some_version'}, '2:v:some_version'),
({'timestamp': TIMESTAMP_NONE}, '2:latest')
])
def test_get_txn_author_agreement_works_on_clear_state(params, state_key, looper, nodeSetWithoutTaa,
sdk_pool_handle, sdk_wallet_client):
reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params)[1]
assert reply['op'] == REPLY
result = reply['result']
assert result['data'] is None
check_state_proof(result, state_key, None)
@pytest.mark.parametrize(argnames="params", argvalues=[
{'digest': 'some_digest', 'version': 'some_version'},
{'digest': 'some_digest', 'timestamp': 374273},
{'version': 'some_version', 'timestamp': 374273},
{'digest': 'some_digest', 'version': 'some_version', 'timestamp': 374273}
])
def test_get_txn_author_agreement_cannot_have_more_than_one_parameter(params, looper, nodeSetWithoutTaa,
sdk_pool_handle, sdk_wallet_client):
with pytest.raises(RequestNackedException) as e:
sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params)
assert e.match("GET_TXN_AUTHOR_AGREEMENT request can have at most one "
"of the following parameters: version, digest, timestamp")
| 41.939394 | 106 | 0.725072 | import pytest
from plenum.common.constants import REPLY, CONFIG_LEDGER_ID
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import get_utc_epoch
from plenum.test.delayers import req_delay
from plenum.test.stasher import delay_rules
from plenum.test.txn_author_agreement.helper import sdk_get_txn_author_agreement, check_state_proof
whitelist = ['Unexpected combination of request parameters']
TIMESTAMP_NONE = None
@pytest.fixture(scope='module')
def nodeSetWithoutTaaAlwaysResponding(txnPoolNodeSet, looper):
global TIMESTAMP_NONE
txnPoolNodeSet[0].master_replica._do_send_3pc_batch(ledger_id=CONFIG_LEDGER_ID)
looper.runFor(1)
TIMESTAMP_NONE = get_utc_epoch()
return txnPoolNodeSet
@pytest.fixture(scope='function', params=['all_responding', 'one_responding'])
def nodeSetWithoutTaa(request, nodeSetWithoutTaaAlwaysResponding):
if request.param == 'all_responding':
yield nodeSetWithoutTaaAlwaysResponding
else:
stashers = [node.clientIbStasher for node in nodeSetWithoutTaaAlwaysResponding[1:]]
with delay_rules(stashers, req_delay()):
yield nodeSetWithoutTaaAlwaysResponding
@pytest.mark.parametrize(argnames="params, state_key", argvalues=[
({}, '2:latest'),
({'digest': 'some_digest'}, '2:d:some_digest'),
({'version': 'some_version'}, '2:v:some_version'),
({'timestamp': TIMESTAMP_NONE}, '2:latest')
])
def test_get_txn_author_agreement_works_on_clear_state(params, state_key, looper, nodeSetWithoutTaa,
sdk_pool_handle, sdk_wallet_client):
reply = sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params)[1]
assert reply['op'] == REPLY
result = reply['result']
assert result['data'] is None
check_state_proof(result, state_key, None)
@pytest.mark.parametrize(argnames="params", argvalues=[
{'digest': 'some_digest', 'version': 'some_version'},
{'digest': 'some_digest', 'timestamp': 374273},
{'version': 'some_version', 'timestamp': 374273},
{'digest': 'some_digest', 'version': 'some_version', 'timestamp': 374273}
])
def test_get_txn_author_agreement_cannot_have_more_than_one_parameter(params, looper, nodeSetWithoutTaa,
sdk_pool_handle, sdk_wallet_client):
with pytest.raises(RequestNackedException) as e:
sdk_get_txn_author_agreement(looper, sdk_pool_handle, sdk_wallet_client, **params)
assert e.match("GET_TXN_AUTHOR_AGREEMENT request can have at most one "
"of the following parameters: version, digest, timestamp")
| true | true |
1c31541017e2e3db5152ae18abbb5211d1ab50d4 | 6,481 | py | Python | analyze_tls.py | khushhallchandra/CN-project | 405ce86e4e65e116531aa19287b8d05c959b1441 | [
"MIT"
] | null | null | null | analyze_tls.py | khushhallchandra/CN-project | 405ce86e4e65e116531aa19287b8d05c959b1441 | [
"MIT"
] | null | null | null | analyze_tls.py | khushhallchandra/CN-project | 405ce86e4e65e116531aa19287b8d05c959b1441 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main(filename):
data = pd.read_csv(filename, header=None)
means = data.mean(axis = 0)
stds = data.std(axis = 0)
return means[0], means[1], stds[0], stds[1]
if __name__ == '__main__':
files_http1 = ['./results/benchmark_size/http1_txt1.csv', './results/benchmark_size/http1_txt2.csv', './results/benchmark_size/http1_txt3.csv', './results/benchmark_size/http1_txt4.csv', './results/benchmark_size/http1_txt5.csv']
files_http1_tls = ['./results/benchmark_size/http1_tls_txt1.csv', './results/benchmark_size/http1_tls_txt2.csv', './results/benchmark_size/http1_tls_txt3.csv', './results/benchmark_size/http1_tls_txt4.csv', './results/benchmark_size/http1_tls_txt5.csv']
files_http2 = ['./results/benchmark_size/http2_txt1.csv', './results/benchmark_size/http2_txt2.csv', './results/benchmark_size/http2_txt3.csv', './results/benchmark_size/http2_txt4.csv', './results/benchmark_size/http2_txt5.csv']
files_http2_tls = ['./results/benchmark_size/http2_tls_txt1.csv', './results/benchmark_size/http2_tls_txt2.csv', './results/benchmark_size/http2_tls_txt3.csv', './results/benchmark_size/http2_tls_txt4.csv', './results/benchmark_size/http2_tls_txt5.csv']
time_tot_http2, time_contentTransfer_http2 = [], []
std_tot_http2, std_contentTransfer_http2 = [], []
time_tot_http1, time_contentTransfer_http1 = [], []
std_tot_http1, std_contentTransfer_http1 = [], []
time_tot_http2_tls, time_contentTransfer_http2_tls = [], []
std_tot_http2_tls, std_contentTransfer_http2_tls = [], []
time_tot_http1_tls, time_contentTransfer_http1_tls = [], []
std_tot_http1_tls, std_contentTransfer_http1_tls = [], []
for f in files_http2:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2.append(t1)
time_tot_http2.append(t2)
std_contentTransfer_http2.append(2*std1)
std_tot_http2.append(2*std2)
for f in files_http1:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1.append(t1)
time_tot_http1.append(t2)
std_contentTransfer_http1.append(2*std1)
std_tot_http1.append(2*std2)
for f in files_http2_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2_tls.append(t1)
time_tot_http2_tls.append(t2)
std_contentTransfer_http2_tls.append(2*std1)
std_tot_http2_tls.append(2*std2)
for f in files_http1_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1_tls.append(t1)
time_tot_http1_tls.append(t2)
std_contentTransfer_http1_tls.append(2*std1)
std_tot_http1_tls.append(2*std2)
x = [100, 1000, 10000, 100000, 1000000]
time_tot_http2, time_contentTransfer_http2 = np.array(time_tot_http2), np.array(time_contentTransfer_http2)
std_tot_http2, std_contentTransfer_http2 = np.array(std_tot_http2), np.array(std_contentTransfer_http2)
time_tot_http1, time_contentTransfer_http1 = np.array(time_tot_http1), np.array(time_contentTransfer_http1)
std_tot_http1, std_contentTransfer_http1 = np.array(std_tot_http1), np.array(std_contentTransfer_http1)
time_tot_http2_tls, time_contentTransfer_http2_tls = np.array(time_tot_http2_tls), np.array(time_contentTransfer_http2_tls)
std_tot_http2_tls, std_contentTransfer_http2_tls = np.array(std_tot_http2_tls), np.array(std_contentTransfer_http2_tls)
time_tot_http1_tls, time_contentTransfer_http1_tls = np.array(time_tot_http1_tls), np.array(time_contentTransfer_http1_tls)
std_tot_http1_tls, std_contentTransfer_http1_tls = np.array(std_tot_http1_tls), np.array(std_contentTransfer_http1_tls)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_contentTransfer_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_contentTransfer_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_contentTransfer_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_contentTransfer_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_contentTransfer_http1 - std_contentTransfer_http1, time_contentTransfer_http1 + std_contentTransfer_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2 - std_contentTransfer_http2, time_contentTransfer_http2 + std_contentTransfer_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http1_tls - std_contentTransfer_http1_tls, time_contentTransfer_http1_tls + std_contentTransfer_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2_tls - std_contentTransfer_http2_tls, time_contentTransfer_http2_tls + std_contentTransfer_http2_tls, color='gray', alpha=0.3)
# ax.errorbar(x, time_contentTransfer_http2, yerr=std_contentTransfer_http2, fmt='-', color='r', label="HTTP2")
# ax.errorbar(x, time_contentTransfer_quic, yerr=std_contentTransfer_quic, fmt='-', color='b', label="QUIC")
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Time Taken for Data Transfer with TLS ON/OFF')
fig.savefig('results/plots/time_contentTransfer_tls.png', dpi=fig.dpi)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_tot_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_tot_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_tot_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_tot_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_tot_http1 - std_tot_http1, time_tot_http1 + std_tot_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2 - std_tot_http2, time_tot_http2 + std_tot_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http1_tls - std_tot_http1_tls, time_tot_http1_tls + std_tot_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2_tls - std_tot_http2_tls, time_tot_http2_tls + std_tot_http2_tls, color='gray', alpha=0.3)
# ax.errorbar(x, time_tot_http2, yerr=std_tot_http2, fmt='-', color='r', label="HTTP2")
# ax.errorbar(x, time_tot_quic, yerr=std_tot_quic, fmt='-', color='b', label="QUIC")
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Total Time with TLS ON/OFF')
fig.savefig('results/plots/total_time_tls.png', dpi=fig.dpi) | 54.923729 | 257 | 0.738158 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main(filename):
data = pd.read_csv(filename, header=None)
means = data.mean(axis = 0)
stds = data.std(axis = 0)
return means[0], means[1], stds[0], stds[1]
if __name__ == '__main__':
files_http1 = ['./results/benchmark_size/http1_txt1.csv', './results/benchmark_size/http1_txt2.csv', './results/benchmark_size/http1_txt3.csv', './results/benchmark_size/http1_txt4.csv', './results/benchmark_size/http1_txt5.csv']
files_http1_tls = ['./results/benchmark_size/http1_tls_txt1.csv', './results/benchmark_size/http1_tls_txt2.csv', './results/benchmark_size/http1_tls_txt3.csv', './results/benchmark_size/http1_tls_txt4.csv', './results/benchmark_size/http1_tls_txt5.csv']
files_http2 = ['./results/benchmark_size/http2_txt1.csv', './results/benchmark_size/http2_txt2.csv', './results/benchmark_size/http2_txt3.csv', './results/benchmark_size/http2_txt4.csv', './results/benchmark_size/http2_txt5.csv']
files_http2_tls = ['./results/benchmark_size/http2_tls_txt1.csv', './results/benchmark_size/http2_tls_txt2.csv', './results/benchmark_size/http2_tls_txt3.csv', './results/benchmark_size/http2_tls_txt4.csv', './results/benchmark_size/http2_tls_txt5.csv']
time_tot_http2, time_contentTransfer_http2 = [], []
std_tot_http2, std_contentTransfer_http2 = [], []
time_tot_http1, time_contentTransfer_http1 = [], []
std_tot_http1, std_contentTransfer_http1 = [], []
time_tot_http2_tls, time_contentTransfer_http2_tls = [], []
std_tot_http2_tls, std_contentTransfer_http2_tls = [], []
time_tot_http1_tls, time_contentTransfer_http1_tls = [], []
std_tot_http1_tls, std_contentTransfer_http1_tls = [], []
for f in files_http2:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2.append(t1)
time_tot_http2.append(t2)
std_contentTransfer_http2.append(2*std1)
std_tot_http2.append(2*std2)
for f in files_http1:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1.append(t1)
time_tot_http1.append(t2)
std_contentTransfer_http1.append(2*std1)
std_tot_http1.append(2*std2)
for f in files_http2_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http2_tls.append(t1)
time_tot_http2_tls.append(t2)
std_contentTransfer_http2_tls.append(2*std1)
std_tot_http2_tls.append(2*std2)
for f in files_http1_tls:
t1, t2, std1, std2 = main(f)
time_contentTransfer_http1_tls.append(t1)
time_tot_http1_tls.append(t2)
std_contentTransfer_http1_tls.append(2*std1)
std_tot_http1_tls.append(2*std2)
x = [100, 1000, 10000, 100000, 1000000]
time_tot_http2, time_contentTransfer_http2 = np.array(time_tot_http2), np.array(time_contentTransfer_http2)
std_tot_http2, std_contentTransfer_http2 = np.array(std_tot_http2), np.array(std_contentTransfer_http2)
time_tot_http1, time_contentTransfer_http1 = np.array(time_tot_http1), np.array(time_contentTransfer_http1)
std_tot_http1, std_contentTransfer_http1 = np.array(std_tot_http1), np.array(std_contentTransfer_http1)
time_tot_http2_tls, time_contentTransfer_http2_tls = np.array(time_tot_http2_tls), np.array(time_contentTransfer_http2_tls)
std_tot_http2_tls, std_contentTransfer_http2_tls = np.array(std_tot_http2_tls), np.array(std_contentTransfer_http2_tls)
time_tot_http1_tls, time_contentTransfer_http1_tls = np.array(time_tot_http1_tls), np.array(time_contentTransfer_http1_tls)
std_tot_http1_tls, std_contentTransfer_http1_tls = np.array(std_tot_http1_tls), np.array(std_contentTransfer_http1_tls)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_contentTransfer_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_contentTransfer_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_contentTransfer_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_contentTransfer_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_contentTransfer_http1 - std_contentTransfer_http1, time_contentTransfer_http1 + std_contentTransfer_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2 - std_contentTransfer_http2, time_contentTransfer_http2 + std_contentTransfer_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http1_tls - std_contentTransfer_http1_tls, time_contentTransfer_http1_tls + std_contentTransfer_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_contentTransfer_http2_tls - std_contentTransfer_http2_tls, time_contentTransfer_http2_tls + std_contentTransfer_http2_tls, color='gray', alpha=0.3)
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Time Taken for Data Transfer with TLS ON/OFF')
fig.savefig('results/plots/time_contentTransfer_tls.png', dpi=fig.dpi)
fig, ax = plt.subplots()
ax.grid()
ax.plot(x, time_tot_http1, 'o-', color='r', label="HTTP1")
ax.plot(x, time_tot_http1_tls, 'o-', color='g', label="HTTP1_with_tls")
ax.plot(x, time_tot_http2, 'o-', color='b', label="SPDY")
ax.plot(x, time_tot_http2_tls, 'o-', color='k', label="SPDY_with_tls")
ax.fill_between(x, time_tot_http1 - std_tot_http1, time_tot_http1 + std_tot_http1, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2 - std_tot_http2, time_tot_http2 + std_tot_http2, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http1_tls - std_tot_http1_tls, time_tot_http1_tls + std_tot_http1_tls, color='gray', alpha=0.3)
ax.fill_between(x, time_tot_http2_tls - std_tot_http2_tls, time_tot_http2_tls + std_tot_http2_tls, color='gray', alpha=0.3)
ax.set_xlabel('Size of data (Length)')
ax.set_ylabel('Time (in ms)')
ax.legend()
ax.set_xscale('log')
ax.set_title('Comparison of Total Time with TLS ON/OFF')
fig.savefig('results/plots/total_time_tls.png', dpi=fig.dpi) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.