content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
"""Training a face recognizer with TensorFlow using softmax cross entropy loss
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import sys
import random
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import lfw
import h5py
import math
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def main(args):
    """Train a face recognizer with softmax cross-entropy loss (optionally
    combined with center loss and a prelogits-norm penalty), periodically
    validating and evaluating on LFW.

    Args:
        args: Parsed command-line arguments (see parse_arguments).

    Returns:
        The directory where model checkpoints were written.
    """
    # Dynamically load the module that defines the inference graph
    network = importlib.import_module(args.model_def)
    image_size = (args.image_size, args.image_size)
    # Unique run name: timestamp + model definition + dataset directory name
    subdir = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-softmax-'+args.model_def.split(".")[-1]+"-"+args.data_dir.split("/")[-1])
    log_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)
    if not os.path.isdir(log_dir):  # Create the log directory if it doesn't exist
        os.makedirs(log_dir)
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    stat_file_name = os.path.join(log_dir, 'stat.h5')
    # Write arguments to a text file
    facenet.write_arguments_to_file(args, os.path.join(log_dir, 'arguments.txt'))
    # Store some git revision info in a text file in the log directory
    src_path,_ = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, log_dir, ' '.join(sys.argv))
    np.random.seed(seed=args.seed)
    random.seed(args.seed)
    dataset = facenet.get_dataset(args.data_dir)
    if args.filter_filename:
        # Optionally drop outlier images / undersized classes using
        # precomputed per-image statistics stored in an HDF5 file
        dataset = filter_dataset(dataset, os.path.expanduser(args.filter_filename),
            args.filter_percentile, args.filter_min_nrof_images_per_class)
    if args.validation_set_split_ratio>0.0:
        train_set, val_set = facenet.split_dataset(dataset, args.validation_set_split_ratio, args.min_nrof_val_images_per_class, 'SPLIT_IMAGES')
    else:
        train_set, val_set = dataset, []
    nrof_classes = len(train_set)
    print('Model directory: %s' % model_dir)
    print('Log directory: %s' % log_dir)
    pretrained_model = None
    if args.pretrained_model:
        pretrained_model = os.path.expanduser(args.pretrained_model)
        print('Pre-trained model: %s' % pretrained_model)
    if args.lfw_dir:
        print('LFW directory: %s' % args.lfw_dir)
        # Read the file containing the pairs used for testing
        pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
        # Get the paths for the corresponding images
        lfw_paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        # Get a list of image paths and their labels
        image_list, label_list = facenet.get_image_paths_and_labels(train_set)
        assert len(image_list)>0, 'The training set should not be empty'
        val_image_list, val_label_list = facenet.get_image_paths_and_labels(val_set)
        # Create a queue that produces indices into the image_list and label_list
        labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
        range_size = array_ops.shape(labels)[0]
        index_queue = tf.train.range_input_producer(range_size, num_epochs=None,
            shuffle=True, seed=None, capacity=32)
        # One dequeue yields the shuffled sample indices for a whole epoch
        index_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')
        learning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')
        batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
        phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
        image_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')
        labels_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='labels')
        # Per-image bit flags selecting preprocessing/augmentation operations
        control_placeholder = tf.placeholder(tf.int32, shape=(None,1), name='control')
        nrof_preprocess_threads = 4
        input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
            dtypes=[tf.string, tf.int32, tf.int32],
            shapes=[(1,), (1,), (1,)],
            shared_name=None, name=None)
        enqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder, control_placeholder], name='enqueue_op')
        image_batch, label_batch = facenet.create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)
        # Named identity ops so the tensors can later be fetched by name
        image_batch = tf.identity(image_batch, 'image_batch')
        image_batch = tf.identity(image_batch, 'input')
        label_batch = tf.identity(label_batch, 'label_batch')
        print('Number of classes in training set: %d' % nrof_classes)
        print('Number of examples in training set: %d' % len(image_list))
        print('Number of classes in validation set: %d' % len(val_set))
        print('Number of examples in validation set: %d' % len(val_image_list))
        print('Building training graph')
        # Build the inference graph
        prelogits, _ = network.inference(image_batch, args.keep_probability,
            phase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size,
            weight_decay=args.weight_decay)
        # Classification head: one logit per training-set class
        logits = slim.fully_connected(prelogits, len(train_set), activation_fn=None,
            weights_initializer=slim.initializers.xavier_initializer(),
            weights_regularizer=slim.l2_regularizer(args.weight_decay),
            scope='Logits', reuse=False)
        embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')
        # Norm for the prelogits (added to the regularization losses below)
        eps = 1e-4
        prelogits_norm = tf.reduce_mean(tf.norm(tf.abs(prelogits)+eps, ord=args.prelogits_norm_p, axis=1))
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_norm * args.prelogits_norm_loss_factor)
        # Add center loss
        prelogits_center_loss, _ = facenet.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)
        learning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,
            args.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, staircase=True)
        tf.summary.scalar('learning_rate', learning_rate)
        # Calculate the average cross entropy loss across the batch
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=label_batch, logits=logits, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
        tf.add_to_collection('losses', cross_entropy_mean)
        # Training accuracy on the current batch
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), tf.cast(label_batch, tf.int64)), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
        # Calculate the total losses
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        total_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')
        # Build a Graph that trains the model with one batch of examples and updates the model parameters
        train_op = facenet.train(total_loss, global_step, args.optimizer,
            learning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        # Start running operations on the Graph.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)
        with sess.as_default():
            if pretrained_model:
                print('Restoring pretrained model: %s' % pretrained_model)
                ckpt = tf.train.get_checkpoint_state(pretrained_model)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
            # Training and validation loop
            print('Running training')
            nrof_steps = args.max_nrof_epochs*args.epoch_size
            nrof_val_samples = int(math.ceil(args.max_nrof_epochs / args.validate_every_n_epochs))  # Validate every validate_every_n_epochs as well as in the last epoch
            # Preallocated arrays for all statistics written to stat.h5;
            # per-step arrays indexed by global step, per-epoch arrays by epoch
            stat = {
                'loss': np.zeros((nrof_steps,), np.float32),
                'center_loss': np.zeros((nrof_steps,), np.float32),
                'reg_loss': np.zeros((nrof_steps,), np.float32),
                'xent_loss': np.zeros((nrof_steps,), np.float32),
                'prelogits_norm': np.zeros((nrof_steps,), np.float32),
                'accuracy': np.zeros((nrof_steps,), np.float32),
                'val_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_xent_loss': np.zeros((nrof_val_samples,), np.float32),
                'val_accuracy': np.zeros((nrof_val_samples,), np.float32),
                'lfw_accuracy': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate2': np.zeros((args.max_nrof_epochs,), np.float32),
                'lfw_valrate3': np.zeros((args.max_nrof_epochs,), np.float32),
                'learning_rate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_train': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_validate': np.zeros((args.max_nrof_epochs,), np.float32),
                'time_evaluate': np.zeros((args.max_nrof_epochs,), np.float32),
                'prelogits_hist': np.zeros((args.max_nrof_epochs, 1000), np.float32),
            }
            for epoch in range(1,args.max_nrof_epochs+1):
                step = sess.run(global_step, feed_dict=None)
                # Train for one epoch
                t = time.time()
                cont = train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
                    learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, global_step,
                    total_loss, train_op, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file,
                    stat, cross_entropy_mean, accuracy, learning_rate,
                    prelogits, prelogits_center_loss, args.random_rotate, args.random_crop, args.random_flip, prelogits_norm, args.prelogits_hist_max, args.use_fixed_image_standardization)
                stat['time_train'][epoch-1] = time.time() - t
                if not cont:
                    # The learning-rate schedule signalled the end of training
                    break
                t = time.time()
                if len(val_image_list)>0 and ((epoch-1) % args.validate_every_n_epochs == args.validate_every_n_epochs-1 or epoch==args.max_nrof_epochs):
                    validate(args, sess, epoch, val_image_list, val_label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
                        phase_train_placeholder, batch_size_placeholder,
                        stat, total_loss, regularization_losses, cross_entropy_mean, accuracy, args.validate_every_n_epochs, args.use_fixed_image_standardization)
                stat['time_validate'][epoch-1] = time.time() - t
                # Save variables and the metagraph if it doesn't exist already
                save_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, epoch)
                # Evaluate on LFW
                t = time.time()
                if args.lfw_dir:
                    evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
                        embeddings, label_batch, lfw_paths, actual_issame, args.lfw_batch_size, args.lfw_nrof_folds, log_dir, step, summary_writer, stat, epoch,
                        args.lfw_distance_metric, args.lfw_subtract_mean, args.lfw_use_flipped_images, args.use_fixed_image_standardization)
                stat['time_evaluate'][epoch-1] = time.time() - t
                # Persist all statistics after every epoch (overwrites the file)
                print('Saving statistics')
                with h5py.File(stat_file_name, 'w') as f:
                    for key, value in stat.items():
                        f.create_dataset(key, data=value)
    return model_dir
def find_threshold(var, percentile):
    """Estimate the value of `var` below which `percentile` percent of the
    samples fall, using linear interpolation on a 100-bin histogram CDF.

    Args:
        var: Array-like of sample values.
        percentile: Target percentile in the range [0, 100].

    Returns:
        The interpolated threshold value (float).
    """
    counts, edges = np.histogram(var, 100)
    # Empirical CDF evaluated at the bin centers
    cumulative = np.float32(np.cumsum(counts)) / np.sum(counts)
    centers = (edges[:-1] + edges[1:]) / 2
    return np.interp(percentile * 0.01, cumulative, centers)
def filter_dataset(dataset, data_filename, percentile, min_nrof_images_per_class):
    """Filter a dataset using precomputed distance-to-class-center statistics.

    Images whose distance to their class center is at or above the
    `percentile` threshold are removed, and any class that drops below
    `min_nrof_images_per_class` images is deleted entirely.

    Args:
        dataset: List of class objects, each with an `image_paths` list
            (mutated in place).
        data_filename: HDF5 file with 'distance_to_center', 'label_list'
            and 'image_list' datasets.
        percentile: Percentile cutoff passed to find_threshold.
        min_nrof_images_per_class: Minimum class size to keep.

    Returns:
        The filtered dataset (same object as the input).
    """
    with h5py.File(data_filename, 'r') as f:
        distance_to_center = np.array(f.get('distance_to_center'))
        label_list = np.array(f.get('label_list'))
        image_list = np.array(f.get('image_list'))
    threshold = find_threshold(distance_to_center, percentile)
    filtered_dataset = dataset
    removelist = []
    # Remove every image whose distance is at or beyond the threshold
    for idx in np.where(distance_to_center >= threshold)[0]:
        label = label_list[idx]
        image = image_list[idx]
        paths = filtered_dataset[label].image_paths
        if image in paths:
            paths.remove(image)
        if len(paths) < min_nrof_images_per_class:
            removelist.append(label)
    # Delete flagged classes from the highest index down so that the
    # remaining indices stay valid while deleting
    for label in sorted(set(removelist), reverse=True):
        del filtered_dataset[label]
    return filtered_dataset
def train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,
      learning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder, step,
      loss, train_op, summary_op, summary_writer, reg_losses, learning_rate_schedule_file,
      stat, cross_entropy_mean, accuracy,
      learning_rate, prelogits, prelogits_center_loss, random_rotate, random_crop, random_flip, prelogits_norm, prelogits_hist_max, use_fixed_image_standardization):
    """Run one epoch of training, recording per-step statistics in `stat`.

    Returns:
        False when the learning-rate schedule yields a non-positive rate
        (signalling that training should stop), True otherwise.
    """
    batch_number = 0
    if args.learning_rate>0.0:
        lr = args.learning_rate
    else:
        # A non-positive CLI learning rate means: read it from the schedule file
        lr = facenet.get_learning_rate_from_file(learning_rate_schedule_file, epoch)
    if lr<=0:
        # A non-positive scheduled rate terminates training
        return False
    # Dequeue the shuffled sample indices for this epoch and gather the
    # corresponding labels and image paths
    index_epoch = sess.run(index_dequeue_op)
    label_epoch = np.array(label_list)[index_epoch]
    image_epoch = np.array(image_list)[index_epoch]
    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.array(label_epoch),1)
    image_paths_array = np.expand_dims(np.array(image_epoch),1)
    # Bit flags telling the input pipeline which augmentations to apply
    control_value = facenet.RANDOM_ROTATE * random_rotate + facenet.RANDOM_CROP * random_crop + facenet.RANDOM_FLIP * random_flip + facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
    control_array = np.ones_like(labels_array) * control_value
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
    # Training loop
    train_time = 0
    while batch_number < args.epoch_size:
        start_time = time.time()
        feed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}
        tensor_list = [loss, train_op, step, reg_losses, prelogits, cross_entropy_mean, learning_rate, prelogits_norm, accuracy, prelogits_center_loss]
        if batch_number % 100 == 0:
            # Every 100th batch also evaluates the summary op for TensorBoard
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_, summary_str = sess.run(tensor_list + [summary_op], feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, global_step=step_)
        else:
            loss_, _, step_, reg_losses_, prelogits_, cross_entropy_mean_, lr_, prelogits_norm_, accuracy_, center_loss_ = sess.run(tensor_list, feed_dict=feed_dict)
        duration = time.time() - start_time
        # Record per-step statistics (step_ is the 1-based global step)
        stat['loss'][step_-1] = loss_
        stat['center_loss'][step_-1] = center_loss_
        stat['reg_loss'][step_-1] = np.sum(reg_losses_)
        stat['xent_loss'][step_-1] = cross_entropy_mean_
        stat['prelogits_norm'][step_-1] = prelogits_norm_
        stat['learning_rate'][epoch-1] = lr_
        stat['accuracy'][step_-1] = accuracy_
        # Accumulate a histogram of |prelogits| activations, clipped at prelogits_hist_max
        stat['prelogits_hist'][epoch-1,:] += np.histogram(np.minimum(np.abs(prelogits_), prelogits_hist_max), bins=1000, range=(0.0, prelogits_hist_max))[0]
        # Recomputed so the printed duration also includes the bookkeeping above
        duration = time.time() - start_time
        print('Epoch: [%d][%d/%d]\tTime %.3f\tLoss %2.3f\tXent %2.3f\tRegLoss %2.3f\tAccuracy %2.3f\tLr %2.5f\tCl %2.3f' %
              (epoch, batch_number+1, args.epoch_size, duration, loss_, cross_entropy_mean_, np.sum(reg_losses_), accuracy_, lr_, center_loss_))
        batch_number += 1
        train_time += duration
    # Add the total epoch training time to the TensorBoard summary
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='time/total', simple_value=train_time)
    summary_writer.add_summary(summary, global_step=step_)
    return True
def validate(args, sess, epoch, image_list, label_list, enqueue_op, image_paths_placeholder, labels_placeholder, control_placeholder,
      phase_train_placeholder, batch_size_placeholder,
      stat, loss, regularization_losses, cross_entropy_mean, accuracy, validate_every_n_epochs, use_fixed_image_standardization):
    """Run a forward pass over the validation set and record the mean loss,
    cross-entropy and accuracy in `stat` for this validation slot."""
    print('Running forward pass on validation set')
    # Truncate to a whole number of batches (batch size reuses lfw_batch_size)
    nrof_batches = len(label_list) // args.lfw_batch_size
    nrof_images = nrof_batches * args.lfw_batch_size
    # Enqueue one epoch of image paths and labels
    labels_array = np.expand_dims(np.array(label_list[:nrof_images]),1)
    image_paths_array = np.expand_dims(np.array(image_list[:nrof_images]),1)
    # No augmentation at validation time; only optional fixed standardization
    control_array = np.ones_like(labels_array, np.int32)*facenet.FIXED_STANDARDIZATION * use_fixed_image_standardization
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
    loss_array = np.zeros((nrof_batches,), np.float32)
    xent_array = np.zeros((nrof_batches,), np.float32)
    accuracy_array = np.zeros((nrof_batches,), np.float32)
    # Validation loop (forward pass only; phase_train is False)
    start_time = time.time()
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder:False, batch_size_placeholder:args.lfw_batch_size}
        loss_, cross_entropy_mean_, accuracy_ = sess.run([loss, cross_entropy_mean, accuracy], feed_dict=feed_dict)
        loss_array[i], xent_array[i], accuracy_array[i] = (loss_, cross_entropy_mean_, accuracy_)
        if i % 10 == 9:
            # Progress indicator: one dot per 10 batches
            print('.', end='')
            sys.stdout.flush()
    print('')
    duration = time.time() - start_time
    # Index of this validation run in the preallocated stat arrays
    val_index = (epoch-1)//validate_every_n_epochs
    stat['val_loss'][val_index] = np.mean(loss_array)
    stat['val_xent_loss'][val_index] = np.mean(xent_array)
    stat['val_accuracy'][val_index] = np.mean(accuracy_array)
    print('Validation Epoch: %d\tTime %.3f\tLoss %2.3f\tXent %2.3f\tAccuracy %2.3f' %
          (epoch, duration, np.mean(loss_array), np.mean(xent_array), np.mean(accuracy_array)))
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder, phase_train_placeholder, batch_size_placeholder, control_placeholder,
      embeddings, labels, image_paths, actual_issame, batch_size, nrof_folds, log_dir, step, summary_writer, stat, epoch, distance_metric, subtract_mean, use_flipped_images, use_fixed_image_standardization):
    """Evaluate the current embeddings on the LFW verification benchmark and
    log accuracy / validation rates to TensorBoard, a text file and `stat`."""
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Runnning forward pass on LFW images')
    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame)*2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    # The label of each image is its output position, so results can be
    # scattered back into order after the batched forward pass
    labels_array = np.expand_dims(np.arange(0,nrof_images),1)
    image_paths_array = np.expand_dims(np.repeat(np.array(image_paths),nrof_flips),1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(labels_array)*facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2)*facenet.FLIP
    sess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array, control_placeholder: control_array})
    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images,))
    for i in range(nrof_batches):
        feed_dict = {phase_train_placeholder:False, batch_size_placeholder:batch_size}
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        # Scatter each batch back to its original position using the labels
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    # NOTE: 'embeddings' is rebound here from the graph tensor to a numpy array
    embeddings = np.zeros((nrof_embeddings, embedding_size*nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:,:embedding_size] = emb_array[0::2,:]
        embeddings[:,embedding_size:] = emb_array[1::2,:]
    else:
        embeddings = emb_array
    # Every output slot must have been filled exactly once
    assert np.array_equal(lab_array, np.arange(nrof_images))==True, 'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val2, val_std2, far2, val3, val_std3, far3 = lfw.evaluate(embeddings, actual_issame, nrof_folds=nrof_folds, distance_metric=distance_metric, subtract_mean=subtract_mean)
    print('Accuracy: %1.3f+-%1.3f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val2, val_std2, far2))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val3, val_std3, far3))
    lfw_time = time.time() - start_time
    # Add the LFW results to the TensorBoard summary
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate2', simple_value=val2)
    summary.value.add(tag='lfw/val_rate3', simple_value=val3)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    # Append the results to a plain-text log as well
    with open(os.path.join(log_dir,'lfw_result.txt'),'at') as f:
        f.write('%d\t%.5f\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val2, val3))
    stat['lfw_accuracy'][epoch-1] = np.mean(accuracy)
    stat['lfw_valrate2'][epoch-1] = val2
    stat['lfw_valrate3'][epoch-1] = val3
def save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):
    """Checkpoint the session's variables and, on the first call only,
    export the metagraph; report both save durations to TensorBoard."""
    # Save the model checkpoint
    print('Saving variables')
    t0 = time.time()
    ckpt_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)
    saver.save(sess, ckpt_path, global_step=step, write_meta_graph=False)
    variables_seconds = time.time() - t0
    print('Variables saved in %.2f seconds' % variables_seconds)
    # The graph structure does not change between epochs, so the metagraph
    # is exported only if its file does not exist yet
    meta_path = os.path.join(model_dir, 'model-%s.meta' % model_name)
    metagraph_seconds = 0
    if not os.path.exists(meta_path):
        print('Saving metagraph')
        t0 = time.time()
        saver.export_meta_graph(meta_path)
        metagraph_seconds = time.time() - t0
        print('Metagraph saved in %.2f seconds' % metagraph_seconds)
    # Log both timings to TensorBoard
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='time/save_variables', simple_value=variables_seconds)
    summary.value.add(tag='time/save_metagraph', simple_value=metagraph_seconds)
    summary_writer.add_summary(summary, step)
def parse_arguments(argv):
    """Parse the command-line arguments for softmax training.

    Args:
        argv: List of argument strings (typically sys.argv[1:]).

    Returns:
        The populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser()

    # Output locations
    parser.add_argument('--logs_base_dir', type=str,
        help='Directory where to write event logs.', default='~/logs/facenet')
    parser.add_argument('--models_base_dir', type=str,
        help='Directory where to write trained models and checkpoints.', default='~/models/facenet')
    parser.add_argument('--gpu_memory_fraction', type=float,
        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    parser.add_argument('--pretrained_model', type=str,
        help='Load a pretrained model before training starts.')
    # Dataset and model definition
    parser.add_argument('--data_dir', type=str,
        help='Path to the data directory containing aligned face patches.',
        default='~/datasets/casia/casia_maxpy_mtcnnalign_182_160')
    parser.add_argument('--model_def', type=str,
        help='Model definition. Points to a module containing the definition of the inference graph.', default='models.inception_resnet_v1')
    # Training schedule and batch geometry
    parser.add_argument('--max_nrof_epochs', type=int,
        help='Number of epochs to run.', default=500)
    parser.add_argument('--batch_size', type=int,
        help='Number of images to process in a batch.', default=90)
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=160)
    parser.add_argument('--epoch_size', type=int,
        help='Number of batches per epoch.', default=3860)
    parser.add_argument('--embedding_size', type=int,
        help='Dimensionality of the embedding.', default=128)
    # Data augmentation flags
    parser.add_argument('--random_crop',
        help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +
         'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')
    parser.add_argument('--random_flip',
        help='Performs random horizontal flipping of training images.', action='store_true')
    parser.add_argument('--random_rotate',
        help='Performs random rotations of training images.', action='store_true')
    parser.add_argument('--use_fixed_image_standardization',
        help='Performs fixed standardization of images.', action='store_true')
    # Regularization
    parser.add_argument('--keep_probability', type=float,
        help='Keep probability of dropout for the fully connected layer(s).', default=1.0)
    parser.add_argument('--weight_decay', type=float,
        help='L2 weight regularization.', default=0.0)
    parser.add_argument('--center_loss_factor', type=float,
        help='Center loss factor.', default=0.0)
    parser.add_argument('--center_loss_alfa', type=float,
        help='Center update rate for center loss.', default=0.95)
    parser.add_argument('--prelogits_norm_loss_factor', type=float,
        help='Loss based on the norm of the activations in the prelogits layer.', default=0.0)
    parser.add_argument('--prelogits_norm_p', type=float,
        help='Norm to use for prelogits norm loss.', default=1.0)
    parser.add_argument('--prelogits_hist_max', type=float,
        help='The max value for the prelogits histogram.', default=10.0)
    # Optimization
    parser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],
        help='The optimization algorithm to use', default='ADAGRAD')
    parser.add_argument('--learning_rate', type=float,
        help='Initial learning rate. If set to a negative value a learning rate ' +
        'schedule can be specified in the file "learning_rate_schedule.txt"', default=0.1)
    parser.add_argument('--learning_rate_decay_epochs', type=int,
        help='Number of epochs between learning rate decay.', default=100)
    parser.add_argument('--learning_rate_decay_factor', type=float,
        help='Learning rate decay factor.', default=1.0)
    parser.add_argument('--moving_average_decay', type=float,
        help='Exponential decay for tracking of training parameters.', default=0.9999)
    parser.add_argument('--seed', type=int,
        help='Random seed.', default=666)
    parser.add_argument('--nrof_preprocess_threads', type=int,
        help='Number of preprocessing (data loading and augmentation) threads.', default=4)
    parser.add_argument('--log_histograms',
        help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')
    parser.add_argument('--learning_rate_schedule_file', type=str,
        help='File containing the learning rate schedule that is used when learning_rate is set to to -1.', default='data/learning_rate_schedule.txt')
    # Dataset filtering
    parser.add_argument('--filter_filename', type=str,
        help='File containing image data used for dataset filtering', default='')
    parser.add_argument('--filter_percentile', type=float,
        help='Keep only the percentile images closed to its class center', default=100.0)
    parser.add_argument('--filter_min_nrof_images_per_class', type=int,
        help='Keep only the classes with this number of examples or more', default=0)
    # Validation-set handling
    parser.add_argument('--validate_every_n_epochs', type=int,
        help='Number of epoch between validation', default=5)
    parser.add_argument('--validation_set_split_ratio', type=float,
        help='The ratio of the total dataset to use for validation', default=0.0)
    # NOTE(review): type=float although the name suggests an integer count — confirm downstream use
    parser.add_argument('--min_nrof_val_images_per_class', type=float,
        help='Classes with fewer images will be removed from the validation set', default=0)

    # Parameters for validation on LFW
    parser.add_argument('--lfw_pairs', type=str,
        help='The file containing the pairs to use for validation.', default='data/pairs.txt')
    parser.add_argument('--lfw_dir', type=str,
        help='Path to the data directory containing aligned face patches.', default='')
    parser.add_argument('--lfw_batch_size', type=int,
        help='Number of images to process in a batch in the LFW test set.', default=100)
    parser.add_argument('--lfw_nrof_folds', type=int,
        help='Number of folds to use for cross validation. Mainly used for testing.', default=10)
    parser.add_argument('--lfw_distance_metric', type=int,
        help='Type of distance metric to use. 0: Euclidian, 1:Cosine similarity distance.', default=0)
    parser.add_argument('--lfw_use_flipped_images',
        help='Concatenates embeddings for the image and its horizontally flipped counterpart.', action='store_true')
    parser.add_argument('--lfw_subtract_mean',
        help='Subtract feature mean before calculating distance.', action='store_true')
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: parse command-line flags and start training
    main(parse_arguments(sys.argv[1:]))
| src/train_softmax.py | 32,803 | Training a face recognizer with TensorFlow using softmax cross entropy loss
MIT License Copyright (c) 2016 David Sandberg Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Create the log directory if it doesn't exist Create the model directory if it doesn't exist Write arguments to a text file Store some git revision info in a text file in the log directory Read the file containing the pairs used for testing Get the paths for the corresponding images Get a list of image paths and their labels Create a queue that produces indices into the image_list and label_list Build the inference graph Norm for the prelogits Add center loss Calculate the average cross entropy loss across the batch Calculate the total losses Build a Graph that trains the model with one batch of examples and updates the model parameters Create a saver Build the summary operation based on the TF collection of Summaries. Start running operations on the Graph. 
Training and validation loop Validate every validate_every_n_epochs as well as in the last epoch Train for one epoch Save variables and the metagraph if it doesn't exist already Evaluate on LFWplt.plot(bin_centers, cdf) Enqueue one epoch of image paths and labels Training loop Add validation loss and accuracy to summarypylint: disable=maybe-no-member Enqueue one epoch of image paths and labels Training loop Run forward pass to calculate embeddings Enqueue one epoch of image paths and labels nrof_pairs * nrof_images_per_pair Flip every second image Concatenate embeddings for flipped and non flipped version of the images Add validation loss and accuracy to summarypylint: disable=maybe-no-member Save the model checkpointpylint: disable=maybe-no-member Parameters for validation on LFW | 2,708 | en | 0.833032 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Constant values used by this library.
"""
from enum import Enum
class DataCategory(Enum):
    """Categories of data in compliant machine learning.

    PRIVATE: private data; researchers may not view this.
    PUBLIC: data which may safely be viewed by researchers.
    """

    PRIVATE = 1
    PUBLIC = 2
| shrike/compliant_logging/constants.py | 429 | Enumeration of data categories in compliant machine learning.
Values:
- PRIVATE: data which is private. Researchers may not view this.
- PUBLIC: data which may safely be viewed by researchers.
Constant values used by this library.
Copyright (c) Microsoft Corporation. Licensed under the MIT license. | 302 | en | 0.883478 |
#VERSION: 2.3
#AUTHORS: Vikas Yadav (https://github.com/v1k45 | http://v1k45.com)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from html.parser import HTMLParser
from helpers import retrieve_url
from novaprinter import prettyPrinter
class leetx(object):
    """qBittorrent search-engine plugin for 1337x.to.

    The plugin protocol expects class attributes ``url``, ``name`` and
    ``supported_categories``, a ``search(what, cat)`` method that prints
    each result via ``prettyPrinter``, and an optional
    ``download_torrent(url)`` that prints "<magnet link> <page url>".
    """
    url = "https://1337x.to"
    name = "1337x"
    # Maps qBittorrent category keys to the site's category path segments.
    supported_categories = {
        'all': 'All',
        'movies': 'Movies',
        'tv': 'TV',
        'music': 'Music',
        'games': 'Games',
        'anime': 'Anime',
        'software': 'Apps'
    }

    class MyHtmlParser(HTMLParser):
        # Tag-name constants used throughout the parser.
        A, TABLE, TR, TD, SPAN = ('a', 'table', 'tr', 'td', 'span')
        """ Sub-class for parsing results """
        def __init__(self, results, url):
            HTMLParser.__init__(self)
            # `results` is a shared list; one sentinel entry is appended per
            # printed row so search() can tell whether a page had any hits.
            self.results = results
            self.url = url
            self.current_result = {}   # fields of the row being parsed
            self.current_item = None   # key the next text chunk belongs to
            self.inside_table = False  # latched once the results <table> opens
            self.inside_row = False    # inside a <td> cell

        def handle_starttag(self, tag, attrs):
            # Latch `inside_table` at the first <table>; it stays set until
            # handle_endtag sees the closing </table>.
            self.inside_table = self.inside_table or tag == self.TABLE
            # if we are not inside the table, no need to process any further
            if not self.inside_table:
                return
            # convert attrs tuple to dictionary
            attrs = dict(attrs)
            # for torrent name and link
            link = attrs.get('href', '')
            if tag == self.A and link.startswith('/torrent'):
                self.current_result['link'] = self.url + link
                self.current_result['desc_link'] = self.url + link
                self.current_result['engine_url'] = self.url
                self.current_item = 'name'
            # to ignore uploader name attached to the torrent size in span tag
            if tag == self.SPAN:
                self.current_item = None
            # if this is a <td> there can be seeds, leeches or size inside it.
            if tag == self.TD:
                self.inside_row = True
                # find the appropriate data key using the class name of the td
                for item in ['seeds', 'leech', 'size']:
                    if item in attrs.get('class', ''):
                        self.current_item = item
                        break

        def handle_data(self, data):
            # if we are not inside the table, no need to process any further
            if not self.inside_table:
                return
            # Accumulate text into the active field; text may arrive in
            # several chunks (e.g. a name split around inline tags).
            if self.current_item:
                prev_value = self.current_result.get(self.current_item, '')
                self.current_result[self.current_item] = prev_value + data

        def handle_endtag(self, tag):
            # Leaving the results table: stop processing entirely.
            if tag == self.TABLE:
                self.inside_table = False
            if not self.inside_table:
                return
            # Exiting a table cell: the active field is complete.
            if self.inside_row and tag == self.TD:
                self.inside_row = False
                self.current_item = None
            # exiting the tr element, which means all necessary data for a torrent has been
            # extracted, we should save it and clean the object's state.
            if self.current_result and tag == self.TR:
                if 'size' in self.current_result:
                    # Strip thousands separators so the size can be parsed.
                    self.current_result['size'] = self.current_result['size'].replace(',', '')
                # Rows where no name was captured are skipped as malformed.
                if 'name' in self.current_result:
                    prettyPrinter(self.current_result)
                    # Sentinel entry: only the list's length matters to search().
                    self.results.append('a')
                self.current_result = {}
                self.current_item = None

    def download_torrent(self, download_url):
        """Print "<magnet link> <page url>" for the given torrent page.

        1337x does not expose torrent links in its search results, so the
        description page is fetched and the magnet URI extracted from it.
        """
        torrent_page = retrieve_url(download_url)
        magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", torrent_page)
        if magnet_match and magnet_match.groups():
            print(magnet_match.groups()[0] + " " + download_url)
        else:
            raise Exception('Error, please fill a bug report!')

    def search(self, what, cat='all'):
        """Search 1337x for `what` in category `cat`, printing each result.

        NOTE(review): `what` is assumed to be already URL-encoded by the
        caller (qBittorrent's nova engine); it is not escaped here.
        """
        cat = cat.lower()
        # decide which type of search to perform based on category
        search_page = "search" if cat == 'all' else 'category-search'
        search_url = "{url}/{search_page}/{search_query}/".format(
            url=self.url, search_page=search_page, search_query=what)
        # apply search category to url, if any.
        if cat != 'all':
            search_url += self.supported_categories[cat] + "/"
        # Fetch up to 15 pages and stop early once a page yields no rows.
        results_list = []
        parser = self.MyHtmlParser(results_list, self.url)
        page = 1
        while page < 16:
            # download the page
            html = retrieve_url(search_url + str(page) + '/')
            parser.feed(html)
            if len(results_list) < 1:
                break
            # Clear the sentinel list so the next page's count starts at zero.
            del results_list[:]
            page += 1
        parser.close()
| container_data/.config/qBittorrent/plugins/nova3/engines/leetx.py | 6,873 | VERSION: 2.3AUTHORS: Vikas Yadav (https://github.com/v1k45 | http://v1k45.com)CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. are we inside the results table body or not if we are not inside the table, no need to process any further convert attrs tuple to dictionary for torrent name and link to ignore uploader name attached to the torrent size in span tag if this is a <td> there can be seeds, leeches or size inside it. 
find apporipate data key using class name of td if we are not inside the table, no need to process any further do not process data if we are not inside the table body are we inside the results table body or not if we are not inside the table, no need to process any further exiting the table data and maybe moving td or tr element exiting the tr element, which means all necessary data for a torrent has been extracted, we should save it and clean the object's state. skip malformed names (eg. with @) since 1337x does not provide torrent links in the search results, we will have to fetch the page and extract the magnet link decide which type of search to perform based on category apply search category to url, if any. try to get 15 pages (20 * 15 = 300 results) and stop when we don't found results download the page | 2,693 | en | 0.876578 |
import random
import string
from django.conf import settings
from nacl.signing import SigningKey
from nacl.encoding import Base64Encoder
import segno
import io
import cairosvg
from django.template.loader import render_to_string
import base64
import PyPDF2
import os
def generate_random_key(length=8):
    """Return a random alphanumeric string.

    With the default length of 8 there are 62**8 possible keys
    (letters upper/lower case plus digits).
    """
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
def generate_signature_key():
    """Create a fresh signing key and return it base64-encoded as a string."""
    return SigningKey.generate().encode(encoder=Base64Encoder).decode("utf-8")
def load_signature_key():
    """Load the QR code signing key from Django settings.

    Reads ``settings.QRCODE_SIGNATURE_PRIVATE_KEY`` (a base64-encoded key)
    and returns the corresponding SigningKey. Re-raises after logging when
    the setting is missing (AttributeError) or malformed (TypeError).
    """
    try:
        key_bytes = settings.QRCODE_SIGNATURE_PRIVATE_KEY.encode("utf-8")
    except AttributeError:
        print("Missing QRCode signing key")
        raise
    try:
        return SigningKey(key_bytes, encoder=Base64Encoder)
    except TypeError:
        print("Faulty QRCode signing key")
        raise
def generate_payload(location):
    """Build the plaintext QR payload for a location.

    Format: three lines -- short code, name, then "address, city".
    """
    return (
        f"{location.short_code}\n"
        f"{location.name}\n"
        f"{location.address}, {location.city}"
    )
def sign_payload(payload):
    """Sign `payload` with the configured key; return the base64 signed
    message as a string."""
    signing_key = load_signature_key()
    signed = signing_key.sign(payload.encode(), encoder=Base64Encoder)
    return signed.decode()
def generate_qrcode(url):
    """Render `url` as an SVG QR code and return the SVG markup as a string."""
    out = io.BytesIO()
    segno.make_qr(url).save(out, kind="svg", xmldecl=False, scale=5, omitsize=True)
    return out.getvalue().decode()
def get_signed_qrcode(location):
    """Build a signed COVID Alert QR code (SVG markup) for `location`."""
    # Sign the location payload and embed it in the COVID Alert URL fragment.
    signed = sign_payload(generate_payload(location))
    url = "https://alpha.canada.ca/covid-alert.html#" + str(signed)
    return generate_qrcode(url)
def get_pdf_poster(location, lang="en"):
    """Build the printable PDF poster for `location`.

    Renders the signed QR code and the location's address into the
    language-specific SVG poster template, converts the SVG to PDF,
    prepends the instruction pages for that language, and returns a
    BytesIO positioned at the start of the merged document.
    """
    # Generate the signed QR code (SVG markup).
    qr_code = get_signed_qrcode(location)
    poster_template = "register/posters/{lang}.svg".format(lang=lang)
    address_details = "{city}, {province} {postal_code}".format(
        city=location.city,
        province=location.province,
        postal_code=location.postal_code,
    )
    # Render the qr code and address details into the svg template.
    rendered = render_to_string(
        poster_template,
        {
            "qr_code": qr_code,
            "name": location.name,
            "address": location.address,
            "address_details": address_details,
        },
    )
    # Convert the rendered SVG to PDF.
    poster_buffer = io.BytesIO()
    cairosvg.svg2pdf(
        bytestring=rendered.encode("UTF-8"),
        write_to=poster_buffer,
        output_width=815,
    )
    # Locate the per-language instructions PDF shipped with the app.
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    instructions = os.path.join(
        BASE_DIR,
        "register/templates/register/posters/instructions-{lang}.pdf".format(lang=lang),
    )
    pdf_instructions = PyPDF2.PdfFileReader(instructions)
    # Merge the pdfs: instructions first, poster second.
    merger = PyPDF2.PdfFileMerger()
    merger.append(pdf_instructions)
    merger.append(poster_buffer)
    # Write the merged document into a FRESH buffer. The previous code wrote
    # it back into poster_buffer, which still held the poster-only PDF bytes,
    # leaving stale data at the start of the returned stream.
    merged = io.BytesIO()
    merger.write(merged)
    merged.seek(0)
    return merged
def get_encoded_poster(location, lang="en"):
    """Return the location's PDF poster as a base64-encoded string,
    suitable for use as an e-mail attachment."""
    pdf_bytes = get_pdf_poster(location, lang).read()
    return base64.b64encode(pdf_bytes).decode()
| register/utils.py | 3,770 | Generate a new random signing key and return the hex-encoded bytestring
Load the signature key from the environment
Will generate a random alphanumeric string with 62^length possible combinations Create payload Sign payload Build URL Generate the qr code Render the qr code and address details into the svg template Convert the rendered SVG to PDF Get instructions PDF Merge the pdfs Write it back to the puffer Base64-encode the poster for attaching | 452 | en | 0.64616 |
from pfrl.wrappers.cast_observation import CastObservation # NOQA
from pfrl.wrappers.cast_observation import CastObservationToFloat32 # NOQA
from pfrl.wrappers.continuing_time_limit import ContinuingTimeLimit # NOQA
from pfrl.wrappers.monitor import Monitor # NOQA
from pfrl.wrappers.normalize_action_space import NormalizeActionSpace # NOQA
from pfrl.wrappers.randomize_action import RandomizeAction # NOQA
from pfrl.wrappers.render import Render # NOQA
from pfrl.wrappers.scale_reward import ScaleReward # NOQA
from pfrl.wrappers.vector_frame_stack import VectorFrameStack # NOQA
| pfrl/wrappers/__init__.py | 598 | NOQA NOQA NOQA NOQA NOQA NOQA NOQA NOQA NOQA | 44 | uz | 0.46416 |
"""
Test functions for models.formula
"""
import string
import numpy as np
import numpy.random as R
import numpy.linalg as L
from numpy.testing import assert_almost_equal, assert_equal, assert_, \
assert_raises
from statsmodels.sandbox import formula #, contrast #, utils
from statsmodels.sandbox import contrast_old as contrast
class TestTerm(object):
    """Unit tests for formula.Term construction and term arithmetic."""

    def test_init(self):
        # A plain term and one carrying a transform function must construct.
        plain = formula.Term("trivial")
        square = lambda value: value * value
        transformed = formula.Term("not_so_trivial", square, "sqr")
        # A non-string termname must be rejected.
        assert_raises(ValueError, formula.Term, "name", termname=0)

    def test_str(self):
        # str() on a term must not raise.
        str(formula.Term("name"))

    def test_add(self):
        left = formula.Term("t1")
        right = formula.Term("t2")
        combined = left + right
        # Adding two terms yields a Formula containing both.
        assert_(isinstance(combined, formula.Formula))
        assert_(combined.hasterm(left))
        assert_(combined.hasterm(right))

    def test_mul(self):
        left = formula.Term("t1")
        right = formula.Term("t2")
        product = left * right
        assert_(isinstance(product, formula.Formula))
        # Multiplying by the intercept leaves the term unchanged, on
        # either side of the operator.
        intercept = formula.Term("intercept")
        assert_equal(str(left * intercept), str(formula.Formula(left)))
        assert_equal(str(intercept * left), str(formula.Formula(left)))
class TestFormula(object):
    """Tests for statsmodels.sandbox.formula.Formula: namespaces, design
    matrices, term columns, contrasts, factors and interactions."""

    def setup(self):
        # Design: 40 observations of 10 N(0,1) columns named 'A'..'J'.
        # Each column becomes a Term; all ten are summed into a Formula
        # sharing a single namespace mapping name -> column.
        self.X = R.standard_normal((40,10))
        self.namespace = {}
        self.terms = []
        for i in range(10):
            name = '%s' % string.ascii_uppercase[i]
            self.namespace[name] = self.X[:,i]
            self.terms.append(formula.Term(name))
        self.formula = self.terms[0]
        for i in range(1, 10):
            self.formula += self.terms[i]
        self.formula.namespace = self.namespace

    def test_namespace(self):
        # A term evaluates against its own namespace; a formula evaluates
        # against the formula's namespace without mutating its terms.
        space1 = {'X':np.arange(50), 'Y':np.arange(50)*2}
        space2 = {'X':np.arange(20), 'Y':np.arange(20)*2}
        space3 = {'X':np.arange(30), 'Y':np.arange(30)*2}
        X = formula.Term('X')
        Y = formula.Term('Y')
        X.namespace = space1
        assert_almost_equal(X(), np.arange(50))
        Y.namespace = space2
        assert_almost_equal(Y(), np.arange(20)*2)
        f = X + Y
        f.namespace = space1
        # The formula's namespace determines the evaluated shape...
        assert_equal(f().shape, (2,50))
        # ...while the individual terms keep using their own namespaces.
        assert_almost_equal(Y(), np.arange(20)*2)
        assert_almost_equal(X(), np.arange(50))
        f.namespace = space2
        assert_equal(f().shape, (2,20))
        assert_almost_equal(Y(), np.arange(20)*2)
        assert_almost_equal(X(), np.arange(50))
        f.namespace = space3
        assert_equal(f().shape, (2,30))
        assert_almost_equal(Y(), np.arange(20)*2)
        assert_almost_equal(X(), np.arange(50))
        # Derived terms inherit the parent namespace until overridden.
        xx = X**2
        assert_equal(xx().shape, (50,))
        xx.namespace = space3
        assert_equal(xx().shape, (30,))
        xx = X * formula.I
        assert_equal(xx().shape, (50,))
        xx.namespace = space3
        assert_equal(xx().shape, (30,))
        xx = X * X
        assert_equal(xx.namespace, X.namespace)
        # Adding terms whose namespaces are not the SAME object yields an
        # empty namespace -- even equal-content dicts do not propagate;
        # only object identity does.
        xx = X + Y
        assert_equal(xx.namespace, {})
        Y.namespace = {'X':np.arange(50), 'Y':np.arange(50)*2}
        xx = X + Y
        assert_equal(xx.namespace, {})
        Y.namespace = X.namespace
        xx = X+Y
        assert_equal(xx.namespace, Y.namespace)

    def test_termcolumns(self):
        # termcolumns must locate the columns of a multi-column
        # Quantitative term inside the full design.
        t1 = formula.Term("A")
        t2 = formula.Term("B")
        f = t1 + t2 + t1 * t2
        def other(val):
            return np.array([3.2*val,4.342*val**2, 5.234*val**3])
        q = formula.Quantitative(['other%d' % i for i in range(1,4)], termname='other', func=t1, transform=other)
        f += q
        q.namespace = f.namespace = self.formula.namespace
        a = q()
        b = f()
        c = f.termcolumns(q)
        b = b[c]
        assert_almost_equal(a,b)

    def test_str(self):
        # str() of a formula must not raise.
        s = str(self.formula)

    def test_call(self):
        # Calling the formula evaluates every term: shape (nterms, nobs).
        x = self.formula()
        assert_equal(np.array(x).shape, (10, 40))

    def test_design(self):
        # The design matrix is the transpose: shape (nobs, nterms).
        x = self.formula.design()
        assert_equal(x.shape, (40, 10))

    def test_product(self):
        # A product term evaluates to the elementwise product of its factors.
        prod = self.formula['A'] * self.formula['C']
        f = self.formula + prod
        f.namespace = self.namespace
        x = f.design()
        p = f['A*C']
        p.namespace = self.namespace
        col = f.termcolumns(prod, dict=False)
        assert_almost_equal(np.squeeze(x[:,col]), self.X[:,0] * self.X[:,2])
        assert_almost_equal(np.squeeze(p()), self.X[:,0] * self.X[:,2])

    def test_intercept1(self):
        # Adding formula.I appends a constant column of ones.
        # NOTE(review): 'prod' is computed but never used in this test.
        prod = self.terms[0] * self.terms[2]
        f = self.formula + formula.I
        icol = f.names().index('intercept')
        f.namespace = self.namespace
        assert_almost_equal(f()[icol], np.ones((40,)))

    def test_intercept3(self):
        # Multiplying a term by the intercept leaves its values unchanged.
        t = self.formula['A']
        t.namespace = self.namespace
        prod = t * formula.I
        prod.namespace = self.formula.namespace
        assert_almost_equal(np.squeeze(prod()), t())

    def test_contrast1(self):
        # Contrast of terms A and C against the ten-column design selects
        # columns 0 and 2.
        term = self.terms[0] + self.terms[2]
        c = contrast.Contrast(term, self.formula)
        # NOTE(review): col1/col2 are unused, and col2 looks up terms[1]
        # although the contrast involves terms[0] and terms[2] -- confirm.
        col1 = self.formula.termcolumns(self.terms[0], dict=False)
        col2 = self.formula.termcolumns(self.terms[1], dict=False)
        test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
        assert_almost_equal(c.matrix, test)

    def test_contrast2(self):
        # A term that is identically zero contributes nothing: the
        # contrast reduces to selecting column 2 (term 'C') alone.
        dummy = formula.Term('zero')
        self.namespace['zero'] = np.zeros((40,), np.float64)
        term = dummy + self.terms[2]
        c = contrast.Contrast(term, self.formula)
        test = [0]*2 + [1] + [0]*7
        assert_almost_equal(c.matrix, test)

    def test_contrast3(self):
        # A term built from residual noise (orthogonal to the design)
        # collapses the contrast to a single row of length 10.
        X = self.formula.design()
        P = np.dot(X, L.pinv(X))
        dummy = formula.Term('noise')
        resid = np.identity(40) - P
        self.namespace['noise'] = np.transpose(np.dot(resid, R.standard_normal((40,5))))
        terms = dummy + self.terms[2]
        terms.namespace = self.formula.namespace
        c = contrast.Contrast(terms, self.formula)
        assert_equal(c.matrix.shape, (10,))

    def test_power(self):
        # t**2 evaluates to the elementwise square of t.
        t = self.terms[2]
        t2 = t**2
        t.namespace = t2.namespace = self.formula.namespace
        assert_almost_equal(t()**2, t2())

    def test_quantitative(self):
        # A Quantitative term applies its transform at evaluation time.
        t = self.terms[2]
        sint = formula.Quantitative('t', func=t, transform=np.sin)
        t.namespace = sint.namespace = self.formula.namespace
        assert_almost_equal(np.sin(t()), sint())

    def test_factor1(self):
        # values() returns the raw factor labels.
        f = ['a','b','c']*10
        fac = formula.Factor('ff', f)
        fac.namespace = {'ff':f}
        assert_equal(list(fac.values()), f)

    def test_factor2(self):
        # A 3-level factor over 30 observations gives 3 indicator rows.
        f = ['a','b','c']*10
        fac = formula.Factor('ff', f)
        fac.namespace = {'ff':f}
        assert_equal(fac().shape, (3,30))

    def test_factor3(self):
        # A main effect drops the reference level: 2 rows remain.
        f = ['a','b','c']*10
        fac = formula.Factor('ff', f)
        fac.namespace = {'ff':f}
        m = fac.main_effect(reference=1)
        m.namespace = fac.namespace
        assert_equal(m().shape, (2,30))

    def test_factor4(self):
        # With reference level 2, the main-effect rows equal each level's
        # indicator row minus the reference level's indicator row.
        f = ['a','b','c']*10
        fac = formula.Factor('ff', f)
        fac.namespace = {'ff':f}
        m = fac.main_effect(reference=2)
        m.namespace = fac.namespace
        r = np.array([np.identity(3)]*10)
        r.shape = (30,3)
        r = r.T
        _m = np.array([r[0]-r[2],r[1]-r[2]])
        assert_almost_equal(_m, m())

    def test_factor5(self):
        # Indicator coding, plus indexing by level label.
        f = ['a','b','c']*3
        fac = formula.Factor('ff', f)
        fac.namespace = {'ff':f}
        assert_equal(fac(), [[1,0,0]*3,
                             [0,1,0]*3,
                             [0,0,1]*3])
        assert_equal(fac['a'], [1,0,0]*3)
        assert_equal(fac['b'], [0,1,0]*3)
        assert_equal(fac['c'], [0,0,1]*3)

    def test_ordinal_factor(self):
        # An ordinal factor evaluates to integer codes, while label
        # indexing still yields indicator rows.
        f = ['a','b','c']*3
        fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
        fac.namespace = {'ff':f}
        assert_equal(fac(), [0,1,2]*3)
        assert_equal(fac['a'], [1,0,0]*3)
        assert_equal(fac['b'], [0,1,0]*3)
        assert_equal(fac['c'], [0,0,1]*3)

    def test_ordinal_factor2(self):
        # Codes follow the declared key order ('a','b','c'), not the order
        # the labels appear in the data.
        f = ['b','c', 'a']*3
        fac = formula.Factor('ff', ['a','b','c'], ordinal=True)
        fac.namespace = {'ff':f}
        assert_equal(fac(), [1,2,0]*3)
        assert_equal(fac['a'], [0,0,1]*3)
        assert_equal(fac['b'], [1,0,0]*3)
        assert_equal(fac['c'], [0,1,0]*3)

    def test_contrast4(self):
        # A term added twice to the formula should not be estimable.
        # NOTE(review): this test is vacuous -- 'estimable' is a local
        # that is set to False and never updated from the Contrast object.
        f = self.formula + self.terms[5] + self.terms[5]
        f.namespace = self.namespace
        estimable = False
        c = contrast.Contrast(self.terms[5], f)
        assert_equal(estimable, False)

    def test_interactions(self):
        # interactions() builds main effects plus interaction terms up to
        # the requested order(s); order may be an int or a list of orders.
        f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
        assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c']))
        f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3)
        assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
        f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3])
        assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))
        f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3])
        assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d']))

    def test_subtract(self):
        # Subtracting a term (or a formula of terms) removes it.
        f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']])
        ff = f - f['a*b']
        assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c']))
        ff = f - f['a*b'] - f['a*c']
        assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
        ff = f - (f['a*b'] + f['a*c'])
        assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c']))
| statsmodels/sandbox/tests/test_formula.py | 9,998 | Test functions for models.formula
, contrast , utils | 53 | en | 0.235652 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from local_file_system import LocalFileSystem
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Request, Response
from test_util import ReadFile
class _RenderServletDelegate(RenderServlet.Delegate):
  """RenderServlet delegate that serves content from the local file system."""
  def CreateServerInstance(self):
    return ServerInstance.ForTest(LocalFileSystem.Create())
class RenderServletTest(unittest.TestCase):
  """End-to-end tests for RenderServlet served from the local file system."""

  def _Render(self, path):
    # Serve |path| through a fresh RenderServlet and return the Response.
    return RenderServlet(Request.ForTest(path),
                         _RenderServletDelegate()).Get()

  def testExtensionAppRedirect(self):
    # A bare doc name redirects into the extensions section.
    self.assertEqual(
        Response.Redirect('/extensions/storage.html', permanent=False),
        self._Render('storage.html'))

  def testChannelRedirect(self):
    # Channel-prefixed URLs ('stable/...') permanently redirect to the
    # channel-less form.
    self.assertEqual(
        Response.Redirect('/extensions/storage.html', permanent=True),
        self._Render('stable/extensions/storage.html'))

  def testNotFound(self):
    # Unknown paths get the nearest section-specific 404 page, served
    # with a 404 status.
    def create_404_response(real_path):
      # Render the 404 page directly (it serves with 200), then patch the
      # status to 404 for comparison with real not-found responses.
      real_404 = self._Render(real_path)
      self.assertEqual(200, real_404.status)
      real_404.status = 404
      return real_404
    root_404 = create_404_response('404.html')
    extensions_404 = create_404_response('extensions/404.html')
    apps_404 = create_404_response('apps/404.html')
    # Note: would test that root_404 != extensions and apps but it's not
    # necessarily true.
    self.assertNotEqual(extensions_404, apps_404)
    self.assertEqual(root_404, self._Render('not_found.html'))
    self.assertEqual(root_404, self._Render('not_found/not_found.html'))
    self.assertEqual(extensions_404, self._Render('extensions/not_found.html'))
    self.assertEqual(
        extensions_404, self._Render('extensions/manifest/not_found.html'))
    self.assertEqual(
        extensions_404,
        self._Render('extensions/manifest/not_found/not_found.html'))
    self.assertEqual(apps_404, self._Render('apps/not_found.html'))
    self.assertEqual(apps_404, self._Render('apps/manifest/not_found.html'))
    self.assertEqual(
        apps_404, self._Render('apps/manifest/not_found/not_found.html'))

  def testSampleFile(self):
    # Sample source files are served verbatim with a JavaScript MIME type.
    sample_file = 'extensions/talking_alarm_clock/background.js'
    response = self._Render('extensions/examples/%s' % sample_file)
    self.assertEqual(200, response.status)
    self.assertTrue(response.headers['Content-Type'] in (
        'application/javascript; charset=utf-8',
        'application/x-javascript; charset=utf-8'))
    self.assertEqual(ReadFile('%s/%s' % (EXAMPLES, sample_file)),
                     response.content.ToString())

  def testSampleZip(self):
    # Sample directories are served as zip archives.
    sample_dir = 'extensions/talking_alarm_clock'
    response = self._Render('extensions/examples/%s.zip' % sample_dir)
    self.assertEqual(200, response.status)
    self.assertEqual('application/zip', response.headers['Content-Type'])

  def testStaticFile(self):
    # Static resources are served verbatim with their MIME type.
    static_file = 'css/site.css'
    response = self._Render('static/%s' % static_file)
    self.assertEqual(200, response.status)
    self.assertEqual('text/css; charset=utf-8',
                     response.headers['Content-Type'])
    self.assertEqual(ReadFile('%s/%s' % (STATIC_DOCS, static_file)),
                     response.content.ToString())

  def testHtmlTemplate(self):
    # Templates are expanded, so the output is larger than the source.
    html_file = 'extensions/storage.html'
    response = self._Render(html_file)
    self.assertEqual(200, response.status)
    self.assertEqual('text/html; charset=utf-8',
                     response.headers.get('Content-Type'))
    # Can't really test rendering all that well.
    self.assertTrue(len(response.content) >
                    len(ReadFile('%s/%s' % (PUBLIC_TEMPLATES, html_file))))

  def testDevelopersGoogleComRedirect(self):
    # The site root (and its index) redirects to developers.google.com.
    def assert_redirect(request_path):
      response = self._Render(request_path)
      self.assertEqual(('//developers.google.com/chrome', False),
                       response.GetRedirect())
    assert_redirect('')
    assert_redirect('index.html')

  def testIndexRedirect(self):
    # A bare section name redirects to that section's index page.
    response = self._Render('extensions')
    self.assertEqual(('/extensions/index.html', False),
                     response.GetRedirect())

  def testOtherRedirectsJsonRedirect(self):
    # Configured redirects are honoured.
    # NOTE(review): presumably driven by a redirects config file -- confirm.
    response = self._Render('apps/webview_tag.html')
    self.assertEqual(('/apps/tags/webview.html', False),
                     response.GetRedirect())
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
  unittest.main()
| chrome/common/extensions/docs/server2/render_servlet_test.py | 4,622 | !/usr/bin/env python Copyright 2013 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Note: would test that root_404 != extensions and apps but it's not necessarily true. Can't really test rendering all that well. | 304 | en | 0.944157 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def LicenseServerSource(vim, *args, **kwargs):
    '''Specify a license server reachable via IPv4 network.

    Creates a {urn:vim25}LicenseServerSource managed object through the vim
    client factory and populates its properties from the arguments.

    :param vim: connected service instance whose client factory builds the object.
    :param args: positional property values, applied in order:
        licenseServer, dynamicProperty, dynamicType.
    :param kwargs: the same properties by name.
    :raises IndexError: if no arguments are supplied (licenseServer is required).
    :raises InvalidArgumentError: if an unrecognized keyword is supplied.
    '''
    obj = vim.client.factory.create('{urn:vim25}LicenseServerSource')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        # Only 1 argument (licenseServer) is required -- the original message
        # wrongly said "at least 2" and counted positionals only.
        raise IndexError('Expected at least 1 argument got: %d' % (len(args) + len(kwargs)))

    required = [ 'licenseServer' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional arguments fill the properties in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| pyvisdk/do/license_server_source.py | 1,007 | Specify a license server reachable via IPv4 network.
Automatically generated, do not edit. do some validation checking... | 123 | en | 0.271469 |
from neuwon.database import Database
import numpy as np
import numba
class GameOfLife:
    """Conway's Game of Life stored in a neuwon Database.

    The board is a 2-D grid of Cell instances: each cell has its grid
    coordinates, an ``alive`` flag, and a sparse connectivity matrix
    linking it to its neighbors. One generation is a sparse matrix-vector
    product (live-neighbor counts) followed by an element-wise rule update.
    """

    class _CellBaseClass:
        """Instance base class registered with the database as "Cell"."""
        __slots__ = ()
        @classmethod
        def _add_to_database(cls, database):
            # Register the Cell type: grid position, liveness flag, and the
            # cell-to-cell neighbor matrix.
            cell_data = database.add_class("Cell", cls)
            cell_data.add_attribute("coordinates", shape=(2,), dtype=np.int32)
            # np.bool was a deprecated alias for builtin bool and was removed
            # in NumPy 1.24; np.bool_ is the supported boolean scalar dtype.
            cell_data.add_attribute("alive", False, dtype=np.bool_)
            cell_data.add_connectivity_matrix("neighbors", "Cell")
            return cell_data.get_instance_type()

    def __init__(self, shape):
        """Create a (rows, cols) board with every cell initially dead."""
        self.db = Database()
        self.Cell = self._CellBaseClass._add_to_database(self.db)
        self.shape = shape
        self.grid = np.empty(self.shape, dtype=object)
        # Instantiate one Cell per grid position.
        for x in range(self.shape[0]):
            for y in range(self.shape[1]):
                self.grid[x,y] = self.Cell(coordinates=(x,y))
        # Wire up the 8-neighborhood; out-of-range coordinates are clamped
        # to the board edge.
        # NOTE(review): clamping makes edge cells record some neighbors more
        # than once (duplicate entries in the connectivity matrix) -- confirm
        # this boundary behavior is intended.
        for x in range(self.shape[0]):
            for y in range(self.shape[1]):
                cell = self.grid[x,y]
                neighbors = []
                for x_offset in [-1, 0, 1]:
                    for y_offset in [-1, 0, 1]:
                        nx = x - x_offset
                        ny = y - y_offset
                        if nx < 0: nx = 0
                        if ny < 0: ny = 0
                        if nx >= self.shape[0]: nx = self.shape[0] - 1
                        if ny >= self.shape[1]: ny = self.shape[1] - 1
                        neighbor = self.grid[nx, ny]
                        if cell != neighbor:
                            neighbors.append(neighbor)
                cell.neighbors = neighbors
        # Convert to CSR so advance() can use sparse matrix multiplication.
        self.db.get("Cell.neighbors").to_csr()

    def randomize(self, alive_fraction):
        """Kill all cells, then revive each independently with
        probability ``alive_fraction``."""
        a = self.db.get_data("Cell.alive")
        a.fill(False)
        a[np.random.uniform(size=a.shape) < alive_fraction] = True

    def get_num_alive(self):
        """Return the number of currently-living cells."""
        return sum(self.db.get_data("Cell.alive"))

    def advance(self):
        """Advance the board by one generation, in place."""
        a = self.db.get_data("Cell.alive")
        n = self.db.get_data("Cell.neighbors")
        # C is the number of living neighbors for each cell (sparse matvec).
        c = n * np.array(a, dtype=np.int32)
        _advance(a, c)
@numba.njit(parallel=True)
def _advance(a, c):
    """Apply Conway's rules in place.

    a[i] is the liveness flag of cell i and c[i] its live-neighbor count.
    """
    for i in numba.prange(len(a)):
        count = c[i]
        if a[i]:
            # Survival requires exactly 2 or 3 live neighbors.
            a[i] = count == 2 or count == 3
        elif count == 3:
            # Reproduction: a dead cell with exactly 3 live neighbors revives.
            a[i] = True
| neuwon/database/examples/life/model.py | 2,429 | C is the number of living neighbors for each cell. | 50 | en | 0.944252 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# ------------------------------------------------------------
# Purpose: Test the parametric functions.
# ------------------------------------------------------------
class TestParametricFunctions(vtk.test.Testing.vtkTest):
    """Regression test rendering one example of every parametric surface.

    Each surface is wrapped in a mapper/actor pair and paired with a 2D
    text label, all laid out on a grid; the final render is compared
    against a stored baseline image.
    """

    def _make_label(self, text, x, y):
        """Return a centered, red, 14pt 2D text actor at world (x, y, 0)."""
        text_mapper = vtk.vtkTextMapper()
        text_mapper.SetInput(text)
        text_prop = text_mapper.GetTextProperty()
        text_prop.SetJustificationToCentered()
        text_prop.SetVerticalJustificationToCentered()
        text_prop.SetColor(1, 0, 0)
        text_prop.SetFontSize(14)
        text_actor = vtk.vtkActor2D()
        text_actor.SetMapper(text_mapper)
        text_actor.GetPositionCoordinate().SetCoordinateSystemToWorld()
        text_actor.GetPositionCoordinate().SetValue(x, y, 0)
        return text_actor

    def _make_actor(self, source, scalar_range=None, position=(0, 0, 0),
                    scale=None, rotate_x=None, texture=None, color=None):
        """Wrap a parametric-function source in a mapper/actor pair.

        Every keyword argument is an optional decoration; omitted ones
        leave the corresponding VTK defaults untouched.
        """
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        if scalar_range is not None:
            mapper.SetScalarRange(scalar_range[0], scalar_range[1])
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        if rotate_x is not None:
            actor.RotateX(rotate_x)
        actor.SetPosition(position[0], position[1], position[2])
        if scale is not None:
            actor.SetScale(scale[0], scale[1], scale[2])
        if texture is not None:
            actor.SetTexture(texture)
        if color is not None:
            actor.GetProperty().SetColor(color[0], color[1], color[2])
        return actor

    def testParametricFunctions(self):
        """Create every surface, render the layout, compare to baseline."""
        # Texture shared by the surfaces that generate texture coordinates.
        textureReader = vtk.vtkJPEGReader()
        textureReader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
        texture = vtk.vtkTexture()
        texture.SetInputConnection(textureReader.GetOutputPort())

        surface_actors = []
        label_actors = []

        # Torus
        torus = vtk.vtkParametricTorus()
        torusSource = vtk.vtkParametricFunctionSource()
        torusSource.SetParametricFunction(torus)
        torusSource.SetScalarModeToPhase()
        surface_actors.append(self._make_actor(
            torusSource, scalar_range=(0, 360), position=(0, 12, 0)))
        label_actors.append(self._make_label("Torus", 0, 9.5))

        # Klein bottle
        klein = vtk.vtkParametricKlein()
        kleinSource = vtk.vtkParametricFunctionSource()
        kleinSource.SetParametricFunction(klein)
        kleinSource.SetScalarModeToU0V0()
        surface_actors.append(self._make_actor(
            kleinSource, scalar_range=(0, 3), position=(8, 10.5, 0)))
        label_actors.append(self._make_label("Klein", 8, 9.5))

        # Figure-8 Klein (textured)
        klein2 = vtk.vtkParametricFigure8Klein()
        klein2Source = vtk.vtkParametricFunctionSource()
        klein2Source.SetParametricFunction(klein2)
        klein2Source.GenerateTextureCoordinatesOn()
        surface_actors.append(self._make_actor(
            klein2Source, scalar_range=(0, 3), position=(16, 12, 0),
            texture=texture))
        label_actors.append(self._make_label("Fig-8.Klein", 16, 9.5))

        # Mobius strip (textured)
        mobius = vtk.vtkParametricMobius()
        mobiusSource = vtk.vtkParametricFunctionSource()
        mobiusSource.SetParametricFunction(mobius)
        mobiusSource.GenerateTextureCoordinatesOn()
        surface_actors.append(self._make_actor(
            mobiusSource, position=(24, 12, 0), rotate_x=45, texture=texture))
        label_actors.append(self._make_label("Mobius", 24, 9.5))

        # Super toroid
        toroid = vtk.vtkParametricSuperToroid()
        toroid.SetN1(2)
        toroid.SetN2(3)
        toroidSource = vtk.vtkParametricFunctionSource()
        toroidSource.SetParametricFunction(toroid)
        toroidSource.SetScalarModeToU()
        surface_actors.append(self._make_actor(
            toroidSource, scalar_range=(0, 6.28), position=(0, 4, 0)))
        label_actors.append(self._make_label("Super.Toroid", 0, 1.5))

        # Super ellipsoid
        superEllipsoid = vtk.vtkParametricSuperEllipsoid()
        superEllipsoid.SetXRadius(1.25)
        superEllipsoid.SetYRadius(1.5)
        superEllipsoid.SetZRadius(1.0)
        superEllipsoid.SetN1(1.1)
        superEllipsoid.SetN2(1.75)
        superEllipsoidSource = vtk.vtkParametricFunctionSource()
        superEllipsoidSource.SetParametricFunction(superEllipsoid)
        superEllipsoidSource.SetScalarModeToV()
        surface_actors.append(self._make_actor(
            superEllipsoidSource, scalar_range=(0, 3.14), position=(8, 4, 0)))
        label_actors.append(self._make_label("Super.Ellipsoid", 8, 1.5))

        # Open and closed 1D splines through the same 10 fixed points.
        splinePoints = [
            [0.50380158308139134, -0.60679315105396936, -0.37248976406291578],
            [-0.4354646054261665, -0.85362339758017258, -0.84844312996065385],
            [0.2163147512899315, -0.39797507012168643, -0.76700353518454523],
            [0.97158415334838644, -0.58513467367046257, -0.35846037946569753],
            [-0.64359767997804918, -0.94620739107309249, -0.90762176546623086],
            [-0.39901219094126117, -0.1978931497772658, 0.0098316934936828471],
            [-0.75872745167404765, 0.067719714281950116, 0.165237936733867],
            [-0.84599731389712418, -0.67685466896596114, 0.10357868909071133],
            [0.84702754758625654, -0.0080077177882230677, -0.58571286666473044],
            [-0.076150034124101484, 0.14637647622561856, 0.1494359239700418]]
        inputPoints = vtk.vtkPoints()
        for i, point in enumerate(splinePoints):
            inputPoints.InsertPoint(i, point)

        spline = vtk.vtkParametricSpline()
        spline.SetPoints(inputPoints)
        spline.ClosedOff()
        splineSource = vtk.vtkParametricFunctionSource()
        splineSource.SetParametricFunction(spline)
        surface_actors.append(self._make_actor(
            splineSource, position=(16, 4, 0), color=(0, 0, 0)))
        label_actors.append(self._make_label("Open.Spline", 16, 1.5))

        spline2 = vtk.vtkParametricSpline()
        spline2.SetPoints(inputPoints)
        spline2.ClosedOn()
        spline2Source = vtk.vtkParametricFunctionSource()
        spline2Source.SetParametricFunction(spline2)
        surface_actors.append(self._make_actor(
            spline2Source, position=(24, 4, 0), color=(0, 0, 0)))
        label_actors.append(self._make_label("Closed.Spline", 24, 1.5))

        # Conic spiral
        sconic = vtk.vtkParametricConicSpiral()
        sconic.SetA(0.8)
        sconic.SetB(2.5)
        sconic.SetC(0.4)
        sconicSource = vtk.vtkParametricFunctionSource()
        sconicSource.SetParametricFunction(sconic)
        sconicSource.SetScalarModeToDistance()
        surface_actors.append(self._make_actor(
            sconicSource, scalar_range=(0, 9), position=(0, -4, 0),
            scale=(1.2, 1.2, 1.2)))
        label_actors.append(self._make_label("Spiral.Conic", 0, -6.5))

        # Boy's surface
        boy = vtk.vtkParametricBoy()
        boySource = vtk.vtkParametricFunctionSource()
        boySource.SetParametricFunction(boy)
        boySource.SetScalarModeToModulus()
        surface_actors.append(self._make_actor(
            boySource, scalar_range=(0, 2), position=(8, -4, 0),
            scale=(1.5, 1.5, 1.5)))
        label_actors.append(self._make_label("Boy", 8, -6.5))

        # Cross cap
        crossCap = vtk.vtkParametricCrossCap()
        crossCapSource = vtk.vtkParametricFunctionSource()
        crossCapSource.SetParametricFunction(crossCap)
        crossCapSource.SetScalarModeToY()
        surface_actors.append(self._make_actor(
            crossCapSource, position=(16, -4, 0), rotate_x=65,
            scale=(1.5, 1.5, 1.5)))
        label_actors.append(self._make_label("Cross.Cap", 16, -6.5))

        # Dini's surface
        dini = vtk.vtkParametricDini()
        diniSource = vtk.vtkParametricFunctionSource()
        diniSource.SetScalarModeToDistance()
        diniSource.SetParametricFunction(dini)
        surface_actors.append(self._make_actor(
            diniSource, position=(24, -3, 0), rotate_x=-90,
            scale=(1.5, 1.5, 0.5)))
        label_actors.append(self._make_label("Dini", 24, -6.5))

        # Enneper's surface
        enneper = vtk.vtkParametricEnneper()
        enneperSource = vtk.vtkParametricFunctionSource()
        enneperSource.SetParametricFunction(enneper)
        enneperSource.SetScalarModeToQuadrant()
        surface_actors.append(self._make_actor(
            enneperSource, scalar_range=(1, 4), position=(0, -12, 0),
            scale=(0.25, 0.25, 0.25)))
        label_actors.append(self._make_label("Enneper", 0, -14.5))

        # Ellipsoid
        ellipsoid = vtk.vtkParametricEllipsoid()
        ellipsoid.SetXRadius(1)
        ellipsoid.SetYRadius(0.75)
        ellipsoid.SetZRadius(0.5)
        ellipsoidSource = vtk.vtkParametricFunctionSource()
        ellipsoidSource.SetParametricFunction(ellipsoid)
        ellipsoidSource.SetScalarModeToZ()
        surface_actors.append(self._make_actor(
            ellipsoidSource, scalar_range=(-0.5, 0.5), position=(8, -12, 0),
            scale=(1.5, 1.5, 1.5)))
        label_actors.append(self._make_label("Ellipsoid", 8, -14.5))

        # Random hills. Random generation is disabled so the image is
        # reproducible: random number generators do not return the same
        # sequence on different operating systems.
        randomHills = vtk.vtkParametricRandomHills()
        randomHills.AllowRandomGenerationOff()
        randomHills.GenerateTheHills()
        randomHillsSource = vtk.vtkParametricFunctionSource()
        randomHillsSource.SetParametricFunction(randomHills)
        randomHillsSource.GenerateTextureCoordinatesOn()
        surface_actors.append(self._make_actor(
            randomHillsSource, position=(16, -14, 0), scale=(0.2, 0.2, 0.2),
            texture=texture))
        label_actors.append(self._make_label("Random.Hills", 16, -14.5))

        # Steiner's Roman surface
        roman = vtk.vtkParametricRoman()
        roman.SetRadius(1.5)
        romanSource = vtk.vtkParametricFunctionSource()
        romanSource.SetParametricFunction(roman)
        romanSource.SetScalarModeToX()
        surface_actors.append(self._make_actor(
            romanSource, position=(24, -12, 0)))
        label_actors.append(self._make_label("Roman", 24, -14.5))

        # ------------------------------------------------------------
        # Render everything and compare against the stored baseline.
        # ------------------------------------------------------------
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)
        iren = vtk.vtkRenderWindowInteractor()
        iren.SetRenderWindow(renWin)
        for actor in surface_actors + label_actors:
            ren.AddViewProp(actor)
        ren.SetBackground(0.7, 0.8, 1)
        renWin.SetSize(500, 500)
        ren.ResetCamera()
        ren.GetActiveCamera().Zoom(1.3)
        iren.Initialize()
        renWin.Render()
        img_file = "TestParametricFunctions.png"
        # NOTE: this test has a companion .tcl test. The threshold set
        # here should be the same as the threshold in the .tcl
        # test. Both tests should produce exactly the same results.
        vtk.test.Testing.compareImage(
            iren.GetRenderWindow(),
            vtk.test.Testing.getAbsImagePath(img_file),
            threshold=10)
        vtk.test.Testing.interact()
if __name__ == "__main__":
    # Delegate to the VTK test harness, which discovers and runs the
    # test methods of TestParametricFunctions.
    vtk.test.Testing.main([(TestParametricFunctions, 'test')])
| Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py | 26,254 | !/usr/bin/env python -*- coding: utf-8 -*- ------------------------------------------------------------ Purpose: Test the parametric functions. ------------------------------------------------------------ ------------------------------------------------------------ Get a texture ------------------------------------------------------------ ------------------------------------------------------------ For each parametric surface: 1) Create it 2) Assign mappers and actors 3) Position this object 5) Add a label ------------------------------------------------------------ ------------------------------------------------------------ Create a torus ------------------------------------------------------------ ------------------------------------------------------------ Create a klein bottle ------------------------------------------------------------ ------------------------------------------------------------ Create a Figure-8 Klein ------------------------------------------------------------ ------------------------------------------------------------ Create a mobius strip ------------------------------------------------------------ ------------------------------------------------------------ Create a super toroid ------------------------------------------------------------ ------------------------------------------------------------ Create a super ellipsoid ------------------------------------------------------------ ------------------------------------------------------------ Create an open 1D spline ------------------------------------------------------------ ------------------------------------------------------------ Create a closed 1D spline ------------------------------------------------------------ ------------------------------------------------------------ Create a spiral conic ------------------------------------------------------------ 
------------------------------------------------------------ Create Boy's surface ------------------------------------------------------------ ------------------------------------------------------------ Create a cross cap ------------------------------------------------------------ ------------------------------------------------------------ Create Dini's surface ------------------------------------------------------------ ------------------------------------------------------------ Create Enneper's surface ------------------------------------------------------------ ------------------------------------------------------------ Create an ellipsoidal surface ------------------------------------------------------------ ------------------------------------------------------------ Create an surface with random hills on it. Note that for testing, we will disable the random generation of the surfaces. This is because random number generators do not return the same result on different operating systems. ------------------------------------------------------------ ------------------------------------------------------------ Create an Steiner's Roman Surface. ------------------------------------------------------------ ------------------------------------------------------------ Create the RenderWindow, Renderer and both Actors ------------------------------------------------------------ add actorsadd text actors NOTE: this test has a companion .tcl test. The threshold set here should be the same as the threshold in the .tcl test. Both tests should produce exactly the same results. | 3,475 | en | 0.249391 |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from kmip.core import enums
from kmip.core import primitives
from kmip.core import objects as cobjects
from kmip.core.factories import attributes
from kmip.core.attributes import CryptographicParameters
from kmip.core.attributes import DerivationParameters
from kmip.pie import api
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects as pobjects
from kmip.services.kmip_client import KMIPProxy
def is_connected(function):
    """Decorator for client methods that require an open connection.

    Raises:
        ClientConnectionNotOpen: if the wrapped method is invoked before
            the client connection has been opened.
    """
    import functools

    # functools.wraps preserves the wrapped method's __name__/__doc__ so
    # introspection and generated docs see the real method, not "wrapper".
    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if not self._is_open:
            raise exceptions.ClientConnectionNotOpen()
        return function(self, *args, **kwargs)
    return wrapper
class ProxyKmipClient(api.KmipClient):
    """
    A simplified KMIP client for conducting KMIP operations.

    The ProxyKmipClient is a simpler KMIP client supporting various KMIP
    operations. It wraps the original KMIPProxy, reducing the boilerplate
    needed to deploy PyKMIP in client applications. The underlying proxy
    client is responsible for setting up the underlying socket connection
    and for writing/reading data to/from the socket.

    Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
    """
def __init__(self,
             hostname=None,
             port=None,
             cert=None,
             key=None,
             ca=None,
             ssl_version=None,
             username=None,
             password=None,
             config='client'):
    """Build a ProxyKmipClient around a low-level KMIPProxy.

    All arguments are optional; settings not given here can instead be
    loaded from the named section of the PyKMIP configuration file.

    Args:
        hostname (string): Host name or IP address of a KMIP appliance.
        port (int): TCP port used to reach the appliance (KMIP commonly
            uses 5696).
        cert (string): Path to the client's certificate file.
        key (string): Path to the key for the client's certificate.
        ca (string): Path to the CA certificate used to verify the
            server's certificate.
        ssl_version (string): Name of the SSL version to use for the
            connection, e.g. 'PROTOCOL_SSLv23'.
        username (string): User name of the KMIP appliance account to use
            for operations.
        password (string): Password of the KMIP appliance account to use
            for operations.
        config (string): Name of a section in the PyKMIP configuration
            file to load settings from, instead of specifying them
            manually. Defaults to the default client section, 'client'.
    """
    self.logger = logging.getLogger()
    self.attribute_factory = attributes.AttributeFactory()
    self.object_factory = factory.ObjectFactory()
    # TODO (peter-hamilton) Consider adding validation checks for inputs.
    self.proxy = KMIPProxy(
        host=hostname,
        port=port,
        certfile=cert,
        keyfile=key,
        ca_certs=ca,
        ssl_version=ssl_version,
        username=username,
        password=password,
        config=config)
    # TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
    self._is_open = False
def open(self):
    """
    Open the client connection.

    Raises:
        ClientConnectionFailure: if the client connection is already open
        Exception: if an error occurs while trying to open the connection
    """
    if self._is_open:
        raise exceptions.ClientConnectionFailure(
            "client connection already open")
    try:
        self.proxy.open()
        self._is_open = True
    except Exception:
        # logger.exception already records the active exception and its
        # traceback. The previous code passed the exception object as a
        # positional argument, which logging treats as a %-format arg
        # with no matching placeholder and the log call fails to format.
        self.logger.exception("could not open client connection")
        raise  # bare raise preserves the original traceback (py2 and py3)
def close(self):
    """
    Close the client connection.

    Closing an already-closed connection is a harmless no-op.

    Raises:
        Exception: if an error occurs while trying to close the connection
    """
    if not self._is_open:
        return
    try:
        self.proxy.close()
        self._is_open = False
    except Exception:
        # logger.exception records the active traceback itself; passing
        # the exception as a positional %-format arg (as before) breaks
        # the log call's message formatting.
        self.logger.exception("could not close client connection")
        raise  # bare raise preserves the original traceback (py2 and py3)
@is_connected
def create(self, algorithm, length, operation_policy_name=None, name=None,
           cryptographic_usage_mask=None):
    """
    Create a symmetric key on a KMIP appliance.

    Args:
        algorithm (CryptographicAlgorithm): Enumeration selecting the
            algorithm used to generate the symmetric key.
        length (int): Length in bits for the symmetric key.
        operation_policy_name (string): Name of the operation policy to
            apply to the new key. Optional, defaults to None.
        name (string): Name to give the key. Optional, defaults to None.
        cryptographic_usage_mask (list): List of CryptographicUsageMask
            enumerations for the key. Optional, defaults to None.

    Returns:
        string: The uid of the newly created symmetric key.

    Raises:
        ClientConnectionNotOpen: if the client connection is unusable
        KmipOperationFailure: if the operation result is a failure
        TypeError: if the input arguments are invalid
    """
    # Validate inputs before talking to the appliance.
    if not isinstance(algorithm, enums.CryptographicAlgorithm):
        raise TypeError(
            "algorithm must be a CryptographicAlgorithm enumeration")
    if not isinstance(length, six.integer_types) or length <= 0:
        raise TypeError("length must be a positive integer")
    if cryptographic_usage_mask is not None:
        mask_is_valid = isinstance(cryptographic_usage_mask, list) and all(
            isinstance(item, enums.CryptographicUsageMask)
            for item in cryptographic_usage_mask)
        if not mask_is_valid:
            raise TypeError(
                "cryptographic_usage_mask must be a list of "
                "CryptographicUsageMask enumerations")

    # Assemble the template attributes describing the new key: key
    # attributes first, then the shared common attributes, then the name.
    key_attributes = self._build_key_attributes(
        algorithm, length, cryptographic_usage_mask)
    key_attributes.extend(
        self._build_common_attributes(operation_policy_name))
    if name:
        key_attributes.extend(self._build_name_attribute(name))
    template = cobjects.TemplateAttribute(attributes=key_attributes)

    # Issue the Create operation and unpack the result.
    result = self.proxy.create(enums.ObjectType.SYMMETRIC_KEY, template)
    status = result.result_status.value
    if status != enums.ResultStatus.SUCCESS:
        raise exceptions.KmipOperationFailure(
            status,
            result.result_reason.value,
            result.result_message.value)
    return result.uuid.value
@is_connected
def create_key_pair(self,
algorithm,
length,
operation_policy_name=None,
public_name=None,
public_usage_mask=None,
private_name=None,
private_usage_mask=None):
"""
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the public key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
elif not isinstance(length, six.integer_types) or length <= 0:
raise TypeError("length must be a positive integer")
# Create the common attributes that are shared
common_attributes = self._build_common_attributes(
operation_policy_name
)
key_attributes = self._build_key_attributes(algorithm, length)
key_attributes.extend(common_attributes)
template = cobjects.CommonTemplateAttribute(attributes=key_attributes)
# Create public / private specific attributes
public_template = None
names = None
if public_name:
names = self._build_name_attribute(name=public_name)
attrs = []
if public_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
public_usage_mask
)
]
if names or attrs:
public_template = cobjects.PublicKeyTemplateAttribute(
names=names,
attributes=attrs
)
private_template = None
names = None
if private_name:
names = self._build_name_attribute(name=private_name)
attrs = []
if private_usage_mask:
attrs = [
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
private_usage_mask
)
]
if names or attrs:
private_template = cobjects.PrivateKeyTemplateAttribute(
names=names,
attributes=attrs
)
# Create the asymmetric key pair and handle the results
result = self.proxy.create_key_pair(
common_template_attribute=template,
private_key_template_attribute=private_template,
public_key_template_attribute=public_template)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
public_uid = result.public_key_uuid.value
private_uid = result.private_key_uuid.value
return public_uid, private_uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def register(self, managed_object):
"""
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(managed_object, pobjects.ManagedObject):
raise TypeError("managed object must be a Pie ManagedObject")
# Extract and create attributes
object_attributes = list()
if hasattr(managed_object, 'cryptographic_usage_masks'):
if managed_object.cryptographic_usage_masks is not None:
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
managed_object.cryptographic_usage_masks
)
object_attributes.append(mask_attribute)
if hasattr(managed_object, 'operation_policy_name'):
if managed_object.operation_policy_name is not None:
opn_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
managed_object.operation_policy_name
)
object_attributes.append(opn_attribute)
template = cobjects.TemplateAttribute(attributes=object_attributes)
object_type = managed_object.object_type
# Register the managed object and handle the results
secret = self.object_factory.convert(managed_object)
result = self.proxy.register(object_type, template, secret)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
return uid
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def derive_key(self,
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
**kwargs):
"""
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
"""
# Check input
if not isinstance(object_type, enums.ObjectType):
raise TypeError("Object type must be an ObjectType enumeration.")
if not isinstance(unique_identifiers, list):
raise TypeError("Unique identifiers must be a list of strings.")
else:
for unique_identifier in unique_identifiers:
if not isinstance(unique_identifier, six.string_types):
raise TypeError(
"Unique identifiers must be a list of strings."
)
if not isinstance(derivation_method, enums.DerivationMethod):
raise TypeError(
"Derivation method must be a DerivationMethod enumeration."
)
if not isinstance(derivation_parameters, dict):
raise TypeError("Derivation parameters must be a dictionary.")
derivation_parameters = DerivationParameters(
cryptographic_parameters=self._build_cryptographic_parameters(
derivation_parameters.get('cryptographic_parameters')
),
initialization_vector=derivation_parameters.get(
'initialization_vector'
),
derivation_data=derivation_parameters.get('derivation_data'),
salt=derivation_parameters.get('salt'),
iteration_count=derivation_parameters.get('iteration_count')
)
# Handle object attributes
attributes = []
if kwargs.get('cryptographic_length'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
kwargs.get('cryptographic_length')
)
)
if kwargs.get('cryptographic_algorithm'):
attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
kwargs.get('cryptographic_algorithm')
)
)
template_attribute = cobjects.TemplateAttribute(
attributes=attributes
)
# Derive the new key/data and handle the results
result = self.proxy.derive_key(
object_type,
unique_identifiers,
derivation_method,
derivation_parameters,
template_attribute
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('unique_identifier')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def locate(self, maximum_items=None, storage_status_mask=None,
object_group_member=None, attributes=None):
"""
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes the are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if maximum_items is not None:
if not isinstance(maximum_items, six.integer_types):
raise TypeError("maximum_items must be an integer")
if storage_status_mask is not None:
if not isinstance(storage_status_mask, six.integer_types):
raise TypeError("storage_status_mask must be an integer")
if object_group_member is not None:
if not isinstance(object_group_member, enums.ObjectGroupMember):
raise TypeError(
"object_group_member must be a ObjectGroupMember"
"enumeration")
if attributes is not None:
if not isinstance(attributes, list) or \
all(isinstance(item, cobjects.Attribute)
for item in attributes) is False:
raise TypeError(
"attributes must be a list of attributes")
# Search for managed objects and handle the results
result = self.proxy.locate(
maximum_items, storage_status_mask,
object_group_member, attributes)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uids = [uuid.value for uuid in result.uuids]
return uids
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get(self, uid=None, key_wrapping_specification=None):
"""
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if key_wrapping_specification is not None:
if not isinstance(key_wrapping_specification, dict):
raise TypeError(
"Key wrapping specification must be a dictionary."
)
spec = self._build_key_wrapping_specification(
key_wrapping_specification
)
# Get the managed object and handle the results
result = self.proxy.get(uid, key_wrapping_specification=spec)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
managed_object = self.object_factory.convert(result.secret)
return managed_object
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attributes(self, uid=None, attribute_names=None):
"""
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if attribute_names is not None:
if not isinstance(attribute_names, list):
raise TypeError("attribute_names must be a list of strings")
else:
for attribute_name in attribute_names:
if not isinstance(attribute_name, six.string_types):
raise TypeError(
"attribute_names must be a list of strings"
)
# Get the list of attributes for a managed object
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return result.uuid, result.attributes
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def get_attribute_list(self, uid=None):
"""
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Get the list of attribute names for a managed object.
result = self.proxy.get_attribute_list(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
attribute_names = sorted(result.names)
return attribute_names
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def activate(self, uid=None):
"""
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Activate the managed object and handle the results
result = self.proxy.activate(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def revoke(self, revocation_reason, uid=None, revocation_message=None,
compromise_occurrence_date=None):
"""
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if not isinstance(revocation_reason, enums.RevocationReasonCode):
raise TypeError(
"revocation_reason must be a RevocationReasonCode enumeration")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if revocation_message is not None:
if not isinstance(revocation_message, six.string_types):
raise TypeError("revocation_message must be a string")
if compromise_occurrence_date is not None:
if not isinstance(compromise_occurrence_date, six.integer_types):
raise TypeError(
"compromise_occurrence_date must be an integer")
compromise_occurrence_date = primitives.DateTime(
compromise_occurrence_date,
enums.Tags.COMPROMISE_OCCURRENCE_DATE)
# revoke the managed object and handle the results
result = self.proxy.revoke(revocation_reason, uid, revocation_message,
compromise_occurrence_date)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def destroy(self, uid=None):
"""
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
"""
# Check input
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
# Destroy the managed object and handle the results
result = self.proxy.destroy(uid)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
return
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
@is_connected
def encrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the coutner portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Encrypt the provided data and handle the results
result = self.proxy.encrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data'), result.get('iv_counter_nonce')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def decrypt(self, data, uid=None, cryptographic_parameters=None,
iv_counter_nonce=None):
"""
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError("cryptographic_parameters must be a dict")
if iv_counter_nonce is not None:
if not isinstance(iv_counter_nonce, six.binary_type):
raise TypeError("iv_counter_nonce must be bytes")
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.decrypt(
data,
uid,
cryptographic_parameters,
iv_counter_nonce
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('data')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def signature_verify(self, message, signature, uid=None,
cryptographic_parameters=None):
"""
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
"""
# Check input
if not isinstance(message, six.binary_type):
raise TypeError("Message must be bytes.")
if not isinstance(signature, six.binary_type):
raise TypeError("Signature must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Decrypt the provided data and handle the results
result = self.proxy.signature_verify(
message,
signature,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('validity_indicator')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def sign(self, data, uid=None, cryptographic_parameters=None):
"""
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check input
if not isinstance(data, six.binary_type):
raise TypeError("Data to be signed must be bytes.")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("Unique identifier must be a string.")
if cryptographic_parameters is not None:
if not isinstance(cryptographic_parameters, dict):
raise TypeError(
"Cryptographic parameters must be a dictionary."
)
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
# Sign the provided data and handle results
result = self.proxy.sign(
data,
uid,
cryptographic_parameters
)
status = result.get('result_status')
if status == enums.ResultStatus.SUCCESS:
return result.get('signature')
else:
raise exceptions.KmipOperationFailure(
status,
result.get('result_reason'),
result.get('result_message')
)
@is_connected
def mac(self, data, uid=None, algorithm=None):
"""
Get the message authentication code for data.
Args:
data (string): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
string: The data MACed
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
"""
# Check inputs
if not isinstance(data, six.binary_type):
raise TypeError("data must be bytes")
if uid is not None:
if not isinstance(uid, six.string_types):
raise TypeError("uid must be a string")
if algorithm is not None:
if not isinstance(algorithm, enums.CryptographicAlgorithm):
raise TypeError(
"algorithm must be a CryptographicAlgorithm enumeration")
parameters_attribute = self._build_cryptographic_parameters(
{'cryptographic_algorithm': algorithm}
)
# Get the message authentication code and handle the results
result = self.proxy.mac(data, uid, parameters_attribute)
status = result.result_status.value
if status == enums.ResultStatus.SUCCESS:
uid = result.uuid.value
mac_data = result.mac_data.value
return uid, mac_data
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
def _build_key_attributes(self, algorithm, length, masks=None):
# Build a list of core key attributes.
algorithm_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_ALGORITHM,
algorithm)
length_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_LENGTH,
length)
# Default crypto usage mask value
mask_value = [enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT]
if masks:
mask_value.extend(masks)
# remove duplicates
mask_value = list(set(mask_value))
mask_attribute = self.attribute_factory.create_attribute(
enums.AttributeType.CRYPTOGRAPHIC_USAGE_MASK,
mask_value)
return [algorithm_attribute, length_attribute, mask_attribute]
def _build_cryptographic_parameters(self, value):
"""
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
"""
if not isinstance(value, dict):
raise TypeError("Cryptographic parameters must be a dictionary.")
cryptographic_parameters = CryptographicParameters(
block_cipher_mode=value.get('block_cipher_mode'),
padding_method=value.get('padding_method'),
hashing_algorithm=value.get('hashing_algorithm'),
key_role_type=value.get('key_role_type'),
digital_signature_algorithm=value.get(
'digital_signature_algorithm'
),
cryptographic_algorithm=value.get('cryptographic_algorithm'),
random_iv=value.get('random_iv'),
iv_length=value.get('iv_length'),
tag_length=value.get('tag_length'),
fixed_field_length=value.get('fixed_field_length'),
invocation_field_length=value.get('invocation_field_length'),
counter_length=value.get('counter_length'),
initial_counter_value=value.get('initial_counter_value')
)
return cryptographic_parameters
def _build_encryption_key_information(self, value):
"""
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Encryption key information must be a dictionary.")
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
encryption_key_information = cobjects.EncryptionKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return encryption_key_information
def _build_mac_signature_key_information(self, value):
"""
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError(
"MAC/signature key information must be a dictionary."
)
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
cryptographic_parameters = self._build_cryptographic_parameters(
cryptographic_parameters
)
mac_signature_key_information = cobjects.MACSignatureKeyInformation(
unique_identifier=value.get('unique_identifier'),
cryptographic_parameters=cryptographic_parameters
)
return mac_signature_key_information
def _build_key_wrapping_specification(self, value):
"""
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
"""
if value is None:
return None
if not isinstance(value, dict):
raise TypeError("Key wrapping specification must be a dictionary.")
encryption_key_info = self._build_encryption_key_information(
value.get('encryption_key_information')
)
mac_signature_key_info = self._build_mac_signature_key_information(
value.get('mac_signature_key_information')
)
key_wrapping_specification = cobjects.KeyWrappingSpecification(
wrapping_method=value.get('wrapping_method'),
encryption_key_information=encryption_key_info,
mac_signature_key_information=mac_signature_key_info,
attribute_names=value.get('attribute_names'),
encoding_option=value.get('encoding_option')
)
return key_wrapping_specification
def _build_common_attributes(self, operation_policy_name=None):
'''
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
'''
common_attributes = []
if operation_policy_name:
common_attributes.append(
self.attribute_factory.create_attribute(
enums.AttributeType.OPERATION_POLICY_NAME,
operation_policy_name
)
)
return common_attributes
def _build_name_attribute(self, name=None):
'''
Build a name attribute, returned in a list for ease
of use in the caller
'''
name_list = []
if name:
name_list.append(self.attribute_factory.create_attribute(
enums.AttributeType.NAME,
name)
)
return name_list
def __enter__(self):
    # Context-manager entry: open the client connection and return the
    # client so it can be bound by a ``with`` statement.
    self.open()
    return self
def __exit__(self, exc_type, exc_value, traceback):
    # Context-manager exit: always close the connection; exceptions are
    # not suppressed (implicitly returns None).
    self.close()
| kmip/pie/client.py | 59,271 | A simplified KMIP client for conducting KMIP operations.
The ProxyKmipClient is a simpler KMIP client supporting various KMIP
operations. It wraps the original KMIPProxy, reducing the boilerplate
needed to deploy PyKMIP in client applications. The underlying proxy
client is responsible for setting up the underlying socket connection
and for writing/reading data to/from the socket.
Like the KMIPProxy, the ProxyKmipClient is not thread-safe.
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
Build a list of common attributes that are shared across
symmetric as well as asymmetric objects
Build a CryptographicParameters struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
CryptographicParameters struct.
Returns:
CryptographicParameters: a CryptographicParameters struct
Raises:
TypeError: if the input argument is invalid
Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid
Build a KeyWrappingSpecification struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
KeyWrappingSpecification struct.
Returns:
KeyWrappingSpecification: a KeyWrappingSpecification struct
Raises:
TypeError: if the input argument is invalid
Build an MACSignatureKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
MACSignatureKeyInformation struct.
Returns:
MACSignatureInformation: a MACSignatureKeyInformation struct
Raises:
TypeError: if the input argument is invalid
Build a name attribute, returned in a list for ease
of use in the caller
Activate a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to activate.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Close the client connection.
Raises:
Exception: if an error occurs while trying to close the connection
Create a symmetric key on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the symmetric key.
length (int): The length in bits for the symmetric key.
operation_policy_name (string): The name of the operation policy
to use for the new symmetric key. Optional, defaults to None
name (string): The name to give the key. Optional, defaults to None
cryptographic_usage_mask (list): list of CryptographicUsageMask
enumerations to apply to the symmetric key. Optional, defaults to
None
Returns:
string: The uid of the newly created symmetric key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Create an asymmetric key pair on a KMIP appliance.
Args:
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the key pair.
length (int): The length in bits for the key pair.
operation_policy_name (string): The name of the operation policy
to use for the new key pair. Optional, defaults to None.
public_name (string): The name to give the public key.
Optional, defaults to None.
public_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the public key should be used.
Optional, defaults to None.
private_name (string): The name to give the private key.
Optional, defaults to None.
private_usage_mask (list): A list of CryptographicUsageMask
enumerations indicating how the private key should be used.
Optional, defaults to None.
Returns:
string: The uid of the newly created public key.
string: The uid of the newly created private key.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Decrypt data using the specified decryption key and parameters.
Args:
data (bytes): The bytes to decrypt. Required.
uid (string): The unique ID of the decryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the decryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the decryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The decrypted data.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the decryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the decryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| decryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
Derive a new key or secret data from existing managed objects.
Args:
object_type (ObjectType): An ObjectType enumeration specifying
what type of object to derive. Only SymmetricKeys and
SecretData can be specified. Required.
unique_identifiers (list): A list of strings specifying the
unique IDs of the existing managed objects to use for
derivation. Multiple objects can be specified to fit the
requirements of the given derivation method. Required.
derivation_method (DerivationMethod): A DerivationMethod
enumeration specifying how key derivation should be done.
Required.
derivation_parameters (dict): A dictionary containing various
settings for the key derivation process. See Note below.
Required.
**kwargs (various): A placeholder for object attributes that
should be set on the newly derived object. Currently
supported attributes include:
cryptographic_algorithm (enums.CryptographicAlgorithm)
cryptographic_length (int)
Returns:
string: The unique ID of the newly derived object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The derivation_parameters argument is a dictionary that can
contain the following key/value pairs:
Key | Value
---------------------------|---------------------------------------
'cryptographic_parameters' | A dictionary containing additional
| cryptographic settings. See the
| decrypt method for more information.
'initialization_vector' | Bytes to be used to initialize the key
| derivation function, if needed.
'derivation_data' | Bytes to be used as the basis for the
| key derivation process (e.g., the
| bytes to be encrypted, hashed, etc).
'salt' | Bytes to be used as a salt value for the
| key derivation function, if needed.
| Usually used with PBKDF2.
'iteration_count' | An integer defining how many
| iterations should be used with the key
| derivation function, if needed.
| Usually used with PBKDF2.
Destroy a managed object stored by a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to destroy.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Encrypt data using the specified encryption key and parameters.
Args:
data (bytes): The bytes to encrypt. Required.
uid (string): The unique ID of the encryption key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for the encryption.
Optional, defaults to None.
iv_counter_nonce (bytes): The bytes to use for the IV/counter/
nonce, if needed by the encryption algorithm and/or cipher
mode. Optional, defaults to None.
Returns:
bytes: The encrypted data.
bytes: The IV/counter/nonce used with the encryption algorithm,
only if it was autogenerated by the server.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain the following key/value pairs:
Keys | Value
------------------------------|-----------------------------------
'block_cipher_mode' | A BlockCipherMode enumeration
| indicating the cipher mode to use
| with the encryption algorithm.
'padding_method' | A PaddingMethod enumeration
| indicating which padding method to
| use with the encryption algorithm.
'hashing_algorithm' | A HashingAlgorithm enumeration
| indicating which hashing algorithm
| to use.
'key_role_type' | A KeyRoleType enumeration
| indicating the intended use of the
| associated cryptographic key.
'digital_signature_algorithm' | A DigitalSignatureAlgorithm
| enumeration indicating which
| digital signature algorithm to
| use.
'cryptographic_algorithm' | A CryptographicAlgorithm
| enumeration indicating which
| encryption algorithm to use.
'random_iv' | A boolean indicating whether the
| server should autogenerate an IV.
'iv_length' | An integer representing the length
| of the initialization vector (IV)
| in bits.
'tag_length' | An integer representing the length
| of the authenticator tag in bytes.
'fixed_field_length' | An integer representing the length
| of the fixed field portion of the
| IV in bits.
'invocation_field_length' | An integer representing the length
| of the invocation field portion of
| the IV in bits.
'counter_length' | An integer representing the length
| of the counter portion of the IV
| in bits.
'initial_counter_value' | An integer representing the
| starting counter value for CTR
| mode (typically 1).
Get a managed object from a KMIP appliance.
Args:
uid (string): The unique ID of the managed object to retrieve.
key_wrapping_specification (dict): A dictionary containing various
settings to be used when wrapping the key during retrieval.
See Note below. Optional, defaults to None.
Returns:
ManagedObject: The retrieved managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Notes:
The key_wrapping_specification argument is a dictionary that can
contain the following key/value pairs:
Key | Value
--------------------------------|---------------------------------
'wrapping_method' | A WrappingMethod enumeration
| that specifies how the object
| should be wrapped.
'encryption_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'mac_signature_key_information' | A dictionary containing the ID
| of the wrapping key and
| associated cryptographic
| parameters.
'attribute_names' | A list of strings representing
| the names of attributes that
| should be included with the
| wrapped object.
'encoding_option' | An EncodingOption enumeration
| that specifies the encoding of
| the object before it is wrapped.
Get the names of the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attribute names should be associated. Optional,
defaults to None.
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
Search for managed objects, depending on the attributes specified in
the request.
Args:
maximum_items (integer): Maximum number of object identifiers the
server MAY return.
storage_status_mask (integer): A bit mask that indicates whether
on-line or archived objects are to be searched.
object_group_member (ObjectGroupMember): An enumeration that
indicates the object group member type.
attributes (list): Attributes that are REQUIRED to match those in a
candidate object.
Returns:
list: The Unique Identifiers of the located objects
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Get the message authentication code for data.
Args:
data (string): The data to be MACed.
uid (string): The unique ID of the managed object that is the key
to use for the MAC operation.
algorithm (CryptographicAlgorithm): An enumeration defining the
algorithm to use to generate the MAC.
Returns:
string: The unique ID of the managed object that is the key
to use for the MAC operation.
string: The data MACed
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection
Register a managed object with a KMIP appliance.
Args:
managed_object (ManagedObject): A managed object to register. An
instantiatable subclass of ManagedObject from the Pie API.
Returns:
string: The uid of the newly registered managed object.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Revoke a managed object stored by a KMIP appliance.
Args:
revocation_reason (RevocationReasonCode): An enumeration indicating
the revocation reason.
uid (string): The unique ID of the managed object to revoke.
Optional, defaults to None.
revocation_message (string): A message regarding the revocation.
Optional, defaults to None.
compromise_occurrence_date (int): An integer, the number of seconds
since the epoch, which will be converted to the Datetime when
the managed object was first believed to be compromised.
Optional, defaults to None.
Returns:
None
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input argument is invalid
Create a digital signature for data using the specified signing key.
Args:
data (bytes): The bytes of the data to be signed. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for creating the signature
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
signature (bytes): Bytes representing the signature of the data
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Verify a message signature using the specified signing key.
Args:
message (bytes): The bytes of the signed message. Required.
signature (bytes): The bytes of the message signature. Required.
uid (string): The unique ID of the signing key to use.
Optional, defaults to None.
cryptographic_parameters (dict): A dictionary containing various
cryptographic settings to be used for signature verification
(e.g., cryptographic algorithm, hashing algorithm, and/or
digital signature algorithm). Optional, defaults to None.
Returns:
ValidityIndicator: An enumeration indicating whether or not the
signature was valid.
Raises:
ClientConnectionNotOpen: if the client connection is unusable
KmipOperationFailure: if the operation result is a failure
TypeError: if the input arguments are invalid
Notes:
The cryptographic_parameters argument is a dictionary that can
contain various key/value pairs. For a list of allowed pairs,
see the documentation for encrypt/decrypt.
Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO (peter-hamilton) Consider adding validation checks for inputs. TODO (peter-hamilton) Add a multiprocessing lock for synchronization. Check inputs Create the template containing the attributes Create the symmetric key and handle the results Check inputs Create the common attributes that are shared Create public / private specific attributes Create the asymmetric key pair and handle the results Check input Extract and create attributes Register the managed object and handle the results Check input Handle object attributes Derive the new key/data and handle the results Check inputs Search for managed objects and handle the results Check input Get the managed object and handle the results Check input Get the list of attributes for a managed object Check input Get the list of attribute names for a managed object. Check input Activate the managed object and handle the results Check input revoke the managed object and handle the results Check input Destroy the managed object and handle the results Check input Encrypt the provided data and handle the results Check input Decrypt the provided data and handle the results Check input Decrypt the provided data and handle the results Check input Sign the provided data and handle results Check inputs Get the message authentication code and handle the results Build a list of core key attributes. 
Default crypto usage mask value remove duplicates | 25,170 | en | 0.721865 |
# terrascript/kind/r.py
# Automatically generated by tools/makecode.py ()
# Importing this auto-generated per-provider module emits a deprecation
# warning: the "legacy layout" modules are kept only for backwards
# compatibility. stacklevel=2 points the warning at the importing caller.
import warnings

warnings.warn(
    "using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class kind_cluster(terrascript.Resource):
    # Auto-generated binding for the ``kind_cluster`` Terraform resource;
    # all behavior comes from the terrascript.Resource base class.
    pass
| terrascript/kind/r.py | 261 | terrascript/kind/r.py Automatically generated by tools/makecode.py () | 69 | en | 0.483421 |
from __future__ import absolute_import
import json
import datetime
import os
import os.path
import sys
import traceback
from distutils import log
from .base import BaseBuildCommand
class BuildAssetsCommand(BaseBuildCommand):
    """Build Sentry's static media assets and record a version manifest.

    Runs webpack to produce the static dist bundle and writes an
    ``assets.json`` manifest describing the version/build the assets
    correspond to. Rebuilds are skipped when the recorded manifest still
    matches the current package version and build.
    """

    user_options = BaseBuildCommand.user_options + [
        (
            "asset-json-path=",
            None,
            "Relative path for JSON manifest. Defaults to {dist_name}/assets.json",
        ),
        (
            "inplace",
            "i",
            "ignore build-lib and put compiled javascript files into the source "
            + "directory alongside your pure Python modules",
        ),
        (
            "force",
            "f",
            "Force rebuilding of static content. Defaults to rebuilding on version "
            "change detection.",
        ),
    ]

    description = "build static media assets"

    def initialize_options(self):
        self.asset_json_path = u"{}/assets.json".format(self.distribution.get_name())
        BaseBuildCommand.initialize_options(self)

    def get_dist_paths(self):
        return ["src/sentry/static/sentry/dist"]

    def get_manifest_additions(self):
        return ("src/" + self.asset_json_path,)

    def _get_package_version(self):
        """
        Attempt to get the most correct current version of Sentry.

        Tries the importable ``sentry`` module first, then falls back to
        the previously written asset manifest if the module (or either
        piece of version information) is unavailable.
        """
        pkg_path = os.path.join(self.work_path, "src")
        sys.path.insert(0, pkg_path)
        try:
            import sentry
        except Exception:
            version = None
            build = None
        else:
            # BUG FIX: the format string previously had no placeholder, so
            # the module path was never interpolated into the log message.
            log.info(
                u"pulled version information from 'sentry' module ({})".format(
                    sentry.__file__
                )
            )
            version = self.distribution.get_version()
            build = sentry.__build__
        finally:
            # Always undo the sys.path manipulation, even on import failure.
            sys.path.pop(0)

        if not (version and build):
            json_path = self.get_asset_json_path()
            try:
                with open(json_path) as fp:
                    data = json.load(fp)
            except Exception:
                # Best-effort fallback: a missing/corrupt manifest simply
                # leaves version/build as None.
                pass
            else:
                log.info(u"pulled version information from '{}'".format(json_path))
                version, build = data["version"], data["build"]

        return {"version": version, "build": build}

    def _needs_static(self, version_info):
        # Assets must be (re)built when no manifest exists yet, or when the
        # recorded version/build differs from the current one.
        json_path = self.get_asset_json_path()
        if not os.path.exists(json_path):
            return True

        with open(json_path) as fp:
            data = json.load(fp)

        if data.get("version") != version_info.get("version"):
            return True
        if data.get("build") != version_info.get("build"):
            return True
        return False

    def _needs_built(self):
        if BaseBuildCommand._needs_built(self):
            return True
        version_info = self._get_package_version()
        return self._needs_static(version_info)

    def _build(self):
        version_info = self._get_package_version()
        log.info(
            u"building assets for {} v{} (build {})".format(
                self.distribution.get_name(),
                version_info["version"] or "UNKNOWN",
                version_info["build"] or "UNKNOWN",
            )
        )
        # Without both pieces of version information the manifest would be
        # meaningless, so abort the build.
        if not version_info["version"] or not version_info["build"]:
            log.fatal("Could not determine sentry version or build")
            sys.exit(1)

        try:
            self._build_static()
        except Exception:
            traceback.print_exc()
            log.fatal("unable to build Sentry's static assets!")
            sys.exit(1)

        log.info("writing version manifest")
        manifest = self._write_version_file(version_info)
        log.info(u"recorded manifest\n{}".format(json.dumps(manifest, indent=2)))

    def _build_static(self):
        # By setting NODE_ENV=production, a few things happen
        #   * React optimizes out certain code paths
        #   * Webpack will add version strings to built/referenced assets
        env = dict(os.environ)
        env["SENTRY_STATIC_DIST_PATH"] = self.sentry_static_dist_path
        env["NODE_ENV"] = "production"
        self._run_yarn_command(["webpack", "--bail"], env=env)

    def _write_version_file(self, version_info):
        # Record when and for which version/build the assets were produced.
        manifest = {
            "createdAt": datetime.datetime.utcnow().isoformat() + "Z",
            "version": version_info["version"],
            "build": version_info["build"],
        }
        with open(self.get_asset_json_path(), "w") as fp:
            json.dump(manifest, fp)
        return manifest

    @property
    def sentry_static_dist_path(self):
        return os.path.abspath(os.path.join(self.build_lib, "sentry/static/sentry/dist"))

    def get_asset_json_path(self):
        return os.path.abspath(os.path.join(self.build_lib, self.asset_json_path))
| src/sentry/utils/distutils/commands/build_assets.py | 4,802 | Attempt to get the most correct current version of Sentry.
By setting NODE_ENV=production, a few things happen * React optimizes out certain code paths * Webpack will add version strings to built/referenced assets | 219 | en | 0.911054 |
# Generated by Django 3.1.3 on 2021-02-16 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes Manuscript.imagedeck so the
    # foreign key may be blank/null, falling back to its default (None)
    # when the referenced imagedeck row is deleted (SET_DEFAULT).

    dependencies = [
        ("imagedeck", "0009_auto_20201122_2300"),
        ("dcodex", "0034_auto_20201215_0315"),
    ]

    operations = [
        migrations.AlterField(
            model_name="manuscript",
            name="imagedeck",
            field=models.ForeignKey(
                blank=True,
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_DEFAULT,
                to="imagedeck.deckbase",
            ),
        ),
    ]
| dcodex/migrations/0035_auto_20210216_0331.py | 667 | Generated by Django 3.1.3 on 2021-02-16 11:31 | 45 | en | 0.687407 |
#!/usr/bin/env python3 -u
import sys
# Echo every line received on stdin to stderr, tagged "consumed".
# (print's trailing newline reproduces the original explicit "\n".)
for line in sys.stdin:
    print(f"consumed {line}", file=sys.stderr)
| topics/05-data-wrangling/consume.py | 108 | !/usr/bin/env python3 -u | 24 | fr | 0.357018 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six import iteritems, string_types
from numbers import Number
def config(parameters, exclude=None):
    """Render a dict of PHP ini parameters as a multi-line config string.

    Args:
        parameters (dict): Mapping of ini directive name to value.
        exclude (list): Optional directive names to omit from the output.

    Returns:
        str: One ``key = value`` line per directive, sorted by key, with
            no leading newline.

    Raises:
        AnsibleFilterError: if ``parameters`` is not a dict.
    """
    exclude = exclude or []
    if not isinstance(parameters, dict):
        raise AnsibleFilterError('php_config expects a dict but was given a %s' % type(parameters))
    # BUG FIX: previously the exclusions were applied with dict.pop inside a
    # throwaway list comprehension, mutating the caller's dictionary as a
    # side effect. Filter into a copy instead.
    parameters = {
        key: value for key, value in parameters.items() if key not in exclude
    }
    result = ''
    for key in sorted(parameters):
        parameter = config_parameter(parameters, key)
        if parameter:
            result += '\n%s' % parameter
    return result.lstrip()
def config_parameter(parameters, key, required=False, comment=False, **kwargs):
    """Render a single PHP ini directive as a config line.

    Args:
        parameters (dict): Mapping of ini directive name to value.
        key (str): The directive to render.
        required (bool): Raise if the key is absent from ``parameters``.
        comment (bool|str): When the key is absent, a string is returned
            verbatim; a truthy bool comments out the defaulted line.
        **kwargs: ``default`` supplies the value used when the key is
            absent.

    Returns:
        str: The rendered (possibly commented) directive line.

    Raises:
        AnsibleFilterError: on invalid arguments, a missing required key,
            a missing default, or an unsupported value type.
    """
    if not isinstance(parameters, dict):
        raise AnsibleFilterError('php_config_parameter parameters expects a dict but was given a %s' % type(parameters))
    if not isinstance(key, string_types):
        raise AnsibleFilterError('php_config_parameter key expects a string but was given a %s' % type(key))

    present = key in parameters
    if present:
        value = parameters[key]
    else:
        if required:
            raise AnsibleFilterError('php_config_parameter requires a value for key %s' % key)
        # A string comment short-circuits rendering entirely.
        if isinstance(comment, string_types):
            return comment
        if 'default' not in kwargs:
            raise AnsibleFilterError('php_config_parameter missing a default value for key %s' % key)
        value = kwargs.get('default')

    # Booleans render as On/Off per php.ini convention; strings and
    # numbers render verbatim.
    if value is True:
        result = '%s = On' % key
    elif value is False:
        result = '%s = Off' % key
    elif isinstance(value, (string_types, Number)):
        result = '%s = %s' % (key, value)
    else:
        raise AnsibleFilterError('php_config_parameter value of an unknown type %s' % type(value))

    # Defaulted values may be emitted commented out (every line prefixed).
    if not present and comment:
        result = ';' + result.replace('\n', '\n;')
    return result
class FilterModule(object):
    """Manala php config jinja2 filters."""

    def filters(self):
        # Expose the module-level implementations under their jinja2 names.
        return {
            'php_config': config,
            'php_config_parameter': config_parameter,
        }
| plugins/filter/php_config.py | 2,182 | Manala php config jinja2 filters | 32 | en | 0.09251 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains unit test for the converter module.
"""
import unittest
from rmgpy.molecule.converter import debug_rdkit_mol, to_rdkit_mol, from_rdkit_mol, to_ob_mol, from_ob_mol
from rmgpy.molecule.molecule import Molecule
class RDKitTest(unittest.TestCase):
    """Unit tests for the RDKit conversion helpers (debug_rdkit_mol, to_rdkit_mol)."""

    def test_debugger(self):
        """Test the debug_rdkit_mol(rdmol) function doesn't crash

        We can't really test it in the unit testing framework, because
        that already captures and redirects standard output, and that
        conflicts with the function, but this checks it doesn't crash.
        """
        import rdkit.Chem
        import logging
        rdmol = rdkit.Chem.MolFromSmiles('CCC')
        message = debug_rdkit_mol(rdmol, level=logging.INFO)
        self.assertIsNotNone(message)

    def test_lone_pair_retention(self):
        """Test that we don't lose any lone pairs on round trip RDKit conversion."""
        # Formaldehyde: the O atom carries two lone pairs (p2).
        mol = Molecule().from_adjacency_list("""
1 C u0 p0 c0 {2,D} {3,S} {4,S}
2 O u0 p2 c0 {1,D}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
""")
        rdmol = to_rdkit_mol(mol)
        mol2 = from_rdkit_mol(Molecule(), rdmol)
        self.assertTrue(mol.is_isomorphic(mol2))

    def test_atom_mapping_1(self):
        """Test that to_rdkit_mol returns correct indices and atom mappings."""
        # Translate RDKit bond-type names to RMG numeric bond orders.
        bond_order_dict = {'SINGLE': 1, 'DOUBLE': 2, 'TRIPLE': 3, 'AROMATIC': 1.5}
        mol = Molecule().from_smiles('C1CCC=C1C=O')
        rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=False, return_mapping=True)
        for atom in mol.atoms:
            # Check that all atoms are found in mapping
            self.assertTrue(atom in rd_atom_indices)
            # Check that all bonds are in rdkitmol with correct mapping and order
            for connected_atom, bond in atom.bonds.items():
                bond_type = str(rdkitmol.GetBondBetweenAtoms(rd_atom_indices[atom],
                                                             rd_atom_indices[connected_atom]).GetBondType())
                rdkit_bond_order = bond_order_dict[bond_type]
                self.assertEqual(bond.order, rdkit_bond_order)
        # Test for remove_h = True
        rdkitmol2, rd_atom_indices2 = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
        for atom in mol.atoms:
            # Check that all non-hydrogen atoms are found in mapping
            if atom.symbol != 'H':
                self.assertTrue(atom in rd_atom_indices2)
                # Check that all bonds connected to non-hydrogen have the correct mapping and order
                for connected_atom, bond in atom.bonds.items():
                    if connected_atom.symbol != 'H':
                        bond_type = str(rdkitmol2.GetBondBetweenAtoms(rd_atom_indices2[atom],
                                                                      rd_atom_indices2[connected_atom]).GetBondType())
                        rdkit_bond_order = bond_order_dict[bond_type]
                        self.assertEqual(bond.order, rdkit_bond_order)

    def test_atom_mapping_2(self):
        """Test that to_rdkit_mol returns correct indices and atom mappings when hydrogens are removed."""
        # Methanol with explicit hydrogens; only C and O survive remove_h=True.
        adjlist = """
1 H u0 p0 c0 {2,S}
2 C u0 p0 c0 {1,S} {3,S} {4,S} {5,S}
3 H u0 p0 c0 {2,S}
4 H u0 p0 c0 {2,S}
5 O u0 p2 c0 {2,S} {6,S}
6 H u0 p0 c0 {5,S}
"""
        mol = Molecule().from_adjacency_list(adjlist)
        rdkitmol, rd_atom_indices = to_rdkit_mol(mol, remove_h=True, return_mapping=True)
        heavy_atoms = [at for at in mol.atoms if at.number != 1]
        for at1 in heavy_atoms:
            for at2 in heavy_atoms:
                if mol.has_bond(at1, at2):
                    try:
                        rdkitmol.GetBondBetweenAtoms(rd_atom_indices[at1], rd_atom_indices[at2])
                    except RuntimeError:
                        self.fail("RDKit failed in finding the bond in the original atom!")
class ConverterTest(unittest.TestCase):
    """Round-trip conversion tests between RMG molecules and RDKit/OpenBabel."""

    def setUp(self):
        """Function run before each test in this class."""
        smiles_cases = [
            'C', 'O', 'N', 'S',
            '[CH2]C', '[CH]C',
            'C=CC=C', 'C#C[CH2]',
            'c1ccccc1', '[13CH3]C',
        ]
        self.test_mols = [Molecule().from_smiles(smiles) for smiles in smiles_cases]
        # An H-bonded structure of glycolaldehyde; round trips may land on the
        # H-bond-free form, which the tests below accept as equivalent.
        self.test_mols.append(Molecule().from_smiles('O=CCO').generate_h_bonded_structures()[0])
        self.test_Hbond_free_mol = Molecule().from_smiles('O=CCO')

    def test_rdkit_round_trip(self):
        """Test conversion to and from RDKitMol"""
        for original in self.test_mols:
            round_tripped = from_rdkit_mol(Molecule(), to_rdkit_mol(original))
            self.assertTrue(
                original.is_isomorphic(round_tripped)
                or self.test_Hbond_free_mol.is_isomorphic(round_tripped)
            )
            self.assertEqual(original.get_element_count(), round_tripped.get_element_count())

    def test_ob_round_trip(self):
        """Test conversion to and from OBMol"""
        for original in self.test_mols:
            round_tripped = from_ob_mol(Molecule(), to_ob_mol(original))
            self.assertTrue(
                original.is_isomorphic(round_tripped)
                or self.test_Hbond_free_mol.is_isomorphic(round_tripped)
            )
            self.assertEqual(original.get_element_count(), round_tripped.get_element_count())
| rmgpy/molecule/converterTest.py | 7,610 | Function run before each test in this class.
Test that to_rdkit_mol returns correct indices and atom mappings.
Test that to_rdkit_mol returns correct indices and atom mappings when hydrogens are removed.
Test the debug_rdkit_mol(rdmol) function doesn't crash
We can't really test it in the unit testing framework, because
that already captures and redirects standard output, and that
conflicts with the function, but this checks it doesn't crash.
Test that we don't lose any lone pairs on round trip RDKit conversion.
Test conversion to and from OBMol
Test conversion to and from RDKitMol
This module contains unit test for the converter module.
!/usr/bin/env python -*- coding: utf-8 -*- RMG - Reaction Mechanism Generator Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Check that all atoms are found in mapping Check that all bonds are in rdkitmol with correct mapping and order Test for remove_h = True Check that all non-hydrogen atoms are found in mapping Check that all bonds connected to non-hydrogen have the correct mapping and order | 2,810 | en | 0.845555 |
"""
Lowest level connection
"""
from __future__ import division
import logging
import math
import random
import time
import uuid
import warnings
from base64 import b64decode
from threading import local
import six
from botocore.client import ClientError
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from botocore.vendored import requests
from botocore.vendored.requests import Request
from six.moves import range
from pynamodb.compat import NullHandler
from pynamodb.connection.util import pythonic
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES, COMPARISON_OPERATOR_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES, ATTR_UPDATE_ACTIONS,
COMPARISON_OPERATOR, EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, SCAN_FILTER_VALUES, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, ATTR_VALUE_LIST, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS, ATTR_UPDATES,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, SCAN_FILTER, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, EXPECTED, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, ACTION, EXISTS, VALUE, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, EQ, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, QUERY_FILTER, QUERY_FILTER_VALUES, CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS, NULL, NOT_NULL, SHORT_ATTR_TYPES, DELETE, PUT,
ITEMS, DEFAULT_ENCODING, BINARY_SHORT, BINARY_SET_SHORT, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED, UPDATE_EXPRESSION,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES, KEY_CONDITION_OPERATOR_MAP,
CONDITION_EXPRESSION, FILTER_EXPRESSION, FILTER_EXPRESSION_OPERATOR_MAP, NOT_CONTAINS, AND)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError
)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Update
from pynamodb.settings import get_settings_value
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
# Exception types that any botocore-backed call below may surface.
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
log = logging.getLogger(__name__)
# Library convention: attach a no-op handler so applications that never
# configure logging don't see "no handlers could be found" warnings.
log.addHandler(NullHandler())
class MetaTable(object):
    """
    A pythonic wrapper around table metadata

    Wraps the raw DescribeTable payload and exposes convenience accessors
    for key names, index keys and attribute types.
    """

    def __init__(self, data):
        # Raw DescribeTable table description; fall back to {} so the
        # accessors below never have to null-check.
        self.data = data or {}
        # Lazily resolved caches for the primary key attribute names.
        self._range_keyname = None
        self._hash_keyname = None

    def __repr__(self):
        if self.data:
            return six.u("MetaTable<{0}>".format(self.data.get(TABLE_NAME)))
        # Bug fix: previously this branch implicitly returned None, which
        # makes repr() raise TypeError (__repr__ must return a string).
        return six.u("MetaTable<None>")

    @property
    def range_keyname(self):
        """
        Returns the name of this table's range key
        """
        if self._range_keyname is None:
            for attr in self.data.get(KEY_SCHEMA):
                if attr.get(KEY_TYPE) == RANGE:
                    self._range_keyname = attr.get(ATTR_NAME)
                    break  # at most one range key exists; mirror hash_keyname
        return self._range_keyname

    @property
    def hash_keyname(self):
        """
        Returns the name of this table's hash key
        """
        if self._hash_keyname is None:
            for attr in self.data.get(KEY_SCHEMA):
                if attr.get(KEY_TYPE) == HASH:
                    self._hash_keyname = attr.get(ATTR_NAME)
                    break
        return self._hash_keyname

    def get_key_names(self, index_name=None):
        """
        Returns the names of the primary key attributes and index key attributes (if index_name is specified)
        """
        key_names = [self.hash_keyname]
        if self.range_keyname:
            key_names.append(self.range_keyname)
        if index_name is not None:
            index_hash_keyname = self.get_index_hash_keyname(index_name)
            if index_hash_keyname not in key_names:
                key_names.append(index_hash_keyname)
            index_range_keyname = self.get_index_range_keyname(index_name)
            if index_range_keyname is not None and index_range_keyname not in key_names:
                key_names.append(index_range_keyname)
        return key_names

    def _get_index_keyname(self, index_name, key_type):
        # Shared lookup for the two public index-key accessors below: scan
        # local then global secondary indexes for `index_name` and return the
        # attribute whose key type matches, or None if absent.
        global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
        local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
        indexes = []
        if local_indexes:
            indexes += local_indexes
        if global_indexes:
            indexes += global_indexes
        for index in indexes:
            if index.get(INDEX_NAME) == index_name:
                for schema_key in index.get(KEY_SCHEMA):
                    if schema_key.get(KEY_TYPE) == key_type:
                        return schema_key.get(ATTR_NAME)
        return None

    def get_index_hash_keyname(self, index_name):
        """
        Returns the name of the hash key for a given index
        """
        return self._get_index_keyname(index_name, HASH)

    def get_index_range_keyname(self, index_name):
        """
        Returns the name of the range key for a given index

        (Docstring fix: this previously claimed to return the hash key.)
        """
        return self._get_index_keyname(index_name, RANGE)

    def get_item_attribute_map(self, attributes, item_key=ITEM, pythonic_key=True):
        """
        Builds up a dynamodb compatible AttributeValue map

        :param attributes: mapping of attribute name to either a raw value or
            an already-typed mapping such as {'S': 'value'}
        :param item_key: top-level key under which the attribute map is nested
        :param pythonic_key: kept for interface compatibility; the original
            implementation contained only a no-op for it
            (``item_key = item_key``), so it has no effect — confirm intent.
        """
        attr_map = {
            item_key: {}
        }
        for key, value in attributes.items():
            # In this case, the user provided a mapping
            # {'key': {'S': 'value'}}
            if isinstance(value, dict):
                attr_map[item_key][key] = value
            else:
                attr_map[item_key][key] = {
                    self.get_attribute_type(key): value
                }
        return attr_map

    def get_attribute_type(self, attribute_name, value=None):
        """
        Returns the proper attribute type for a given attribute name

        :param value: optional already-typed value mapping; when the name is
            not in the table schema, the short type is read from its keys
        :raises ValueError: if the type cannot be determined
        """
        for attr in self.data.get(ATTR_DEFINITIONS):
            if attr.get(ATTR_NAME) == attribute_name:
                return attr.get(ATTR_TYPE)
        if value is not None and isinstance(value, dict):
            for key in SHORT_ATTR_TYPES:
                if key in value:
                    return key
        attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS)]
        raise ValueError("No attribute {0} in {1}".format(attribute_name, attr_names))

    def get_identifier_map(self, hash_key, range_key=None, key=KEY):
        """
        Builds the identifier map that is common to several operations
        """
        kwargs = {
            key: {
                self.hash_keyname: {
                    self.get_attribute_type(self.hash_keyname): hash_key
                }
            }
        }
        if range_key is not None:
            kwargs[key][self.range_keyname] = {
                self.get_attribute_type(self.range_keyname): range_key
            }
        return kwargs

    def get_exclusive_start_key_map(self, exclusive_start_key):
        """
        Builds the exclusive start key attribute map
        """
        if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
            # This is useful when paginating results, as the LastEvaluatedKey returned is already
            # structured properly
            return {
                EXCLUSIVE_START_KEY: exclusive_start_key
            }
        else:
            return {
                EXCLUSIVE_START_KEY: {
                    self.hash_keyname: {
                        self.get_attribute_type(self.hash_keyname): exclusive_start_key
                    }
                }
            }
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self, region=None, host=None, session_cls=None,
request_timeout_seconds=None, max_retry_attempts=None, base_backoff_ms=None):
self._tables = {}
self.host = host
self._local = local()
self._requests_session = None
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if session_cls:
self.session_cls = session_cls
else:
self.session_cls = get_settings_value('session_cls')
if request_timeout_seconds is not None:
self._request_timeout_seconds = request_timeout_seconds
else:
self._request_timeout_seconds = get_settings_value('request_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
def __repr__(self):
return six.u("Connection<{0}>".format(self.client.meta.endpoint_url))
    def _log_debug(self, operation, kwargs):
        """
        Sends a debug message to the logger

        :param operation: name of the DynamoDB operation about to be dispatched
        :param kwargs: the operation's keyword arguments, logged verbatim
        """
        log.debug("Calling %s with arguments %s", operation, kwargs)
    def _log_debug_response(self, operation, response):
        """
        Sends a debug message to the logger about a response

        :param operation: name of the DynamoDB operation that produced it
        :param response: the parsed response payload, logged verbatim
        """
        log.debug("%s response: %s", operation, response)
def _log_error(self, operation, response):
"""
Sends an error message to the logger
"""
log.error("%s failed with status: %s, message: %s",
operation, response.status_code,response.content)
def _create_prepared_request(self, request_dict, operation_model):
"""
Create a prepared request object from request_dict, and operation_model
"""
boto_prepared_request = self.client._endpoint.create_request(request_dict, operation_model)
# The call requests_session.send(final_prepared_request) ignores the headers which are
# part of the request session. In order to include the requests session headers inside
# the request, we create a new request object, and call prepare_request with the newly
# created request object
raw_request_with_params = Request(
boto_prepared_request.method,
boto_prepared_request.url,
data=boto_prepared_request.body,
headers=boto_prepared_request.headers
)
return self.requests_session.prepare_request(raw_request_with_params)
def dispatch(self, operation_name, operation_kwargs):
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
self._log_debug(operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
    def _make_api_call(self, operation_name, operation_kwargs):
        """
        This private method is here for two reasons:
        1. It's faster to avoid using botocore's response parsing
        2. It provides a place to monkey patch requests for unit testing

        Retries up to self._max_retry_attempts_exception times. Transport
        failures retry immediately; retryable HTTP errors back off with
        full jitter before retrying.
        """
        operation_model = self.client._service_model.operation_model(operation_name)
        request_dict = self.client._convert_to_request_dict(
            operation_kwargs,
            operation_model
        )
        prepared_request = self._create_prepared_request(request_dict, operation_model)
        for i in range(0, self._max_retry_attempts_exception + 1):
            attempt_number = i + 1
            is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
            try:
                response = self.requests_session.send(
                    prepared_request,
                    timeout=self._request_timeout_seconds,
                    proxies=self.client._endpoint.proxies,
                )
                # ValueError below also covers a body that fails to parse as JSON.
                data = response.json()
            except (requests.RequestException, ValueError) as e:
                if is_last_attempt_for_exceptions:
                    log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
                    raise
                else:
                    # No backoff for fast-fail exceptions that likely failed at the frontend
                    log.debug(
                        'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
                        operation_name,
                        attempt_number,
                        e.__class__.__name__,
                        e
                    )
                    continue
            if response.status_code >= 300:
                # Extract error code from __type
                code = data.get('__type', '')
                if '#' in code:
                    code = code.rsplit('#', 1)[1]
                botocore_expected_format = {'Error': {'Message': data.get('message', ''), 'Code': code}}
                verbose_properties = {
                    'request_id': response.headers.get('x-amzn-RequestId')
                }
                if 'RequestItems' in operation_kwargs:
                    # Batch operations can hit multiple tables, report them comma separated
                    verbose_properties['table_name'] = ','.join(operation_kwargs['RequestItems'])
                else:
                    verbose_properties['table_name'] = operation_kwargs.get('TableName')
                try:
                    # Raise-inside-try: the handler below decides whether this
                    # freshly built error propagates or triggers another attempt.
                    raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
                except VerboseClientError as e:
                    if is_last_attempt_for_exceptions:
                        log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
                        raise
                    elif response.status_code < 500 and code != 'ProvisionedThroughputExceededException':
                        # We don't retry on a ConditionalCheckFailedException or other 4xx (except for
                        # throughput related errors) because we assume they will fail in perpetuity.
                        # Retrying when there is already contention could cause other problems
                        # in part due to unnecessary consumption of throughput.
                        raise
                    else:
                        # We use fully-jittered exponentially-backed-off retries:
                        #  https://www.awsarchitectureblog.com/2015/03/backoff.html
                        sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
                        log.debug(
                            'Retry with backoff needed for (%s) after attempt %s,'
                            'sleeping for %s milliseconds, retryable %s caught: %s',
                            operation_name,
                            attempt_number,
                            sleep_time_ms,
                            e.__class__.__name__,
                            e
                        )
                        time.sleep(sleep_time_ms / 1000.0)
                        continue
            # Success path: decode binary attributes and return the payload.
            return self._handle_binary_attributes(data)
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in six.itervalues(data[ITEM]):
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if RESPONSES in data:
for item_list in six.itervalues(data[RESPONSES]):
for item in item_list:
for attr in six.itervalues(item):
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in six.itervalues(data[LAST_EVALUATED_KEY]):
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in six.itervalues(data[UNPROCESSED_KEYS]):
for item in table_data[KEYS]:
for attr in six.itervalues(item):
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in six.itervalues(data[UNPROCESSED_ITEMS]):
for request in table_unprocessed_requests:
for item_mapping in six.itervalues(request):
for item in six.itervalues(item_mapping):
for attr in six.itervalues(item):
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in six.itervalues(data[ATTRIBUTES]):
_convert_binary(attr)
return data
@property
def session(self):
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def requests_session(self):
"""
Return a requests session to execute prepared requests using the same pool
"""
if self._requests_session is None:
self._requests_session = self.session_cls()
return self._requests_session
    @property
    def client(self):
        """
        Returns a botocore dynamodb client

        The client is created lazily and rebuilt if the cached one has no
        credentials attached (see the workaround note below).
        """
        # botocore has a known issue where it will cache empty credentials
        # https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
        # if the client does not have credentials, we create a new client
        # otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
        if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
            self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host)
        return self._client
def get_meta_table(self, table_name, refresh=False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {0}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(self,
table_name,
attribute_definitions=None,
key_schema=None,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_indexes=None,
local_secondary_indexes=None,
stream_specification=None):
"""
Performs the CreateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get(pythonic(ATTR_NAME)),
ATTR_TYPE: attr.get(pythonic(ATTR_TYPE))
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
global_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
PROVISIONED_THROUGHPUT: index.get(pythonic(PROVISIONED_THROUGHPUT))
})
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get(pythonic(ATTR_NAME)),
KEY_TYPE: str(item.get(pythonic(KEY_TYPE))).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
KEY_SCHEMA: sorted(index.get(pythonic(KEY_SCHEMA)), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get(pythonic(PROJECTION)),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification[pythonic(STREAM_ENABLED)],
STREAM_VIEW_TYPE: stream_specification[pythonic(STREAM_VIEW_TYPE)]
}
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {0}".format(e), e)
return data
def delete_table(self, table_name):
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {0}".format(e), e)
return data
def update_table(self,
table_name,
read_capacity_units=None,
write_capacity_units=None,
global_secondary_index_updates=None):
"""
Performs the UpdateTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
if read_capacity_units and not write_capacity_units or write_capacity_units and not read_capacity_units:
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get(pythonic(INDEX_NAME)),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get(pythonic(READ_CAPACITY_UNITS)),
WRITE_CAPACITY_UNITS: index.get(pythonic(WRITE_CAPACITY_UNITS))
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {0}".format(e), e)
def list_tables(self, exclusive_start_table_name=None, limit=None):
"""
Performs the ListTables operation
"""
operation_kwargs = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {0}".format(e), e)
def describe_table(self, table_name):
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_conditional_operator(self, operator):
"""
Returns a dictionary containing the correct conditional operator,
validating it first.
"""
operator = operator.upper()
if operator not in CONDITIONAL_OPERATORS:
raise ValueError(
"The {0} must be one of {1}".format(
CONDITIONAL_OPERATOR,
CONDITIONAL_OPERATORS
)
)
return {
CONDITIONAL_OPERATOR: operator
}
def get_item_attribute_map(self, table_name, attributes, item_key=ITEM, pythonic_key=True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def get_expected_map(self, table_name, expected):
"""
Builds the expected map that is common to several operations
"""
kwargs = {EXPECTED: {}}
for key, condition in expected.items():
if EXISTS in condition:
kwargs[EXPECTED][key] = {
EXISTS: condition.get(EXISTS)
}
elif VALUE in condition:
kwargs[EXPECTED][key] = {
VALUE: {
self.get_attribute_type(table_name, key): condition.get(VALUE)
}
}
elif COMPARISON_OPERATOR in condition:
kwargs[EXPECTED][key] = {
COMPARISON_OPERATOR: condition.get(COMPARISON_OPERATOR),
}
values = []
for value in condition.get(ATTR_VALUE_LIST, []):
attr_type = self.get_attribute_type(table_name, key, value)
values.append({attr_type: self.parse_attribute(value)})
if condition.get(COMPARISON_OPERATOR) not in [NULL, NOT_NULL]:
kwargs[EXPECTED][key][ATTR_VALUE_LIST] = values
return kwargs
def parse_attribute(self, attribute, return_type=False):
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in SHORT_ATTR_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {0}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(self, table_name, attribute_name, value=None):
"""
Returns the proper attribute type for a given attribute name
:param value: The attribute value an be supplied just in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(self, table_name, hash_key, range_key=None, key=KEY):
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {0}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_query_filter_map(self, table_name, query_filters):
    """
    Builds the QueryFilter parameter needed for the Query operation
    """
    query_filter = {}
    for attr_name, spec in query_filters.items():
        operator = spec.get(COMPARISON_OPERATOR)
        if operator not in QUERY_FILTER_VALUES:
            raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
        attr_value_list = [
            {self.get_attribute_type(table_name, attr_name, value): self.parse_attribute(value)}
            for value in spec.get(ATTR_VALUE_LIST, [])
        ]
        query_filter[attr_name] = {COMPARISON_OPERATOR: operator}
        # Operators such as NULL / NOT_NULL supply no operand values.
        if attr_value_list:
            query_filter[attr_name][ATTR_VALUE_LIST] = attr_value_list
    return {QUERY_FILTER: query_filter}
def get_consumed_capacity_map(self, return_consumed_capacity):
    """
    Builds the ReturnConsumedCapacity map that is common to several operations

    :param return_consumed_capacity: one of RETURN_CONSUMED_CAPACITY_VALUES
        (case-insensitive)
    :raises ValueError: if the value is not a valid ReturnConsumedCapacity setting
    """
    if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
        # Bug fix: the message previously named RETURN_ITEM_COLL_METRICS, which is a
        # different parameter; report the parameter that is actually being validated.
        raise ValueError("{0} must be one of {1}".format(RETURN_CONSUMED_CAPACITY, RETURN_CONSUMED_CAPACITY_VALUES))
    return {
        RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
    }
def get_return_values_map(self, return_values):
    """
    Builds the ReturnValues map that is common to several operations
    """
    if return_values.upper() in RETURN_VALUES_VALUES:
        return {RETURN_VALUES: str(return_values).upper()}
    raise ValueError("{0} must be one of {1}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
def get_item_collection_map(self, return_item_collection_metrics):
    """
    Builds the ReturnItemCollectionMetrics map
    """
    if return_item_collection_metrics.upper() in RETURN_ITEM_COLL_METRICS_VALUES:
        return {RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()}
    raise ValueError("{0} must be one of {1}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
def get_exclusive_start_key_map(self, table_name, exclusive_start_key):
    """
    Builds the exclusive start key attribute map

    :raises TableError: if no metadata is cached for ``table_name``
    """
    meta_table = self.get_meta_table(table_name)
    if meta_table is None:
        raise TableError("No such table {0}".format(table_name))
    return meta_table.get_exclusive_start_key_map(exclusive_start_key)
def delete_item(self,
                table_name,
                hash_key,
                range_key=None,
                condition=None,
                expected=None,
                conditional_operator=None,
                return_values=None,
                return_consumed_capacity=None,
                return_item_collection_metrics=None):
    """
    Performs the DeleteItem operation and returns the result

    :param table_name: name of the table containing the item
    :param hash_key: hash key value of the item to delete
    :param range_key: range key value, required for composite-key tables
    :param condition: Condition expression guarding the delete
    :param expected: legacy Expected conditional map (deprecated; use `condition`)
    :param conditional_operator: legacy AND/OR joiner for `expected` entries
    :param return_values: which item attributes to return, if any
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param return_item_collection_metrics: whether to return item collection metrics
    :raises DeleteError: if the underlying request fails
    """
    # `condition` and the legacy `expected`/`conditional_operator` parameters
    # are mutually exclusive; the legacy form triggers a deprecation warning.
    self._check_condition('condition', condition, expected, conditional_operator)
    operation_kwargs = {TABLE_NAME: table_name}
    operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
    # Placeholder maps shared by all expression serialization below.
    name_placeholders = {}
    expression_attribute_values = {}
    if condition is not None:
        condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if return_values:
        operation_kwargs.update(self.get_return_values_map(return_values))
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if return_item_collection_metrics:
        operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
    # We read the conditional operator even without expected passed in to maintain existing behavior.
    conditional_operator = self.get_conditional_operator(conditional_operator or AND)
    if expected:
        # Translate the legacy Expected map into a ConditionExpression.
        condition_expression = self._get_condition_expression(
            table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if name_placeholders:
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(DELETE_ITEM, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise DeleteError("Failed to delete item: {0}".format(e), e)
def update_item(self,
                table_name,
                hash_key,
                range_key=None,
                actions=None,
                attribute_updates=None,
                condition=None,
                expected=None,
                return_consumed_capacity=None,
                conditional_operator=None,
                return_item_collection_metrics=None,
                return_values=None):
    """
    Performs the UpdateItem operation

    :param table_name: name of the table containing the item
    :param hash_key: hash key value of the item to update
    :param range_key: range key value, required for composite-key tables
    :param actions: list of update actions serialized into an UpdateExpression
    :param attribute_updates: legacy AttributeUpdates map (deprecated; use `actions`)
    :param condition: Condition expression guarding the update
    :param expected: legacy Expected conditional map (deprecated; use `condition`)
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param conditional_operator: legacy AND/OR joiner for `expected` entries
    :param return_item_collection_metrics: whether to return item collection metrics
    :param return_values: which item attributes to return, if any
    :raises ValueError: if neither `actions` nor `attribute_updates` is supplied
    :raises UpdateError: if the underlying request fails
    """
    # `actions` and legacy `attribute_updates` are mutually exclusive, as are
    # `condition` and the legacy `expected`/`conditional_operator` parameters.
    self._check_actions(actions, attribute_updates)
    self._check_condition('condition', condition, expected, conditional_operator)
    operation_kwargs = {TABLE_NAME: table_name}
    operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
    name_placeholders = {}
    expression_attribute_values = {}
    if condition is not None:
        condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if return_item_collection_metrics:
        operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
    if return_values:
        operation_kwargs.update(self.get_return_values_map(return_values))
    if not actions and not attribute_updates:
        raise ValueError("{0} cannot be empty".format(ATTR_UPDATES))
    actions = actions or []
    attribute_updates = attribute_updates or {}

    update_expression = Update(*actions)
    # We sort the keys here for determinism. This is mostly done to simplify testing.
    for key in sorted(attribute_updates.keys()):
        path = Path([key])
        update = attribute_updates[key]
        action = update.get(ACTION)
        if action not in ATTR_UPDATE_ACTIONS:
            raise ValueError("{0} must be one of {1}".format(ACTION, ATTR_UPDATE_ACTIONS))
        value = update.get(VALUE)
        # Accept raw values or pre-typed {type: value} maps.
        attr_type, value = self.parse_attribute(value, return_type=True)
        if attr_type is None and action != DELETE:
            attr_type = self.get_attribute_type(table_name, key, value)
        value = {attr_type: value}
        if action == DELETE:
            # DELETE with no typed value removes the attribute entirely;
            # with a value it deletes elements from a set attribute.
            action = path.remove() if attr_type is None else path.delete(value)
        elif action == PUT:
            action = path.set(value)
        else:
            action = path.add(value)
        update_expression.add_action(action)
    operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(name_placeholders, expression_attribute_values)
    # We read the conditional operator even without expected passed in to maintain existing behavior.
    conditional_operator = self.get_conditional_operator(conditional_operator or AND)
    if expected:
        # Translate the legacy Expected map into a ConditionExpression.
        condition_expression = self._get_condition_expression(
            table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if name_placeholders:
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(UPDATE_ITEM, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise UpdateError("Failed to update item: {0}".format(e), e)
def put_item(self,
             table_name,
             hash_key,
             range_key=None,
             attributes=None,
             condition=None,
             expected=None,
             conditional_operator=None,
             return_values=None,
             return_consumed_capacity=None,
             return_item_collection_metrics=None):
    """
    Performs the PutItem operation and returns the result

    :param table_name: name of the table to write to
    :param hash_key: hash key value of the item
    :param range_key: range key value, required for composite-key tables
    :param attributes: map of non-key attribute values for the item
    :param condition: Condition expression guarding the put
    :param expected: legacy Expected conditional map (deprecated; use `condition`)
    :param conditional_operator: legacy AND/OR joiner for `expected` entries
    :param return_values: which item attributes to return, if any
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param return_item_collection_metrics: whether to return item collection metrics
    :raises PutError: if the underlying request fails
    """
    # `condition` and the legacy `expected`/`conditional_operator` parameters
    # are mutually exclusive; the legacy form triggers a deprecation warning.
    self._check_condition('condition', condition, expected, conditional_operator)
    operation_kwargs = {TABLE_NAME: table_name}
    # key=ITEM places the serialized key attributes under Item rather than Key.
    operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=ITEM))
    name_placeholders = {}
    expression_attribute_values = {}
    if attributes:
        # Merge the non-key attributes into the Item map built above.
        attrs = self.get_item_attribute_map(table_name, attributes)
        operation_kwargs[ITEM].update(attrs[ITEM])
    if condition is not None:
        condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if return_item_collection_metrics:
        operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
    if return_values:
        operation_kwargs.update(self.get_return_values_map(return_values))
    # We read the conditional operator even without expected passed in to maintain existing behavior.
    conditional_operator = self.get_conditional_operator(conditional_operator or AND)
    if expected:
        # Translate the legacy Expected map into a ConditionExpression.
        condition_expression = self._get_condition_expression(
            table_name, expected, conditional_operator, name_placeholders, expression_attribute_values)
        operation_kwargs[CONDITION_EXPRESSION] = condition_expression
    if name_placeholders:
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(PUT_ITEM, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise PutError("Failed to put item: {0}".format(e), e)
def batch_write_item(self,
                     table_name,
                     put_items=None,
                     delete_items=None,
                     return_consumed_capacity=None,
                     return_item_collection_metrics=None):
    """
    Performs the batch_write_item operation

    :param table_name: name of the table the requests target
    :param put_items: iterable of attribute maps for items to put
    :param delete_items: iterable of key maps identifying items to delete
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param return_item_collection_metrics: whether to return item collection metrics
    :raises ValueError: if neither put_items nor delete_items is given
    :raises PutError: if the underlying request fails
    """
    if put_items is None and delete_items is None:
        raise ValueError("Either put_items or delete_items must be specified")
    operation_kwargs = {
        REQUEST_ITEMS: {
            table_name: []
        }
    }
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if return_item_collection_metrics:
        operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
    put_items_list = []
    if put_items:
        for item in put_items:
            # PutRequest entries carry the full Item attribute map.
            put_items_list.append({
                PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)
            })
    delete_items_list = []
    if delete_items:
        for item in delete_items:
            # DeleteRequest entries carry only the Key attribute map.
            delete_items_list.append({
                DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)
            })
    operation_kwargs[REQUEST_ITEMS][table_name] = delete_items_list + put_items_list
    try:
        return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise PutError("Failed to batch write items: {0}".format(e), e)
def batch_get_item(self,
                   table_name,
                   keys,
                   consistent_read=None,
                   return_consumed_capacity=None,
                   attributes_to_get=None):
    """
    Performs the batch get item operation

    :param table_name: name of the table to read from
    :param keys: iterable of key maps identifying the items to fetch
    :param consistent_read: whether to use strongly consistent reads
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param attributes_to_get: list of attribute names to project
    :raises GetError: if the underlying request fails
    """
    operation_kwargs = {
        REQUEST_ITEMS: {
            table_name: {}
        }
    }
    # Per-table request arguments, assembled separately then merged below.
    args_map = {}
    name_placeholders = {}
    if consistent_read:
        args_map[CONSISTENT_READ] = consistent_read
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if attributes_to_get is not None:
        projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
        args_map[PROJECTION_EXPRESSION] = projection_expression
    if name_placeholders:
        args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
    keys_map = {KEYS: []}
    for key in keys:
        # Serialize each key into its DynamoDB attribute-value form.
        keys_map[KEYS].append(
            self.get_item_attribute_map(table_name, key)[ITEM]
        )
    operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
    try:
        return self.dispatch(BATCH_GET_ITEM, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise GetError("Failed to batch get items: {0}".format(e), e)
def get_item(self,
             table_name,
             hash_key,
             range_key=None,
             consistent_read=False,
             attributes_to_get=None):
    """
    Performs the GetItem operation and returns the result
    """
    op_kwargs = {}
    placeholders = {}
    # Optional projection of a subset of attributes.
    if attributes_to_get is not None:
        op_kwargs[PROJECTION_EXPRESSION] = create_projection_expression(
            attributes_to_get, placeholders)
    if placeholders:
        op_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(placeholders)
    op_kwargs[CONSISTENT_READ] = consistent_read
    op_kwargs[TABLE_NAME] = table_name
    op_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key))
    try:
        return self.dispatch(GET_ITEM, op_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise GetError("Failed to get item: {0}".format(e), e)
def rate_limited_scan(self,
                      table_name,
                      filter_condition=None,
                      attributes_to_get=None,
                      page_size=None,
                      limit=None,
                      conditional_operator=None,
                      scan_filter=None,
                      exclusive_start_key=None,
                      segment=None,
                      total_segments=None,
                      timeout_seconds=None,
                      read_capacity_to_consume_per_second=10,
                      allow_rate_limited_scan_without_consumed_capacity=None,
                      max_sleep_between_retry=10,
                      max_consecutive_exceptions=10,
                      consistent_read=None,
                      index_name=None):
    """
    Performs a rate limited scan on the table. The API uses the scan API to fetch items from
    DynamoDB. The rate_limited_scan uses the 'ConsumedCapacity' value returned from DynamoDB to
    limit the rate of the scan. 'ProvisionedThroughputExceededException' is also handled and retried.

    :param table_name: Name of the table to perform scan on.
    :param filter_condition: Condition used to restrict the scan results
    :param attributes_to_get: A list of attributes to return.
    :param page_size: Page size of the scan to DynamoDB
    :param limit: Used to limit the number of results returned
    :param conditional_operator:
    :param scan_filter: A map indicating the condition that evaluates the scan results
    :param exclusive_start_key: If set, provides the starting point for scan.
    :param segment: If set, then scans the segment
    :param total_segments: If set, then specifies total segments
    :param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
        infinitely
    :param read_capacity_to_consume_per_second: Amount of read capacity to consume
        every second
    :param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
        the server does not support returning consumed capacity in responses.
    :param max_sleep_between_retry: Max value for sleep in seconds in between scans during
        throttling/rate limit scenarios
    :param max_consecutive_exceptions: Max number of consecutive ProvisionedThroughputExceededException
        exception for scan to exit
    :param consistent_read: enable consistent read
    :param index_name: an index to perform the scan on
    """
    read_capacity_to_consume_per_ms = float(read_capacity_to_consume_per_second) / 1000
    if allow_rate_limited_scan_without_consumed_capacity is None:
        allow_rate_limited_scan_without_consumed_capacity = get_settings_value(
            'allow_rate_limited_scan_without_consumed_capacity'
        )
    total_consumed_read_capacity = 0.0
    last_evaluated_key = exclusive_start_key
    rate_available = True
    latest_scan_consumed_capacity = 0
    consecutive_provision_throughput_exceeded_ex = 0
    start_time = time.time()

    # Default the page size so one page consumes roughly one second's capacity budget.
    if page_size is None:
        if limit and read_capacity_to_consume_per_second > limit:
            page_size = limit
        else:
            page_size = read_capacity_to_consume_per_second

    while True:
        if rate_available:
            try:
                data = self.scan(
                    table_name,
                    filter_condition=filter_condition,
                    attributes_to_get=attributes_to_get,
                    exclusive_start_key=last_evaluated_key,
                    limit=page_size,
                    conditional_operator=conditional_operator,
                    return_consumed_capacity=TOTAL,
                    scan_filter=scan_filter,
                    segment=segment,
                    total_segments=total_segments,
                    consistent_read=consistent_read,
                    index_name=index_name
                )
                for item in data.get(ITEMS):
                    yield item

                    if limit is not None:
                        limit -= 1
                        if not limit:
                            return

                if CONSUMED_CAPACITY in data:
                    latest_scan_consumed_capacity = data.get(CONSUMED_CAPACITY).get(CAPACITY_UNITS)
                else:
                    if allow_rate_limited_scan_without_consumed_capacity:
                        latest_scan_consumed_capacity = 0
                    else:
                        # Bug fix: the adjacent string literals previously lacked separating
                        # spaces, producing "backconsumed" and "anywaywithout" in the message.
                        raise ScanError('Rate limited scan not possible because the server did not send back '
                                        'consumed capacity information. If you wish scans to complete anyway '
                                        'without functioning rate limiting, set '
                                        'allow_rate_limited_scan_without_consumed_capacity to True in settings.')

                last_evaluated_key = data.get(LAST_EVALUATED_KEY, None)
                consecutive_provision_throughput_exceeded_ex = 0
            except ScanError as e:
                # Only retry if provision throughput is exceeded.
                if isinstance(e.cause, ClientError):
                    code = e.cause.response['Error'].get('Code')
                    if code == "ProvisionedThroughputExceededException":
                        consecutive_provision_throughput_exceeded_ex += 1
                        if consecutive_provision_throughput_exceeded_ex > max_consecutive_exceptions:
                            # Max threshold reached
                            raise
                    else:
                        # Different exception, other than ProvisionedThroughputExceededException
                        raise
                else:
                    # Not a Client error
                    raise

        # No throttling, and no more scans needed. Just return
        if not last_evaluated_key and consecutive_provision_throughput_exceeded_ex == 0:
            return

        current_time = time.time()

        # elapsed_time_ms indicates the time taken in ms from the start of the
        # throttled_scan call.
        elapsed_time_ms = max(1, round((current_time - start_time) * 1000))

        if consecutive_provision_throughput_exceeded_ex == 0:
            total_consumed_read_capacity += latest_scan_consumed_capacity
            consumed_rate = total_consumed_read_capacity / elapsed_time_ms
            rate_available = (read_capacity_to_consume_per_ms - consumed_rate) >= 0

        # consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred.
        # ProvisionedThroughputExceededException can occur if:
        #    - The rate to consume is passed incorrectly.
        #    - External factors, even if the current scan is within limits.
        if not rate_available or (consecutive_provision_throughput_exceeded_ex > 0):
            # Minimum value is 1 second.
            elapsed_time_s = math.ceil(elapsed_time_ms / 1000)
            # Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume--
            time_to_sleep = max(1, round((total_consumed_read_capacity / elapsed_time_s)
                                         / read_capacity_to_consume_per_second))

            # At any moment if the timeout_seconds hits, then return
            if timeout_seconds and (elapsed_time_s + time_to_sleep) > timeout_seconds:
                raise ScanError("Input timeout value {0} has expired".format(timeout_seconds))

            time.sleep(min(math.ceil(time_to_sleep), max_sleep_between_retry))
            # Reset the latest_scan_consumed_capacity, as no scan operation was performed.
            latest_scan_consumed_capacity = 0
def scan(self,
         table_name,
         filter_condition=None,
         attributes_to_get=None,
         limit=None,
         conditional_operator=None,
         scan_filter=None,
         return_consumed_capacity=None,
         exclusive_start_key=None,
         segment=None,
         total_segments=None,
         consistent_read=None,
         index_name=None):
    """
    Performs the scan operation

    :param table_name: name of the table to scan
    :param filter_condition: Condition used to restrict the scan results
    :param attributes_to_get: list of attribute names to project
    :param limit: maximum number of items to evaluate per page
    :param conditional_operator: legacy AND/OR joiner for `scan_filter` entries
    :param scan_filter: legacy filter map (deprecated; use `filter_condition`)
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param exclusive_start_key: key to resume pagination from
    :param segment: segment number for a parallel scan
    :param total_segments: total number of segments for a parallel scan
    :param consistent_read: whether to use strongly consistent reads
    :param index_name: an index to perform the scan on
    :raises ScanError: if the underlying request fails
    """
    # `filter_condition` and the legacy `scan_filter`/`conditional_operator`
    # parameters are mutually exclusive.
    self._check_condition('filter_condition', filter_condition, scan_filter, conditional_operator)
    operation_kwargs = {TABLE_NAME: table_name}
    name_placeholders = {}
    expression_attribute_values = {}
    if filter_condition is not None:
        filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
        operation_kwargs[FILTER_EXPRESSION] = filter_expression
    if attributes_to_get is not None:
        projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
        operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
    if index_name:
        operation_kwargs[INDEX_NAME] = index_name
    if limit is not None:
        operation_kwargs[LIMIT] = limit
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if exclusive_start_key:
        operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
    if segment is not None:
        operation_kwargs[SEGMENT] = segment
    if total_segments:
        operation_kwargs[TOTAL_SEGMENTS] = total_segments
    if scan_filter:
        # Translate the legacy scan filter into a FilterExpression.
        conditional_operator = self.get_conditional_operator(conditional_operator or AND)
        filter_expression = self._get_filter_expression(
            table_name, scan_filter, conditional_operator, name_placeholders, expression_attribute_values)
        operation_kwargs[FILTER_EXPRESSION] = filter_expression
    if consistent_read:
        operation_kwargs[CONSISTENT_READ] = consistent_read
    if name_placeholders:
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(SCAN, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise ScanError("Failed to scan table: {0}".format(e), e)
def query(self,
          table_name,
          hash_key,
          range_key_condition=None,
          filter_condition=None,
          attributes_to_get=None,
          consistent_read=False,
          exclusive_start_key=None,
          index_name=None,
          key_conditions=None,
          query_filters=None,
          conditional_operator=None,
          limit=None,
          return_consumed_capacity=None,
          scan_index_forward=None,
          select=None):
    """
    Performs the Query operation and returns the result

    :param table_name: name of the table to query
    :param hash_key: hash key value to query against
    :param range_key_condition: Condition on the range key
    :param filter_condition: Condition applied to non-key attributes after the key match
    :param attributes_to_get: list of attribute names to project
    :param consistent_read: whether to use strongly consistent reads
    :param exclusive_start_key: key to resume pagination from
    :param index_name: name of an index to query instead of the table
    :param key_conditions: legacy sort-key condition map (deprecated; use `range_key_condition`)
    :param query_filters: legacy filter map (deprecated; use `filter_condition`)
    :param conditional_operator: legacy AND/OR joiner for `query_filters` entries
    :param limit: maximum number of items to evaluate per page
    :param return_consumed_capacity: level of consumed-capacity detail to return
    :param scan_index_forward: sort order of results (True for ascending)
    :param select: which attributes or counts the query should return
    :raises TableError: if the table's metadata cannot be found
    :raises QueryError: if the underlying request fails
    """
    # New-style conditions and their legacy counterparts are mutually exclusive.
    self._check_condition('range_key_condition', range_key_condition, key_conditions, conditional_operator)
    self._check_condition('filter_condition', filter_condition, query_filters, conditional_operator)
    operation_kwargs = {TABLE_NAME: table_name}
    name_placeholders = {}
    expression_attribute_values = {}

    tbl = self.get_meta_table(table_name)
    if tbl is None:
        raise TableError("No such table: {0}".format(table_name))
    # Key attribute names come from the index when one is given, else the table.
    if index_name:
        hash_keyname = tbl.get_index_hash_keyname(index_name)
        if not hash_keyname:
            raise ValueError("No hash key attribute for index: {0}".format(index_name))
        range_keyname = tbl.get_index_range_keyname(index_name)
    else:
        hash_keyname = tbl.hash_keyname
        range_keyname = tbl.range_keyname

    # The hash key is always an equality condition.
    key_condition = self._get_condition(table_name, hash_keyname, '__eq__', hash_key)
    if range_key_condition is not None:
        if range_key_condition.is_valid_range_key_condition(range_keyname):
            key_condition = key_condition & range_key_condition
        elif filter_condition is None:
            # Try to gracefully handle the case where a user passed in a filter as a range key condition
            (filter_condition, range_key_condition) = (range_key_condition, None)
        else:
            raise ValueError("{0} is not a valid range key condition".format(range_key_condition))

    if key_conditions is None or len(key_conditions) == 0:
        pass  # No comparisons on sort key
    elif len(key_conditions) > 1:
        raise ValueError("Multiple attributes are not supported in key_conditions: {0}".format(key_conditions))
    else:
        # Exactly one legacy sort-key condition; translate and AND it in.
        (key, condition), = key_conditions.items()
        operator = condition.get(COMPARISON_OPERATOR)
        if operator not in COMPARISON_OPERATOR_VALUES:
            raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, COMPARISON_OPERATOR_VALUES))
        operator = KEY_CONDITION_OPERATOR_MAP[operator]
        values = condition.get(ATTR_VALUE_LIST)
        sort_key_expression = self._get_condition(table_name, key, operator, *values)
        key_condition = key_condition & sort_key_expression

    operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
        name_placeholders, expression_attribute_values)
    if filter_condition is not None:
        filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
        # FilterExpression does not allow key attributes. Check for hash and range key name placeholders
        hash_key_placeholder = name_placeholders.get(hash_keyname)
        range_key_placeholder = range_keyname and name_placeholders.get(range_keyname)
        if (
            hash_key_placeholder in filter_expression or
            (range_key_placeholder and range_key_placeholder in filter_expression)
        ):
            raise ValueError("'filter_condition' cannot contain key attributes")
        operation_kwargs[FILTER_EXPRESSION] = filter_expression
    if attributes_to_get:
        projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
        operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
    if consistent_read:
        operation_kwargs[CONSISTENT_READ] = True
    if exclusive_start_key:
        operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
    if index_name:
        operation_kwargs[INDEX_NAME] = index_name
    if limit is not None:
        operation_kwargs[LIMIT] = limit
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    # We read the conditional operator even without a query filter passed in to maintain existing behavior.
    conditional_operator = self.get_conditional_operator(conditional_operator or AND)
    if query_filters:
        filter_expression = self._get_filter_expression(
            table_name, query_filters, conditional_operator, name_placeholders, expression_attribute_values)
        operation_kwargs[FILTER_EXPRESSION] = filter_expression
    if select:
        if select.upper() not in SELECT_VALUES:
            raise ValueError("{0} must be one of {1}".format(SELECT, SELECT_VALUES))
        operation_kwargs[SELECT] = str(select).upper()
    if scan_index_forward is not None:
        operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
    if name_placeholders:
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(QUERY, operation_kwargs)
    except BOTOCORE_EXCEPTIONS as e:
        raise QueryError("Failed to query items: {0}".format(e), e)
def _get_condition_expression(self, table_name, expected, conditional_operator,
                              name_placeholders, expression_attribute_values):
    """
    Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
    """
    combined = None
    join_with_and = conditional_operator[CONDITIONAL_OPERATOR] == AND
    # We sort the keys here for determinism. This is mostly done to simplify testing.
    for attr_name in sorted(expected.keys()):
        spec = expected[attr_name]
        if EXISTS in spec:
            # Exists: True maps to attribute_exists, False to attribute_not_exists.
            operator = NOT_NULL if spec.get(EXISTS, True) else NULL
            values = []
        elif VALUE in spec:
            operator = EQ
            values = [spec.get(VALUE)]
        else:
            operator = spec.get(COMPARISON_OPERATOR)
            values = spec.get(ATTR_VALUE_LIST, [])
            if operator not in QUERY_FILTER_VALUES:
                raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
        # NOT_CONTAINS has no direct expression operator; negate CONTAINS instead.
        negate = operator == NOT_CONTAINS
        condition = self._get_condition(
            table_name, attr_name, FILTER_EXPRESSION_OPERATOR_MAP[operator], *values)
        if negate:
            condition = ~condition
        if combined is None:
            combined = condition
        elif join_with_and:
            combined = combined & condition
        else:
            combined = combined | condition
    return combined.serialize(name_placeholders, expression_attribute_values)
def _get_filter_expression(self, table_name, filters, conditional_operator,
                           name_placeholders, expression_attribute_values):
    """
    Builds the FilterExpression needed for Query and Scan operations
    """
    combined = None
    join_with_and = conditional_operator[CONDITIONAL_OPERATOR] == AND
    # We sort the keys here for determinism. This is mostly done to simplify testing.
    for attr_name in sorted(filters.keys()):
        spec = filters[attr_name]
        operator = spec.get(COMPARISON_OPERATOR)
        if operator not in QUERY_FILTER_VALUES:
            raise ValueError("{0} must be one of {1}".format(COMPARISON_OPERATOR, QUERY_FILTER_VALUES))
        # NOT_CONTAINS has no direct expression operator; negate CONTAINS instead.
        negate = operator == NOT_CONTAINS
        condition = self._get_condition(
            table_name, attr_name, FILTER_EXPRESSION_OPERATOR_MAP[operator],
            *spec.get(ATTR_VALUE_LIST, []))
        if negate:
            condition = ~condition
        if combined is None:
            combined = condition
        elif join_with_and:
            combined = combined & condition
        else:
            combined = combined | condition
    return combined.serialize(name_placeholders, expression_attribute_values)
def _get_condition(self, table_name, attribute_name, operator, *values):
    # Serialize each raw value into its DynamoDB-typed attribute map, then
    # apply the named Path operator (e.g. '__eq__') to build a Condition.
    serialized_values = []
    for value in values:
        attr_type = self.get_attribute_type(table_name, attribute_name, value)
        serialized_values.append({attr_type: self.parse_attribute(value)})
    return getattr(Path([attribute_name]), operator)(*serialized_values)
def _check_actions(self, actions, attribute_updates):
if actions is not None:
if attribute_updates is not None:
raise ValueError("Legacy attribute updates cannot be used with update actions")
else:
if attribute_updates is not None:
warnings.warn("Legacy attribute updates are deprecated in favor of update actions")
def _check_condition(self, name, condition, expected_or_filter, conditional_operator):
if condition is not None:
if not isinstance(condition, Condition):
raise ValueError("'{0}' must be an instance of Condition".format(name))
if expected_or_filter or conditional_operator is not None:
raise ValueError("Legacy conditional parameters cannot be used with condition expressions")
else:
if expected_or_filter or conditional_operator is not None:
warnings.warn("Legacy conditional parameters are deprecated in favor of condition expressions")
@staticmethod
def _reverse_dict(d):
    # Invert a {key: value} mapping into {value: key}; values must be hashable.
    return {value: key for key, value in six.iteritems(d)}
def _convert_binary(attr):
    """
    Decodes base64-encoded binary ('B') and binary-set ('BS') values in place
    within a single attribute-value map.
    """
    if BINARY_SHORT in attr:
        attr[BINARY_SHORT] = b64decode(attr[BINARY_SHORT].encode(DEFAULT_ENCODING))
    elif BINARY_SET_SHORT in attr:
        encoded_values = attr[BINARY_SET_SHORT]
        if encoded_values and len(encoded_values):
            attr[BINARY_SET_SHORT] = set(
                b64decode(encoded.encode(DEFAULT_ENCODING)) for encoded in encoded_values
            )
| pynamodb/connection/base.py | 68,593 | A higher level abstraction over botocore
A pythonic wrapper around table metadata
Create a prepared request object from request_dict, and operation_model
Builds the ConditionExpression needed for DeleteItem, PutItem, and UpdateItem operations
Builds the FilterExpression needed for Query and Scan operations
Simulate botocore's binary attribute handling
Sends a debug message to the logger
Sends a debug message to the logger about a response
Sends an error message to the logger
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch requests for unit testing
Performs the batch get item operation
Performs the batch_write_item operation
Returns a botocore dynamodb client
Performs the CreateTable operation
Performs the DeleteItem operation and returns the result
Performs the DeleteTable operation
Performs the DescribeTable operation
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
Returns the proper attribute type for a given attribute name
Returns the proper attribute type for a given attribute name
:param value: The attribute value an be supplied just in case the type is already included
Returns a dictionary containing the correct conditional operator,
validating it first.
Builds the consumed capacity map that is common to several operations
Builds the exclusive start key attribute map
Builds the exclusive start key attribute map
Builds the expected map that is common to several operations
Builds the identifier map that is common to several operations
Builds the identifier map that is common to several operations
Returns the name of the hash key for a given index
Returns the name of the hash key for a given index
Performs the GetItem operation and returns the result
Builds up a dynamodb compatible AttributeValue map
Builds up a dynamodb compatible AttributeValue map
Builds the item collection map
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
Returns a MetaTable
Builds the QueryFilter object needed for the Query operation
Builds the return values map that is common to several operations
Returns the name of this table's hash key
Performs the ListTables operation
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
Performs the PutItem operation and returns the result
Performs the Query operation and returns the result
Returns the name of this table's range key
Performs a rate limited scan on the table. The API uses the scan API to fetch items from
DynamoDB. The rate_limited_scan uses the 'ConsumedCapacity' value returned from DynamoDB to
limit the rate of the scan. 'ProvisionedThroughputExceededException' is also handled and retried.
:param table_name: Name of the table to perform scan on.
:param filter_condition: Condition used to restrict the scan results
:param attributes_to_get: A list of attributes to return.
:param page_size: Page size of the scan to DynamoDB
:param limit: Used to limit the number of results returned
:param conditional_operator:
:param scan_filter: A map indicating the condition that evaluates the scan results
:param exclusive_start_key: If set, provides the starting point for scan.
:param segment: If set, then scans the segment
:param total_segments: If set, then specifies total segments
:param timeout_seconds: Timeout value for the rate_limited_scan method, to prevent it from running
indefinitely
:param read_capacity_to_consume_per_second: Amount of read capacity to consume
every second
:param allow_rate_limited_scan_without_consumed_capacity: If set, proceeds without rate limiting if
the server does not support returning consumed capacity in responses.
:param max_sleep_between_retry: Max value for sleep in seconds in between scans during
throttling/rate limit scenarios
:param max_consecutive_exceptions: Max number of consecutive ProvisionedThroughputExceededException
exception for scan to exit
:param consistent_read: enable consistent read
:param index_name: an index to perform the scan on
Return a requests session to execute prepared requests using the same pool
Performs the scan operation
Returns a valid botocore session
Performs the UpdateItem operation
Performs the UpdateTable operation
Lowest level connection
In this case, the user provided a mapping {'key': {'S': 'value'}} This is useful when paginating results, as the LastEvaluatedKey returned is already structured properly The call requests_session.send(final_prepared_request) ignores the headers which are part of the request session. In order to include the requests session headers inside the request, we create a new request object, and call prepare_request with the newly created request object No backoff for fast-fail exceptions that likely failed at the frontend Extract error code from __type Batch operations can hit multiple tables, report them comma separated We don't retry on a ConditionalCheckFailedException or other 4xx (except for throughput related errors) because we assume they will fail in perpetuity. Retrying when there is already contention could cause other problems in part due to unnecessary consumption of throughput. We use fully-jittered exponentially-backed-off retries: https://www.awsarchitectureblog.com/2015/03/backoff.html botocore client creation is not thread safe as of v1.2.5+ (see issue 153) botocore has a known issue where it will cache empty credentials https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.pyL1016-L1021 if the client does not have credentials, we create a new client otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles We read the conditional operator even without expected passed in to maintain existing behavior. We sort the keys here for determinism. This is mostly done to simplify testing. We read the conditional operator even without expected passed in to maintain existing behavior. We read the conditional operator even without expected passed in to maintain existing behavior. Only retry if provision throughput is exceeded. Max threshold reached Different exception, other than ProvisionedThroughputExceededException Not a Client error No throttling, and no more scans needed. 
Just return elapsed_time_ms indicates the time taken in ms from the start of the throttled_scan call. consecutive_provision_throughput_exceeded_ex > 0 indicates ProvisionedThroughputExceededException occurred. ProvisionedThroughputExceededException can occur if: - The rate to consume is passed incorrectly. - External factors, even if the current scan is within limits. Minimum value is 1 second. Sleep proportional to the ratio of --consumed capacity-- to --capacity to consume-- At any moment if the timeout_seconds hits, then return Reset the latest_scan_consumed_capacity, as no scan operation was performed. Try to gracefully handle the case where a user passed in a filter as a range key condition No comparisons on sort key FilterExpression does not allow key attributes. Check for hash and range key name placeholders We read the conditional operator even without a query filter passed in to maintain existing behavior. We sort the keys here for determinism. This is mostly done to simplify testing. We sort the keys here for determinism. This is mostly done to simplify testing. | 7,504 | en | 0.813774 |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling to report this module's maturity and support level.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_certificate_facts
short_description: Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple WaasCertificate resources in Oracle Cloud Infrastructure
- Gets a list of SSL certificates that can be used in a WAAS policy.
- If I(certificate_id) is specified, the details of a single WaasCertificate will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
certificate_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the SSL certificate used in the WAAS policy. This number is
generated when the certificate is added to the policy.
- Required to get a specific waas_certificate.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. This number is generated when the
compartment is created.
- Required to list multiple waas_certificates.
type: str
sort_by:
description:
- The value by which certificate summaries are sorted in a paginated 'List' call. If unspecified, defaults to `timeCreated`.
type: str
choices:
- "id"
- "compartmentId"
- "displayName"
- "notValidAfter"
- "timeCreated"
sort_order:
description:
- The value of the sorting direction of resources in a paginated 'List' call. If unspecified, defaults to `DESC`.
type: str
choices:
- "ASC"
- "DESC"
display_name:
description:
- Filter certificates using a list of display names.
type: list
aliases: ["name"]
lifecycle_state:
description:
- Filter certificates using a list of lifecycle states.
type: list
choices:
- "CREATING"
- "ACTIVE"
- "FAILED"
- "UPDATING"
- "DELETING"
- "DELETED"
time_created_greater_than_or_equal_to:
description:
- A filter that matches certificates created on or after the specified date-time.
type: str
time_created_less_than:
description:
- A filter that matches certificates created before the specified date-time.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List waas_certificates
oci_waas_certificate_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific waas_certificate
oci_waas_certificate_facts:
certificate_id: ocid1.certificate.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
waas_certificates:
description:
- List of WaasCertificate resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the certificate's compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
display_name:
description:
- The user-friendly name of the certificate.
returned: on success
type: string
sample: display_name_example
issued_by:
description:
- ""
returned: on success
type: string
sample: issued_by_example
subject_name:
description:
- ""
returned: on success
type: complex
contains:
country:
description:
- ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's
website,https://www.iso.org/obp/ui/#search/code/).
returned: on success
type: string
sample: country_example
state_province:
description:
- The province where the organization is located.
returned: on success
type: string
sample: state_province_example
locality:
description:
- The city in which the organization is located.
returned: on success
type: string
sample: locality_example
organization:
description:
- The organization name.
returned: on success
type: string
sample: organization_example
organizational_unit:
description:
- The field to differentiate between divisions within an organization.
returned: on success
type: string
sample: organizational_unit_example
common_name:
description:
- The fully qualified domain name used for DNS lookups of the server.
returned: on success
type: string
sample: common_name_example
email_address:
description:
- The email address of the server's administrator.
returned: on success
type: string
sample: email_address_example
issuer_name:
description:
- ""
returned: on success
type: complex
contains:
country:
description:
- ISO 3166-1 alpha-2 code of the country where the organization is located. For a list of codes, see L(ISO's
website,https://www.iso.org/obp/ui/#search/code/).
returned: on success
type: string
sample: country_example
state_province:
description:
- The province where the organization is located.
returned: on success
type: string
sample: state_province_example
locality:
description:
- The city in which the organization is located.
returned: on success
type: string
sample: locality_example
organization:
description:
- The organization name.
returned: on success
type: string
sample: organization_example
organizational_unit:
description:
- The field to differentiate between divisions within an organization.
returned: on success
type: string
sample: organizational_unit_example
common_name:
description:
- The Certificate Authority (CA) name.
returned: on success
type: string
sample: common_name_example
email_address:
description:
- The email address of the server's administrator.
returned: on success
type: string
sample: email_address_example
serial_number:
description:
- A unique, positive integer assigned by the Certificate Authority (CA). The issuer name and serial number identify a unique certificate.
returned: on success
type: string
sample: serial_number_example
version:
description:
- The version of the encoded certificate.
returned: on success
type: int
sample: 56
signature_algorithm:
description:
- The identifier for the cryptographic algorithm used by the Certificate Authority (CA) to sign this certificate.
returned: on success
type: string
sample: signature_algorithm_example
time_not_valid_before:
description:
- The date and time the certificate will become valid, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
time_not_valid_after:
description:
- The date and time the certificate will expire, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
public_key_info:
description:
- ""
returned: on success
type: complex
contains:
algorithm:
description:
- The algorithm identifier and parameters for the public key.
returned: on success
type: string
sample: algorithm_example
exponent:
description:
- The private key exponent.
returned: on success
type: int
sample: 56
key_size:
description:
- The number of bits in a key used by a cryptographic algorithm.
returned: on success
type: int
sample: 56
extensions:
description:
- Additional attributes associated with users or public keys for managing relationships between Certificate Authorities.
returned: on success
type: complex
contains:
name:
description:
- The certificate extension name.
returned: on success
type: string
sample: name_example
is_critical:
description:
- The critical flag of the extension. Critical extensions must be processed, non-critical extensions can be ignored.
returned: on success
type: bool
sample: true
value:
description:
- The certificate extension value.
returned: on success
type: string
sample: value_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
lifecycle_state:
description:
- The current lifecycle state of the SSL certificate.
returned: on success
type: string
sample: CREATING
time_created:
description:
- The date and time the certificate was created, expressed in RFC 3339 timestamp format.
returned: on success
type: string
sample: 2018-11-16T21:10:29Z
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"issued_by": "issued_by_example",
"subject_name": {
"country": "country_example",
"state_province": "state_province_example",
"locality": "locality_example",
"organization": "organization_example",
"organizational_unit": "organizational_unit_example",
"common_name": "common_name_example",
"email_address": "email_address_example"
},
"issuer_name": {
"country": "country_example",
"state_province": "state_province_example",
"locality": "locality_example",
"organization": "organization_example",
"organizational_unit": "organizational_unit_example",
"common_name": "common_name_example",
"email_address": "email_address_example"
},
"serial_number": "serial_number_example",
"version": 56,
"signature_algorithm": "signature_algorithm_example",
"time_not_valid_before": "2018-11-16T21:10:29Z",
"time_not_valid_after": "2018-11-16T21:10:29Z",
"public_key_info": {
"algorithm": "algorithm_example",
"exponent": 56,
"key_size": 56
},
"extensions": [{
"name": "name_example",
"is_critical": true,
"value": "value_example"
}],
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"lifecycle_state": "CREATING",
"time_created": "2018-11-16T21:10:29Z"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class WaasCertificateFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""
    def get_required_params_for_get(self):
        # A targeted GET needs the certificate OCID.
        return ["certificate_id"]
    def get_required_params_for_list(self):
        # Listing is always scoped to a single compartment.
        return ["compartment_id"]
    def get_resource(self):
        """Fetch a single certificate by OCID, retrying with backoff."""
        certificate_id = self.module.params.get("certificate_id")
        return oci_common_utils.call_with_backoff(
            self.client.get_certificate, certificate_id=certificate_id
        )
    def list_resources(self):
        """List certificates in the compartment, forwarding any optional filters set by the user."""
        optional_param_names = (
            "sort_by",
            "sort_order",
            "display_name",
            "lifecycle_state",
            "time_created_greater_than_or_equal_to",
            "time_created_less_than",
        )
        # Only forward parameters the user actually supplied.
        optional_kwargs = {
            name: self.module.params[name]
            for name in optional_param_names
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.list_all_resources(
            self.client.list_certificates,
            compartment_id=self.module.params.get("compartment_id"),
            **optional_kwargs
        )
WaasCertificateFactsHelperCustom = get_custom_class("WaasCertificateFactsHelperCustom")
class ResourceFactsHelper(
    WaasCertificateFactsHelperCustom, WaasCertificateFactsHelperGen
):
    """Facts helper combining custom overrides (first in MRO) with the generated implementation."""
    pass
def main():
    """Module entry point: validate arguments and return WAAS certificate facts."""
    arg_spec = oci_common_utils.get_common_arg_spec()
    arg_spec.update(
        {
            "certificate_id": {"aliases": ["id"], "type": "str"},
            "compartment_id": {"type": "str"},
            "sort_by": {
                "type": "str",
                "choices": [
                    "id",
                    "compartmentId",
                    "displayName",
                    "notValidAfter",
                    "timeCreated",
                ],
            },
            "sort_order": {"type": "str", "choices": ["ASC", "DESC"]},
            "display_name": {"aliases": ["name"], "type": "list"},
            "lifecycle_state": {
                "type": "list",
                "choices": [
                    "CREATING",
                    "ACTIVE",
                    "FAILED",
                    "UPDATING",
                    "DELETING",
                    "DELETED",
                ],
            },
            "time_created_greater_than_or_equal_to": {"type": "str"},
            "time_created_less_than": {"type": "str"},
        }
    )
    ansible_module = AnsibleModule(argument_spec=arg_spec)
    if not HAS_OCI_PY_SDK:
        ansible_module.fail_json(msg="oci python sdk required for this module.")
    facts_helper = ResourceFactsHelper(
        module=ansible_module,
        resource_type="waas_certificate",
        service_client_class=WaasClient,
        namespace="waas",
    )
    certificates = []
    # Dispatch on which required parameters were supplied: a single GET,
    # a compartment-wide LIST, or a parameter-validation failure.
    if facts_helper.is_get():
        certificates = [facts_helper.get()]
    elif facts_helper.is_list():
        certificates = facts_helper.list()
    else:
        facts_helper.fail()
    ansible_module.exit_json(waas_certificates=certificates)
# Run only when executed directly (Ansible invokes the module this way).
if __name__ == "__main__":
    main()
| plugins/modules/oci_waas_certificate_facts.py | 18,724 | Supported operations: get, list
!/usr/bin/python Copyright (c) 2017, 2021 Oracle and/or its affiliates. This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) Apache License v2.0 See LICENSE.TXT for details. GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN | 415 | en | 0.744328 |
import os
from dotenv import load_dotenv
# The prefix the bot responds to for commands
PREFIX = '!'
# Emojis the bot should use for certain events
EMOJIS = {
    'DISCORD': '🗨️',  # When a message is sent from Discord
    'HYPIXEL': '🎮',  # When a message is sent from Hypixel
    'JOIN': '📥',     # When a member joins Hypixel
    'LEAVE': '📤'     # When a member leaves Hypixel
}
# List of Owner IDs (to use commands like sumo aaaaaaaaaaaaa)
OWNER_IDS = [635097068741853204]
# Load secrets from the environment (a local .env file is supported via python-dotenv).
load_dotenv()
def _require_env(name):
    """Return the value of environment variable *name*.

    Fails fast with a clear RuntimeError when the variable is missing,
    instead of letting e.g. int(None) raise an opaque TypeError later.
    """
    value = os.getenv(name)
    if value is None:
        raise RuntimeError(
            "Required environment variable %r is not set; "
            "add it to your environment or .env file." % name
        )
    return value
TOKEN = _require_env("TOKEN")
GUILD_CHAT_CHANNEL = int(_require_env("GUILD_CHAT_CHANNEL"))
MINECRAFT_EMAIL = _require_env("MINECRAFT_EMAIL")
MINECRAFT_PASSWORD = _require_env("MINECRAFT_PASSWORD")
| constants.py | 740 | The prefix the bot responds to for commands Emojis the bot should use for certain events When a message is sent from Discord When a message is sent from Hypixel When a member joins Hypixel When a member leaves Hypixel List of Owner IDs (to use commands like sumo aaaaaaaaaaaaa) Don't touch this unless you know what you're doing | 328 | en | 0.923469 |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unittest
from oslo_config import types
class TypeTestHelper(object):
    """Mixin for TestCase subclasses that exercise a single config type.

    Subclasses set the ``type`` class attribute; ``setUp`` exposes it as
    ``self.type_instance`` for the assertion helpers below.
    """
    def setUp(self):
        super(TypeTestHelper, self).setUp()
        self.type_instance = self.type
    def assertConvertedValue(self, s, expected):
        """Assert that converting ``s`` produces exactly ``expected``."""
        actual = self.type_instance(s)
        self.assertEqual(expected, actual)
    def assertInvalid(self, value):
        """Assert that converting ``value`` raises ValueError."""
        with self.assertRaises(ValueError):
            self.type_instance(value)
class StringTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.String conversion, repr and equality."""
    type = types.String()
    def test_empty_string_passes(self):
        self.assertConvertedValue('', '')
    def test_should_return_same_string_if_valid(self):
        self.assertConvertedValue('foo bar', 'foo bar')
    def test_listed_value(self):
        self.type_instance = types.String(choices=['foo', 'bar'])
        self.assertConvertedValue('foo', 'foo')
    def test_unlisted_value(self):
        self.type_instance = types.String(choices=['foo', 'bar'])
        self.assertInvalid('baz')
    def test_with_no_values_returns_error(self):
        self.type_instance = types.String(choices=[])
        self.assertInvalid('foo')
    def test_string_with_non_closed_quote_is_invalid(self):
        self.type_instance = types.String(quotes=True)
        self.assertInvalid('"foo bar')
        self.assertInvalid("'bar baz")
    def test_quotes_are_stripped(self):
        self.type_instance = types.String(quotes=True)
        self.assertConvertedValue('"foo bar"', 'foo bar')
    def test_trailing_quote_is_ok(self):
        self.type_instance = types.String(quotes=True)
        self.assertConvertedValue('foo bar"', 'foo bar"')
    def test_repr(self):
        t = types.String()
        self.assertEqual('String', repr(t))
    def test_repr_with_choices(self):
        t = types.String(choices=['foo', 'bar'])
        self.assertEqual('String(choices=[\'foo\', \'bar\'])', repr(t))
    def test_equal(self):
        self.assertTrue(types.String() == types.String())
    def test_equal_with_same_choices(self):
        # Equality is choice-order- and container-insensitive.
        t1 = types.String(choices=['foo', 'bar'])
        t2 = types.String(choices=['foo', 'bar'])
        t3 = types.String(choices=('foo', 'bar'))
        t4 = types.String(choices=['bar', 'foo'])
        self.assertTrue(t1 == t2)
        self.assertTrue(t1 == t3)
        self.assertTrue(t1 == t4)
    def test_not_equal_with_different_choices(self):
        t1 = types.String(choices=['foo', 'bar'])
        t2 = types.String(choices=['foo', 'baz'])
        self.assertFalse(t1 == t2)
    def test_equal_with_equal_quote_flags(self):
        # Renamed from "..._falgs" to fix the typo.
        t1 = types.String(quotes=True)
        t2 = types.String(quotes=True)
        self.assertTrue(t1 == t2)
    def test_not_equal_with_different_quote_flags(self):
        # Renamed from "..._falgs" to fix the typo.
        t1 = types.String(quotes=False)
        t2 = types.String(quotes=True)
        self.assertFalse(t1 == t2)
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.String() == types.Integer())
    def test_regex_matches(self):
        self.type_instance = types.String(regex=re.compile("^[A-Z]"))
        self.assertConvertedValue("Foo", "Foo")
    def test_regex_matches_uncompiled(self):
        self.type_instance = types.String(regex="^[A-Z]")
        self.assertConvertedValue("Foo", "Foo")
    def test_regex_fails(self):
        self.type_instance = types.String(regex=re.compile("^[A-Z]"))
        self.assertInvalid("foo")
    def test_regex_and_choices_raises(self):
        # regex and choices are mutually exclusive constraints.
        self.assertRaises(ValueError,
                          types.String,
                          regex=re.compile("^[A-Z]"),
                          choices=["Foo", "Bar", "baz"])
    def test_equal_with_same_regex(self):
        t1 = types.String(regex=re.compile("^[A-Z]"))
        t2 = types.String(regex=re.compile("^[A-Z]"))
        self.assertTrue(t1 == t2)
    def test_not_equal_with_different_regex(self):
        t1 = types.String(regex=re.compile("^[A-Z]"))
        t2 = types.String(regex=re.compile("^[a-z]"))
        self.assertFalse(t1 == t2)
    def test_ignore_case(self):
        self.type_instance = types.String(choices=['foo', 'bar'],
                                          ignore_case=True)
        self.assertConvertedValue('Foo', 'Foo')
        self.assertConvertedValue('bAr', 'bAr')
    def test_ignore_case_raises(self):
        self.type_instance = types.String(choices=['foo', 'bar'],
                                          ignore_case=False)
        self.assertRaises(ValueError, self.assertConvertedValue, 'Foo', 'Foo')
    def test_regex_and_ignore_case(self):
        self.type_instance = types.String(regex=re.compile("^[A-Z]"),
                                          ignore_case=True)
        self.assertConvertedValue("foo", "foo")
    def test_regex_and_ignore_case_str(self):
        self.type_instance = types.String(regex="^[A-Z]", ignore_case=True)
        self.assertConvertedValue("foo", "foo")
    def test_regex_preserve_flags(self):
        # An explicitly compiled pattern's flags win over ignore_case=False.
        self.type_instance = types.String(regex=re.compile("^[A-Z]", re.I),
                                          ignore_case=False)
        self.assertConvertedValue("foo", "foo")
    def test_max_length(self):
        self.type_instance = types.String(max_length=5)
        self.assertInvalid('123456')
        self.assertConvertedValue('12345', '12345')
class BooleanTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.Boolean string-to-bool conversion."""
    type = types.Boolean()
    def test_True(self):
        self.assertConvertedValue('True', True)
    def test_yes(self):
        self.assertConvertedValue('yes', True)
    def test_on(self):
        self.assertConvertedValue('on', True)
    def test_1(self):
        self.assertConvertedValue('1', True)
    def test_False(self):
        self.assertConvertedValue('False', False)
    def test_no(self):
        self.assertConvertedValue('no', False)
    def test_off(self):
        self.assertConvertedValue('off', False)
    def test_0(self):
        self.assertConvertedValue('0', False)
    def test_other_values_produce_error(self):
        self.assertInvalid('foo')
    def test_repr(self):
        self.assertEqual('Boolean', repr(types.Boolean()))
    def test_equal(self):
        self.assertEqual(types.Boolean(), types.Boolean())
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.Boolean() == types.String())
class IntegerTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.Integer: conversion, bounds (min/max),
    choices, repr and equality semantics."""
    type = types.Integer()
    def test_empty_string(self):
        # Empty / whitespace-only input converts to None, not an error.
        self.assertConvertedValue('', None)
    def test_whitespace_string(self):
        self.assertConvertedValue(" \t\t\t\t", None)
    def test_positive_values_are_valid(self):
        self.assertConvertedValue('123', 123)
    def test_zero_is_valid(self):
        self.assertConvertedValue('0', 0)
    def test_negative_values_are_valid(self):
        self.assertConvertedValue('-123', -123)
    def test_leading_whitespace_is_ignored(self):
        self.assertConvertedValue(' 5', 5)
    def test_trailing_whitespace_is_ignored(self):
        self.assertConvertedValue('7 ', 7)
    def test_non_digits_are_invalid(self):
        self.assertInvalid('12a45')
    def test_repr(self):
        t = types.Integer()
        self.assertEqual('Integer', repr(t))
    def test_repr_with_min(self):
        t = types.Integer(min=123)
        self.assertEqual('Integer(min=123)', repr(t))
    def test_repr_with_max(self):
        t = types.Integer(max=456)
        self.assertEqual('Integer(max=456)', repr(t))
    def test_repr_with_min_and_max(self):
        t = types.Integer(min=123, max=456)
        self.assertEqual('Integer(min=123, max=456)', repr(t))
        # min=0/max=0 must still appear in the repr (falsy but set).
        t = types.Integer(min=0, max=0)
        self.assertEqual('Integer(min=0, max=0)', repr(t))
    def test_repr_with_choices(self):
        t = types.Integer(choices=[80, 457])
        self.assertEqual('Integer(choices=[80, 457])', repr(t))
    def test_equal(self):
        self.assertTrue(types.Integer() == types.Integer())
    def test_equal_with_same_min_and_no_max(self):
        self.assertTrue(types.Integer(min=123) == types.Integer(min=123))
    def test_equal_with_same_max_and_no_min(self):
        self.assertTrue(types.Integer(max=123) == types.Integer(max=123))
    def test_equal_with_same_min_and_max(self):
        t1 = types.Integer(min=1, max=123)
        t2 = types.Integer(min=1, max=123)
        self.assertTrue(t1 == t2)
    def test_equal_with_same_choices(self):
        # Choice order does not affect equality.
        t1 = types.Integer(choices=[80, 457])
        t2 = types.Integer(choices=[457, 80])
        self.assertTrue(t1 == t2)
    def test_not_equal(self):
        self.assertFalse(types.Integer(min=123) == types.Integer(min=456))
        self.assertFalse(types.Integer(choices=[80, 457]) ==
                         types.Integer(choices=[80, 40]))
        self.assertFalse(types.Integer(choices=[80, 457]) ==
                         types.Integer())
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.Integer() == types.String())
    def test_choices_with_min_max(self):
        # choices cannot be combined with min/max bounds.
        self.assertRaises(ValueError,
                          types.Integer,
                          min=10,
                          choices=[50, 60])
        self.assertRaises(ValueError,
                          types.Integer,
                          max=100,
                          choices=[50, 60])
        self.assertRaises(ValueError,
                          types.Integer,
                          min=10, max=100,
                          choices=[50, 60])
    def test_min_greater_max(self):
        # A bound pair with min > max is rejected at construction time.
        self.assertRaises(ValueError,
                          types.Integer,
                          min=100, max=50)
        self.assertRaises(ValueError,
                          types.Integer,
                          min=-50, max=-100)
        self.assertRaises(ValueError,
                          types.Integer,
                          min=0, max=-50)
        self.assertRaises(ValueError,
                          types.Integer,
                          min=50, max=0)
    def test_with_max_and_min(self):
        t = types.Integer(min=123, max=456)
        self.assertRaises(ValueError, t, 122)
        t(123)
        t(300)
        t(456)
        self.assertRaises(ValueError, t, 0)
        self.assertRaises(ValueError, t, 457)
    def test_with_min_zero(self):
        t = types.Integer(min=0, max=456)
        self.assertRaises(ValueError, t, -1)
        t(0)
        t(123)
        t(300)
        t(456)
        self.assertRaises(ValueError, t, -201)
        self.assertRaises(ValueError, t, 457)
    def test_with_max_zero(self):
        t = types.Integer(min=-456, max=0)
        self.assertRaises(ValueError, t, 1)
        t(0)
        t(-123)
        t(-300)
        t(-456)
        self.assertRaises(ValueError, t, 201)
        self.assertRaises(ValueError, t, -457)
    def test_with_choices_list(self):
        t = types.Integer(choices=[80, 457])
        self.assertRaises(ValueError, t, 1)
        self.assertRaises(ValueError, t, 200)
        self.assertRaises(ValueError, t, -457)
        t(80)
        t(457)
    def test_with_choices_tuple(self):
        t = types.Integer(choices=(80, 457))
        self.assertRaises(ValueError, t, 1)
        self.assertRaises(ValueError, t, 200)
        self.assertRaises(ValueError, t, -457)
        t(80)
        t(457)
class FloatTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.Float conversion and equality."""
    type = types.Float()
    def test_decimal_format(self):
        v = self.type_instance('123.456')
        self.assertAlmostEqual(v, 123.456)
    def test_decimal_format_negative_float(self):
        v = self.type_instance('-123.456')
        self.assertAlmostEqual(v, -123.456)
    def test_exponential_format(self):
        # Scientific notation is accepted.
        v = self.type_instance('123e-2')
        self.assertAlmostEqual(v, 1.23)
    def test_non_float_is_invalid(self):
        # Comma decimal separators are not supported.
        self.assertInvalid('123,345')
        self.assertInvalid('foo')
    def test_repr(self):
        self.assertEqual('Float', repr(types.Float()))
    def test_equal(self):
        self.assertTrue(types.Float() == types.Float())
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.Float() == types.Integer())
class ListTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.List: element parsing, nesting,
    custom item types, bounds ([...]) handling and equality."""
    type = types.List()
    def test_empty_value(self):
        self.assertConvertedValue('', [])
    def test_single_value(self):
        # Surrounding whitespace is stripped from elements.
        self.assertConvertedValue(' foo bar ',
                                  ['foo bar'])
    def test_list_of_values(self):
        self.assertConvertedValue(' foo bar, baz ',
                                  ['foo bar',
                                   'baz'])
    def test_list_of_values_containing_commas(self):
        # Quoted elements may contain the separator.
        self.type_instance = types.List(types.String(quotes=True))
        self.assertConvertedValue('foo,"bar, baz",bam',
                                  ['foo',
                                   'bar, baz',
                                   'bam'])
    def test_list_of_lists(self):
        self.type_instance = types.List(
            types.List(types.String(), bounds=True)
        )
        self.assertConvertedValue('[foo],[bar, baz],[bam]',
                                  [['foo'], ['bar', 'baz'], ['bam']])
    def test_list_of_custom_type(self):
        self.type_instance = types.List(types.Integer())
        self.assertConvertedValue('1,2,3,5',
                                  [1, 2, 3, 5])
    def test_bounds_parsing(self):
        self.type_instance = types.List(types.Integer(), bounds=True)
        self.assertConvertedValue('[1,2,3]', [1, 2, 3])
    def test_bounds_required(self):
        # With bounds=True both brackets are mandatory.
        self.type_instance = types.List(types.Integer(), bounds=True)
        self.assertInvalid('1,2,3')
        self.assertInvalid('[1,2,3')
        self.assertInvalid('1,2,3]')
    def test_repr(self):
        t = types.List(types.Integer())
        self.assertEqual('List of Integer', repr(t))
    def test_equal(self):
        self.assertTrue(types.List() == types.List())
    def test_equal_with_equal_custom_item_types(self):
        it1 = types.Integer()
        it2 = types.Integer()
        self.assertTrue(types.List(it1) == types.List(it2))
    def test_not_equal_with_non_equal_custom_item_types(self):
        it1 = types.Integer()
        it2 = types.String()
        self.assertFalse(it1 == it2)
        self.assertFalse(types.List(it1) == types.List(it2))
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.List() == types.Integer())
class DictTypeTests(TypeTestHelper, unittest.TestCase):
    """Unit tests for oslo_config.types.Dict: key:value parsing, nesting,
    custom value types, bounds ({...}) handling and equality."""
    type = types.Dict()
    def test_empty_value(self):
        self.assertConvertedValue('', {})
    def test_single_value(self):
        # Whitespace around keys and values is stripped.
        self.assertConvertedValue(' foo: bar ',
                                  {'foo': 'bar'})
    def test_dict_of_values(self):
        self.assertConvertedValue(' foo: bar, baz: 123 ',
                                  {'foo': 'bar',
                                   'baz': '123'})
    def test_custom_value_type(self):
        self.type_instance = types.Dict(types.Integer())
        self.assertConvertedValue('foo:123, bar: 456',
                                  {'foo': 123,
                                   'bar': 456})
    def test_dict_of_values_containing_commas(self):
        # Quoted values may contain the separator.
        self.type_instance = types.Dict(types.String(quotes=True))
        self.assertConvertedValue('foo:"bar, baz",bam:quux',
                                  {'foo': 'bar, baz',
                                   'bam': 'quux'})
    def test_dict_of_dicts(self):
        self.type_instance = types.Dict(
            types.Dict(types.String(), bounds=True)
        )
        self.assertConvertedValue('k1:{k1:v1,k2:v2},k2:{k3:v3}',
                                  {'k1': {'k1': 'v1', 'k2': 'v2'},
                                   'k2': {'k3': 'v3'}})
    def test_bounds_parsing(self):
        self.type_instance = types.Dict(types.String(), bounds=True)
        self.assertConvertedValue('{foo:bar,baz:123}',
                                  {'foo': 'bar',
                                   'baz': '123'})
    def test_bounds_required(self):
        # With bounds=True both braces are mandatory.
        self.type_instance = types.Dict(types.String(), bounds=True)
        self.assertInvalid('foo:bar,baz:123')
        self.assertInvalid('{foo:bar,baz:123')
        self.assertInvalid('foo:bar,baz:123}')
    def test_no_mapping_produces_error(self):
        self.assertInvalid('foo,bar')
    def test_repr(self):
        t = types.Dict(types.Integer())
        self.assertEqual('Dict of Integer', repr(t))
    def test_equal(self):
        self.assertTrue(types.Dict() == types.Dict())
    def test_equal_with_equal_custom_item_types(self):
        it1 = types.Integer()
        it2 = types.Integer()
        self.assertTrue(types.Dict(it1) == types.Dict(it2))
    def test_not_equal_with_non_equal_custom_item_types(self):
        it1 = types.Integer()
        it2 = types.String()
        self.assertFalse(it1 == it2)
        self.assertFalse(types.Dict(it1) == types.Dict(it2))
    def test_not_equal_to_other_class(self):
        self.assertFalse(types.Dict() == types.Integer())
class IPAddressTypeTests(TypeTestHelper, unittest.TestCase):
    """Tests for the version-agnostic IPAddress config type."""

    type = types.IPAddress()

    def test_ipv4_address(self):
        addr = '192.168.0.1'
        self.assertConvertedValue(addr, addr)

    def test_ipv6_address(self):
        addr = 'abcd:ef::1'
        self.assertConvertedValue(addr, addr)

    def test_strings(self):
        # Non-address strings (including the empty string) are rejected.
        for bad in ('', 'foo'):
            self.assertInvalid(bad)

    def test_numbers(self):
        # Numeric (non-string) input is rejected outright.
        for bad in (1, -1, 3.14):
            self.assertInvalid(bad)
class IPv4AddressTypeTests(IPAddressTypeTests):
    """IPAddress restricted to version 4: inherits the v4 tests, but a v6
    literal must now be rejected."""
    type = types.IPAddress(4)

    def test_ipv6_address(self):
        # Overrides the base test: a v6 literal is invalid for a v4-only type.
        self.assertInvalid('abcd:ef::1')
class IPv6AddressTypeTests(IPAddressTypeTests):
    """IPAddress restricted to version 6: inherits the v6 tests, but a v4
    literal must now be rejected."""
    type = types.IPAddress(6)

    def test_ipv4_address(self):
        # Overrides the base test: a v4 literal is invalid for a v6-only type.
        self.assertInvalid('192.168.0.1')
| .tox/scenario/lib/python2.7/site-packages/oslo_config/tests/test_types.py | 18,527 | Copyright 2013 Mirantis, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 579 | en | 0.856795 |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
# Names of the snapshot table and its label/cc/component join tables.
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'

# Column lists used when inserting/selecting rows of the tables above.
# Order matters: StoreIssueSnapshots builds row tuples to match
# ISSUESNAPSHOT_COLS[1:] (i.e. everything but the auto-generated 'id').
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
    'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
    'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
  """Class for querying chart data.

  All reads and writes of IssueSnapshot rows (and their join tables) go
  through this service.
  """

  def __init__(self, config_service):
    """Constructor for ChartService.

    Args:
      config_service (ConfigService): An instance of ConfigService.
    """
    self.config_service = config_service

    # Set up SQL table objects.
    self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
    self.issuesnapshot2cc_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2CC_TABLE_NAME)
    self.issuesnapshot2component_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
    self.issuesnapshot2label_tbl = sql.SQLTableManager(
        ISSUESNAPSHOT2LABEL_TABLE_NAME)

  def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
                          project, perms, group_by=None, label_prefix=None,
                          query=None, canned_query=None):
    """Queries historical issue counts grouped by label or component.

    Args:
      cnxn: A MonorailConnection instance.
      services: A Services instance.
      unixtime: An integer representing the Unix time in seconds.
      effective_ids: The effective User IDs associated with the current user.
      project: A project object representing the current project.
      perms: A permissions object associated with the current user.
      group_by (str, optional): Which dimension to group by. Values can
        be 'label', 'component', or None, in which case no grouping will
        be applied.
      label_prefix: Required when group_by is 'label.' Will limit the query to
        only labels with the specified prefix (for example 'Pri').
      query (str, optional): A query string from the request to apply to
        the snapshot query.
      canned_query (str, optional): Parsed canned query applied to the query
        scope.

    Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
      2. A list of any unsupported query conditions in query.
      3. A boolean that is true if any results were capped.
    """
    project_config = services.config.GetProjectConfig(cnxn,
        project.project_id)
    try:
      query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
          cnxn, services, project_config, query, canned_query, project)
    except ast2select.NoPossibleResults:
      # The user query can never match anything; short-circuit.
      return {}, ['Invalid query.'], False

    # Labels the current user is not allowed to see.
    restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
        cnxn, None, self.config_service, effective_ids, project, perms)

    left_joins = [
        ('Issue ON IssueSnapshot.issue_id = Issue.id', []),
    ]

    if restricted_label_ids:
      # Join against restricted labels so we can exclude (or allow for
      # reporter/owner/cc) issues that carry them.
      left_joins.append(
          (('Issue2Label AS Forbidden_label'
            ' ON Issue.id = Forbidden_label.issue_id'
            ' AND Forbidden_label.label_id IN (%s)' % (
              sql.PlaceHolders(restricted_label_ids)
          )), restricted_label_ids))

    if effective_ids:
      # Join against CCs so involvement can override label restrictions.
      left_joins.append(
          ('Issue2Cc AS I2cc'
           ' ON Issue.id = I2cc.issue_id'
           ' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
           effective_ids))

    # TODO(jeffcarp): Handle case where there are issues with no labels.
    where = [
        ('IssueSnapshot.period_start <= %s', [unixtime]),
        ('IssueSnapshot.period_end > %s', [unixtime]),
        ('IssueSnapshot.project_id = %s', [project.project_id]),
        ('Issue.is_spam = %s', [False]),
        ('Issue.deleted = %s', [False]),
    ]

    # Restrict to issues with no forbidden labels, unless the user is
    # personally involved (reporter/owner/cc), which grants visibility.
    forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
    if effective_ids:
      if restricted_label_ids:
        forbidden_label_clause = ' OR %s' % forbidden_label_clause
      else:
        forbidden_label_clause = ''

      where.append(
          ((
            '(Issue.reporter_id IN (%s)'
            ' OR Issue.owner_id IN (%s)'
            ' OR I2cc.cc_id IS NOT NULL'
            '%s)'
          ) % (
            sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
            forbidden_label_clause
          ),
            list(effective_ids) + list(effective_ids)
          ))
    else:
      where.append((forbidden_label_clause, []))

    # Translate the requested grouping dimension into SELECT columns,
    # extra joins, and SQL GROUP BY terms.
    if group_by == 'component':
      cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
      left_joins.extend([
          (('IssueSnapshot2Component AS Is2c ON'
            ' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
          ('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
      ])
      group_by = ['Comp.path']
    elif group_by == 'label':
      cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
      left_joins.extend([
          (('IssueSnapshot2Label AS Is2l'
            ' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
          ('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
      ])

      if not label_prefix:
        raise ValueError('`label_prefix` required when grouping by label.')

      # TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
      # ensure regex is case-insensitive.
      where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
      group_by = ['Lab.label']
    elif group_by == 'open':
      cols = ['IssueSnapshot.is_open',
          'COUNT(IssueSnapshot.issue_id) AS issue_count']
      group_by = ['IssueSnapshot.is_open']
    elif group_by == 'status':
      left_joins.append(('StatusDef AS Stats ON ' \
          'Stats.id = IssueSnapshot.status_id', []))
      cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
      group_by = ['Stats.status']
    elif group_by == 'owner':
      cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
      group_by = ['IssueSnapshot.owner_id']
    elif not group_by:
      cols = ['IssueSnapshot.issue_id']
    else:
      raise ValueError('`group_by` must be label, component, ' \
          'open, status, owner or None.')

    if query_left_joins:
      left_joins.extend(query_left_joins)

    if query_where:
      where.extend(query_where)

    # Fan the query out over all logical shards in parallel.
    promises = []
    for shard_id in range(settings.num_logical_shards):
      count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
          where=where, joins=left_joins, group_by=group_by,
          shard_id=shard_id)
      promises.append(framework_helpers.Promise(cnxn.Execute,
          count_stmt, stmt_args, shard_id=shard_id))

    shard_values_dict = {}
    search_limit_reached = False
    for promise in promises:
      # Wait for each query to complete and merge it into the dict.
      shard_values = list(promise.WaitAndGetValue())
      if not shard_values:
        continue
      if group_by:
        for name, count in shard_values:
          # A count at the per-shard row cap means results were truncated.
          if count >= settings.chart_query_max_rows:
            search_limit_reached = True
          shard_values_dict.setdefault(name, 0)
          shard_values_dict[name] += count
      else:
        # Ungrouped: the single row holds the total issue count.
        if shard_values[0][0] >= settings.chart_query_max_rows:
          search_limit_reached = True
        shard_values_dict.setdefault('total', 0)
        shard_values_dict['total'] += shard_values[0][0]

    unsupported_field_names = list(set([
        field.field_name
        for cond in unsupported_conds
        for field in cond.field_defs
    ]))

    return shard_values_dict, unsupported_field_names, search_limit_reached

  def StoreIssueSnapshots(self, cnxn, issues, commit=True):
    """Adds an IssueSnapshot and updates the previous one for each issue."""
    for issue in issues:
      right_now = self._currentTime()

      # Update previous snapshot of current issue's end time to right now.
      self.issuesnapshot_tbl.Update(cnxn,
          delta={'period_end': right_now},
          where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
              ('IssueSnapshot.period_end = %s',
                  [settings.maximum_snapshot_period_end])],
          commit=commit)

      config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
      period_end = settings.maximum_snapshot_period_end
      is_open = tracker_helpers.MeansOpenInProject(
          tracker_bizobj.GetStatus(issue), config)
      shard = issue.issue_id % settings.num_logical_shards
      status = tracker_bizobj.GetStatus(issue)
      status_id = self.config_service.LookupStatusID(
          cnxn, issue.project_id, status) or None
      owner_id = tracker_bizobj.GetOwnerId(issue) or None

      # Tuple order must match ISSUESNAPSHOT_COLS[1:] below.
      issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
          issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
          period_end, is_open)]

      ids = self.issuesnapshot_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT_COLS[1:],
          issuesnapshot_rows,
          replace=True, commit=commit,
          return_generated_ids=True)
      issuesnapshot_id = ids[0]

      # Add all labels to IssueSnapshot2Label.
      label_rows = [
          (issuesnapshot_id,
           self.config_service.LookupLabelID(cnxn, issue.project_id, label))
          for label in tracker_bizobj.GetLabels(issue)
      ]
      self.issuesnapshot2label_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2LABEL_COLS,
          label_rows, replace=True, commit=commit)

      # Add all CCs to IssueSnapshot2Cc.
      cc_rows = [
          (issuesnapshot_id, cc_id)
          for cc_id in tracker_bizobj.GetCcIds(issue)
      ]
      self.issuesnapshot2cc_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2CC_COLS,
          cc_rows,
          replace=True, commit=commit)

      # Add all components to IssueSnapshot2Component.
      component_rows = [
          (issuesnapshot_id, component_id)
          for component_id in issue.component_ids
      ]
      self.issuesnapshot2component_tbl.InsertRows(
          cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
          component_rows,
          replace=True, commit=commit)

      # Add all hotlists to IssueSnapshot2Hotlist.
      # This is raw SQL to obviate passing FeaturesService down through
      # the call stack wherever this function is called.
      # TODO(jrobbins): sort out dependencies between service classes.
      cnxn.Execute('''
          INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
          SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
      ''', [issuesnapshot_id, issue.issue_id])

  def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids):
    """Expunge the existence of hotlists from issue snapshots.

    This method will not commit the operation. This method will not make
    changes to in-memory data.

    Args:
      cnxn: connection to SQL database.
      hotlist_ids: list of hotlist_ids for hotlists we want to delete.
    """
    vals_ph = sql.PlaceHolders(hotlist_ids)
    cnxn.Execute(
        'DELETE FROM IssueSnapshot2Hotlist '
        'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
        hotlist_ids,
        commit=False)

  def _currentTime(self):
    """This is a separate method so it can be mocked by tests."""
    return time.time()

  def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
                    project):
    """Parses a query string into LEFT JOIN and WHERE conditions.

    Args:
      cnxn: A MonorailConnection instance.
      services: A Services instance.
      project_config: The configuration for the given project.
      query (string): The query to parse.
      canned_query (string): The supplied canned query.
      project: The current project.

    Returns:
      1. A list of LEFT JOIN clauses for the SQL query.
      2. A list of WHERE clauses for the SQL query.
      3. A list of query conditions that are unsupported with snapshots.
    """
    if not (query or canned_query):
      return [], [], []

    query = query or ''
    scope = canned_query or ''

    query_ast = query2ast.ParseUserQuery(query, scope,
        query2ast.BUILTIN_ISSUE_FIELDS, project_config)
    query_ast = ast2ast.PreprocessAST(cnxn, query_ast, [project.project_id],
        services, project_config)
    left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
        snapshot_mode=True)

    return left_joins, where, unsupported

  def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
    """Builds one shard-scoped snapshot COUNT query.

    Returns a (statement string, statement args) pair; it does not execute
    anything itself.
    """
    stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
    stmt.AddJoinClauses(joins, left=True)
    stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
    if group_by:
      stmt.AddGroupByTerms(group_by)
    stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
    stmt_str, stmt_args = stmt.Generate()
    if group_by:
      if group_by[0] == 'IssueSnapshot.is_open':
        # Render the is_open boolean as "Opened"/"Closed" for display.
        count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
            'AS bool_open, results.issue_count ' \
            'FROM (%s) AS results' % stmt_str)
      else:
        count_stmt = stmt_str
    else:
      count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
          stmt_str)
    return count_stmt, stmt_args
| appengine/monorail/services/chart_svc.py | 14,212 | Class for querying chart data.
Expunge the existence of hotlists from issue snapshots.
This method will not commit the operation. This method will not make
changes to in-memory data.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
group_by (str, optional): Which dimension to group by. Values can
be 'label', 'component', or None, in which case no grouping will
be applied.
label_prefix: Required when group_by is 'label.' Will limit the query to
only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
Returns:
1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
Adds an IssueSnapshot and updates the previous one for each issue.
Given SQL arguments, executes a snapshot COUNT query.
Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project: The current project.
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
2. A list of WHERE clases for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
This is a separate method so it can be mocked by tests.
A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
Copyright 2018 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file or at https://developers.google.com/open-source/licenses/bsd Set up SQL table objects. TODO(jeffcarp): Handle case where there are issues with no labels. TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output, ensure regex is case-insensitive. Wait for each query to complete and add it to the dict. Update previous snapshot of current issue's end time to right now. Add all labels to IssueSnapshot2Label. Add all CCs to IssueSnapshot2Cc. Add all components to IssueSnapshot2Component. Add all components to IssueSnapshot2Hotlist. This is raw SQL to obviate passing FeaturesService down through the call stack wherever this function is called. TODO(jrobbins): sort out dependencies between service classes. | 3,125 | en | 0.82387 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# -- Project information -----------------------------------------------------

project = 'rend'
copyright = '2020, Thomas S Hatch'
author = 'Thomas S Hatch'

# The full version, including alpha/beta/rc tags
release = '4.1'

# -- General configuration ---------------------------------------------------

# The document that contains the root toctree directive.
master_doc = 'index'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| docs/conf.py | 1,946 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('.')) -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,593 | en | 0.670193 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Circulation Loan resolvers."""
from invenio_circulation.proxies import current_circulation
from invenio_pidstore.errors import PIDDeletedError
from invenio_app_ils.circulation.utils import resolve_item_from_loan
from invenio_app_ils.jsonresolver.api import \
get_field_value_for_record as get_field_value
from invenio_app_ils.jsonresolver.api import get_pid_or_default, pick
from invenio_app_ils.proxies import current_app_ils
from invenio_app_ils.records.resolver.resolver import get_patron
def item_resolver(loan_pid):
    """Resolve an Item given a Loan PID."""
    loan_cls = current_circulation.loan_record_cls
    loan = loan_cls.get_record_by_pid(loan_pid)
    item_pid = loan.get("item_pid")
    if not item_pid:
        return {}
    try:
        # The PID may point to either an Item or a BorrowingRequest.
        record = resolve_item_from_loan(item_pid)
    except PIDDeletedError:
        return {}
    # 'barcode' and 'medium' are absent on BorrowingRequest records.
    return pick(
        record,
        "barcode",
        "description",
        "document_pid",
        "medium",
        "pid",
    )
@get_pid_or_default(default_value=dict())
def loan_patron_resolver(loan_pid):
    """Resolve a Patron given a Loan PID."""
    loan_cls = current_circulation.loan_record_cls
    try:
        patron_pid = get_field_value(loan_cls, loan_pid, "patron_pid")
    except KeyError:
        # Loan has no patron_pid field: nothing to resolve.
        return {}
    return get_patron(patron_pid)
@get_pid_or_default(default_value=dict())
def document_resolver(loan_pid):
    """Resolve a Document given a Loan PID."""
    loan_cls = current_circulation.loan_record_cls
    try:
        document_pid = get_field_value(loan_cls, loan_pid, "document_pid")
    except KeyError:
        # Loan has no document_pid field: nothing to resolve.
        return {}
    document_cls = current_app_ils.document_record_cls
    try:
        document = document_cls.get_record_by_pid(document_pid)
    except PIDDeletedError:
        return {}
    # TODO: add the imprint year here
    return pick(
        document,
        "authors",
        "edition",
        "document_type",
        "pid",
        "title",
    )
| invenio_app_ils/circulation/jsonresolvers/loan.py | 2,368 | Resolve a Document given a Loan PID.
Resolve an Item given a Loan PID.
Resolve a Patron given a Loan PID.
Circulation Loan resolvers.
-*- coding: utf-8 -*- Copyright (C) 2019-2020 CERN. invenio-app-ils is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. can resolve to an Item or BorrowingRequest not set in BorrowingRequest not set in BorrowingRequest TODO: add the imprint year here | 464 | en | 0.806728 |
"""Tests for functions defined in the floodsystem/geo module
"""
from floodsystem import geo
from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list
# Live station list fetched from the real data source; used by the tests
# that need a realistic, non-empty data set.
stations = build_station_list()

# define arbitrary stations for the tests
# TestStation1: River Cam, coordinates (1.0, 4.0).
station_id1 = "test station id 1"
measure_id1 = "test measure id 1"
label1 = "TS1"
coord1 = (1.0, 4.0)
typical_range1 = (-2, 5)
river1 = "River Cam"
town1 = "Town 1"
TestStation1 = MonitoringStation(station_id1, measure_id1, label1, coord1, typical_range1, river1, town1)

# TestStation2: River Cam, coordinates (0.0, 1.0) -- closest to the origin.
station_id2 = "test station id 2"
measure_id2 = "test measure id 2"
label2 = "TS2"
coord2 = (0.0, 1.0)
typical_range2 = (-2, 2)
river2 = "River Cam"
town2 = "Town 2"
TestStation2 = MonitoringStation(station_id2, measure_id2, label2, coord2, typical_range2, river2, town2)

# TestStation3: River Thames, coordinates (1.0, 1.0).
station_id3 = "test station id 3"
measure_id3 = "test measure id 3"
label3 = "TS3"
coord3 = (1.0, 1.0)
typical_range3 = (-2, 3)
river3 = "River Thames"
town3 = "Town 3"
TestStation3 = MonitoringStation(station_id3, measure_id3, label3, coord3, typical_range3, river3, town3)

# Fixture list shared by the deterministic tests below.
test_stations = [TestStation1, TestStation2, TestStation3]
def test_stations_within_radius():
    """Radius filter: nothing within 0 km, everything within 10000 km."""
    centre = (52.2053, 0.1218)
    # A zero radius excludes every station.
    assert geo.stations_within_radius(stations, centre, 0) == []
    # A huge radius must include every station.
    within = geo.stations_within_radius(stations, centre, 10000)
    assert len(within) == len(stations)
def test_rivers_by_station_number():
    """Rivers are returned in descending order of station count."""
    top_two = geo.rivers_by_station_number(stations, 2)
    first_count = top_two[0][1]
    second_count = top_two[1][1]
    assert first_count >= second_count
def test_stations_by_distance():
    """Stations come back ordered by distance from the reference point."""
    ordered = geo.stations_by_distance(test_stations, (0, 0))
    nearest, middle, farthest = (entry[0] for entry in ordered[:3])
    assert (nearest, middle, farthest) == (TestStation2, TestStation3, TestStation1)
def test_rivers_with_station():
    """Exactly the two fixture rivers are reported, in order."""
    rivers = geo.rivers_with_station(test_stations)
    assert rivers == ['River Cam', 'River Thames']
def test_stations_by_river():
    """Both Cam fixtures are grouped under 'River Cam'."""
    cam_stations = geo.stations_by_river(test_stations)['River Cam']
    names = sorted(station.name for station in cam_stations)
    assert names == [TestStation1.name, TestStation2.name]
| test_geo.py | 2,481 | Tests for functions defined in the floodsystem/geo module
define arbitrary stations for the tests check that no stations are at a negative distance from the centre check that all stations are within 10000km of the centre check that the number of stations is greater (or equal to the second one) for the first river. check that the results are in the right order based on the test stations provided above check that the results are River Cam and River Thames as per the test stations provided above check that the two stations on the River Cam are TestStation1 and TestStation2 | 578 | en | 0.935424 |
import pyopencl as cl
class DeviceInfo(object):
    """Collects compute/memory properties of an NVIDIA OpenCL device.

    Only NVIDIA devices are supported: the constructor queries the
    NV-specific compute-capability attributes.
    """

    # Shared memory per SM (bytes), keyed by (major, minor) compute
    # capability.  Data comes from
    # http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
    _SHARED_MEMORY_PER_SM = {
        (5, 0): 65536,
        (5, 2): 98304,
    }

    def __init__(self, device):
        """Read and print device properties via pyopencl.

        :param device: a pyopencl Device object (must be an NVIDIA device).
        :raises Exception: if the compute capability is not in the table.
        """
        self.compute_units = device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
        self.maxShared = device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024
        self.compute_capability = (
            device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
            device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
        )
        self.deviceName = device.get_info(cl.device_info.NAME)
        self.deviceSimpleName = self.deviceName.replace(
            'GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
        print('deviceName', self.deviceName, 'compute capability', self.compute_capability)
        print('compute units', self.compute_units, 'max shared memory', self.maxShared)
        self.shared_memory_per_sm = self.shared_memory_for_capability(
            self.compute_capability)

    @classmethod
    def shared_memory_for_capability(cls, compute_capability):
        """Return shared memory per SM (bytes) for a (major, minor) capability.

        BUG FIX: the original error paths referenced the undefined local name
        `compute_capability` (missing `self.`), so unrecognized capabilities
        raised NameError instead of the intended message.
        """
        try:
            return cls._SHARED_MEMORY_PER_SM[tuple(compute_capability)]
        except KeyError:
            raise Exception(
                'compute capability %s not recognized' % (tuple(compute_capability),))
| gpuexperiments/deviceinfo.py | 1,470 | data comes from http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls | 95 | en | 0.539008 |
import inspect
import os
import re
import subprocess
from collections import Counter
from io import StringIO
import pandas as pd
from numpy import unique
# Platform path separator cached at import time ('/' on POSIX, '\\' on Windows).
file_sep = os.path.sep
def imports_in_module(module):
    """
    Get a list of strings showing what is imported in a module.

    :param module: An actual module object, or the file of the module
        (as given by inspect.getfile(module))
    :return: A list of strings showing the imported objects (modules,
        functions, variables, classes...)

    Note: Requires having snakefood installed:
        http://furius.ca/snakefood/doc/snakefood-doc.html#installation

    # >>> print('\\n'.join(imports_in_module(__file__)))  # doctest: +SKIP
    # collections.Counter
    # inspect
    # os
    """
    if not isinstance(module, str):
        module = inspect.getfile(module)
    if module.endswith('c'):
        module = module[:-1]  # remove the 'c' of '.pyc'
    # BUG FIX: check_output returns bytes on Python 3, and bytes.split('\n')
    # raises TypeError; decode to text before splitting.  (On Python 2 this
    # decode is a harmless str -> unicode conversion.)
    output = subprocess.check_output(['sfood-imports', '-u', module]).decode('utf-8')
    return [line for line in output.split('\n') if len(line) > 0]
def base_modules_used_in_module(module):
    """
    Get a list of strings showing what base modules are imported in a module.

    :param module: An actual module object, or the file of the module
        (as given by inspect.getfile(module))
    :return: A list of unique base-module names (i.e. the X of
        `import X.Y.Z` or `from X.Y import Z`).

    Note: Requires having snakefood installed:
        http://furius.ca/snakefood/doc/snakefood-doc.html#installation

    >>> base_modules_used_in_module(__file__)  # doctest: +SKIP
    ['collections', 'inspect', 'numpy', 'os', 'pandas', 're', 'subprocess']
    """
    # Raw string avoids the invalid-escape DeprecationWarning of '\\w+', and
    # the pattern is compiled once instead of once per import line.
    first_word_p = re.compile(r'\w+')
    return list(unique([first_word_p.findall(x)[0]
                        for x in imports_in_module(module)]))
def base_module_imports_in_module_recursive(module):
    """
    Get a count of the base modules imported under a module, recursively.

    It's the recursive version of the base_modules_used_in_module function.
    Recursive in the sense that if module is a package module (i.e. containing
    a __init__.py and further submodules), the base_modules_used_in_module
    function will be applied to all .py files under the mother folder.
    Function returns a count (Counter object) of the number of modules where
    each base module was found.

    :param module: An actual module object, the file of the module (as given
        by inspect.getfile(module)), or the directory of a package.
    :return: Counter mapping base-module name -> number of files importing it.
    """
    if inspect.ismodule(module):
        module = inspect.getsourcefile(module)
    if module.endswith('__init__.py'):
        # A package: recurse over its whole folder instead of the __init__.
        module = os.path.dirname(module)
    if os.path.isdir(module):
        c = Counter()
        it = get_filepath_iterator(module, pattern='.py$')
        next(it)  # to skip the seed module itself, and not get into an infinite loop
        for _module in it:
            try:
                c.update(base_module_imports_in_module_recursive(_module))
            except Exception as e:
                # NOTE(review): e.args[1] assumes a two-element args tuple;
                # many exceptions carry a single arg, which would raise
                # IndexError here -- confirm the intended behavior.
                if 'sfood-imports' in e.args[1]:
                    raise RuntimeError("You don't have sfood-imports installed (snakefood), so I can't do my job")
                else:
                    print(("Error with module {}: {}".format(_module, e)))
        return c
    elif not os.path.isfile(module):
        raise ValueError("module file not found: {}".format(module))
    return Counter(base_modules_used_in_module(module))
def requirements_packages_in_module(module, requirements=None):
    """
    Count how often each requirements-listed module is imported under module.

    :param module: module object, module file, or package directory.
    :param requirements: None (use the installed packages reported by
        pip-licenses), a path to a requirements.txt file, or an iterable of
        requirement strings (e.g. 'numpy==1.2.3').
    :return: Counter of base-module import counts, restricted to the modules
        named by the requirements.
    """
    if requirements is None:
        requirements = list(pip_licenses_df(include_module_name=False)['package_name'])
    elif isinstance(requirements, str) and os.path.isfile(requirements):
        with open(requirements) as fp:
            requirements = fp.read().splitlines()
    p = re.compile(r'^[^=]+')  # strip the version specifier: 'pkg==1.0' -> 'pkg'
    module_names = set()
    for x in requirements:
        try:
            xx = p.findall(x)
            if xx:
                module_names.add(get_module_name(xx[0]))
        except Exception as e:
            print(("Error with {}\n {}".format(x, e)))
    # BUG FIX: this used to call
    #   base_module_imports_in_module_recursive(module, module_names=requirements)
    # but that function accepts no `module_names` argument (TypeError), and
    # the `module_names` computed above went unused.  Filter the recursive
    # counts down to the requirement modules here instead.
    counts = base_module_imports_in_module_recursive(module)
    return Counter({name: n for name, n in counts.items() if name in module_names})
# Matches any word character: used to keep only lines that contain content.
# FIX: raw strings -- '\w' / '\s' are invalid escape sequences in plain
# string literals (DeprecationWarning on Python 3.6+).
word_or_letter_p = re.compile(r'\w')
# Matches runs of two or more whitespace characters: the column separators
# in pip-licenses' aligned table output.
at_least_two_spaces_p = re.compile(r'\s{2,}')
def pip_licenses_df(package_names=None, include_module_name=True, on_module_search_error=None):
    """
    Get a dataframe of pip packages and licences

    :param package_names: optional iterable of package names to restrict to
    :param include_module_name: whether to prepend a 'module' column holding
        the resolved import name for each package
    :param on_module_search_error: forwarded to get_module_name as on_error
    :return: DataFrame with columns [module,] package_name, version, license
    """
    # Bug fix: check_output returns bytes on Python 3; decode before using
    # str methods on the output.
    pip_licenses_output = subprocess.check_output(['pip-licenses']).decode()
    # keep only lines that contain text, strip them, and convert the
    # multi-space column separators into tabs so pandas can parse it
    t = list(map(str.strip,
                 list(filter(word_or_letter_p.search,
                             pip_licenses_output.split('\n')))))
    t = [at_least_two_spaces_p.sub('\t', x) for x in t]
    t = '\n'.join(t)
    df = pd.read_csv(StringIO(t), sep='\t')
    df = df.rename(columns={'Name': 'package_name', 'Version': 'version', 'License': 'license'})
    if include_module_name:
        df['module'] = [get_module_name(x, on_error=on_module_search_error) for x in df['package_name']]
        df = df[['module', 'package_name', 'version', 'license']]  # reorder
    if package_names is not None:
        df = df[df['package_name'].isin(package_names)]
    return df
def get_filepath_iterator(root_folder,
                          pattern='',
                          return_full_path=True,
                          apply_pattern_to_full_path=False):
    """Recursively iterate over files under ``root_folder`` matching ``pattern``.

    :param root_folder: folder to walk recursively
    :param pattern: regex (or predicate) applied to each candidate file
    :param return_full_path: yield full paths (True) or bare names (False)
    :param apply_pattern_to_full_path: if True, match ``pattern`` against the
        full filepath; otherwise against the file name only
    """
    # Bug fix: the two branches were swapped -- the name-filter walker was
    # used when matching on full paths and vice versa.
    if apply_pattern_to_full_path:
        return recursive_file_walk_iterator_with_filepath_filter(root_folder, pattern, return_full_path)
    else:
        return recursive_file_walk_iterator_with_name_filter(root_folder, pattern, return_full_path)
def iter_relative_files_and_folder(root_folder):
    """Yield the names of the direct children of ``root_folder``, relative to it."""
    from glob import iglob
    prefix = root_folder if root_folder.endswith(file_sep) else root_folder + file_sep
    # strip the folder prefix so only the relative entry names remain
    return (entry.replace(prefix, '') for entry in iglob(prefix + '*'))
def pattern_filter(pattern):
    """Return a predicate that tests whether ``pattern`` matches anywhere in a string."""
    compiled = re.compile(pattern)

    def _pattern_filter(s):
        return compiled.search(s) is not None

    return _pattern_filter
def recursive_file_walk_iterator_with_name_filter(root_folder, filt='', return_full_path=True):
    """Recursively yield files under ``root_folder`` whose *basename* passes ``filt``.

    ``filt`` may be a regex string (matched with ``re.search``) or a predicate
    taking the file name.
    """
    if isinstance(filt, str):
        filt = pattern_filter(filt)
    for rel_name in iter_relative_files_and_folder(root_folder):
        full_path = os.path.join(root_folder, rel_name)
        if os.path.isdir(full_path):
            # descend into subdirectories
            yield from recursive_file_walk_iterator_with_name_filter(full_path, filt, return_full_path)
        elif os.path.isfile(full_path) and filt(rel_name):
            yield full_path if return_full_path else rel_name
def recursive_file_walk_iterator_with_filepath_filter(root_folder, filt='', return_full_path=True):
    """Recursively yield files under ``root_folder`` whose *full path* passes ``filt``.

    ``filt`` may be a regex string (matched with ``re.search``) or a predicate
    taking the full filepath.
    """
    if isinstance(filt, str):
        filt = pattern_filter(filt)
    for rel_name in iter_relative_files_and_folder(root_folder):
        full_path = os.path.join(root_folder, rel_name)
        if os.path.isdir(full_path):
            # descend into subdirectories
            yield from recursive_file_walk_iterator_with_filepath_filter(full_path, filt, return_full_path)
        elif os.path.isfile(full_path) and filt(full_path):
            yield full_path if return_full_path else rel_name
| tec/snake_food_import_counting.py | 8,346 | Get a list of strings showing what base modules that are imported in a module, recursively.
It's the recursive version of the base_modules_used_in_module function.
Recursive in the sense that if module is a package module (i.e. containing a __init__.py and further submodules),
the base_modules_used_in_module function will be applied to all .py files under the mother folder.
Function returns a count (Counter object) of the number of modules where each base module was found.
:param module: An actual module object the file of the module (as given by inspect.getfile(module)
:param module_names: Modules to filter for.
None: Will grab all modules
A list or tuple: Of modules to grab
If not will assume module_names is a regex to apply to find module names
:return:
Get a list of strings showing which base modules are imported in a module.
:param module: An actual module object the file of the module (as given by inspect.getfile(module)
:return: A list of strings showing the imported base modules (i.e. the X of import X.Y.Z or from X.Y import Z).
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
>>> base_modules_used_in_module(__file__) # doctest: +SKIP
['StringIO', 'collections', 'inspect', 'numpy', 'os', 'pandas', 're', 'subprocess', 'ut']
Get a list of strings showing what is imported in a module.
:param module: An actual module object the file of the module (as given by inspect.getfile(module)
:return: A list of strings showing the imported objects (modules, functions, variables, classes...)
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
You may want to use ``imports_in_py_content(py_content)`` on the actual string content itself.
# >>> print('\n'.join(imports_in_module(__file__))) # doctest: +SKIP
# StringIO.StringIO
# collections.Counter
# inspect
# numpy.unique
# os
# pandas
# re
# subprocess
# ut.pfile.iter.get_filepath_iterator
# ut.util.code.packages.get_module_name
# ut.util.code.packages.read_requirements
Get a dataframe of pip packages and licences
:return:
remove the 'c' of '.pyc' if module_names is None: module_names = any_module_import_regex elif isinstance(module_names, (tuple, list)): module_names = mk_multiple_package_import_regex(module_names) to skip the seed module itself, and not get into an infinite loop with open(module) as fp: module_contents = fp.read() return Counter(map(lambda x: x[1:], unique(module_names.findall(module_contents)))) reorder if isinstance(pattern, basestring): pattern = re.compile(pattern) | 2,625 | en | 0.512123 |
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# System
# -----------------------------------------------------------------------------
_C.SYSTEM = CN()
_C.SYSTEM.NUM_GPUS = 4
_C.SYSTEM.NUM_CPUS = 4
# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model architectures defined in the package: unet_super, super, fpn, unet_residual_3d
_C.MODEL.ARCHITECTURE = 'unet_residual_3d'
# Number of filters per unet block
_C.MODEL.FILTERS = [28, 36, 48, 64, 80]
# Per-target learning options; indices are interpreted by the trainer
_C.MODEL.TARGET_OPT = ['0']
_C.MODEL.WEIGHT_OPT = [['1']]
# Choose the right loss function for each target:
# 'WeightedMSE', 'WeightedBCE', 'JaccardLoss', 'DiceLoss'
_C.MODEL.LOSS_OPTION = [['WeightedBCE']]
# Weight for each loss function
_C.MODEL.LOSS_WEIGHT = [[1.0]]
# Define the number of input channels. Usually EM images are
# single-channel gray-scale image.
_C.MODEL.IN_PLANES = 1
# Define the number of output channels.
_C.MODEL.OUT_PLANES = 1
# Padding mode, possible options: 'zeros','circular', 'rep'
_C.MODEL.PAD_MODE = 'rep'
# Normalization mode, possible options: 'bn', 'abn', 'in', 'bin'
_C.MODEL.NORM_MODE = 'bn'
# Activation mode, possible options: 'relu', 'elu', 'leaky'
_C.MODEL.ACT_MODE = 'elu'
# If MODEL.EMBEDDING = 1 will do embedding
_C.MODEL.EMBEDDING = 1
# Last decoder head depth
_C.MODEL.HEAD_DEPTH = 1
# Input/output patch sizes as (z, y, x)
_C.MODEL.INPUT_SIZE = [8, 256, 256]
_C.MODEL.OUTPUT_SIZE = [8, 256, 256]
# Optional regularization terms and their weights (parallel lists)
_C.MODEL.REGU_OPT = []
_C.MODEL.REGU_WEIGHT = []
# Fine-tune suffix for model saving
_C.MODEL.FINETUNE = ''
# Exact matching: the weights shape in pretrain model and current model are identical
_C.MODEL.EXACT = True
_C.MODEL.SIZE_MATCH = True
# Pretrained-model checkpoint path and layer-loading options
_C.MODEL.PRE_MODEL = ''
_C.MODEL.PRE_MODEL_LAYER = ['']
_C.MODEL.PRE_MODEL_ITER = 0
_C.MODEL.PRE_MODEL_LAYER_SELECT = [-1]
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
# Scale ratio of the input data for different resolutions.
# Using a DATA_SCALE of [1., 0.5, 0.5] will downsample the
# original image by two times (e.g., 4nm -> 8nm).
_C.DATASET.DATA_SCALE = [1., 1., 1.]
# Scaling factor for super resolution
_C.DATASET.SCALE_FACTOR = [2, 3, 3]
# Specify the data path in the *.yaml files for different experiments.
_C.DATASET.IMAGE_NAME = ''
_C.DATASET.LABEL_NAME = ''
_C.DATASET.INPUT_PATH = ''
_C.DATASET.OUTPUT_PATH = ''
# Padding size for the input volumes
_C.DATASET.PAD_SIZE = [2, 64, 64]
# Half Patch size for 2D label erosion
_C.DATASET.LABEL_EROSION = 0
# If it's a binary label
_C.DATASET.LABEL_BINARY = False
_C.DATASET.LABEL_MAG = 0
# Data in tile format or not.
# NOTE(review): "TITLE" looks like a typo for "TILE"; name kept for
# backward compatibility with existing yaml configs.
_C.DATASET.DO_CHUNK_TITLE = 0
# Chunk parameters for tile format: chunk_num (z,y,x), chunk_stride
_C.DATASET.DATA_CHUNK_NUM = [1, 1, 1]
# Predefined data chunk to iterate through
_C.DATASET.DATA_CHUNK_NUM_IND = []
# Boolean variable, equal to 'int(args.data_chunk_num[-1:])==1'
_C.DATASET.DATA_CHUNK_STRIDE = True
# Chunk parameters for tile format: chunk_iter_num
_C.DATASET.DATA_CHUNK_ITER = 1000
# Number of voxel to exceed for a valid sample
_C.DATASET.DATA_INVALID_THRES = [0., 0.]
_C.DATASET.PRE_LOAD_DATA = [None,None,None]
# Reject sampling
_C.DATASET.REJECT_SIZE_THRES = 100
_C.DATASET.REJECT_P = 0.95
# -----------------------------------------------------------------------------
# Augmentor
# -----------------------------------------------------------------------------
_C.AUGMENTOR = CN()
_C.AUGMENTOR.ROTATE = True
# Probability of applying the rotation augmentation
_C.AUGMENTOR.ROTATE_P = 0.1
_C.AUGMENTOR.RESCALE = True
# Probability of applying the rescale augmentation
_C.AUGMENTOR.RESCALE_P = 0.5
_C.AUGMENTOR.FLIP = True
# Probability of applying the flip augmentation
_C.AUGMENTOR.FLIP_P = 1.0
# Conducting x-z and y-z flip only when the dataset is isotropic.
_C.AUGMENTOR.FLIP_DO_ZTRANS = 0
_C.AUGMENTOR.ELASTIC = True
# Maximum pixel-moving distance of elastic transformation
_C.AUGMENTOR.ELASTIC_ALPHA = 12.0
# Standard deviation of the Gaussian filter
_C.AUGMENTOR.ELASTIC_SIGMA = 4.0
# Probability of applying the elastic augmentation
_C.AUGMENTOR.ELASTIC_P = 0.75
_C.AUGMENTOR.GRAYSCALE = True
# Probability of applying the grayscale augmentation
_C.AUGMENTOR.GRAYSCALE_P = 0.75
_C.AUGMENTOR.MISSINGPARTS = True
# Probability of applying the missingparts augmentation
_C.AUGMENTOR.MISSINGPARTS_P = 0.9
_C.AUGMENTOR.MISSINGSECTION = True
# Probability of applying the missingsection augmentation
_C.AUGMENTOR.MISSINGSECTION_P = 0.5
_C.AUGMENTOR.MISALIGNMENT = True
# Probability of applying the misalignment augmentation
_C.AUGMENTOR.MISALIGNMENT_P = 1.0
# Maximum pixel displacement in each direction (x and y) (int)
_C.AUGMENTOR.MISALIGNMENT_DISPLACEMENT = 16
# -----------------------------------------------------------------------------
# Solver
# -----------------------------------------------------------------------------
_C.SOLVER = CN()
# Specify the learning rate scheduler.
_C.SOLVER.LR_SCHEDULER_NAME = "MultiStepLR"
_C.SOLVER.ITERATION_STEP = 1
_C.SOLVER.ITERATION_SAVE = 5000
_C.SOLVER.ITERATION_TOTAL = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0
_C.SOLVER.MOMENTUM = 0.9
# The weight decay that's applied to parameters of normalization layers
# (typically the affine transformation)
_C.SOLVER.WEIGHT_DECAY = 0.0001
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
# The iteration number to decrease learning rate by GAMMA
_C.SOLVER.GAMMA = 0.1
# should be a tuple like (30000,)
_C.SOLVER.STEPS = (30000, 35000)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
# Save a checkpoint after every this number of iterations
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of samples per batch across all machines.
# If we have 16 GPUs and IMS_PER_BATCH = 32,
# each GPU will see 2 images per batch.
_C.SOLVER.SAMPLES_PER_BATCH = 16
# -----------------------------------------------------------------------------
# Monitor
# -----------------------------------------------------------------------------
_C.MONITOR = CN()
# Logging/visualization flags; semantics defined by the trainer -- TODO confirm
_C.MONITOR.LOG_OPT = [1, 1, 0]
_C.MONITOR.VIS_OPT = [0, 8]
_C.MONITOR.ITERATION_NUM = [10, 50]
# # -----------------------------------------------------------------------------
# # Inference
# # -----------------------------------------------------------------------------
_C.INFERENCE = CN()
_C.INFERENCE.INPUT_SIZE = [8, 256, 256]
_C.INFERENCE.OUTPUT_SIZE = [8, 256, 256]
_C.INFERENCE.IMAGE_NAME = ''
_C.INFERENCE.OUTPUT_PATH = ''
_C.INFERENCE.OUTPUT_NAME = 'result.h5'
_C.INFERENCE.PAD_SIZE = [8, 64, 64]
# Sliding-window stride for inference (z, y, x)
_C.INFERENCE.STRIDE = [1, 192, 192]
# Test-time augmentation: how to combine ('mean') and how many augmentations
_C.INFERENCE.AUG_MODE = 'mean'
_C.INFERENCE.AUG_NUM = 4
_C.INFERENCE.DO_EVAL = True
_C.INFERENCE.DO_3D = True
# If not None then select channel of output
_C.INFERENCE.MODEL_OUTPUT_ID = [None]
# Number of test workers
_C.INFERENCE.TEST_NUM = 1
# Test worker id
_C.INFERENCE.TEST_ID = 0
# Batchsize for inference
_C.INFERENCE.SAMPLES_PER_BATCH = 32
def get_cfg_defaults():
    """Return a yacs ``CfgNode`` populated with this project's defaults.

    A clone of the module-level ``_C`` is handed out (the "local variable"
    usage pattern) so callers may freely mutate the result without
    corrupting the shared defaults.
    """
    return _C.clone()
def save_all_cfg(cfg, output_dir):
    """Save configs in the output directory."""
    # Persist the fully merged configuration (defaults + yaml file +
    # command-line overrides) so the experiment can be reproduced later.
    config_path = os.path.join(output_dir, "config.yaml")
    with open(config_path, "w") as config_file:
        config_file.write(cfg.dump())
    print("Full config saved to {}".format(config_path))
| connectomics/config/config.py | 8,102 | Get a yacs CfgNode object with default values for my_project.
Save configs in the output directory.
----------------------------------------------------------------------------- Config definition ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- System ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- Model ----------------------------------------------------------------------------- Model architectures defined in the package: unet_super, super, fpn, unet_residual_3d Number of filters per unet block Choose the right loss function for each target: 'WeightedMSE', 'WeightedBCE', 'JaccardLoss', 'DiceLoss' Weight for each loss function Define the number of input channels. Usually EM images are single-channel gray-scale image. Define the number of output channels. Padding mode, possible options: 'zeros','circular', 'rep' Normalization mode, possible options: 'bn', 'abn', 'in', 'bin' Activation mode, possible options: 'relu', 'elu', 'leaky' If MODEL.EMBEDDING = 1 will do embedding Last decoder head depth Fine-tune suffix for model saving Exact matching: the weights shape in pretrain model and current model are identical ----------------------------------------------------------------------------- Dataset ----------------------------------------------------------------------------- Scale ratio of the input data for different resolutions. Using a DATA_SCALE of [1., 0.5, 0.5] will downsample the original image by two times (e.g., 4nm -> 8nm). Scaling factor for super resolution Specify the data path in the *.yaml files for different experiments. Padding size for the input volumes Half Patch size for 2D label erosion If it's a binary label Data in tile format or not. 
Chunk parameters for tile format: chunk_num (z,y,x), chunk_stride Predefined data chunk to iterate through Boolean variable, euqal to 'int(args.data_chunk_num[-1:])==1' Chunk parameters for tile format: chunk_iter_num Number of voxel to exceed for a valid sample Reject sampling ----------------------------------------------------------------------------- Augmentor ----------------------------------------------------------------------------- Probability of applying the rotation augmentation Probability of applying the rescale augmentation Probability of applying the flip augmentation Conducting x-z and y-z flip only when the dataset is isotropic. Maximum pixel-moving distance of elastic transformation Standard deviation of the Gaussian filter Probability of applying the elastic augmentation Probability of applying the grayscale augmentation Probability of applying the missingparts augmentation Probability of applying the missingsection augmentation Probability of applying the misalignment augmentation Maximum pixel displacement in each direction (x and y) (int) ----------------------------------------------------------------------------- Solver ----------------------------------------------------------------------------- Specify the learning rate scheduler. The weight decay that's applied to parameters of normalization layers (typically the affine transformation) The iteration number to decrease learning rate by GAMMA should be a tuple like (30000,) Save a checkpoint after every this number of iterations Number of samples per batch across all machines. If we have 16 GPUs and IMS_PER_BATCH = 32, each GPU will see 2 images per batch. 
----------------------------------------------------------------------------- Monitor ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- Inference ----------------------------------------------------------------------------- If not None then select channel of output Number of test workers Test worker id Batchsize for inference Return a clone so that the defaults will not be altered This is for the "local variable" use pattern Save config.yaml in the experiment directory after combine all non-default configurations from yaml file and command line. | 4,223 | en | 0.492526 |
"""Computational algebraic field theory."""
import functools
import math
import mpmath
from ..config import query
from ..core import (Add, Dummy, E, GoldenRatio, I, Integer, Mul, Rational,
cacheit, pi)
from ..core.exprtools import Factors
from ..core.function import _mexpand, count_ops
from ..core.sympify import sympify
from ..domains import QQ, AlgebraicField
from ..functions import (Abs, conjugate, cos, exp_polar, im, re, root, sin,
sqrt, tan)
from ..ntheory import divisors, factorint
from ..simplify.radsimp import _split_gcd
from ..simplify.simplify import _is_sum_surds
from ..utilities import lambdify, numbered_symbols, sift
from ..utilities.iterables import uniq
from .orthopolys import chebyshevt_poly
from .polyerrors import NotAlgebraic
from .polytools import (Poly, PurePoly, degree, factor_list, groebner, lcm,
parallel_poly_from_expr, resultant)
from .rootoftools import RootOf
from .specialpolys import cyclotomic_poly
__all__ = 'minimal_polynomial', 'primitive_element', 'field_isomorphism'
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
    """
    Return a factor having root ``v``
    It is assumed that one of the factors has root ``v``.

    Parameters
    ==========
    factors : list of polynomials, or of ``(poly, multiplicity)`` tuples
    x : generator of the polynomials
    v : algebraic value that is a root of exactly one factor
    dom : ground domain; if it has symbols, they are sampled numerically
    prec : maximal evaluation precision tried before giving up
    bound : number of integer sample values tried per domain symbol
    """
    if isinstance(factors[0], tuple):
        # drop multiplicities from factor_list output
        factors = [f[0] for f in factors]
    if len(factors) == 1:
        return factors[0]
    points = {x: v}
    symbols = dom.symbols if hasattr(dom, 'symbols') else []
    t = QQ(1, 10)
    for n in range(bound**len(symbols)):
        # decode n (base `bound`) into one integer sample per domain symbol
        prec1 = 10
        n_temp = n
        for s in symbols:
            points[s] = n_temp % bound
            n_temp = n_temp // bound
        while True:
            # keep only factors that vanish numerically at the sample point,
            # doubling the working precision until a single factor survives
            candidates = []
            eps = t**(prec1 // 2)
            for f in factors:
                if abs(f.as_expr().evalf(prec1, points, strict=False)) < eps:
                    candidates.append(f)
            if candidates:
                factors = candidates
            if len(factors) == 1:
                return factors[0]
            if prec1 > prec:
                break
            prec1 *= 2
    raise NotImplementedError(f'multiple candidates for the minimal polynomial of {v}')
def _separate_sq(p):
    """
    Helper function for ``_minimal_polynomial_sq``.
    It selects a rational ``g`` such that the polynomial ``p``
    consists of a sum of terms whose surds squared have gcd equal to ``g``
    and a sum of terms with surds squared prime with ``g``;
    then it takes the field norm to eliminate ``sqrt(g)``
    See simplify.simplify.split_surds and polytools.sqf_norm.
    Examples
    ========
    >>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
    >>> p = _separate_sq(p)
    >>> p
    -x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
    >>> p = _separate_sq(p)
    >>> p
    -x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
    >>> p = _separate_sq(p)
    >>> p
    -x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
    """
    def is_sqrt(expr):
        # recognize a bare square root (exponent exactly 1/2)
        return expr.is_Pow and expr.exp == Rational(1, 2)
    p = p.doit()
    # p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
    a = []
    for y in p.args:
        if not y.is_Mul:
            if is_sqrt(y):
                a.append((Integer(1), y**2))
            elif y.is_Atom:
                a.append((y, Integer(1)))
            else:
                raise NotImplementedError
        else:
            # split each product into its rational part and its surd part
            sifted = sift(y.args, is_sqrt)
            a.append((Mul(*sifted[False]), Mul(*sifted[True])**2))
    a.sort(key=lambda z: z[1])
    if a[-1][1] == 1:
        # there are no surds
        return p
    surds = [z for y, z in a]
    # skip the leading surd-free terms (their "surd squared" is 1)
    for i, si in enumerate(surds):  # pragma: no branch
        if si != 1:
            break
    _, b1, _ = _split_gcd(*surds[i:])
    # partition the terms by whether their surd squared lies in b1
    a1 = []
    a2 = []
    for y, z in a:
        if z in b1:
            a1.append(y*sqrt(z))
        else:
            a2.append(y*sqrt(z))
    p1 = Add(*a1)
    p2 = Add(*a2)
    # field norm: (p1 + p2)(p1 - p2) = p1**2 - p2**2 eliminates sqrt(g)
    return _mexpand(p1**2) - _mexpand(p2**2)
def _minimal_polynomial_sq(p, n, x):
    """
    Returns the minimal polynomial for the ``nth-root`` of a sum of surds
    or ``None`` if it fails.
    Parameters
    ==========
    p : sum of surds
    n : positive integer
    x : variable of the returned polynomial
    Examples
    ========
    >>> q = 1 + sqrt(2) + sqrt(3)
    >>> _minimal_polynomial_sq(q, 3, x)
    x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
    """
    p = sympify(p)
    n = sympify(n)
    assert n.is_Integer and n > 1 and _is_sum_surds(p)
    target_root = root(p, n)
    # repeatedly take field norms until every square root is eliminated
    poly = p - x
    while True:
        reduced = _separate_sq(poly)
        if reduced is poly:
            # no surds remain; undo the n-th root by substituting x -> x**n
            poly = poly.subs({x: x**n})
            break
        poly = reduced
    # by construction `poly` vanishes at target_root;
    # the minimal polynomial is the factor vanishing there
    return _choose_factor(factor_list(poly)[1], x, target_root)
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
    """
    Return the minimal polynomial for ``op(ex1, ex2)``.
    Parameters
    ==========
    op : operation ``Add`` or ``Mul``
    ex1, ex2 : expressions for the algebraic elements
    x : indeterminate of the polynomials
    dom: ground domain
    mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
    Examples
    ========
    >>> p1 = sqrt(sqrt(2) + 1)
    >>> p2 = sqrt(sqrt(2) - 1)
    >>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
    x - 1
    >>> q1 = sqrt(y)
    >>> q2 = 1 / y
    >>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
    x**2*y**2 - 2*x*y - y**3 + 1
    References
    ==========
    * https://en.wikipedia.org/wiki/Resultant
    * I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
    "Degrees of sums in a separable field extension".
    """
    y = Dummy(str(x))
    if mp1 is None:
        mp1 = _minpoly_compose(ex1, x, dom)
    if mp2 is None:
        mp2 = _minpoly_compose(ex2, y, dom)
    else:
        mp2 = mp2.subs({x: y})
    if op is Add:
        # mp1a = mp1.subs({x: x - y})
        (p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
        r = p1.compose(p2)
        mp1a = r.as_expr()
    elif op is Mul:
        mp1a = _muly(mp1, x, y)
    else:
        raise NotImplementedError('option not available')
    # eliminating y from mp1a and mp2 yields a polynomial in x that
    # vanishes at op(ex1, ex2); it may still be reducible
    r = resultant(mp1a, mp2, gens=[y, x])
    deg1 = degree(mp1, x)
    deg2 = degree(mp2, y)
    # NOTE: by operator precedence this reads
    # (op is Mul and deg1 == 1) or deg2 == 1 -- deliberate, see below
    if op is Mul and deg1 == 1 or deg2 == 1:
        # if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
        # r = mp2(x - a), so that `r` is irreducible
        return r
    r = r.as_poly(x, domain=dom)
    _, factors = r.factor_list()
    res = _choose_factor(factors, x, op(ex1, ex2), dom)
    return res.as_expr()
def _invertx(p, x):
    """Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``."""
    (poly,) = parallel_poly_from_expr((p,), x)[0]
    deg = degree(poly)
    # reverse the coefficient list: exponent i becomes deg - i
    reversed_terms = [coeff * x**(deg - exp) for (exp,), coeff in poly.terms()]
    return Add(*reversed_terms)
def _muly(p, x, y):
    """Returns ``_mexpand(y**deg*p.subs({x:x / y}))``."""
    (poly,) = parallel_poly_from_expr((p,), x)[0]
    deg = degree(poly)
    # homogenize: each term c*x**i becomes c*x**i*y**(deg - i)
    homogenized = [coeff * x**exp * y**(deg - exp) for (exp,), coeff in poly.terms()]
    return Add(*homogenized)
def _minpoly_pow(ex, pw, x, dom):
    """
    Returns ``minimal_polynomial(ex**pw)``
    Parameters
    ==========
    ex : algebraic element
    pw : rational number
    x : indeterminate of the polynomial
    dom: ground domain
    Examples
    ========
    >>> p = sqrt(1 + sqrt(2))
    >>> _minpoly_pow(p, 2, x, QQ)
    x**2 - 2*x - 1
    >>> minimal_polynomial(p**2)(x)
    x**2 - 2*x - 1
    >>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
    x**3 - y
    >>> minimal_polynomial(cbrt(y))(x)
    x**3 - y
    """
    pw = sympify(pw)
    mp = _minpoly_compose(ex, x, dom)
    if not pw.is_rational:
        raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
    if pw < 0:
        # handle negative exponents by inverting the minimal polynomial:
        # the minimal polynomial of 1/ex is the coefficient-reversed mp
        if mp == x:
            raise ZeroDivisionError(f'{ex} is zero')
        mp = _invertx(mp, x)
        if pw == -1:
            return mp
        pw = -pw
        ex = 1/ex
    y = Dummy(str(x))
    mp = mp.subs({x: y})
    n, d = pw.as_numer_denom()
    # eliminate y between mp(y) and x**d - y**n; the resultant vanishes
    # at ex**(n/d)
    res = resultant(mp, x**d - y**n, gens=[y]).as_poly(x, domain=dom)
    _, factors = res.factor_list()
    res = _choose_factor(factors, x, ex**pw, dom)
    return res.as_expr()
def _minpoly_add(x, dom, *a):
    """Returns ``minimal_polynomial(Add(*a), dom)``."""
    # fold the pairwise algorithm left-to-right over the summands,
    # reusing the minimal polynomial computed so far via mp1
    mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
    partial_sum = a[0] + a[1]
    for term in a[2:]:
        mp = _minpoly_op_algebraic_element(Add, partial_sum, term, x, dom, mp1=mp)
        partial_sum = partial_sum + term
    return mp
def _minpoly_mul(x, dom, *a):
    """Returns ``minimal_polynomial(Mul(*a), dom)``."""
    # fold the pairwise algorithm left-to-right over the factors,
    # reusing the minimal polynomial computed so far via mp1
    mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
    partial_product = a[0] * a[1]
    for factor in a[2:]:
        mp = _minpoly_op_algebraic_element(Mul, partial_product, factor, x, dom, mp1=mp)
        partial_product = partial_product * factor
    return mp
def _minpoly_sin(ex, x):
    """
    Returns the minimal polynomial of ``sin(ex)``
    see https://mathworld.wolfram.com/TrigonometryAngles.html

    Only rational multiples of pi are algebraic; anything else raises
    ``NotAlgebraic``.
    """
    c, a = ex.args[0].as_coeff_Mul()
    if a is pi:
        n = c.denominator
        q = sympify(n)
        if q.is_prime:
            # for a = pi*p/q with q odd prime, using chebyshevt
            # write sin(q*a) = mp(sin(a))*sin(a);
            # the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
            a = chebyshevt_poly(n, polys=True).all_coeffs()
            return Add(*[x**(n - i - 1)*a[n - i] for i in range(n)])
        if c.numerator == 1:
            if q == 9:
                # hard-coded minimal polynomial of sin(pi/9)
                return 64*x**6 - 96*x**4 + 36*x**2 - 3
        if n % 2 == 1:
            # for a = pi*p/q with q odd, use
            # sin(q*a) = 0 to see that the minimal polynomial must be
            # a factor of chebyshevt_poly(n)
            a = chebyshevt_poly(n, polys=True).all_coeffs()
            a = [x**(n - i)*a[n - i] for i in range(n + 1)]
            r = Add(*a)
            _, factors = factor_list(r)
            res = _choose_factor(factors, x, ex)
            return res
        # even denominator: rewrite via the half-angle identity
        # sin(t) = sqrt((1 - cos(2*t))/2) and recurse
        expr = sqrt((1 - cos(2*c*pi))/2)
        return _minpoly_compose(expr, x, QQ)
    raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_cos(ex, x):
    """
    Returns the minimal polynomial of ``cos(ex)``
    see https://mathworld.wolfram.com/TrigonometryAngles.html

    Only rational multiples of pi are algebraic; anything else raises
    ``NotAlgebraic``.
    """
    c, a = ex.args[0].as_coeff_Mul()
    if a is pi:
        if c.numerator == 1:
            # hard-coded minimal polynomials for cos(pi/7) and cos(pi/9)
            if c.denominator == 7:
                return 8*x**3 - 4*x**2 - 4*x + 1
            elif c.denominator == 9:
                return 8*x**3 - 6*x - 1
        elif c.numerator == 2:
            q = sympify(c.denominator)
            if q.is_prime:
                # relate cos(2*pi*p/q) to sin via cos(t) = 1 - 2*sin(t/2)**2
                s = _minpoly_sin(ex, x)
                return _mexpand(s.subs({x: sqrt((1 - x)/2)}))
        # for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
        n = int(c.denominator)
        a = chebyshevt_poly(n, polys=True).all_coeffs()
        a = [x**(n - i)*a[n - i] for i in range(n + 1)]
        r = Add(*a) - (-1)**c.numerator
        _, factors = factor_list(r)
        return _choose_factor(factors, x, ex)
    raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_tan(ex, x):
    """Returns the minimal polynomial of ``tan(ex)``.

    Only rational multiples of pi are algebraic; anything else raises
    ``NotAlgebraic``.
    """
    c, a = ex.args[0].as_coeff_Mul()
    if a is pi and c.is_Rational:
        c *= 2
        n = c.denominator
        a = n if c.numerator % 2 == 0 else 1
        # build a polynomial vanishing at tan(c*pi/2); the recurrence below
        # generates alternating binomial-type coefficients of the
        # tan multiple-angle relation -- TODO confirm derivation
        terms = []
        for k in range((c.numerator + 1) % 2, n + 1, 2):
            terms.append(a*x**k)
            a = -(a*(n - k - 1)*(n - k)) // ((k + 1)*(k + 2))
        r = Add(*terms)
        _, factors = factor_list(r)
        return _choose_factor(factors, x, ex)
    raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
def _minpoly_exp(ex, x):
    """Return the minimal polynomial of ``exp(ex)``.

    Only exponents that are rational multiples of ``I*pi`` (i.e. roots of
    unity) are algebraic; anything else raises ``NotAlgebraic``.
    """
    c, a = ex.exp.as_coeff_Mul()
    q = sympify(c.denominator)
    if a != I*pi:
        raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
    if c.numerator in (1, -1):
        # small denominators: hard-coded cyclotomic-style minimal polynomials
        if q == 3:
            return x**2 - x + 1
        elif q == 4:
            return x**4 + 1
        elif q == 6:
            return x**4 - x**2 + 1
        elif q == 8:
            return x**8 + 1
        elif q == 9:
            return x**6 - x**3 + 1
        elif q == 10:
            return x**8 - x**6 + x**4 - x**2 + 1
        elif q.is_prime:
            # for prime q: 1 - x + x**2 - ... + x**(q - 1)
            return Add(*[(-x)**k for k in range(q)])
    # generic case: exp(ex)**(2*q) == 1, so the minimal polynomial is the
    # cyclotomic factor of x**(2*q) - 1 that vanishes at exp(ex)
    return _choose_factor([cyclotomic_poly(i, x) for i in divisors(2*q)], x, ex)
def _minpoly_rootof(ex, x):
    """Returns the minimal polynomial of a ``RootOf`` object."""
    poly = ex.poly
    if poly.domain.is_IntegerRing:
        # an integer polynomial: just re-express it in x
        return poly(x)
    # algebraic coefficients: take the square-free norm to land in QQ
    return poly.sqf_norm()[-1](x)
def _minpoly_compose(ex, x, dom):
    """
    Computes the minimal polynomial of an algebraic element
    using operations on minimal polynomials
    Examples
    ========
    >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
    x**2 - 2*x - 1
    >>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
    x**2*y**2 - 2*x*y - y**3 + 1
    """
    # trivial/base cases first
    if ex.is_Rational:
        return ex.denominator*x - ex.numerator
    if ex is I:
        return x**2 + 1
    if ex is GoldenRatio:
        return x**2 - x - 1
    if ex == exp_polar(0):
        return x - 1
    if hasattr(dom, 'symbols') and ex in dom.symbols:
        return x - ex
    # fast path for sums of square roots over QQ
    if dom.is_RationalField and _is_sum_surds(ex):
        # eliminate the square roots
        ex -= x
        while 1:
            ex1 = _separate_sq(ex)
            if ex1 is ex:
                return ex
            else:
                ex = ex1
    # dispatch on the expression's head; complex arguments are handled
    # first (reverse count_ops order) to keep intermediate degrees small
    if ex.is_Add:
        res = _minpoly_add(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
    elif ex.is_Mul:
        f = Factors(ex).factors
        # separate rational bases raised to rational powers from the rest
        r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
        if r[True] and dom == QQ:
            ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
            r1 = r[True]
            dens = [y.denominator for _, y in r1]
            lcmdens = functools.reduce(lcm, dens, 1)
            nums = [base**(y.numerator*lcmdens // y.denominator) for base, y in r1]
            ex2 = Mul(*nums)
            mp1 = minimal_polynomial(ex1)(x)
            # use the fact that in Diofant canonicalization products of integers
            # raised to rational powers are organized in relatively prime
            # bases, and that in ``base**(n/d)`` a perfect power is
            # simplified with the root
            mp2 = ex2.denominator*x**lcmdens - ex2.numerator
            ex2 = Mul(*[bx**ex for bx, ex in r1])
            res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
        else:
            res = _minpoly_mul(x, dom, *sorted(ex.args, key=count_ops, reverse=True))
    elif ex.is_Pow:
        if ex.base is E:
            res = _minpoly_exp(ex, x)
        else:
            res = _minpoly_pow(ex.base, ex.exp, x, dom)
    elif isinstance(ex, sin):
        res = _minpoly_sin(ex, x)
    elif isinstance(ex, cos):
        res = _minpoly_cos(ex, x)
    elif isinstance(ex, tan):
        res = _minpoly_tan(ex, x)
    elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
        res = _minpoly_rootof(ex, x)
    elif isinstance(ex, conjugate):
        res = _minpoly_compose(ex.args[0], x, dom)
    elif isinstance(ex, Abs):
        # |z| = sqrt(z * conjugate(z))
        res = _minpoly_compose(sqrt(ex.args[0]*ex.args[0].conjugate()), x, dom)
    elif isinstance(ex, re):
        # re(z) = (z + conjugate(z))/2
        res = _minpoly_compose((ex.args[0] + ex.args[0].conjugate())/2, x, dom)
    elif isinstance(ex, im):
        # im(z) = (z - conjugate(z))/(2*I)
        res = _minpoly_compose((ex.args[0] - ex.args[0].conjugate())/2/I, x, dom)
    else:
        raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic element")
    return res
@cacheit
def minimal_polynomial(ex, method=None, **args):
    """
    Computes the minimal polynomial of an algebraic element.
    Parameters
    ==========
    ex : algebraic element expression
    method : str, optional
        If ``compose``, the minimal polynomial of the subexpressions
        of ``ex`` are computed, then the arithmetic operations on them are
        performed using the resultant and factorization. If ``groebner``,
        a bottom-up algorithm, using Gröbner bases is used.
        Defaults are determined by :func:`~diofant.config.setup`.
    domain : Domain, optional
        If no ground domain is given, it will be generated automatically
        from the expression.
    Examples
    ========
    >>> minimal_polynomial(sqrt(2))(x)
    x**2 - 2
    >>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
    x - sqrt(2)
    >>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
    x**4 - 10*x**2 + 1
    >>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
    x**3 + x + 3
    >>> minimal_polynomial(sqrt(y))(x)
    x**2 - y
    """
    if method is None:
        method = query('minpoly_method')
    _minpoly_methods = {'compose': _minpoly_compose, 'groebner': minpoly_groebner}
    try:
        _minpoly = _minpoly_methods[method]
    except KeyError:
        # Bug fix: the previous implicit string concatenation
        # ("... minimal " + "' polynomial'") produced a double space.
        raise ValueError(f"'{method}' is not a valid algorithm for "
                         'computing minimal polynomial')
    ex = sympify(ex)
    if ex.is_number:
        # not sure if it's always needed but try it for numbers (issue sympy/sympy#8354)
        ex = _mexpand(ex, recursive=True)
    x = Dummy('x')
    domain = args.get('domain',
                      QQ.inject(*ex.free_symbols).field if ex.free_symbols else QQ)
    result = _minpoly(ex, x, domain)
    # keep only the irreducible factor vanishing at ``ex`` and make it primitive
    _, factors = factor_list(result, x, domain=domain)
    result = _choose_factor(factors, x, ex, dom=domain)
    result = result.primitive()[1]
    return PurePoly(result, x, domain=domain)
def minpoly_groebner(ex, x, domain):
    """
    Computes the minimal polynomial of an algebraic number
    using Gröbner bases

    Examples
    ========

    >>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
    x**2 - 2*x - 1

    References
    ==========

    * :cite:`Adams1994intro`
    """
    # Fresh auxiliary symbols a0, a1, ... -- one per algebraic subexpression.
    generator = numbered_symbols('a', cls=Dummy)
    # mapping: subexpression -> its defining polynomial (in the aux symbol);
    # symbols: subexpression -> that auxiliary symbol.
    mapping, symbols = {}, {}
    def update_mapping(ex, exp, base=None):
        # Register `ex` with a defining relation and return its aux symbol.
        # With `base` given the relation is a**exp + base; otherwise `exp`
        # is itself a polynomial re-expressed in the aux symbol `a`.
        if ex in mapping:
            return symbols[ex]
        a = next(generator)
        symbols[ex] = a
        if base is not None:
            mapping[ex] = a**exp + base
        else:
            mapping[ex] = exp.as_expr(a)
        return a
    def bottom_up_scan(ex):
        # Recursively rewrite `ex` in terms of aux symbols, recording a
        # defining polynomial for every algebraic subexpression met.
        if ex.is_Atom:
            if ex is I:
                # I is a root of a**2 + 1.
                return update_mapping(ex, 2, 1)
            elif ex is GoldenRatio:
                return bottom_up_scan(ex.expand(func=True))
            elif ex.is_Rational:
                return ex
            elif ex.is_Symbol:
                return ex
        elif ex.is_Add or ex.is_Mul:
            return ex.func(*[bottom_up_scan(g) for g in ex.args])
        elif ex.is_Pow:
            if ex.exp.is_Rational:
                base, exp = ex.base, ex.exp
                if exp.is_nonnegative:
                    if exp.is_noninteger:
                        # Normalize to exponent 1/denominator before recursing.
                        base, exp = base**exp.numerator, Rational(1, exp.denominator)
                    base = bottom_up_scan(base)
                else:
                    # Negative exponent: model 1/base via its minimal polynomial.
                    bmp = PurePoly(minpoly_groebner(1/base, x, domain=domain), x)
                    base, exp = update_mapping(1/base, bmp), -exp
                # `ex` is a root of a**denominator - base**numerator.
                return update_mapping(ex, exp.denominator, -base**exp.numerator)
        elif isinstance(ex, RootOf) and ex.poly.domain.is_Numerical:
            if ex.poly.domain.is_IntegerRing:
                return update_mapping(ex, ex.poly)
            else:
                # Square-free norm yields a defining polynomial over the
                # base field -- presumably with rational coefficients; the
                # last sqf_norm() component is that norm polynomial.
                return update_mapping(ex, ex.poly.sqf_norm()[-1])
        elif isinstance(ex, conjugate):
            return update_mapping(ex, minimal_polynomial(ex.args[0], domain=domain,
                                                         method='groebner'))
        elif isinstance(ex, Abs):
            # |z| = sqrt(z * conj(z))
            return bottom_up_scan(sqrt(ex.args[0]*ex.args[0].conjugate()))
        elif isinstance(ex, re):
            # re(z) = (z + conj(z))/2
            return bottom_up_scan((ex.args[0] + ex.args[0].conjugate())/2)
        elif isinstance(ex, im):
            # im(z) = (z - conj(z))/(2*I)
            return bottom_up_scan((ex.args[0] - ex.args[0].conjugate())/2/I)
        raise NotAlgebraic(f"{ex} doesn't seem to be an algebraic number")
    # Express `ex` as n/d with exactly one of n, d non-trivial, then solve
    # the system {d*x - n = 0} + defining relations by a lex Gröbner basis.
    if ex.is_Pow and ex.exp.is_negative:
        n, d = Integer(1), bottom_up_scan(1/ex)
    else:
        n, d = bottom_up_scan(ex), Integer(1)
    F = [d*x - n] + list(mapping.values())
    G = groebner(F, *(list(symbols.values()) + [x]), order='lex', domain=domain)
    return G[-1]  # by construction G[-1] has root `ex`
def primitive_element(extension, **args):
    """Construct a common number field for all extensions.

    Returns a triple ``(g, coeffs, H)``: ``g`` is the (monic, integer)
    minimal polynomial of the primitive element
    ``t = sum(c*e for c, e in zip(coeffs, extension))`` and each entry of
    ``H`` lists coefficients expressing the corresponding extension element
    in terms of ``t``.

    References
    ==========

    * :cite:`Yokoyama1989primitive`
    * :cite:`Arno1996alg`
    """
    if not extension:
        raise ValueError("can't compute primitive element for empty extension")
    extension = list(uniq(extension))
    x = Dummy('x')
    domain = args.get('domain', QQ)
    F = [minimal_polynomial(e, domain=domain) for e in extension]
    Y = [p.gen for p in F]
    # Try candidate primitive elements t = sum(u**n * e_n); the range bound
    # limits how many values of u need to be considered.
    for u in range(1, (len(F) - 1)*math.prod(f.degree() for f in F) + 1):
        coeffs = [u**n for n in range(len(Y))]
        f = x - sum(c*y for c, y in zip(coeffs, Y))
        # Lex Gröbner basis; last element is (a candidate for) the minimal
        # polynomial of t, the others express the Y's in terms of x.
        *H, g = groebner(F + [f], *(Y + [x]), domain=domain)
        for i, (h, y) in enumerate(zip(H, Y)):
            H[i] = (y - h).eject(*Y).retract(field=True)
            if not (H[i].domain.is_RationalField or H[i].domain.is_AlgebraicField):
                break  # G is not a triangular set
            else:
                H[i] = H[i].set_domain(domain)
        else:
            g = g.eject(*Y).set_domain(domain)
            break
    else:
        if len(F) == 1:
            # Single extension: it is its own primitive element.
            g, coeffs, H = F[0].replace(x), [Integer(1)], [x.as_poly(domain=domain)]
        else:  # pragma: no cover
            raise RuntimeError('run out of coefficient configurations')
    # Keep only the irreducible factor of g actually vanishing at t.
    _, factors = factor_list(g, domain=domain)
    t = sum(c*e for c, e in zip(coeffs, extension))
    g = _choose_factor(factors, x, t, dom=domain)
    H = [h.rem(g).rep.all_coeffs() for h in H]
    _, g = PurePoly(g).clear_denoms(convert=True)
    if g.LC() != 1:
        # Rescale x -> x/d to force a monic polynomial with integer
        # coefficients, adjusting H and coeffs to match the scaled root.
        for d in divisors(g.LC())[1:]:  # pragma: no branch
            new_g = g.compose((g.gen/d).as_poly())*d**g.degree()//d
            _, new_g = new_g.monic().clear_denoms(convert=True)
            if new_g.LC() == 1:
                g = new_g
                H = [[c/d**n for n, c in enumerate(h)] for h in H]
                coeffs = [c*d for c in coeffs]
                break
    return g, list(coeffs), H
def field_isomorphism_pslq(a, b):
    """Construct field isomorphism using PSLQ algorithm.

    Numerically searches for rational coefficients expressing ``a.ext`` as
    a polynomial in ``b.ext``; any candidate found is verified exactly.
    Returns the coefficient list, or ``None`` if no relation is found.
    """
    if not all(_.domain.is_RationalField and _.ext.is_real for _ in (a, b)):
        raise NotImplementedError("PSLQ doesn't support complex coefficients")
    f = a.minpoly
    x = f.gen
    g = b.minpoly.replace(x)
    m = g.degree()
    a, b = a.ext, b.ext
    # Increase the working precision geometrically until PSLQ either finds
    # an integer relation or we give up at the precision cap.
    for n in mpmath.libmp.libintmath.giant_steps(32, 256):  # pragma: no branch
        with mpmath.workdps(n):
            # Numeric values of both extension elements at the current dps.
            A, B = lambdify((), [a, b], 'mpmath')()
            basis = [B**i for i in range(m)] + [A]
            coeffs = mpmath.pslq(basis, maxcoeff=10**10, maxsteps=10**3)
            if coeffs:
                assert coeffs[-1]  # basis[:-1] elements are linearly independent
                # Candidate polynomial h with h(b) == a; check exactly that
                # f(h(x)) vanishes modulo g before trusting the numerics.
                h = -Poly(coeffs[:-1], x, field=True).quo_ground(coeffs[-1])
                if f.compose(h).rem(g).is_zero:
                    return h.rep.all_coeffs()
                else:
                    # Spurious relation at this precision: stop searching.
                    break
def field_isomorphism_factor(a, b):
    """Construct field isomorphism via factorization.

    Factors the minimal polynomial of ``a`` over the field ``b``; a linear
    factor whose root numerically matches ``a.ext`` yields the embedding.
    Returns the coefficient list, or ``None`` when no linear factor fits.
    """
    minpoly_over_b = a.minpoly.set_domain(b)
    factors = minpoly_over_b.factor_list()[1]
    for factor, _multiplicity in factors:
        if factor.degree() != 1:
            continue
        # Root of the linear factor c1*x + c0 is -c0/c1.
        candidate = -factor.rep[(0,)]/factor.rep[(1,)]
        if (a.ext - b.to_expr(candidate)).evalf(chop=True) == 0:
            return candidate.rep.all_coeffs()
def field_isomorphism(a, b, **args):
    """Construct an isomorphism between two number fields.

    Cheap necessary conditions (degree divisibility, discriminant primes)
    are checked first; then PSLQ (unless ``fast=False``) and finally
    factorization are tried. Returns ``None`` when no embedding exists.
    """
    if not all(isinstance(field, AlgebraicField) for field in (a, b)):
        raise ValueError(f'Arguments should be algebraic fields, got {a} and {b}')

    if a == b:
        return a.unit.rep.all_coeffs()

    deg_a = a.minpoly.degree()
    deg_b = b.minpoly.degree()

    if a.domain == b.domain:
        # The degree of a subfield must divide the degree of the field.
        if deg_b % deg_a:
            return None
    elif a.domain.is_RationalField:
        disc_a = a.minpoly.discriminant()
        disc_b = b.minpoly.discriminant()
        k = deg_b // deg_a
        # An odd-multiplicity prime of disc(a) must show up in disc(b)**?;
        # if p**k does not divide disc_b, no embedding is possible.
        for prime, multiplicity in factorint(disc_a).items():
            if multiplicity % 2 and disc_b % prime**k:
                return None

    if args.get('fast', True):
        try:
            coeffs = field_isomorphism_pslq(a, b)
            if coeffs is not None:
                return coeffs
        except NotImplementedError:
            # PSLQ cannot handle this field (e.g. complex); fall through.
            pass

    return field_isomorphism_factor(a, b)
| diofant/polys/numberfields.py | 25,170 | Return a factor having root ``v``
It is assumed that one of the factors has root ``v``.
Returns ``expand_mul(x**degree(p, x)*p.subs({x: 1/x}))``.
Returns the minimal polynomial for the ``nth-root`` of a sum of surds
or ``None`` if it fails.
Parameters
==========
p : sum of surds
n : positive integer
x : variable of the returned polynomial
Examples
========
>>> q = 1 + sqrt(2) + sqrt(3)
>>> _minimal_polynomial_sq(q, 3, x)
x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
Returns ``minimal_polynomial(Add(*a), dom)``.
Computes the minimal polynomial of an algebraic element
using operations on minimal polynomials
Examples
========
>>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), method='compose')(x)
x**2 - 2*x - 1
>>> minimal_polynomial(sqrt(y) + 1/y, method='compose')(x)
x**2*y**2 - 2*x*y - y**3 + 1
Returns the minimal polynomial of ``cos(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
Returns the minimal polynomial of ``exp(ex)``.
Returns ``minimal_polynomial(Mul(*a), dom)``.
Return the minimal polynomial for ``op(ex1, ex2)``.
Parameters
==========
op : operation ``Add`` or ``Mul``
ex1, ex2 : expressions for the algebraic elements
x : indeterminate of the polynomials
dom: ground domain
mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
Examples
========
>>> p1 = sqrt(sqrt(2) + 1)
>>> p2 = sqrt(sqrt(2) - 1)
>>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
x - 1
>>> q1 = sqrt(y)
>>> q2 = 1 / y
>>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.inject(y).field)
x**2*y**2 - 2*x*y - y**3 + 1
References
==========
* https://en.wikipedia.org/wiki/Resultant
* I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
"Degrees of sums in a separable field extension".
Returns ``minimal_polynomial(ex**pw)``
Parameters
==========
ex : algebraic element
pw : rational number
x : indeterminate of the polynomial
dom: ground domain
Examples
========
>>> p = sqrt(1 + sqrt(2))
>>> _minpoly_pow(p, 2, x, QQ)
x**2 - 2*x - 1
>>> minimal_polynomial(p**2)(x)
x**2 - 2*x - 1
>>> _minpoly_pow(y, Rational(1, 3), x, QQ.inject(y).field)
x**3 - y
>>> minimal_polynomial(cbrt(y))(x)
x**3 - y
Returns the minimal polynomial of a ``RootOf`` object.
Returns the minimal polynomial of ``sin(ex)``
see https://mathworld.wolfram.com/TrigonometryAngles.html
Returns the minimal polynomial of ``tan(ex)``.
Returns ``_mexpand(y**deg*p.subs({x:x / y}))``.
Helper function for ``_minimal_polynomial_sq``.
It selects a rational ``g`` such that the polynomial ``p``
consists of a sum of terms whose surds squared have gcd equal to ``g``
and a sum of terms with surds squared prime with ``g``;
then it takes the field norm to eliminate ``sqrt(g)``
See simplify.simplify.split_surds and polytools.sqf_norm.
Examples
========
>>> p = -x + sqrt(2) + sqrt(3) + sqrt(7)
>>> p = _separate_sq(p)
>>> p
-x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
>>> p = _separate_sq(p)
>>> p
-x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
>>> p = _separate_sq(p)
>>> p
-x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
Construct an isomorphism between two number fields.
Construct field isomorphism via factorization.
Construct field isomorphism using PSLQ algorithm.
Computes the minimal polynomial of an algebraic element.
Parameters
==========
ex : algebraic element expression
method : str, optional
If ``compose``, the minimal polynomial of the subexpressions
of ``ex`` are computed, then the arithmetic operations on them are
performed using the resultant and factorization. If ``groebner``,
a bottom-up algorithm, using Gröbner bases is used.
Defaults are determined by :func:`~diofant.config.setup`.
domain : Domain, optional
If no ground domain is given, it will be generated automatically
from the expression.
Examples
========
>>> minimal_polynomial(sqrt(2))(x)
x**2 - 2
>>> minimal_polynomial(sqrt(2), domain=QQ.algebraic_field(sqrt(2)))(x)
x - sqrt(2)
>>> minimal_polynomial(sqrt(2) + sqrt(3))(x)
x**4 - 10*x**2 + 1
>>> minimal_polynomial(solve(x**3 + x + 3)[0][x])(x)
x**3 + x + 3
>>> minimal_polynomial(sqrt(y))(x)
x**2 - y
Computes the minimal polynomial of an algebraic number
using Gröbner bases
Examples
========
>>> minimal_polynomial(sqrt(2) + 1, method='groebner')(x)
x**2 - 2*x - 1
References
==========
* :cite:`Adams1994intro`
Construct a common number field for all extensions.
References
==========
* :cite:`Yokoyama1989primitive`
* :cite:`Arno1996alg`
Computational algebraic field theory.
p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)] there are no surds pragma: no branch eliminate the square roots by construction `p` has root `pn` the minimal polynomial is the factor vanishing in x = pn mp1a = mp1.subs({x: x - y}) if deg1 = 1, then mp1 = x - a; mp1a = x - y - a; r = mp2(x - a), so that `r` is irreducible for a = pi*p/q with q odd prime, using chebyshevt write sin(q*a) = mp(sin(a))*sin(a); the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1 for a = pi*p/q with q odd, use sin(q*a) = 0 to see that the minimal polynomial must be a factor of chebyshevt_poly(n) for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p x**(2*q) = product(factors) eliminate the square roots use the fact that in Diofant canonicalization products of integers raised to rational powers are organized in relatively prime bases, and that in ``base**(n/d)`` a perfect power is simplified with the root not sure if it's always needed but try it for numbers (issue sympy/sympy8354) by construction G[-1] has root `ex` G is not a triangular set pragma: no cover pragma: no branch pragma: no branch basis[:-1] elements are linearly independent | 5,618 | en | 0.591891 |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag,Recipe
from recipe.serializers import TagSerializer
# Reversed URL of the tag list/create endpoint in the 'recipe' app namespace.
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the tags API when no user is authenticated."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An anonymous request to the tag list endpoint is rejected."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Tests for the tags API with an authenticated user."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@mytest.com',
            'password'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns all of them, ordered by name descending."""
        for tag_name in ('Vegan', 'Dessert'):
            Tag.objects.create(user=self.user, name=tag_name)

        response = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only tags owned by the authenticated user are returned."""
        other_user = get_user_model().objects.create_user(
            'thatmail@mytest.com',
            'testpass'
        )
        Tag.objects.create(user=other_user, name='Tasty')
        own_tag = Tag.objects.create(user=self.user, name='Just Food')

        response = self.client.get(TAGS_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid payload creates the tag for the current user."""
        payload = {'name': 'Simple'}
        self.client.post(TAGS_URL, payload)

        created = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(created)

    def test_create_tag_invalid(self):
        """POSTing an empty name is rejected with a 400 response."""
        response = self.client.post(TAGS_URL, {'name': ''})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_tags_assigned_to_recipes(self):
        """assigned_only=1 restricts the list to tags used by recipes."""
        breakfast = Tag.objects.create(user=self.user, name='Breakfast')
        lunch = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user,
        )
        recipe.tags.add(breakfast)

        response = self.client.get(TAGS_URL, {'assigned_only': 1})

        self.assertIn(TagSerializer(breakfast).data, response.data)
        self.assertNotIn(TagSerializer(lunch).data, response.data)

    def test_retrieve_tags_assigned_unique(self):
        """A tag attached to several recipes appears only once."""
        tag = Tag.objects.create(user=self.user, name='Breakfast')
        Tag.objects.create(user=self.user, name='Lunch')
        for title, minutes, cost in (('Pancakes', 5, 3.00),
                                     ('Porridge', 3, 2.00)):
            recipe = Recipe.objects.create(
                title=title,
                time_minutes=minutes,
                price=cost,
                user=self.user
            )
            recipe.tags.add(tag)

        response = self.client.get(TAGS_URL, {'assigned_only': 1})

        self.assertEqual(len(response.data), 1)
| app/recipe/tests/test_tags_api.py | 3,976 | Test the authorized user tags API
Test the publicly available tags API
Test creating a new tag with invalid payload
Test creating a new tag
Test that login required for retrieving tags
Test retrieving tags
Test filtering tags by those assigned to recipes
Test filtering tags by assigned returns unique items
Test that tags returned are for authenticated user | 358 | en | 0.638873 |
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class InvalidSurveyColumnsTests(PyxformTestCase):
    """Validation of required survey-sheet columns."""

    def test_missing_name(self):
        """
        every question needs a name (or alias of name)
        """
        self.assertPyxformXform(
            name='invalidcols',
            ss_structure={'survey': [{'type': 'text',
                                      'label': 'label'}]},
            errored=True,
            error__contains=['no name'],
        )

    def test_missing_name_but_has_alias_of_name(self):
        """A 'value' column is accepted as an alias of 'name'."""
        self.assertPyxformXform(
            name='invalidcols',
            ss_structure={'survey': [{'value': 'q1',
                                      'type': 'text',
                                      'label': 'label'}]},
            errored=False,
        )

    def test_missing_label(self):
        """A question with neither label nor hint is rejected."""
        self.assertPyxformXform(
            name="invalidcols",
            ss_structure={'survey': [{'type': 'text',
                                      'name': 'q1'}]},
            errored=True,
            error__contains=['no label or hint'],
        )

    def test_column_case(self):
        """
        Ensure that column name is case insensitive
        """
        # Removed leftover `debug=True` -- a development aid not needed for
        # the assertion, and noisy on every test run.
        self.assertPyxformXform(
            name="mixedcasecolumns",
            md="""
            | Survey | | | |
            | | Type | name | Label |
            | | text | Name | the name |
            | | integer | age | the age |
            | | text | gender | the gender |
            """,
            errored=False,
        )
class InvalidChoiceSheetColumnsTests(PyxformTestCase):
    """Validation of required choices-sheet columns."""

    def _simple_choice_ss(self, choice_sheet=None):
        """Build a minimal survey dict wrapping *choice_sheet* (default [])."""
        if choice_sheet is None:
            choice_sheet = []
        return {'survey': [{'type': 'select_one l1',
                            'name': 'l1choice',
                            'label': 'select one from list l1'}],
                'choices': choice_sheet}

    def test_valid_choices_sheet_passes(self):
        """A well-formed choices sheet compiles without error."""
        self.assertPyxformXform(
            name='valid_choices',
            ss_structure=self._simple_choice_ss([
                {'list_name': 'l1',
                 'name': 'c1',
                 'label': 'choice 1'},
                {'list_name': 'l1',
                 'name': 'c2',
                 'label': 'choice 2'}]),
            errored=False,
        )

    def test_invalid_choices_sheet_fails(self):
        """Choices missing the 'name' column are rejected."""
        self.assertPyxformXform(
            name='missing_name',
            ss_structure=self._simple_choice_ss([
                {'list_name': 'l1',
                 'label': 'choice 1'},
                {'list_name': 'l1',
                 'label': 'choice 2'},
            ]),
            errored=True,
            error__contains=['option with no name'],
        )

    def test_missing_list_name(self):
        """Choices missing the 'list_name' column produce a descriptive error."""
        # Removed leftover `debug=True` -- a development aid not needed for
        # the assertion.
        self.assertPyxformXform(
            name='missing_list_name',
            ss_structure=self._simple_choice_ss([
                {'bad_column': 'l1',
                 'name': 'l1c1',
                 'label': 'choice 1'},
                {'bad_column': 'l1',
                 'name': 'l1c1',
                 'label': 'choice 2'},
            ]),
            errored=True,
            # some basic keywords that should be in the error:
            error__contains=[
                'choices',
                'name',
                'list name',
            ])
class AliasesTests(PyxformTestCase):
    """Column-name aliases accepted by the XLSForm parser."""
    def test_value_and_name(self):
        '''
        confirm that both 'name' and 'value' columns of choice list work
        '''
        # Compile the same form twice, once per alias; both spellings of the
        # choices name column must yield identical select1 output.
        for name_alias in ['name', 'value']:
            self.assertPyxformXform(
                name="aliases",
                md="""
                | survey  |               |                |            |
                |         | type          | name           | label      |
                |         | select_one yn | q1             | Question 1 |
                | choices |               |                |            |
                |         | list name     | %(name_alias)s | label      |
                |         | yn            | yes            | Yes        |
                |         | yn            | no             | No         |
                """ % ({
                    u'name_alias': name_alias
                }),
                instance__contains=[
                    '<q1/>',
                ],
                model__contains=[
                    '<bind nodeset="/aliases/q1" type="select1"/>',
                ],
                xml__contains=[
                    '<select1 ref="/aliases/q1">',
                    '<value>yes</value>',
                    '<value>no</value>',
                    '</select1>',
                ])
    # The following disabled test is kept verbatim as a bare string literal
    # (effectively a no-op) until the aliasing conflict check is re-implemented.
    ''' # uncomment when re-implemented
    # TODO: test that this fails for the correct reason
    def test_conflicting_aliased_values_raises_error(self):
        # example:
        # an xlsform has {"name": "q_name", "value": "q_value"}
        # should not compile because "name" and "value" columns are aliases
        self.assertPyxformXform(
            # debug=True,
            name="aliases",
            md="""
            | survey | | | | |
            | | type | name | value | label |
            | | text | q_name | q_value | Question 1 |
            """,
            errored=True,
        )
    '''
| pyxform/tests_v1/test_sheet_columns.py | 5,588 | Ensure that column name is case insensitive
every question needs a name (or alias of name)
confirm that both 'name' and 'value' columns of choice list work
some basic keywords that should be in the error: | 206 | en | 0.877587 |
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from {{cookiecutter.app_name}}.config import app_config
from {{cookiecutter.app_name}}.models import db, bcrypt
from {{cookiecutter.app_name}}.resources import Login, Register
from {{cookiecutter.app_name}}.schemas import ma
def create_app(env_name):
    """Application factory: build and configure the Flask app.

    :param str env_name: key into ``app_config`` selecting the config class.
    :return: the configured :class:`~flask.Flask` application with
        extensions initialized and auth routes registered.
    """
    # App initialization
    app = Flask(__name__)
    CORS(app)
    app.config.from_object(app_config[env_name])

    # Initializing bcrypt, db, and marshmallow
    bcrypt.init_app(app)
    db.init_app(app)
    ma.init_app(app)

    # Database migrations: expose the `db` command group via Flask-Script.
    # `Migrate(...)` is kept for its side effect of registering with the app.
    migrate = Migrate(app, db)
    manager = Manager(app)
    manager.add_command('db', MigrateCommand)
    # BUG FIX: the original ran `manager.run()` here behind an
    # `if __name__ == '__main__'` guard. Inside the factory that would start
    # the CLI *before* any route is registered when the module is executed
    # directly; the guard belongs at module level, so it was removed.

    # Routes
    api = Api(app)

    # User endpoints
    api.add_resource(Login, '/auth/login')
    api.add_resource(Register, '/auth/register')

    return app
| {{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | 986 | app initiliazation initializing bcrypt and db Route user endpoint | 65 | en | 0.699814 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq import distributed_utils
# Configure root logging once at import time; fairseq CLI tools share this
# timestamped format.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)
# Module-level logger for this CLI entry point.
logger = logging.getLogger('fairseq_cli.eval_lm')
class WordStat(object):
    """Accumulates scoring statistics for a single word type.

    Tracks the summed log-probability of the word, the summed probability
    of whichever word follows it, an occurrence count, and how many
    occurrences had no scored next word.
    """

    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe
        self.log_prob = 0
        self.next_word_prob = 0
        self.count = 0
        self.missing_next_words = 0

    def add(self, log_prob, next_word_prob):
        """Record one occurrence of the word.

        Adds *log_prob* to the running sum and, when *next_word_prob* is
        available, accumulates it too; otherwise counts the occurrence as
        missing a next word (e.g. the word ended the example or the next
        token was a non-final subword unit).
        """
        if next_word_prob is None:
            self.missing_next_words += 1
        else:
            self.next_word_prob += next_word_prob
        self.log_prob += log_prob
        self.count += 1

    def __str__(self):
        # Tab-separated summary: word, count, log prob, bpe flag,
        # next-word prob, and how many occurrences had a scored next word.
        fields = (self.word, self.count, self.log_prob, self.is_bpe,
                  self.next_word_prob, self.count - self.missing_next_words)
        return '\t'.join(str(field) for field in fields)
def main(parsed_args, **unused_kwargs):
    """Evaluate language-model perplexity on the ``gen_subset`` split.

    Loads the checkpoint ensemble at ``parsed_args.path``, scores the split
    token by token, and logs the average loss (base 2) and perplexity.
    Optionally emits per-word probabilities and statistics when
    ``--output-word-probs`` / ``--output-word-stats`` are set.
    """
    assert parsed_args.path is not None, '--path required for evaluation!'

    if torch.cuda.is_available() and not parsed_args.cpu:
        torch.cuda.set_device(parsed_args.device_id)

    utils.import_user_module(parsed_args)

    logger.info(parsed_args)

    if parsed_args.ipex:
        import intel_pytorch_extension as ipex
        # BUG FIX: these flags were read from `args`, which is only bound
        # after load_model_ensemble() below -- referencing it here raised a
        # NameError whenever --ipex was given.
        if parsed_args.dnnl:
            ipex.core.enable_auto_dnnl()
        else:
            ipex.core.disable_auto_dnnl()
        if parsed_args.mix_precision:
            ipex.core.enable_mix_bf16_fp32()

    use_cuda = torch.cuda.is_available() and not parsed_args.cpu

    task = tasks.setup_task(parsed_args)

    # Load ensemble
    logger.info('loading model(s) from {}'.format(parsed_args.path))
    models, args = checkpoint_utils.load_model_ensemble(
        parsed_args.path.split(os.pathsep),
        arg_overrides=eval(parsed_args.model_overrides),
        task=task,
        suffix=getattr(parsed_args, "checkpoint_suffix", ""),
    )

    # Overlay command-line options onto the checkpoint's saved args, except
    # the target/data settings that must come from the checkpoint itself.
    for arg in vars(parsed_args).keys():
        if arg not in {
            'self_target', 'future_target', 'past_target', 'tokens_per_sample',
            'output_size_dictionary', 'add_bos_token',
        }:
            setattr(args, arg, getattr(parsed_args, arg))

    # reduce tokens per sample by the required context window size
    args.tokens_per_sample -= args.context_window
    task = tasks.setup_task(args)

    # Load dataset splits
    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    if args.context_window > 0:
        dataset = LMContextWindowDataset(
            dataset=dataset,
            tokens_per_sample=args.tokens_per_sample,
            context_window=args.context_window,
            pad_idx=task.source_dictionary.pad(),
        )
    logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))

    # Optimize ensemble for generation and set the source and dest dicts on
    # the model (required by scorer)
    for model in models:
        model.prepare_for_inference_(args)
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
        if args.ipex:
            model = model.to(device=ipex.DEVICE)

    assert len(models) > 0

    logger.info('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))

    itr = task.get_batch_iterator(
        dataset=dataset,
        max_tokens=args.max_tokens or 36000,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(*[
            model.max_positions() for model in models
        ]),
        ignore_invalid_inputs=True,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )

    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)

    score_sum = 0.
    count = 0

    # Precompute the dictionary indices of word-continuation subword units so
    # their scores can be folded into the token that completes the word.
    if args.remove_bpe is not None:
        if args.remove_bpe == 'sentencepiece':
            raise NotImplementedError
        else:
            bpe_cont = args.remove_bpe.rstrip()
            bpe_toks = {
                i
                for i in range(len(task.source_dictionary))
                if task.source_dictionary[i].endswith(bpe_cont)
            }
            bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0

    word_stats = dict()

    wps_meter = TimeMeter()

    for sample in progress:
        if 'net_input' not in sample:
            continue

        sample = utils.move_to_cuda(sample) if use_cuda else sample
        sample = utils.move_to_ipex(sample) if args.ipex else sample

        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample['ntokens'])

        for i, hypos_i in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample['id'][i]

            tokens = hypo['tokens']
            tgt_len = tokens.numel()
            pos_scores = hypo['positional_scores'].float()

            if args.add_bos_token:
                assert hypo['tokens'][0].item() == task.target_dictionary.bos()
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]

            # Fold each continuation subword's score into the next token so
            # probabilities are per-word rather than per-subword.
            # (loop index renamed from `i`, which shadowed the enclosing one)
            skipped_toks = 0
            if bpe_toks is not None:
                for j in range(tgt_len - 1):
                    if tokens[j].item() in bpe_toks:
                        skipped_toks += 1
                        pos_scores[j + 1] += pos_scores[j]
                        pos_scores[j] = 0

            inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
            if inf_scores.any():
                # BUG FIX: logging args are lazy %-style parameters; the
                # original message had no placeholder, so the token string was
                # never rendered (logging reported a formatting error).
                logger.info(
                    'skipping tokens with inf scores: %s',
                    task.target_dictionary.string(tokens[inf_scores.nonzero()])
                )
                pos_scores = pos_scores[(~inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += pos_scores.numel() - skipped_toks

            if args.output_word_probs or args.output_word_stats:
                w = ''
                word_prob = []
                is_bpe = False
                for k in range(len(tokens)):
                    w_ind = tokens[k].item()
                    w += task.source_dictionary[w_ind]
                    if bpe_toks is not None and w_ind in bpe_toks:
                        # Still inside a word: strip the continuation marker.
                        w = w[:-bpe_len]
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[k].item()))

                        # The next word's score is the next nonzero position
                        # (zeros were folded into their word-final token).
                        next_prob = None
                        ind = k + 1
                        while ind < len(tokens):
                            if pos_scores[ind].item() != 0:
                                next_prob = pos_scores[ind]
                                break
                            ind += 1

                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[k].item(), next_prob)
                        is_bpe = False
                        w = ''
                if args.output_word_probs:
                    logger.info(
                        str(int(sample_id)) + " "
                        + ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
                    )

        wps_meter.update(sample['ntokens'])
        progress.log({'wps': round(wps_meter.avg)})

    avg_nll_loss = -score_sum / count / math.log(2)  # convert to base 2
    logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(
        gen_timer.n, gen_timer.sum, 1. / gen_timer.avg
    ))
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(
        avg_nll_loss, 2**avg_nll_loss
    ))

    if args.output_word_stats:
        for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
            logger.info(ws)
def cli_main():
    """Parse eval-lm command-line arguments and dispatch to :func:`main`."""
    eval_lm_parser = options.get_eval_lm_parser()
    parsed = options.parse_args_and_arch(eval_lm_parser)
    distributed_utils.call_main(parsed, main)
# Script entry point: delegate to the argparse-driven CLI wrapper.
if __name__ == '__main__':
    cli_main()
| fairseq_cli/eval_lm.py | 9,144 | increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen
Evaluate the perplexity of a trained language model.
!/usr/bin/env python3 -u Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. Load ensemble reduce tokens per sample by the required context window size Load dataset splits Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer) convert to base 2 | 753 | en | 0.902069 |
# A single backslash character, for composing escape sequences without
# cluttering literals with doubled backslashes.
BACKSLASH = '\\'
class MiniString(object):
    """
    Create a representation of a string object

    :param str string: The string to minify
    """

    def __init__(self, string, quote="'"):
        self._s = string
        self.safe_mode = False
        self.quote = quote

    def __str__(self):
        """
        The smallest python literal representation of a string

        :rtype: str
        """
        if self._s == '':
            return ''

        s = self._quoted()
        try:
            eval(self.quote + s + self.quote)
        except UnicodeDecodeError:
            # BUG FIX: the original checked/set `self._safe_mode`, which never
            # exists (__init__ creates `safe_mode`), and never re-encoded the
            # string after enabling safe mode -- so the retry could not work.
            if self.safe_mode:
                raise
            self.safe_mode = True
            s = self._quoted()
        assert eval(self.quote + s + self.quote) == self._s
        return s

    def _quoted(self):
        """Dispatch to the short- or long-quote encoder based on self.quote."""
        return self.to_short() if len(self.quote) == 1 else self.to_long()

    def to_short(self):
        """Encode for a single-character quote delimiter."""
        escaped = {
            '\n': '\\n',
            '\\': '\\\\',
            '\a': '\\a',
            '\b': '\\b',
            '\f': '\\f',
            '\r': '\\r',
            '\t': '\\t',
            '\v': '\\v',
            '\0': '\\x00',
            self.quote: '\\' + self.quote,
        }
        return self._escape(escaped)

    def to_long(self):
        """Encode for a triple-quote delimiter; newlines stay literal."""
        escaped = {
            '\\': '\\\\',
            '\a': '\\a',
            '\b': '\\b',
            '\f': '\\f',
            '\r': '\\r',
            '\t': '\\t',
            '\v': '\\v',
            '\0': '\\x00',
            self.quote[0]: '\\' + self.quote[0],
        }
        return self._escape(escaped)

    def _escape(self, escaped):
        """Translate the string through *escaped*; in safe mode non-ASCII
        characters become \\uXXXX / \\UXXXXXXXX escapes."""
        out = []
        for c in self._s:
            if c in escaped:
                out.append(escaped[c])
            elif self.safe_mode and ord(c) > 0x7F:
                code = ord(c)
                if code <= 0xFFFF:
                    out.append('\\u' + format(code, '04x'))
                else:
                    out.append('\\U' + format(code, '08x'))
            else:
                out.append(c)
        return ''.join(out)
class MiniBytes(object):
    """
    Create a representation of a bytes object

    :param bytes string: The string to minify
    """

    def __init__(self, string, quote="'"):
        self._b = string
        self.quote = quote

    def __str__(self):
        """
        The smallest python literal representation of a bytes object

        :rtype: str
        """
        if self._b == b'':
            return ''

        if len(self.quote) == 1:
            s = self.to_short()
        else:
            s = self.to_long()

        assert eval('b' + self.quote + s + self.quote) == self._b
        return s

    def to_short(self):
        """Encode for a single-character quote delimiter.

        BUG FIX: iterating a bytes object yields ints, so the original
        comparisons against one-byte bytes objects (and against the quote
        string) were always False -- backslashes, newlines and quotes were
        never escaped and the round-trip assert in __str__ failed. High and
        control bytes are now emitted as \\xNN escapes, since raw non-ASCII
        bytes are not valid in a bytes literal.
        """
        quote_cp = ord(self.quote)
        out = []
        for c in self._b:
            if c == 0x5C:           # backslash
                out.append('\\\\')
            elif c == 0x0A:         # newline must be escaped in short quotes
                out.append('\\n')
            elif c == quote_cp:
                out.append('\\' + self.quote)
            elif 0x20 <= c < 0x7F:  # printable ASCII passes through
                out.append(chr(c))
            else:
                out.append('\\x' + format(c, '02x'))
        return ''.join(out)

    def to_long(self):
        """Encode for a triple-quote delimiter; newlines stay literal."""
        quote_cp = ord(self.quote[0])
        out = []
        for c in self._b:
            if c == 0x5C:
                out.append('\\\\')
            elif c == 0x0A:         # valid unescaped inside triple quotes
                out.append('\n')
            elif c == quote_cp:
                out.append('\\' + self.quote[0])
            elif 0x20 <= c < 0x7F:
                out.append(chr(c))
            else:
                out.append('\\x' + format(c, '02x'))
        return ''.join(out)
| src/python_minifier/ministring.py | 4,227 | Create a representation of a bytes object
:param bytes string: The string to minify
Create a representation of a string object
:param str string: The string to minify
The smallest python literal representation of a string
:rtype: str
The smallest python literal representation of a string
:rtype: str | 304 | en | 0.533457 |
def path_hack():
    """Prepend this file's parent directory to sys.path so that sibling
    packages (e.g. ``apis``) are importable when run as a script."""
    import inspect
    import os
    import sys
    this_file = inspect.getfile(inspect.currentframe())
    script_dir = os.path.dirname(os.path.abspath(this_file))
    sys.path.insert(0, os.path.dirname(script_dir))


path_hack()
import traceback
import sys
import urllib.request
from urllib.request import urlopen
import json
from apis import utilities
# Load the shared API-tutor access token from a local my_token.py; without it
# every request helper below fails, so abort early with an actionable message.
try:
    from apis import my_token
    API_TUTOR_TOKEN = my_token.API_TUTOR_TOKEN
except (ImportError, AttributeError):
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; AttributeError keeps covering a my_token.py missing the name.
    title = 'IMPORTANT: You Need an Access Token!'
    error_message = '\n\n\n' + '*' * len(title) + '\n' + \
        title + '\n' + '*' * len(title) + \
        '\nPlease download the my_token.py file and save it in your apis directory.\n\n'
    raise Exception(error_message)
def get_token(url):
    """Fetch an auth token from *url*, authenticating with API_TUTOR_TOKEN.

    :param str url: base endpoint; the token is appended as the
        ``auth_manager_token`` query parameter.
    :return: the 'token' field of the JSON response.
    :raises Exception: with a human-readable message on an HTTP error.
    """
    try:
        response = urlopen(url + '?auth_manager_token=' + API_TUTOR_TOKEN)
        data = response.read()
        results = data.decode('utf-8', 'ignore')
        return json.loads(results)['token']
    # NOTE(review): `urllib.error` is reachable here only because importing
    # urllib.request also imports it in CPython -- consider importing
    # urllib.error explicitly at the top of the file.
    except urllib.error.HTTPError as e:
        # give a good error message, and chain the original HTTPError:
        error = utilities.get_error_message(e, url)
        raise Exception(error) from e
"""QuizSubmissionFiles API Tests for Version 1.0.
This is a testing template for the generated QuizSubmissionFilesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.quiz_submission_files import QuizSubmissionFilesAPI
class TestQuizSubmissionFilesAPI(unittest.TestCase):
    """Tests for the QuizSubmissionFilesAPI."""
    def setUp(self):
        # NOTE(review): `secrets` here must be the project-local credentials
        # module (it exposes instance_address/access_token), which shadows the
        # stdlib `secrets` module -- confirm this is intentional.
        self.client = QuizSubmissionFilesAPI(
            secrets.instance_address, secrets.access_token
        )
    def test_upload_file(self):
        """Integration test for the QuizSubmissionFilesAPI.upload_file method."""
        # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
        pass
| py3canvas/tests/quiz_submission_files.py | 744 | Tests for the QuizSubmissionFilesAPI.
Integration test for the QuizSubmissionFilesAPI.upload_file method.
QuizSubmissionFiles API Tests for Version 1.0.
This is a testing template for the generated QuizSubmissionFilesAPI Class.
This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration. | 347 | en | 0.780346 |
from __future__ import absolute_import
import pickle
from kombu.utils.functional import lazy
from celery.five import THREAD_TIMEOUT_MAX, items, range, nextfun
from celery.utils.functional import (
LRUCache,
firstmethod,
first,
mlazy,
padlist,
maybe_list,
)
from celery.tests.case import Case
class test_LRUCache(Case):
    """Tests for celery.utils.functional.LRUCache."""

    def test_expires(self):
        """Inserting twice the limit must evict the oldest half."""
        limit = 100
        x = LRUCache(limit=limit)
        slots = list(range(limit * 2))
        for i in slots:
            x[i] = i
        self.assertListEqual(list(x.keys()), list(slots[limit:]))
        self.assertTrue(x.items())
        self.assertTrue(x.values())

    def test_is_pickleable(self):
        """A pickle round-trip preserves both contents and the limit."""
        x = LRUCache(limit=10)
        x.update(luke=1, leia=2)
        y = pickle.loads(pickle.dumps(x))
        # Bug fix: this previously asserted y.limit == y.limit, a
        # tautology that could never fail; compare to the original.
        self.assertEqual(y.limit, x.limit)
        self.assertEqual(y, x)

    def test_update_expires(self):
        """update() honours the size limit just like __setitem__."""
        limit = 100
        x = LRUCache(limit=limit)
        slots = list(range(limit * 2))
        for i in slots:
            x.update({i: i})
        self.assertListEqual(list(x.keys()), list(slots[limit:]))

    def test_least_recently_used(self):
        """The least recently *accessed* key is evicted first."""
        x = LRUCache(3)
        x[1], x[2], x[3] = 1, 2, 3
        self.assertEqual(list(x.keys()), [1, 2, 3])
        x[4], x[5] = 4, 5
        self.assertEqual(list(x.keys()), [3, 4, 5])
        # access 3, which makes it the last used key.
        x[3]
        x[6] = 6
        self.assertEqual(list(x.keys()), [5, 3, 6])
        x[7] = 7
        self.assertEqual(list(x.keys()), [3, 6, 7])

    def assertSafeIter(self, method, interval=0.01, size=10000):
        """Iterate the cache via *method* while another thread drains it.

        Passes when iteration completes without raising (e.g. a
        "dict changed size during iteration" RuntimeError).
        """
        from threading import Thread, Event
        from time import sleep
        x = LRUCache(size)
        x.update(zip(range(size), range(size)))

        class Burglar(Thread):
            """Background thread popping entries until the cache is empty."""

            def __init__(self, cache):
                self.cache = cache
                self.__is_shutdown = Event()
                self.__is_stopped = Event()
                Thread.__init__(self)

            def run(self):
                while not self.__is_shutdown.isSet():
                    try:
                        # Evict from the LRU end until nothing is left.
                        self.cache.data.popitem(last=False)
                    except KeyError:
                        break
                self.__is_stopped.set()

            def stop(self):
                self.__is_shutdown.set()
                self.__is_stopped.wait()
                self.join(THREAD_TIMEOUT_MAX)

        burglar = Burglar(x)
        burglar.start()
        try:
            for _ in getattr(x, method)():
                sleep(0.0001)
        finally:
            burglar.stop()

    def test_safe_to_remove_while_iteritems(self):
        self.assertSafeIter('iteritems')

    def test_safe_to_remove_while_keys(self):
        self.assertSafeIter('keys')

    def test_safe_to_remove_while_itervalues(self):
        self.assertSafeIter('itervalues')

    def test_items(self):
        c = LRUCache()
        c.update(a=1, b=2, c=3)
        self.assertTrue(list(items(c)))
class test_utils(Case):
    """Tests for the small helpers in celery.utils.functional."""

    def test_padlist(self):
        # Already at the requested length: returned unchanged.
        self.assertListEqual(
            padlist(['George', 'Costanza', 'NYC'], 3),
            ['George', 'Costanza', 'NYC'],
        )
        # Shorter than requested: padded with None.
        self.assertListEqual(
            padlist(['George', 'Costanza'], 3),
            ['George', 'Costanza', None],
        )
        # A custom pad value can be supplied.
        self.assertListEqual(
            padlist(['George', 'Costanza', 'NYC'], 4, default='Earth'),
            ['George', 'Costanza', 'NYC', 'Earth'],
        )

    def test_firstmethod_AttributeError(self):
        # Objects lacking the method are skipped; with no match -> None.
        self.assertIsNone(firstmethod('foo')([object()]))

    def test_firstmethod_handles_lazy(self):
        class A(object):
            def __init__(self, value=None):
                self.value = value

            def m(self):
                return self.value

        # The first truthy result wins.
        self.assertEqual('four', firstmethod('m')(
            [A(), A(), A(), A('four'), A('five')]))
        # Lazy-wrapped items are evaluated before the method is called.
        self.assertEqual('four', firstmethod('m')(
            [A(), A(), A(), lazy(lambda: A('four')), A('five')]))

    def test_first(self):
        iterations = [0]

        def predicate(value):
            iterations[0] += 1
            return value == 5

        # Found at 5: the predicate ran for 0..5 inclusive.
        self.assertEqual(5, first(predicate, range(10)))
        self.assertEqual(iterations[0], 6)
        iterations[0] = 0
        # No match: the predicate ran over the whole range.
        self.assertIsNone(first(predicate, range(10, 20)))
        self.assertEqual(iterations[0], 10)

    def test_maybe_list(self):
        self.assertEqual(maybe_list(1), [1])
        self.assertEqual(maybe_list([1]), [1])
        self.assertIsNone(maybe_list(None))
class test_mlazy(Case):
    """Tests for the memoizing lazy wrapper."""

    def test_is_memoized(self):
        source = iter(range(20, 30))
        value = mlazy(nextfun(source))
        # First evaluation pulls 20 from the iterator...
        self.assertEqual(value(), 20)
        self.assertTrue(value.evaluated)
        # ...and subsequent calls reuse the cached result.
        self.assertEqual(value(), 20)
        self.assertEqual(repr(value), '20')
| site-packages/celery/tests/utils/test_functional.py | 4,902 | access 3, which makes it the last used key. | 43 | en | 0.952534 |
from config.configure import Configure

# Central configuration object for the bread/no-bread classifier.
conf = Configure()
# Weights file produced by training and loaded for inference.
conf.model_name = 'vgg16.h5'
# Class labels; presumably index order matches the model's output -- confirm.
conf.classes = ['no_breads', 'breads']
# Glob patterns for the two image pools used as the dataset.
conf.no_breads_path = './dataset/data/pool/no_breads/*'
conf.breads_path = './dataset/data/pool/breads/*'
# conf.baked_breads_path = './dataset/data/pool/breads/*'
# Optimizer hyper-parameters and training schedule.
conf.lr = 1e-4
conf.momentum = 0.9
conf.batch_size = 20
conf.epochs = 20
# Input images are resized to image_size x image_size (224 is VGG16's
# conventional input resolution).
conf.image_size = 224
| server/recognition/config/__init__.py | 388 | conf.baked_breads_path = './dataset/data/pool/breads/*' | 55 | en | 0.66671 |
import copy
import json
from abc import ABC
from datetime import datetime
from typing import Any
from cyber_sdk.util.converter import to_isoformat
def to_data(x: Any) -> Any:
    """Recursively convert *x* to a JSON-serializable representation.

    Objects exposing a ``to_data`` method convert themselves; lists and
    dicts are converted element-wise; everything else passes through.
    """
    if "to_data" in dir(x):
        return x.to_data()
    if isinstance(x, dict):
        return dict_to_data(x)
    if isinstance(x, list):
        return [to_data(item) for item in x]
    return x
def to_amino(x: Any) -> Any:
    """Recursively convert *x* to its Amino (legacy) representation.

    Integers are rendered as strings and datetimes as ISO-format
    strings, as the Amino JSON encoding requires; other plain values
    pass through unchanged.
    """
    if "to_amino" in dir(x):
        return x.to_amino()
    if isinstance(x, list):
        # NOTE(review): list elements are converted with to_data, not
        # to_amino, so nested ints/datetimes keep their data form.
        # Preserved as-is, but confirm this asymmetry is intentional.
        return [to_data(g) for g in x]
    if isinstance(x, dict):
        return dict_to_amino(x)
    if isinstance(x, int):
        return str(x)
    if isinstance(x, datetime):
        return to_isoformat(x)
    # Bug fix: the original fell off the end and returned None for any
    # other value (str, float, ...); return the value unchanged instead.
    return x
def dict_to_amino(d: dict):
    """Recursively calls to_amino on every value of *d*."""
    return {k: to_amino(v) for k, v in d.items()}
def dict_to_data(d: dict) -> dict:
    """Recursively calls to_data on every value of *d*."""
    return {k: to_data(v) for k, v in d.items()}
class JSONSerializable(ABC):
    """Mixin adding JSON data/string serialization to a class."""

    def to_data(self) -> Any:
        """Converts the object to its JSON-serializable Python data representation."""
        # Deep-copy first so conversion can never mutate the instance.
        snapshot = copy.deepcopy(self.__dict__)
        return dict_to_data(snapshot)

    def to_json(self) -> str:
        """Marshals the object into a stringified JSON serialization.

        Keys are sorted and unnecessary whitespace is removed so the
        output is canonical.

        Returns:
            str: JSON string representation
        """
        return json.dumps(self.to_data(), sort_keys=True, separators=(",", ":"))
| cyber_sdk/util/json.py | 1,454 | Recursively calls to_data on dict
Converts the object to its JSON-serializable Python data representation.
Marshals the object into a stringified JSON serialization. Keys are first sorted
and the JSON rendered removes all unnecessary whitespace.
Returns:
str: JSON string representation | 290 | en | 0.639834 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

# Parse the CSV: one row per time step, each row a comma-separated list
# of particle energies. Bug fix: the file handle was never closed; use a
# context manager.
with open("for_james.csv") as f:
    lines = f.read().splitlines()
data = [[float(x) for x in line.split(", ")] for line in lines]
# each item in data is a list of floats that can be passed to plt.hist
for i in range(9):
    plt.hist(data[i], bins=np.logspace(1, 3, 20))
    plt.title(f'Precipitating Energy Distribution at t = {i+0.5} sec')
    plt.xscale("log"); plt.yscale("log"); plt.xlabel('Energy (KeV)'); plt.ylabel('Number of Particles')
    plt.ylim(10,600); plt.xlim(10,1000)
    plt.savefig(f'results/plots/preciphist{i}.png')
    plt.clf()
#! /usr/bin/python
#
# This is the answer code for the course "Learning from Data" on edX.org
# https://www.edx.org/course/caltechx/cs1156x/learning-data/1120
#
# The software is intended for course usage, no guarantee whatsoever.
# Date: 10/4/2013
# Created by: kirbs
# See notes at bottom for further details.
import sys
import os
import random
import pylab
import scipy
import numpy as np
#############################################################################
#############################################################################
# Returns a list of points with y (indicating 1/-1) as the last element
# and the x,y coordinates for the two points separating line.
# Returns a list of points; each point is a list in the following format.
# [x0, x1, x2, y] i.e. [dummy 1 to represent threshold, x1 value, x2 value, sample points correct sign (+1/-1)]
def generatePoints(numberOfPoints):
    # Pick two random points; the line through them is the target function f.
    x1 = random.uniform(-1, 1)
    y1 = random.uniform(-1, 1)
    x2 = random.uniform(-1, 1)
    y2 = random.uniform(-1, 1)

    points = []
    # Bug fix: range(0, numberOfPoints - 1) produced only n - 1 samples;
    # range(numberOfPoints) yields exactly the requested count.
    for i in range(numberOfPoints):
        x = random.uniform(-1, 1)
        y = random.uniform(-1, 1)
        # Each sample is [1 (bias term), x, y, true label (+1/-1)].
        points.append([1, x, y, targetFunction(x1, y1, x2, y2, x, y)])
    return x1, y1, x2, y2, points
def targetFunction(x1, y1, x2, y2, x3, y3):
    """Classify (x3, y3) relative to the line through (x1, y1)-(x2, y2).

    Uses the sign of the 2-D cross product: returns 1 when the point is
    on or above the line, -1 when it is below.
    """
    cross = (x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1)
    return 1 if cross >= 0 else -1
def sign(y):
    """Return 1 for non-negative *y*, -1 for negative *y*."""
    return 1 if y >= 0 else -1
def perceptronCalc(x, w):
    """Dot product of the first three components of *x* and *w*."""
    return sum(xi * wi for xi, wi in zip(x[:3], w[:3]))
def train(training_points, iterationLimit):
    """Run the Perceptron Learning Algorithm on training_points.

    training_points: list of [1, x, y, label] samples with label +1/-1.
    iterationLimit: maximum number of update passes before giving up.
    Returns (iterations, w): passes performed and final weights [w0, w1, w2].
    """
    w = [0.0,0.0,0.0] # initialize weights for w[0], w[1], w[2]
    learned = False
    iterations = 0 # keep track of the iteration count

    # This closure is the primary PLA implementation.
    # It returns True when all sample points are correctly classified by the hypothesis.
    # Returns False if there was a misclassified point and the weight vector changed.
    def updateWeights():
        random.shuffle(training_points) # randomize training points
        for point in training_points:
            result = sign(perceptronCalc(point,w)) # calculate the point's predicted sign.
            if point[3] != result: # does the sample's true label match our prediction?
                # Use line below to watch the perceptron's weights change
                # print str(iterations) + " " + str(w) + " " + str(result) + " " + str(point) + " " + str(perceptronCalc(point))

                # If not, nudge the weights toward the sample's true label.
                w[0] += point[0]*point[3]
                w[1] += point[1]*point[3]
                w[2] += point[2]*point[3]
                return False # break out of loop and return
        return True # every training point matched its expected label

    while not learned:
        iterations += 1
        noErrors = updateWeights()
        # Stop on convergence or when the iteration budget is exhausted.
        if iterations == iterationLimit or noErrors:
            learned = True
            break
    return iterations, w
# Calculates approximate probability of hypothesis function returns a result
# that is different from the target function.
def findErrorProbability(x1, y1, x2, y2, weights, numberOfPointsToTest):
    """Monte Carlo estimate of P[f(x) != g(x)] over the [-1, 1]^2 square."""
    mismatches = 0
    for _ in range(numberOfPointsToTest - 1):
        # Draw a fresh random test point.
        px = random.uniform(-1, 1)
        py = random.uniform(-1, 1)
        # Compare the target function with the learned hypothesis.
        expected = targetFunction(x1, y1, x2, y2, px, py)
        predicted = sign(perceptronCalc([1, px, py], weights))
        if expected != predicted:
            mismatches += 1
    return mismatches / float(numberOfPointsToTest)
# Runs runTrial specified number of times.
# Returns average iterations, average error probability, and a histogram of trial iteration count.
def runSimulation(numberOfTrials, numberOfTestPoints, iterationLimit):
    """Aggregate statistics over many independent perceptron trials.

    NOTE: this file is Python 2 (statement-form print below).
    """
    interations = []  # iteration counts per trial (sic: original name kept)
    probability = []
    for t in range(1,numberOfTrials+1):
        iteration_count, w, error_probability = runTrial(numberOfTestPoints, iterationLimit)
        interations.append(iteration_count)
        probability.append(error_probability)
    print "Avg. iterations: " + str(np.mean(interations)) + " : Avg. error probability: " + str(np.mean(probability))
    # Histogram of how many iterations each trial needed to converge.
    pylab.hist(interations)
    pylab.show()
# Runs one trial based on the number of test points desired and an iteration limit to cap run time.
# If showChart is set to True, this function with also return a chart of the points, target function and hypothesis.
# Returns the number of iterations perceptron took to converge, final weights, and the error probability.
def runTrial(numberOfTestPoints, iterationLimit, showChart = False):
    """Generate data, train the perceptron, and estimate its error.

    Returns (iterations, weights, errorProb). When showChart is True,
    also plots the sample points, target line, and hypothesis line.
    NOTE: this file is Python 2 (statement-form print below).
    """
    x1, y1, x2, y2, points = generatePoints(numberOfTestPoints)
    iterations, w = train(points, iterationLimit)
    # Error estimated against 10000 fresh random points.
    errorProb = findErrorProbability(x1,y1,x2,y2,w, 10000)
    if showChart:
        if iterations == iterationLimit:
            print "No solution found in " + str(iterations) + " iterations!"
        print "Iterations: " + str(iterations) + ' | Weights: ' + str(w)

        # plot points above(green) and below(blue) the target function.
        green_x = []
        green_y = []
        blue_x = []
        blue_y = []
        for x in points:
            if x[3] == 1:
                green_x.append(x[1])
                green_y.append(x[2])
            else:
                blue_x.append(x[1])
                blue_y.append(x[2])
        pylab.plot(green_x, green_y, 'go')
        pylab.plot(blue_x, blue_y, 'bo')

        # plot target function(black) and hypothesis function(red) lines
        x = np.array( [-1,1] )
        slope = (y2-y1)/(x2-x1)
        intercept = y2 - slope * x2
        pylab.plot(x, slope*x + intercept, 'k--')
        pylab.plot( x, -w[1]/w[2] * x - w[0] / w[2] , 'r' ) # this will throw an error if w[2] == 0
        pylab.ylim([-1,1])
        pylab.xlim([-1,1])
        pylab.show()
    return iterations, w, errorProb
########################################################################
############################----NOTES----###############################
########################################################################
# Uncomment one line below and reload the script in your favorite Python
# environment. Or load the script and type the method with requireed
# paramaters you want to execute.
########################################################################
########################################################################
# runSimulation takes 3 arguments, number of trials to run, number of test points, and interation limit.
# The higher you set each parameter, the longer this method takes to run.
# This will return the average number of iterations the perceptron took to converge
# and the average error probability.
# Question 7/8
# runSimulation(1000, 10, 100)
# Question 9/10
# runSimulation(1000, 100, 1000)
#########################################################################
#########################################################################
# runTrial takes 3 arguments, number of points, iteration limit, and boolean if a chart should be shown.
# This method returns the number of iteration perceptron took to converge, the final
# weights vector, and the error probability.
# runTrial(10, 100, True) # Show graph of one trial with points, hypothesis (red line), and target funtion (black line).
# runTrial(10, 100) # No chart
# runTrial(10, 100, False) # No chart
| Homework_1/Python/homework_1_by_kirbs.py | 8,070 | ! /usr/bin/python This is the answer code for the course "Learning from Data" on edX.org https://www.edx.org/course/caltechx/cs1156x/learning-data/1120 The software is intended for course usage, no guarantee whatsoever. Date: 10/4/2013 Created by: kirbs See notes at bottom for further details. Returns a list of points with y (indicating 1/-1) as the last element and the x,y coordinates for the two points separating line. Returns a list of points; each point is a list in the following format. [x0, x1, x2, y] i.e. [dummy 1 to represent threshold, x1 value, x2 value, sample points correct sign (+1/-1)] random.seed(1) used for testing random.seed(1) used for testing add 1/-1 indicator to the end of each point list This function determines the cross product between a line and a given point. Returns 1 if above the line and -1 if below the line. Simple sign function a.k.a dot product initialize weights for w[0], w[1], w[2] keep track of the iteration count This method is the primary PLA implentation. It returns True when all sample points are corectly classfied by the hypothesis. Returns False if there was a misclassified point and the weight vector changed. randomize training points caclulate point and determine its sign. does sample point's result match our calculated result? Use line below to watch the perceptron's weights change print str(iterations) + " " + str(w) + " " + str(result) + " " + str(point) + " " + str(perceptronCalc(point)) if not update weights by sample point's result break out of loop and return if the loop reaches this point all calculated points in the training points match their expected y's Calculates approximate probability of hypothesis function returns a result that is different from the target function.generate random test pointscompare results from target function and hypothesis function keep track of errors Runs runTrial specified number of times. 
Returns average iterations, average error probability, and a histogram of trial iteration count. Runs one trial based on the number of test points desired and an iteration limit to cap run time. If showChart is set to True, this function with also return a chart of the points, target function and hypothesis. Returns the number of iterations perceptron took to converge, final weights, and the error probability. plot points above(green) and below(blue) the target function. plot target function(black) and hypothesis function(red) lines this will throw an error if w[2] == 0----NOTES---- Uncomment one line below and reload the script in your favorite Python environment. Or load the script and type the method with requireed paramaters you want to execute. runSimulation takes 3 arguments, number of trials to run, number of test points, and interation limit. The higher you set each parameter, the longer this method takes to run. This will return the average number of iterations the perceptron took to converge and the average error probability. Question 7/8 runSimulation(1000, 10, 100) Question 9/10 runSimulation(1000, 100, 1000) runTrial takes 3 arguments, number of points, iteration limit, and boolean if a chart should be shown. This method returns the number of iteration perceptron took to converge, the final weights vector, and the error probability. runTrial(10, 100, True) Show graph of one trial with points, hypothesis (red line), and target funtion (black line). runTrial(10, 100) No chart runTrial(10, 100, False) No chart | 3,465 | en | 0.845707 |
# -*- coding: utf-8 -*-
import sys
from contextlib import contextmanager
from shutil import rmtree as _rmtree
from tempfile import template, mkdtemp, _exists
from cms.apphook_pool import apphook_pool
from django.contrib.auth import get_user_model
from django.utils.six.moves import StringIO
from django.utils.translation import get_language, activate
class NULL:
    # Sentinel used by ChangeModel to distinguish "attribute was absent"
    # from "attribute was explicitly set to None".
    pass
class StdOverride(object):
    """Temporarily replace ``sys.std<std>`` with an in-memory buffer."""

    def __init__(self, std='out', buffer=None):
        self.std = std
        self.buffer = buffer or StringIO()

    def __enter__(self):
        # Redirect e.g. sys.stdout to the buffer for the with-block.
        stream_name = 'std%s' % self.std
        setattr(sys, stream_name, self.buffer)
        return self.buffer

    def __exit__(self, type, value, traceback):
        # Restore the interpreter's pristine stream (sys.__stdout__ etc.).
        stream_name = 'std%s' % self.std
        original = getattr(sys, '__%s__' % stream_name)
        setattr(sys, stream_name, original)
class StdoutOverride(StdOverride):
    """
    Overrides Python's standard output and redirects it to a StringIO
    object, so that one can test the output of the program.

    example:
    lines = None
    with StdoutOverride() as buffer:
        # print stuff
        lines = buffer.getvalue()
    """
    def __init__(self, buffer=None):
        # Delegate to StdOverride with the stream fixed to stdout.
        super(StdoutOverride, self).__init__('out', buffer)
class LanguageOverride(object):
    """Activate a translation language for the duration of a with-block."""

    def __init__(self, language):
        self.newlang = language

    def __enter__(self):
        # Remember whichever language was active so it can be restored.
        self.oldlang = get_language()
        activate(self.newlang)

    def __exit__(self, type, value, traceback):
        activate(self.oldlang)
class TemporaryDirectory:
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager. For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    def __init__(self, suffix="", prefix=template, dir=None):
        self.name = mkdtemp(suffix, prefix, dir)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        # Deleting twice is a no-op: only remove while it still exists.
        if _exists(self.name):
            _rmtree(self.name)
class UserLoginContext(object):
    """Log *user* into the testcase's client for the with-block.

    NOTE(review): the password is read from the username field, which
    assumes test users are created with password == username -- confirm.
    """

    def __init__(self, testcase, user):
        self.testcase = testcase
        self.user = user

    def __enter__(self):
        username_field = get_user_model().USERNAME_FIELD
        credential = getattr(self.user, username_field)
        loginok = self.testcase.client.login(username=credential,
                                             password=credential)
        # Stash any previous testcase.user so __exit__ can restore it.
        self.old_user = getattr(self.testcase, 'user', None)
        self.testcase.user = self.user
        self.testcase.assertTrue(loginok)

    def __exit__(self, exc, value, tb):
        self.testcase.user = self.old_user
        if not self.testcase.user:
            delattr(self.testcase, 'user')
        self.testcase.client.logout()
class ChangeModel(object):
    """
    Changes attributes on a model while within the context.

    These changes *ARE* saved to the database for the context!
    """

    def __init__(self, instance, **overrides):
        self.instance = instance
        self.overrides = overrides

    def __enter__(self):
        self.old = {}
        for attr, new_value in self.overrides.items():
            # NULL marks attributes that did not exist beforehand.
            self.old[attr] = getattr(self.instance, attr, NULL)
            setattr(self.instance, attr, new_value)
        self.instance.save()

    def __exit__(self, exc, value, tb):
        for attr in self.overrides.keys():
            previous = self.old[attr]
            if previous is NULL:
                delattr(self.instance, attr)
            else:
                setattr(self.instance, attr, previous)
        self.instance.save()
@contextmanager
def disable_logger(logger):
    """Silence *logger* inside the with-block, restoring its prior state.

    Note: deliberately no try/finally -- if the body raises, the logger
    stays disabled, matching the original behavior.
    """
    previous = logger.disabled
    logger.disabled = True
    yield
    logger.disabled = previous
@contextmanager
def apphooks(*hooks):
    """Temporarily replace the registered CMS apphooks with *hooks*."""
    saved_apphooks = apphook_pool.apphooks
    saved_apps = apphook_pool.apps
    saved_discovered = apphook_pool.discovered
    apphook_pool.clear()
    for hook in hooks:
        apphook_pool.register(hook)
    try:
        yield
    finally:
        # Restore the pool exactly as it was before the block.
        apphook_pool.apphooks = saved_apphooks
        apphook_pool.apps = saved_apps
        apphook_pool.discovered = saved_discovered
@contextmanager
def signal_tester(*signals):
    """Connect a SignalTester to *signals* for the duration of the block."""
    recorder = SignalTester()
    for sig in signals:
        sig.connect(recorder)
    try:
        yield recorder
    finally:
        for sig in signals:
            sig.disconnect(recorder)
class SignalTester(object):
    """Callable that records every invocation for later inspection."""

    def __init__(self):
        self.call_count = 0  # number of times the tester was invoked
        self.calls = []      # one (args, kwargs) tuple per invocation

    def __call__(self, *args, **kwargs):
        self.call_count += 1
        self.calls.append((args, kwargs))
| cms/test_utils/util/context_managers.py | 4,606 | Changes attributes on a model while within the context.
These changes *ARE* saved to the database for the context!
This overrides Python's the standard output and redirects it to a StringIO
object, so that on can test the output of the program.
example:
lines = None
with StdoutOverride() as buffer:
# print stuff
lines = buffer.getvalue()
Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everthing contained
in it are removed.
-*- coding: utf-8 -*- | 643 | en | 0.854287 |
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiplex segmentation application.
Deprecated in favor of ``deepcell.applications.Mesmer`` instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deepcell.applications.mesmer import Mesmer as MultiplexSegmentation
| deepcell/applications/multiplex_segmentation.py | 1,543 | Multiplex segmentation application.
Deprecated in favor of ``deepcell.applications.Mesmer`` instead.
Copyright 2016-2021 The Van Valen Lab at the California Institute of Technology (Caltech), with support from the Paul Allen Family Foundation, Google, & National Institutes of Health (NIH) under Grant U24CA224309-01. All rights reserved. Licensed under a modified Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.github.com/vanvalenlab/deepcell-tf/LICENSE The Work provided may be used for non-commercial academic purposes only. For any other use of the Work, including commercial use, please contact: vanvalenlab@gmail.com Neither the name of Caltech nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 1,303 | en | 0.841135 |
#ERIMX Made By Paradox4280 aka c2FI, x2Fi, RG9t
import discord, base64, codecs, requests, urllib.parse, datetime, asyncio, sys, praw
import random, aiohttp, io, json, os, string, platform, time, bs4, colorama
from discord.ext import (
commands
)
from discord.voice_client import VoiceClient
# from discord.ext.commands import bot
from bs4 import BeautifulSoup as bs4
from colorama import Fore, Style
from discord import Permissions
from discord.utils import get
from discord import User
from os import system
# Load the bot's static configuration from config.json at import time.
with open('config.json') as f:
    config = json.load(f)
def get_prefix(paradox, message):
    """Return the command prefix stored for the message's guild.

    Bug fix: the original loaded prefixes.json but never returned a
    value, so commands.Bot received None as its prefix.
    Falls back to '=' (the default set in on_guild_join) for DMs or
    guilds with no stored prefix.
    """
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)
    if message.guild is None:
        return '='
    return prefixes.get(str(message.guild.id), '=')
# Bug fix: the option is spelled ``case_insensitive``; the misspelled
# ``case_Insensitive`` kwarg was silently swallowed by discord.py, so
# commands were unintentionally case-sensitive.
paradox = commands.Bot(command_prefix=get_prefix, case_insensitive=True)
# Load every cog module found in ./cogs (strip ".py" for the module name).
for cog in os.listdir("cogs"):
    if cog.endswith(".py"):
        paradox.load_extension(f"cogs.{cog[:-3]}")
@paradox.event
async def on_ready():
    """Set the bot's presence and log its identity once connected."""
    # Bug fix: this referenced an undefined name ``bot`` (NameError at
    # runtime); the client object in this module is ``paradox``.
    await paradox.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="Her"))
    print(f'\n{Fore.GREEN}[>] {Fore.RESET}{Fore.CYAN}Logged in as{Fore.RESET} {Fore.YELLOW}{paradox.user.name}#{paradox.user.discriminator}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}User ID:{Fore.RESET} {Fore.YELLOW}{paradox.user.id}\n')
    print(f'\n{Fore.GREEN}[>]{Fore.RESET} {Fore.CYAN}Version:{Fore.RESET} {Fore.YELLOW}{discord.__version__}\n')
@paradox.event
async def on_command_error(ctx, error):
    """Report any command failure back to the channel it came from."""
    message = f'Error. Try =help ({error})'
    await ctx.send(embed=discord.Embed(description=message, color=16202876))
@paradox.event
async def on_guild_join(guild):
    """Register the default '=' prefix for a newly joined guild."""
    with open('prefixes.json') as fp:
        mapping = json.load(fp)
    mapping[str(guild.id)] = '='
    with open('prefixes.json', 'w') as fp:
        json.dump(mapping, fp, indent=4)
@paradox.event
async def on_guild_remove(guild):
    """Drop the stored prefix when the bot leaves a guild."""
    with open('prefixes.json', 'r') as f:
        prefixes = json.load(f)
    # Robustness: pop with a default so a guild that was never recorded
    # (e.g. joined before prefixes.json existed) doesn't raise KeyError.
    prefixes.pop(str(guild.id), None)
    with open('prefixes.json', 'w') as f:
        json.dump(prefixes, f, indent=4)
@paradox.command()
async def changeprefix(ctx, prefix):
    """Persist a new command prefix for the invoking guild and confirm."""
    with open('prefixes.json') as fp:
        mapping = json.load(fp)
    mapping[str(ctx.guild.id)] = prefix
    with open('prefixes.json', 'w') as fp:
        json.dump(mapping, fp, indent=4)
    confirmation = discord.Embed(description=f'prefix changed to: {prefix}', color=16202876)
    await ctx.send(embed=confirmation)
paradox.run(os.getenv('BOT_TOKEN'))
| src/bot.py | 2,532 | ERIMX Made By Paradox4280 aka c2FI, x2Fi, RG9t from discord.ext.commands import bot | 83 | en | 0.741411 |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX Importer definition."""
from typing import Any, Dict, List, Optional, Type, Union
import absl
from tfx import types
from tfx.dsl.components.base import base_driver
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.types import channel_utils
from tfx.utils import doc_controls
from ml_metadata.proto import metadata_store_pb2
# Constant to access importer importing result from importer output dict.
IMPORT_RESULT_KEY = 'result'
# Constant to access artifact uri from importer exec_properties dict.
SOURCE_URI_KEY = 'artifact_uri'
# Constant to access re-import option from importer exec_properties dict.
REIMPORT_OPTION_KEY = 'reimport'
def _set_artifact_properties(artifact: types.Artifact,
                             properties: Optional[Dict[str, Any]],
                             custom_properties: Optional[Dict[str, Any]]):
  """Sets properties and custom_properties to the given artifact.

  Declared properties become plain attributes; custom properties are
  stored via the typed custom-property setters. Only int and str/bytes
  custom property values are supported.
  """
  for key, value in (properties or {}).items():
    setattr(artifact, key, value)
  for key, value in (custom_properties or {}).items():
    if isinstance(value, int):
      artifact.set_int_custom_property(key, value)
    elif isinstance(value, (str, bytes)):
      artifact.set_string_custom_property(key, value)
    else:
      raise NotImplementedError(
          f'Unexpected custom_property value type:{type(value)}')
def _prepare_artifact(
    metadata_handler: metadata.Metadata,
    uri: str,
    properties: Dict[str, Any],
    custom_properties: Dict[str, Any],
    reimport: bool, output_artifact_class: Type[types.Artifact],
    mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
  """Prepares the Importer's output artifact.

  If there is already an artifact in MLMD with the same URI and properties /
  custom properties, that artifact will be reused unless the `reimport`
  argument is set to True.

  Args:
    metadata_handler: The handler of MLMD.
    uri: The uri of the artifact.
    properties: The properties of the artifact, given as a dictionary from
      string keys to integer / string values. Must conform to the declared
      properties of the destination channel's output type.
    custom_properties: The custom properties of the artifact, given as a
      dictionary from string keys to integer / string values.
    reimport: If set to True, will register a new artifact even if it already
      exists in the database.
    output_artifact_class: The class of the output artifact.
    mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.

  Returns:
    An Artifact object representing the imported artifact.
  """
  absl.logging.info(
      'Processing source uri: %s, properties: %s, custom_properties: %s' %
      (uri, properties, custom_properties))

  # Check types of custom properties.
  for key, value in custom_properties.items():
    if not isinstance(value, (int, str, bytes)):
      raise ValueError(
          ('Custom property value for key %r must be a string or integer '
           '(got %r instead)') % (key, value))

  unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
      uri)
  # Only consider previous artifacts as candidates to reuse, if the properties
  # of the imported artifact match those of the existing artifact.
  previous_artifacts = []
  for candidate_mlmd_artifact in unfiltered_previous_artifacts:
    is_candidate = True
    # Wrap the raw MLMD proto in the typed artifact class so property
    # accessors (getattr / get_*_custom_property) work below.
    candidate_artifact = output_artifact_class(mlmd_artifact_type)
    candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
    for key, value in properties.items():
      if getattr(candidate_artifact, key) != value:
        is_candidate = False
        break
    # NOTE: this loop runs even if the properties loop above already
    # disqualified the candidate; is_candidate stays False either way.
    for key, value in custom_properties.items():
      if isinstance(value, int):
        if candidate_artifact.get_int_custom_property(key) != value:
          is_candidate = False
          break
      elif isinstance(value, (str, bytes)):
        if candidate_artifact.get_string_custom_property(key) != value:
          is_candidate = False
          break
    if is_candidate:
      previous_artifacts.append(candidate_mlmd_artifact)

  result = output_artifact_class(mlmd_artifact_type)
  result.uri = uri
  _set_artifact_properties(result, properties, custom_properties)

  # If a registered artifact has the same uri and properties and the user does
  # not explicitly ask for reimport, reuse that artifact.
  if bool(previous_artifacts) and not reimport:
    absl.logging.info('Reusing existing artifact')
    # Reuse the most recently registered matching artifact (max id).
    result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))

  return result
def generate_output_dict(
    metadata_handler: metadata.Metadata,
    uri: str,
    properties: Dict[str, Any],
    custom_properties: Dict[str, Any],
    reimport: bool,
    output_artifact_class: Type[types.Artifact],
    mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType] = None
) -> Dict[str, List[types.Artifact]]:
  """Builds the Importer node's output dict.

  An artifact already registered in MLMD with the same URI and matching
  properties / custom properties is reused, unless `reimport` is set to True.

  Args:
    metadata_handler: The handler of MLMD.
    uri: The uri of the artifact.
    properties: The properties of the artifact, given as a dictionary from
      string keys to integer / string values. Must conform to the declared
      properties of the destination channel's output type.
    custom_properties: The custom properties of the artifact, given as a
      dictionary from string keys to integer / string values.
    reimport: If set to True, will register a new artifact even if it already
      exists in the database.
    output_artifact_class: The class of the output artifact.
    mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.

  Returns:
    A dictionary with the single key `result` mapping to a one-element list
    holding the imported Artifact.
  """
  imported_artifact = _prepare_artifact(
      metadata_handler,
      uri=uri,
      properties=properties,
      custom_properties=custom_properties,
      output_artifact_class=output_artifact_class,
      mlmd_artifact_type=mlmd_artifact_type,
      reimport=reimport)
  return {IMPORT_RESULT_KEY: [imported_artifact]}
class ImporterDriver(base_driver.BaseDriver):
  """Driver that resolves the Importer node's artifact instead of running an executor."""

  def pre_execution(
      self,
      input_dict: Dict[str, types.Channel],
      output_dict: Dict[str, types.Channel],
      exec_properties: Dict[str, Any],
      driver_args: data_types.DriverArgs,
      pipeline_info: data_types.PipelineInfo,
      component_info: data_types.ComponentInfo,
  ) -> data_types.ExecutionDecision:
    # Record the pipeline contexts and a new execution in MLMD up front.
    pipeline_contexts = self._metadata_handler.register_pipeline_contexts_if_not_exists(
        pipeline_info)
    execution = self._metadata_handler.register_execution(
        exec_properties=exec_properties,
        pipeline_info=pipeline_info,
        component_info=component_info,
        contexts=pipeline_contexts)

    # Import (or reuse) the external artifact described by exec_properties.
    result_channel = output_dict[IMPORT_RESULT_KEY]
    imported_artifacts = generate_output_dict(
        self._metadata_handler,
        uri=exec_properties[SOURCE_URI_KEY],
        properties=result_channel.additional_properties,
        custom_properties=result_channel.additional_custom_properties,
        reimport=exec_properties[REIMPORT_OPTION_KEY],
        output_artifact_class=result_channel.type)

    # Attach the imported artifacts to the execution; the state is CACHED
    # because no executor needs to run for this node.
    self._metadata_handler.update_execution(
        execution=execution,
        component_info=component_info,
        output_artifacts=imported_artifacts,
        execution_state=metadata.EXECUTION_STATE_CACHED,
        contexts=pipeline_contexts)

    output_dict[IMPORT_RESULT_KEY] = channel_utils.as_channel(
        imported_artifacts[IMPORT_RESULT_KEY])

    return data_types.ExecutionDecision(
        input_dict={},
        output_dict=imported_artifacts,
        exec_properties=exec_properties,
        execution_id=execution.id,
        use_cached_results=False)
class Importer(base_node.BaseNode):
  """Special TFX node that registers an external resource into MLMD.

  Downstream nodes can then consume the registered artifact as a regular
  input. Example:

  ```
  importer = Importer(
      source_uri='uri/to/schema',
      artifact_type=standard_artifacts.Schema,
      reimport=False).with_id('import_schema')
  schema_gen = SchemaGen(
      fixed_schema=importer.outputs['result'],
      examples=...)
  ```
  """

  def __init__(self,
               source_uri: str,
               artifact_type: Type[types.Artifact],
               reimport: Optional[bool] = False,
               properties: Optional[Dict[str, Union[str, int]]] = None,
               custom_properties: Optional[Dict[str, Union[str, int]]] = None):
    """Initializes the Importer.

    Args:
      source_uri: the URI of the resource that needs to be registered.
      artifact_type: the type of the artifact to import.
      reimport: whether or not to re-import as a new artifact if the URI has
        been imported before.
      properties: Dictionary of properties for the imported Artifact. These
        properties should be ones declared for the given artifact_type (see
        the PROPERTIES attribute of the definition of the type for details).
      custom_properties: Dictionary of custom properties for the imported
        Artifact. These properties should be of type Text or int.
    """
    self._source_uri = source_uri
    self._reimport = reimport

    placeholder_artifact = artifact_type()
    _set_artifact_properties(placeholder_artifact, properties,
                             custom_properties)

    # TODO(b/161490287): remove static artifacts.
    self._output_dict = {
        IMPORT_RESULT_KEY:
            types.Channel(
                type=artifact_type,
                additional_properties=properties,
                additional_custom_properties=custom_properties).set_artifacts(
                    [placeholder_artifact])
    }

    super().__init__(driver_class=ImporterDriver)

  @property
  @doc_controls.do_not_generate_docs
  def inputs(self) -> Dict[str, Any]:
    # Importer consumes nothing from upstream nodes.
    return {}

  @property
  def outputs(self) -> Dict[str, Any]:
    """Output Channel dict that contains the imported artifact."""
    return self._output_dict

  @property
  @doc_controls.do_not_generate_docs
  def exec_properties(self) -> Dict[str, Any]:
    return {
        SOURCE_URI_KEY: self._source_uri,
        REIMPORT_OPTION_KEY: int(self._reimport),
    }
| tfx/dsl/components/common/importer.py | 11,320 | Definition for TFX Importer.
The Importer is a special TFX node which registers an external resource into
MLMD so that downstream nodes can use the registered artifact as an input.
Here is an example to use the Importer:
```
importer = Importer(
source_uri='uri/to/schema',
artifact_type=standard_artifacts.Schema,
reimport=False).with_id('import_schema')
schema_gen = SchemaGen(
fixed_schema=importer.outputs['result'],
examples=...)
```
Driver for Importer.
Init function for the Importer.
Args:
source_uri: the URI of the resource that needs to be registered.
artifact_type: the type of the artifact to import.
reimport: whether or not to re-import as a new artifact if the URI has
been imported before.
properties: Dictionary of properties for the imported Artifact. These
properties should be ones declared for the given artifact_type (see the
PROPERTIES attribute of the definition of the type for details).
custom_properties: Dictionary of custom properties for the imported
Artifact. These properties should be of type Text or int.
Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
Sets properties and custom_properties to the given artifact.
Generates importer's output dict.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
a dictionary with the only key `result` whose value is the Artifact.
Output Channel dict that contains imported artifacts.
TFX Importer definition.
Copyright 2019 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Constant to access importer importing result from importer output dict. Constant to access artifact uri from importer exec_properties dict. Constant to access re-import option from importer exec_properties dict. Check types of custom properties. Only consider previous artifacts as candidates to reuse, if the properties of the imported artifact match those of the existing artifact. If a registered artifact has the same uri and properties and the user does not explicitly ask for reimport, reuse that artifact. Registers contexts and execution. Create imported artifacts. Update execution with imported artifacts. TODO(b/161490287): remove static artifacts. | 4,335 | en | 0.773153 |
from datetime import timedelta
from os import path
from re import sub as regex_sub
from shutil import rmtree
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from django.utils import timezone
from validator.models import DatasetConfiguration, User, CopiedValidations
from django.db.models import Q, ExpressionWrapper, F, BooleanField
class ValidationRun(models.Model):
    """One validation job: its configuration, progress counters, result file
    and lifecycle state (expiry, archival, DOI publication)."""

    # scaling methods
    MIN_MAX = 'min_max'
    LINREG = 'linreg'
    MEAN_STD = 'mean_std'
    NO_SCALING = 'none'
    BETA_SCALING = 'cdf_beta_match'
    SCALING_METHODS = (
        (NO_SCALING, 'No scaling'),
        (MIN_MAX, 'Min/Max'),
        (LINREG, 'Linear regression'),
        (MEAN_STD, 'Mean/standard deviation'),
        (BETA_SCALING, 'CDF matching with beta distribution fitting'),
    )

    # scale to
    SCALE_TO_REF = 'ref'
    SCALE_TO_DATA = 'data'
    SCALE_TO_OPTIONS = (
        (SCALE_TO_REF, 'Scale to reference'),
        (SCALE_TO_DATA, 'Scale to data')
    )

    # anomalies
    MOVING_AVG_35_D = "moving_avg_35_d"
    CLIMATOLOGY = "climatology"
    NO_ANOM = "none"
    ANOMALIES_METHODS = (
        (NO_ANOM, 'Do not calculate'),
        (MOVING_AVG_35_D, '35 day moving average'),
        (CLIMATOLOGY, 'Climatology'),
    )

    # upscaling options
    NO_UPSCALE = "none"
    AVERAGE = "average"
    UPSCALING_METHODS = (
        (NO_UPSCALE, 'Do not upscale point measurements'),
        (AVERAGE, 'Average point measurements'),
    )

    # fields
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name_tag = models.CharField(max_length=80, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
    start_time = models.DateTimeField('started')
    # end_time stays NULL while the run is still in progress (see expiry_date).
    end_time = models.DateTimeField('finished', null=True)
    total_points = models.IntegerField(default=0)
    error_points = models.IntegerField(default=0)
    ok_points = models.IntegerField(default=0)
    progress = models.IntegerField(default=0)
    reference_configuration = models.ForeignKey(to=DatasetConfiguration, on_delete=models.SET_NULL,
                                                related_name='ref_validation_run', null=True)
    scaling_ref = models.ForeignKey(to=DatasetConfiguration, on_delete=models.SET_NULL,
                                    related_name='scaling_ref_validation_run', null=True)
    scaling_method = models.CharField(max_length=20, choices=SCALING_METHODS, default=MEAN_STD)
    interval_from = models.DateTimeField(null=True)
    interval_to = models.DateTimeField(null=True)
    anomalies = models.CharField(max_length=20, choices=ANOMALIES_METHODS, default=NO_ANOM)
    # Bounding box for spatial subsetting; latitudes are range-checked,
    # longitudes are not (see clean() for the all-or-nothing constraint).
    min_lat = models.FloatField(null=True, blank=True, validators=[MinValueValidator(-90.0), MaxValueValidator(90.0)])
    min_lon = models.FloatField(null=True, blank=True)
    max_lat = models.FloatField(null=True, blank=True, validators=[MinValueValidator(-90.0), MaxValueValidator(90.0)])
    max_lon = models.FloatField(null=True, blank=True)
    # only applicable if anomalies with climatology is selected
    anomalies_from = models.DateTimeField(null=True, blank=True)
    anomalies_to = models.DateTimeField(null=True, blank=True)
    # upscaling of ISMN point measurements
    upscaling_method = models.CharField(max_length=50, choices=UPSCALING_METHODS, default=NO_UPSCALE, blank=True)
    temporal_stability = models.BooleanField(default=False)
    output_file = models.FileField(null=True, max_length=250, blank=True)
    is_archived = models.BooleanField(default=False)
    last_extended = models.DateTimeField(null=True, blank=True)
    expiry_notified = models.BooleanField(default=False)
    doi = models.CharField(max_length=255, blank=True)
    publishing_in_progress = models.BooleanField(default=False)
    tcol = models.BooleanField(default=False)
    bootstrap_tcol_cis = models.BooleanField(default=False)
    used_by = models.ManyToManyField(User, through=CopiedValidations, through_fields=('original_run', 'used_by_user'),
                                     related_name='copied_runs')

    # many-to-one relationships coming from other models:
    # dataset_configurations from DatasetConfiguration
    # celery_tasks from CeleryTask

    @property
    def expiry_date(self):
        """Datetime at which this run expires, or None if it never expires.

        Archived and still-running runs do not expire. The countdown for a
        run with progress == -1 (NOTE(review): presumably a failed/cancelled
        run — confirm) starts at start_time; otherwise it starts at
        last_extended if set, else at end_time.
        """
        if (self.is_archived or (self.end_time is None)) and (self.progress != -1):
            return None
        if self.progress == -1:
            initial_date = self.start_time
        else:
            initial_date = self.last_extended if self.last_extended else self.end_time
        return initial_date + timedelta(days=settings.VALIDATION_EXPIRY_DAYS)

    @property
    def is_expired(self):
        """True if the run has an expiry date that lies in the past."""
        e = self.expiry_date
        return (e is not None) and (timezone.now() > e)

    @property
    def is_near_expiry(self):
        """True if the run expires within VALIDATION_EXPIRY_WARNING_DAYS from now."""
        e = self.expiry_date
        return (e is not None) and (timezone.now() > e - timedelta(days=settings.VALIDATION_EXPIRY_WARNING_DAYS))

    @property
    def is_unpublished(self):
        """True as long as no DOI has been assigned."""
        return not self.doi

    def archive(self, unarchive=False, commit=True):
        """Archive the run; with unarchive=True, un-archive it and restart its lifespan."""
        if unarchive:
            # Un-archiving restarts the expiry countdown.
            self.extend_lifespan(commit=False)
            self.is_archived = False
        else:
            self.is_archived = True
        if commit:
            self.save()

    def extend_lifespan(self, commit=True):
        """Restart the expiry countdown and reset the expiry notification flag."""
        self.last_extended = timezone.now()
        self.expiry_notified = False
        if commit:
            self.save()

    def clean(self):
        """Model-level validation of interval, climatology and bounding-box settings."""
        super(ValidationRun, self).clean()

        # The validation interval must be absent or a complete, ordered range.
        if self.interval_from is None and self.interval_to is not None:
            raise ValidationError({'interval_from': 'What has an end must have a beginning.', })
        if self.interval_from is not None and self.interval_to is None:
            raise ValidationError({'interval_to': 'What has a beginning must have an end.', })
        if self.interval_from is not None and self.interval_to is not None and self.interval_from > self.interval_to:
            raise ValidationError({'interval_from': 'From must be before To',
                                   'interval_to': 'From must be before To', })

        # A climatology period is required iff climatology anomalies are selected.
        if self.anomalies == self.CLIMATOLOGY:
            if self.anomalies_from is None or self.anomalies_to is None:
                raise ValidationError({'anomalies': 'Need valid time period to calculate climatology from.', })
            if self.anomalies_from > self.anomalies_to:
                raise ValidationError({'anomalies_from': 'Start of climatology period must be before end.',
                                       'anomalies_to': 'Start of climatology period must be before end.', })
        else:
            if self.anomalies_from is not None or self.anomalies_to is not None:
                raise ValidationError(
                    {'anomalies': 'Time period makes no sense for anomalies calculation without climatology.', })

        # Spatial subsetting needs either no coordinates at all or all four.
        box = {'min_lat': self.min_lat, 'min_lon': self.min_lon, 'max_lat': self.max_lat, 'max_lon': self.max_lon}
        if any(x is None for x in box.values()) and any(x is not None for x in box.values()):
            affected_fields = {}
            for key, value in box.items():
                if value is None:
                    affected_fields[key] = 'For spatial subsetting, please set all bounding box coordinates.'
            raise ValidationError(affected_fields)

    def __str__(self):
        # NOTE(review): the trailing " )" has no matching "(" — looks like a
        # typo in the output format; left unchanged to preserve behaviour.
        return "id: {}, user: {}, start: {} )".format(self.id, self.user, self.start_time)

    @property
    def output_dir_url(self):
        """URL of the directory containing the output file, or None if no file exists."""
        if bool(self.output_file) is False:
            return None
        # Strip the trailing file name component from the file URL.
        url = regex_sub('[^/]+$', '', self.output_file.url)
        return url

    @property
    def output_file_name(self):
        """Bare file name of the output file, or None if no file exists."""
        if bool(self.output_file) is False:
            return None
        # NOTE(review): assumes the stored name has exactly one directory level
        # ("<run_dir>/<file>"); split('/')[-1] would be more robust — confirm.
        name = self.output_file.name.split('/')[1]
        return name

    @property
    def is_a_copy(self):
        """True if this run is recorded as a copy of another run."""
        # A CopiedValidations row whose copied_run differs from its
        # original_run marks this run as a copy of someone else's run.
        copied_runs = CopiedValidations.objects.filter(copied_run_id=self.id)\
            .annotate(is_copied=ExpressionWrapper(~Q(copied_run=F('original_run')), output_field=BooleanField())) \
            .filter(is_copied=True)

        return len(copied_runs) != 0
@receiver(post_delete, sender=ValidationRun)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """Remove the run's output directory from disk after the model row is deleted."""
    if not instance.output_file:
        return
    run_dir = path.dirname(instance.output_file.path)
    if path.isdir(run_dir):
        rmtree(run_dir)
| validator/models/validation_run.py | 8,724 | scaling methods scale to anomalies upscaling options fields only applicable if anomalies with climatology is selected upscaling of ISMN point measurements many-to-one relationships coming from other models: dataset_configurations from DatasetConfiguration celery_tasks from CeleryTask delete model output directory on disk when model is deleted | 344 | en | 0.833528 |
# Exercise 049: redo exercise 009 — print the multiplication table of a
# number chosen by the user, this time using a FOR loop.
print('=-=' * 3)
print('TABUADA')
print('=-=' * 3)
# Read the number whose table should be printed.
n = int(input('Digite o número que deseja saber a tabuada: '))
for c in range(1, 11):
    # The product is computed inline; the original's pre-initialised
    # accumulator variable (m = 0) was dead code and has been removed.
    print('{} x {} = {}.'.format(n, c, n * c))
| Python 3 - Curso completo/exercicio049.py | 300 | Refaça o exercicio009, mostrando a tabuada de um número que um usuário escolher utilizando FOR. | 95 | pt | 0.966125 |
from functools import lru_cache
import sqlalchemy
class lru_cache_in_transaction:  # noqa: N801
    """
    Decorator that memoizes a function for the duration of a database transaction.

    Up to the `maxsize` most recent calls are cached, and the underlying cache
    is cleared automatically when the top-level transaction on `session` ends.
    Because a dictionary backs the cache, all positional and keyword arguments
    to the decorated function must be hashable.

    See :py:func:`functools.lru_cache` for the meaning of the `maxsize` and
    `typed` arguments.

    Example::

        @lru_cache_in_transaction(session)
        def fetch_user(userid):
            return session.query(models.User).filter_by(userid=userid).one_or_none()

        fetch_user('acct:foo@example.com')  # => executes a query
        fetch_user('acct:foo@example.com')  # => returns cached value
        fetch_user('acct:bar@example.com')  # => executes a query
        session.commit()
        fetch_user('acct:foo@example.com')  # => executes a query
    """

    def __init__(self, session, maxsize=128, typed=False):
        self._session = session
        self._maxsize = maxsize
        self._typed = typed

    def __call__(self, func):
        memoized = lru_cache(maxsize=self._maxsize, typed=self._typed)(func)
        # Drop all cached values once the top-level transaction finishes.
        on_transaction_end(self._session)(memoized.cache_clear)
        return memoized
def on_transaction_end(session):
    """
    Decorator for a function that should run after a top-level transaction ends.

    Transactions that are committed or rolled back (implicitly or explicitly)
    are closed at the end of a Pyramid view. This hook exists so caches can be
    cleaned up, preventing code that runs after the view — exception views,
    for example — from reaching detached instances.

    Example usage:

    .. code-block:: python

       @util.db.on_transaction_end(session)
       def flush_cache():
           self._cache = {}
    """

    def decorator(wrapped):
        def _on_transaction_end(_session, transaction):
            # Nested transactions have a parent; only react to the outermost one.
            if transaction.parent is not None:
                return
            wrapped()

        sqlalchemy.event.listen(session, "after_transaction_end", _on_transaction_end)
        return wrapped

    return decorator
| h/util/db.py | 2,304 | Decorator to wrap a function with a memoizing callable that saves up to
the `maxsize` most recent calls. The underlying cache is automatically
cleared at the end of the database transaction.
Since a dictionary is used to cache results, the positional and keyword
arguments to the function must be hashable.
For documentation of the `maxsize` and `typed` arguments, see the
documentation of :py:func:`functools.lru_cache`.
Example::
@lru_cache_in_transaction(session)
def fetch_user(userid):
return session.query(models.User).filter_by(userid=userid).one_or_none()
fetch_user('acct:foo@example.com') # => executes a query
fetch_user('acct:foo@example.com') # => returns cached value
fetch_user('acct:bar@example.com') # => executes a query
session.commit()
fetch_user('acct:foo@example.com') # => executes a query
Decorator for a function which should run after a top-level transaction ended.
Transactions that are either implicitly or explicitly committed or rolled back will be
closed at the end of a Pyramid view. This is here for cleaning up caches so that
code after the view, exception views for example, will not be able to access
detached instances.
Example usage:
.. code-block:: python
@util.db.on_transaction_end(session)
def flush_cache():
self._cache = {}
noqa: N801 We only clear the cache when the top-level transaction finishes. | 1,409 | en | 0.696636 |
"""
django:
https://docs.djangoproject.com/en/3.0/topics/http/middleware/
https://docs.djangoproject.com/en/3.0/ref/settings/#middleware
"""
MIDDLEWARE = (
"django_prometheus.middleware.PrometheusBeforeMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django_prometheus.middleware.PrometheusAfterMiddleware",
)
| hcap/settings/general/middleware.py | 688 | django:
https://docs.djangoproject.com/en/3.0/topics/http/middleware/
https://docs.djangoproject.com/en/3.0/ref/settings/#middleware | 140 | en | 0.654501 |
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
from openvino.tools.pot.configs.config import Config
from .utils.path import TOOL_CONFIG_PATH
# Maps a test id to a pair of:
#   (algorithm config containing one deliberately unknown/misspelled
#    parameter, expected RuntimeError message raised by config validation)
ALGORITHM_SETTINGS = {
    # 'perset' is a typo of 'preset'.
    'wrong_preset': (
        {
            'name': 'MinMaxQuantization',
            'params': {
                'perset': 'accuracy',
                'stat_subset_size': 1
            }
        },
        'Algorithm MinMaxQuantization. Unknown parameter: perset'
    ),
    # 'stats_subset_size' should be 'stat_subset_size'.
    'wrong_stats_subset_size': (
        {
            'name': 'DefaultQuantization',
            'params': {
                'preset': 'accuracy',
                'stats_subset_size': 1
            }
        },
        'Algorithm DefaultQuantization. Unknown parameter: stats_subset_size'
    ),
    # 'weight' should be 'weights'.
    'wrong_weights': (
        {
            'name': 'DefaultQuantization',
            'params': {
                'activations': {
                    'bits': 8,
                    'mode': 'symmetric',
                    'granularity': 'pertensor',
                    'range_estimator': {
                        'preset': 'quantile'
                    }
                },
                'weight': {
                    'bits': 8,
                    'level_low': -127,
                    'level_high': 127
                },
                'stat_subset_size': 1
            }
        },
        'Algorithm DefaultQuantization. Unknown parameter: weight'
    ),
    # 'type' should be 'mode' in the activations section.
    'wrong_mode': (
        {
            'name': 'DefaultQuantization',
            'params': {
                'activations': {
                    'bits': 8,
                    'type': 'symmetric',
                    'granularity': 'pertensor',
                    'range_estimator': {
                        'preset': 'quantile'
                    }
                },
                'weights': {
                    'bits': 8,
                    'level_low': -127,
                    'level_high': 127
                },
                'stat_subset_size': 1
            }
        },
        'Algorithm DefaultQuantization. Unknown parameter: type'
    ),
    # 'outlier' should be 'outlier_prob' in the weights range estimator.
    'wrong_outlier_prob': (
        {
            'name': 'AccuracyAwareQuantization',
            'params': {
                'metric_subset_ratio': 0.5,
                'ranking_subset_size': 300,
                'max_iter_num': 10,
                'maximal_drop': 0.005,
                'drop_type': 'absolute',
                'use_prev_if_drop_increase': False,
                'base_algorithm': 'DefaultQuantization',
                'activations': {
                    'bits': 8,
                    'mode': 'symmetric',
                    'granularity': 'pertensor',
                    'range_estimator': {
                        'preset': 'quantile'
                    }
                },
                'weights': {
                    'bits': 8,
                    'level_low': -127,
                    'level_high': 127,
                    'range_estimator': {
                        'max': {
                            'type': 'quantile',
                            'outlier': 0.0001
                        }
                    }
                },
                'stat_subset_size': 1
            }
        },
        'Algorithm AccuracyAwareQuantization. Unknown parameter: outlier'
    ),
    # 'max_drop' should be 'maximal_drop'.
    'wrong_maximal_drop': (
        {
            'name': 'AccuracyAwareQuantization',
            'params': {
                'metric_subset_ratio': 0.5,
                'ranking_subset_size': 300,
                'max_iter_num': 10,
                'max_drop': 0.005,
                'drop_type': 'absolute',
                'use_prev_if_drop_increase': False,
                'base_algorithm': 'DefaultQuantization',
                'activations': {
                    'bits': 8,
                    'mode': 'symmetric',
                    'granularity': 'pertensor',
                    'range_estimator': {
                        'preset': 'quantile'
                    }
                },
                'weights': {
                    'bits': 8,
                    'level_low': -127,
                    'level_high': 127,
                    'range_estimator': {
                        'max': {
                            'type': 'quantile',
                            'outlier_prob': 0.0001
                        }
                    }
                },
                'stat_subset_size': 1
            }
        },
        'Algorithm AccuracyAwareQuantization. Unknown parameter: max_drop'
    )
}
@pytest.mark.parametrize(
    'algorithm_settings', ALGORITHM_SETTINGS.items(),
    ids=['{}_config'.format(os.path.splitext(c)[0]) for c in ALGORITHM_SETTINGS]
)
def test_algo_params_validation(algorithm_settings):
    """Config validation must reject algorithm configs with unknown parameters."""
    # Each parametrized item is (test id, (broken algo config, expected error)).
    broken_algo_config, expected_error = algorithm_settings[1]

    tool_config_path = TOOL_CONFIG_PATH.joinpath('mobilenet-v2-pytorch_single_dataset.json').as_posix()
    config = Config.read_config(tool_config_path)
    config['compression']['algorithms'][0] = broken_algo_config

    with pytest.raises(RuntimeError, match=expected_error):
        config.validate_algo_config()
| tools/pot/tests/test_wrong_config.py | 5,163 | Copyright (C) 2020-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0 | 77 | en | 0.26312 |
from backports import tempfile
import numpy as np
import os
import dill
import tensorflow as tf
import zipfile
import baselines.common.tf_util as U
from build_graph import build_act, build_train
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
class ActWrapper(object):
    """Callable wrapper around a trained act function.

    Bundles the act function with the parameters needed to rebuild it
    (`act_params`) and adds the ability to persist/restore the whole model
    (TF variables included) via `save` and `load`.
    """

    def __init__(self, act, act_params):
        self._act = act
        self._act_params = act_params

    @staticmethod
    def load(path, num_cpu=16):
        """Restore an ActWrapper previously written by `save`.

        The pickle at `path` holds a (zipped checkpoint bytes, act_params)
        pair: the act graph is rebuilt from act_params and the checkpoint is
        restored into a fresh TF session.
        """
        with open(path, "rb") as f:
            model_data, act_params = dill.load(f)
        act = build_act(**act_params)
        sess = U.make_session(num_cpu=num_cpu)
        sess.__enter__()  # session stays open so the act function remains usable
        with tempfile.TemporaryDirectory() as td:
            arc_path = os.path.join(td, "packed.zip")
            with open(arc_path, "wb") as f:
                f.write(model_data)
            # Unpack the checkpoint files and restore the TF variables.
            zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
            U.load_state(os.path.join(td, "model"))
        return ActWrapper(act, act_params)

    def __call__(self, *args, **kwargs):
        # Delegate directly to the wrapped act function.
        return self._act(*args, **kwargs)

    def save(self, path):
        """Save model to a pickle located at `path`"""
        with tempfile.TemporaryDirectory() as td:
            # Checkpoint the TF variables into the temp dir, then zip them up.
            U.save_state(os.path.join(td, "model"))
            arc_name = os.path.join(td, "packed.zip")
            with zipfile.ZipFile(arc_name, 'w') as zipf:
                for root, dirs, files in os.walk(td):
                    for fname in files:
                        file_path = os.path.join(root, fname)
                        # Skip the archive file itself while zipping.
                        if file_path != arc_name:
                            zipf.write(file_path, os.path.relpath(file_path, td))
            with open(arc_name, "rb") as f:
                model_data = f.read()
        # Persist (checkpoint bytes, act_params) as a single pickle.
        with open(path, "wb") as f:
            dill.dump((model_data, self._act_params), f)
def load(path, num_cpu=16):
    """Restore an act function previously saved by the learn function.

    Parameters
    ----------
    path: str
        path to the act function pickle
    num_cpu: int
        number of cpus to use for executing the policy

    Returns
    -------
    act: ActWrapper
        function that takes a batch of observations
        and returns actions.
    """
    # Thin module-level convenience wrapper around ActWrapper.load.
    return ActWrapper.load(path, num_cpu=num_cpu)
def learn(env,
q_func,
lr=5e-4,
max_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=1,
checkpoint_freq=10000,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
num_cpu=16,
callback=None):
"""Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = U.make_session(num_cpu=num_cpu)
sess.__enter__()
def make_obs_ph(name):
return U.BatchInput(env.observation_space.shape, name=name)
act, train, update_target, debug = build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = max_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
with tempfile.TemporaryDirectory() as td:
model_saved = False
model_file = os.path.join(td, "model")
for t in range(max_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
new_obs, rew, done, _ = env.step(action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
U.save_state(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
U.load_state(model_file)
return ActWrapper(act, act_params)
| baselines/deepq/simple.py | 10,501 | Train a deepq model.
Parameters
-------
env : gym.Env
environment to train on
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
lr: float
learning rate for adam optimizer
max_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to max_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
num_cpu: int
number of cpus to use for training
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
num_cpu: int
number of cpus to use for executing the policy
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
Save model to a pickle located at `path`
Create all the functions necessary to train the model Create the replay buffer Create the schedule for exploration starting from 1. Initialize the parameters and copy them to the target network. Take action and update exploration to the newest value Store transition in the replay buffer. Minimize the error in Bellman's equation on a batch sampled from replay buffer. Update target network periodically. | 3,177 | en | 0.811587 |
import pytest
from exchange_calendars.exchange_calendar_xshg import XSHGExchangeCalendar
from .test_exchange_calendar import ExchangeCalendarTestBase
from .test_utils import T
class TestXSHGCalendar(ExchangeCalendarTestBase):
    """Exchange-calendar test suite for XSHG (Shanghai Stock Exchange)."""
    @pytest.fixture(scope="class")
    def calendar_cls(self):
        return XSHGExchangeCalendar
    @pytest.fixture
    def max_session_hours(self):
        # A Shanghai session spans at most 5.5 hours (09:30 to 15:00).
        return 5.5
    @pytest.fixture
    def start_bound(self):
        return T("1999-01-01")
    @pytest.fixture
    def end_bound(self):
        return T("2025-12-31")
    @pytest.fixture
    def regular_holidays_sample(self):
        # A spot-check of sessions the calendar must treat as holidays.
        sample_2017 = [
            "2017-01-02",  # New Year's Day (observed)
            "2017-01-27",  # Chinese New Year period
            "2017-01-30",
            "2017-01-31",
            "2017-02-01",
            "2017-02-02",
            "2017-04-03",  # Qingming Festival period
            "2017-04-04",
            "2017-05-01",  # Labour Day
            "2017-05-29",  # Dragon Boat Festival period
            "2017-05-30",
            "2017-10-02",  # National Day / Golden Week
            "2017-10-03",
            "2017-10-04",
            "2017-10-05",
            "2017-10-06",
        ]
        sample_2020 = ["2020-01-31"]
        return sample_2017 + sample_2020
| tests/test_xshg_calendar.py | 1,181 | Shanghai stock exchange is open from 9:30 am to 3pm 2017 2020 | 61 | en | 0.912652 |
# Generated by Django 4.0.3 on 2022-04-06 17:40
from django.db import migrations
class Migration(migrations.Migration):
    # Must run after the migration that altered Records.phasesDay.
    dependencies = [
        ('records', '0006_alter_records_phasesday'),
    ]
    # Rename the Records.date column to Records.created_date.
    operations = [
        migrations.RenameField(
            model_name='records',
            old_name='date',
            new_name='created_date',
        ),
    ]
| records/migrations/0007_rename_date_records_created_date.py | 373 | Generated by Django 4.0.3 on 2022-04-06 17:40 | 45 | en | 0.721182 |
# Unit PYG02: Pygame Wall Ball Game version 3 (keyboard-controlled)
import pygame,sys
pygame.init()
# Query the display so the game can run fullscreen at native resolution.
vINFO=pygame.display.Info()
print(vINFO)
size = width, height = vINFO.current_w,vINFO.current_h
# Ball velocity in pixels per frame: [horizontal, vertical].
speed = [1,1]
BLACK = 0, 0, 0
screen = pygame.display.set_mode(size,pygame.FULLSCREEN)
# Window icon and caption (caption text is intentionally non-ASCII).
icon=pygame.image.load("1.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("Pygame壁球")
ball = pygame.image.load("PYG02-ball.gif")
ballrect = ball.get_rect()
# Target frame rate; higher fps makes the ball move faster on screen.
fps = 300
fclock = pygame.time.Clock()
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                # LEFT: shrink |horizontal speed| by 1 while keeping its sign;
                # a zero speed stays zero (guards the division by abs()).
                speed[0] = speed[0] if speed[0] == 0 else (abs(speed[0]) - 1)*int(speed[0]/abs(speed[0]))
            elif event.key == pygame.K_RIGHT:
                # RIGHT: grow |horizontal speed| by 1, preserving direction.
                speed[0] = speed[0] + 1 if speed[0] > 0 else speed[0] - 1
            elif event.key == pygame.K_UP:
                # UP: grow |vertical speed| by 1, preserving direction.
                speed[1] = speed[1] + 1 if speed[1] > 0 else speed[1] - 1
            elif event.key == pygame.K_DOWN:
                # DOWN: shrink |vertical speed| by 1 while keeping its sign.
                speed[1] = speed[1] if speed[1] == 0 else (abs(speed[1]) - 1)*int(speed[1]/abs(speed[1]))
            elif event.key==pygame.K_e:
                # 'e' quits the fullscreen game (there is no window close box).
                sys.exit()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                # Debug aid: dump the left-click event to the console.
                print(repr(event))
    ballrect = ballrect.move(speed)
    # Bounce off the screen edges by flipping the relevant velocity component.
    if ballrect.left < 0 or ballrect.right > width:
        speed[0] = - speed[0]
    if ballrect.top < 0 or ballrect.bottom > height:
        speed[1] = - speed[1]
    screen.fill(BLACK)
    screen.blit(ball, ballrect)
    pygame.display.update()
    fclock.tick(fps)
import asyncio
import os
from pstats import Stats
from tempfile import NamedTemporaryFile
from aiomisc.service.profiler import Profiler
async def test_profiler_start_stop():
    """The profiler service starts, runs briefly, and stops cleanly."""
    service = Profiler(interval=0.1, top_results=10)
    try:
        await service.start()
        # Let a few profiling intervals elapse while the service is running.
        await asyncio.sleep(0.5)
    finally:
        # Always stop, even if start() itself failed partway through.
        await service.stop()
async def test_profiler_dump():
    """Profiler writes its stats dump to `path` only once per interval.

    Verifies that two reads inside one interval see identical stats and a
    read after the interval elapses sees updated stats.

    Fix: the original created *two* ``NamedTemporaryFile(delete=False)``
    objects; the first (``fl``) was never used and never deleted, leaking a
    temp file on disk every run, and the second's handle was never closed.
    A single temp file, closed via ``with`` and removed in ``finally``, is
    enough — the profiler reopens the path by name.
    """
    profiler = None
    with NamedTemporaryFile(delete=False) as fl:
        path = fl.name
    try:
        profiler = Profiler(
            interval=0.1, top_results=10,
            path=path
        )
        await profiler.start()
        # Get first update
        await asyncio.sleep(0.01)
        stats1 = Stats(path)
        # Not enough sleep till next update
        await asyncio.sleep(0.01)
        stats2 = Stats(path)
        # Getting the same dump
        assert stats1.stats == stats2.stats
        # Enough sleep till next update
        await asyncio.sleep(0.2)
        stats3 = Stats(path)
        # Getting updated dump
        assert stats2.stats != stats3.stats
    finally:
        if profiler:
            await profiler.stop()
        os.remove(path)
| tests/test_profiler.py | 1,198 | Get first update Not enough sleep till next update Getting the same dump Enough sleep till next update Getting updated dump | 123 | en | 0.513932 |
# Use legacy numpy printing. This fix is made to keep doctests functional.
# For more info, see https://github.com/scikit-image/scikit-image/pull/2935 .
# TODO: remove this workaround once minimal required numpy is set to 1.14.0
#
# Fix: the original compared versions with distutils.version.LooseVersion;
# distutils is deprecated since Python 3.10 and removed in 3.12, so parse the
# leading "major.minor" of numpy's version with the stdlib instead.
import re
import numpy as np

_match = re.match(r"(\d+)\.(\d+)", np.__version__)
if _match and tuple(int(g) for g in _match.groups()) >= (1, 14):
    np.set_printoptions(legacy='1.13')

# List of files that pytest should ignore
collect_ignore = ["io/_plugins"]
try:
    import visvis
except ImportError:
    # visvis is optional; skip the test module that needs it.
    collect_ignore.append("measure/mc_meta/visual_test.py")
| venv/Lib/site-packages/skimage/conftest.py | 569 | Use legacy numpy printing. This fix is made to keep doctests functional. For more info, see https://github.com/scikit-image/scikit-image/pull/2935 . TODO: remove this workaround once minimal required numpy is set to 1.14.0 List of files that pytest should ignore | 262 | en | 0.809332 |
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_in_group(app, db):
    """Remove a random contact from a random group; UI and DB must agree.

    NOTE(review): despite its name, this test exercises *removal* of a
    contact from a group (consistent with the module name
    test_del_contact_from_group); renaming would change the collected test
    id, so the name is kept.

    Fixes over the original: the dead ``else: True`` branch and the
    leftover debug ``print()`` calls are removed, and emptiness checks use
    truthiness.
    """
    # Preconditions: at least one contact and one group must exist.
    if app.contact.count() == 0:
        app.contact.create_new(Contact(firstname="Contact for deletion", middlename="some middlename", lastname="some last name"))
    if not app.group.get_group_list():
        app.group.create(Group(name="Group for deletion"))
    group_id = app.group.get_random_group_id()
    contacts_in_group = app.contact.get_contacts_in_group(group_id)
    if not contacts_in_group:
        # Nothing to remove from this group; trivially passes.
        return
    contact = random.choice(contacts_in_group)
    app.contact.remove_from_group(contact.id, group_id)
    contact_ui = app.contact.get_contacts_in_group(group_id)
    contact_db = db.get_contacts_in_group(group_id)
    assert contact_db == contact_ui
#
# contact = app.contact.get_contacts_in_group(group_id)
#
# contacts = db.get_contact_list()
#
# contact = random.choice(contacts)
# app.contact.add_contact_to_group(contact.id, group_id)
#
# contact_db = db.get_contacts_in_group(group_id)
# assert contact_db == contact_ui | test/test_del_contact_from_group.py | 1,216 | contact = app.contact.get_contacts_in_group(group_id) contacts = db.get_contact_list() contact = random.choice(contacts) app.contact.add_contact_to_group(contact.id, group_id) contact_db = db.get_contacts_in_group(group_id) assert contact_db == contact_ui | 255 | en | 0.125237 |
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import shutil
import numpy as np
from monty.json import MontyDecoder
from pymatgen.io.vasp.sets import MITVaspInputSet, MITHSEVaspInputSet, \
MPVaspInputSet, MITGGAVaspInputSet, MITNEBVaspInputSet,\
MPStaticVaspInputSet, MPNonSCFVaspInputSet, MITMDVaspInputSet,\
MPHSEVaspInputSet, MPBSHSEVaspInputSet, MPStaticDielectricDFPTVaspInputSet,\
MPOpticsNonSCFVaspInputSet
from pymatgen.io.vasp.inputs import Poscar, Incar
from pymatgen import Specie, Lattice, Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
dec = MontyDecoder()
class MITMPVaspInputSetTest(unittest.TestCase):
    """Tests for the MIT / Materials Project VASP input-set generators."""
    def setUp(self):
        """Load a reference POSCAR and build every input-set variant under test."""
        # POTCAR lookup requires VASP_PSP_DIR; fall back to the test dir.
        if "VASP_PSP_DIR" not in os.environ:
            os.environ["VASP_PSP_DIR"] = test_dir
        filepath = os.path.join(test_dir, 'POSCAR')
        poscar = Poscar.from_file(filepath)
        self.struct = poscar.structure
        self.mitparamset = MITVaspInputSet()
        self.mitparamset_unsorted = MITVaspInputSet(sort_structure=False)
        self.mithseparamset = MITHSEVaspInputSet()
        self.paramset = MPVaspInputSet()
        self.userparamset = MPVaspInputSet(
            user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
        )
        self.mitggaparam = MITGGAVaspInputSet()
        self.mpstaticparamset = MPStaticVaspInputSet()
        self.mpnscfparamsetu = MPNonSCFVaspInputSet(
            {"NBANDS": 50}, mode="Uniform")
        self.mpnscfparamsetl = MPNonSCFVaspInputSet(
            {"NBANDS": 60}, mode="Line")
        self.mphseparamset = MPHSEVaspInputSet()
        self.mpbshseparamsetl = MPBSHSEVaspInputSet(mode="Line")
        self.mpbshseparamsetu = MPBSHSEVaspInputSet(
            mode="Uniform", added_kpoints=[[0.5, 0.5, 0.0]])
        self.mpdielparamset = MPStaticDielectricDFPTVaspInputSet()
    def test_get_poscar(self):
        """sort_structure controls whether POSCAR species get reordered."""
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Fe", "Mn"], coords)
        s_unsorted = self.mitparamset_unsorted.get_poscar(struct).structure
        s_sorted = self.mitparamset.get_poscar(struct).structure
        self.assertEqual(s_unsorted[0].specie.symbol, 'Fe')
        self.assertEqual(s_sorted[0].specie.symbol, 'Mn')
    def test_get_potcar_symbols(self):
        """POTCAR symbols follow the (optionally sorted) structure order."""
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        coords.append([0.75, 0.25, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["P", "Fe", "O"], coords)
        syms = self.paramset.get_potcar_symbols(struct)
        self.assertEqual(syms, ['Fe_pv', 'P', 'O'])
        syms = MPVaspInputSet(sort_structure=False).get_potcar_symbols(struct)
        self.assertEqual(syms, ['P', 'Fe_pv', 'O'])
    def test_false_potcar_hash(self):
        """get_potcar(check_hash=True) rejects a mismatched POTCAR symbol."""
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        coords.append([0.75, 0.25, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["P", "Fe", "O"], coords)
        # Deliberately corrupt the symbol, then restore it afterwards.
        self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe_pv'
        self.assertRaises(ValueError, self.mitparamset.get_potcar, struct, check_hash=True)
        self.mitparamset.potcar_settings['Fe']['symbol'] = 'Fe'
    def test_lda_potcar(self):
        """potcar_functional="LDA" propagates to the generated POTCAR."""
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["P", "Fe"], coords)
        p = MITVaspInputSet(potcar_functional="LDA").get_potcar(struct)
        self.assertEqual(p.functional, 'LDA')
    def test_get_nelect(self):
        """Electron count is summed from the POTCAR valence electrons."""
        coords = [[0]*3, [0.5]*3, [0.75]*3]
        lattice = Lattice.cubic(4)
        s = Structure(lattice, ['Si', 'Si', 'Fe'], coords)
        self.assertAlmostEqual(MITVaspInputSet().get_nelect(s), 16)
    def test_get_incar(self):
        """INCAR generation: LDAU/MAGMOM/EDIFF rules across all variants."""
        incar = self.paramset.get_incar(self.struct)
        self.assertEqual(incar['LDAUU'], [5.3, 0, 0])
        self.assertAlmostEqual(incar['EDIFF'], 0.0012)
        incar = self.mitparamset.get_incar(self.struct)
        self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
        self.assertAlmostEqual(incar['EDIFF'], 0.0012)
        incar_gga = self.mitggaparam.get_incar(self.struct)
        self.assertNotIn("LDAU", incar_gga)
        incar_static = self.mpstaticparamset.get_incar(self.struct)
        self.assertEqual(incar_static["NSW"], 0)
        incar_nscfl = self.mpnscfparamsetl.get_incar(self.struct)
        self.assertEqual(incar_nscfl["NBANDS"], 60)
        incar_nscfu = self.mpnscfparamsetu.get_incar(self.struct)
        self.assertEqual(incar_nscfu["ISYM"], 0)
        incar_hse = self.mphseparamset.get_incar(self.struct)
        self.assertEqual(incar_hse['LHFCALC'], True)
        self.assertEqual(incar_hse['HFSCREEN'], 0.2)
        incar_hse_bsl = self.mpbshseparamsetl.get_incar(self.struct)
        self.assertEqual(incar_hse_bsl['LHFCALC'], True)
        self.assertEqual(incar_hse_bsl['HFSCREEN'], 0.2)
        self.assertEqual(incar_hse_bsl['NSW'], 0)
        incar_hse_bsu = self.mpbshseparamsetu.get_incar(self.struct)
        self.assertEqual(incar_hse_bsu['LHFCALC'], True)
        self.assertEqual(incar_hse_bsu['HFSCREEN'], 0.2)
        self.assertEqual(incar_hse_bsu['NSW'], 0)
        incar_diel = self.mpdielparamset.get_incar(self.struct)
        self.assertEqual(incar_diel['IBRION'], 8)
        self.assertEqual(incar_diel['LEPSILON'], True)
        si = 14
        coords = list()
        coords.append(np.array([0, 0, 0]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        #Silicon structure for testing.
        latt = Lattice(np.array([[3.8401979337, 0.00, 0.00],
                                 [1.9200989668, 3.3257101909, 0.00],
                                 [0.00, -2.2171384943, 3.1355090603]]))
        struct = Structure(latt, [si, si], coords)
        incar = self.paramset.get_incar(struct)
        self.assertNotIn("LDAU", incar)
        incar = self.mithseparamset.get_incar(self.struct)
        self.assertTrue(incar['LHFCALC'])
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        lattice = Lattice([[3.8401979337, 0.00, 0.00],
                           [1.9200989668, 3.3257101909, 0.00],
                           [0.00, -2.2171384943, 3.1355090603]])
        struct = Structure(lattice, ["Fe", "Mn"], coords)
        incar = self.paramset.get_incar(struct)
        self.assertNotIn('LDAU', incar)
        #check fluorides
        struct = Structure(lattice, ["Fe", "F"], coords)
        incar = self.paramset.get_incar(struct)
        self.assertEqual(incar['LDAUU'], [5.3, 0])
        self.assertEqual(incar['MAGMOM'], [5, 0.6])
        struct = Structure(lattice, ["Fe", "F"], coords)
        incar = self.mitparamset.get_incar(struct)
        self.assertEqual(incar['LDAUU'], [4.0, 0])
        #Make sure this works with species.
        struct = Structure(lattice, ["Fe2+", "O2-"], coords)
        incar = self.paramset.get_incar(struct)
        self.assertEqual(incar['LDAUU'], [5.3, 0])
        # Site-property magmoms override the element defaults (note the
        # reordering when sort_structure is on).
        struct = Structure(lattice, ["Fe", "Mn"], coords,
                           site_properties={'magmom': (5.2, -4.5)})
        incar = self.paramset.get_incar(struct)
        self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
        incar = self.mpstaticparamset.get_incar(struct)
        self.assertEqual(incar['MAGMOM'], [-4.5, 5.2])
        incar = self.mitparamset_unsorted.get_incar(struct)
        self.assertEqual(incar['MAGMOM'], [5.2, -4.5])
        struct = Structure(lattice, [Specie("Fe", 2, {'spin': 4.1}), "Mn"],
                           coords)
        incar = self.paramset.get_incar(struct)
        self.assertEqual(incar['MAGMOM'], [5, 4.1])
        incar = self.mpnscfparamsetl.get_incar(struct)
        self.assertEqual(incar.get('MAGMOM', None), None)
        struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
        incar = self.mitparamset.get_incar(struct)
        self.assertEqual(incar['MAGMOM'], [4, 3])
        incar = self.mpnscfparamsetu.get_incar(struct)
        self.assertEqual(incar.get('MAGMOM', None), None)
        self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
                         [100, 0.6])
        #sulfide vs sulfate test
        coords = list()
        coords.append([0, 0, 0])
        coords.append([0.75, 0.5, 0.75])
        coords.append([0.25, 0.5, 0])
        struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
        incar = self.mitparamset.get_incar(struct)
        self.assertEqual(incar['LDAUU'], [1.9, 0])
        #Make sure Matproject sulfides are ok.
        self.assertNotIn('LDAUU', self.paramset.get_incar(struct))
        self.assertNotIn('LDAUU', self.mpstaticparamset.get_incar(struct))
        struct = Structure(lattice, ["Fe", "S", "O"], coords)
        incar = self.mitparamset.get_incar(struct)
        self.assertEqual(incar['LDAUU'], [4.0, 0, 0])
        #Make sure Matproject sulfates are ok.
        self.assertEqual(self.paramset.get_incar(struct)['LDAUU'], [5.3, 0, 0])
        self.assertEqual(self.mpnscfparamsetl.get_incar(struct)['LDAUU'],
                         [5.3, 0, 0])
        self.assertEqual(self.userparamset.get_incar(struct)['MAGMOM'],
                         [10, -5, 0.6])
    def test_optics(self):
        """NonSCF optics set built from a previous run enables LOPTICS/NEDOS."""
        self.mpopticsparamset = MPOpticsNonSCFVaspInputSet.from_previous_vasp_run(
            '{}/static_silicon'.format(test_dir), output_dir='optics_test_dir',
            nedos=1145)
        self.assertTrue(os.path.exists('optics_test_dir/CHGCAR'))
        incar = Incar.from_file('optics_test_dir/INCAR')
        self.assertTrue(incar['LOPTICS'])
        self.assertEqual(incar['NEDOS'], 1145)
        #Remove the directory in which the inputs have been created
        shutil.rmtree('optics_test_dir')
    def test_get_kpoints(self):
        """KPOINTS generation: grids, styles and k-point counts per variant."""
        kpoints = self.paramset.get_kpoints(self.struct)
        self.assertEqual(kpoints.kpts, [[2, 4, 6]])
        self.assertEqual(kpoints.style, 'Monkhorst')
        kpoints = self.mitparamset.get_kpoints(self.struct)
        self.assertEqual(kpoints.kpts, [[2, 4, 6]])
        self.assertEqual(kpoints.style, 'Monkhorst')
        kpoints = self.mpstaticparamset.get_kpoints(self.struct)
        self.assertEqual(kpoints.kpts, [[6, 6, 4]])
        self.assertEqual(kpoints.style, 'Monkhorst')
        kpoints = self.mpnscfparamsetl.get_kpoints(self.struct)
        self.assertEqual(kpoints.num_kpts, 140)
        self.assertEqual(kpoints.style, 'Reciprocal')
        kpoints = self.mpnscfparamsetu.get_kpoints(self.struct)
        self.assertEqual(kpoints.num_kpts, 168)
        kpoints = self.mpbshseparamsetl.get_kpoints(self.struct)
        self.assertAlmostEqual(kpoints.num_kpts, 164)
        self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
        self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
        self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
        self.assertAlmostEqual(kpoints.kpts[26][0], 0.0714285714286)
        self.assertAlmostEqual(kpoints.kpts[26][1], 0.0)
        self.assertAlmostEqual(kpoints.kpts[26][2], 0.0)
        self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
        self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
        self.assertAlmostEqual(kpoints.kpts[-1][2], 0.5)
        kpoints = self.mpbshseparamsetu.get_kpoints(self.struct)
        self.assertAlmostEqual(kpoints.num_kpts, 25)
        self.assertAlmostEqual(kpoints.kpts[10][0], 0.0)
        self.assertAlmostEqual(kpoints.kpts[10][1], 0.5)
        self.assertAlmostEqual(kpoints.kpts[10][2], 0.16666667)
        self.assertAlmostEqual(kpoints.kpts[-1][0], 0.5)
        self.assertAlmostEqual(kpoints.kpts[-1][1], 0.5)
        self.assertAlmostEqual(kpoints.kpts[-1][2], 0.0)
    def test_get_all_vasp_input(self):
        """ISMEAR switches from -5 to 0 when the cell grows large."""
        d = self.mitparamset.get_all_vasp_input(self.struct)
        self.assertEqual(d["INCAR"]["ISMEAR"], -5)
        self.struct.make_supercell(4)
        d = self.mitparamset.get_all_vasp_input(self.struct)
        self.assertEqual(d["INCAR"]["ISMEAR"], 0)
    def test_to_from_dict(self):
        """Input sets survive as_dict() / MontyDecoder round-trips."""
        self.mitparamset = MITVaspInputSet()
        self.mithseparamset = MITHSEVaspInputSet()
        self.paramset = MPVaspInputSet()
        self.userparamset = MPVaspInputSet(
            user_incar_settings={'MAGMOM': {"Fe": 10, "S": -5, "Mn3+": 100}}
        )
        d = self.mitparamset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 4)
        d = self.mitggaparam.as_dict()
        v = dec.process_decoded(d)
        self.assertNotIn("LDAUU", v.incar_settings)
        d = self.mithseparamset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v.incar_settings["LHFCALC"], True)
        d = self.mphseparamset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v.incar_settings["LHFCALC"], True)
        d = self.paramset.as_dict()
        v = dec.process_decoded(d)
        self.assertEqual(v.incar_settings["LDAUU"]["O"]["Fe"], 5.3)
        d = self.userparamset.as_dict()
        v = dec.process_decoded(d)
        #self.assertEqual(type(v), MPVaspInputSet)
        self.assertEqual(v.incar_settings["MAGMOM"],
                         {"Fe": 10, "S": -5, "Mn3+": 100})
class MITMDVaspInputSetTest(unittest.TestCase):
    """Tests for the MIT molecular-dynamics VASP input set."""
    def setUp(self):
        """Load the reference POSCAR and build an MD input set (300K->1200K)."""
        poscar_path = os.path.join(test_dir, 'POSCAR')
        self.struct = Poscar.from_file(poscar_path).structure
        self.md_set = MITMDVaspInputSet(300, 1200, 10000)
    def test_get_potcar_symbols(self):
        """POTCAR symbols come out in structure order."""
        self.assertEqual(
            self.md_set.get_potcar_symbols(self.struct), ['Fe', 'P', 'O'])
    def test_get_incar(self):
        """MD runs disable LDAU and loosen the electronic convergence."""
        incar = self.md_set.get_incar(self.struct)
        self.assertNotIn("LDAUU", incar)
        self.assertAlmostEqual(incar['EDIFF'], 2.4e-5)
    def test_get_kpoints(self):
        """MD uses a Gamma-only k-point mesh."""
        kpoints = self.md_set.get_kpoints(self.struct)
        self.assertEqual(kpoints.kpts, [(1, 1, 1)])
        self.assertEqual(kpoints.style, 'Gamma')
    def test_to_from_dict(self):
        """Round-tripping through as_dict()/decoder preserves type and TEBEG."""
        round_tripped = dec.process_decoded(self.md_set.as_dict())
        self.assertEqual(type(round_tripped), MITMDVaspInputSet)
        self.assertEqual(round_tripped.incar_settings["TEBEG"], 300)
class MITNEBVaspInputSetTest(unittest.TestCase):
    """Tests for the MIT nudged-elastic-band (NEB) VASP input set."""
    def setUp(self):
        """Load the reference POSCAR and build a 10-image NEB set, no +U."""
        poscar_path = os.path.join(test_dir, 'POSCAR')
        self.struct = Poscar.from_file(poscar_path).structure
        self.neb_set = MITNEBVaspInputSet(nimages=10, hubbard_off=True)
    def test_get_potcar_symbols(self):
        """POTCAR symbols come out in structure order."""
        self.assertEqual(
            self.neb_set.get_potcar_symbols(self.struct), ['Fe', 'P', 'O'])
    def test_get_incar(self):
        """hubbard_off removes LDAU; EDIFF is the NEB default."""
        incar = self.neb_set.get_incar(self.struct)
        self.assertNotIn("LDAUU", incar)
        self.assertAlmostEqual(incar['EDIFF'], 0.00005)
    def test_get_kpoints(self):
        """NEB keeps the standard Monkhorst-Pack grid."""
        kpoints = self.neb_set.get_kpoints(self.struct)
        self.assertEqual(kpoints.kpts, [[2, 4, 6]])
        self.assertEqual(kpoints.style, 'Monkhorst')
    def test_to_from_dict(self):
        """Round-tripping through as_dict()/decoder preserves IMAGES."""
        round_tripped = dec.process_decoded(self.neb_set.as_dict())
        self.assertEqual(round_tripped.incar_settings["IMAGES"], 10)
    def test_write_inputs(self):
        """Interpolated NEB images are re-wrapped into the unit cell."""
        start_coords = [[0.5] * 3, [0.9] * 3]
        end_coords = [[0.5] * 3, [0.9, 0.1, 0.1]]
        start = Structure(Lattice.cubic(5), ['Si', 'Si'], start_coords)
        end = Structure(Lattice.cubic(5), ['Si', 'Si'], end_coords)
        structs = [Structure.from_sites(s.sites, to_unit_cell=True)
                   for s in start.interpolate(end, 3, pbc=True)]
        fc = self.neb_set._process_structures(structs)[2].frac_coords
        self.assertTrue(np.allclose(fc, [[0.5]*3,[0.9, 1.033333, 1.0333333]]))
# Allow running this test module directly: `python test_sets.py`.
if __name__ == '__main__':
    unittest.main()
| pymatgen/io/vasp/tests/test_sets.py | 16,655 | coding: utf-8Silicon structure for testing.check fluoridesMake sure this works with species.sulfide vs sulfate testMake sure Matproject sulfides are ok.Make sure Matproject sulfates are ok.Remove the directory in which the inputs have been createdself.assertEqual(type(v), MPVaspInputSet) | 288 | en | 0.781664 |
#!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re

import opendb as odb

# --- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description='Creates obstructions in def files.')
parser.add_argument('--lef', '-l',
                    nargs='+',
                    type=str,
                    default=None,
                    required=True,
                    help='LEF file needed to have a proper view of the DEF files.')
parser.add_argument('--input-def', '-id', required=True,
                    help='DEF view of the design that needs to be obstructed.')
parser.add_argument('--obstructions', '-obs', required=True,
                    help='Format: layer llx lly urx ury, ... (in microns)')
parser.add_argument('--output', '-o', required=True,
                    help='Output DEF file.')
args = parser.parse_args()

input_lef_file_names = args.lef
input_def_file_name = args.input_def
obs_args = args.obstructions
output_def_file_name = args.output

# One obstruction spec is "layer llx lly urx ury" (coordinates in microns);
# multiple specs are comma-separated.
RE_NUMBER = r'[\-]?[0-9]+(\.[0-9]+)?'
RE_OBS = r'(?P<layer>\S+)\s+' r'(?P<bbox>' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r')'

# Parse every obstruction spec into (layer, [llx, lly, urx, ury]).
obses = obs_args.split(',')
obs_list = []
for obs in obses:
    obs = obs.strip()
    m = re.match(RE_OBS, obs)
    if m is None:
        # parser.error() prints usage and exits with status 2. The original
        # used `assert`, which is silently stripped under `python -O`.
        parser.error(
            "Incorrectly formatted input (%s).\n Format: layer llx lly urx ury, ..." % (obs))
    layer = m.group('layer')
    bbox = [float(x) for x in m.group('bbox').split()]
    obs_list.append((layer, bbox))

# Load the LEF views first so the DEF can be resolved against them.
design_db = odb.dbDatabase.create()
for lef in input_lef_file_names:
    odb.read_lef(design_db, lef)
odb.read_def(design_db, input_def_file_name)

design_chip = design_db.getChip()
design_block = design_chip.getBlock()
design_insts = design_block.getInsts()
design_tech = design_db.getTech()

# Convert micron coordinates to database units once; the factor is the same
# for every obstruction (hoisted out of the loop).
dbu = design_tech.getDbUnitsPerMicron()
for obs in obs_list:
    layer = obs[0]
    bbox = [int(x*dbu) for x in obs[1]]
    print("Creating an obstruction on", layer, "at", *bbox, "(DBU)")
    odb.dbObstruction_create(design_block, design_tech.findLayer(layer), *bbox)

odb.write_def(design_block, output_def_file_name)
| scripts/add_def_obstructions.py | 2,669 | !/usr/bin/env python3 Copyright 2020 Efabless Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 580 | en | 0.835673 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's message types with the shared protobuf package
# manifest (generated-code convention for proto-plus modules).
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1.schema.predict.instance",
    manifest={
        "TextSentimentPredictionInstance",
    },
)
class TextSentimentPredictionInstance(proto.Message):
    r"""Prediction input format for Text Sentiment.
    Attributes:
        content (str):
            The text snippet to make the predictions on.
        mime_type (str):
            The MIME type of the text snippet. The
            supported MIME types are listed below.
            - text/plain
    """
    # Raw text snippet to run sentiment prediction on (proto field 1).
    content = proto.Field(
        proto.STRING,
        number=1,
    )
    # MIME type of `content`; only text/plain is documented as supported
    # (proto field 2).
    mime_type = proto.Field(
        proto.STRING,
        number=2,
    )
# Export exactly the manifest's message names, sorted for a stable order.
__all__ = tuple(sorted(__protobuf__.manifest))
| google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py | 1,356 | Prediction input format for Text Sentiment.
Attributes:
content (str):
The text snippet to make the predictions on.
mime_type (str):
The MIME type of the text snippet. The
supported MIME types are listed below.
- text/plain
-*- coding: utf-8 -*- Copyright 2022 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore | 849 | en | 0.817936 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TrainingTest(test.TestCase):
  """Smoke tests for Model.fit/evaluate/predict under eager execution."""

  def test_fit_on_arrays(self):
    """fit/train_on_batch on numpy arrays: multi-IO, dict inputs, error cases."""
    a = keras.layers.Input(shape=(3,), name='input_a')
    b = keras.layers.Input(shape=(3,), name='input_b')
    # The same Dense layer is shared by both inputs.
    dense = keras.layers.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = keras.layers.Dropout(0.5, name='dropout')(c)
    model = keras.models.Model([a, b], [d, e])
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    loss_weights = [1., 0.5]
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_d_np = np.random.random((10, 4))
    output_e_np = np.random.random((10, 4))
    # Test fit at different verbosity
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=1,
        batch_size=5,
        verbose=0)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=1,
        batch_size=5,
        verbose=1)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=2,
        batch_size=5,
        verbose=2)
    # Test with validation data
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        validation_data=([input_a_np, input_b_np], [output_d_np,
                                                    output_e_np]),
        epochs=1,
        batch_size=5,
        verbose=0)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        validation_data=([input_a_np, input_b_np], [output_d_np,
                                                    output_e_np]),
        epochs=2,
        batch_size=5,
        verbose=1)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        validation_data=([input_a_np, input_b_np], [output_d_np,
                                                    output_e_np]),
        epochs=2,
        batch_size=5,
        verbose=2)
    model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
    # Test with validation split
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=2,
        batch_size=5,
        verbose=0,
        validation_split=0.2)
    # Test with dictionary inputs (keyed by layer name).
    model.fit(
        {
            'input_a': input_a_np,
            'input_b': input_b_np
        }, {'dense': output_d_np,
            'dropout': output_e_np},
        epochs=1,
        batch_size=5,
        verbose=0)
    model.fit(
        {
            'input_a': input_a_np,
            'input_b': input_b_np
        }, {'dense': output_d_np,
            'dropout': output_e_np},
        epochs=1,
        batch_size=5,
        verbose=1)
    model.fit(
        {
            'input_a': input_a_np,
            'input_b': input_b_np
        }, {'dense': output_d_np,
            'dropout': output_e_np},
        validation_data=({'input_a': input_a_np,
                          'input_b': input_b_np
                         },
                         {
                             'dense': output_d_np,
                             'dropout': output_e_np
                         }),
        epochs=1,
        batch_size=5,
        verbose=0)
    model.train_on_batch({
        'input_a': input_a_np,
        'input_b': input_b_np
    }, {'dense': output_d_np,
        'dropout': output_e_np})
    # Test with lists for loss, metrics
    loss = ['mae', 'mse']
    metrics = ['acc', 'mae']
    model.compile(optimizer, loss, metrics=metrics)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=1,
        batch_size=5,
        verbose=0)
    # Test with dictionaries for loss, metrics, loss weights
    loss = {'dense': 'mse', 'dropout': 'mae'}
    loss_weights = {'dense': 1., 'dropout': 0.5}
    metrics = {'dense': 'mse', 'dropout': 'mae'}
    model.compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights)
    model.fit(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        epochs=1,
        batch_size=5,
        verbose=0)
    # Invalid use cases
    with self.assertRaises(AttributeError):
      model.fit(
          [input_a_np, input_b_np], [output_d_np, output_e_np],
          epochs=1,
          validation_data=([input_a_np, input_b_np], 0, 0),
          verbose=0)
    with self.assertRaises(ValueError):
      # Missing one of the two required inputs.
      model.train_on_batch({'input_a': input_a_np},
                           [output_d_np, output_e_np])
    with self.assertRaises(ValueError):
      model.train_on_batch([input_a_np], [output_d_np, output_e_np])
    with self.assertRaises(AttributeError):
      model.train_on_batch(1, [output_d_np, output_e_np])
    with self.assertRaises(ValueError):
      model.train_on_batch(input_a_np, [output_d_np, output_e_np])
    with self.assertRaises(ValueError):
      # Mismatched batch dimension between the two inputs (11 vs 10).
      bad_input = np.random.random((11, 3))
      model.train_on_batch([bad_input, input_b_np],
                           [output_d_np, output_e_np])
    with self.assertRaises(ValueError):
      bad_target = np.random.random((11, 4))
      model.train_on_batch([input_a_np, input_b_np],
                           [bad_target, output_e_np])
    # Build single-input model
    x = keras.layers.Input(shape=(3,), name='input_a')
    y = keras.layers.Dense(4)(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
    # This will work
    model.fit([input_a_np], output_d_np, epochs=1)
    with self.assertRaises(ValueError):
      model.fit([input_a_np, input_a_np], output_d_np, epochs=1)

  def test_evaluate_predict_on_arrays(self):
    """evaluate/predict with list and dict inputs; checks output arity."""
    a = keras.layers.Input(shape=(3,), name='input_a')
    b = keras.layers.Input(shape=(3,), name='input_b')
    dense = keras.layers.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = keras.layers.Dropout(0.5, name='dropout')(c)
    model = keras.models.Model([a, b], [d, e])
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    loss_weights = [1., 0.5]
    metrics = ['acc', 'mae']
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        loss_weights=loss_weights,
        sample_weight_mode=None)
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_d_np = np.random.random((10, 4))
    output_e_np = np.random.random((10, 4))
    # Test evaluate at different verbosity
    # 7 outputs = total loss + 2 per-output losses + 2 metrics x 2 outputs.
    out = model.evaluate(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        batch_size=5,
        verbose=0)
    self.assertEqual(len(out), 7)
    out = model.evaluate(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        batch_size=5,
        verbose=1)
    self.assertEqual(len(out), 7)
    out = model.evaluate(
        [input_a_np, input_b_np], [output_d_np, output_e_np],
        batch_size=5,
        verbose=2)
    self.assertEqual(len(out), 7)
    out = model.test_on_batch([input_a_np, input_b_np],
                              [output_d_np, output_e_np])
    self.assertEqual(len(out), 7)
    # Test evaluate with dictionary inputs
    model.evaluate(
        {
            'input_a': input_a_np,
            'input_b': input_b_np
        }, {'dense': output_d_np,
            'dropout': output_e_np},
        batch_size=5,
        verbose=0)
    model.evaluate(
        {
            'input_a': input_a_np,
            'input_b': input_b_np
        }, {'dense': output_d_np,
            'dropout': output_e_np},
        batch_size=5,
        verbose=1)
    # Test predict
    out = model.predict([input_a_np, input_b_np], batch_size=5)
    self.assertEqual(len(out), 2)
    out = model.predict({'input_a': input_a_np, 'input_b': input_b_np})
    self.assertEqual(len(out), 2)
    out = model.predict_on_batch({
        'input_a': input_a_np,
        'input_b': input_b_np
    })
    self.assertEqual(len(out), 2)

  def test_invalid_loss_or_metrics(self):
    """compile/fit must reject malformed loss and metrics arguments."""
    num_classes = 5
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(1337)
    (x_train, y_train), (_, _) = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    with self.assertRaises(ValueError):
      # Target width does not match the model's output width.
      model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
    with self.assertRaises(TypeError):
      # NOTE(review): set(0) raises TypeError by itself, before compile() is
      # invoked -- confirm this is the intended trigger of the assertion.
      model.compile(loss='categorical_crossentropy',
                    optimizer=RMSPropOptimizer(learning_rate=0.001),
                    metrics=set(0))
    with self.assertRaises(ValueError):
      model.compile(loss=None,
                    optimizer='rms')

  def test_model_methods_with_eager_tensors_multi_io(self):
    """fit/evaluate/predict accept eager tensors (and mixed np/tensor) inputs."""
    a = keras.layers.Input(shape=(3,), name='input_a')
    b = keras.layers.Input(shape=(3,), name='input_b')
    dense = keras.layers.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = keras.layers.Dropout(0.5, name='dropout')(c)
    model = keras.models.Model([a, b], [d, e])
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    loss_weights = [1., 0.5]
    metrics = ['mae']
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        loss_weights=loss_weights,
        sample_weight_mode=None)
    input_a = keras.backend.zeros(shape=(10, 3))
    input_b = keras.backend.zeros(shape=(10, 3))
    target_d = keras.backend.zeros(shape=(10, 4))
    target_e = keras.backend.zeros(shape=(10, 4))
    model.fit(
        [input_a, input_b], [target_d, target_e],
        epochs=1,
        batch_size=5,
        verbose=0)
    # Test: no shuffle.
    model.fit(
        [input_a, input_b], [target_d, target_e],
        epochs=1,
        batch_size=5,
        verbose=0,
        shuffle=False)
    # Test: validation data.
    model.fit([input_a, input_b], [target_d, target_e],
              epochs=1, batch_size=2, verbose=0,
              validation_data=([input_a, input_b], [target_d, target_e]))
    model.train_on_batch([input_a, input_b], [target_d, target_e])
    model.predict([input_a, input_b], batch_size=5)
    model.evaluate([input_a, input_b], [target_d, target_e],
                   batch_size=2, verbose=0)
    model.test_on_batch([input_a, input_b], [target_d, target_e])
    # Test: mix np and tensors.
    input_b = np.zeros(shape=(10, 3)).astype('float32')
    target_e = np.zeros(shape=(10, 4)).astype('float32')
    model.fit(
        [input_a, input_b], [target_d, target_e],
        epochs=1,
        batch_size=5,
        verbose=0)
    model.fit([input_a, input_b], [target_d, target_e],
              epochs=1, batch_size=2, verbose=0,
              validation_data=([input_a, input_b], [target_d, target_e]))
    model.fit(
        [input_a, input_b], [target_d, target_e],
        epochs=1,
        batch_size=5,
        verbose=0,
        shuffle=False)
    model.train_on_batch([input_a, input_b], [target_d, target_e])
    model.predict([input_a, input_b], batch_size=5)
    model.evaluate([input_a, input_b], [target_d, target_e],
                   batch_size=2, verbose=0)
    model.test_on_batch([input_a, input_b], [target_d, target_e])

  def test_model_methods_with_eager_tensors_single_io(self):
    """Single-input/single-output variant of the eager-tensor smoke test."""
    x = keras.layers.Input(shape=(3,), name='input')
    y = keras.layers.Dense(4, name='dense')(x)
    model = keras.Model(x, y)
    optimizer = RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae']
    model.compile(optimizer, loss, metrics=metrics)
    inputs = keras.backend.zeros(shape=(10, 3))
    targets = keras.backend.zeros(shape=(10, 4))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
    model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
    model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
              validation_data=(inputs, targets))
    model.evaluate(inputs, targets, batch_size=2, verbose=0)
    model.predict(inputs, batch_size=2)
    model.train_on_batch(inputs, targets)
    model.test_on_batch(inputs, targets)
class LossWeightingTest(test.TestCase):
  """Tests for class_weight / sample_weight handling in eager mode."""

  def test_class_weights(self):
    """Upweighting one class should lower eval loss on that class's samples."""
    num_classes = 5
    batch_size = 5
    weighted_class = 3
    train_samples = 300
    test_samples = 300
    input_dim = 5
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    # Keep integer labels around; y_train/y_test are one-hot encoded below.
    int_y_test = y_test.copy()
    int_y_train = y_train.copy()
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    test_ids = np.where(int_y_test == np.array(weighted_class))[0]
    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 4.
    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 4.
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        class_weight=class_weight,
        validation_data=(x_train, y_train, sample_weight))
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        class_weight=class_weight)
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        class_weight=class_weight,
        validation_split=0.1)
    model.train_on_batch(
        x_train[:batch_size], y_train[:batch_size], class_weight=class_weight)
    # Loss restricted to the upweighted class should beat the overall loss.
    ref_score = model.evaluate(x_test, y_test, verbose=0)
    score = model.evaluate(
        x_test[test_ids, :], y_test[test_ids, :], verbose=0)
    self.assertLess(score, ref_score)

  def test_sample_weights(self):
    """fit/train_on_batch/test_on_batch accept per-sample weights."""
    num_classes = 5
    batch_size = 5
    weighted_class = 3
    train_samples = 300
    test_samples = 300
    input_dim = 5
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(43)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_train = y_train.copy()
    y_train = keras.utils.to_categorical(y_train, num_classes)
    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 4.
    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 4.
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        sample_weight=sample_weight)
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=2,
        verbose=0,
        sample_weight=sample_weight,
        validation_split=0.1)
    model.train_on_batch(
        x_train[:batch_size],
        y_train[:batch_size],
        sample_weight=sample_weight[:batch_size])
    model.test_on_batch(
        x_train[:batch_size],
        y_train[:batch_size],
        sample_weight=sample_weight[:batch_size])

  def test_temporal_sample_weights(self):
    """compile() with sample_weight_mode='temporal' is expected to raise."""
    num_classes = 5
    weighted_class = 3
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    timesteps = 3
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(num_classes),
            input_shape=(timesteps, input_dim)))
    model.add(keras.layers.Activation('softmax'))
    np.random.seed(1337)
    (_, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_train = y_train.copy()
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 2.
    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 2.
    with self.assertRaises(ValueError):
      model.compile(
          loss='binary_crossentropy',
          optimizer=RMSPropOptimizer(learning_rate=0.001),
          sample_weight_mode='temporal')

  def test_class_weight_invalid_use_case(self):
    """Malformed class/sample weight arguments must raise ValueError."""
    num_classes = 5
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    timesteps = 3
    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(num_classes),
            input_shape=(timesteps, input_dim)))
    model.add(keras.layers.Activation('softmax'))
    model.compile(
        loss='binary_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    class_weight = dict([(i, 1.) for i in range(num_classes)])
    # A class_weight dict missing a class must be rejected.
    del class_weight[1]
    with self.assertRaises(ValueError):
      model.fit(x_train, y_train,
                epochs=0, verbose=0, class_weight=class_weight)
    with self.assertRaises(ValueError):
      model.compile(
          loss='binary_crossentropy',
          optimizer=RMSPropOptimizer(learning_rate=0.001),
          sample_weight_mode=[])
    # Build multi-output model
    x = keras.Input((3,))
    y1 = keras.layers.Dense(4, name='1')(x)
    y2 = keras.layers.Dense(4, name='2')(x)
    model = keras.models.Model(x, [y1, y2])
    model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
    x_np = np.random.random((10, 3))
    y_np = np.random.random((10, 4))
    w_np = np.random.random((10,))
    # This will work
    model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
    # These will not
    with self.assertRaises(ValueError):
      model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
    with self.assertRaises(TypeError):
      model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
    with self.assertRaises(ValueError):
      # Wrong batch size (11 vs 10).
      bad_w_np = np.random.random((11,))
      model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
    with self.assertRaises(ValueError):
      bad_w_np = np.random.random((10, 2))
      model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
    with self.assertRaises(ValueError):
      bad_w_np = np.random.random((10, 2, 2))
      model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': bad_w_np})
class CorrectnessTest(test.TestCase):
  """Numeric-correctness checks run in both graph and eager modes."""

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_loss_correctness(self):
    """Deterministic setup must hit a known reference loss value."""
    # Test that training loss is the same in eager and graph
    # (by comparing it to a reference value in a deterministic case)
    model = keras.Sequential()
    model.add(keras.layers.Dense(3,
                                 activation='relu',
                                 input_dim=4,
                                 kernel_initializer='ones'))
    model.add(keras.layers.Dense(2,
                                 activation='softmax',
                                 kernel_initializer='ones'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4))
    np.random.seed(123)
    # randint(0, 1) always yields 0, so the labels are all-zero by design.
    y = np.random.randint(0, 1, size=(100, 1))
    history = model.fit(x, y, epochs=1, batch_size=10)
    # 0.6173 is the precomputed reference loss for this exact configuration.
    self.assertEqual(
        np.around(history.history['loss'][-1], decimals=4), 0.6173)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_metrics_correctness(self):
    """Accuracy must be exactly 1 on all-correct and 0 on all-wrong targets."""
    model = keras.Sequential()
    model.add(keras.layers.Dense(3,
                                 activation='relu',
                                 input_dim=4,
                                 kernel_initializer='ones'))
    model.add(keras.layers.Dense(1,
                                 activation='sigmoid',
                                 kernel_initializer='ones'))
    model.compile(loss='mae',
                  metrics=['acc'],
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4))
    y = np.ones((100, 1))
    outs = model.evaluate(x, y)
    self.assertEqual(outs[1], 1.)
    y = np.zeros((100, 1))
    outs = model.evaluate(x, y)
    self.assertEqual(outs[1], 0.)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_loss_correctness_with_iterator(self):
    """Same reference-loss check, feeding data through a dataset iterator."""
    # Test that training loss is the same in eager and graph
    # (by comparing it to a reference value in a deterministic case)
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(
            3, activation='relu', input_dim=4, kernel_initializer='ones'))
    model.add(
        keras.layers.Dense(2, activation='softmax', kernel_initializer='ones'))
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4), dtype=np.float32)
    np.random.seed(123)
    y = np.random.randint(0, 1, size=(100, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    history = model.fit(iterator, epochs=1, steps_per_epoch=10)
    self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)

  @tf_test_util.run_in_graph_and_eager_modes()
  def test_metrics_correctness_with_iterator(self):
    """Metric values computed from a dataset iterator match expectations."""
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(
            8, activation='relu', input_dim=4, kernel_initializer='ones'))
    model.add(
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones'))
    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy'],
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(123)
    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    y = np.zeros((100, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(outs[1], 0.)
if __name__ == '__main__':
  # Eager execution must be enabled before any test builds tensors.
  ops.enable_eager_execution()
  test.main()
| tensorflow/python/keras/engine/training_eager_test.py | 25,194 | Tests for training routines.
Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Test fit at different verbosity Test with validation data Test with validation split Test with dictionary inputs Test with lists for loss, metrics Test with dictionaries for loss, metrics, loss weights Invalid use cases Build single-input model This will work Test evaluate at different verbosity Test evaluate with dictionary inputs Test predict Test: no shuffle. Test: validation data. Test: mix np and tensors. convert class vectors to binary class matrices convert class vectors to binary class matrices convert class vectors to binary class matrices Build multi-output model This will work These will not Test that training loss is the same in eager and graph (by comparing it to a reference value in a deterministic case) Test that training loss is the same in eager and graph (by comparing it to a reference value in a deterministic case) | 1,537 | en | 0.842624 |
### Package Import ###
from bson import ObjectId
from pydantic import BaseModel
from pydantic import fields
from pydantic.fields import Field
from typing import Optional
### AppCode Import ###
from Server.Model.POID import PyObjectId
###############################################################################
class User(BaseModel):
    """A user document as stored in MongoDB.

    Every alias except ``_id`` mirrors its field name; combined with
    ``allow_population_by_field_name`` the model accepts payloads keyed
    either by field name or by alias.
    """

    # Mongo primary key; serialized/populated under the '_id' alias.
    Id: PyObjectId = Field(default_factory=PyObjectId, alias='_id')
    FirstName: str = Field(alias='FirstName')
    LastName: str = Field(alias='LastName')
    Email: str = Field(alias='Email')
    PhoneNumber: str = Field(alias='PhoneNumber')
    Password: str = Field(alias='Password')
    # NOTE(review): declared Optional but Field() carries no default -- in
    # pydantic v1 this can still make the field required; confirm intent.
    About: Optional[str] = Field(alias = 'About')
    ProfileUrl: Optional[str] = Field(alias='ProfileUrl')

    class Config:
        # Accept both field names and aliases when populating.
        allow_population_by_field_name = True
        # Needed so the custom PyObjectId type is accepted as a field type.
        arbitrary_types_allowed = True
        # Render ObjectId values as strings in JSON output.
        json_encoders = {ObjectId: str}
        # Example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "FirstName": "Jane",
                "LastName": "Doe",
                "Email": "jdoe@example.com",
                "PhoneNumber": "6285588974456",
                "Password": "jdoee"
            }
        }
###############################################################################
class UserUpdateModel(BaseModel):
    """Partial-update payload for a user: every field is optional.

    Mirrors ``User`` minus the ``_id`` key, so clients can send only the
    fields they want changed.
    """

    FirstName: Optional[str] = Field(alias ='FirstName')
    LastName: Optional[str] = Field(alias='LastName')
    Email: Optional[str] = Field(alias='Email')
    PhoneNumber: Optional[str] = Field(alias='PhoneNumber')
    Password: Optional[str] = Field(alias='Password')
    About: Optional[str] = Field(alias = 'About')
    ProfileUrl: Optional[str] = Field(alias='ProfileUrl')

    class Config:
        # Needed so custom types (e.g. ObjectId in encoders) are tolerated.
        arbitrary_types_allowed = True
        # Render ObjectId values as strings in JSON output.
        json_encoders = {ObjectId: str}
        # Example payload surfaced in the generated OpenAPI schema.
        schema_extra = {
            "example": {
                "FirstName": "Jane",
                "LastName": "Doe",
                "Email": "jdoe@example.com",
                "PhoneNumber": "6285588974456",
                "Password": "jdoee",
                "About": "About jane doe",
                "ProfileUrl": "https://profileurlembed.com/file/janedoe"
            }
        }
############################################################################### | Server/Model/ModelUser.py | 2,223 | Package Import AppCode Import | 30 | fr | 0.249056 |
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pyarrow.compute as pc
import matplotlib.pyplot as plt
import seaborn as sns
from pr3d.nonbayesian import ConditionalGammaEVM

# Load the processed one-hop dataset (possibly spread over several files).
file_addresses = ['dataset_onehop_processed.parquet']
table = pa.concat_tables(
    pq.read_table(
        file_address, columns=None,
    ) for file_address in file_addresses
)
df = table.to_pandas()
print(df)

# Load the trained conditional delay model.
# NOTE(review): `dtype` is never used below -- confirm whether the model
# should be constructed with it.
dtype = 'float64'
conditional_delay_model = ConditionalGammaEVM(
    h5_addr="onehop_tis_model.h5",
)

# Find the n most common queue_length occurrences.
n = 3
values_count = df[['queue_length']].value_counts()[:n].index.tolist()
print("{0} most common queue states: {1}".format(n, values_count))

# Divide [0, 1] into m equal longer_delay_prob intervals.
# NOTE(review): `service_delays` is computed but unused; the intervals come
# from np.linspace, not from delay quantiles -- confirm intent.
m = 5
service_delays = np.squeeze(df[['service_delay']].to_numpy())
quants = np.linspace(0, 1, num=m + 1)
intervals = [(quant, quants[idx + 1]) for idx, quant in enumerate(quants) if (idx + 1) < len(quants)]
# Bug fix: the original printed `n` (3) as the interval count even though
# there are m (5) intervals; report the actual count.
print("{0} longer_delay_prob intervals: {1}".format(len(intervals), intervals))

# Plot empirical vs predicted delay distributions, one subplot per
# (queue state, longer_delay_prob interval) cell.
fig, axes = plt.subplots(nrows=n, ncols=m, figsize=(m * 4, n * 4))
for i in range(n):
    for j in range(m):
        ax = axes[i, j]
        # Take the empirical samples for this condition.  .copy() detaches the
        # slice from df so the column assignments below do not trigger
        # pandas' SettingWithCopyWarning (or silently fail to write).
        conditional_df = df[
            (df.queue_length == values_count[i][0]) &
            (df.longer_delay_prob >= intervals[j][0]) &
            (df.longer_delay_prob < intervals[j][1])
        ].copy()
        # Sample the predictor with x (conditions) drawn from the empirical data.
        X = np.squeeze(conditional_df[['queue_length', 'longer_delay_prob']].to_numpy())
        conditional_samples = conditional_delay_model.sample_n(
            x=X,
            random_generator=np.random.default_rng(0),
        )
        # Insert the predictions and plot them against the empirical delays.
        conditional_df['predicted distribution'] = conditional_samples
        conditional_df.rename(columns={'end2end_delay': 'empirical distribution'}, inplace=True)
        sns.histplot(
            conditional_df[['empirical distribution', 'predicted distribution']],
            kde=True,
            ax=ax,
            stat="density",
        ).set(title="x={}, interval={}, count={}".format(
            values_count[i],
            ["{:0.2f}".format(inter) for inter in intervals[j]],
            len(conditional_df))
        )
        ax.title.set_size(10)
fig.tight_layout()
plt.savefig('conditional_delay_tis.png')
import glob
import os
import shutil
from tests import get_device_id, get_tests_output_path, run_cli
from TTS.config.shared_configs import BaseAudioConfig
from TTS.speaker_encoder.speaker_encoder_config import SpeakerEncoderConfig
def run_test_train():
    """Launch one short speaker-encoder training run through the CLI."""
    cli_parts = [
        f"CUDA_VISIBLE_DEVICES='{get_device_id()}'",
        "python TTS/bin/train_encoder.py",
        f"--config_path {config_path}",
        f"--coqpit.output_path {output_path}",
        "--coqpit.datasets.0.name ljspeech",
        "--coqpit.datasets.0.meta_file_train metadata.csv",
        "--coqpit.datasets.0.meta_file_val metadata.csv",
        "--coqpit.datasets.0.path tests/data/ljspeech",
    ]
    # Join with single spaces and keep the original trailing space.
    run_cli(" ".join(cli_parts) + " ")
config_path = os.path.join(get_tests_output_path(), "test_speaker_encoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")

# Minimal config so each training run finishes after two steps.
config = SpeakerEncoderConfig(
    batch_size=4,
    num_speakers_in_batch=1,
    num_utters_per_speaker=10,
    num_loader_workers=0,
    max_train_step=2,
    print_step=1,
    save_step=1,
    print_eval=True,
    audio=BaseAudioConfig(num_mels=80),
)
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)
print(config)


def _restore_and_continue_training():
    """Resume training from the newest output folder, then remove it.

    Deduplicates the find-latest / --continue_path / rmtree sequence that
    was previously repeated verbatim after each fresh training run.
    """
    continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
    command_train = (
        f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_encoder.py --continue_path {continue_path} "
    )
    run_cli(command_train)
    shutil.rmtree(continue_path)


# Train the default (LSTM) encoder for one epoch, then restore + continue.
run_test_train()
_restore_and_continue_training()

# Test the resnet speaker encoder the same way.
config.model_params["model_name"] = "resnet"
config.save_json(config_path)
run_test_train()
_restore_and_continue_training()

# Test the remaining loss functions (one short run each, no resume).
for loss_name in ("ge2e", "angleproto", "softmaxproto"):
    config.loss = loss_name
    config.save_json(config_path)
    run_test_train()
| tests/aux_tests/test_speaker_encoder_train.py | 2,460 | train the model for one epoch Find latest folder restore the model and continue training for one more epoch test resnet speaker encoder train the model for one epoch Find latest folder restore the model and continue training for one more epoch test model with ge2e loss function test model with angleproto loss function test model with softmaxproto loss function | 362 | en | 0.80402 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import List, Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
                            scores: tf.Tensor,
                            attributes: Optional[Mapping[str,
                                                         tf.Tensor]] = None,
                            pre_nms_top_k: int = 5000,
                            pre_nms_score_threshold: float = 0.05,
                            nms_iou_threshold: float = 0.5,
                            max_num_detections: int = 100,
                            soft_nms_sigma: Optional[float] = None):
  """Generates the final detections given the model outputs.

  The implementation unrolls the batch dimension and processes images one by
  one. It requires the batch dimension to be statically known and it is TPU
  compatible.

  Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]` for box predictions on all feature levels. The
      N is the number of total anchors on all levels.
    scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
      stacks class probability on all feature levels. The N is the number of
      total anchors on all levels. The num_classes is the number of classes
      predicted by the model. Note that the class_outputs here is the raw score.
    attributes: None or a dict of (attribute_name, attributes) pairs. Each
      attributes is a `tf.Tensor` with shape
      `[batch_size, N, num_classes, attribute_size]` or
      `[batch_size, N, 1, attribute_size]` for attribute predictions on all
      feature levels. The N is the number of total anchors on all levels. Can
      be None if no attribute learning is required.
    pre_nms_top_k: An `int` number of top candidate detections per class before
      NMS.
    pre_nms_score_threshold: A `float` representing the threshold for deciding
      when to remove boxes based on score.
    nms_iou_threshold: A `float` representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    max_num_detections: A scalar representing maximum number of boxes retained
      over all classes.
    soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
      When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.

  Returns:
    nms_boxes: A `float` type `tf.Tensor` of shape
      `[batch_size, max_num_detections, 4]` representing top detected boxes in
      `[y1, x1, y2, x2]`.
    nms_scores: A `float` type `tf.Tensor` of shape
      `[batch_size, max_num_detections]` representing sorted confidence scores
      for detected boxes. The values are between `[0, 1]`.
    nms_classes: An `int` type `tf.Tensor` of shape
      `[batch_size, max_num_detections]` representing classes for detected
      boxes.
    valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the
      top `valid_detections` boxes are valid detections.
    nms_attributes: None or a dict of (attribute_name, attributes). Each
      attribute is a `float` type `tf.Tensor` of shape
      `[batch_size, max_num_detections, attribute_size]` representing attribute
      predictions for detected boxes. Can be an empty dict if no attribute
      learning is required.
  """
  with tf.name_scope('generate_detections'):
    # The Python-level loop below unrolls over the batch, so the batch
    # dimension must be statically known here.
    batch_size = scores.get_shape().as_list()[0]
    nmsed_boxes = []
    nmsed_classes = []
    nmsed_scores = []
    valid_detections = []
    if attributes:
      nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
    else:
      nmsed_attributes = {}
    for i in range(batch_size):
      # Runs per-class NMS independently for each image in the batch.
      (nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
       nmsed_att_i) = _generate_detections_per_image(
           boxes[i],
           scores[i],
           attributes={
               att_name: att[i] for att_name, att in attributes.items()
           } if attributes else {},
           pre_nms_top_k=pre_nms_top_k,
           pre_nms_score_threshold=pre_nms_score_threshold,
           nms_iou_threshold=nms_iou_threshold,
           max_num_detections=max_num_detections,
           soft_nms_sigma=soft_nms_sigma)
      nmsed_boxes.append(nmsed_boxes_i)
      nmsed_scores.append(nmsed_scores_i)
      nmsed_classes.append(nmsed_classes_i)
      valid_detections.append(valid_detections_i)
      if attributes:
        for att_name in attributes.keys():
          nmsed_attributes[att_name].append(nmsed_att_i[att_name])
    # Re-assembles the per-image results back into batched tensors.
    nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
    nmsed_scores = tf.stack(nmsed_scores, axis=0)
    nmsed_classes = tf.stack(nmsed_classes, axis=0)
    valid_detections = tf.stack(valid_detections, axis=0)
    if attributes:
      for att_name in attributes.keys():
        nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _generate_detections_per_image(
    boxes: tf.Tensor,
    scores: tf.Tensor,
    attributes: Optional[Mapping[str, tf.Tensor]] = None,
    pre_nms_top_k: int = 5000,
    pre_nms_score_threshold: float = 0.05,
    nms_iou_threshold: float = 0.5,
    max_num_detections: int = 100,
    soft_nms_sigma: Optional[float] = None):
  """Generates the final detections per image given the model outputs.

  Args:
    boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
      box predictions on all feature levels. The N is the number of total
      anchors on all levels.
    scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
      probability on all feature levels. The N is the number of total anchors on
      all levels. The num_classes is the number of classes predicted by the
      model. Note that the class_outputs here is the raw score.
    attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
      `[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
      attribute predictions on all feature levels. The N is the number of total
      anchors on all levels.
    pre_nms_top_k: An `int` number of top candidate detections per class before
      NMS.
    pre_nms_score_threshold: A `float` representing the threshold for deciding
      when to remove boxes based on score.
    nms_iou_threshold: A `float` representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    max_num_detections: A `scalar` representing maximum number of boxes retained
      over all classes.
    soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
      When soft_nms_sigma=0.0, we fall back to standard NMS.
      If set to None, `tf.image.non_max_suppression_padded` is called instead.

  Returns:
    nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
      representing top detected boxes in `[y1, x1, y2, x2]`.
    nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
      sorted confidence scores for detected boxes. The values are between [0,
      1]; padded/invalid entries carry score -1.
    nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
      classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [1] only the top
      `valid_detections` boxes are valid detections.
    nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
      `[max_num_detections, attribute_size]` representing attribute predictions
      for detected boxes. Can be an empty dict if `attributes` is None.
  """
  nmsed_boxes = []
  nmsed_scores = []
  nmsed_classes = []
  num_classes_for_box = boxes.get_shape().as_list()[1]
  num_classes = scores.get_shape().as_list()[1]
  if attributes:
    nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
  else:
    nmsed_attributes = {}
  for i in range(num_classes):
    # `min(...)` reuses the single box column when boxes are class-agnostic
    # (num_classes_for_box == 1); otherwise it picks the class-specific column.
    boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
    scores_i = scores[:, i]
    # Obtains pre_nms_top_k before running NMS.
    scores_i, indices = tf.nn.top_k(
        scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
    boxes_i = tf.gather(boxes_i, indices)
    if soft_nms_sigma is not None:
      # Soft-NMS path: returns a variable number of kept boxes with decayed
      # scores, so the outputs are padded up to `max_num_detections` below.
      (nmsed_indices_i,
       nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
           tf.cast(boxes_i, tf.float32),
           tf.cast(scores_i, tf.float32),
           max_num_detections,
           iou_threshold=nms_iou_threshold,
           score_threshold=pre_nms_score_threshold,
           soft_nms_sigma=soft_nms_sigma,
           name='nms_detections_' + str(i))
      nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
      nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
          nmsed_boxes_i, max_num_detections, 0.0)
      # Padding value -1.0 marks invalid slots so they sort last later.
      nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
          nmsed_scores_i, max_num_detections, -1.0)
    else:
      # Hard-NMS path: already padded to `max_num_detections` by the op.
      (nmsed_indices_i,
       nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
           tf.cast(boxes_i, tf.float32),
           tf.cast(scores_i, tf.float32),
           max_num_detections,
           iou_threshold=nms_iou_threshold,
           score_threshold=pre_nms_score_threshold,
           pad_to_max_output_size=True,
           name='nms_detections_' + str(i))
      nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
      nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
      # Sets scores of invalid boxes to -1.
      nmsed_scores_i = tf.where(
          tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
          nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
    nmsed_classes_i = tf.fill([max_num_detections], i)
    nmsed_boxes.append(nmsed_boxes_i)
    nmsed_scores.append(nmsed_scores_i)
    nmsed_classes.append(nmsed_classes_i)
    if attributes:
      for att_name, att in attributes.items():
        num_classes_for_attr = att.get_shape().as_list()[1]
        att_i = att[:, min(num_classes_for_attr - 1, i)]
        att_i = tf.gather(att_i, indices)
        nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
        nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
            nmsed_att_i, max_num_detections, 0.0)
        nmsed_attributes[att_name].append(nmsed_att_i)
  # Concats results from all classes and sort them.
  nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
  nmsed_scores = tf.concat(nmsed_scores, axis=0)
  nmsed_classes = tf.concat(nmsed_classes, axis=0)
  # Keeps the globally best `max_num_detections` across all classes; the -1
  # sentinel scores guarantee padded entries lose to any real detection.
  nmsed_scores, indices = tf.nn.top_k(
      nmsed_scores, k=max_num_detections, sorted=True)
  nmsed_boxes = tf.gather(nmsed_boxes, indices)
  nmsed_classes = tf.gather(nmsed_classes, indices)
  valid_detections = tf.reduce_sum(
      tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
  if attributes:
    for att_name in attributes.keys():
      nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
      nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
                                             indices)
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
  """Selects top_k scores and indices for each class.

  Args:
    scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
      stacks class logit outputs on all feature levels. The N is the number of
      total anchors on all levels. The num_classes is the number of classes
      predicted by the model.
    pre_nms_num_detections: Number of candidates before NMS.

  Returns:
    scores and indices: A `tf.Tensor` with shape
      `[batch_size, pre_nms_num_detections, num_classes]`.
  """
  static_batch, anchor_count, class_count = scores_in.get_shape().as_list()
  # Falls back to the dynamic batch dimension when it is not known statically.
  batch = static_batch if static_batch is not None else tf.shape(scores_in)[0]
  # Moves classes ahead of anchors and flattens, so every (batch, class) pair
  # becomes one row and a single top_k call ranks all of them at once.
  per_class_rows = tf.reshape(
      tf.transpose(scores_in, perm=[0, 2, 1]), [-1, anchor_count])
  picked_scores, picked_indices = tf.nn.top_k(
      per_class_rows, k=pre_nms_num_detections, sorted=True)
  ranked_shape = [batch, class_count, pre_nms_num_detections]
  picked_scores = tf.reshape(picked_scores, ranked_shape)
  picked_indices = tf.reshape(picked_indices, ranked_shape)
  # Restores the `[batch, pre_nms_num_detections, num_classes]` layout.
  return (tf.transpose(picked_scores, [0, 2, 1]),
          tf.transpose(picked_indices, [0, 2, 1]))
def _generate_detections_v2(boxes: tf.Tensor,
                            scores: tf.Tensor,
                            pre_nms_top_k: int = 5000,
                            pre_nms_score_threshold: float = 0.05,
                            nms_iou_threshold: float = 0.5,
                            max_num_detections: int = 100):
  """Generates the final detections given the model outputs.

  This implementation unrolls classes dimension while using the tf.while_loop
  to implement the batched NMS, so that it can be parallelized at the batch
  dimension. It should give better performance comparing to v1 implementation.
  It is TPU compatible.

  Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
      N is the number of total anchors on all levels.
    scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
      stacks class probability on all feature levels. The N is the number of
      total anchors on all levels. The num_classes is the number of classes
      predicted by the model. Note that the class_outputs here is the raw score.
    pre_nms_top_k: An `int` number of top candidate detections per class before
      NMS.
    pre_nms_score_threshold: A `float` representing the threshold for deciding
      when to remove boxes based on score.
    nms_iou_threshold: A `float` representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    max_num_detections: A `scalar` representing maximum number of boxes retained
      over all classes.

  Returns:
    nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
      representing top detected boxes in [y1, x1, y2, x2].
    nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
      representing sorted confidence scores for detected boxes. The values are
      between [0, 1].
    nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
      representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
      `valid_detections` boxes are valid detections.
  """
  with tf.name_scope('generate_detections'):
    nmsed_boxes = []
    nmsed_classes = []
    nmsed_scores = []
    valid_detections = []
    batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
    if batch_size is None:
      # Batch size is dynamic; uses the runtime shape instead.
      batch_size = tf.shape(boxes)[0]
    _, total_anchors, num_classes = scores.get_shape().as_list()
    # Selects top pre_nms_num scores and indices before NMS.
    scores, indices = _select_top_k_scores(
        scores, min(total_anchors, pre_nms_top_k))
    for i in range(num_classes):
      # `min(...)` reuses the single box column for class-agnostic boxes.
      boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
      scores_i = scores[:, :, i]
      # Obtains pre_nms_top_k before running NMS.
      boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
      # Filter out scores.
      boxes_i, scores_i = box_ops.filter_boxes_by_scores(
          boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
      # Scores are already sorted descending, as required by the sorted NMS op.
      (nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
          tf.cast(scores_i, tf.float32),
          tf.cast(boxes_i, tf.float32),
          max_num_detections,
          iou_threshold=nms_iou_threshold)
      nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
      nmsed_boxes.append(nmsed_boxes_i)
      nmsed_scores.append(nmsed_scores_i)
      nmsed_classes.append(nmsed_classes_i)
  # Merges per-class results and keeps the globally best detections.
  nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
  nmsed_scores = tf.concat(nmsed_scores, axis=1)
  nmsed_classes = tf.concat(nmsed_classes, axis=1)
  nmsed_scores, indices = tf.nn.top_k(
      nmsed_scores, k=max_num_detections, sorted=True)
  nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
  nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
  # Counts entries whose score is above the -1 padding sentinel.
  valid_detections = tf.reduce_sum(
      input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
  return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
                                 pre_nms_score_threshold: float,
                                 nms_iou_threshold: float,
                                 max_num_detections: int):
  """Generates detected boxes with scores and classes for one-stage detector.

  Thin wrapper around `tf.image.combined_non_max_suppression`, which performs
  per-class NMS followed by a cross-class merge. Note that this batched NMS op
  is not supported on TPU currently.

  Args:
    boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
      N is the number of total anchors on all levels.
    scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
      stacks class probability on all feature levels. The N is the number of
      total anchors on all levels. The num_classes is the number of classes
      predicted by the model. Note that the class_outputs here is the raw score.
    pre_nms_score_threshold: A `float` representing the threshold for deciding
      when to remove boxes based on score.
    nms_iou_threshold: A `float` representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    max_num_detections: A `scalar` representing maximum number of boxes retained
      over all classes.

  Returns:
    nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
      representing top detected boxes in [y1, x1, y2, x2].
    nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
      representing sorted confidence scores for detected boxes. The values are
      between [0, 1].
    nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
      representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
      `valid_detections` boxes are valid detections.
  """
  with tf.name_scope('generate_detections'):
    out_boxes, out_scores, out_classes, num_valid = (
        tf.image.combined_non_max_suppression(
            boxes,
            scores,
            max_output_size_per_class=max_num_detections,
            max_total_size=max_num_detections,
            iou_threshold=nms_iou_threshold,
            score_threshold=pre_nms_score_threshold,
            pad_per_class=False,
            clip_boxes=False))
    # The combined NMS op emits float class ids; downstream code expects int32.
    out_classes = tf.cast(out_classes, tf.int32)
  return out_boxes, out_scores, out_classes, num_valid
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
  """Generates the final detected boxes with scores and classes."""

  def __init__(self,
               apply_nms: bool = True,
               pre_nms_top_k: int = 5000,
               pre_nms_score_threshold: float = 0.05,
               nms_iou_threshold: float = 0.5,
               max_num_detections: int = 100,
               nms_version: str = 'v2',
               use_cpu_nms: bool = False,
               soft_nms_sigma: Optional[float] = None,
               **kwargs):
    """Initializes a detection generator.

    Args:
      apply_nms: A `bool` of whether or not apply non maximum suppression.
        If False, the decoded boxes and their scores are returned.
      pre_nms_top_k: An `int` of the number of top scores proposals to be kept
        before applying NMS.
      pre_nms_score_threshold: A `float` of the score threshold to apply before
        applying NMS. Proposals whose scores are below this threshold are
        thrown away.
      nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
      max_num_detections: An `int` of the final number of total detections to
        generate.
      nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.
      use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
      soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
        When soft_nms_sigma=0.0, we fall back to standard NMS.
      **kwargs: Additional keyword arguments passed to Layer.
    """
    # All construction arguments are kept in one dict so that `get_config` /
    # `from_config` round-trip for Keras serialization.
    self._config_dict = {
        'apply_nms': apply_nms,
        'pre_nms_top_k': pre_nms_top_k,
        'pre_nms_score_threshold': pre_nms_score_threshold,
        'nms_iou_threshold': nms_iou_threshold,
        'max_num_detections': max_num_detections,
        'nms_version': nms_version,
        'use_cpu_nms': use_cpu_nms,
        'soft_nms_sigma': soft_nms_sigma,
    }
    super(DetectionGenerator, self).__init__(**kwargs)

  def __call__(self,
               raw_boxes: tf.Tensor,
               raw_scores: tf.Tensor,
               anchor_boxes: tf.Tensor,
               image_shape: tf.Tensor,
               regression_weights: Optional[List[float]] = None,
               bbox_per_class: bool = True):
    """Generates final detections.

    Args:
      raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
        representing the class-specific box coordinates relative to anchors.
      raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
        representing the class logits before applying score activation.
      anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
        the corresponding anchor boxes w.r.t `box_outputs`.
      image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
        height and width w.r.t. the scaled image, i.e. the same image space as
        `box_outputs` and `anchor_boxes`.
      regression_weights: A list of four float numbers to scale coordinates.
      bbox_per_class: A `bool`. If True, perform per-class box regression.

    Returns:
      If `apply_nms` = True, the return is a dictionary with keys:
        `detection_boxes`: A `float` tf.Tensor of shape
          [batch, max_num_detections, 4] representing top detected boxes in
          [y1, x1, y2, x2].
        `detection_scores`: A `float` `tf.Tensor` of shape
          [batch, max_num_detections] representing sorted confidence scores for
          detected boxes. The values are between [0, 1].
        `detection_classes`: An `int` tf.Tensor of shape
          [batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch] only the first
          `num_detections` boxes are valid detections
      If `apply_nms` = False, the return is a dictionary with keys:
        `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
          representing all the decoded boxes.
        `decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.

    Raises:
      ValueError: If `nms_version` in the config is not one of `batched`, `v1`
        or `v2`.
    """
    box_scores = tf.nn.softmax(raw_scores, axis=-1)
    # Removes the background class.
    box_scores_shape = tf.shape(box_scores)
    box_scores_shape_list = box_scores.get_shape().as_list()
    # Batch size may be dynamic, so it is read from the runtime shape; the
    # location and class counts must be static.
    batch_size = box_scores_shape[0]
    num_locations = box_scores_shape_list[1]
    num_classes = box_scores_shape_list[-1]
    # Drops class index 0, which is the implicit background column.
    box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
    if bbox_per_class:
      # Expands boxes to one 4-vector per (location, foreground class) pair and
      # tiles the anchors to match, then flattens both for decoding.
      num_detections = num_locations * (num_classes - 1)
      raw_boxes = tf.reshape(raw_boxes,
                             [batch_size, num_locations, num_classes, 4])
      raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
      anchor_boxes = tf.tile(
          tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
      raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
      anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
    # Box decoding.
    decoded_boxes = box_ops.decode_boxes(
        raw_boxes, anchor_boxes, weights=regression_weights)
    # Box clipping
    decoded_boxes = box_ops.clip_boxes(
        decoded_boxes, tf.expand_dims(image_shape, axis=1))
    if bbox_per_class:
      decoded_boxes = tf.reshape(
          decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
    else:
      decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
    if not self._config_dict['apply_nms']:
      return {
          'decoded_boxes': decoded_boxes,
          'decoded_box_scores': box_scores,
      }
    # Optionally force the NMS be run on CPU.
    if self._config_dict['use_cpu_nms']:
      nms_context = tf.device('cpu:0')
    else:
      nms_context = contextlib.nullcontext()
    with nms_context:
      if self._config_dict['nms_version'] == 'batched':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
            _generate_detections_batched(
                decoded_boxes, box_scores,
                self._config_dict['pre_nms_score_threshold'],
                self._config_dict['nms_iou_threshold'],
                self._config_dict['max_num_detections']))
      elif self._config_dict['nms_version'] == 'v1':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
            _generate_detections_v1(
                decoded_boxes,
                box_scores,
                pre_nms_top_k=self._config_dict['pre_nms_top_k'],
                pre_nms_score_threshold=self
                ._config_dict['pre_nms_score_threshold'],
                nms_iou_threshold=self._config_dict['nms_iou_threshold'],
                max_num_detections=self._config_dict['max_num_detections'],
                soft_nms_sigma=self._config_dict['soft_nms_sigma']))
      elif self._config_dict['nms_version'] == 'v2':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
            _generate_detections_v2(
                decoded_boxes,
                box_scores,
                pre_nms_top_k=self._config_dict['pre_nms_top_k'],
                pre_nms_score_threshold=self
                ._config_dict['pre_nms_score_threshold'],
                nms_iou_threshold=self._config_dict['nms_iou_threshold'],
                max_num_detections=self._config_dict['max_num_detections']))
      else:
        raise ValueError('NMS version {} not supported.'.format(
            self._config_dict['nms_version']))
    # Adds 1 to offset the background class which has index 0.
    nmsed_classes += 1
    return {
        'num_detections': valid_detections,
        'detection_boxes': nmsed_boxes,
        'detection_classes': nmsed_classes,
        'detection_scores': nmsed_scores,
    }

  def get_config(self):
    """Returns the layer's configuration dict for Keras serialization."""
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    """Re-creates the layer from a config produced by `get_config`."""
    return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
  """Generates detected boxes with scores and classes for one-stage detector."""

  def __init__(self,
               apply_nms: bool = True,
               pre_nms_top_k: int = 5000,
               pre_nms_score_threshold: float = 0.05,
               nms_iou_threshold: float = 0.5,
               max_num_detections: int = 100,
               nms_version: str = 'v1',
               use_cpu_nms: bool = False,
               soft_nms_sigma: Optional[float] = None,
               **kwargs):
    """Initializes a multi-level detection generator.

    Args:
      apply_nms: A `bool` of whether or not apply non maximum suppression. If
        False, the decoded boxes and their scores are returned.
      pre_nms_top_k: An `int` of the number of top scores proposals to be kept
        before applying NMS.
      pre_nms_score_threshold: A `float` of the score threshold to apply before
        applying NMS. Proposals whose scores are below this threshold are thrown
        away.
      nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
      max_num_detections: An `int` of the final number of total detections to
        generate.
      nms_version: A string of `batched`, `v1` or `v2` specifies NMS version
      use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
      soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
        When soft_nms_sigma=0.0, we fall back to standard NMS.
      **kwargs: Additional keyword arguments passed to Layer.
    """
    # All construction arguments are kept in one dict so that `get_config` /
    # `from_config` round-trip for Keras serialization.
    self._config_dict = {
        'apply_nms': apply_nms,
        'pre_nms_top_k': pre_nms_top_k,
        'pre_nms_score_threshold': pre_nms_score_threshold,
        'nms_iou_threshold': nms_iou_threshold,
        'max_num_detections': max_num_detections,
        'nms_version': nms_version,
        'use_cpu_nms': use_cpu_nms,
        'soft_nms_sigma': soft_nms_sigma,
    }
    super(MultilevelDetectionGenerator, self).__init__(**kwargs)

  def _decode_multilevel_outputs(
      self,
      raw_boxes: Mapping[str, tf.Tensor],
      raw_scores: Mapping[str, tf.Tensor],
      anchor_boxes: Mapping[str, tf.Tensor],
      image_shape: tf.Tensor,
      raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
    """Collects dict of multilevel boxes, scores, attributes into lists."""
    boxes = []
    scores = []
    if raw_attributes:
      attributes = {att_name: [] for att_name in raw_attributes.keys()}
    else:
      attributes = {}
    levels = list(raw_boxes.keys())
    min_level = int(min(levels))
    max_level = int(max(levels))
    for i in range(min_level, max_level + 1):
      raw_boxes_i = raw_boxes[str(i)]
      raw_scores_i = raw_scores[str(i)]
      batch_size = tf.shape(raw_boxes_i)[0]
      # The feature map sizes and anchor counts must be statically known; the
      # last dim packs `num_anchors_per_location` boxes of 4 coordinates each.
      (_, feature_h_i, feature_w_i,
       num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
      num_locations = feature_h_i * feature_w_i
      num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
      num_classes = raw_scores_i.get_shape().as_list(
      )[-1] // num_anchors_per_locations
      # Applies score transformation and remove the implicit background class.
      scores_i = tf.sigmoid(
          tf.reshape(raw_scores_i, [
              batch_size, num_locations * num_anchors_per_locations, num_classes
          ]))
      scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
      # Box decoding.
      # The anchor boxes are shared for all data in a batch.
      # One stage detector only supports class agnostic box regression.
      anchor_boxes_i = tf.reshape(
          anchor_boxes[str(i)],
          [batch_size, num_locations * num_anchors_per_locations, 4])
      raw_boxes_i = tf.reshape(
          raw_boxes_i,
          [batch_size, num_locations * num_anchors_per_locations, 4])
      boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
      # Box clipping.
      boxes_i = box_ops.clip_boxes(
          boxes_i, tf.expand_dims(image_shape, axis=1))
      boxes.append(boxes_i)
      scores.append(scores_i)
      if raw_attributes:
        for att_name, raw_att in raw_attributes.items():
          attribute_size = raw_att[str(
              i)].get_shape().as_list()[-1] // num_anchors_per_locations
          att_i = tf.reshape(raw_att[str(i)], [
              batch_size, num_locations * num_anchors_per_locations,
              attribute_size
          ])
          attributes[att_name].append(att_i)
    # Concatenates all levels along the anchor dimension; the extra axis=2 of
    # size 1 matches the class-agnostic box layout expected by the NMS helpers.
    boxes = tf.concat(boxes, axis=1)
    boxes = tf.expand_dims(boxes, axis=2)
    scores = tf.concat(scores, axis=1)
    if raw_attributes:
      for att_name in raw_attributes.keys():
        attributes[att_name] = tf.concat(attributes[att_name], axis=1)
        attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
    return boxes, scores, attributes

  def __call__(self,
               raw_boxes: Mapping[str, tf.Tensor],
               raw_scores: Mapping[str, tf.Tensor],
               anchor_boxes: Mapping[str, tf.Tensor],
               image_shape: tf.Tensor,
               raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
    """Generates final detections.

    Args:
      raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
        num_anchors * 4]`.
      raw_scores: A `dict` with keys representing FPN levels and values
        representing logit tensors of shape `[batch, feature_h, feature_w,
        num_anchors]`.
      anchor_boxes: A `dict` with keys representing FPN levels and values
        representing the anchor tensors w.r.t `box_outputs` for that level.
      image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
        height and width w.r.t. the scaled image, i.e. the same image space as
        `box_outputs` and `anchor_boxes`.
      raw_attributes: If not None, a `dict` of (attribute_name,
        attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors of
        shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.

    Returns:
      If `apply_nms` = True, the return is a dictionary with keys:
        `detection_boxes`: A `float` tf.Tensor of shape
          [batch, max_num_detections, 4] representing top detected boxes in
          [y1, x1, y2, x2].
        `detection_scores`: A `float` tf.Tensor of shape
          [batch, max_num_detections] representing sorted confidence scores for
          detected boxes. The values are between [0, 1].
        `detection_classes`: An `int` tf.Tensor of shape
          [batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch] only the first
          `num_detections` boxes are valid detections
        `detection_attributes`: A dict. Values of the dict is a `float`
          tf.Tensor of shape [batch, max_num_detections, attribute_size]
          representing attribute predictions for detected boxes.
      If `apply_nms` = False, the return is a dictionary with keys:
        `decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
          representing all the decoded boxes.
        `decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
        `decoded_box_attributes`: A dict. Values in the dict is a
          `float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
          representing attribute predictions of all the decoded boxes.

    Raises:
      ValueError: If `raw_attributes` is provided with a `nms_version` other
        than `v1`, or if `nms_version` is not one of `batched`, `v1` or `v2`.
    """
    boxes, scores, attributes = self._decode_multilevel_outputs(
        raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
    if not self._config_dict['apply_nms']:
      return {
          'decoded_boxes': boxes,
          'decoded_box_scores': scores,
          'decoded_box_attributes': attributes,
      }
    # Optionally force the NMS to run on CPU.
    if self._config_dict['use_cpu_nms']:
      nms_context = tf.device('cpu:0')
    else:
      nms_context = contextlib.nullcontext()
    with nms_context:
      # Only the v1 path threads attributes through NMS.
      if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
        raise ValueError(
            'Attribute learning is only supported for NMSv1 but NMS {} is used.'
            .format(self._config_dict['nms_version']))
      if self._config_dict['nms_version'] == 'batched':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
            _generate_detections_batched(
                boxes, scores, self._config_dict['pre_nms_score_threshold'],
                self._config_dict['nms_iou_threshold'],
                self._config_dict['max_num_detections']))
        # Batched NMS does not produce attributes; uses an empty dict.
        nmsed_attributes = {}
      elif self._config_dict['nms_version'] == 'v1':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
         nmsed_attributes) = (
             _generate_detections_v1(
                 boxes,
                 scores,
                 attributes=attributes if raw_attributes else None,
                 pre_nms_top_k=self._config_dict['pre_nms_top_k'],
                 pre_nms_score_threshold=self
                 ._config_dict['pre_nms_score_threshold'],
                 nms_iou_threshold=self._config_dict['nms_iou_threshold'],
                 max_num_detections=self._config_dict['max_num_detections'],
                 soft_nms_sigma=self._config_dict['soft_nms_sigma']))
      elif self._config_dict['nms_version'] == 'v2':
        (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
            _generate_detections_v2(
                boxes,
                scores,
                pre_nms_top_k=self._config_dict['pre_nms_top_k'],
                pre_nms_score_threshold=self
                ._config_dict['pre_nms_score_threshold'],
                nms_iou_threshold=self._config_dict['nms_iou_threshold'],
                max_num_detections=self._config_dict['max_num_detections']))
        # NMS v2 does not produce attributes; uses an empty dict.
        nmsed_attributes = {}
      else:
        raise ValueError('NMS version {} not supported.'.format(
            self._config_dict['nms_version']))
    # Adds 1 to offset the background class which has index 0.
    nmsed_classes += 1
    return {
        'num_detections': valid_detections,
        'detection_boxes': nmsed_boxes,
        'detection_classes': nmsed_classes,
        'detection_scores': nmsed_scores,
        'detection_attributes': nmsed_attributes,
    }

  def get_config(self):
    """Returns the layer's configuration dict for Keras serialization."""
    return self._config_dict

  @classmethod
  def from_config(cls, config):
    """Re-creates the layer from a config produced by `get_config`."""
    return cls(**config)
| official/vision/beta/modeling/layers/detection_generator.py | 38,620 | Generates the final detected boxes with scores and classes.
Generates detected boxes with scores and classes for one-stage detector.
Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
      representing the class logits before applying score activation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
`num_detections`: An `int` tf.Tensor of shape [batch] only the first
`num_detections` boxes are valid detections
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
      [batch, num_raw_boxes] representing scores of all the decoded boxes.
Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
      representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
num_anchors]`.
anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
      contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
`num_detections`: An `int` tf.Tensor of shape [batch] only the first
`num_detections` boxes are valid detections
`detection_attributes`: A dict. Values of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
      [batch, num_raw_boxes] representing scores of all the decoded boxes.
`decoded_box_attributes`: A dict. Values in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
Initializes a detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
nms_version: A string of `batched`, `v1` or `v2` specifies NMS version.
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
Initializes a multi-level detection generator.
Args:
apply_nms: A `bool` of whether or not apply non maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
nms_version: A string of `batched`, `v1` or `v2` specifies NMS version
use_cpu_nms: A `bool` of whether or not enforce NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
Collects dict of multilevel boxes, scores, attributes into lists.
Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
generates detected boxes. Note that this used batched nms, which is not
supported on TPU currently.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections.
Generates the final detections per image given the model outputs.
Args:
boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
box predictions on all feature levels. The N is the number of total
anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [1] only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
Generates the final detections given the model outputs.
The implementation unrolls the batch dimension and process images one by one.
It required the batch dimension to be statically known and it is TPU
compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
Generates the final detections given the model outputs.
This implementation unrolls classes dimension while using the tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance comparing to v1 implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]`, which box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [batch_size] only the top
`valid_detections` boxes are valid detections.
Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`.
Contains definitions of generators to generate the final detections.
Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Import libraries Obtains pre_nms_top_k before running NMS. Sets scores of invalid boxes to -1. Concats results from all classes and sort them. Selects top pre_nms_num scores and indices before NMS. Obtains pre_nms_top_k before running NMS. Filter out scores. Removes the background class. Box decoding. Box clipping Optionally force the NMS be run on CPU. Adds 1 to offset the background class which has index 0. Applies score transformation and remove the implicit background class. Box decoding. The anchor boxes are shared for all data in a batch. One stage detector only supports class agnostic box regression. Box clipping. Optionally force the NMS to run on CPU. Set `nmsed_attributes` to None for batched NMS. Set `nmsed_attributes` to None for v2. Adds 1 to offset the background class which has index 0. | 16,334 | en | 0.735677 |
from django.contrib import admin
from .models import Car, CarShop, RepairStation, RepairWork, Reapir, Person, Component

# Register every model of the app with the default admin site.
# NOTE(review): `Reapir` looks like a typo for `Repair`, but the name must
# match the class declared in .models, so renaming needs a coordinated change.
for _model in (Car, CarShop, Reapir, RepairWork, RepairStation, Person,
               Component):
    admin.site.register(_model)
| term_project/backend/api/admin.py | 358 | Register your models here. | 26 | en | 0.957485 |
# -*- coding: utf-8 -*-
"""Launchd plist plugin."""
from __future__ import unicode_literals
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import plist_event
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import plist
from plaso.parsers.plist_plugins import interface
class LaunchdPlugin(interface.PlistPlugin):
  """Basic plugin to extract launchd configuration information.

  Further details about fields within the key:
  Label:
    the required key for uniquely identifying the launchd service.
  Program:
    absolute path to the executable. required in the absence of the
    ProgramArguments key.
  ProgramArguments:
    command-line flags for the executable. required in the absence of the
    Program key.
  UserName:
    the job run as the specified user.
  GroupName:
    the job run as the specified group.
  """

  NAME = 'launchd_plist'
  DESCRIPTION = 'Parser for Launchd plist files.'

  # The PLIST_PATH is dynamic, the prefix filename is, by default, named using
  # reverse-domain notation. For example, Chrome is com.google.chrome.plist.
  # /System/Library/LaunchDaemons/*.plist
  # /System/Library/LaunchAgents/*.plist
  # /Library/LaunchDaemons/*.plist
  # /Library/LaunchAgents/*.plist
  # ~/Library/LaunchAgents
  PLIST_KEYS = frozenset([
      'Label',
      'Program',
      'ProgramArguments',
      'UserName',
      'GroupName',
  ])

  # pylint: disable=arguments-differ
  def Process(self, parser_mediator, plist_name, top_level, **kwargs):
    """Check if it is a valid MacOS plist file name.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      plist_name (str): name of the plist.
      top_level (dict[str, object]): plist top-level key.
    """
    super(LaunchdPlugin, self).Process(
        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)

  # pylint: disable=arguments-differ
  def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    """Extracts launchd information from the plist.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    label = top_level.get('Label')
    command = top_level.get('Program', '')
    # Bug fix: ProgramArguments is optional when Program is present, but the
    # raw .get() result was iterated directly, raising TypeError ("NoneType is
    # not iterable") whenever the key was missing.
    program_arguments = top_level.get('ProgramArguments') or []
    for argument in program_arguments:
      command += " %s" % argument
    user_name = top_level.get('UserName')
    group_name = top_level.get('GroupName')

    event_data = plist_event.PlistTimeEventData()
    # Bug fix: use '!s' conversions instead of ':s' format specs — ':s' raises
    # TypeError when any of these values is None (missing plist key).
    event_data.desc = ('Launchd service config {0!s} points to {1!s} with '
                       'user:{2!s} group:{3!s}').format(label, command,
                                                        user_name, group_name)
    event_data.key = 'launchdServiceConfig'
    event_data.root = '/'

    # Launchd configuration plists carry no timestamp of their own, so emit a
    # semantic "Not set" time value.
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)
plist.PlistParser.RegisterPlugin(LaunchdPlugin)
| plaso/parsers/plist_plugins/launchd.py | 3,344 | Basic plugin to extract launchd configuration information.
Further details about fields within the key:
Label:
the required key for uniquely identifying the launchd service.
Program:
absolute path to the executable. required in the absence of the
ProgramArguments key.
ProgramArguments:
command-line flags for the executable. required in the absence of the
Program key.
UserName:
the job run as the specified user.
GroupName:
the job run as the specified group.
Extracts launchd information from the plist.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
top_level (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
Check if it is a valid MacOS plist file name.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
plist_name (str): name of the plist.
top_level (dict[str, object]): plist top-level key.
Launchd plist plugin.
-*- coding: utf-8 -*- The PLIST_PATH is dynamic, the prefix filename is, by default, named using reverse-domain notation. For example, Chrome is com.google.chrome.plist. /System/Library/LaunchDaemons/*.plist /System/Library/LaunchAgents/*.plist /Library/LaunchDaemons/*.plist /Library/LaunchAgents/*.plist ~/Library/LaunchAgents pylint: disable=arguments-differ pylint: disable=arguments-differ | 1,448 | en | 0.76554 |
import atexit
from .MecanumRover_MotorDriver import MecanumRover_MotorDriver
import traitlets
from traitlets.config.configurable import Configurable
class Motor(Configurable):
    """Traitlets-configurable wrapper for one channel of the motor driver.

    Assigning ``value`` (expected in [-1, 1]) drives the motor at the
    corresponding fraction of its maximum speed.
    """

    # Commanded motor value in [-1, 1]; observed below and translated into a
    # driver speed command.
    value = traitlets.Float()

    # config: linear mapping `alpha * value + beta` applied before scaling
    alpha = traitlets.Float(default_value=1.0).tag(config=True)
    beta = traitlets.Float(default_value=0.0).tag(config=True)

    def __init__(self, driver, channel, *args, **kwargs):
        """Bind this instance to `channel` of `driver`."""
        super(Motor, self).__init__(*args, **kwargs)  # initializes traitlets

        self._driver = driver
        self._motor = self._driver.getMotor(channel)
        # Make sure the motor is stopped when the interpreter exits.
        atexit.register(self._release)

    @traitlets.observe('value')
    def _observe_value(self, change):
        # Forward every change of `value` to the hardware.
        self._write_value(change['new'])

    def _write_value(self, value):
        """Sets motor value between [-1, 1]"""
        # Dead zone: suppress jitter from joysticks and similar inputs.
        if abs(value) <= 0.05:
            value = 0.0
        # Convert to the motor's target speed in mm/s (max. 1300 mm/s),
        # then clamp to the valid range.
        mapped_value = int(1300.0 * (self.alpha * value + self.beta))
        speed = min(max(mapped_value, -1300), 1300)
        self._motor.setSpeed(speed)

    def _release(self):
        """Stops motor by releasing control"""
        self._motor.setSpeed(0)
| jetbot/motor.py | 1,248 | Stops motor by releasing control
Sets motor value between [-1, 1]
config initializes traitlets ジョイスティック等の値ブレ対策モータの目標速度(mm/s)に変換。※最高1300mm/s | 141 | ja | 0.48393 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
函数
在python中函数默认的返回对象是None
"""
# 默认返回值为None
def hello():
    """Print a greeting; there is no explicit return statement."""
    print("Hello World!")

# A function without an explicit `return` yields None by default.
print(type(hello()))
# 可以返回多个对象,默认是元组
def foo():
    """Return several values at once, packaged in a single list."""
    return ['xyz', 1000, -98.6]

# Sequence unpacking splits the returned list into three separate names.
x, y, z = foo()
print(x, y, z)
# 关键字参数
def foo1(x):
    """Echo the argument to stdout."""
    print(x)

# Arguments can be supplied by keyword rather than position.
foo1(x='abc')
"""
创建函数
def function_name(arguments):
"function documentation string"
function body suite
"""
# NOTE(review): the greeting lacks a space ("helloBob"); left unchanged since
# this tutorial also prints the docstring below, which must stay byte-exact.
def helloSomeOne(who):
    """hello to someone"""
    print("hello" + who)

# A function's docstring is accessible via its __doc__ attribute.
print(helloSomeOne.__doc__)
"""
内部/内嵌函数
如果内部函数的定义包含了在外部函数里定义的对象的引用,内部函数被称为闭包
"""
def fo():
    """Outer function demonstrating a nested (inner) function."""
    def ba():
        # Only visible inside fo(); called after the outer print below.
        print("ba called")
    print("fo called")
    ba()

fo()
"""
传递函数
函数是可以被引用的(访问或者以其他变量作为别名)
对对象是函数,这个对象的所有别名都是可以调用的
"""
def foo():
    """Print a marker showing that this function body ran."""
    print("in foo()")

bar = foo  # `bar` is just another reference to the same function object
bar()      # calling the alias invokes foo's body


def convert(func, seq):
    """Apply `func` to every element of `seq`; return the results as a list."""
    return [func(eachNum) for eachNum in seq]

# Fixed: `999999L` is Python 2-only long-integer syntax and a SyntaxError in
# Python 3, while the rest of this file uses py3-style print() calls.
# Plain `999999` is valid (and equal) in both versions.
myseq = (123, 45.67, -6.2e8, 999999)
print(convert(int, myseq))
print(convert(float, myseq))
| python/python_function/func.py | 1,216 | !/usr/bin/env python -*- coding: utf-8 -*- 默认返回值为None 可以返回多个对象,默认是元组 关键字参数 | 74 | zh | 0.806087 |
# -*- coding: utf-8 -*-
"""
Disaster Victim Identification, Controllers
@author: nursix
"""
# Resolve the controller/function names for the current request.
module = request.controller
resourcename = request.function

# Reject the request early if the DVI module is disabled in deployment settings.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def s3_menu_postp():
    """ Append "Open recent" shortcuts for recently viewed records to the menu """
    # @todo: rewrite this for new framework
    menu_selected = []
    # Last-viewed dead body (stored in the session by the body controller)
    body_id = s3mgr.get_session("dvi", "body")
    if body_id:
        body = s3db.dvi_body
        query = (body.id == body_id)
        record = db(query).select(body.id, body.pe_label,
                                  limitby=(0,1)).first()
        if record:
            label = record.pe_label
            # Shortcut to the match finder, pre-filtered for this body
            response.menu_options[-3][-1].append(
                    [T("Candidate Matches for Body %s" % label),
                    False, URL(f="person",
                               vars=dict(match=record.id))]
                )
            menu_selected.append(
                    ["%s: %s" % (T("Body"), label),
                    False, URL(f="body", args=[record.id])]
                )
    # Last-viewed person record (from the person registry)
    person_id = s3mgr.get_session("pr", "person")
    if person_id:
        person = s3db.pr_person
        query = (person.id == person_id)
        record = db(query).select(person.id, limitby=(0, 1)).first()
        if record:
            name = s3db.pr_person_represent(record.id)
            menu_selected.append(
                    ["%s: %s" % (T("Person"), name),
                    False, URL(f="person", args=[record.id])]
                )
    # Only add the "Open recent" sub-menu if there is anything to show
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """

    try:
        module_name = settings.modules[module].name_nice
    except (KeyError, AttributeError):
        # Module metadata may be missing or incomplete in deployment
        # settings; fall back to a generic title. (Fixed: was a bare
        # `except:` which would also swallow SystemExit/KeyboardInterrupt.)
        module_name = T("Disaster Victim Identification")

    # Total number of (non-deleted) registered bodies
    table = s3db.dvi_body
    total = db(table.deleted == False).count()

    # Number of bodies with a confirmed identification (status == 3)
    itable = s3db.dvi_identification
    query = (table.deleted == False) & \
            (itable.pe_id == table.pe_id) & \
            (itable.deleted == False) & \
            (itable.status == 3)
    identified = db(query).count()

    # JSON data for the identification-progress chart on the homepage
    status = [[str(T("identified")), int(identified)],
              [str(T("unidentified")), int(total - identified)]]

    response.title = module_name
    return dict(module_name=module_name,
                total=total,
                status=json.dumps(status))
# -----------------------------------------------------------------------------
def recreq():
    """ Recovery Requests List """

    table = s3db.dvi_recreq
    # New requests default to the logged-in person as the reporter
    table.person_id.default = s3_logged_in_person()

    def prep(r):
        # On create forms (no record yet) hide the status and the
        # bodies-recovered count: both only make sense after recovery.
        if r.interactive and not r.record:
            for fieldname in ("status", "bodies_recovered"):
                field = getattr(table, fieldname)
                field.readable = field.writable = False
        return True
    s3.prep = prep

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def morgue():
    """ Morgue Registry """

    tabs = [(T("Morgue Details"), ""),
            (T("Bodies"), "body")]
    rheader = S3ResourceHeader([[(T("Morgue"), "name")]], tabs=tabs)

    # Pre-processor
    def prep(r):
        # Apply the regional location filter from the GIS configuration
        s3db.gis_location_filter(r)
        # Allow marking an existing morgue as obsolete on its detail page
        if r.interactive and r.id and not r.component:
            field = r.table.obsolete
            field.readable = field.writable = True
        return True
    s3.prep = prep

    return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def body():
    """ Dead Bodies Registry """

    # Relabel gender option 1 as "unknown" for unidentified bodies.
    # NOTE(review): this mutates the shared pr_gender_opts dict in place —
    # presumably intentional so field representations pick it up; confirm.
    gender_opts = s3db.pr_gender_opts
    gender_opts[1] = T("unknown")

    btable = s3db.dvi_body
    itable = s3db.dvi_identification

    # ?status=unidentified: exclude all bodies which already have a
    # confirmed identification (identification status == 3)
    status = request.get_vars.get("status", None)
    if status == "unidentified":
        query = (itable.deleted == False) & \
                (itable.status == 3)
        ids = db(query).select(itable.pe_id)
        ids = [i.pe_id for i in ids]
        if ids:
            s3.filter = (~(btable.pe_id.belongs(ids)))

    s3db.configure("dvi_body", main="pe_label", extra="gender")

    # Hide the note status field in the journal tab
    ntable = s3db.pr_note
    ntable.status.readable = False
    ntable.status.writable = False

    dvi_tabs = [(T("Recovery"), ""),
                (T("Checklist"), "checklist"),
                (T("Images"), "image"),
                (T("Physical Description"), "physical_description"),
                (T("Effects Inventory"), "effects"),
                (T("Journal"), "note"),
                (T("Identification"), "identification")]

    rheader = S3ResourceHeader([
                    [(T("ID Tag Number"), "pe_label")],
                    ["gender"],
                    ["age_group"],
                ], tabs=dvi_tabs)

    output = s3_rest_controller(rheader=rheader)
    return output
# -----------------------------------------------------------------------------
def person():
    """ Missing Persons Registry (Match Finder) """

    table = s3db.pr_person

    # CRUD strings tailored to missing persons
    s3.crud_strings["pr_person"].update(
        title_display = T("Missing Person Details"),
        title_list = T("Missing Persons"),
        label_list_button = T("List Missing Persons"),
        msg_list_empty = T("No Persons found"),
        msg_no_match = T("No Persons currently reported missing"))

    s3db.configure("pr_group_membership",
                   list_fields=["id",
                                "group_id",
                                "group_head",
                                "description"
                               ])

    # Person records are read-only in this module (maintained elsewhere)
    s3db.configure("pr_person",
                   listadd=False,
                   editable=False,
                   deletable=False,
                   list_fields=["id",
                                "first_name",
                                "middle_name",
                                "last_name",
                                "picture",
                                "gender",
                                "age_group"
                               ])

    def prep(r):
        # ?match=<body_id>: restrict the list to candidate matches for
        # that body (see dvi_match_query)
        if not r.id and not r.method and not r.component:
            body_id = r.get_vars.get("match", None)
            body = db(db.dvi_body.id == body_id).select(
                        db.dvi_body.pe_label, limitby=(0, 1)).first()
            # `label` is only used by the commented-out subtitle below
            label = body and body.pe_label or "#%s" % body_id
            if body_id:
                query = dvi_match_query(body_id)
                r.resource.add_filter(query)
                s3.crud_strings["pr_person"].update(
                    #subtitle_list = T("Candidate Matches for Body %s" % label),
                    msg_no_match = T("No matching records found"))
        return True
    s3.prep = prep

    # Hide the "missing" flag (always True in this registry)
    field = table.missing
    field.readable = False
    field.writable = False
    field.default = True

    table.age_group.readable = True
    table.age_group.writable = True

    # Show only missing persons in list views
    if len(request.args) == 0:
        s3.filter = (db.pr_person.missing == True)

    mpr_tabs = [
                (T("Missing Report"), "missing_report"),
                (T("Person Details"), None),
                (T("Physical Description"), "physical_description"),
                (T("Images"), "image"),
                (T("Identity"), "identity"),
                (T("Address"), "address"),
                (T("Contact Data"), "contact"),
                (T("Journal"), "note"),
               ]

    rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)

    output = s3_rest_controller("pr", "person",
                                main="first_name",
                                extra="last_name",
                                rheader=rheader)
    return output
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
    """
    Get a query for candidate matches between the missing
    persons registry and a dead body

    @param body_id: the dvi_body record ID
    """

    ptable = s3db.pr_person
    ntable = s3db.pr_note
    btable = s3db.dvi_body

    # Base query: missing persons who have at least one "last seen" note
    query = ((ptable.deleted == False) &
             (ptable.missing == True) &
             (ntable.pe_id == ptable.pe_id) &
             (ntable.status == 1))
    body = btable[body_id]
    if not body:
        return query

    # last seen should be before date of recovery
    if body.date_of_recovery:
        q = ((ntable.timestmp <= body.date_of_recovery) |
             (ntable.timestmp == None))
        query = query & q

    # age group should match (value 1 = "unknown" matches anything)
    if body.age_group and body.age_group != 1:
        q = ((ptable.age_group == None) |
             (ptable.age_group == 1) |
             (ptable.age_group == body.age_group))
        query = query & q

    # gender should match (value 1 = "unknown" matches anything)
    if body.gender and body.gender != 1:
        q = ((ptable.gender == None) |
             (ptable.gender == 1) |
             (ptable.gender == body.gender))
        # Fixed: this sub-query was built but never combined into `query`,
        # so the gender criterion silently had no effect on the matches.
        query = query & q

    return query
# -----------------------------------------------------------------------------
def tooltip():
    """ Serve AJAX tooltip fragments for form fields """

    field_name = request.vars.get("formfield", None)
    if field_name:
        # Render the per-field tooltip view instead of the default one
        response.view = "pr/ajaxtips/%s.html" % field_name
    return dict()
# END =========================================================================
| controllers/dvi.py | 9,576 | Dead Bodies Registry
Get a query for candidate matches between the missing
persons registry and a dead body
@param body_id: the dvi_body record ID
Module's Home Page
Morgue Registry
Missing Persons Registry (Match Finder)
Recovery Requests List
Ajax Tooltips
Disaster Victim Identification, Controllers
@author: nursix
-*- coding: utf-8 -*- ----------------------------------------------------------------------------- @todo: rewrite this for new framework ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- ----------------------------------------------------------------------------- Pre-processor Location Filter ----------------------------------------------------------------------------- -----------------------------------------------------------------------------subtitle_list = T("Candidate Matches for Body %s" % label), Show only missing persons in list views ------------------------------------------------------------------------- last seen should be before date of recovery age group should match gender should match ----------------------------------------------------------------------------- END ========================================================================= | 1,302 | en | 0.378676 |
# -*- coding: utf-8 -*-
# Authors: Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import copy
import glob
import os
import os.path as op
import shutil
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel, Bunch,
run_tests_if_main, traits_test, requires_h5py)
from mne.viz import plot_alignment
# Paths into the optional MNE testing dataset (download=False; tests that
# need these files are gated behind @testing.requires_testing_data).
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
                    'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
# Small fixture file shipped with the package itself (always available).
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
                               'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
    """Create two example figures."""
    figures = []
    for pts in ([1, 2], [3, 4]):
        line = plt.plot(pts, pts)[0]
        figures.append(line.figure)
    return figures
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report():
    """Test rendering -*.fif files for mne report."""
    tempdir = _TempDir()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
    event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
    cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
    fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
    inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
    for a, b in [[raw_fname, raw_fname_new],
                 [ms_fname, ms_fname_new],
                 [event_fname, event_fname_new],
                 [cov_fname, cov_fname_new],
                 [fwd_fname, fwd_fname_new],
                 [inv_fname, inv_fname_new]]:
        shutil.copyfile(a, b)
    # create and add -epo.fif and -ave.fif files
    epochs_fname = op.join(tempdir, 'temp-epo.fif')
    evoked_fname = op.join(tempdir, 'temp-ave.fif')
    # Speed it up by picking channels
    raw = read_raw_fif(raw_fname_new, preload=True)
    raw.pick_channels(['MEG 0111', 'MEG 0121'])
    raw.del_proj()
    epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
    epochs.save(epochs_fname, overwrite=True)
    # This can take forever (stall Travis), so let's make it fast
    # Also, make sure crop range is wide enough to avoid rendering bug
    epochs.average().crop(0.1, 0.2).save(evoked_fname)
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    assert repr(report)
    # Check correct paths and filenames
    fnames = glob.glob(op.join(tempdir, '*.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)
    assert_equal(len(report.fnames), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    assert_equal(len(report.fnames), len(report))
    # Check saving functionality
    report.data_path = tempdir
    fname = op.join(tempdir, 'report.html')
    report.save(fname=fname, open_browser=False)
    assert (op.isfile(fname))
    with open(fname, 'rb') as fid:
        html = fid.read().decode('utf-8')
    assert '(MaxShield on)' in html
    assert_equal(len(report.html), len(fnames))
    assert_equal(len(report.html), len(report.fnames))
    # Check saving same report to new filename
    report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
    assert (op.isfile(op.join(tempdir, 'report2.html')))
    # Check overwriting file
    report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
    assert (op.isfile(op.join(tempdir, 'report.html')))
    # Check pattern matching with multiple patterns
    pattern = ['*raw.fif', '*eve.fif']
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, pattern=pattern)
    assert (repr(report))
    # BUG FIX: the old code globbed '*.raw' twice, which matches nothing in
    # tempdir (all files there are .fif), so the loop below silently checked
    # nothing. Glob the same patterns that were passed to parse_folder.
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
    for fname in fnames:
        assert (op.basename(fname) in
                [op.basename(x) for x in report.fnames])
        assert (''.join(report.html).find(op.basename(fname)) != -1)
    # Invalid image formats must be rejected at construction time
    pytest.raises(ValueError, Report, image_format='foo')
    pytest.raises(ValueError, Report, image_format=None)
    # SVG rendering
    report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
                    image_format='svg')
    with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
        report.parse_folder(data_path=tempdir, on_error='raise')
    # ndarray support smoke test
    report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
    with pytest.raises(TypeError, match='Each fig must be a'):
        report.add_figs_to_section('foo', 'caption', 'section')
    with pytest.raises(TypeError, match='Each fig must be a'):
        report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date():
    """Test report raw PSD and DATE_NONE functionality."""
    # raw_psd must be a bool or a dict of kwargs
    with pytest.raises(TypeError, match='dict'):
        Report(raw_psd='foo')
    tempdir = _TempDir()
    raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
    raw_fname_new = op.join(tempdir, 'temp_raw.fif')
    raw.save(raw_fname_new)
    report = Report(raw_psd=True)
    report.parse_folder(data_path=tempdir, render_bem=False,
                        on_error='raise')
    assert isinstance(report.html, list)
    assert 'PSD' in ''.join(report.html)
    assert 'GMT' in ''.join(report.html)  # measurement date gets rendered
    # DATE_NONE functionality
    report = Report()
    raw.anonymize()  # removes the measurement date from the file
    raw.save(raw_fname_new, overwrite=True)
    report.parse_folder(data_path=tempdir, render_bem=False,
                        on_error='raise')
    assert isinstance(report.html, list)
    assert 'GMT' not in ''.join(report.html)  # no date left to render
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_render_add_sections():
    """Test adding figures/images to section."""
    tempdir = _TempDir()
    report = Report(subjects_dir=subjects_dir)
    # Check add_figs_to_section functionality
    fig = plt.plot([1, 2], [1, 2])[0].figure
    report.add_figs_to_section(figs=fig,  # test non-list input
                               captions=['evoked response'], scale=1.2,
                               image_format='svg')
    # captions must match figs; scale must be strictly positive
    pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
                  captions='H')
    pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
                  captions=['foo'], scale=0, image_format='svg')
    pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
                  captions=['foo'], scale=1e-10, image_format='svg')
    # need to recreate because calls above change size
    fig = plt.plot([1, 2], [1, 2])[0].figure
    # Check add_images_to_section with png
    img_fname = op.join(tempdir, 'testimage.png')
    fig.savefig(img_fname)
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])
    report.add_images_to_section(fnames=[img_fname],
                                 captions=['evoked response'])
    pytest.raises(ValueError, report.add_images_to_section,
                  fnames=[img_fname, img_fname], captions='H')
    pytest.raises(ValueError, report.add_images_to_section,
                  fnames=['foobar.xxx'], captions='H')
    evoked = read_evokeds(evoked_fname, condition='Left Auditory',
                          baseline=(-0.2, 0.0))
    # A mayavi scene should be accepted as a figure too
    fig = plot_alignment(evoked.info, trans_fname, subject='sample',
                         subjects_dir=subjects_dir)
    report.add_figs_to_section(figs=fig,  # test non-list input
                               captions='random image', scale=1.2)
    assert (repr(report))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mayavi
@traits_test
@requires_nibabel()
def test_render_mri():
    """Test rendering MRI for mne report."""
    tempdir = _TempDir()
    trans_fname_new = op.join(tempdir, 'temp-trans.fif')
    for a, b in [[trans_fname, trans_fname_new]]:
        shutil.copyfile(a, b)
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    # mri_decim=30 decimates the MRI rendering heavily to keep the test fast
    report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
    assert repr(report)
    report.add_bem_to_section('sample', caption='extra', section='foo',
                              subjects_dir=subjects_dir, decim=30)
    # overwrite=True: the report file already exists from the save above
    report.save(op.join(tempdir, 'report.html'), open_browser=False,
                overwrite=True)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
    """Test rendering MRI without BEM for mne report."""
    tempdir = _TempDir()
    # Build a minimal subjects_dir containing only a T1, no BEM surfaces
    os.mkdir(op.join(tempdir, 'sample'))
    os.mkdir(op.join(tempdir, 'sample', 'mri'))
    shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=tempdir)
    report.parse_folder(tempdir, render_bem=False)
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
    """Test adding html str to mne report."""
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    html = '<b>MNE-Python is AWESOME</b>'
    caption, section = 'html', 'html_section'
    report.add_htmls_to_section(html, caption, section)
    # Section labels are stored with a 'report_' prefix; the raw html must
    # appear verbatim in the rendered entry for that section.
    idx = report._sectionlabels.index('report_' + section)
    html_compare = report.html[idx]
    assert (html in html_compare)
    assert (repr(report))
def test_add_slider_to_section():
    """Test adding a slider with a series of images to mne report."""
    tempdir = _TempDir()
    report = Report(info_fname=raw_fname,
                    subject='sample', subjects_dir=subjects_dir)
    section = 'slider_section'
    figs = _get_example_figures()
    report.add_slider_to_section(figs, section=section, title='my title')
    assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
    report.save(op.join(tempdir, 'report.html'), open_browser=False)
    # Nested lists of figures are not supported
    pytest.raises(NotImplementedError, report.add_slider_to_section,
                  [figs, figs])
    # Captions must be valid and match the number of figures
    pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
    pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
    # need at least 2
    pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
    # Smoke test that SVG w/unicode can be added
    report = Report()
    fig, ax = plt.subplots()
    ax.set_xlabel(u'μ')
    report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
    """Test Report input validation."""
    report = Report()
    items = ['a', 'b', 'c']
    captions = ['Letter A', 'Letter B', 'Letter C']
    section = 'ABCs'
    comments = ['First letter of the alphabet.',
                'Second letter of the alphabet',
                'Third letter of the alphabet']
    # Mismatched lengths of captions/comments vs. items must raise
    pytest.raises(ValueError, report._validate_input, items, captions[:-1],
                  section, comments=None)
    pytest.raises(ValueError, report._validate_input, items, captions, section,
                  comments=comments[:-1])
    # With comments=None the returned comments are padded to len(items)
    values = report._validate_input(items, captions, section, comments=None)
    items_new, captions_new, comments_new = values
    assert_equal(len(comments_new), len(items))
@requires_h5py
def test_open_report():
    """Test the open_report function."""
    tempdir = _TempDir()
    hdf5 = op.join(tempdir, 'report.h5')
    # Test creating a new report through the open_report function
    fig1 = _get_example_figures()[0]
    with open_report(hdf5, subjects_dir=subjects_dir) as report:
        assert report.subjects_dir == subjects_dir
        assert report._fname == hdf5
        report.add_figs_to_section(figs=fig1, captions=['evoked response'])
    # Exiting the context block should have triggered saving to HDF5
    assert op.exists(hdf5)
    # Load the HDF5 version of the report and check equivalence
    report2 = open_report(hdf5)
    assert report2._fname == hdf5
    assert report2.subjects_dir == report.subjects_dir
    assert report2.html == report.html
    assert report2.__getstate__() == report.__getstate__()
    assert '_fname' not in report2.__getstate__()
    # Check parameters when loading a report
    pytest.raises(ValueError, open_report, hdf5, foo='bar')  # non-existing
    pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
    open_report(hdf5, subjects_dir=subjects_dir)  # This should work
    # Check that the context manager doesn't swallow exceptions
    with pytest.raises(ZeroDivisionError):
        with open_report(hdf5, subjects_dir=subjects_dir) as report:
            1 / 0
def test_remove():
    """Test removing figures from a report."""
    r = Report()
    fig1, fig2 = _get_example_figures()
    r.add_figs_to_section(fig1, 'figure1', 'mysection')
    r.add_slider_to_section([fig1, fig2], title='figure1',
                            section='othersection')
    r.add_figs_to_section(fig2, 'figure1', 'mysection')
    r.add_figs_to_section(fig2, 'figure2', 'mysection')
    # Test removal by caption
    r2 = copy.deepcopy(r)
    removed_index = r2.remove(caption='figure1')
    assert removed_index == 2  # the last 'figure1' entry is the one removed
    assert len(r2.html) == 3
    assert r2.html[0] == r.html[0]
    assert r2.html[1] == r.html[1]
    assert r2.html[2] == r.html[3]
    # Test restricting to section
    r2 = copy.deepcopy(r)
    removed_index = r2.remove(caption='figure1', section='othersection')
    assert removed_index == 1
    assert len(r2.html) == 3
    assert r2.html[0] == r.html[0]
    assert r2.html[1] == r.html[2]
    assert r2.html[2] == r.html[3]
    # Test removal of empty sections
    r2 = copy.deepcopy(r)
    r2.remove(caption='figure1', section='othersection')
    assert r2.sections == ['mysection']
    assert r2._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
    """Test replacing existing figures in a report."""
    r = Report()
    fig1, fig2 = _get_example_figures()
    r.add_figs_to_section(fig1, 'duplicate', 'mysection')
    r.add_figs_to_section(fig1, 'duplicate', 'mysection')
    r.add_figs_to_section(fig1, 'duplicate', 'othersection')
    r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
    # By default, replace=False, so all figures should be there
    assert len(r.html) == 4
    old_r = copy.deepcopy(r)
    # Re-add fig1 with replace=True, it should overwrite the last occurrence of
    # fig1 in section 'mysection'.
    r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
    assert len(r.html) == 4
    assert r.html[1] != old_r.html[1]  # This figure should have changed
    # All other figures should be the same
    assert r.html[0] == old_r.html[0]
    assert r.html[2] == old_r.html[2]
    assert r.html[3] == old_r.html[3]
def test_scraper(tmpdir):
    """Test report scraping."""
    r = Report()
    fig1, fig2 = _get_example_figures()
    r.add_figs_to_section(fig1, 'a', 'mysection')
    r.add_figs_to_section(fig2, 'b', 'mysection')
    # Mock a Sphinx + sphinx_gallery config
    app = Bunch(builder=Bunch(srcdir=str(tmpdir),
                              outdir=op.join(str(tmpdir), '_build', 'html')))
    scraper = _ReportScraper()
    scraper.app = app
    gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
    img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
                        'sg_img.png')
    target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
    os.makedirs(op.dirname(img_fname))
    os.makedirs(app.builder.outdir)
    block_vars = dict(image_path_iterator=(img for img in [img_fname]),
                      example_globals=dict(a=1), target_file=target_file)
    # Nothing yet: no Report in the example globals
    block = None
    rst = scraper(block, block_vars, gallery_conf)
    assert rst == ''
    # Still nothing: the Report exists but has not been saved yet
    block_vars['example_globals']['r'] = r
    rst = scraper(block, block_vars, gallery_conf)
    # Once it's saved, add it
    assert rst == ''
    fname = op.join(str(tmpdir), 'my_html.html')
    r.save(fname, open_browser=False)
    rst = scraper(block, block_vars, gallery_conf)
    out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
    # The html is only copied into the build tree by copyfiles(), not before
    assert not op.isfile(out_html)
    os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
    scraper.copyfiles()
    assert op.isfile(out_html)
    assert rst.count('"') == 6
    assert "<iframe" in rst
    assert op.isfile(img_fname.replace('png', 'svg'))


# Run the tests when this module is executed as a script.
run_tests_if_main()
| mne/tests/test_report.py | 17,721 | Create two example figures.
Test adding html str to mne report.
Test replacing existing figures in a report.
Test adding a slider with a series of images to mne report.
Test the open_report function.
Test removing figures from a report.
Test adding figures/images to section.
Test rendering MRI for mne report.
Test rendering MRI without BEM for mne report.
Test rendering -*.fif files for mne report.
Test report raw PSD and DATE_NONE functionality.
Test report scraping.
Test Report input validation.
-*- coding: utf-8 -*- Authors: Mainak Jas <mainak@neuro.hut.fi> Teon Brooks <teon.brooks@gmail.com> License: BSD (3-clause) create and add -epo.fif and -ave.fif files Speed it up by picking channels This can take forever (stall Travis), so let's make it fast Also, make sure crop range is wide enough to avoid rendering bug Check correct paths and filenames Check saving functionality Check saving same report to new filename Check overwriting file Check pattern matching with multiple patterns SVG rendering ndarray support smoke test DATE_NONE functionality Check add_figs_to_section functionality test non-list input need to recreate because calls above change size Check add_images_to_section with png test non-list input need at least 2 Smoke test that SVG w/unicode can be added Test creating a new report through the open_report function Exiting the context block should have triggered saving to HDF5 Load the HDF5 version of the report and check equivalence Check parameters when loading a report non-existing This should work Check that the context manager doesn't swallow exceptions Test removal by caption Test restricting to section Test removal of empty sections By default, replace=False, so all figures should be there Re-add fig1 with replace=True, it should overwrite the last occurrence of fig1 in section 'mysection'. This figure should have changed All other figures should be the same Mock a Sphinx + sphinx_gallery config Nothing yet Still nothing Once it's saved, add it | 2,007 | en | 0.757208 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.31
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _sequencer_osx
import new
# SWIG 1.3.31 boilerplate: bind the Python-2 'new' module's instancemethod
# factory under the name the generated code expects.
new_instancemethod = new.instancemethod
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == "PySwigObject":
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static) or hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: static=0 allows adding new attributes freely.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError(name)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (
self.__class__.__module__,
self.__class__.__name__,
strthis,
)
import types
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Ancient Pythons without new-style classes: fall back to a classic
    # class as the proxy base.
    class _object:
        pass
    _newclass = 0
del types
# Re-export the C extension's MIDI entry points at module level so the
# rest of the package can call them without touching _sequencer_osx.
_MIDIGetNumberOfDevices = _sequencer_osx._MIDIGetNumberOfDevices
_MIDIClientCreate = _sequencer_osx._MIDIClientCreate
_MIDIClientDispose = _sequencer_osx._MIDIClientDispose
_MIDISourceCreate = _sequencer_osx._MIDISourceCreate
_MIDIOutputPortCreate = _sequencer_osx._MIDIOutputPortCreate
_MIDIPortConnectSource = _sequencer_osx._MIDIPortConnectSource
| src/sequencer_osx/sequencer_osx.py | 2,030 | This file was automatically generated by SWIG (http://www.swig.org). Version 1.3.31 Don't modify this file, modify the SWIG interface instead. This file is compatible with both classic and new-style classes. Python < 2.2 doesn't have 'property'. | 245 | en | 0.90428 |
import argparse
import binascii
import os
from enum import Enum
from stor.plotters.bladebit import get_bladebit_install_info, plot_bladebit
from stor.plotters.chiapos import get_chiapos_install_info, plot_stor
from stor.plotters.madmax import get_madmax_install_info, plot_madmax
from stor.plotters.install_plotter import install_plotter
from pathlib import Path
from typing import Any, Dict, Optional
class Options(Enum):
    """Every CLI option any of the supported plotters can accept.

    Each plotter's option list below selects the subset it understands;
    build_parser() turns each member into an argparse argument.
    """
    TMP_DIR = 1
    TMP_DIR2 = 2
    FINAL_DIR = 3
    K = 4
    MEMO = 5
    ID = 6
    BUFF = 7
    NUM_BUCKETS = 8
    STRIPE_SIZE = 9
    NUM_THREADS = 10
    NOBITFIELD = 11
    PLOT_COUNT = 12
    MADMAX_NUM_BUCKETS_PHRASE3 = 13
    MADMAX_WAITFORCOPY = 14
    POOLKEY = 15
    FARMERKEY = 16
    MADMAX_TMPTOGGLE = 17
    POOLCONTRACT = 18
    MADMAX_RMULTI2 = 19
    BLADEBIT_WARMSTART = 20
    BLADEBIT_NONUMA = 21
    VERBOSE = 22
    OVERRIDE_K = 23
    ALT_FINGERPRINT = 24
    EXCLUDE_FINAL_DIR = 25
    CONNECT_TO_DAEMON = 26
# Options exposed by the reference chiapos plotter.
stor_plotter = [
    Options.TMP_DIR,
    Options.TMP_DIR2,
    Options.FINAL_DIR,
    Options.K,
    Options.MEMO,
    Options.ID,
    Options.BUFF,
    Options.NUM_BUCKETS,
    Options.STRIPE_SIZE,
    Options.NUM_THREADS,
    Options.NOBITFIELD,
    Options.OVERRIDE_K,
    Options.ALT_FINGERPRINT,
    Options.POOLCONTRACT,
    Options.FARMERKEY,
    Options.POOLKEY,
    Options.PLOT_COUNT,
    Options.EXCLUDE_FINAL_DIR,
    Options.CONNECT_TO_DAEMON,
]
# Options exposed by the madmax plotter.
madmax_plotter = [
    Options.K,
    Options.PLOT_COUNT,
    Options.NUM_THREADS,
    Options.NUM_BUCKETS,
    Options.MADMAX_NUM_BUCKETS_PHRASE3,
    Options.TMP_DIR,
    Options.TMP_DIR2,
    Options.FINAL_DIR,
    Options.MADMAX_WAITFORCOPY,
    Options.POOLKEY,
    Options.FARMERKEY,
    Options.POOLCONTRACT,
    Options.MADMAX_TMPTOGGLE,
    Options.MADMAX_RMULTI2,
    Options.CONNECT_TO_DAEMON,
]
# Options exposed by the bladebit plotter (RAM-based, no tmp dirs).
bladebit_plotter = [
    Options.NUM_THREADS,
    Options.PLOT_COUNT,
    Options.FARMERKEY,
    Options.POOLKEY,
    Options.POOLCONTRACT,
    Options.ID,
    Options.BLADEBIT_WARMSTART,
    Options.BLADEBIT_NONUMA,
    Options.FINAL_DIR,
    Options.VERBOSE,
    Options.CONNECT_TO_DAEMON,
]
def get_plotters_root_path(root_path: Path) -> Path:
    """Return the directory under *root_path* where plotters live."""
    return root_path.joinpath("plotters")
def build_parser(subparsers, root_path, option_list, name, plotter_desc):
    """Register an argparse subparser for the plotter *name*.

    Only the options listed in *option_list* are added, so each plotter's
    command line exposes exactly the flags it understands.

    NOTE(review): some short flags are reused with different meanings
    (e.g. ``-v`` is ``--buckets3`` for madmax but ``--verbose`` for
    bladebit, ``-w`` and ``-m`` are similarly shared), so an option list
    must never contain both members of such a pair.
    """
    parser = subparsers.add_parser(name, description=plotter_desc)
    for option in option_list:
        if option is Options.K:
            parser.add_argument(
                "-k",
                "--size",
                type=int,
                help="K value.",
                default=32,
            )
        # Bucket default differs per plotter; presumably 0 tells chiapos to
        # use its built-in default -- TODO confirm.
        u_default = 0 if name == "chiapos" else 256
        if option is Options.NUM_BUCKETS:
            parser.add_argument(
                "-u",
                "--buckets",
                type=int,
                help="Number of buckets.",
                default=u_default,
            )
        if option is Options.STRIPE_SIZE:
            parser.add_argument(
                "-s",
                "--stripes",
                type=int,
                help="Stripe size.",
                default=0,
            )
        if option is Options.TMP_DIR:
            parser.add_argument(
                "-t",
                "--tmp_dir",
                type=str,
                dest="tmpdir",
                help="Temporary directory 1.",
                default=str(root_path) + "/",
            )
        if option is Options.TMP_DIR2:
            parser.add_argument(
                "-2",
                "--tmp_dir2",
                type=str,
                dest="tmpdir2",
                help="Temporary directory 2.",
                default=str(root_path) + "/",
            )
        if option is Options.FINAL_DIR:
            parser.add_argument(
                "-d",
                "--final_dir",
                type=str,
                dest="finaldir",
                help="Final directory.",
                default=str(root_path) + "/",
            )
        if option is Options.BUFF:
            parser.add_argument(
                "-b",
                "--buffer",
                type=int,
                help="Size of the buffer, in MB.",
                default=0,
            )
        # madmax defaults to 4 threads; 0 presumably lets the other
        # plotters pick their own default -- TODO confirm.
        r_default = 4 if name == "madmax" else 0
        if option is Options.NUM_THREADS:
            parser.add_argument(
                "-r",
                "--threads",
                type=int,
                help="Num threads.",
                default=r_default,
            )
        if option is Options.NOBITFIELD:
            parser.add_argument(
                "-e",
                "--nobitfield",
                action="store_true",
                help="Disable bitfield.",
                default=False,
            )
        # Memo/id/keys arrive as hex strings and are decoded to bytes.
        if option is Options.MEMO:
            parser.add_argument(
                "-m",
                "--memo",
                type=binascii.unhexlify,
                help="Memo variable.",
            )
        if option is Options.ID:
            parser.add_argument(
                "-i",
                "--id",
                type=binascii.unhexlify,
                help="Plot id",
            )
        if option is Options.PLOT_COUNT:
            parser.add_argument(
                "-n",
                "--count",
                type=int,
                help="Number of plots to create (default = 1)",
                default=1,
            )
        if option is Options.MADMAX_NUM_BUCKETS_PHRASE3:
            parser.add_argument(
                "-v",
                "--buckets3",
                type=int,
                help="Number of buckets for phase 3+4 (default = 256)",
                default=256,
            )
        if option is Options.MADMAX_WAITFORCOPY:
            parser.add_argument(
                "-w",
                "--waitforcopy",
                action="store_true",
                help="Wait for copy to start next plot",
                default=False,
            )
        if option is Options.MADMAX_TMPTOGGLE:
            parser.add_argument(
                "-G",
                "--tmptoggle",
                action="store_true",
                help="Alternate tmpdir/tmpdir2 (default = false)",
                default=False,
            )
        if option is Options.POOLCONTRACT:
            parser.add_argument(
                "-c",
                "--contract",
                type=str,
                help="Pool Contract Address (64 chars)",
                default="",
            )
        if option is Options.MADMAX_RMULTI2:
            parser.add_argument(
                "-K",
                "--rmulti2",
                type=int,
                help="Thread multiplier for P2 (default = 1)",
                default=1,
            )
        if option is Options.POOLKEY:
            parser.add_argument(
                "-p",
                "--pool-key",
                type=binascii.unhexlify,
                help="Pool Public Key (48 bytes)",
                default="",
            )
        if option is Options.FARMERKEY:
            parser.add_argument(
                "-f",
                "--farmerkey",
                type=binascii.unhexlify,
                help="Farmer Public Key (48 bytes)",
                default="",
            )
        if option is Options.BLADEBIT_WARMSTART:
            parser.add_argument(
                "-w",
                "--warmstart",
                action="store_true",
                help="Warm start",
                default=False,
            )
        if option is Options.BLADEBIT_NONUMA:
            parser.add_argument(
                "-m",
                "--nonuma",
                action="store_true",
                help="Disable numa",
                default=False,
            )
        if option is Options.VERBOSE:
            parser.add_argument(
                "-v",
                "--verbose",
                action="store_true",
                help="Set verbose",
                default=False,
            )
        if option is Options.OVERRIDE_K:
            parser.add_argument(
                "--override-k",
                dest="override",
                action="store_true",
                help="Force size smaller than 32",
                default=False,
            )
        if option is Options.ALT_FINGERPRINT:
            parser.add_argument(
                "-a",
                "--alt_fingerprint",
                type=int,
                default=None,
                help="Enter the alternative fingerprint of the key you want to use",
            )
        if option is Options.EXCLUDE_FINAL_DIR:
            parser.add_argument(
                "-x",
                "--exclude_final_dir",
                action="store_true",
                help="Skips adding [final dir] to harvester for farming",
                default=False,
            )
        if option is Options.CONNECT_TO_DAEMON:
            parser.add_argument(
                "-D",
                "--connect-to-daemon",
                action="store_true",
                help=argparse.SUPPRESS,
                default=False,
            )
def call_plotters(root_path: Path, args):
    """Parse a plotter CLI invocation and dispatch it to the chosen plotter."""
    # Add `plotters` section in STOR_ROOT.
    stor_root_path = root_path
    root_path = get_plotters_root_path(root_path)
    if not root_path.is_dir():
        # A plain file may be squatting on the path; remove it so the
        # directory can be created. Failures are reported but non-fatal.
        if os.path.exists(root_path):
            try:
                os.remove(root_path)
            except Exception as e:
                print(f"Exception deleting old root path: {type(e)} {e}.")
        if not os.path.exists(root_path):
            print(f"Creating plotters folder within STOR_ROOT: {root_path}")
            try:
                os.mkdir(root_path)
            except Exception as e:
                print(f"Cannot create plotters root path {root_path} {type(e)} {e}.")
    plotters = argparse.ArgumentParser(description="Available options.")
    subparsers = plotters.add_subparsers(help="Available options", dest="plotter")
    build_parser(subparsers, root_path, stor_plotter, "chiapos", "Storpos Plotter")
    build_parser(subparsers, root_path, madmax_plotter, "madmax", "Madmax Plotter")
    build_parser(subparsers, root_path, bladebit_plotter, "bladebit", "Bladebit Plotter")
    install_parser = subparsers.add_parser("install", description="Install custom plotters.")
    install_parser.add_argument(
        "install_plotter", type=str, help="The plotters available for installing. Choose from madmax or bladebit."
    )
    args = plotters.parse_args(args)
    # Dispatch on the chosen subcommand.
    if args.plotter == "chiapos":
        plot_stor(args, stor_root_path)
    if args.plotter == "madmax":
        plot_madmax(args, stor_root_path, root_path)
    if args.plotter == "bladebit":
        plot_bladebit(args, stor_root_path, root_path)
    if args.plotter == "install":
        install_plotter(args.install_plotter, root_path)
def get_available_plotters(root_path) -> Dict[str, Any]:
    """Return install info for every plotter detected on this system."""
    install_dir: Path = get_plotters_root_path(root_path)
    found: Dict[str, Any] = {}
    probes: Dict[str, Optional[Dict[str, Any]]] = {
        "chiapos": get_chiapos_install_info(),
        "bladebit": get_bladebit_install_info(install_dir),
        "madmax": get_madmax_install_info(install_dir),
    }
    for plotter_name, info in probes.items():
        if info is not None:
            found[plotter_name] = info
    return found
| stor/plotters/plotters.py | 11,498 | Add `plotters` section in STOR_ROOT. | 36 | en | 0.273164 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    from setuptools import setup
except ImportError:
    # Fall back to the stdlib installer when setuptools is unavailable.
    from distutils.core import setup

# Read the long description; a context manager guarantees the file handle
# is closed (the old bare open() leaked it until garbage collection).
with open('README.rst') as readme_file:
    readme = readme_file.read()

# Runtime dependencies.
requirements = [
    'tweepy>=2.1',
    'pymongo>=2.8.0',
    'tendo>=0.0.18',
    'boto>=0.0.1',
    'nltk>=0.0.1',
    'zc.lockfile>=0.0.1',
    'flask>=0.0.1',
    'flask-bootstrap>=0.0.1'
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='chattersum',
    version='0.1.0',
    description='test',
    # BUG FIX: the README was read but never passed to setup(); publish it
    # as the long description.
    long_description=readme,
    author='Shane Eller',
    author_email='shane.eller@gmail.com',
    url='https://github.com/ellerrs/chattersum',
    packages=[
        'chattersum',
    ],
    package_dir={'chattersum':
                 'chattersum'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='chattersum',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    # BUG FIX: test_requirements was defined but never wired up.
    tests_require=test_requirements,
)
| setup.py | 1,398 | !/usr/bin/env python -*- coding: utf-8 -*- TODO: put package test requirements here | 83 | en | 0.515467 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterV1alpha4MachineSpecBootstrap(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_ref': 'IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef',
'data_secret_name': 'str'
}
attribute_map = {
'config_ref': 'configRef',
'data_secret_name': 'dataSecretName'
}
def __init__(self, config_ref=None, data_secret_name=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_ref = None
self._data_secret_name = None
self.discriminator = None
if config_ref is not None:
self.config_ref = config_ref
if data_secret_name is not None:
self.data_secret_name = data_secret_name
@property
def config_ref(self):
"""Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
"""
return self._config_ref
@config_ref.setter
def config_ref(self, config_ref):
"""Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
:param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
"""
self._config_ref = config_ref
@property
def data_secret_name(self):
"""Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: str
"""
return self._data_secret_name
@data_secret_name.setter
def data_secret_name(self, data_secret_name):
"""Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: str
"""
self._data_secret_name = data_secret_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to `to_str` so repr shows the full model contents.
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Equality is defined by the serialized contents, not identity.
    if isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap):
        return self.to_dict() == other.to_dict()
    return False
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Mirror image of __eq__: unequal types are always "not equal".
    if isinstance(other, IoXK8sClusterV1alpha4MachineSpecBootstrap):
        return self.to_dict() != other.to_dict()
    return True
| kubernetes/client/models/io_xk8s_cluster_v1alpha4_machine_spec_bootstrap.py | 5,002 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Returns true if both objects are equal
IoXK8sClusterV1alpha4MachineSpecBootstrap - a model defined in OpenAPI
Returns true if both objects are not equal
For `print` and `pprint`
Gets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:return: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
Sets the config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
:param config_ref: The config_ref of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: IoXK8sClusterV1alpha4MachineSpecBootstrapConfigRef
Gets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:return: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:rtype: str
Sets the data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap.
DataSecretName is the name of the secret that stores the bootstrap data script. If nil, the Machine should remain in the Pending state. # noqa: E501
:param data_secret_name: The data_secret_name of this IoXK8sClusterV1alpha4MachineSpecBootstrap. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 1,828 | en | 0.478514 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import os
import abc
import six
import time
import queue
import bisect
import logging
import importlib
import traceback
import numpy as np
import pandas as pd
from multiprocessing import Pool
from .cache import H
from ..config import C
from .ops import *
from ..log import get_module_logger
from ..utils import parse_field, read_bin, hash_args, normalize_cache_fields
from .base import Feature
from .cache import DiskDatasetCache, DiskExpressionCache
@six.add_metaclass(abc.ABCMeta)
class CalendarProvider(object):
    """Calendar provider base class

    Provide calendar data.
    """

    @abc.abstractmethod
    def calendar(self, start_time=None, end_time=None, freq="day", future=False):
        """Get calendar of certain market in given time range.

        Parameters
        ----------
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency, available: year/quarter/month/week/day
        future : bool
            whether including future trading day

        Returns
        ----------
        list
            calendar list
        """
        raise NotImplementedError("Subclass of CalendarProvider must implement `calendar` method")

    def locate_index(self, start_time, end_time, freq, future):
        """Locate the start time index and end time index in a calendar under certain frequency.

        Parameters
        ----------
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency, available: year/quarter/month/week/day
        future : bool
            whether including future trading day

        Returns
        -------
        pd.Timestamp
            the real start time
        pd.Timestamp
            the real end time
        int
            the index of start time
        int
            the index of end time
        """
        start_time = pd.Timestamp(start_time)
        end_time = pd.Timestamp(end_time)
        calendar, calendar_index = self._get_calendar(freq=freq, future=future)
        if start_time not in calendar_index:
            # `start_time` is not a trading day: snap it forward to the next one.
            try:
                start_time = calendar[bisect.bisect_left(calendar, start_time)]
            except IndexError:
                raise IndexError(
                    "`start_time` uses a future date, if you want to get future trading days, you can use: `future=True`"
                )
        start_index = calendar_index[start_time]
        if end_time not in calendar_index:
            # `end_time` is not a trading day: snap it backward to the previous one.
            end_time = calendar[bisect.bisect_right(calendar, end_time) - 1]
        end_index = calendar_index[end_time]
        return start_time, end_time, start_index, end_index

    def _get_calendar(self, freq, future):
        """Load calendar using memcache.

        Parameters
        ----------
        freq : str
            frequency of read calendar file
        future : bool
            whether including future trading day

        Returns
        -------
        list
            list of timestamps
        dict
            dict composed by timestamp as key and index as value for fast search
        """
        flag = f"{freq}_future_{future}"
        if flag in H["c"]:
            # hit the in-memory calendar cache
            _calendar, _calendar_index = H["c"][flag]
        else:
            _calendar = np.array(self._load_calendar(freq, future))
            _calendar_index = {x: i for i, x in enumerate(_calendar)}  # for fast search
            H["c"][flag] = _calendar, _calendar_index
        return _calendar, _calendar_index

    def _uri(self, start_time, end_time, freq, future=False):
        """Get the uri of calendar generation task (used as a cache/task key)."""
        return hash_args(start_time, end_time, freq, future)
@six.add_metaclass(abc.ABCMeta)
class InstrumentProvider(object):
    """Instrument provider base class

    Provide instrument data.
    """

    # instruments-type tags returned by `get_inst_type`
    LIST = "LIST"
    DICT = "DICT"
    CONF = "CONF"

    @staticmethod
    def instruments(market="all", filter_pipe=None):
        """Get the general config dictionary for a base market adding several dynamic filters.

        Parameters
        ----------
        market : str
            market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
        filter_pipe : list
            the list of dynamic filters

        Returns
        ----------
        dict
            dict of stockpool config
            {`market`=>base market name, `filter_pipe`=>list of filters}

            example :
            {'market': 'csi500',
             'filter_pipe': [{'filter_type': 'ExpressionDFilter',
               'rule_expression': '$open<40',
               'filter_start_time': None,
               'filter_end_time': None,
               'keep': False},
              {'filter_type': 'NameDFilter',
               'name_rule_re': 'SH[0-9]{4}55',
               'filter_start_time': None,
               'filter_end_time': None}]}
        """
        if filter_pipe is None:
            filter_pipe = []
        config = {"market": market, "filter_pipe": []}
        # the order of the filters will affect the result, so we need to keep
        # the order
        for filter_t in filter_pipe:
            config["filter_pipe"].append(filter_t.to_config())
        return config

    @abc.abstractmethod
    def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
        """List the instruments based on a certain stockpool config.

        Parameters
        ----------
        instruments : dict
            stockpool config
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        as_list : bool
            return instruments as list or dict

        Returns
        -------
        dict or list
            instruments list or dictionary with time spans
        """
        raise NotImplementedError("Subclass of InstrumentProvider must implement `list_instruments` method")

    def _uri(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
        """Get the uri of the instrument-listing task (used as a cache/task key)."""
        return hash_args(instruments, start_time, end_time, freq, as_list)

    @classmethod
    def get_inst_type(cls, inst):
        """Classify the `inst` argument as a stockpool config (CONF), an
        {instrument: spans} mapping (DICT) or a plain sequence of instrument
        names (LIST).

        FIX: the type is checked before looking for the "market" key; the old
        order (`"market" in inst` first) misclassified a *list* containing the
        string "market" as CONF.
        """
        if isinstance(inst, dict):
            # a stockpool config is a dict carrying a "market" key; any other
            # dict is treated as an {instrument: spans} mapping
            return cls.CONF if "market" in inst else cls.DICT
        if isinstance(inst, (list, tuple, pd.Index, np.ndarray)):
            return cls.LIST
        raise ValueError(f"Unknown instrument type {inst}")
@six.add_metaclass(abc.ABCMeta)
class FeatureProvider(object):
    """Feature provider class

    Provide feature data.
    """

    @abc.abstractmethod
    def feature(self, instrument, field, start_time, end_time, freq):
        """Get feature data.

        Parameters
        ----------
        instrument : str
            a certain instrument
        field : str
            a certain field of feature
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency, available: year/quarter/month/week/day

        Returns
        -------
        pd.Series
            data of a certain feature
        """
        raise NotImplementedError("Subclass of FeatureProvider must implement `feature` method")
@six.add_metaclass(abc.ABCMeta)
class ExpressionProvider(object):
    """Expression provider class

    Provide Expression data.
    """

    def __init__(self):
        # cache of parsed expression objects keyed by the raw field string so
        # every field is parsed/eval'ed at most once per provider instance
        self.expression_instance_cache = {}

    def get_expression_instance(self, field):
        """Parse `field` into an expression object, caching the result.

        NOTE(security): `eval` is applied to the output of `parse_field` by
        design (the expression mini-language is built from the Ops in this
        package); field strings must come from trusted configuration, never
        from untrusted user input.
        """
        try:
            if field in self.expression_instance_cache:
                expression = self.expression_instance_cache[field]
            else:
                expression = eval(parse_field(field))
                self.expression_instance_cache[field] = expression
        except NameError as e:
            # str(e) looks like: name 'Foo' is not defined -> split()[1] == "'Foo'"
            get_module_logger("data").exception(
                "ERROR: field [%s] contains invalid operator/variable [%s]" % (str(field), str(e).split()[1])
            )
            raise
        except SyntaxError:
            get_module_logger("data").exception("ERROR: field [%s] contains invalid syntax" % str(field))
            raise
        return expression

    @abc.abstractmethod
    def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
        """Get Expression data.

        Parameters
        ----------
        instrument : str
            a certain instrument
        field : str
            a certain field of feature
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency, available: year/quarter/month/week/day

        Returns
        -------
        pd.Series
            data of a certain expression
        """
        raise NotImplementedError("Subclass of ExpressionProvider must implement `Expression` method")
@six.add_metaclass(abc.ABCMeta)
class DatasetProvider(object):
    """Dataset provider class

    Provide Dataset data.
    """

    @abc.abstractmethod
    def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
        """Get dataset data.

        Parameters
        ----------
        instruments : list or dict
            list/dict of instruments or dict of stockpool config
        fields : list
            list of feature instances
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency

        Returns
        ----------
        pd.DataFrame
            a pandas dataframe with <instrument, datetime> index
        """
        raise NotImplementedError("Subclass of DatasetProvider must implement `Dataset` method")

    def _uri(
        self,
        instruments,
        fields,
        start_time=None,
        end_time=None,
        freq="day",
        disk_cache=1,
        **kwargs,
    ):
        """Get task uri, used when generating rabbitmq task in qlib_server

        Parameters
        ----------
        instruments : list or dict
            list/dict of instruments or dict of stockpool config
        fields : list
            list of feature instances
        start_time : str
            start of the time range
        end_time : str
            end of the time range
        freq : str
            time frequency
        disk_cache : int
            whether to skip(0)/use(1)/replace(2) disk_cache
        """
        return DiskDatasetCache._uri(instruments, fields, start_time, end_time, freq, disk_cache)

    @staticmethod
    def get_instruments_d(instruments, freq):
        """
        Parse different types of input instruments to output instruments_d
        Wrong format of input instruments will lead to exception.
        """
        if isinstance(instruments, dict):
            if "market" in instruments:
                # dict of stockpool config
                instruments_d = Inst.list_instruments(instruments=instruments, freq=freq, as_list=False)
            else:
                # dict of instruments and timestamp
                instruments_d = instruments
        elif isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
            # list or tuple of a group of instruments
            instruments_d = list(instruments)
        else:
            raise ValueError("Unsupported input type for param `instrument`")
        return instruments_d

    @staticmethod
    def get_column_names(fields):
        """
        Get column names from input fields
        """
        if len(fields) == 0:
            raise ValueError("fields cannot be empty")
        # NOTE: the previous implementation made an unused `fields.copy()`
        # call, which also broke tuple inputs; stringify directly instead.
        column_names = [str(f) for f in fields]
        return column_names

    @staticmethod
    def parse_fields(fields):
        # parse and check the input fields
        return [ExpressionD.get_expression_instance(f) for f in fields]

    @staticmethod
    def dataset_processor(instruments_d, column_names, start_time, end_time, freq):
        """
        Load and process the data, return the data set.
        - default using multi-kernel method.
        """
        normalize_column_names = normalize_cache_fields(column_names)
        # One process for one task, so that the memory will be freed quicker.
        if C.maxtasksperchild is None:
            p = Pool(processes=C.kernels)
        else:
            p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)

        # A dict input carries per-instrument span lists; a plain iterable
        # means "no span filtering" (spans=None). Unify the two shapes so a
        # single submission loop can serve both.
        if isinstance(instruments_d, dict):
            inst_span_pairs = list(instruments_d.items())
        else:
            inst_span_pairs = [(inst, None) for inst in instruments_d]

        data = dict()
        for inst, spans in inst_span_pairs:
            data[inst] = p.apply_async(
                DatasetProvider.expression_calculator,
                args=(
                    inst,
                    start_time,
                    end_time,
                    freq,
                    normalize_column_names,
                    spans,
                    C,
                ),
            )

        p.close()
        p.join()

        new_data = dict()
        for inst in sorted(data.keys()):
            # fetch each worker result once instead of twice
            inst_data = data[inst].get()
            if len(inst_data) > 0:
                # NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order
                new_data[inst] = inst_data

        if len(new_data) > 0:
            data = pd.concat(new_data, names=["instrument"], sort=False)
            data = DiskDatasetCache.cache_to_origin_data(data, column_names)
        else:
            data = pd.DataFrame(columns=column_names)

        return data

    @staticmethod
    def expression_calculator(inst, start_time, end_time, freq, column_names, spans=None, C=None):
        """
        Calculate the expressions for one instrument, return a df result.
        If the expression has been calculated before, load from cache.

        return value: A data frame with index 'datetime' and other data columns.
        """
        # NOTE: This place is compatible with windows, windows multi-process is spawn
        if getattr(ExpressionD, "_provider", None) is None:
            register_all_wrappers()

        obj = dict()
        for field in column_names:
            # The client does not have expression provider, the data will be loaded from cache using static method.
            obj[field] = ExpressionD.expression(inst, field, start_time, end_time, freq)

        data = pd.DataFrame(obj)
        _calendar = Cal.calendar(freq=freq)
        # FIX: `np.int` / `np.bool` are deprecated aliases that were removed
        # in NumPy >= 1.24; use the builtin types instead.
        data.index = _calendar[data.index.values.astype(int)]
        data.index.names = ["datetime"]

        if spans is None:
            return data
        else:
            # keep only rows inside any of the instrument's listing spans
            mask = np.zeros(len(data), dtype=bool)
            for begin, end in spans:
                mask |= (data.index >= begin) & (data.index <= end)
            return data[mask]
class LocalCalendarProvider(CalendarProvider):
    """Local calendar data provider class

    Provide calendar data from local data source.
    """

    def __init__(self, **kwargs):
        # when True, read calendars from the NFS mount point instead of the
        # local provider uri
        self.remote = kwargs.get("remote", False)

    @property
    def _uri_cal(self):
        """Calendar file uri (a `str.format` template taking the freq name)."""
        if self.remote:
            return os.path.join(C.mount_path, "calendars", "{}.txt")
        else:
            return os.path.join(C.provider_uri, "calendars", "{}.txt")

    def _load_calendar(self, freq, future):
        """Load original calendar timestamp from file.

        Parameters
        ----------
        freq : str
            frequency of read calendar file
        future : bool
            whether to read the `<freq>_future` calendar file

        Returns
        ----------
        list
            list of timestamps
        """
        if future:
            fname = self._uri_cal.format(freq + "_future")
            # if future calendar not exists, return current calendar
            if not os.path.exists(fname):
                get_module_logger("data").warning(f"{freq}_future.txt not exists, return current calendar!")
                fname = self._uri_cal.format(freq)
        else:
            fname = self._uri_cal.format(freq)
        if not os.path.exists(fname):
            raise ValueError("calendar not exists for freq " + freq)
        with open(fname) as f:
            # one timestamp per line
            return [pd.Timestamp(x.strip()) for x in f]

    def calendar(self, start_time=None, end_time=None, freq="day", future=False):
        # Return the trading days within [start_time, end_time] (inclusive).
        _calendar, _calendar_index = self._get_calendar(freq, future)
        # the literal string "None" arrives from server requests that
        # stringify their arguments; normalize it back to None
        if start_time == "None":
            start_time = None
        if end_time == "None":
            end_time = None
        # strip
        if start_time:
            start_time = pd.Timestamp(start_time)
            if start_time > _calendar[-1]:
                # requested range starts after the last known trading day
                return np.array([])
        else:
            start_time = _calendar[0]
        if end_time:
            end_time = pd.Timestamp(end_time)
            if end_time < _calendar[0]:
                # requested range ends before the first known trading day
                return np.array([])
        else:
            end_time = _calendar[-1]
        _, _, si, ei = self.locate_index(start_time, end_time, freq, future)
        return _calendar[si : ei + 1]
class LocalInstrumentProvider(InstrumentProvider):
    """Local instrument data provider class

    Provide instrument data from local data source.
    """

    def __init__(self):
        pass

    @property
    def _uri_inst(self):
        """Instrument file uri (a `str.format` template taking the market name)."""
        return os.path.join(C.provider_uri, "instruments", "{}.txt")

    def _load_instruments(self, market):
        """Parse the on-disk instrument file into {instrument: [(begin, end), ...]}.

        Each non-empty line is whitespace-split into either
        ``<inst> <begin> <end>`` (3 fields, `day` freq) or
        ``<inst> <begin_date> <begin_time> <end_date> <end_time>``
        (5 fields, `1min` freq).

        :raises ValueError: if the file is missing or a line is malformed.
        """
        fname = self._uri_inst.format(market)
        if not os.path.exists(fname):
            raise ValueError("instruments not exists for market " + market)
        _instruments = dict()
        with open(fname) as f:
            for line in f:
                inst_time = line.strip().split()
                if not inst_time:
                    # FIX: a blank line used to raise IndexError below
                    continue
                inst = inst_time[0]
                if len(inst_time) == 3:
                    # `day`
                    begin = inst_time[1]
                    end = inst_time[2]
                elif len(inst_time) == 5:
                    # `1min`: date and time are separate whitespace-split fields
                    begin = inst_time[1] + " " + inst_time[2]
                    end = inst_time[3] + " " + inst_time[4]
                else:
                    # FIX: previously a malformed line raised a confusing
                    # NameError (or silently reused the previous line's span);
                    # fail loudly with context instead.
                    raise ValueError(f"malformed instrument line in {fname}: {line.strip()!r}")
                _instruments.setdefault(inst, []).append((pd.Timestamp(begin), pd.Timestamp(end)))
        return _instruments

    def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
        """List the instruments of a stockpool config inside [start_time, end_time]."""
        market = instruments["market"]
        if market in H["i"]:
            # hit the in-memory instrument cache
            _instruments = H["i"][market]
        else:
            _instruments = self._load_instruments(market)
            H["i"][market] = _instruments
        # strip
        # use calendar boundary
        cal = Cal.calendar(freq=freq)
        start_time = pd.Timestamp(start_time or cal[0])
        end_time = pd.Timestamp(end_time or cal[-1])
        # clip each listing span to the requested window and drop empty spans
        _instruments_filtered = {
            inst: list(
                filter(
                    lambda x: x[0] <= x[1],
                    [(max(start_time, x[0]), min(end_time, x[1])) for x in spans],
                )
            )
            for inst, spans in _instruments.items()
        }
        _instruments_filtered = {key: value for key, value in _instruments_filtered.items() if value}
        # apply the dynamic filter pipeline in configured order
        filter_pipe = instruments["filter_pipe"]
        for filter_config in filter_pipe:
            from . import filter as F  # deferred import to avoid an import cycle

            filter_t = getattr(F, filter_config["filter_type"]).from_config(filter_config)
            _instruments_filtered = filter_t(_instruments_filtered, start_time, end_time, freq)
        # as list
        if as_list:
            return list(_instruments_filtered)
        return _instruments_filtered
class LocalFeatureProvider(FeatureProvider):
    """Local feature data provider class

    Provide feature data from local data source.
    """

    def __init__(self, **kwargs):
        # when True, read features from the NFS mount point instead of the
        # local provider uri
        self.remote = kwargs.get("remote", False)

    @property
    def _uri_data(self):
        """Static feature file uri (template: features/<inst>/<field>.<freq>.bin)."""
        if self.remote:
            return os.path.join(C.mount_path, "features", "{}", "{}.{}.bin")
        else:
            return os.path.join(C.provider_uri, "features", "{}", "{}.{}.bin")

    def feature(self, instrument, field, start_index, end_index, freq):
        """Read one feature column from its .bin file.

        `field` arrives with a leading marker character (e.g. "$close");
        strip it and lower-case everything to build the on-disk path.
        """
        # validate
        field = str(field).lower()[1:]
        uri_data = self._uri_data.format(instrument.lower(), field, freq)
        if not os.path.exists(uri_data):
            get_module_logger("data").warning("WARN: data not found for %s.%s" % (instrument, field))
            # FIX: give the empty series an explicit float dtype instead of
            # the deprecated dtype-less pd.Series(), which defaulted to object
            # ("float32" assumed to match the .bin storage — TODO confirm)
            return pd.Series(dtype="float32")
            # raise ValueError('uri_data not found: ' + uri_data)
        # load
        series = read_bin(uri_data, start_index, end_index)
        return series
class LocalExpressionProvider(ExpressionProvider):
    """Local expression data provider class

    Provide expression data from local data source.
    """

    def __init__(self):
        super().__init__()

    def expression(self, instrument, field, start_time=None, end_time=None, freq="day"):
        """Evaluate `field` for `instrument` over [start_time, end_time]."""
        expression = self.get_expression_instance(field)
        start_time = pd.Timestamp(start_time)
        end_time = pd.Timestamp(end_time)
        _, _, start_index, end_index = Cal.locate_index(start_time, end_time, freq, future=False)
        # rolling operators need extra leading/trailing rows; load the extended
        # window first, then slice back to the requested index range below
        lft_etd, rght_etd = expression.get_extended_window_size()
        series = expression.load(instrument, max(0, start_index - lft_etd), end_index + rght_etd, freq)
        # Ensure that each column type is consistent
        # FIXME: The stock data is currently float. If there is other types of data, this part needs to be re-implemented.
        try:
            series = series.astype(float)
        except ValueError:
            pass
        if not series.empty:
            series = series.loc[start_index:end_index]
        return series
class LocalDatasetProvider(DatasetProvider):
    """Local dataset data provider class

    Provide dataset data from local data source.
    """

    def __init__(self):
        pass

    def dataset(self, instruments, fields, start_time=None, end_time=None, freq="day"):
        """Compute the <instrument, datetime>-indexed dataframe locally."""
        instruments_d = self.get_instruments_d(instruments, freq)
        column_names = self.get_column_names(fields)
        # clip the requested range to the trading calendar
        cal = Cal.calendar(start_time, end_time, freq)
        if len(cal) == 0:
            # no trading day in range: empty frame with the expected columns
            return pd.DataFrame(columns=column_names)
        start_time = cal[0]
        end_time = cal[-1]

        data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)

        return data

    @staticmethod
    def multi_cache_walker(instruments, fields, start_time=None, end_time=None, freq="day"):
        """
        This method is used to prepare the expression cache for the client.
        Then the client will load the data from expression cache by itself.
        """
        instruments_d = DatasetProvider.get_instruments_d(instruments, freq)
        column_names = DatasetProvider.get_column_names(fields)
        cal = Cal.calendar(start_time, end_time, freq)
        if len(cal) == 0:
            return
        start_time = cal[0]
        end_time = cal[-1]

        # one process per task, so that memory is released promptly
        if C.maxtasksperchild is None:
            p = Pool(processes=C.kernels)
        else:
            p = Pool(processes=C.kernels, maxtasksperchild=C.maxtasksperchild)

        for inst in instruments_d:
            p.apply_async(
                LocalDatasetProvider.cache_walker,
                args=(
                    inst,
                    start_time,
                    end_time,
                    freq,
                    column_names,
                ),
            )

        p.close()
        p.join()

    @staticmethod
    def cache_walker(inst, start_time, end_time, freq, column_names):
        """
        If the expressions of one instrument haven't been calculated before,
        calculate it and write it into expression cache.
        """
        for field in column_names:
            ExpressionD.expression(inst, field, start_time, end_time, freq)
class ClientCalendarProvider(CalendarProvider):
    """Client calendar data provider class

    Provide calendar data by requesting data from server as a client.
    """

    def __init__(self):
        self.conn = None  # set later via `set_conn`
        self.queue = queue.Queue()  # receives the server response

    def set_conn(self, conn):
        self.conn = conn

    def calendar(self, start_time=None, end_time=None, freq="day", future=False):
        # arguments are stringified for transport; the server parses the
        # literal "None" back to None
        self.conn.send_request(
            request_type="calendar",
            request_content={
                "start_time": str(start_time),
                "end_time": str(end_time),
                "freq": freq,
                "future": future,
            },
            msg_queue=self.queue,
            # convert the raw response entries into timestamps
            msg_proc_func=lambda response_content: [pd.Timestamp(c) for c in response_content],
        )
        result = self.queue.get(timeout=C["timeout"])
        return result
class ClientInstrumentProvider(InstrumentProvider):
    """Client instrument data provider class

    Provide instrument data by requesting data from server as a client.
    """

    def __init__(self):
        self.conn = None  # set later via `set_conn`
        self.queue = queue.Queue()  # receives the server response

    def set_conn(self, conn):
        self.conn = conn

    def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
        def inst_msg_proc_func(response_content):
            # a dict response maps instrument -> [(start, end), ...] spans;
            # a list response is passed through unchanged
            if isinstance(response_content, dict):
                instrument = {
                    i: [(pd.Timestamp(s), pd.Timestamp(e)) for s, e in t] for i, t in response_content.items()
                }
            else:
                instrument = response_content
            return instrument

        self.conn.send_request(
            request_type="instrument",
            request_content={
                "instruments": instruments,
                "start_time": str(start_time),
                "end_time": str(end_time),
                "freq": freq,
                "as_list": as_list,
            },
            msg_queue=self.queue,
            msg_proc_func=inst_msg_proc_func,
        )
        result = self.queue.get(timeout=C["timeout"])
        # server-side failures are shipped back as exception objects
        if isinstance(result, Exception):
            raise result
        get_module_logger("data").debug("get result")
        return result
class ClientDatasetProvider(DatasetProvider):
    """Client dataset data provider class

    Provide dataset data by requesting data from server as a client.
    """

    def __init__(self):
        self.conn = None
        # FIX: the response queue used to be created only in `set_conn`, so
        # calling `dataset` before `set_conn` raised AttributeError
        self.queue = queue.Queue()

    def set_conn(self, conn):
        self.conn = conn

    def dataset(
        self,
        instruments,
        fields,
        start_time=None,
        end_time=None,
        freq="day",
        disk_cache=0,
        return_uri=False,
    ):
        """Request a dataset from the server.

        disk_cache == 0: the server only builds the expression cache and the
        dataset is assembled locally (multi-kernel). Otherwise the server
        builds a dataset cache file which is read back over the NFS mount
        (single process).

        :param return_uri: also return the server-side cache uri when True
        """
        if Inst.get_inst_type(instruments) == Inst.DICT:
            get_module_logger("data").warning(
                "Getting features from a dict of instruments is not recommended because the features will not be "
                "cached! "
                "The dict of instruments will be cleaned every day."
            )

        if disk_cache == 0:
            # Call the server to generate the expression cache, then load the
            # data from the expression cache directly (multi-kernel method).
            self.conn.send_request(
                request_type="feature",
                request_content={
                    "instruments": instruments,
                    "fields": fields,
                    "start_time": start_time,
                    "end_time": end_time,
                    "freq": freq,
                    "disk_cache": 0,
                },
                msg_queue=self.queue,
            )
            feature_uri = self.queue.get(timeout=C["timeout"])
            if isinstance(feature_uri, Exception):
                raise feature_uri
            instruments_d = self.get_instruments_d(instruments, freq)
            column_names = self.get_column_names(fields)
            cal = Cal.calendar(start_time, end_time, freq)
            if len(cal) == 0:
                return pd.DataFrame(columns=column_names)
            start_time = cal[0]
            end_time = cal[-1]

            data = self.dataset_processor(instruments_d, column_names, start_time, end_time, freq)
            if return_uri:
                return data, feature_uri
            return data
        else:
            # Call the server to generate the data-set cache, get the uri of
            # the cache file, then load the data from the file on NFS directly
            # (single-process implementation).
            self.conn.send_request(
                request_type="feature",
                request_content={
                    "instruments": instruments,
                    "fields": fields,
                    "start_time": start_time,
                    "end_time": end_time,
                    "freq": freq,
                    "disk_cache": 1,
                },
                msg_queue=self.queue,
            )
            # - Done in callback
            feature_uri = self.queue.get(timeout=C["timeout"])
            if isinstance(feature_uri, Exception):
                raise feature_uri
            get_module_logger("data").debug("get result")
            try:
                # pre-mound nfs, used for demo
                mnt_feature_uri = os.path.join(C.mount_path, C.dataset_cache_dir_name, feature_uri)
                df = DiskDatasetCache.read_data_from_cache(mnt_feature_uri, start_time, end_time, fields)
                get_module_logger("data").debug("finish slicing data")
                if return_uri:
                    return df, feature_uri
                return df
            except AttributeError:
                raise IOError("Unable to fetch instruments from remote server!")
class BaseProvider:
    """Local provider class

    To keep compatible with old qlib provider.
    """

    def calendar(self, start_time=None, end_time=None, freq="day", future=False):
        """Proxy to the registered calendar provider."""
        return Cal.calendar(start_time, end_time, freq, future=future)

    def instruments(self, market="all", filter_pipe=None, start_time=None, end_time=None):
        """Build a stockpool config; `start_time`/`end_time` are accepted only
        for backward compatibility and have no effect."""
        if start_time is not None or end_time is not None:
            get_module_logger("Provider").warning(
                "The instruments corresponds to a stock pool. "
                "Parameters `start_time` and `end_time` does not take effect now."
            )
        return InstrumentProvider.instruments(market, filter_pipe)

    def list_instruments(self, instruments, start_time=None, end_time=None, freq="day", as_list=False):
        """Proxy to the registered instrument provider."""
        return Inst.list_instruments(instruments, start_time, end_time, freq, as_list)

    def features(
        self,
        instruments,
        fields,
        start_time=None,
        end_time=None,
        freq="day",
        disk_cache=None,
    ):
        """
        disk_cache : int
            whether to skip(0)/use(1)/replace(2) disk_cache

        This function will try to use cache method which has a keyword `disk_cache`,
        and will use provider method if a type error is raised because the DatasetD instance
        is a provider class.
        """
        disk_cache = C.default_disk_cache if disk_cache is None else disk_cache
        if C.disable_disk_cache:
            # FIX: use the documented int encoding (0 == skip) instead of a
            # bool; behavior is unchanged (False == 0) but the type is now
            # consistent with the docstring and the cache API.
            disk_cache = 0
        try:
            return DatasetD.dataset(instruments, fields, start_time, end_time, freq, disk_cache)
        except TypeError:
            # the registered provider does not accept a `disk_cache` argument
            return DatasetD.dataset(instruments, fields, start_time, end_time, freq)
class LocalProvider(BaseProvider):
    def _uri(self, type, **kwargs):
        """_uri
        The server hope to get the uri of the request. The uri will be decided
        by the dataprovider. For ex, different cache layer has different uri.

        :param type: The type of resource for the uri
        :param **kwargs:
        """
        # NOTE(review): an unknown `type` silently returns None — confirm the
        # server treats a None uri as an error.
        if type == "calendar":
            return Cal._uri(**kwargs)
        elif type == "instrument":
            return Inst._uri(**kwargs)
        elif type == "feature":
            return DatasetD._uri(**kwargs)

    def features_uri(self, instruments, fields, start_time, end_time, freq, disk_cache=1):
        """features_uri

        Return the uri of the generated cache of features/dataset

        :param disk_cache:
        :param instruments:
        :param fields:
        :param start_time:
        :param end_time:
        :param freq:
        """
        # NOTE(review): `_dataset_uri` is not defined on DatasetProvider in
        # this file — presumably supplied by the registered dataset-cache
        # class behind the DatasetD wrapper; verify.
        return DatasetD._dataset_uri(instruments, fields, start_time, end_time, freq, disk_cache)
class ClientProvider(BaseProvider):
    """Client Provider

    Requesting data from server as a client. Can propose requests:

        - Calendar : Directly respond a list of calendars
        - Instruments (without filter): Directly respond a list/dict of instruments
        - Instruments (with filters):  Respond a list/dict of instruments
        - Features : Respond a cache uri

    The general workflow is described as follows:
    When the user use client provider to propose a request, the client provider will connect the server and send the request. The client will start to wait for the response. The response will be made instantly indicating whether the cache is available. The waiting procedure will terminate only when the client get the reponse saying `feature_available` is true.
    `BUG` : Everytime we make request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of python-socketIO client.
    """

    def __init__(self):
        # deferred import: the client stack is only needed in client mode
        from .client import Client

        self.client = Client(C.flask_server, C.flask_port)
        self.logger = get_module_logger(self.__class__.__name__)
        # hand the shared connection to each client-side provider
        # NOTE(review): `Cal` is a Wrapper instance, so this isinstance check
        # looks always-False (the registered provider is what would match) —
        # confirm whether `Cal.set_conn` is meant to run here.
        if isinstance(Cal, ClientCalendarProvider):
            Cal.set_conn(self.client)
        Inst.set_conn(self.client)
        if hasattr(DatasetD, "provider"):
            # DatasetD is a cache layer wrapping the real client provider
            DatasetD.provider.set_conn(self.client)
        else:
            DatasetD.set_conn(self.client)
class Wrapper(object):
    """Data Provider Wrapper

    A late-binding shell: attribute access is forwarded to the provider
    registered via `register`, and fails loudly before registration.
    """

    def __init__(self):
        self._provider = None

    def register(self, provider):
        self._provider = provider

    def __getattr__(self, key):
        # Only reached for attributes missing on the wrapper itself.
        provider = self._provider
        if provider is None:
            raise AttributeError("Please run qlib.init() first using qlib")
        return getattr(provider, key)
def get_cls_from_name(cls_name):
    """Look up a provider class by name inside the `qlib.data` module."""
    data_module = importlib.import_module(".data", package="qlib")
    return getattr(data_module, cls_name)
def get_provider_obj(config, **params):
    """Instantiate a provider described by `config`.

    :param config: either a class name (str) or a dict of the form
        ``{"class": <name>, "kwargs": {...}}``
    :param params: extra keyword arguments forwarded to the constructor
        (overridden by the config dict's own kwargs)
    """
    if isinstance(config, dict):
        # FIX: `kwargs` is optional in the config dict; previously a missing
        # key raised KeyError
        params.update(config.get("kwargs", {}))
        config = config["class"]
    return get_cls_from_name(config)(**params)
def register_wrapper(wrapper, cls_or_obj):
    """register_wrapper

    :param wrapper: A wrapper of all kinds of providers
    :param cls_or_obj: A class or class name or object instance in data/data.py
    """
    # resolve a class name to the class itself first
    if isinstance(cls_or_obj, str):
        cls_or_obj = get_cls_from_name(cls_or_obj)
    # instantiate classes; pass ready-made instances through untouched
    if isinstance(cls_or_obj, type):
        obj = cls_or_obj()
    else:
        obj = cls_or_obj
    wrapper.register(obj)
# Module-level wrapper singletons. They stay empty shells until
# `register_all_wrappers()` (driven by qlib.init) registers a concrete
# provider on each of them; any attribute access before that raises.
Cal = Wrapper()  # calendar provider
Inst = Wrapper()  # instrument provider
FeatureD = Wrapper()  # feature provider
ExpressionD = Wrapper()  # expression provider
DatasetD = Wrapper()  # dataset provider
D = Wrapper()  # the public data API facade
def register_all_wrappers():
    """Instantiate every provider described in the config ``C`` and register it
    with the corresponding module-level wrapper.

    Optional cache layers (calendar/expression/dataset cache) wrap the raw
    provider when the matching ``C.*_cache`` attribute is configured.
    """
    logger = get_module_logger("data")

    _calendar_provider = get_provider_obj(C.calendar_provider)
    if getattr(C, "calendar_cache", None) is not None:
        _calendar_provider = get_provider_obj(C.calendar_cache, provider=_calendar_provider)
    register_wrapper(Cal, _calendar_provider)
    # BUG FIX: this message read ``C.calenar_cache`` (typo), which raised
    # AttributeError as soon as debug logging evaluated the f-string.  Use
    # getattr with a default so a missing cache config cannot raise either.
    logger.debug(f"registering Cal {C.calendar_provider}-{getattr(C, 'calendar_cache', None)}")

    register_wrapper(Inst, C.instrument_provider)
    logger.debug(f"registering Inst {C.instrument_provider}")

    if getattr(C, "feature_provider", None) is not None:
        feature_provider = get_provider_obj(C.feature_provider)
        register_wrapper(FeatureD, feature_provider)
        logger.debug(f"registering FeatureD {C.feature_provider}")

    if getattr(C, "expression_provider", None) is not None:
        # This provider is unnecessary in client provider
        _eprovider = get_provider_obj(C.expression_provider)
        if getattr(C, "expression_cache", None) is not None:
            _eprovider = get_provider_obj(C.expression_cache, provider=_eprovider)
        register_wrapper(ExpressionD, _eprovider)
        # BUG FIX: message said "ExpressioneD" and accessed C.expression_cache
        # unconditionally even though only expression_provider was guarded.
        logger.debug(
            f"registering ExpressionD {C.expression_provider}-{getattr(C, 'expression_cache', None)}"
        )

    _dprovider = get_provider_obj(C.dataset_provider)
    if getattr(C, "dataset_cache", None) is not None:
        _dprovider = get_provider_obj(C.dataset_cache, provider=_dprovider)
    register_wrapper(DatasetD, _dprovider)
    # BUG FIX: message said "DataseteD"; dataset_cache is read defensively.
    logger.debug(f"registering DatasetD {C.dataset_provider}-{getattr(C, 'dataset_cache', None)}")

    register_wrapper(D, C.provider)
    logger.debug(f"registering D {C.provider}")
| qlib/data/data.py | 37,793 | Local provider class
To keep compatible with old qlib provider.
Calendar provider base class
Provide calendar data.
Client calendar data provider class
Provide calendar data by requesting data from server as a client.
Client dataset data provider class
Provide dataset data by requesting data from server as a client.
Client instrument data provider class
Provide instrument data by requesting data from server as a client.
Client Provider
Requesting data from server as a client. Can propose requests:
- Calendar : Directly respond a list of calendars
- Instruments (without filter): Directly respond a list/dict of instruments
- Instruments (with filters): Respond a list/dict of instruments
- Features : Respond a cache uri
The general workflow is described as follows:
When the user uses the client provider to propose a request, the client provider will connect to the server and send the request. The client will then start waiting for the response. A response is made instantly, indicating whether the cache is available. The waiting procedure terminates only when the client gets a response saying `feature_available` is true.
`BUG` : Every time we make a request for certain data we need to connect to the server, wait for the response and disconnect from it. We can't make a sequence of requests within one connection. You can refer to https://python-socketio.readthedocs.io/en/latest/client.html for documentation of the python-socketIO client.
Dataset provider class
Provide Dataset data.
Expression provider class
Provide Expression data.
Feature provider class
Provide feature data.
Instrument provider base class
Provide instrument data.
Local calendar data provider class
Provide calendar data from local data source.
Local dataset data provider class
Provide dataset data from local data source.
Local expression data provider class
Provide expression data from local data source.
Local feature data provider class
Provide feature data from local data source.
Local instrument data provider class
Provide instrument data from local data source.
Data Provider Wrapper
Load calendar using memcache.
Parameters
----------
freq : str
frequency of read calendar file
future : bool
whether including future trading day
Returns
-------
list
list of timestamps
dict
dict composed by timestamp as key and index as value for fast search
Load original calendar timestamp from file.
Parameters
----------
freq : str
frequency of read calendar file
Returns
----------
list
list of timestamps
Get the uri of calendar generation task.
Get task uri, used when generating rabbitmq task in qlib_server
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
_uri
The server hope to get the uri of the request. The uri will be decided
by the dataprovider. For ex, different cache layer has different uri.
:param type: The type of resource for the uri
:param **kwargs:
Calendar file uri.
Static feature file uri.
Instrument file uri.
If the expressions of one instrument haven't been calculated before,
calculate it and write it into expression cache.
Get calendar of certain market in given time range.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
----------
list
calendar list
Get dataset data.
Parameters
----------
instruments : list or dict
list/dict of instruments or dict of stockpool config
fields : list
list of feature instances
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency
Returns
----------
pd.DataFrame
a pandas dataframe with <instrument, datetime> index
Load and process the data, return the data set.
- default using multi-kernel method.
Get Expression data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain expression
Calculate the expressions for one instrument, return a df result.
If the expression has been calculated before, load from cache.
return value: A data frame with index 'datetime' and other data columns.
Get feature data.
Parameters
----------
instrument : str
a certain instrument
field : str
a certain field of feature
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
Returns
-------
pd.Series
data of a certain feature
disk_cache : int
whether to skip(0)/use(1)/replace(2) disk_cache
This function will try to use cache method which has a keyword `disk_cache`,
and will use provider method if a type error is raised because the DatasetD instance
is a provider class.
features_uri
Return the uri of the generated cache of features/dataset
:param disk_cache:
:param instruments:
:param fields:
:param start_time:
:param end_time:
:param freq:
Get column names from input fields
Parse different types of input instruments to output instruments_d
Wrong format of input instruments will lead to exception.
Get the general config dictionary for a base market adding several dynamic filters.
Parameters
----------
market : str
market/industry/index shortname, e.g. all/sse/szse/sse50/csi300/csi500
filter_pipe : list
the list of dynamic filters
Returns
----------
dict
dict of stockpool config
{`market`=>base market name, `filter_pipe`=>list of filters}
example :
{'market': 'csi500',
'filter_pipe': [{'filter_type': 'ExpressionDFilter',
'rule_expression': '$open<40',
'filter_start_time': None,
'filter_end_time': None,
'keep': False},
{'filter_type': 'NameDFilter',
'name_rule_re': 'SH[0-9]{4}55',
'filter_start_time': None,
'filter_end_time': None}]}
List the instruments based on a certain stockpool config.
Parameters
----------
instruments : dict
stockpool config
start_time : str
start of the time range
end_time : str
end of the time range
as_list : bool
return instruments as list or dict
Returns
-------
dict or list
instruments list or dictionary with time spans
Locate the start time index and end time index in a calendar under certain frequency.
Parameters
----------
start_time : str
start of the time range
end_time : str
end of the time range
freq : str
time frequency, available: year/quarter/month/week/day
future : bool
whether including future trading day
Returns
-------
pd.Timestamp
the real start time
pd.Timestamp
the real end time
int
the index of start time
int
the index of end time
This method is used to prepare the expression cache for the client.
Then the client will load the data from expression cache by itself.
register_all_wrappers
register_wrapper
:param wrapper: A wrapper of all kinds of providers
:param cls_or_obj: A class or class name or object instance in data/data.py
Copyright (c) Microsoft Corporation. Licensed under the MIT License. for fast search the order of the filters will affect the result, so we need to keep the order instruments type dict of stockpool config dict of instruments and timestamp list or tuple of a group of instruments parse and check the input fields One process for one task, so that the memory will be freed quicker. NOTE: Python version >= 3.6; in versions after python3.6, dict will always guarantee the insertion order NOTE: This place is compatible with windows, windows multi-process is spawn The client does not have expression provider, the data will be loaded from cache using static method. if future calendar not exists, return current calendar strip `day` `1min` strip use calendar boundary filter as list validate raise ValueError('uri_data not found: ' + uri_data) load Ensure that each column type is consistent FIXME: The stock data is currently float. If there is other types of data, this part needs to be re-implemented. - Done in callback pre-mound nfs, used for demo This provider is unnecessary in client provider | 8,597 | en | 0.679407 |
from flask_unchained.bundles.sqlalchemy import SessionManager, SQLAlchemyUnchained
def setup(db: SQLAlchemyUnchained):
    """Create a throwaway ``Foo`` model plus a session manager for one test."""
    manager = SessionManager(db)

    class Foo(db.Model):
        class Meta:
            lazy_mapped = False

        name = db.Column(db.String)

    db.create_all()
    return Foo, manager
class TestSessionManager:
    """Exercise SessionManager.save / save_all / delete and their commit flag."""

    def test_save(self, db: SQLAlchemyUnchained):
        Foo, manager = setup(db)
        instance = Foo(name='foo')

        manager.save(instance)
        # added to the session, but nothing has been committed yet
        assert instance in db.session
        with db.session.no_autoflush:
            assert Foo.q.get_by(name='foo') is None

        # commit=True flushes and commits in the same call
        manager.save(instance, commit=True)
        assert Foo.q.get_by(name='foo') == instance

    def test_save_all(self, db: SQLAlchemyUnchained):
        Foo, manager = setup(db)
        instances = [Foo(name=n) for n in ('one', 'two', 'three')]

        manager.save_all(instances)
        with db.session.no_autoflush:
            for instance in instances:
                assert instance in db.session
                assert Foo.q.get_by(name=instance.name) is None

        manager.save_all(instances, commit=True)
        for instance in instances:
            assert Foo.q.get_by(name=instance.name) == instance

    def test_delete(self, db: SQLAlchemyUnchained):
        Foo, manager = setup(db)
        one, two = Foo(name='one'), Foo(name='two')
        manager.save_all([one, two], commit=True)
        for instance in (one, two):
            assert instance in db.session
            assert Foo.q.get_by(name=instance.name) == instance

        manager.delete(one, commit=True)
        assert one not in db.session
        assert Foo.q.get_by(name='one') is None
        assert two in db.session
        assert Foo.q.get_by(name='two') == two
| tests/bundles/sqlalchemy/services/test_session_manager.py | 1,970 | check it's added to the session but not committed check the commit kwarg works | 78 | en | 0.944622 |
# Corresponds to video 6 of the course
# First steps
n = input('Digite algo: ')
print(n.isnumeric())  # True if every character is a digit
print(n.isalpha())  # True if every character is alphabetic
print(n.isalnum())  # True if every character is alphanumeric
print(n.isupper())  # True if all cased characters are uppercase
#!/usr/bin/env python3
import numpy as np
import qiskit
# Number of free parameters the optimizer must supply to the ansatz below.
num_params = 2 # make sure you set this correctly to the number of parameters used by the ansatz
## Previously used for Helium VQE in Rigetti implementation
#
def tiny_ansatz_2(current_params):
    """Build the 2-qubit, 2-parameter ansatz circuit (Helium VQE style).

    ``current_params[0]`` and ``current_params[1]`` are the Rz rotation
    angles of the two CX-Rz-CX blocks.  The exact gate order matters; do not
    reorder.
    """
    q = qiskit.QuantumRegister(2, "q")
    qc = qiskit.QuantumCircuit(q, qiskit.ClassicalRegister(2, "c"))
    # State preparation: flip both qubits, then change basis (Rx on q0, H on q1).
    qc.x(q[0])
    qc.x(q[1])
    qc.rx( np.pi/2, q[0])
    qc.h(q[1])
    # First parameterized block: CX - Rz(theta0) - CX.
    qc.cx(q[0], q[1])
    qc.rz(current_params[0], q[1])
    qc.cx(q[0], q[1])
    # Undo the first basis change and enter the second one.
    qc.rx(-np.pi/2, q[0])
    qc.h(q[1])
    qc.h(q[0])
    qc.rx( np.pi/2, q[1])
    # Second parameterized block: CX - Rz(theta1) - CX.
    qc.cx(q[0], q[1])
    qc.rz(current_params[1], q[1])
    qc.cx(q[0], q[1])
    # Final basis restoration.
    qc.h(q[0])
    qc.rx(-np.pi/2, q[1])
    return qc
| soft/template.qiskit.ansatz/python_code/tiny2/custom_ansatz.py | 733 | !/usr/bin/env python3 make sure you set this correctly to the number of parameters used by the ansatz Previously used for Helium VQE in Rigetti implementation | 158 | en | 0.60755 |
import os
import time
import traceback

from conans.client.tools.files import human_size
from conans.errors import (AuthenticationException, ConanConnectionError, ConanException,
                           ForbiddenException, NotFoundException)
from conans.util.files import mkdir, save_append, sha1sum, to_file_bytes
from conans.util.log import logger
from conans.util.tracer import log_download
class Uploader(object):
    """Upload local files over HTTP PUT through the shared ``requester``.

    Supports server-side deduplication (Artifactory checksum deploy) and
    retries with a progress bar while streaming the file in chunks.
    """

    def __init__(self, requester, output, verify, chunk_size=1000):
        self.chunk_size = chunk_size  # bytes read per chunk while streaming
        self.output = output  # user-facing output / progress writer
        self.requester = requester  # requests-like session object
        self.verify = verify  # SSL verification flag or CA bundle path

    def upload(self, url, abs_path, auth=None, dedup=False, retry=1, retry_wait=0, headers=None):
        """Upload the file at ``abs_path`` to ``url``, optionally trying a
        checksum-based dedup PUT first; returns the HTTP response."""
        # Send always the header with the Sha1
        headers = headers or {}
        headers["X-Checksum-Sha1"] = sha1sum(abs_path)
        if dedup:
            # Empty-body PUT: the server may accept the file by checksum alone.
            dedup_headers = {"X-Checksum-Deploy": "true"}
            if headers:
                dedup_headers.update(headers)
            response = self.requester.put(url, data="", verify=self.verify, headers=dedup_headers,
                                          auth=auth)
            if response.status_code == 403:
                # 403 without a token means credentials are needed, not forbidden.
                if auth.token is None:
                    raise AuthenticationException(response.content)
                raise ForbiddenException(response.content)
            if response.status_code == 201:  # Artifactory returns 201 if the file is there
                return response
            self.output.info("")
        # Actual transfer of the real content
        it = load_in_chunks(abs_path, self.chunk_size)
        # Now it is a chunked read file
        file_size = os.stat(abs_path).st_size
        it = upload_with_progress(file_size, it, self.chunk_size, self.output)
        # Now it will print progress in each iteration
        iterable_to_file = IterableToFileAdapter(it, file_size)
        # Now it is prepared to work with request
        ret = call_with_retry(self.output, retry, retry_wait, self._upload_file, url,
                              data=iterable_to_file, headers=headers, auth=auth)
        return ret

    def _upload_file(self, url, data, headers, auth):
        """Single PUT attempt; wraps unexpected failures in ConanException."""
        try:
            response = self.requester.put(url, data=data, verify=self.verify,
                                          headers=headers, auth=auth)
            if response.status_code == 403:
                if auth.token is None:
                    raise AuthenticationException(response.content)
                raise ForbiddenException(response.content)
        except ConanException:
            raise
        except Exception as exc:
            raise ConanException(exc)
        return response
class IterableToFileAdapter(object):
    """Expose an iterable of byte chunks through a minimal file-like API.

    ``requests`` streams a body of known total size from any object offering
    ``read`` and ``__len__``: each ``read`` yields the next chunk, and an
    empty bytestring once the iterable is exhausted.
    """

    def __init__(self, iterable, total_size):
        self.iterator = iter(iterable)
        self.total_size = total_size

    def read(self, size=-1):  # ``size`` is ignored; chunking is fixed upstream
        return next(self.iterator, b'')

    def __len__(self):
        return self.total_size

    def __iter__(self):
        return iter(self.iterator)
class upload_with_progress(object):
    """Iterator wrapper that redraws a 50-unit progress bar per chunk consumed."""

    def __init__(self, totalsize, iterator, chunk_size, output):
        self.totalsize = totalsize
        self.output = output
        self.chunk_size = chunk_size
        # Approximate chunk count; denominator of the progress bar.
        self.aprox_chunks = self.totalsize * 1.0 / chunk_size
        self.groups = iterator

    def __iter__(self):
        last_progress = None
        for index, chunk in enumerate(self.groups):
            if self.aprox_chunks == 0:
                # Zero-byte upload: force the bar straight to completion.
                index = self.aprox_chunks
            units = progress_units(index, self.aprox_chunks)
            progress = human_readable_progress(index * self.chunk_size, self.totalsize)
            if last_progress != units:  # Avoid screen refresh if nothing has change
                print_progress(self.output, units, progress)
                last_progress = units
            yield chunk
        # Always finish with a full (50/50) bar once the iterator is drained.
        progress = human_readable_progress(self.totalsize, self.totalsize)
        print_progress(self.output, progress_units(100, 100), progress)

    def __len__(self):
        return self.totalsize
def load_in_chunks(path, chunk_size=1024):
    """Lazily yield the file at ``path`` piece by piece (generator).

    Default chunk size: 1k.  Stops at EOF, when ``read`` returns ``b''``.
    """
    with open(path, 'rb') as file_object:
        for data in iter(lambda: file_object.read(chunk_size), b''):
            yield data
class Downloader(object):
    """Download remote files over HTTP GET through the shared ``requester``."""

    def __init__(self, requester, output, verify, chunk_size=1000):
        self.chunk_size = chunk_size  # NOTE(review): unused here; chunking is decided in _download_data
        self.output = output
        self.requester = requester
        self.verify = verify  # SSL verification flag or CA bundle path

    def download(self, url, file_path=None, auth=None, retry=3, retry_wait=0, overwrite=False,
                 headers=None):
        """Fetch ``url`` into ``file_path`` (or return the raw bytes when no
        path is given), retrying failed attempts via call_with_retry."""
        if file_path and not os.path.isabs(file_path):
            file_path = os.path.abspath(file_path)
        if file_path and os.path.exists(file_path):
            if overwrite:
                if self.output:
                    self.output.warn("file '%s' already exists, overwriting" % file_path)
            else:
                # Should not happen, better to raise, probably we had to remove
                # the dest folder before
                raise ConanException("Error, the file to download already exists: '%s'" % file_path)
        return call_with_retry(self.output, retry, retry_wait, self._download_file, url, auth,
                               headers, file_path)

    def _download_file(self, url, auth, headers, file_path):
        """Single GET attempt; translates HTTP errors into Conan exceptions."""
        t1 = time.time()
        try:
            response = self.requester.get(url, stream=True, verify=self.verify, auth=auth,
                                          headers=headers)
        except Exception as exc:
            raise ConanException("Error downloading file %s: '%s'" % (url, exc))
        if not response.ok:
            if response.status_code == 404:
                raise NotFoundException("Not found: %s" % url)
            elif response.status_code == 401:
                raise AuthenticationException()
            raise ConanException("Error %d downloading file %s" % (response.status_code, url))
        try:
            logger.debug("DOWNLOAD: %s" % url)
            data = self._download_data(response, file_path)
            duration = time.time() - t1
            log_download(url, duration)  # tracing hook: record url + elapsed time
            return data
        except Exception as e:
            logger.debug(e.__class__)
            logger.debug(traceback.format_exc())
            # If this part failed, it means problems with the connection to server
            raise ConanConnectionError("Download failed, check server, possibly try again\n%s"
                                       % str(e))

    def _download_data(self, response, file_path):
        """Consume the response body into memory (returns bytes) or append it
        to ``file_path`` (returns None), drawing progress along the way."""
        ret = bytearray()
        total_length = response.headers.get('content-length')
        if total_length is None:  # no content length header
            # Unknown size: read everything at once, no streaming progress.
            if not file_path:
                ret += response.content
            else:
                if self.output:
                    total_length = len(response.content)
                    progress = human_readable_progress(total_length, total_length)
                    print_progress(self.output, 50, progress)
                save_append(file_path, response.content)
        else:
            total_length = int(total_length)
            encoding = response.headers.get('content-encoding')
            gzip = (encoding == "gzip")
            # chunked can be a problem: https://www.greenbytes.de/tech/webdav/rfc2616.html#rfc.section.4.4
            # It will not send content-length or should be ignored

            def download_chunks(file_handler=None, ret_buffer=None):
                """Write to a buffer or to a file handler"""
                # Larger chunks when writing to disk, smaller when buffering.
                chunk_size = 1024 if not file_path else 1024 * 100
                download_size = 0
                last_progress = None
                for data in response.iter_content(chunk_size):
                    download_size += len(data)
                    if ret_buffer is not None:
                        ret_buffer.extend(data)
                    if file_handler is not None:
                        file_handler.write(to_file_bytes(data))
                    if self.output:
                        units = progress_units(download_size, total_length)
                        progress = human_readable_progress(download_size, total_length)
                        if last_progress != units:  # Avoid screen refresh if nothing has change
                            print_progress(self.output, units, progress)
                            last_progress = units
                return download_size

            if file_path:
                mkdir(os.path.dirname(file_path))
                with open(file_path, 'wb') as handle:
                    dl_size = download_chunks(file_handler=handle)
            else:
                dl_size = download_chunks(ret_buffer=ret)
            response.close()
            # A gzip-encoded body can legitimately differ from content-length.
            if dl_size != total_length and not gzip:
                raise ConanException("Transfer interrupted before "
                                     "complete: %s < %s" % (dl_size, total_length))
        if not file_path:
            return bytes(ret)
        else:
            return
def progress_units(progress, total):
    """Map ``progress/total`` onto 0..50 bar units; 0 when ``total`` is zero."""
    if not total:
        return 0
    units = int(50 * progress / total)
    return units if units < 50 else 50
def human_readable_progress(bytes_transferred, total_bytes):
    """Format transfer progress as ``<done>/<total>`` in human-readable sizes."""
    return "{}/{}".format(human_size(bytes_transferred), human_size(total_bytes))
def print_progress(output, units, progress=""):
    """Redraw a 50-character progress bar on terminal outputs; no-op otherwise."""
    if not output.is_terminal:
        return
    bar = '=' * units + ' ' * (50 - units)
    output.rewrite_line("[%s] %s" % (bar, progress))
def call_with_retry(out, retry, retry_wait, method, *args, **kwargs):
    """Invoke ``method`` up to ``retry`` times, sleeping ``retry_wait`` seconds
    between attempts.

    ``NotFoundException`` propagates immediately (retrying cannot help), and
    the last ``ConanException`` is re-raised once all attempts are exhausted.
    """
    last_attempt = retry - 1
    for attempt in range(retry):
        try:
            return method(*args, **kwargs)
        except NotFoundException:
            raise
        except ConanException as exc:
            if attempt == last_attempt:
                raise
            if out:
                out.error(exc)
                out.info("Waiting %d seconds to retry..." % retry_wait)
            time.sleep(retry_wait)
| conans/client/rest/uploader_downloader.py | 10,205 | Write to a buffer or to a file handler
Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k.
Send always the header with the Sha1 Artifactory returns 201 if the file is there Actual transfer of the real content Now it is a chunked read file Now it will print progress in each iteration Now it is prepared to work with request @UnusedVariable Avoid screen refresh if nothing has change Should not happen, better to raise, probably we had to remove the dest folder before If this part failed, it means problems with the connection to server no content length header chunked can be a problem: https://www.greenbytes.de/tech/webdav/rfc2616.htmlrfc.section.4.4 It will not send content-length or should be ignored Avoid screen refresh if nothing has change | 780 | en | 0.85461 |
"""example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route /polls/* to the polls app's own URLconf and /admin/ to the admin site.
urlpatterns = [
    path("polls/", include("polls.urls")),
    path("admin/", admin.site.urls),
]
| example/example/urls.py | 801 | example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 623 | en | 0.616572 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test privcyd with different proxy configuration.
Test plan:
- Start privcyd's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on privcyd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create privcyds that connect to them
- Manipulate the privcyds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
# First SOCKS5 stub port: skip past the two ranges reserved for node p2p/rpc.
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
    """Start four nodes with different -proxy/-onion settings against local
    SOCKS5 stub servers and verify each outgoing connection type is proxied
    as configured (full test plan in the module docstring)."""

    def set_test_params(self):
        # Nodes: 0 = -proxy only, 1 = -proxy + -onion, 2 = randomized -proxy,
        # 3 = IPv6 proxy (only exercised when local IPv6 is available).
        self.num_nodes = 4

    def setup_nodes(self):
        """Spin up the SOCKS5 stub servers, then start the nodes pointed at them."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()

    def node_test(self, node, proxies, auth, test_onion=True):
        """Drive one node through IPv4/IPv6/onion/DNS addnode attempts and
        verify the SOCKS5 command each stub proxy receives; returns the list
        of observed commands so callers can inspect credentials."""
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 8333)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv

    def run_test(self):
        """Exercise each node's proxy configuration, then cross-check the
        getnetworkinfo RPC reports the same settings."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)

        def networks_dict(d):
            # Index getnetworkinfo's 'networks' array by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r

        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            assert_equal(n3['onion']['reachable'], False)
# Standard functional-test entry point.
if __name__ == '__main__':
    ProxyTest().main()
| test/functional/proxy_test.py | 8,339 | Test privcyd with different proxy configuration.
Test plan:
- Start privcyd's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on privcyd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create privcyds that connect to them
- Manipulate the privcyds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
!/usr/bin/env python3 Copyright (c) 2015-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Start after p2p and rpc ports Create two proxies on different ports ... one unauthenticated ... one supporting authenticated and unauthenticated (Tor) ... one on IPv6 with similar configuration Note: proxies are not used to connect to local nodes this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost Test: outgoing IPv4 connection through node Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 Test: outgoing IPv6 connection through node Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 Test: outgoing onion connection through node Test: outgoing DNS name connection through node basic -proxy -proxy plus -onion -proxy plus -onion, -proxyrandomize Check that credentials as used for -proxyrandomize connections are unique proxy on IPv6 localhost test RPC getnetworkinfo | 1,980 | en | 0.759737 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as OriginalUserAdmin
from django.contrib.auth.models import User as OriginalUser
from cms.utils.compat.dj import get_user_model
# Only register a custom UserAdmin when the project swaps out the stock auth
# User model (the model's _meta.swapped flag is set); otherwise Django's own
# admin registration for User stays in effect.
if getattr(OriginalUser._meta, 'swapped', False):
    class UserAdmin(OriginalUserAdmin):
        # 'get_full_name' is rendered by calling the model method of that name.
        list_display = ('username', 'email', 'get_full_name', 'is_staff')
        search_fields = ('username', 'email',)
    admin.site.register(get_user_model(), UserAdmin)
| cms/test_utils/project/customuserapp/admin.py | 500 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""Author: Brandon Trabucco
Calculate the part of speech tagger using the brown corpus.
"""
import glove.configuration
import glove.tagger
# Build the part-of-speech tagger (trained on the Brown corpus) and write
# its serialized form into the current directory.
tagger_config = glove.configuration.TaggerConfiguration(tagger_dir="./")
glove.tagger.dump(tagger_config)
| tagger/calculate_tagger.py | 243 | Author: Brandon Trabucco
Calculate the part of speech tagger using the brown corpus. | 84 | en | 0.526124 |
import torch
from transformers import *
import pdb
import operator
from collections import OrderedDict
import sys
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Model identifier: a HuggingFace hub name or a local checkpoint directory.
PATH='bert-base-cased'
# Load pre-trained model tokenizer (vocabulary); cased model, so keep case.
tokenizer = BertTokenizer.from_pretrained(PATH,do_lower_case=False)
model = BertForMaskedLM.from_pretrained(PATH)
# Inference only: switch off dropout/batch-norm updating.
model.eval()
def get_sent():
    """Read one sentence from stdin and wrap it in BERT's [CLS]/[SEP] markers.

    A period is appended when missing so the sentence always ends with a
    throwaway token that the driver loop can later overwrite with [MASK].
    """
    print("Enter sentence:")
    sentence = input()
    if not sentence.endswith("."):
        print("Appending period to do dummy masking")
        sentence += " ."
    return '[CLS] ' + sentence + '[SEP]'
def print_tokens(tokenized_text):
    """Print every token as " index:token" on a single line, then a blank line."""
    pieces = [" %d:%s" % (idx, tok) for idx, tok in enumerate(tokenized_text)]
    print("".join(pieces))
    print()
def get_pos():
    """Prompt until the user enters a valid integer position and return it.

    Entering 0 exits the program, matching the retry prompt's "(0 to quit)"
    contract.  Fixes in this revision: the bare ``except`` is narrowed to
    ``ValueError``; a second invalid entry no longer raises an uncaught
    exception (the original re-called ``int(input())`` outside the ``try``);
    and the 0-quit is honored on every path, not only the retry path.
    """
    while True:
        try:
            masked_index = int(input())
        except ValueError:
            print("Enter valid number: (0 to quit)")
            continue
        if masked_index == 0:
            print("Quitting")
            sys.exit()
        return masked_index
# Interactive driver: read a sentence, mask its final (dummy) token, run the
# masked-LM once, then repeatedly let the user inspect the prediction
# distribution at any position.  NOTE(review): the inner `while True` never
# breaks, so each run processes a single sentence; the only way out is
# get_pos() calling sys.exit().
while (True):
    text = get_sent()
    tokenized_text = tokenizer.tokenize(text)
    print_tokens(tokenized_text)
    #pdb.set_trace()
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Create the segments tensors (single-sentence input -> all segment 0).
    segments_ids = [0] * len(tokenized_text)
    # Mask the token just before [SEP] (the dummy period added by get_sent).
    masked_index = len(tokenized_text) - 2
    tokenized_text[masked_index] = "[MASK]"
    # 103 is presumably the [MASK] id for bert-base-cased; safer would be
    # tokenizer.mask_token_id — TODO confirm.
    indexed_tokens[masked_index] = 103
    results_dict = {}
    # Convert inputs to PyTorch tensors
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])
    with torch.no_grad():
        predictions = model(tokens_tensor, segments_tensors)
    while True:
        print_tokens(tokenized_text)
        print("Enter any term position neighbor:")
        masked_index = get_pos()
        results_dict = {}
        # Map every vocabulary token to its predicted score at the chosen
        # position (predictions[0] is the logits tensor).
        for i in range(len(predictions[0][0,masked_index])):
            tok = tokenizer.convert_ids_to_tokens([i])[0]
            results_dict[tok] = float(predictions[0][0,masked_index][i].tolist())
        k = 0
        hist_d = {}
        # Sort tokens by descending score; print the top 20 and build a
        # histogram of scores normalized by the maximum, bucketed to 0.1.
        sorted_d = OrderedDict(sorted(results_dict.items(), key=lambda kv: kv[1], reverse=True))
        first = True
        max_val = 0
        for i in sorted_d:
            if (first):
                max_val = sorted_d[i]
                first = False
            val = round(float(sorted_d[i])/max_val,1)
            if (val in hist_d):
                hist_d[val] += 1
            else:
                hist_d[val] = 1
            k += 1
            if (k <= 20):
                print(i,sorted_d[i])
        # Dump the normalized-score histogram (bucket -> count) for offline use.
        fp = open("top_k.txt","w")
        hist_d_sorted = OrderedDict(sorted(hist_d.items(), key=lambda kv: kv[0], reverse=False))
        for i in hist_d_sorted:
            fp.write(str(i) + " " + str(hist_d_sorted[i]) + "\n")
        fp.close()
| examine_vectors.py | 3,075 | OPTIONAL: if you want to have more information on what's happening, activate the logger as follows Load pre-trained model tokenizer (vocabulary)pdb.set_trace() Create the segments tensors. Convert inputs to PyTorch tensors | 222 | en | 0.801832 |
# -*- coding: utf-8 -*-
"""
slicr.resources.links
~~~~~~~~~~~~~~~~~~~~~
Slicr link resource.
:copyright: © 2018
"""
from flask import current_app
from flask_restful import Resource
from webargs import fields
from webargs.flaskparser import use_args
from slicr.models import Link, LinkSchema
from slicr.utils import convert_args
# Request-body schema for POST /links: `url` is mandatory, `domain_id` is
# optional and defaults to None when absent.
link_args = {
    'url': fields.Str(required=True),
    'domain_id': fields.Int(missing=None)
}
# pylint: disable=R0201
class LinkResource(Resource):
    """Link resource."""

    endpoints = ['/links', '/links/<int:link_id>']
    schema = LinkSchema()

    def _link_response(self, link):
        """Serialize *link* and wrap it in the standard response envelope.

        Shared by ``get`` and ``post`` (the two previously duplicated this
        block).  Schema errors are logged but do not abort the request.
        """
        link_data, errors = self.schema.dump(link)
        if errors:
            current_app.logger.warning(errors)
        return {
            'id': link.id,
            'data': link_data,
            'url': '/links',
            'type': 'link'
        }

    def get(self, link_id):
        """Get link resource.

        .. :quickref: Link collection.

        **Example request**:

        .. sourcecode:: http

            GET /links/1 HTTP/1.1
            Host: example.com
            Accept: application/json, text/javascript

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Vary: Accept
            Content-Type: text/javascript

            {
                "data": {
                    "clicks": 0,
                    "created": "2018-08-21T19:13:34.157470+00:00",
                    "short_link": "b",
                    "updated": null,
                    "url": "https://www.google.com"
                },
                "id": 1,
                "type": "links",
                "url": "/links"
            }

        :jsonparam string url: url for which to create short link.
        :reqheader Accept: The response content type depends on
            :mailheader:`Accept` header
        :reqheader Authorization: Optional authentication token.
        :resheader Content-Type: this depends on :mailheader:`Accept`
            header of request
        :statuscode 200: Link found
        :statuscode 404: Link not found
        """
        link = Link.query.filter_by(id=link_id).first()
        if link is None:
            # Unknown ids previously raised AttributeError (an HTTP 500);
            # a missing resource should be a clean 404 instead.
            return {'message': 'Link not found.'}, 404
        return self._link_response(link), 200

    @use_args(link_args)
    def post(self, args):
        """Create shortened link.

        .. :quickref: Link collection.

        **Example request**:

        .. sourcecode:: http

            POST /links HTTP/1.1
            Host: example.com
            Accept: application/json, text/javascript

            {
                "url": "https://www.google.com"
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 201 OK
            Vary: Accept
            Content-Type: text/javascript

            {
                "data": {
                    "clicks": 0,
                    "created": "2018-08-21T19:13:34.157470+00:00",
                    "short_link": "b",
                    "updated": null,
                    "url": "https://www.google.com"
                },
                "id": 1,
                "type": "links",
                "url": "/links"
            }

        :jsonparam string url: url for which to create short link.
        :reqheader Accept: The response content type depends on
            :mailheader:`Accept` header
        :reqheader Authorization: Optional authentication token.
        :resheader Content-Type: this depends on :mailheader:`Accept`
            header of request
        :statuscode 201: Link created
        """
        args = convert_args(args)
        link = Link(
            url=args.url,
            domain_id=args.domain_id,
            salt=int(current_app.config.get('ENCODER_SALT'))
        ).save()
        return self._link_response(link), 201
| slicr/resources/links.py | 3,983 | Link resource.
Get link resource.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
GET /links/1 HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
:jsonparam string url: url for which to create short link.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
:statuscode 201: Link created
Create shortened link.
.. :quickref: Link collection.
**Example request**:
.. sourcecode:: http
POST /links HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"url": "https://www.google.com"
}
**Example response**:
.. sourcecode:: http
HTTP/1.1 201 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"clicks": 0,
"created": "2018-08-21T19:13:34.157470+00:00",
"short_link": "b",
"updated": null,
"url": "https://www.google.com"
},
"id": 1,
"type": "links",
"url": "/links"
}
:jsonparam string url: url for which to create short link.
:reqheader Accept: The response content type depends on
:mailheader:`Accept` header
:reqheader Authorization: Optional authentication token.
:resheader Content-Type: this depends on :mailheader:`Accept`
header of request
:statuscode 201: Link created
slicr.resources.links
~~~~~~~~~~~~~~~~~~~~~
Slicr link resource.
:copyright: © 2018
-*- coding: utf-8 -*- pylint: disable=R0201 | 2,045 | en | 0.541019 |
import requests
import json
from .config import auth_token, base_url
from .recommendation_client import RecommendationClient
from .json_parser import json_parser
class ExperimentClient(object):
    """Experiment Client Class

    This object defines a Thor experiment within the Python environment. In
    particular, an experiment is defined by its name, the date at which it was
    created, and the dimensions of the machine learning model. Moreover, an
    authentication token is required for requesting new parameter
    configurations, for submitting observations of parameters, for viewing
    pending parameter configurations and for obtaining the best configuration
    of parameters that has been evaluated so far.

    Parameters:
        identifier (int): A unique identifier that indicates which experiment
            on the server-side is being interacted with by the client.
        name (str): A name for the machine learning experiment. Consumers of
            the Thor service must have unique experiment names, so make sure
            all of your experiments are named different things!
        date (datetime): The datetime at which the experiment was created on
            the server side.
        dims (list of dictionaries): A list of dictionaries describing the
            parameter space of the optimization problem. Each dimension is
            given a name, a maximum value, a minimum value, and a dimension
            type that roughly describes how points are spaced.
        auth_token (str): String containing a user's specific API key provided
            by the Thor server.
        base_url (str): String indicating the URL template for API calls.
    """
    def __init__(self, identifier, name, date, dims, auth_token=auth_token,
                 base_url=base_url):
        """Initialize parameters of the experiment client object."""
        self.experiment_id = identifier
        self.name = name
        self.date = date
        self.dims = dims
        self.auth_token = auth_token
        self.base_url = base_url

    def _post(self, endpoint, **params):
        """POST to *endpoint* with the auth token and experiment id merged
        into the JSON payload, returning the raw ``requests`` response.

        Every API method previously rebuilt this payload and call by hand;
        centralizing it keeps the request shape consistent.
        """
        payload = {
            "auth_token": self.auth_token,
            "experiment_id": self.experiment_id
        }
        payload.update(params)
        return requests.post(url=self.base_url.format(endpoint), json=payload)

    def submit_observation(self, config, target):
        """Upload a pairing of a configuration alongside an observed target
        variable.

        Parameters:
            config (dictionary): A dictionary mapping dimension names to
                values indicating the configuration of parameters.
            target (float): A number indicating the performance of this
                configuration of model parameters.

        Examples:
            Useful when evaluations already exist (e.g. from a grid search).
            For an experiment with dimensions "x" and "y":

            >>> d = {"x": 1.5, "y": 3.1}
            >>> v = f(d["x"], d["y"])
            >>> exp.submit_observation(d, v)
        """
        result = self._post(
            "submit_observation",
            configuration=json.dumps(config),
            target=target
        )
        return json_parser(result, self.auth_token)

    def create_recommendation(
            self,
            rand_prob=0.,
            n_models=5,
            description="",
            acq_func="expected_improvement",
            integrate_acq=True
    ):
        """Get a recommendation for a point to evaluate next.

        Contacts the Thor server and requests a new configuration of machine
        learning parameters that serves the objective of maximizing the
        metric of interest.

        Parameters:
            rand_prob (optional, float): Probability that a random point in
                the input space is chosen instead of a Bayesian-optimization
                recommendation; useful for benchmarking and pure exploration.
            n_models (optional, int): The number of Gaussian process models
                to sample using elliptical slice sampling. Larger values give
                a better characterization of acquisition uncertainty.
            description (optional, str): An optional per-observation
                descriptor. Defaults to "".
            acq_func (optional, str): Which acquisition function should be
                used to construct the newest recommendation.
            integrate_acq (optional, bool): Whether to construct an
                integrated acquisition function using sampled models, versus
                one recommendation per sampled model.

        Returns:
            RecommendationClient: The recommended set of parameters. If the
                acquisition function is not integrated, a list of
                RecommendationClient objects may be returned instead, one for
                each sampled model.
        """
        result = self._post(
            "create_recommendation",
            n_models=n_models,
            rand_prob=rand_prob,
            description=description,
            acq_func=acq_func,
            integrate_acq=integrate_acq
        )
        recs = json_parser(result, self.auth_token, RecommendationClient)
        # The server responds with a list even for a single recommendation;
        # unwrap the singleton for caller convenience.
        return recs[0] if len(recs) == 1 else recs

    def best_configuration(self):
        """Get the configuration of parameters that produced the best value
        of the objective function.

        Returns:
            dictionary: A detailed view of the best-performing configuration:
                the date the observation was created, the value of the
                metric, and the configuration itself.
        """
        result = self._post("best_configuration")
        return json_parser(result, self.auth_token)

    def pending_recommendations(self):
        """Query for pending recommendations that have yet to be evaluated.

        Client-side computations may fail for a given configuration, leaving
        the recommendation in a "limbo" state where it exists but is not
        being evaluated. This returns such pending recommendations so the
        client can evaluate them.

        Returns:
            list of RecommendationClient: One element per pending
                observation.
        """
        result = self._post("pending_recommendations")
        return json_parser(result, self.auth_token, RecommendationClient)

    @classmethod
    def from_dict(cls, dictionary, auth_token):
        """Create an experiment object from a dictionary representation. Pass
        the authentication token as an additional parameter.

        TODO:
            Can the authentication token be a return parameter?
        """
        return cls(
            identifier=dictionary["id"],
            name=dictionary["name"],
            date=dictionary["date"],
            dims=dictionary["dimensions"],
            auth_token=auth_token
        )
| thor_client/experiment_client.py | 9,207 | Experiment Client Class
This object defines a Thor experiment within the Python environment. In
particular, an experiment is defined by its name, the date at which it was
created, and the dimensions of the machine learning model. Moreover, an
authentication token is required for requesting new parameter
configurations, for submitting observations of parameters, for viewing
pending parameter configurations and for obtaining the best configuration
of parameters that has been evaluated so far.
Parameters:
identifier (int): A unique identifier that indicates which experiment
on the server-side is being interacted with by the client.
name (str): A name for the machine learning experiment. Consumers of the
Thor service must have unique experiment names, so make sure all of
your experiments are named different things!
date (datetime): The datetime at which the experiment was created on the
server side.
dims (list of dictionaries): A list of dictionaries describing the
parameter space of the optimization problem. Each dimension is given
a name, a maximum value, a minimum value, and a dimension type that
roughly describes how points are spaced.
auth_token (str): String containing a user's specific API key provided
by the Thor server. This is used to authenticate with the Thor
server as a handshake that these experiments belong to a user and
can be viewed and edited by them.
base_url (str): String indicating the URL template for API calls.
Initialize parameters of the experiment client object.
Get the configuration of parameters that produced the best value of
the objective function.
Returns:
dictionary: A dictionary containing a detailed view of the
configuration of model parameters that produced the maximal
value of the metric. This includes the date the observation was
created, the value of the metric, and the configuration itself.
Get a recommendation for a point to evaluate next.
The create recommendation utility represents the core of the Thor
Bayesian optimization software. This function will contact the Thor
server and request a new configuration of machine learning parameters
that serve the object of maximizing the metric of interest.
Parameters:
rand_prob (optional, float): This parameter represents that a random
point in the input space is chosen instead of selecting a
configuration of parameters using Bayesian optimization. As
such, this parameter can be used to benchmark against random
search and otherwise to perform pure exploration of the
parameter space.
n_models (optional, int): The number of Gaussian process models to
sample using elliptical slice sampling. Setting this to a large
number will produce a better characterization of uncertainty in
the acquisition function.
description (optional, str): An optional per-observation
descriptor, potentially useful for identifying one observation
among many others in a large experiment. Defaults to "".
acq_func (optional, str): A string specifying which acquisition
function should be used to construct the newest recommendation.
It can be useful to sometimes vary the acquisition function to
enable exploitation towards the end of an experiment.
integrate_acq (optional, bool): An indicator for whether or not we
should construct an integrated acquisition function using models
sampled from the posterior. The alternative is to not integrate
and to return a single recommendation for each of the sampled
models, of which there are `n_models`.
Returns:
RecommendationClient: A recommendation client object
corresponding to the recommended set of parameters. If the
acquisition function is not integrated, a list of
RecommendationClient objects may be returned instead, one for
each sampled model.
Create an experiment object from a dictionary representation. Pass
the authentication token as an additional parameter.
TODO:
Can the authentication token be a return parameter?
Query for pending recommendations that have yet to be evaluated.
Sometimes client-side computations may fail for a given input
configuration of model parameters, leaving the recommendation in a kind
of "limbo" state in which is not being evaluated but still exists. In
this case, it can be advantageous for the client to query for such
pending observations and to evaluate them. This function returns a list
of pending recommendations which can then be evaluated by the client.
Returns:
list of RecommendationClient: A list of
recommendation client objects, where each element in the list
corresponds to a pending observation.
Upload a pairing of a configuration alongside an observed target
variable.
Parameters:
config (dictionary): A dictionary mapping dimension names to values
indicating the configuration of parameters.
target (float): A number indicating the performance of this
configuration of model parameters.
Examples:
This utility is helpful in the event that a machine learning
practitioner already has a few existing evaluations of the system at
given inputs. For instance, the consumer may have already performed
a grid search to obtain parameter values.
Suppose that a particular experiment has two dimensions named "x"
and "y". Then to upload a configuration to the Thor server, we
proceed as follows:
>>> d = {"x": 1.5, "y": 3.1}
>>> v = f(d["x"], d["y"])
>>> exp.submit_observation(d, v) | 5,709 | en | 0.827873 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'NumPyLexer',
'SLexer']
class MuPADLexer(RegexLexer):
    """
    A `MuPAD <http://www.mupad.com>`_ lexer.
    Contributed by Christopher Creutzig <christopher@creutzig.de>.

    *New in Pygments 0.8.*
    """
    name = 'MuPAD'
    aliases = ['mupad']
    filenames = ['*.mu']

    tokens = {
        'root' : [
            (r'//.*?$', Comment.Single),
            # MuPAD block comments nest; delegate to the 'comment' state.
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"(?:[^"\\]|\\.)*"', String),
            (r'\(|\)|\[|\]|\{|\}', Punctuation),
            (r'''(?x)\b(?:
                next|break|end|
                axiom|end_axiom|category|end_category|domain|end_domain|inherits|
                if|%if|then|elif|else|end_if|
                case|of|do|otherwise|end_case|
                while|end_while|
                repeat|until|end_repeat|
                for|from|to|downto|step|end_for|
                proc|local|option|save|begin|end_proc|
                delete|frame
            )\b''', Keyword),
            (r'''(?x)\b(?:
                DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
                DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
                DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
                DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
            )\b''', Name.Class),
            (r'''(?x)\b(?:
                PI|EULER|E|CATALAN|
                NIL|FAIL|undefined|infinity|
                TRUE|FALSE|UNKNOWN
            )\b''',
             Name.Constant),
            (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
            (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
            (r'''(?x)\b(?:
                and|or|not|xor|
                assuming|
                div|mod|
                union|minus|intersect|in|subset
            )\b''',
             Operator.Word),
            (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
            #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
            # Identifier (possibly `::`-qualified or backtick-quoted) that is
            # immediately followed by '(' -> function call.
            (r'''(?x)
                ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
                (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)\s*([(])''',
                bygroups(Name.Function, Punctuation)),
            (r'''(?x)
                (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
                (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
            (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
            (r'\.[0-9]+(?:e[0-9]+)?', Number),
            (r'.', Text)
        ],
        # Nested block comments: '#push'/'#pop' track the nesting depth.
        'comment' : [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ]
    }
class MatlabLexer(RegexLexer):
    """
    For Matlab (or GNU Octave) source code.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    *New in Pygments 0.10.*
    """
    name = 'Matlab'
    aliases = ['matlab', 'octave']
    filenames = ['*.m']
    mimetypes = ['text/matlab']

    #
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # for f in elfun specfun elmat; do
    #   echo -n "$f = "
    #   matlab -nojvm -r "help $f;exit;" | perl -ne \
    # 'push(@c,$1) if /^    (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
    # done
    #
    # elfun: Elementary math functions
    # specfun: Special Math functions
    # elmat: Elementary matrices and matrix manipulation
    #
    # taken from Matlab version 7.4.0.336 (R2007a)
    #
    elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
             "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
             "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
             "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
             "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
             "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
             "angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
             "fix","floor","ceil","round","mod","rem","sign"]
    specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
               "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
               "erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
               "cross","dot","factor","isprime","primes","gcd","lcm","rat",
               "rats","perms","nchoosek","factorial","cart2sph","cart2pol",
               "pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
    elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
             "freqspace","meshgrid","accumarray","size","length","ndims","numel",
             "disp","isempty","isequal","isequalwithequalnans","cat","reshape",
             "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
             "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
             "ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
             "ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
             "isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
             "hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
             "wilkinson"]

    tokens = {
        'root': [
            # line starting with '!' is sent as a system command. not sure what
            # label to use...
            (r'^!.*', String.Other),
            (r'%.*$', Comment),
            (r'^\s*function', Keyword, 'deffunc'),

            # from 'iskeyword' on version 7.4.0.336 (R2007a):
            (r'(break|case|catch|classdef|continue|else|elseif|end|for|function|'
             r'global|if|otherwise|parfor|persistent|return|switch|try|while)\b',
             Keyword),

            ("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),

            # operators:
            (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
            # operators requiring escape for re:
            (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w\)\]])\'', Operator),

            (r'(?<![\w\)\]])\'', String, 'string'),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'.', Text),
        ],
        'string': [
            (r'[^\']*\'', String, '#pop')
        ],
        # Function definition line: optional return spec, name, arg list.
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
                      Text.Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Text.Whitespace), '#pop'),
        ],
    }

    # Heuristic used by guess_lexer(): '%' comments or '!' system commands
    # strongly suggest Matlab source.
    def analyse_text(text):
        if re.match('^\s*%', text, re.M): # comment
            return 0.9
        elif re.match('^!\w+', text, re.M): # system cmd
            return 0.9
        return 0.1
# Matches a single line including its trailing newline; used by
# MatlabSessionLexer to walk session transcripts line by line.
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
    """
    For Matlab (or GNU Octave) sessions. Modeled after PythonConsoleLexer.
    Contributed by Ken Schutte <kschutte@csail.mit.edu>.

    *New in Pygments 0.10.*
    """
    name = 'Matlab session'
    aliases = ['matlabsession']

    def get_tokens_unprocessed(self, text):
        # Accumulate code lines (after '>>' prompts) in `curcode` and lex
        # them in one batch with MatlabLexer; prompts and error lines are
        # spliced back in via do_insertions at the recorded offsets.
        mlexer = MatlabLexer(**self.options)

        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()

            if line.startswith('>>'):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:3])]))
                curcode += line[3:]
            elif line.startswith('???'):
                idx = len(curcode)

                # without is showing error on same line as before...?
                line = "\n" + line
                token = (0, Generic.Traceback, line)
                insertions.append( (idx, [token,]) )
            else:
                # Plain output line: flush any pending code first so token
                # order matches the transcript.
                if curcode:
                    for item in do_insertions(
                        insertions, mlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []

                yield match.start(), Generic.Output, line

        # Flush trailing code that was not followed by output.
        if curcode: # or item:
            for item in do_insertions(
                insertions, mlexer.get_tokens_unprocessed(curcode)):
                yield item
class NumPyLexer(PythonLexer):
    '''
    A Python lexer recognizing Numerical Python builtins.

    *New in Pygments 0.10.*
    '''
    name = 'NumPy'
    aliases = ['numpy']

    # override the mimetypes to not inherit them from python
    mimetypes = []
    filenames = []

    # NumPy API names that should be highlighted as keywords rather than
    # plain names when they appear in source.
    EXTRA_KEYWORDS = set([
        'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
        'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
        'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
        'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
        'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
        'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
        'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
        'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
        'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
        'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
        'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
        'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
        'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
        'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
        'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
        'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
        'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
        'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
        'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
        'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
        'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
        'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
        'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
        'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
        'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
        'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
        'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
        'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
        'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
        'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
        'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
        'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
        'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
        'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
        'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
        'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
        'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
        'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
        'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
        'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
        'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
        'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
        'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
        'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
        'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
        'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
        'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
        'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
        'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
        'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
        'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
        'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
        'set_numeric_ops', 'set_printoptions', 'set_string_function',
        'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
        'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
        'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
        'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
        'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
        'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
        'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
        'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
        'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
        'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
        'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
    ])

    def get_tokens_unprocessed(self, text):
        # Post-process the plain Python token stream: retag any Name token
        # that matches a NumPy builtin as Keyword.Pseudo.
        for index, token, value in \
            PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value
class SLexer(RegexLexer):
    """
    For S, S-plus, and R source code.

    *New in Pygments 0.10.*
    """

    name = 'S'
    aliases = ['splus', 's', 'r']
    filenames = ['*.S', '*.R']
    mimetypes = ['text/S-plus', 'text/S', 'text/R']

    tokens = {
        'comments': [
            (r'#.*$', Comment.Single),
        ],
        'valid_name': [
            (r'[a-zA-Z][0-9a-zA-Z\._]+', Text),
            # Backtick-quoted names may contain otherwise invalid characters.
            (r'`.+`', String.Backtick),
        ],
        'punctuation': [
            (r'\[|\]|\[\[|\]\]|\$|\(|\)|@|:::?|;|,', Punctuation),
        ],
        'keywords': [
            (r'for(?=\s*\()|while(?=\s*\()|if(?=\s*\()|(?<=\s)else|'
             r'(?<=\s)break(?=;|$)|return(?=\s*\()|function(?=\s*\()',
             Keyword.Reserved)
        ],
        'operators': [
            (r'<-|-|==|<=|>=|<|>|&&|&|!=|\|\|?', Operator),
            (r'\*|\+|\^|/|%%|%/%|=', Operator),
            # Fixed: '*' must be escaped here — the original r'%*%' made '*'
            # a quantifier on '%', so the rule matched runs of '%' instead of
            # R's matrix-multiplication operator %*%.
            (r'%in%|%\*%', Operator)
        ],
        'builtin_symbols': [
            (r'(NULL|NA|TRUE|FALSE|NaN)\b', Keyword.Constant),
            (r'(T|F)\b', Keyword.Variable),
        ],
        'numbers': [
            (r'(?<![0-9a-zA-Z\)\}\]`\"])(?=\s*)[-\+]?[0-9]+'
             r'(\.[0-9]*)?(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
            (r'\.[0-9]*(E[0-9][-\+]?(\.[0-9]*)?)?', Number),
        ],
        'statements': [
            include('comments'),
            # whitespaces
            (r'\s+', Text),
            (r'\'', String, 'string_squote'),
            (r'\"', String, 'string_dquote'),
            include('builtin_symbols'),
            include('numbers'),
            include('keywords'),
            include('punctuation'),
            include('operators'),
            include('valid_name'),
        ],
        'root': [
            include('statements'),
            # Curly braces are handled flat rather than as a pushed state.
            (r'\{|\}', Punctuation),
            (r'.', Text),
        ],
        'string_squote': [
            (r'[^\']*\'', String, '#pop'),
        ],
        'string_dquote': [
            (r'[^\"]*\"', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # Pygments calls analyse_text without an instance; the '<-' assignment
        # operator is a strong hint that the text is S/R code.
        return '<-' in text
| tools/yuidoc/bin/pygments/lexers/math.py | 16,831 | For Matlab (or GNU Octave) source code.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
*New in Pygments 0.10.*
For Matlab (or GNU Octave) sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
*New in Pygments 0.10.*
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <christopher@creutzig.de>.
*New in Pygments 0.8.*
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
For S, S-plus, and R source code.
*New in Pygments 0.10.*
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
-*- coding: utf-8 -*-(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), These lists are generated automatically. Run the following in bash shell: for f in elfun specfun elmat; do echo -n "$f = " matlab -nojvm -r "help $f;exit;" | perl -ne \ 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}' done elfun: Elementary math functions specfun: Special Math functions elmat: Elementary matrices and matrix manipulation taken from Matlab version 7.4.0.336 (R2007a) line starting with '!' is sent as a system command. not sure what label to use... from 'iskeyword' on version 7.4.0.336 (R2007a): operators: operators requiring escape for re: punctuation: quote can be transpose, instead of string: (not great, but handles common cases...) comment system cmd without is showing error on same line as before...? or item: override the mimetypes to not inherit them from python whitespaces blocks:(r'\{', Punctuation, 'block'),'block': [ include('statements'), ('\{', Punctuation, 'push'), ('\}', Punctuation, 'pop')], | 1,767 | en | 0.701211 |
# -*- coding: utf-8 -*-
"""
Indices library
===============
This module describes climate indicator functions. Functions are listed in alphabetical order and describe the raw
computation performed over xarray.DataArrays. DataArrays should carry unit information to allow for any needed
unit conversions. The output's attributes (CF-Convention) are not modified. Validation checks and output attributes
are handled by indicator classes described in files named by the physical variable (temperature, precip, streamflow).
Notes for docstring
-------------------
The docstrings adhere to the `NumPy`_ style convention and are meant as a way to store CF-Convention metadata as
well as information relevant to third party libraries (such as a WPS server).
The first line of the docstring (the short summary) will be assigned to the output's `long_name` attribute. The
`long_name` attribute is defined by the NetCDF User Guide to contain a long descriptive name which may, for example,
be used for labeling plots.
The second paragraph will be considered as the "*abstract*", or the CF global "*comment*" (miscellaneous information
about the data or methods used to produce it).
The third and fourth sections are the **Parameters** and **Returns** sections describing the input and output values
respectively.
.. code-block:: python
Parameters
----------
<standard_name> : xarray.DataArray
<Long_name> of variable [acceptable units].
threshold : string
Description of the threshold / units.
e.g. The 10th percentile of historical temperature [K].
freq : str, optional
Resampling frequency.
Returns
-------
xarray.DataArray
Output's <long_name> [units]
The next sections would be **Notes** and **References**:
.. code-block:: python
Notes
-----
This is where the mathematical equation is described.
At the end of the description, convention suggests
to add a reference [example]_:
.. math::
3987^12 + 4365^12 = 4472^12
References
----------
.. [example] Smith, T.J. and Huard, D. (2018). "CF Docstrings:
A manifesto on conventions and the metaphysical nature
of ontological python documentation." Climate Aesthetics,
vol. 1, pp. 121-155.
Indice descriptions
===================
.. _`NumPy`: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
"""
from ._simple import *
from ._threshold import *
from ._multivariate import *
# TODO: Define a unit conversion system for temperature [K, C, F] and precipitation [mm h-1, Kg m-2 s-1] metrics
# TODO: Move utility functions to another file.
# TODO: Should we reference the standard vocabulary we're using ?
# E.g. http://vocab.nerc.ac.uk/collection/P07/current/BHMHISG2/
| xclim/indices/__init__.py | 2,772 | Indices library
===============
This module describes climate indicator functions. Functions are listed in alphabetical order and describe the raw
computation performed over xarray.DataArrays. DataArrays should carry unit information to allow for any needed
unit conversions. The output's attributes (CF-Convention) are not modified. Validation checks and output attributes
are handled by indicator classes described in files named by the physical variable (temperature, precip, streamflow).
Notes for docstring
-------------------
The docstrings adhere to the `NumPy`_ style convention and is meant as a way to store CF-Convention metadata as
well as information relevant to third party libraries (such as a WPS server).
The first line of the docstring (the short summary), will be assigned to the output's `long_name` attribute. The
`long_name` attribute is defined by the NetCDF User Guide to contain a long descriptive name which may, for example,
be used for labeling plots
The second paragraph will be considered as the "*abstract*", or the CF global "*comment*" (miscellaneous information
about the data or methods used to produce it).
The third and fourth sections are the **Parameters** and **Returns** sections describing the input and output values
respectively.
.. code-block:: python
Parameters
----------
<standard_name> : xarray.DataArray
<Long_name> of variable [acceptable units].
threshold : string
Description of the threshold / units.
e.g. The 10th percentile of historical temperature [K].
freq : str, optional
Resampling frequency.
Returns
-------
xarray.DataArray
Output's <long_name> [units]
The next sections would be **Notes** and **References**:
.. code-block:: python
Notes
-----
This is where the mathematical equation is described.
At the end of the description, convention suggests
to add a reference [example]_:
.. math::
3987^12 + 4365^12 = 4472^12
References
----------
.. [example] Smith, T.J. and Huard, D. (2018). "CF Docstrings:
A manifesto on conventions and the metaphysical nature
of ontological python documentation." Climate Aesthetics,
vol. 1, pp. 121-155.
Indice descriptions
===================
.. _`NumPy`: https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
-*- coding: utf-8 -*- TODO: Define a unit conversion system for temperature [K, C, F] and precipitation [mm h-1, Kg m-2 s-1] metrics TODO: Move utility functions to another file. TODO: Should we reference the standard vocabulary we're using ? E.g. http://vocab.nerc.ac.uk/collection/P07/current/BHMHISG2/ | 2,674 | en | 0.722771 |
import asyncio
import logging
from typing import List, Optional, Set, Tuple
import aiosqlite
from blspy import G1Element
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32
from chia.wallet.derivation_record import DerivationRecord
from chia.wallet.util.wallet_types import WalletType
log = logging.getLogger(__name__)
class WalletPuzzleStore:
    """
    WalletPuzzleStore keeps track of all generated puzzle_hashes and their derivation path / wallet.

    All reads go through aiosqlite; a set of every known puzzle hash is kept in
    memory (``all_puzzle_hashes``) so hot-path membership checks avoid the DB.
    """

    db_connection: aiosqlite.Connection
    lock: asyncio.Lock
    cache_size: uint32
    all_puzzle_hashes: Set[bytes32]
    db_wrapper: DBWrapper

    @classmethod
    async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
        """Build the store: create the table/indexes if needed and warm the cache."""
        self = cls()
        self.cache_size = cache_size
        self.db_wrapper = db_wrapper
        self.db_connection = self.db_wrapper.db
        await self.db_connection.execute("pragma journal_mode=wal")
        await self.db_connection.execute("pragma synchronous=2")
        # NOTE(review): "PRIMARY_KEY" (with an underscore) is not SQL — SQLite
        # folds it into the column's type name, so no primary-key constraint is
        # actually created. Left unchanged on purpose: "fixing" it would change
        # the INSERT OR REPLACE semantics for newly created databases — confirm
        # intent before altering the schema.
        await self.db_connection.execute(
            (
                "CREATE TABLE IF NOT EXISTS derivation_paths("
                "derivation_index int,"
                " pubkey text,"
                " puzzle_hash text PRIMARY_KEY,"
                " wallet_type int,"
                " wallet_id int,"
                " used tinyint)"
            )
        )
        await self.db_connection.execute(
            "CREATE INDEX IF NOT EXISTS derivation_index_index on derivation_paths(derivation_index)"
        )
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS ph on derivation_paths(puzzle_hash)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS pubkey on derivation_paths(pubkey)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on derivation_paths(wallet_type)")
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on derivation_paths(wallet_id)")
        # Fixed: this index previously targeted wallet_type (duplicating the one
        # above) and left the "used" column — queried by
        # get_unused_derivation_path / get_current_derivation_record_for_wallet —
        # unindexed. Existing DBs keep the old index (IF NOT EXISTS).
        await self.db_connection.execute("CREATE INDEX IF NOT EXISTS used on derivation_paths(used)")
        await self.db_connection.commit()
        # Lock
        self.lock = asyncio.Lock()  # external
        await self._init_cache()
        return self

    async def close(self):
        """Close the underlying database connection."""
        await self.db_connection.close()

    async def _init_cache(self):
        # Populate the in-memory puzzle-hash set from the DB.
        self.all_puzzle_hashes = await self.get_all_puzzle_hashes()

    async def _clear_database(self):
        # Test/maintenance helper: wipe every derivation path row.
        cursor = await self.db_connection.execute("DELETE FROM derivation_paths")
        await cursor.close()
        await self.db_connection.commit()

    async def add_derivation_paths(self, records: List[DerivationRecord]) -> None:
        """
        Insert many derivation paths into the database.
        """
        async with self.db_wrapper.lock:
            sql_records = []
            for record in records:
                # Keep the in-memory cache in sync with what we are inserting.
                self.all_puzzle_hashes.add(record.puzzle_hash)
                sql_records.append(
                    (
                        record.index,
                        bytes(record.pubkey).hex(),
                        record.puzzle_hash.hex(),
                        record.wallet_type,
                        record.wallet_id,
                        0,  # new paths start unused
                    ),
                )
            cursor = await self.db_connection.executemany(
                "INSERT OR REPLACE INTO derivation_paths VALUES(?, ?, ?, ?, ?, ?)",
                sql_records,
            )
            await cursor.close()
            await self.db_connection.commit()

    async def get_derivation_record(self, index: uint32, wallet_id: uint32) -> Optional[DerivationRecord]:
        """
        Returns the derivation record by index and wallet id.
        """
        cursor = await self.db_connection.execute(
            "SELECT * FROM derivation_paths WHERE derivation_index=? and wallet_id=?;",
            (
                index,
                wallet_id,
            ),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            # Row layout: (derivation_index, pubkey, puzzle_hash, wallet_type, wallet_id, used)
            return DerivationRecord(
                uint32(row[0]),
                bytes32.fromhex(row[2]),
                G1Element.from_bytes(bytes.fromhex(row[1])),
                WalletType(row[3]),
                uint32(row[4]),
            )
        return None

    async def get_derivation_record_for_puzzle_hash(self, puzzle_hash: str) -> Optional[DerivationRecord]:
        """
        Returns the derivation record matching the given (hex) puzzle hash, if any.
        """
        cursor = await self.db_connection.execute(
            "SELECT * FROM derivation_paths WHERE puzzle_hash=?;",
            (puzzle_hash,),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            return DerivationRecord(
                uint32(row[0]),
                bytes32.fromhex(row[2]),
                G1Element.from_bytes(bytes.fromhex(row[1])),
                WalletType(row[3]),
                uint32(row[4]),
            )
        return None

    async def set_used_up_to(self, index: uint32, in_transaction: bool = False) -> None:
        """
        Sets a derivation path to used so we don't use it again.

        Marks every path with derivation_index <= ``index``. When
        ``in_transaction`` is True the caller already holds the wrapper lock
        and will commit.
        """
        if not in_transaction:
            await self.db_wrapper.lock.acquire()
        try:
            cursor = await self.db_connection.execute(
                "UPDATE derivation_paths SET used=1 WHERE derivation_index<=?",
                (index,),
            )
            await cursor.close()
        finally:
            if not in_transaction:
                await self.db_connection.commit()
                self.db_wrapper.lock.release()

    async def puzzle_hash_exists(self, puzzle_hash: bytes32) -> bool:
        """
        Checks if passed puzzle_hash is present in the db.
        """
        cursor = await self.db_connection.execute(
            "SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        return row is not None

    async def one_of_puzzle_hashes_exists(self, puzzle_hashes: List[bytes32]) -> bool:
        """
        Checks if one of the passed puzzle_hashes is present in the db.

        Uses the in-memory cache, so no database round trip is needed.
        """
        if len(puzzle_hashes) < 1:
            return False
        # Equivalent to the original linear scan: True iff any hash is known.
        return not self.all_puzzle_hashes.isdisjoint(puzzle_hashes)

    async def index_for_pubkey(self, pubkey: G1Element) -> Optional[uint32]:
        """
        Returns derivation paths for the given pubkey.
        Returns None if not present.
        """
        cursor = await self.db_connection.execute(
            "SELECT * from derivation_paths WHERE pubkey=?", (bytes(pubkey).hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return uint32(row[0])
        return None

    async def index_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[uint32]:
        """
        Returns the derivation path for the puzzle_hash.
        Returns None if not present.
        """
        cursor = await self.db_connection.execute(
            "SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return uint32(row[0])
        return None

    async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
        """
        Returns the derivation path for the puzzle_hash, restricted to one wallet.
        Returns None if not present.
        """
        cursor = await self.db_connection.execute(
            "SELECT * from derivation_paths WHERE puzzle_hash=? and wallet_id=?;",
            (
                puzzle_hash.hex(),
                wallet_id,
            ),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return uint32(row[0])
        return None

    async def wallet_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[uint32, WalletType]]:
        """
        Returns the (wallet_id, wallet_type) owning the puzzle_hash.
        Returns None if not present.
        """
        cursor = await self.db_connection.execute(
            "SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return row[4], WalletType(row[3])
        return None

    async def get_all_puzzle_hashes(self) -> Set[bytes32]:
        """
        Return a set containing all puzzle_hashes we generated.
        """
        cursor = await self.db_connection.execute("SELECT * from derivation_paths")
        rows = await cursor.fetchall()
        await cursor.close()
        result: Set[bytes32] = set()
        for row in rows:
            result.add(bytes32(bytes.fromhex(row[2])))
        return result

    async def get_last_derivation_path(self) -> Optional[uint32]:
        """
        Returns the last derivation path by derivation_index.
        """
        cursor = await self.db_connection.execute("SELECT MAX(derivation_index) FROM derivation_paths;")
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            return uint32(row[0])
        return None

    async def get_last_derivation_path_for_wallet(self, wallet_id: int) -> Optional[uint32]:
        """
        Returns the last derivation path by derivation_index, for one wallet.
        """
        # Fixed: query previously interpolated wallet_id via an f-string;
        # use a bound parameter instead.
        cursor = await self.db_connection.execute(
            "SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id=?;",
            (wallet_id,),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            return uint32(row[0])
        return None

    async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
        """
        Returns the current derivation record (highest used index) for a wallet.
        """
        # Fixed: query previously interpolated wallet_id via an f-string;
        # use a bound parameter instead.
        cursor = await self.db_connection.execute(
            "SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id=? and used=1;",
            (wallet_id,),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            index = uint32(row[0])
            return await self.get_derivation_record(index, wallet_id)
        return None

    async def get_unused_derivation_path(self) -> Optional[uint32]:
        """
        Returns the first unused derivation path by derivation_index.
        """
        cursor = await self.db_connection.execute("SELECT MIN(derivation_index) FROM derivation_paths WHERE used=0;")
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None and row[0] is not None:
            return uint32(row[0])
        return None
| chia/wallet/wallet_puzzle_store.py | 11,198 | WalletPuzzleStore keeps track of all generated puzzle_hashes and their derivation path / wallet.
Lock external | 112 | en | 0.865409 |
# Copyright (C) 2019 by Landmark Acoustics LLC
r"""A class to write a WAV-formatted file."""
import wave
class WaveFile:
    '''A thin convenience wrapper around the STL `wave` module's `Wave_write`.

    Parameters
    ----------
    name : str
        The name to save the file as. It should include path and extension.
    sample_rate : int
        The number of samples per second that the file will use.
    bit_rate : int
        The number of bits the file will use per sample.
    channels : int
        The number of channels that the file has.

    See Also
    --------
    wave : the Python STL module
    '''

    def __init__(self,
                 name: str,
                 sample_rate: int,
                 bit_rate: int,
                 channels: int) -> None:
        self._channels = channels
        self._sample_rate = sample_rate
        # `wave` measures sample width in bytes, so convert from bits.
        self._byte_rate = bit_rate // 8
        self._filehandle = wave.open(name, 'wb')
        self._filehandle.setnchannels(self._channels)
        self._filehandle.setsampwidth(self._byte_rate)
        self._filehandle.setframerate(self._sample_rate)

    @property
    def channels(self) -> int:
        '''The number of channels the file has.'''
        return self._channels

    @property
    def sample_rate(self) -> int:
        '''The number of samples per second.'''
        return self._sample_rate

    @property
    def byte_rate(self) -> int:
        '''The number of bytes per sample.'''
        return self._byte_rate

    @property
    def bit_rate(self) -> int:
        '''The number of bits per sample.'''
        return 8 * self._byte_rate

    @property
    def frame_size(self) -> int:
        '''The number of bytes per frame.'''
        return self._byte_rate * self._channels

    def write_frames(self, data) -> int:
        '''Add some data to the file.

        Parameters
        ----------
        data : bytes-like object
            The user must ensure that the data's format matches the file's!

        Returns
        -------
        int : the number of frames written
        '''
        start = self._filehandle.tell()
        self._filehandle.writeframes(data)
        # `tell` reports a frame position, so the difference is frames written.
        return self._filehandle.tell() - start

    def __enter__(self):
        self._filehandle.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        return self._filehandle.__exit__(*args, **kwargs)
if __name__ == '__main__':
    # Smoke test: write 12000 frames of silence to the path given on the
    # command line, using 3 channels of 28-bit (3-byte) samples.
    import array
    import sys

    writer = WaveFile(sys.argv[1], 44100, 28, 3)
    silence = array.array('b', bytes(12000 * writer.frame_size))
    N = writer.write_frames(silence)
    print(f'Wrote {N} frames in {writer.channels} {writer.bit_rate}-bit channels.')
| lacaudiofiles/wave/wavefile.py | 2,671 | A wrapper for `Wave_write` from Python STL's `wave` module.
Parameters
----------
name : str
The name to save the file as. It should include path and extension.
sample_rate : int
The number of samples per second that the file will use.
bit_rate : int
The number of bits the file will use per sample.
channels : int
The number of channels that the file has.
See Also
--------
wave : the Python STL module
The number of bits per sample.
The number of bytes per sample.
The number of channels the file has.
The number of bytes per frame.
The number of samples per second.
Add some data to the file.
Parameters
----------
data : bytes-like object
The user must ensure that the data's format matches the file's!
Returns
-------
int : the number of frames written
A class to write a WAV-formatted file.
Copyright (C) 2019 by Landmark Acoustics LLC | 870 | en | 0.692383 |
# Generated by Django 3.1.2 on 2020-10-08 05:13
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce that each (user, note) pair appears at most once in publicsharednote."""

    # Depends on the swappable user model plus the previous notes migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('notes', '0003_auto_20201006_0607'),
    ]

    # Adds a DB-level unique constraint; duplicate shares will raise IntegrityError.
    operations = [
        migrations.AlterUniqueTogether(
            name='publicsharednote',
            unique_together={('user', 'note')},
        ),
    ]
| simple_notes/notes/migrations/0004_auto_20201008_0513.py | 459 | Generated by Django 3.1.2 on 2020-10-08 05:13 | 45 | en | 0.743927 |
"""
Deployment helpers
==================
"""
import os
import logging
from ..definitions import ROOT_DIR
from .docker import Docker
from .ecr import ECR
from .s3 import S3
from .sagemaker import Sagemaker
logger = logging.getLogger(__name__)
def build(run, project, model_type):
    """Build the Docker image for the given run/project/model type."""
    image_name = get_image_name(run, project)
    context_dir = os.path.join(ROOT_DIR, 'sagemaker', model_type)
    Docker().build(context_dir, image_name)
def push(run, project, model_type):
    """Push the built image to the registry and upload the model artifact to S3."""
    image_name = get_image_name(run, project)
    Docker().push(image_name)
    S3().upload_model(run, image_name, model_type=model_type)
def build_and_push(run, project, model_type):
    """Build the image, then push the image and model artifact."""
    for step in (build, push):
        step(run, project, model_type)
def run_local(run, project, model_type):
    """Build the Docker image and run it locally."""
    build(run, project, model_type)
    image_name = get_image_name(run, project)
    Docker().run(image_name, run, model_type)
def create_model_and_configuration(run, project, question_tag, model_type, instance_type):
    """Register the model and its endpoint configuration on Sagemaker."""
    image_name = get_image_name(run, project)
    # Resolve where the image and the model artifact live.
    ecr_image_name = ECR().get_ecr_image_name(image_name)
    s3_model_path = S3().get_model_s3_path(image_name)
    # Tags make the Sagemaker resources traceable back to this run.
    tags = [
        {'Key': 'project_name', 'Value': project},
        {'Key': 'question_tag', 'Value': question_tag},
        {'Key': 'run_name', 'Value': run},
        {'Key': 'model_type', 'Value': model_type},
    ]
    Sagemaker().create_model_and_configuration(
        ecr_image_name, s3_model_path, tags=tags, instance_type=instance_type)
def deploy(run, project, question_tag, model_type, instance_type):
    """Full deployment: build + push the image, then register it on Sagemaker."""
    build_and_push(run, project, model_type)
    create_model_and_configuration(run, project, question_tag, model_type, instance_type)
def get_image_name(run, project):
    """Return the canonical image name for a (project, run) pair."""
    return '_'.join(('crowdbreaks', project, run))
| txcl/utils/deploy_helpers.py | 2,108 | Deployment helpers
==================
build image run it init helpers build deploy arguments create model and endpoint configuration initialize stuff build image and push to ECR create model and endpoint configuration | 219 | en | 0.603858 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'goodshare.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; chaining keeps the original traceback.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    # Entry point when invoked directly: ``python manage.py <command>``.
    main()
| manage.py | 665 | Run administrative tasks.
Django's command-line utility for administrative tasks.
!/usr/bin/env python | 103 | en | 0.725633 |
import json
import os
import httpx
import time
def get_cities(cfg):
    """Return the configured city names (the keys of the ``cities`` mapping)."""
    cities_cfg = cfg['cities']
    return cities_cfg.keys()
def get_usable_bounding_boxes(nominal_boxes, cfg):
    """Split bounding boxes until each is small and sparse enough to query fully.

    Boxes larger than ``cfg['max_area']`` or denser than ``cfg['density_limit']``
    photos (presumably to work around Flickr's per-query result cap — confirm)
    are bisected along their longer edge; splitting stops at ``cfg['min_area']``.

    NOTE(review): FlickrAPI, FlickrError, get_secret, est_area and the
    PRIVACY_FILTER / CONTENT_TYPE / HAS_GEO / GEO_CTX constants are not imported
    in this module's visible header — confirm where they come from.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    boxes = []
    working = nominal_boxes.copy()
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    city_total = 0
    while len(working) > 0:
        box = working.pop()
        str_box = ",".join(map(str, box))
        box_area = est_area(box)
        divide_flag = False
        if box_area > cfg["max_area"]:
            total_imgs = -1
            divide_flag = True
        else:
            # Fixed: total_imgs was unbound when the search raised FlickrError,
            # crashing with NameError below; treat a failed box as empty.
            total_imgs = 0
            time.sleep(cfg["time_delay"])
            try:
                box_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=str_box,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_imgs = int(box_pics['photos']['total'])
                divide_flag = (total_imgs >= cfg["density_limit"] and box_area > cfg["min_area"])
            except FlickrError as err:
                # Fixed: the message referenced the undefined name ``bbox``
                # (NameError) and misspelled "initial".
                print(f'Error retrieving initial page for bounding box {str_box}')
                print(f'{err}')
        if divide_flag:
            new_box_1 = box.copy()
            new_box_2 = box.copy()
            if box[2] - box[0] > box[3] - box[1]:  # wider than tall: split longitudinally
                border = (box[0] + box[2]) / 2
                new_box_1[2] = border
                new_box_2[0] = border
            else:  # taller than wide: split latitudinally
                border = (box[1] + box[3]) / 2
                new_box_1[3] = border
                new_box_2[1] = border
            working.append(new_box_1)
            working.append(new_box_2)
        elif total_imgs == 0:
            continue
        else:
            city_total += total_imgs
            boxes.append(box)
    print(city_total)
    return boxes
def read_metadata(file_root, cities, url_field):
    """Load each city's cached metadata.json and collect its known URLs.

    Returns ``(metadata, urls)`` where ``metadata[city]`` is the parsed JSON
    (only for cities whose file exists) and ``urls[city]`` is the set of values
    found under ``url_field`` (always present, possibly empty).
    """
    metadata = {}
    urls = {}
    for city in cities:
        urls[city] = set()
        file_path = f'{file_root}/{city}/metadata.json'
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                loaded = json.load(f)
                for img in loaded['images']:
                    # Fixed: the dedup test used to check membership in the outer
                    # ``urls`` dict (i.e. against city names), not this city's set.
                    if url_field in img and img[url_field] not in urls[city]:
                        urls[city].add(img[url_field])
                metadata[city] = loaded
    return metadata, urls
def get_known_urls(file_root, cities):
    """Read each city's stored URL list from ``<file_root>/<city>/urls.txt``.

    City names may contain spaces; the on-disk directory name uses underscores.
    Returns a dict mapping the original city name to a set of URLs (empty when
    no file exists yet).
    """
    urls = {}
    for key in cities:
        city = key.replace(" ", "_")
        file_path = f'{file_root}/{city}/urls.txt'
        city_urls = set()
        if os.path.exists(file_path):
            with open(file_path, 'r') as f:
                city_urls = {line.strip() for line in f}
        urls[key] = city_urls
    return urls
def write_urls(urls, cfg):
    """Write one URL-list file per city that is not in photo-download mode.

    ``urls`` is keyed by the original config city name (which may contain
    spaces); the on-disk directory name uses underscores.
    """
    for key in cfg['cities']:
        city = key.replace(" ", "_")
        directory = os.path.join('/data', city)
        if not os.path.exists(directory):
            os.mkdir(directory)
        # NOTE(review): get_known_urls() reads '<city>/urls.txt' under file_root,
        # while this writes '<city>/urls' (no extension) under a hardcoded /data
        # root — the two never round-trip; confirm which path is canonical.
        file_path = os.path.join(directory, 'urls')
        if cfg['cities'][key]['download'] != 'photos':
            # Fixed: URL sets are keyed by the original config key, not the
            # underscored directory name; indexing with ``city`` raised KeyError
            # for multi-word city names.
            print(f"printing {len(urls[key])} urls for city {city} at {file_path}")
            try:
                with open(file_path, 'w') as f:
                    for url in urls[key]:
                        f.write(f'{url}\n')
                    f.flush()
            except Exception as err:
                print(f"error: {err} opening file {file_path}")
def get_metadata(cfg, file_root):
    # Load cached per-city metadata from disk and, when configured, refresh it
    # from the Flickr API and persist the merged result.
    metadata = None
    cities = get_cities(cfg)
    url_field = cfg['url_field']
    # NOTE(review): this result is immediately overwritten by read_metadata()
    # below, so the urls.txt files read here are discarded — confirm whether
    # the two URL collections were meant to be merged.
    urls = get_known_urls(file_root, cities)
    metadata, urls = read_metadata(file_root, cities, url_field)
    if cfg['refresh_metadata']:
        print('fetching metadata')
        metadata,urls = fetch_metadata(cfg, metadata, urls)
        print('writing metadata')
        write_metadata(metadata, cfg, file_root)
        print('writing url list')
        write_urls(urls, cfg)
    return metadata
def fetch_metadata(cfg, metadata, urls):
    """Query the Flickr API for each configured city and append new image records.

    Mutates and returns ``(metadata, urls)``: new photos whose ``url_field``
    value is not yet in the city's URL set are appended to
    ``metadata[city]['images']``.

    NOTE(review): FlickrAPI, FlickrError, get_secret, tqdm and the
    PRIVACY_FILTER / CONTENT_TYPE / HAS_GEO / GEO_CTX constants are not
    imported in this module's visible header — confirm where they come from.
    """
    FLICKR_PUBLIC = get_secret('flickr_api_key')
    FLICKR_SECRET = get_secret('flickr_api_secret')
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    license = "1,2,3,4,5,6,7,8,9,10"
    extras = 'description,license,date_upload,date_taken,original_format,'
    extras += 'last_update,geo,tags, machine_tags, o_dims, media,'
    extras += 'url_m,url_n,url_z,url_c,url_l,url_o'
    for key in cfg['cities']:
        count = 0
        dl_limit = cfg['cities'][key]['download_limit']
        # Large downloads need the boxes subdivided below the API's result cap.
        if dl_limit != -1 and dl_limit > 1000:
            boxes = get_usable_bounding_boxes(list(cfg['cities'][key]['bounding_boxes']), cfg)
        else:
            boxes = list(cfg['cities'][key]['bounding_boxes'])
        city_urls = urls[key]
        if key not in metadata:
            metadata[key] = {}
            metadata[key]['image_count'] = 0
            metadata[key]['images'] = []
        for bbox in tqdm(boxes, desc=key):
            bbox_str = ",".join(map(str, bbox))
            time.sleep(cfg["time_delay"])
            total_pages = 0
            try:
                # First query only to learn how many result pages exist.
                city_pics = flickr.photos.search(
                    privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                    content_type=CONTENT_TYPE,
                    has_geo=HAS_GEO, geo_context=GEO_CTX,
                    license=license, extras=extras, per_page=cfg["page_size"])
                total_pages = city_pics['photos']['pages']
            except FlickrError as err:
                print(f'Error retrieving initial page for bounding box {bbox}')
                print(f'{err}')
            # Fixed: pages are 1-indexed, so ``range(1, total_pages)`` silently
            # skipped the last page of every bounding box.
            for p in range(1, total_pages + 1):
                try:
                    time.sleep(cfg["time_delay"])
                    city_pics = flickr.photos.search(
                        privacy_filter=PRIVACY_FILTER, bbox=bbox_str,
                        content_type=CONTENT_TYPE,
                        has_geo=HAS_GEO, geo_context=GEO_CTX,
                        license=license, extras=extras, per_page=cfg["page_size"],
                        page=p)
                    for ph in city_pics['photos']['photo']:
                        if dl_limit != -1 and count > dl_limit:
                            # NOTE(review): this only exits the photo loop; the
                            # remaining pages for the box are still fetched.
                            break
                        if cfg["url_field"] in ph and ph[cfg["url_field"]] not in city_urls:
                            metadata[key]['images'].append(ph)
                            city_urls.add(ph[cfg["url_field"]])
                            metadata[key]['image_count'] += 1
                            count += 1
                except FlickrError as err:
                    print(f'Error retrieving page {p} for bounding box {bbox}')
                    print(f'{err}')
    return metadata, urls
def write_metadata(metadata, cfg, file_root):
    """Write each city's metadata dict to ``<file_root>/<city>/metadata.json``.

    Cities configured with ``download == 'photos'`` are skipped. City names may
    contain spaces; the directory name uses underscores.
    """
    for key in metadata:
        city = key.replace(" ", "_")
        directory = os.path.join(file_root, city)
        if not os.path.exists(directory):
            os.mkdir(directory)
        file_path = os.path.join(directory, 'metadata.json')
        # Note: the unused local ``dl_flag`` from the original was removed.
        if cfg['cities'][key]['download'] != 'photos':
            with open(file_path, 'w') as f:
                json.dump(metadata[key], f, indent=2)
| tools/download/flickr/src/metadata.py | 8,400 | print(' area_km2 count type bounding_box') print('%10.4f %5i %s %s' % (box_area/1.E6, total_imgs, 'branch' if divide_flag else 'leaf ', box))widetall for key in cfg['cities']: city=key.replace(" ", "_") metadata[key]['images'].append(ph) metadata[key]['image_count'] = total print(f"length of inserted ids for {key}: {len(inserted_ids)}") print(f"total for {key}: {len(metadata[key]['images'])}") | 428 | en | 0.337704 |
# -*- coding: utf-8 -*-
#
# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image
# Copyright (C) 2017 Christian Zimmermann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, unicode_literals
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
import math
import cv2
class NetworkOps(object):
    """ Operations that are frequently used within networks. """
    # slope used for the negative half of the leaky ReLU
    neg_slope_of_relu = 0.01
    @classmethod
    def leaky_relu(cls, tensor, name='relu'):
        """ Leaky ReLU activation: max(x, neg_slope_of_relu * x). """
        out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)
        return out_tensor
    @classmethod
    def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
        """ 2D convolution (SAME padding) followed by a bias add.

        Creates 'weights' (Xavier init) and 'biases' (small positive constant)
        inside variable scope layer_name and registers them in the
        'wd'/'variables'/'filters' resp. 'biases' collections.
        """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()
            strides = [1, stride, stride, 1]
            kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
            # conv
            kernel = tf.get_variable('weights', kernel_shape, tf.float32,
                                     tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
            tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
            # bias
            biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
                                     tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
            out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
            return out_tensor
    @classmethod
    def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
        """ Convolution followed by a leaky ReLU. """
        tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
        out_tensor = cls.leaky_relu(tensor, name='out')
        return out_tensor
    @classmethod
    def max_pool(cls, bottom, name='pool'):
        """ 2x2 max pooling with stride 2 (VALID padding). """
        pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                                padding='VALID', name=name)
        return pooled
    @classmethod
    def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
        """ Transposed convolution ("deconvolution") plus bias.

        The kernel is initialized as a bilinear upsampling filter
        (see get_deconv_filter); the channel count is preserved.
        """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()
            kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
            strides = [1, stride, stride, 1]
            # conv
            kernel = cls.get_deconv_filter(kernel_shape, trainable)
            tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
                                                strides=strides, padding='SAME')
            # bias
            biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
                                     tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
            out_tensor = tf.nn.bias_add(tmp_result, biases)
            return out_tensor
    @classmethod
    def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
        """ Transposed convolution followed by a leaky ReLU. """
        tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
        out_tensor = cls.leaky_relu(tensor, name='out')
        return out_tensor
    @staticmethod
    def get_deconv_filter(f_shape, trainable):
        """ Creates a 'weights' variable initialized as a bilinear upsampling kernel.

        The 2D bilinear profile is placed on the diagonal of the two channel
        dimensions (weights[:, :, i, i]), so each channel is upsampled
        independently without mixing channels.
        """
        width = f_shape[0]
        height = f_shape[1]
        f = math.ceil(width/2.0)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        # 2D tent function centered at c
        bilinear = np.zeros([f_shape[0], f_shape[1]])
        for x in range(width):
            for y in range(height):
                value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
                bilinear[x, y] = value
        weights = np.zeros(f_shape)
        for i in range(f_shape[2]):
            weights[:, :, i, i] = bilinear
        init = tf.constant_initializer(value=weights,
                                       dtype=tf.float32)
        return tf.get_variable(name="weights", initializer=init,
                               shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
    @staticmethod
    def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
        """ Fully connected layer (no activation); input must be [batch, features]. """
        with tf.variable_scope(layer_name):
            in_size = in_tensor.get_shape().as_list()
            assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
            weights_shape = [in_size[1], out_chan]
            # weight matrix
            weights = tf.get_variable('weights', weights_shape, tf.float32,
                                      tf.contrib.layers.xavier_initializer(), trainable=trainable)
            # check_numerics guards against NaN/Inf creeping into the parameters
            weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
            # bias
            biases = tf.get_variable('biases', [out_chan], tf.float32,
                                     tf.constant_initializer(0.0001), trainable=trainable)
            biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
            out_tensor = tf.matmul(in_tensor, weights) + biases
            return out_tensor
    @classmethod
    def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
        """ Fully connected layer followed by a leaky ReLU. """
        tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
        out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')
        return out_tensor
    @staticmethod
    def dropout(in_tensor, keep_prob, evaluation):
        """ Dropout: Each neuron is dropped independently.

        When the boolean tensor `evaluation` is True a keep probability of
        1.0 is used, i.e. dropout is disabled at evaluation time.
        """
        with tf.variable_scope('dropout'):
            tensor_shape = in_tensor.get_shape().as_list()
            out_tensor = tf.cond(evaluation,
                                 lambda: tf.nn.dropout(in_tensor, 1.0,
                                                       noise_shape=tensor_shape),
                                 lambda: tf.nn.dropout(in_tensor, keep_prob,
                                                       noise_shape=tensor_shape))
            return out_tensor
    @staticmethod
    def spatial_dropout(in_tensor, keep_prob, evaluation):
        """ Spatial dropout: Not each neuron is dropped independently, but feature map wise.

        The noise shape [batch, 1, 1, channels] drops entire feature maps;
        dropout is disabled (keep probability 1.0) when `evaluation` is True.
        """
        with tf.variable_scope('spatial_dropout'):
            tensor_shape = in_tensor.get_shape().as_list()
            out_tensor = tf.cond(evaluation,
                                 lambda: tf.nn.dropout(in_tensor, 1.0,
                                                       noise_shape=tensor_shape),
                                 lambda: tf.nn.dropout(in_tensor, keep_prob,
                                                       noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
            return out_tensor
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
    """
    Crops an image. When factor is not given does an central crop.
    Inputs:
        image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
        crop_location: tensor, [batch, 2] which represent the height and width location of the crop
        crop_size: int, describes the extension of the crop
        scale: float or tensor; the crop window spans crop_size / scale source
            pixels and is resized back to crop_size (zoom factor)
    Outputs:
        image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
    """
    with tf.name_scope('crop_image_from_xy'):
        s = image.get_shape().as_list()
        assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"
        scale = tf.reshape(scale, [-1])
        crop_location = tf.cast(crop_location, tf.float32)
        crop_location = tf.reshape(crop_location, [s[0], 2])
        crop_size = tf.cast(crop_size, tf.float32)
        # extent of the window in source pixels (larger scale -> tighter crop)
        crop_size_scaled = crop_size / scale
        y1 = crop_location[:, 0] - crop_size_scaled//2
        y2 = y1 + crop_size_scaled
        x1 = crop_location[:, 1] - crop_size_scaled//2
        x2 = x1 + crop_size_scaled
        # crop_and_resize expects corner coordinates normalized by image size
        y1 /= s[1]
        y2 /= s[1]
        x1 /= s[2]
        x2 /= s[2]
        boxes = tf.stack([y1, x1, y2, x2], -1)
        crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
        box_ind = tf.range(s[0])
        image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
        return image_c
def find_max_location(scoremap):
    """ Returns the coordinates of the given scoremap with maximum value.

    Accepts a 2D, 3D or 4D scoremap (singleton batch / channel axes are
    normalized away first) and returns an int32 tensor of shape [batch, 2]
    with the (row, col) location of the per-map maximum.
    """
    with tf.variable_scope('find_max_location'):
        s = scoremap.get_shape().as_list()
        if len(s) == 4:
            scoremap = tf.squeeze(scoremap, [3])
        if len(s) == 2:
            scoremap = tf.expand_dims(scoremap, 0)
        s = scoremap.get_shape().as_list()
        assert len(s) == 3, "Scoremap must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "Scoremap must be [Batch, Width, Height]"
        # my meshgrid: X / Y hold the row / column index of every position
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])
        x_vec = tf.reshape(X, [-1])
        y_vec = tf.reshape(Y, [-1])
        # argmax over the flattened map, then look the 2D coordinates up
        scoremap_vec = tf.reshape(scoremap, [s[0], -1])
        max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)
        xy_loc = list()
        for i in range(s[0]):
            x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
            y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
            xy_loc.append(tf.concat([x_loc, y_loc], 0))
        xy_loc = tf.stack(xy_loc, 0)
        return xy_loc
def single_obj_scoremap(scoremap):
    """ Applies my algorithm to figure out the most likely object from a given segmentation scoremap.

    Starting at the foreground maximum, a region is grown by repeated
    dilation masked with the rounded foreground map, producing one connected
    object mask per batch element, shape [batch, H, W, 1].
    """
    with tf.variable_scope('single_obj_scoremap'):
        filter_size = 21
        s = scoremap.get_shape().as_list()
        assert len(s) == 4, "Scoremap must be 4D."
        scoremap_softmax = tf.nn.softmax(scoremap) #B, H, W, C --> normalizes across last dimension
        # foreground probability: best non-background class per pixel
        scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3) # B, H, W
        detmap_fg = tf.round(scoremap_fg) # B, H, W
        # find maximum in the fg scoremap
        max_loc = find_max_location(scoremap_fg)
        # use maximum to start "growing" our objectmap
        objectmap_list = list()
        kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)
        for i in range(s[0]):
            # create initial objectmap (put a one at the maximum)
            sparse_ind = tf.reshape(max_loc[i, :], [1, 2]) # reshape that its one point with 2dim)
            objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
            # grow the map by dilation and pixelwise and
            num_passes = max(s[1], s[2]) // (filter_size//2) # number of passes needes to make sure the map can spread over the whole image
            for j in range(num_passes):
                objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
                objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
                objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
                # keep only grown pixels that are also foreground detections
                objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
            objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
            objectmap_list.append(objectmap)
        objectmap = tf.stack(objectmap_list)
        return objectmap
def calc_center_bb(binary_class_mask):
    """ Returns the center of mass coordinates for the given binary_class_mask.

    Outputs (one entry per batch element):
        center: [batch, 2] midpoint of the bounding box of mask==1 pixels
        bb: [batch, 2, 2] bounding box as stacked (min, max) corners
        crop_size: [batch, 1] side length of the square covering the box
    Falls back to fixed values (center (160, 160), crop 100) when the mask is
    empty and the reductions yield non-finite numbers.
    """
    with tf.variable_scope('calc_center_bb'):
        binary_class_mask = tf.cast(binary_class_mask, tf.int32)
        binary_class_mask = tf.equal(binary_class_mask, 1)
        s = binary_class_mask.get_shape().as_list()
        if len(s) == 4:
            binary_class_mask = tf.squeeze(binary_class_mask, [3])
        s = binary_class_mask.get_shape().as_list()
        assert len(s) == 3, "binary_class_mask must be 3D."
        assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"
        # my meshgrid
        x_range = tf.expand_dims(tf.range(s[1]), 1)
        y_range = tf.expand_dims(tf.range(s[2]), 0)
        X = tf.tile(x_range, [1, s[2]])
        Y = tf.tile(y_range, [s[1], 1])
        bb_list = list()
        center_list = list()
        crop_size_list = list()
        for i in range(s[0]):
            # coordinates of all pixels belonging to the mask
            X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
            Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
            x_min = tf.reduce_min(X_masked)
            x_max = tf.reduce_max(X_masked)
            y_min = tf.reduce_min(Y_masked)
            y_max = tf.reduce_max(Y_masked)
            start = tf.stack([x_min, y_min])
            end = tf.stack([x_max, y_max])
            bb = tf.stack([start, end], 1)
            bb_list.append(bb)
            center_x = 0.5*(x_max + x_min)
            center_y = 0.5*(y_max + y_min)
            center = tf.stack([center_x, center_y], 0)
            # guard against empty masks (min/max over zero elements -> +/-inf)
            center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
                             lambda: tf.constant([160.0, 160.0]))
            center.set_shape([2])
            center_list.append(center)
            crop_size_x = x_max - x_min
            crop_size_y = y_max - y_min
            crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
            crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
                                lambda: tf.constant([100.0]))
            crop_size.set_shape([1])
            crop_size_list.append(crop_size)
        bb = tf.stack(bb_list)
        center = tf.stack(center_list)
        crop_size = tf.stack(crop_size_list)
        return center, bb, crop_size
def detect_keypoints(scoremaps):
    """ Locates the maximum of every scoremap channel.

    Given scoremaps of shape [H, W, C] (a singleton batch axis, if present,
    is squeezed away first), returns a [C, 2] array holding the (row, col)
    position of each channel's maximum.
    """
    if len(scoremaps.shape) == 4:
        scoremaps = np.squeeze(scoremaps)
    shape = scoremaps.shape
    assert len(shape) == 3, "This function was only designed for 3D Scoremaps."
    assert (shape[2] < shape[1]) and (shape[2] < shape[0]), "Probably the input is not correct, because [H, W, C] is expected."
    height, width, num_kp = shape
    keypoint_coords = np.zeros((num_kp, 2))
    for kp in range(num_kp):
        row, col = np.unravel_index(np.argmax(scoremaps[:, :, kp]), (height, width))
        keypoint_coords[kp] = (row, col)
    return keypoint_coords
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
    """ Transforms crop-local keypoint coords into global image coordinates.

    Inverts the cropping: the crop-centered offset is removed, the crop
    scaling is undone and the crop center is added back.

    Inputs:
        keypoints_crop_coords: array, [N, 2] coordinates inside the crop
        centers: array, [2] (or broadcastable) crop center in the full image
        scale: float, zoom factor that was applied when cropping
        crop_size: int, side length of the (square) crop
    Outputs:
        keypoints_coords: np.ndarray, [N, 2] coordinates in the full image
    """
    keypoints_coords = np.copy(keypoints_crop_coords)
    # The in-place "/=" below would raise (or truncate) on integer arrays;
    # promote to float while leaving float inputs' dtype untouched.
    if not np.issubdtype(keypoints_coords.dtype, np.floating):
        keypoints_coords = keypoints_coords.astype(np.float64)
    keypoints_coords -= crop_size // 2
    keypoints_coords /= scale
    keypoints_coords += centers
    return keypoints_coords
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
    """ Plots a hand stick figure into a matplotlib figure.

    coords_hw holds 21 keypoints as (row, col); each of the 20 bones is drawn
    with its own color unless color_fixed overrides them all.
    """
    # per-bone colors (blue wrist-side to red fingertip-side)
    colors = np.array([[0., 0., 0.5],
                       [0., 0., 0.73172906],
                       [0., 0., 0.96345811],
                       [0., 0.12745098, 1.],
                       [0., 0.33137255, 1.],
                       [0., 0.55098039, 1.],
                       [0., 0.75490196, 1.],
                       [0.06008855, 0.9745098, 0.90765338],
                       [0.22454143, 1., 0.74320051],
                       [0.40164453, 1., 0.56609741],
                       [0.56609741, 1., 0.40164453],
                       [0.74320051, 1., 0.22454143],
                       [0.90765338, 1., 0.06008855],
                       [1., 0.82861293, 0.],
                       [1., 0.63979666, 0.],
                       [1., 0.43645606, 0.],
                       [1., 0.2476398, 0.],
                       [0.96345811, 0.0442992, 0.],
                       [0.73172906, 0., 0.],
                       [0.5, 0., 0.]])
    # keypoint index pairs of the 20 bones: four per finger, rooted at the wrist (0)
    connections = [(0, 4), (4, 3), (3, 2), (2, 1),
                   (0, 8), (8, 7), (7, 6), (6, 5),
                   (0, 12), (12, 11), (11, 10), (10, 9),
                   (0, 16), (16, 15), (15, 14), (14, 13),
                   (0, 20), (20, 19), (19, 18), (18, 17)]
    for bone_id, (kp_a, kp_b) in enumerate(connections):
        pts = np.stack([coords_hw[kp_a, :], coords_hw[kp_b, :]])
        # matplotlib expects x (=col) first, then y (=row)
        if color_fixed is None:
            axis.plot(pts[:, 1], pts[:, 0], color=colors[bone_id, :], linewidth=linewidth)
        else:
            axis.plot(pts[:, 1], pts[:, 0], color_fixed, linewidth=linewidth)
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
    """ Plots a hand stick figure into a matplotlib figure.

    coords_xyz holds 21 keypoints as (x, y, z); each of the 20 bones is drawn
    with its own color unless color_fixed overrides them all. The view is set
    to look along the z axis afterwards.
    """
    # per-bone colors (blue wrist-side to red fingertip-side)
    colors = np.array([[0., 0., 0.5],
                       [0., 0., 0.73172906],
                       [0., 0., 0.96345811],
                       [0., 0.12745098, 1.],
                       [0., 0.33137255, 1.],
                       [0., 0.55098039, 1.],
                       [0., 0.75490196, 1.],
                       [0.06008855, 0.9745098, 0.90765338],
                       [0.22454143, 1., 0.74320051],
                       [0.40164453, 1., 0.56609741],
                       [0.56609741, 1., 0.40164453],
                       [0.74320051, 1., 0.22454143],
                       [0.90765338, 1., 0.06008855],
                       [1., 0.82861293, 0.],
                       [1., 0.63979666, 0.],
                       [1., 0.43645606, 0.],
                       [1., 0.2476398, 0.],
                       [0.96345811, 0.0442992, 0.],
                       [0.73172906, 0., 0.],
                       [0.5, 0., 0.]])
    # keypoint index pairs of the 20 bones: four per finger, rooted at the wrist (0)
    connections = [(0, 4), (4, 3), (3, 2), (2, 1),
                   (0, 8), (8, 7), (7, 6), (6, 5),
                   (0, 12), (12, 11), (11, 10), (10, 9),
                   (0, 16), (16, 15), (15, 14), (14, 13),
                   (0, 20), (20, 19), (19, 18), (18, 17)]
    for bone_id, (kp_a, kp_b) in enumerate(connections):
        pts = np.stack([coords_xyz[kp_a, :], coords_xyz[kp_b, :]])
        if color_fixed is None:
            axis.plot(pts[:, 0], pts[:, 1], pts[:, 2], color=colors[bone_id, :], linewidth=linewidth)
        else:
            axis.plot(pts[:, 0], pts[:, 1], pts[:, 2], color_fixed, linewidth=linewidth)
    axis.view_init(azim=-90., elev=90.)
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
    """ Plots a hand stick figure into a matplotlib figure.

    Draws the 20 bones directly into `image` with cv2.line. coords_hw holds
    21 keypoints as (row, col); endpoints are converted to integer (x, y).
    """
    # per-bone BGR color tuples
    colors = [(0, 0, 127),
              (0, 0, 187),
              (0, 0, 246),
              (0, 32, 255),
              (0, 85, 255),
              (0, 140, 255),
              (0, 192, 255),
              (15, 248, 231),
              (57, 255, 190),
              (102, 1, 144),
              (144, 1, 102),
              (190, 1, 57),
              (231, 1, 15),
              (1, 211, 0),
              (1, 163, 0),
              (1, 111, 0),
              (1, 63, 0),
              (246, 11, 0),
              (187, 0, 0),
              (127, 0, 0)]
    # keypoint index pairs of the 20 bones: four per finger, rooted at the wrist (0)
    connections = [(0, 4), (4, 3), (3, 2), (2, 1),
                   (0, 8), (8, 7), (7, 6), (6, 5),
                   (0, 12), (12, 11), (11, 10), (10, 9),
                   (0, 16), (16, 15), (15, 14), (14, 13),
                   (0, 20), (20, 19), (19, 18), (18, 17)]
    for bone_id, (kp_a, kp_b) in enumerate(connections):
        pt_a = coords_hw[kp_a, :]
        pt_b = coords_hw[kp_b, :]
        # cv2 expects (x, y), i.e. (col, row)
        pt_a_xy = (int(pt_a[1]), int(pt_a[0]))
        pt_b_xy = (int(pt_b[1]), int(pt_b[0]))
        if color_fixed is None:
            # NOTE: endpoints deliberately passed (b, a) here, matching the
            # original implementation's argument order in this branch
            cv2.line(image, pt_b_xy, pt_a_xy, colors[bone_id], linewidth)
        else:
            cv2.line(image, pt_a_xy, pt_b_xy, color_fixed, linewidth)
class LearningRateScheduler:
    """
    Provides scalar tensors at certain iteration as is needed for a multistep learning rate schedule.
    """
    def __init__(self, steps, values):
        # steps: iteration numbers at which the learning rate switches
        # values: learning rates used between those steps (one more than steps)
        self.steps = steps
        self.values = values
        assert len(steps)+1 == len(values), "There must be one more element in value as step."
    def get_lr(self, global_step):
        """ Builds and returns the learning-rate tensor for the given global_step tensor. """
        with tf.name_scope('lr_scheduler'):
            if len(self.values) == 1: #1 value -> no step
                learning_rate = tf.constant(self.values[0])
            elif len(self.values) == 2: #2 values -> one step
                cond = tf.greater(global_step, self.steps[0])
                learning_rate = tf.where(cond, self.values[1], self.values[0])
            else: # n values -> n-1 steps
                # build one condition per interval: before the first step,
                # between consecutive steps, and at/after the last step
                cond_first = tf.less(global_step, self.steps[0])
                cond_between = list()
                for ind, step in enumerate(range(0, len(self.steps)-1)):
                    cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),
                                                       tf.greater_equal(global_step, self.steps[ind])))
                cond_last = tf.greater_equal(global_step, self.steps[-1])
                cond_full = [cond_first]
                cond_full.extend(cond_between)
                cond_full.append(cond_last)
                # exactly one condition is True; mask out the rest and sum
                cond_vec = tf.stack(cond_full)
                lr_vec = tf.stack(self.values)
                learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
                learning_rate = tf.reduce_sum(learning_rate)
            return learning_rate
class EvalUtil:
    """ Util class for evaluation networks.

    Accumulates per-keypoint euclidean errors via feed() and summarizes them
    into EPE (mean / median) and PCK / AUC measures.
    """
    def __init__(self, num_kp=21):
        # one list of euclidean errors per keypoint
        self.num_kp = num_kp
        self.data = [list() for _ in range(num_kp)]
    def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
        """ Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
        gt = np.squeeze(keypoint_gt)
        pred = np.squeeze(keypoint_pred)
        visible = np.squeeze(keypoint_vis).astype('bool')
        assert len(gt.shape) == 2
        assert len(pred.shape) == 2
        assert len(visible.shape) == 1
        # euclidean distance per keypoint
        dist = np.sqrt(np.sum(np.square(gt - pred), axis=1))
        for kp_id in range(gt.shape[0]):
            if visible[kp_id]:
                self.data[kp_id].append(dist[kp_id])
    def _get_pck(self, kp_id, threshold):
        """ Returns pck for one keypoint for the given threshold. """
        errors = self.data[kp_id]
        if not errors:
            return None
        return np.mean(np.array(errors) <= threshold)
    def _get_epe(self, kp_id):
        """ Returns end point error for one keypoint. """
        errors = self.data[kp_id]
        if not errors:
            return None, None
        errors = np.array(errors)
        return np.mean(errors), np.median(errors)
    def get_measures(self, val_min, val_max, steps):
        """ Outputs the average mean and median error as well as the pck score. """
        thresholds = np.linspace(val_min, val_max, steps)
        # normalization so a constant PCK of 1.0 yields an AUC of 1.0
        norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
        epe_means, epe_medians = [], []
        aucs, pck_curves = [], []
        for kp_id in range(self.num_kp):
            mean, median = self._get_epe(kp_id)
            if mean is None:
                # keypoint was never visible -> no valid measurement
                continue
            epe_means.append(mean)
            epe_medians.append(median)
            curve = np.array([self._get_pck(kp_id, t) for t in thresholds])
            pck_curves.append(curve)
            aucs.append(np.trapz(curve, thresholds) / norm_factor)
        epe_mean_all = np.mean(np.array(epe_means))
        epe_median_all = np.mean(np.array(epe_medians))
        auc_all = np.mean(np.array(aucs))
        pck_curve_all = np.mean(np.array(pck_curves), 0)  # mean only over keypoints
        return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
    """ Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
    var_to_shape_map = reader.get_variable_to_shape_map()
    # Remove everything from the discard list
    if discard_list is not None:
        kept = {name: shape for name, shape in var_to_shape_map.items()
                if not any(pattern in name for pattern in discard_list)}
        num_disc = len(var_to_shape_map) - len(kept)
        var_to_shape_map = kept
        print('Discarded %d items' % num_disc)
    # rename everything according to rename_dict and pull the actual tensors
    num_rename = 0
    renamed_map = dict()
    for name in var_to_shape_map.keys():
        new_name = name
        if rename_dict is not None:
            for pattern, replacement in rename_dict.items():
                if pattern in name:
                    new_name = new_name.replace(pattern, replacement)
                    num_rename += 1
        renamed_map[new_name] = reader.get_tensor(name)
    var_to_shape_map = renamed_map
    init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
    session.run(init_op, init_feed)
    print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def calc_auc(x, y):
    """ Normalized area under the curve y(x).

    The trapezoidal integral of y over x is divided by the length of the x
    interval (the integral of 1 over x), so a curve that is constantly 1.0
    yields an AUC of exactly 1.0.
    """
    numerator = np.trapz(y, x)
    denominator = np.trapz(np.ones_like(y), x)
    return numerator / denominator
def get_stb_ref_curves():
    """
    Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:
    Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016

    Each entry of the returned list is (thresholds_mm, pck_values, label).
    """
    thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
    # baseline name -> PCK values at the thresholds above
    baselines = [
        ('PSO', np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])),
        ('ICPPSO', np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])),
        ('CHPR', np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])),
    ]
    curve_list = [(thresh_mm, values, '%s (AUC=%.3f)' % (name, calc_auc(thresh_mm, values)))
                  for name, values in baselines]
    return curve_list
| utils/general.py | 29,686 | Util class for evaluation networks.
Provides scalar tensors at certain iteration as is needed for a multistep learning rate schedule.
Operations that are frequently used within networks.
Returns end point error for one keypoint.
Returns pck for one keypoint for the given threshold.
Given x and y values it calculates the approx. integral and normalizes it: area under curve
Returns the center of mass coordinates for the given binary_class_mask.
Crops an image. When factor is not given does an central crop.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
Performs detection per scoremap for the hands keypoints.
Dropout: Each neuron is dropped independently.
Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible.
Returns the coordinates of the given scoremap with maximum value.
Outputs the average mean and median error as well as the pck score.
Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:
Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016
Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed.
Plots a hand stick figure into a matplotlib figure.
Plots a hand stick figure into a matplotlib figure.
Plots a hand stick figure into a matplotlib figure.
Applies my algorithm to figure out the most likely object from a given segmentation scoremap.
Spatial dropout: Not each neuron is dropped independently, but feature map wise.
Transforms coords into global image coordinates.
-*- coding: utf-8 -*- ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image Copyright (C) 2017 Christian Zimmermann This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. conv bias conv bias weight matrix bias my meshgridB, H, W, C --> normalizes across last dimension B, H, W B, H, W find maximum in the fg scoremap use maximum to start "growing" our objectmap create initial objectmap (put a one at the maximum) reshape that its one point with 2dim) grow the map by dilation and pixelwise and number of passes needes to make sure the map can spread over the whole image my meshgrid define connections and colors of the bones define connections and colors of the bones define connections and colors of the bones1 value -> no step2 values -> one step n values -> n-1 steps init empty data storage calc euclidean distance init mean measures Create one plot for each part mean/median error there was no valid measurement for this keypoint pck/auc mean only over keypoints Remove everything from the discard list rename everything according to rename_dict | 3,538 | en | 0.859506 |
# -*- coding: utf-8 -*-
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from bionev.utils import *
def _edge_feature_dataset(embedding_look_up, pos_edges, neg_edges):
    """ Builds (features, labels): concatenated endpoint embeddings per edge, positives first. """
    X, y = [], []
    for label, edges in ((1, pos_edges), (0, neg_edges)):
        for edge in edges:
            feature_vector = np.append(embedding_look_up[edge[0]], embedding_look_up[edge[1]])
            X.append(feature_vector)
            y.append(label)
    return X, y
def LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):
    """ Evaluates node embeddings on link prediction.

    Trains a logistic regression on concatenated edge-endpoint embeddings
    (positives from train_graph plus sampled negatives) and reports metrics
    on held-out positive/negative test edges.

    Inputs:
        embedding_look_up: dict-like, node id -> embedding vector
        original_graph: full graph, used for sampling negative edges
        train_graph: graph whose edges are the positive training examples
        test_pos_edges: list of held-out positive edges
        seed: RNG seed for negative sampling, shuffling and the classifier
    Outputs:
        (auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3)
    """
    random.seed(seed)
    train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)
    # create a auxiliary graph to ensure that testing negative edges will not used in training
    G_aux = copy.deepcopy(original_graph)
    G_aux.add_edges_from(train_neg_edges)
    test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)
    # construct X_train, y_train, X_test, y_test
    X_train, y_train = _edge_feature_dataset(embedding_look_up, train_graph.edges(), train_neg_edges)
    X_test, y_test = _edge_feature_dataset(embedding_look_up, test_pos_edges, test_neg_edges)
    # shuffle for training and testing
    c = list(zip(X_train, y_train))
    random.shuffle(c)
    X_train, y_train = zip(*c)
    c = list(zip(X_test, y_test))
    random.shuffle(c)
    X_test, y_test = zip(*c)
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    clf1 = LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs')
    clf1.fit(X_train, y_train)
    y_pred_proba = clf1.predict_proba(X_test)[:, 1]
    y_pred = clf1.predict(X_test)
    auc_roc = roc_auc_score(y_test, y_pred_proba)
    avg_pr = average_precision_score(y_test, y_pred_proba)
    precision = precision_score(y_test, y_pred, average='binary')
    recall = recall_score(y_test, y_pred, average='binary')
    accuracy = accuracy_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    mcc = matthews_corrcoef(y_test, y_pred)
    # Reuse y_pred: the original called clf1.predict(X_test) twice more, which
    # is redundant (deterministic, identical output) and doubled the cost.
    # NOTE: with three identical prediction vectors top_3 always equals top_1.
    top_1, top_3 = predHits(y_test, y_pred, y_pred, y_pred)
    print('#' * 35 + ' Link Prediction Performance ' + '#' * 35)
    print(f'AUC-ROC: {auc_roc:.3f}, AVG-PR: {avg_pr:.3f}, Precision: {precision:.3f}, Recall: {recall:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}, MCC: {mcc:.3f}, Top_1: {top_1:.3f}, Top_3: {top_3:.3f}')
    print('#' * 100)
    return auc_roc, avg_pr, precision, recall, accuracy, f1, mcc, top_1, top_3
def NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):
    """ Trains a one-vs-rest logistic regression on node embeddings and reports
    accuracy / micro-F1 / macro-F1 on a held-out split. """
    X_train, y_train, X_test, y_test = split_train_test_classify(embedding_look_up, node_list, labels,
                                                                 testing_ratio=testing_ratio,seed=seed)
    # binarize the label sets of both splits with a shared vocabulary
    binarizer = MultiLabelBinarizer(sparse_output=True)
    binarizer.fit(np.append(y_train, y_test))
    y_train = binarizer.transform(y_train).todense()
    y_test = binarizer.transform(y_test).todense()
    classifier = OneVsRestClassifier(LogisticRegression(random_state=seed, max_iter=1000, solver='lbfgs'))
    classifier.fit(X_train, y_train)
    ## small trick : we assume that we know how many label to predict
    y_pred = get_y_pred(y_test, classifier.predict_proba(X_test))
    accuracy = accuracy_score(y_test, y_pred)
    micro_f1 = f1_score(y_test, y_pred, average="micro")
    macro_f1 = f1_score(y_test, y_pred, average="macro")
    print('#' * 9 + ' Node Classification Performance ' + '#' * 9)
    print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')
    print('#' * 50)
    return accuracy, micro_f1, macro_f1
def predHits(truth, pred1, pred2, pred3):
    """ Computes top-1 and top-3 hit rates of three prediction vectors.

    Inputs:
        truth: array-like of ground-truth labels
        pred1, pred2, pred3: array-like predictions; each is rounded to the
            nearest integer before comparison
    Outputs:
        top_1: fraction of samples where pred1 matches truth
        top_3: fraction of samples where any of the three matches truth
    """
    truth = np.asarray(truth)
    pred1 = np.rint(pred1).astype(np.int32)
    pred2 = np.rint(pred2).astype(np.int32)
    pred3 = np.rint(pred3).astype(np.int32)
    # vectorized comparison instead of the original per-index Python loop
    hit1 = truth == pred1
    hit3 = hit1 | (truth == pred2) | (truth == pred3)
    top_1 = np.count_nonzero(hit1) / len(truth)
    top_3 = np.count_nonzero(hit3) / len(truth)
    return top_1, top_3
| src/bionev/evaluation.py | 5,012 | -*- coding: utf-8 -*- create a auxiliary graph to ensure that testing negative edges will not used in training construct X_train, y_train, X_test, y_test shuffle for training and testing small trick : we assume that we know how many label to predict | 249 | en | 0.916849 |
'''
A command library help user upload their results to dashboard.
'''
#!/usr/bin/env python
import json
import argparse
from .._utils import file_utils
from . import main
def import_local_resources(args):
    '''Entrance of importing local resources.

    Parses the resource name and path from `args` and hands them to
    file_utils.import_local_resources, logging on success.
    '''
    parser = argparse.ArgumentParser(
        prog="cotk import",
        description="Import local resources")
    parser.add_argument("file_id", type=str, help="Name of resource")
    parser.add_argument("file_path", type=str, help="Path to resource")
    parsed = parser.parse_args(args)
    file_utils.import_local_resources(parsed.file_id, parsed.file_path)
    main.LOGGER.info("Successfully import local resource {}.".format(parsed.file_id))
| cotk/scripts/import_local_resources.py | 686 | Entrance of importing local resources
A command library help user upload their results to dashboard.
!/usr/bin/env python | 122 | en | 0.87018 |
# coding: utf-8
import numpy as np
from frequent_direction import FrequentDirection
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_kernels
def laplacian_sketch(X,ell,k,do_normalize_feature,normed,callback,**args):
    """ Sketches the (optionally normalized) graph Laplacian of X row by row.

    Inputs:
        X: array [n_samples, n_features]; rows may be L2-normalized in place
        ell: sketch size passed to FrequentDirection
        k: rank passed to FrequentDirection
        do_normalize_feature: bool, if True L2-normalize rows of X first
            (for cosine-similarity affinities)
        normed: bool, if True build the symmetrically normalized Laplacian
        callback: callable(X, i, **args) returning the affinity row of sample i
    Outputs:
        (sketch, D): transposed sketch matrix and the degree vector
    """
    fd = FrequentDirection(ell,k)
    # degree of every node: sum of its affinity row
    D = np.array([np.sum(callback(X,i,**args)) for i in range(len(X))])
    if normed:
        D = np.sqrt(D)
    isolation_mask = D==0
    if do_normalize_feature:
        # normalize original feature (for cosine distance); "~" (not "-") is
        # the element-wise negation numpy supports for boolean masks
        X[~isolation_mask] = normalize(X[~isolation_mask],norm='l2', axis=1, copy=False)
        D[:] = 1 # set 1 even to D==0 samples to avoid 0 division.
    for i,isolation in enumerate(isolation_mask):
        A_i = -1 * callback(X,i,**args)
        if normed:
            # symmetric normalization: A_i[j] = -W_ij / (sqrt(D_i) * sqrt(D_j))
            A_i /= D[i]
            A_i /= D
            A_i[i] = 1 - isolation # set 0 to isolated node.
        else:
            A_i[i] = D[i]
        fd.add_sample(-A_i)
    return fd.get_result().T, D
def laplacian_sketch_rbf_kernel(X,ell,k,normed=True,gamma=None):
    """ Laplacian sketch with an RBF-kernel affinity.

    The caller-supplied gamma is forwarded to the kernel; the original
    version hard-coded gamma=None and silently ignored the argument.
    The default (gamma=None) keeps the previous behavior.
    """
    return laplacian_sketch(X,ell,k,False,normed,one_row_rbf_kernel,gamma=gamma)
def laplacian_sketch_cosine_similarity(X,ell,k,normed=True):
    # Convenience wrapper: cosine-similarity affinity with feature
    # normalization enabled (rows of X are L2-normalized in place).
    return laplacian_sketch(X,ell,k,True,normed,one_row_cosine_similarity)
def one_row_rbf_kernel(X, i, gamma=None):
    """Compute row *i* of the RBF (Gaussian) kernel matrix of ``X``.

    K(x_j, x_i) = exp(-gamma * ||x_j - x_i||^2) for every sample x_j.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    i : index of the target sample (row of the kernel to compute)
    gamma : float, optional
        Kernel width; defaults to 1.0 / n_samples_X when None.

    Returns
    -------
    ndarray of shape (n_samples_X,) — kernel values against sample i.
    """
    if gamma is None:
        gamma = 1.0 / X.shape[0]
    diff = X - X[i]
    sq_dists = (diff ** 2).sum(axis=1)
    return np.exp(-gamma * sq_dists)
def one_row_cosine_similarity(X, i):
    """Row *i* of a shifted cosine-similarity matrix.

    Parameters
    ----------
    X : matrix with L2-normalized rows, so dot products are cosine
        similarities in [-1, 1].
    i : index of the target sample.

    Returns
    -------
    Similarities mapped affinely from [-1, 1] to [0, 1], clipped at 0
    for numerical safety.
    """
    shifted = (np.dot(X, X[i].T) + 1) / 2
    shifted[shifted < 0] = 0
    return shifted
def debug_one_row_rbf_kernel(X, gamma=None):
    """Debug helper: rebuild the full RBF kernel row by row.

    Assembles W from one_row_rbf_kernel and computes sklearn's
    pairwise_kernels result for comparison; inspect W / W_gt in a
    debugger (nothing is printed or returned).
    """
    n_samples = X.shape[0]
    W = np.zeros((n_samples, n_samples))
    W_gt = pairwise_kernels(X, metric='rbf',
                            filter_params=True,
                            gamma=gamma)
    for idx in range(n_samples):
        W[idx] = one_row_rbf_kernel(X, idx, gamma=gamma)
def debug_one_row_cosine_similarity(X):
    """Debug helper: rebuild the cosine-similarity matrix row by row.

    Prints the row-wise matrix W, sklearn's pairwise_kernels ground
    truth W_gt, and the summed difference between them.
    """
    n_samples = X.shape[0]
    W = np.zeros((n_samples, n_samples))
    W_gt = pairwise_kernels(X, metric='cosine',
                            filter_params=True)
    for idx in range(n_samples):
        W[idx] = one_row_cosine_similarity(X, idx)
    print(W)
    print(W_gt)
    print(np.sum(W - W_gt))
| spectral_clustering_fd/laplacian_sketch.py | 2,591 | X : normalized matrix
i : target sample in X
X : array of shape (n_samples_X, n_features)
i : target sample in X (X[i])
gamma : float, default None
If None, defaults to 1.0 / n_samples_X
K(x, y) = exp(-gamma ||x-xi||^2)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
coding: utf-8 normalize original feature (for cosine distance) set 1 even to D==0 samples to avoid 0 division. set 0 to isolated node.print(W)print(W_gt)print(np.sum(W-W_gt)) | 474 | en | 0.614375 |
#!/usr/bin/env python
import os
import xmltodict # sudo easy_install xmltodict
import subprocess
import zipfile
class PackAndroid(object):
    """Builds, signs and zipaligns a Xamarin.Android APK.

    Wraps the msbuild / jarsigner / zipalign toolchain: cleans the
    project, optionally bumps version info in AndroidManifest.xml,
    builds a signed package and copies it (plus mSYM symbols) to a
    destination folder.

    NOTE(review): this is Python 2 code (`print` statements, `file()`,
    `raw_input`); it exits the process via `exit()` when a required
    tool or file is missing.
    """

    def __init__(self, root, project_folder, project, input_apk, destination, keystore, keystore_alias, apk_name=None, zipalign=None, jarsigner=None, configuration='Release', keystore_password=None):
        """Resolve all tool/project paths relative to *root* and validate them.

        :param root: base directory that project, folder and keystore paths
            are joined against.
        :param project_folder: folder containing the .csproj (relative to root).
        :param project: path to the .csproj file (relative to root).
        :param input_apk: name of the raw apk produced by the build,
            relative to the project folder.
        :param destination: output folder for the final apk (~ is expanded).
        :param keystore: keystore path relative to root.
        :param keystore_alias: key alias used for signing; also the default
            apk name (lowercased) when apk_name is None.
        :param apk_name: optional base name for the final apk; falls back to
            the alias, then to the .csproj file name.
        :param zipalign: path to zipalign (default /usr/bin/zipalign).
        :param jarsigner: path to jarsigner (default /usr/bin/jarsigner).
        :param configuration: msbuild configuration (default Release).
        :param keystore_password: optional store password passed to jarsigner;
            when None, jarsigner prompts interactively.
        """
        self.name = project_folder
        self.proj_folder = project_folder
        self.project = project
        self.input_apk = input_apk
        self.destination = os.path.expanduser(destination)
        self.configuration = configuration
        self.keystore = keystore
        self.keystore_alias = keystore_alias
        self.keystore_password = keystore_password
        # Name of the final apk
        self.apk_name = apk_name
        if self.apk_name is None and self.keystore_alias is not None:
            self.apk_name = self.keystore_alias.lower()
        if self.apk_name is None:
            # Last resort: derive the apk name from the .csproj file name.
            projf = os.path.basename(project)
            self.apk_name = projf.replace('.csproj', '')
        # final_apk is a prefix here; run() appends build/version numbers
        # and the .apk extension.
        self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
        self.signed_apk = os.path.join(self.destination, "%s-signed.apk" % self.apk_name)
        self.zipalign = zipalign
        if self.zipalign is None:
            self.zipalign = '/usr/bin/zipalign'
        self.jarsigner = jarsigner
        if self.jarsigner is None:
            self.jarsigner = "/usr/bin/jarsigner"
        # Re-anchor the relative paths against root.
        self.keystore = os.path.join(root, self.keystore)
        self.project = os.path.join(root, self.project)
        self.proj_folder = os.path.join(root, self.proj_folder)
        self.input_apk = os.path.join(self.proj_folder, self.input_apk)
        # Fail fast if any required tool or file is missing.
        if not os.path.exists(self.keystore):
            exit("Failed to locate keystore - " + self.keystore)
        if not os.path.exists(self.zipalign):
            exit("Failed to locate zipalign - " + self.zipalign)
        if not os.path.exists(self.jarsigner):
            exit("Failed to locate jarsigner - " + self.jarsigner)

    def clean(self):
        """Delete the project's bin/ and obj/ build folders (via rm -fdr)."""
        bin_folder = os.path.join(self.proj_folder, 'bin')
        obj_folder = os.path.join(self.proj_folder, 'obj')
        if os.path.exists(bin_folder):
            print 'Clearing away ' + bin_folder
            os.system('rm -fdr ' + bin_folder)
        if os.path.exists(obj_folder):
            print 'Clearing away ' + obj_folder
            os.system('rm -fdr ' + obj_folder)

    def get_manifest_dictionary(self):
        """Parse Properties/AndroidManifest.xml into an xmltodict dict."""
        manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
        if not os.path.exists(manifest):
            exit("Failed to locate AndroidManifest.xml - " + manifest)
        f = file(manifest)
        xml = f.read()
        f.close()
        doc = xmltodict.parse(xml)
        return doc

    def get_build_number(self):
        """Return android:versionCode from the manifest (as a string)."""
        doc = self.get_manifest_dictionary()
        return doc['manifest']['@android:versionCode']

    def get_version_number(self):
        """Return android:versionName from the manifest."""
        doc = self.get_manifest_dictionary()
        return doc['manifest']['@android:versionName']

    def set_build_number(self, build_num):
        """Write android:versionCode back to the manifest.

        Re-serializes the whole manifest with xmltodict, so attribute
        ordering/formatting may change.
        """
        doc = self.get_manifest_dictionary()
        doc['manifest']['@android:versionCode'] = build_num
        xml = xmltodict.unparse(doc, pretty=True)
        manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
        if not os.path.exists(manifest):
            exit("Failed to locate AndroidManifest.xml - " + manifest)
        f = file(manifest, 'w')
        f.write(xml)
        f.close()

    def increment_build_number(self):
        """Bump android:versionCode by one (falls back to "1" when unset)."""
        build_number = self.get_build_number()
        if build_number is None:
            build_number = "1"
        else:
            build_number = str(int(build_number)+1)
        self.set_build_number(build_number)

    def decrement_build_number(self):
        """Decrease android:versionCode by one (falls back to "1" when unset)."""
        build_number = self.get_build_number()
        if build_number is None:
            build_number = "1"
        else:
            build_number = str(int(build_number)-1)
        self.set_build_number(build_number)

    def set_version_number(self, version):
        """Write android:versionName back to the manifest."""
        doc = self.get_manifest_dictionary()
        doc['manifest']['@android:versionName'] = version
        xml = xmltodict.unparse(doc, pretty=True)
        manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
        if not os.path.exists(manifest):
            exit("Failed to locate AndroidManifest.xml - " + manifest)
        f = file(manifest, 'w')
        f.write(xml)
        f.close()

    def build(self):
        """Run msbuild UpdateAndroidResources then SignAndroidPackage.

        Exits the process if the expected raw apk was not produced.
        """
        cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (self.project, self.configuration)
        os.system(cmd_update)
        cmd = "msbuild %s /t:SignAndroidPackage /p:Configuration=%s" % (self.project, self.configuration)
        os.system(cmd)
        if not os.path.exists(self.input_apk):
            exit("Failed to build raw apk, i.e. its missing - " + self.input_apk)

    @staticmethod
    def convert_windows_path(any_path):
        """Convert backslash-separated Windows paths to forward slashes."""
        chars = []
        for i in range(len(any_path)):
            char = any_path[i]
            if char == '\\':
                chars.append('/')
            else:
                chars.append(char)
        return ''.join(chars)

    @staticmethod
    def update_solution_resources(solution,configuration):
        """Run the UpdateAndroidResources msbuild target for every project
        referenced by a .sln file.

        Project paths are extracted by naive line parsing of the solution's
        Project(...) entries and converted from Windows separators.
        """
        if not os.path.exists(solution):
            exit("Failed to locate %s - " % os.path.basename(solution))
        f = file(solution)
        sln = f.read()
        f.close()
        projects = []
        lines = sln.split('\n')
        for line in lines:
            if line.startswith("Project("):
                # Project("{GUID}") = "Name", "relative\path.csproj", ...
                # take the second comma-separated field (the project path).
                start = line.find(",")
                rest = line[start+3:len(line)]
                end = rest.find(",")
                projects.append(os.path.abspath(os.path.join(os.path.dirname(solution),PackAndroid.convert_windows_path(rest[0:end-1]))))
        # print projects
        for project in projects:
            cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (project, configuration)
            os.system(cmd_update)

    def sign(self):
        """Sign the raw apk with jarsigner, then zipalign it to final_apk.

        The intermediate signed apk is removed once the aligned final apk
        exists.
        """
        sign_cmd = [self.jarsigner, "-verbose", "-sigalg", "MD5withRSA", "-digestalg", "SHA1", "-keystore", self.keystore]
        if not self.keystore_password is None:
            sign_cmd.extend(["-storepass",self.keystore_password])
        sign_cmd.extend(["-signedjar", self.signed_apk, self.input_apk, self.keystore_alias])
        subprocess.call(sign_cmd)
        subprocess.call([self.zipalign, "-f", "-v", "4", self.signed_apk, self.final_apk])
        if os.path.exists(self.final_apk):
            if os.path.exists(self.signed_apk):
                os.system('rm ' + self.signed_apk)

    def update_version(self):
        """Interactively offer to bump versionCode and change versionName."""
        build_number = self.get_build_number()
        print build_number
        q = raw_input("Would you like to increment the build number for %s? y/n\n> " % self.apk_name)
        if q == "y":
            build_number = str(int(build_number)+1)
            self.set_build_number(build_number)
        version_number = self.get_version_number()
        print version_number
        q = raw_input("Would you like to change the version number for %s? y/n\n> " % self.apk_name)
        if q == "y":
            version_number = raw_input("What to?> ")
            self.set_version_number(version_number)

    def copy_symbols(self):
        """Zip the first *.mSYM folder from bin/Release to the desktop.

        NOTE(review): looks hard-coded to the Release artifacts folder even
        if another configuration was used — confirm before reusing.
        """
        artifacts_folder = os.path.join(self.proj_folder, 'bin', 'Release')
        stuff = os.listdir(artifacts_folder)
        msym_folder = None
        for name in stuff:
            if name.endswith(".mSYM"):
                msym_folder = os.path.join(artifacts_folder, name)
                break
        if msym_folder is not None:
            def zipdir(path, ziph):
                # Walk the mSYM folder, storing entries relative to its parent.
                for root, dirs, files in os.walk(path):
                    for file in files:
                        ziph.write(os.path.join(root, file),os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
            msym_destination = os.path.join(os.path.expanduser("~/Desktop/"), os.path.basename(self.final_apk)) + ".mSYM.zip"
            zipf = zipfile.ZipFile(msym_destination, 'w', zipfile.ZIP_DEFLATED)
            zipdir(msym_folder, zipf)
            zipf.close()

    def run(self, update_versions=True, confirm_build=True):
        """Full pipeline: clean, (optionally) bump versions, build, sign,
        copy symbols.

        :param update_versions: when True, interactively offer version bumps.
        :param confirm_build: when True, ask for confirmation before building.
        :return: path of the final apk, or None if the user aborted.
        """
        self.clean()
        # Reset the prefix; the versioned file name is appended below.
        self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
        if update_versions:
            self.update_version()
        build_number = self.get_build_number()
        version_number = self.get_version_number()
        if confirm_build:
            print 'So thats version ' + version_number + " build " + build_number
            q = raw_input("Would you like to continue? y/n\n> ")
            if q != "y":
                print "Ok, not doing the build, suit yourself..."
                return None
        self.final_apk = self.final_apk + build_number + '-' + version_number + '.apk'
        print self.final_apk
        self.build()
        self.sign()
        self.copy_symbols()
        return self.final_apk
| packandroid.py | 9,111 | !/usr/bin/env python sudo easy_install xmltodict Name of the final apk print projects | 85 | en | 0.458 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.