code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
__docformat__ = 'epytext en'
import os, re
from codebay.l2tpserver import constants
from codebay.l2tpserver import helpers
# list of pci hw files
_hwdata_sources = constants.PCI_HWDATA_SOURCES
# NB: some pci data has broken IDs, e.g. '003' instead of '0003' or
# '01234' instead of '1234' We need to fix these or skip. Regexps
# below accept them and _fix_id() later converts them to valid IDs.
_re_class_line = re.compile(r'^C')
_re_vendor_line = re.compile(r'^([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
_re_device_line = re.compile(r'^\t([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
_re_subvendordevice_line = re.compile(r'^\t\t([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
class PciData:
def __init__(self):
self.vendors = {} # '8086' -> name
self.devices = {} # '8086:1234' -> device name
for i in _hwdata_sources:
try:
# this is assumed to parse all formats
self._parse_pciids_hwdata(i)
except:
pass
# FIXME: 2009-10-20: _fix_id() throws exception in some cases, len(id) is applied
# to an object which does not support id() -- make this more robust
def _fix_id(self, id):
if len(id) == 4:
return id
if len(id) < 4:
return ('0000' + id)[-4:]
if len(id) > 4:
return id[-4:] # XXX: this assumes the ID is of the form '<bogus>1234'
def _parse_pciids_hwdata(self, name):
f = None
try:
f = open(name, 'rb')
vendor = None
while True:
l = f.readline()
if l == '': break
# skip class lines
m = _re_class_line.match(l)
if m is not None:
continue
m = _re_vendor_line.match(l)
if m is not None:
vendor = self._fix_id(m.group(1))
if not self.vendors.has_key(vendor):
self.vendors[vendor] = m.group(2)
continue
m = _re_device_line.match(l)
if m is not None:
device = self._fix_id(m.group(1))
if vendor is None:
# XXX: warning?
continue
str = '%s:%s' % (vendor, device)
if not self.devices.has_key(str):
self.devices[str] = m.group(2)
continue
m = _re_subvendordevice_line.match(l)
if m is not None:
subvendor, subdevice = self._fix_id(m.group(1)), self._fix_id(m.group(2))
# XXX: We skip these now
str = '%s:%s' % (subvendor, subdevice)
if not self.devices.has_key(str):
self.devices[str] = m.group(3)
except:
# XXX: no use to raise here
pass
if f is not None:
f.close()
def pci_vendor_lookup(self, vendor):
# FIXME: here 'vendor' may not be a string
id = self._fix_id(vendor)
if self.vendors.has_key(id):
return self.vendors[id]
return None
def pci_device_lookup(self, vendor, device):
str = '%s:%s' % (self._fix_id(vendor), self._fix_id(device))
if self.devices.has_key(str):
return self.devices[str]
return None
class NetworkDeviceInfo:
def __init__(self):
self.device = None
self.vendor_id = None
self.device_id = None
self.vendor_string = None
self.device_string = None
self.mac = None
self.vmware = False
# XXX: virtual pc, virtual server
# XXX: parallels
def _readfile(self, name):
t = None
f = None
try:
if os.path.exists(name):
f = open(name, 'rb')
t = f.read()
t = t.strip()
f.close()
f = None
except:
pass
if f is not None: f.close()
return t
def _identify(self, dev, pcidata):
"""Call only once."""
self.device = dev
dir1 = '/sys/class/net/%s' % dev
dir2 = os.path.join(dir1, 'device')
if not os.path.exists(dir1):
return
self.mac = self._readfile(os.path.join(dir1, 'address'))
if not os.path.exists(dir2):
return
self.vendor_id = self._readfile(os.path.join(dir2, 'vendor'))
self.device_id = self._readfile(os.path.join(dir2, 'device'))
self.vendor_string = pcidata.pci_vendor_lookup(self.vendor_id)
self.device_string = pcidata.pci_device_lookup(self.vendor_id, self.device_id)
self.vmware = helpers.host_is_vmware()
_global_pcidata = None
def initialize_database():
"""Initialize the (PCI) device database.
This takes a few seconds, and is initialized also "on demand" if
not initialized manually beforehand.
"""
global _global_pcidata
# takes a few seconds to load...
if _global_pcidata is None:
_global_pcidata = PciData()
def identify_device(devname):
"""Identify a (PCI) network device.
To speed up, call initialize_database() beforehand. The device
database is initialized only once
"""
initialize_database()
i = NetworkDeviceInfo()
i._identify(devname, _global_pcidata)
return i | src/python/codebay/l2tpserver/netidentify.py | __docformat__ = 'epytext en'
import os, re
from codebay.l2tpserver import constants
from codebay.l2tpserver import helpers
# list of pci hw files
_hwdata_sources = constants.PCI_HWDATA_SOURCES
# NB: some pci data has broken IDs, e.g. '003' instead of '0003' or
# '01234' instead of '1234' We need to fix these or skip. Regexps
# below accept them and _fix_id() later converts them to valid IDs.
_re_class_line = re.compile(r'^C')
_re_vendor_line = re.compile(r'^([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
_re_device_line = re.compile(r'^\t([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
_re_subvendordevice_line = re.compile(r'^\t\t([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+(.*?)\s+\n?$')
class PciData:
def __init__(self):
self.vendors = {} # '8086' -> name
self.devices = {} # '8086:1234' -> device name
for i in _hwdata_sources:
try:
# this is assumed to parse all formats
self._parse_pciids_hwdata(i)
except:
pass
# FIXME: 2009-10-20: _fix_id() throws exception in some cases, len(id) is applied
# to an object which does not support id() -- make this more robust
def _fix_id(self, id):
if len(id) == 4:
return id
if len(id) < 4:
return ('0000' + id)[-4:]
if len(id) > 4:
return id[-4:] # XXX: this assumes the ID is of the form '<bogus>1234'
def _parse_pciids_hwdata(self, name):
f = None
try:
f = open(name, 'rb')
vendor = None
while True:
l = f.readline()
if l == '': break
# skip class lines
m = _re_class_line.match(l)
if m is not None:
continue
m = _re_vendor_line.match(l)
if m is not None:
vendor = self._fix_id(m.group(1))
if not self.vendors.has_key(vendor):
self.vendors[vendor] = m.group(2)
continue
m = _re_device_line.match(l)
if m is not None:
device = self._fix_id(m.group(1))
if vendor is None:
# XXX: warning?
continue
str = '%s:%s' % (vendor, device)
if not self.devices.has_key(str):
self.devices[str] = m.group(2)
continue
m = _re_subvendordevice_line.match(l)
if m is not None:
subvendor, subdevice = self._fix_id(m.group(1)), self._fix_id(m.group(2))
# XXX: We skip these now
str = '%s:%s' % (subvendor, subdevice)
if not self.devices.has_key(str):
self.devices[str] = m.group(3)
except:
# XXX: no use to raise here
pass
if f is not None:
f.close()
def pci_vendor_lookup(self, vendor):
# FIXME: here 'vendor' may not be a string
id = self._fix_id(vendor)
if self.vendors.has_key(id):
return self.vendors[id]
return None
def pci_device_lookup(self, vendor, device):
str = '%s:%s' % (self._fix_id(vendor), self._fix_id(device))
if self.devices.has_key(str):
return self.devices[str]
return None
class NetworkDeviceInfo:
def __init__(self):
self.device = None
self.vendor_id = None
self.device_id = None
self.vendor_string = None
self.device_string = None
self.mac = None
self.vmware = False
# XXX: virtual pc, virtual server
# XXX: parallels
def _readfile(self, name):
t = None
f = None
try:
if os.path.exists(name):
f = open(name, 'rb')
t = f.read()
t = t.strip()
f.close()
f = None
except:
pass
if f is not None: f.close()
return t
def _identify(self, dev, pcidata):
"""Call only once."""
self.device = dev
dir1 = '/sys/class/net/%s' % dev
dir2 = os.path.join(dir1, 'device')
if not os.path.exists(dir1):
return
self.mac = self._readfile(os.path.join(dir1, 'address'))
if not os.path.exists(dir2):
return
self.vendor_id = self._readfile(os.path.join(dir2, 'vendor'))
self.device_id = self._readfile(os.path.join(dir2, 'device'))
self.vendor_string = pcidata.pci_vendor_lookup(self.vendor_id)
self.device_string = pcidata.pci_device_lookup(self.vendor_id, self.device_id)
self.vmware = helpers.host_is_vmware()
_global_pcidata = None
def initialize_database():
"""Initialize the (PCI) device database.
This takes a few seconds, and is initialized also "on demand" if
not initialized manually beforehand.
"""
global _global_pcidata
# takes a few seconds to load...
if _global_pcidata is None:
_global_pcidata = PciData()
def identify_device(devname):
"""Identify a (PCI) network device.
To speed up, call initialize_database() beforehand. The device
database is initialized only once
"""
initialize_database()
i = NetworkDeviceInfo()
i._identify(devname, _global_pcidata)
return i | 0.296552 | 0.088702 |
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import keras
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('image_height', 28, 'the height of image')
tf.app.flags.DEFINE_integer('image_width', 28, 'the width of image')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of images to process in a batch')
TRAIN_EXAMPLES_NUM = 55000
VALIDATION_EXAMPLES_NUM = 5000
TEST_EXAMPLES_NUM = 10000
def parse_data(example_proto):
features = {'img_raw': tf.FixedLenFeature([], tf.string, ''),
'label': tf.FixedLenFeature([], tf.int64, 0)}
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.decode_raw(parsed_features['img_raw'], tf.uint8)
label = tf.cast(parsed_features['label'], tf.int64)
image = tf.reshape(image, [FLAGS.image_height, FLAGS.image_width, 1])
image = tf.cast(image, tf.float32)
return image, label
def read_mnist_tfrecords(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
'img_raw': tf.FixedLenFeature([], tf.string, ''),
'label': tf.FixedLenFeature([], tf.int64, 0)
})
image = tf.decode_raw(features['img_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int64)
image = tf.reshape(image, [FLAGS.image_height, FLAGS.image_width, 1])
return image, label
def inputs(filenames, examples_num, batch_size, shuffle):
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
with tf.name_scope('inputs'):
filename_queue = tf.train.string_input_producer(filenames)
image, label = read_mnist_tfrecords(filename_queue)
image = tf.cast(image, tf.float32)
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(min_fraction_of_examples_in_queue * examples_num)
num_process_threads = 16
if shuffle:
images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size,
num_threads=num_process_threads,
capacity=min_queue_examples + batch_size * 3,
min_after_dequeue=min_queue_examples)
else:
images, labels = tf.train.batch([image, label], batch_size=batch_size,
num_threads=num_process_threads,
capacity=min_queue_examples + batch_size * 3)
return images, labels
def inference(images, training):
with tf.variable_scope('conv1'):
conv1 = tf.layers.conv2d(inputs=images,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # 14*14*32
with tf.variable_scope('conv2'):
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # 7*7*64
with tf.variable_scope('fc1'):
pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(inputs=fc1, rate=0.4, training=training)
with tf.variable_scope('logits'):
logits = tf.layers.dense(inputs=dropout1, units=10) # 使用该值计算交叉熵损失
predict = tf.nn.softmax(logits)
return logits, predict
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy')
cross_entropy_loss = tf.reduce_mean(cross_entropy)
return cross_entropy_loss
def train(total_loss, global_step):
num_batches_per_epoch = TRAIN_EXAMPLES_NUM / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * 10)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(learning_rate=0.001,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=0.1,
staircase=True)
# opt = tf.train.GradientDescentOptimizer(lr)
# opt = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.99)
opt = tf.train.AdamOptimizer(learning_rate=lr)
grad = opt.compute_gradients(total_loss)
apply_grad_op = opt.apply_gradients(grad, global_step)
return apply_grad_op
def model_slim(images, labels, is_training):
net = slim.conv2d(images, 32, [5, 5], scope='conv1')
net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1')
net = slim.conv2d(net, 64, [5, 5], scope='conv2')
net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2')
net = slim.flatten(net, scope='flatten')
net = slim.fully_connected(net, 1024, scope='fully_connected1')
net = slim.dropout(net, keep_prob=0.6, is_training=is_training)
logits = slim.fully_connected(net, 10, activation_fn=None, scope='fully_connected2')
prob = slim.softmax(logits)
loss = slim.losses.sparse_softmax_cross_entropy(logits, labels)
global_step = tf.train.get_or_create_global_step()
num_batches_per_epoch = TRAIN_EXAMPLES_NUM / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * 10)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(learning_rate=0.001,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=0.1,
staircase=True)
opt = tf.train.AdamOptimizer(learning_rate=lr)
return opt, loss, prob
def model_fn(features, labels, mode):
with tf.variable_scope('conv1'):
conv1 = tf.layers.conv2d(inputs=features,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # 14*14*32
with tf.variable_scope('conv2'):
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # 7*7*64
with tf.variable_scope('fc1'):
pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(inputs=fc1, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
with tf.variable_scope('logits'):
logits = tf.layers.dense(inputs=dropout1, units=10) # 使用该值计算交叉熵损失
predict = tf.nn.softmax(logits)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
tf.summary.scalar('accuracy', accuracy[1])
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
train_op = train(loss, global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {"eval_accuracy": accuracy}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def input_fn(filenames, training):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(parse_data)
if training:
dataset = dataset.shuffle(buffer_size=50000)
dataset = dataset.batch(FLAGS.batch_size)
if training:
dataset = dataset.repeat()
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def model_keras():
model = keras.Sequential()
model.add(keras.layers.Conv2D(filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
input_shape=[FLAGS.image_height, FLAGS.image_width, 1]))
model.add(keras.layers.MaxPool2D(pool_size=[2, 2], strides=2))
model.add(keras.layers.Conv2D(filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu))
model.add(keras.layers.MaxPool2D(pool_size=[2, 2], strides=2))
model.add(keras.layers.Flatten(input_shape=[7, 7, 64]))
model.add(keras.layers.Dense(units=1024, activation=tf.nn.relu))
model.add(keras.layers.Dropout(rate=0.4))
model.add(keras.layers.Dense(units=10))
model.add(keras.layers.Activation(tf.nn.softmax))
opt = keras.optimizers.Adam(0.001)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model | mnist.py | import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow import keras
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('image_height', 28, 'the height of image')
tf.app.flags.DEFINE_integer('image_width', 28, 'the width of image')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of images to process in a batch')
TRAIN_EXAMPLES_NUM = 55000
VALIDATION_EXAMPLES_NUM = 5000
TEST_EXAMPLES_NUM = 10000
def parse_data(example_proto):
features = {'img_raw': tf.FixedLenFeature([], tf.string, ''),
'label': tf.FixedLenFeature([], tf.int64, 0)}
parsed_features = tf.parse_single_example(example_proto, features)
image = tf.decode_raw(parsed_features['img_raw'], tf.uint8)
label = tf.cast(parsed_features['label'], tf.int64)
image = tf.reshape(image, [FLAGS.image_height, FLAGS.image_width, 1])
image = tf.cast(image, tf.float32)
return image, label
def read_mnist_tfrecords(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={
'img_raw': tf.FixedLenFeature([], tf.string, ''),
'label': tf.FixedLenFeature([], tf.int64, 0)
})
image = tf.decode_raw(features['img_raw'], tf.uint8)
label = tf.cast(features['label'], tf.int64)
image = tf.reshape(image, [FLAGS.image_height, FLAGS.image_width, 1])
return image, label
def inputs(filenames, examples_num, batch_size, shuffle):
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
with tf.name_scope('inputs'):
filename_queue = tf.train.string_input_producer(filenames)
image, label = read_mnist_tfrecords(filename_queue)
image = tf.cast(image, tf.float32)
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(min_fraction_of_examples_in_queue * examples_num)
num_process_threads = 16
if shuffle:
images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size,
num_threads=num_process_threads,
capacity=min_queue_examples + batch_size * 3,
min_after_dequeue=min_queue_examples)
else:
images, labels = tf.train.batch([image, label], batch_size=batch_size,
num_threads=num_process_threads,
capacity=min_queue_examples + batch_size * 3)
return images, labels
def inference(images, training):
with tf.variable_scope('conv1'):
conv1 = tf.layers.conv2d(inputs=images,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # 14*14*32
with tf.variable_scope('conv2'):
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # 7*7*64
with tf.variable_scope('fc1'):
pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(inputs=fc1, rate=0.4, training=training)
with tf.variable_scope('logits'):
logits = tf.layers.dense(inputs=dropout1, units=10) # 使用该值计算交叉熵损失
predict = tf.nn.softmax(logits)
return logits, predict
def loss(logits, labels):
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy')
cross_entropy_loss = tf.reduce_mean(cross_entropy)
return cross_entropy_loss
def train(total_loss, global_step):
num_batches_per_epoch = TRAIN_EXAMPLES_NUM / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * 10)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(learning_rate=0.001,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=0.1,
staircase=True)
# opt = tf.train.GradientDescentOptimizer(lr)
# opt = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.99)
opt = tf.train.AdamOptimizer(learning_rate=lr)
grad = opt.compute_gradients(total_loss)
apply_grad_op = opt.apply_gradients(grad, global_step)
return apply_grad_op
def model_slim(images, labels, is_training):
net = slim.conv2d(images, 32, [5, 5], scope='conv1')
net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1')
net = slim.conv2d(net, 64, [5, 5], scope='conv2')
net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2')
net = slim.flatten(net, scope='flatten')
net = slim.fully_connected(net, 1024, scope='fully_connected1')
net = slim.dropout(net, keep_prob=0.6, is_training=is_training)
logits = slim.fully_connected(net, 10, activation_fn=None, scope='fully_connected2')
prob = slim.softmax(logits)
loss = slim.losses.sparse_softmax_cross_entropy(logits, labels)
global_step = tf.train.get_or_create_global_step()
num_batches_per_epoch = TRAIN_EXAMPLES_NUM / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * 10)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(learning_rate=0.001,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=0.1,
staircase=True)
opt = tf.train.AdamOptimizer(learning_rate=lr)
return opt, loss, prob
def model_fn(features, labels, mode):
with tf.variable_scope('conv1'):
conv1 = tf.layers.conv2d(inputs=features,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # 14*14*32
with tf.variable_scope('conv2'):
conv2 = tf.layers.conv2d(inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # 7*7*64
with tf.variable_scope('fc1'):
pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
fc1 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
dropout1 = tf.layers.dropout(inputs=fc1, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
with tf.variable_scope('logits'):
logits = tf.layers.dense(inputs=dropout1, units=10) # 使用该值计算交叉熵损失
predict = tf.nn.softmax(logits)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
tf.summary.scalar('accuracy', accuracy[1])
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
train_op = train(loss, global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {"eval_accuracy": accuracy}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def input_fn(filenames, training):
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(parse_data)
if training:
dataset = dataset.shuffle(buffer_size=50000)
dataset = dataset.batch(FLAGS.batch_size)
if training:
dataset = dataset.repeat()
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def model_keras():
model = keras.Sequential()
model.add(keras.layers.Conv2D(filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu,
input_shape=[FLAGS.image_height, FLAGS.image_width, 1]))
model.add(keras.layers.MaxPool2D(pool_size=[2, 2], strides=2))
model.add(keras.layers.Conv2D(filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu))
model.add(keras.layers.MaxPool2D(pool_size=[2, 2], strides=2))
model.add(keras.layers.Flatten(input_shape=[7, 7, 64]))
model.add(keras.layers.Dense(units=1024, activation=tf.nn.relu))
model.add(keras.layers.Dropout(rate=0.4))
model.add(keras.layers.Dense(units=10))
model.add(keras.layers.Activation(tf.nn.softmax))
opt = keras.optimizers.Adam(0.001)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model | 0.896455 | 0.371023 |
################################################################################
## Form generated from reading UI file 'UIiEDzGo.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(600, 240)
MainWindow.setMinimumSize(QSize(500, 200))
MainWindow.setMaximumSize(QSize(1231241, 1231241))
font = QFont()
font.setFamily(u"Arial Black")
font.setPointSize(8)
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
MainWindow.setFont(font)
MainWindow.setCursor(QCursor(Qt.ArrowCursor))
MainWindow.setStyleSheet(u"background-color:rgba(19, 14, 34,0);")
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.centralwidget.setEnabled(True)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.ServerList = QTableWidget(self.centralwidget)
if (self.ServerList.columnCount() < 1):
self.ServerList.setColumnCount(1)
brush = QBrush(QColor(19, 14, 13, 255))
brush.setStyle(Qt.SolidPattern)
font1 = QFont()
font1.setFamily(u"Arial Black")
font1.setBold(False)
font1.setItalic(False)
font1.setUnderline(False)
font1.setWeight(50)
font1.setStrikeOut(False)
font1.setKerning(True)
__qtablewidgetitem = QTableWidgetItem()
__qtablewidgetitem.setFont(font1);
__qtablewidgetitem.setBackground(QColor(89, 254, 149));
__qtablewidgetitem.setForeground(brush);
self.ServerList.setHorizontalHeaderItem(0, __qtablewidgetitem)
self.ServerList.setObjectName(u"ServerList")
self.ServerList.setEnabled(True)
self.ServerList.setGeometry(QRect(10, 10, 432, 216))
sizePolicy1 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.ServerList.sizePolicy().hasHeightForWidth())
self.ServerList.setSizePolicy(sizePolicy1)
self.ServerList.setMaximumSize(QSize(16777215, 16777215))
font2 = QFont()
font2.setFamily(u"Arial Black")
font2.setPointSize(10)
font2.setBold(False)
font2.setItalic(False)
font2.setWeight(10)
self.ServerList.setFont(font2)
self.ServerList.viewport().setProperty("cursor", QCursor(Qt.PointingHandCursor))
self.ServerList.setFocusPolicy(Qt.NoFocus)
self.ServerList.setContextMenuPolicy(Qt.NoContextMenu)
self.ServerList.setAutoFillBackground(False)
self.ServerList.setStyleSheet(u"QTableWidget {\n"
" border-radius: 5px;\n"
" background-color:rgba(255, 255, 255, 0);\n"
" font: 87 10pt \"Arial Black\";\n"
"\n"
"}\n"
"\n"
"QTableWidget::item {\n"
" padding: 5px;\n"
" margin-top:5px;\n"
" border-radius: 5px;\n"
" background-color:rgb(39, 34, 54);\n"
" color:rgb(141, 123, 195);\n"
"\n"
"}\n"
"\n"
"QHeaderView::section {\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(255, 137, 64, 255), stop:1 rgba(210, 64, 255, 255));\n"
" border-radius: 5px;\n"
"}\n"
"\n"
"QTableWidget::item:selected {\n"
" border: 1px solid qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(255, 137, 64, 255), stop:1 rgba(210, 64, 255, 255));\n"
"}")
self.ServerList.setLineWidth(0)
self.ServerList.setMidLineWidth(0)
self.ServerList.setAutoScrollMargin(1)
self.ServerList.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.ServerList.setProperty("showDropIndicator", False)
self.ServerList.setDragDropOverwriteMode(False)
self.ServerList.setAlternatingRowColors(False)
self.ServerList.setSelectionMode(QAbstractItemView.SingleSelection)
self.ServerList.setSelectionBehavior(QAbstractItemView.SelectItems)
self.ServerList.setTextElideMode(Qt.ElideMiddle)
self.ServerList.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)
self.ServerList.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
self.ServerList.setShowGrid(False)
self.ServerList.setSortingEnabled(False)
self.ServerList.setWordWrap(False)
self.ServerList.setCornerButtonEnabled(False)
self.ServerList.setRowCount(0)
self.ServerList.setColumnCount(1)
self.ServerList.horizontalHeader().setVisible(True)
self.ServerList.horizontalHeader().setCascadingSectionResizes(True)
self.ServerList.horizontalHeader().setMinimumSectionSize(0)
self.ServerList.horizontalHeader().setDefaultSectionSize(115)
self.ServerList.horizontalHeader().setHighlightSections(False)
self.ServerList.horizontalHeader().setProperty("showSortIndicator", False)
self.ServerList.horizontalHeader().setStretchLastSection(True)
self.ServerList.verticalHeader().setVisible(False)
self.ServerList.verticalHeader().setDefaultSectionSize(30)
self.ServerList.verticalHeader().setHighlightSections(False)
self.frame = QFrame(self.centralwidget)
self.frame.setObjectName(u"frame")
self.frame.setEnabled(True)
self.frame.setGeometry(QRect(0, 0, 600, 240))
self.frame.setStyleSheet(u"border-radius: 25px;\n"
"background-color:rgb(19, 14, 34);")
self.frame.setFrameShape(QFrame.StyledPanel)
self.frame.setFrameShadow(QFrame.Raised)
self.startButton = QPushButton(self.frame)
self.startButton.setObjectName(u"startButton")
self.startButton.setEnabled(False)
self.startButton.setGeometry(QRect(480, 50, 90, 30))
self.startButton.setStyleSheet(u"QPushButton{background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(121, 194, 27, 255), stop:1 rgba(38, 194, 27, 255));\n"
"border-radius: 5px;\n"
"font: 87 8pt \"Arial Black\";\n"
"}\n"
"QPushButton::hover{\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(133, 214, 30, 255), stop:1 rgba(48, 244, 34, 255));\n"
"border: 1px solid rgb(207, 207, 207)\n"
"\n"
"}\n"
"QPushButton::pressed{\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(110, 176, 24, 255), stop:1 rgba(38, 196, 27, 255));\n"
"border: 1px solid rgb(0, 0, 0)\n"
"}\n"
"QPushButton::disabled{\n"
"color: rgba(0, 0, 0,150);\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(121, 194, 27, 150), stop:1 rgba(38, 194, 27, 150))\n"
"}")
self.cancelButton = QPushButton(self.frame)
self.cancelButton.setObjectName(u"cancelButton")
self.cancelButton.setGeometry(QRect(480, 160, 90, 30))
self.cancelButton.setStyleSheet(u"QPushButton{background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(212, 33, 27, 255), stop:1 rgba(212, 27, 107, 255));\n"
"border-radius: 5px;\n"
"font: 87 8pt \"Arial Black\";\n"
"}\n"
"QPushButton::hover{\n"
"background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(244, 43, 36, 255), stop:1 rgba(251, 32, 127, 255));\n"
"border: 1px solid rgb(207, 207, 207)\n"
"\n"
"}\n"
"QPushButton::pressed{\n"
"background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(178, 28, 23, 255), stop:1 rgba(167, 21, 84, 255));\n"
"border: 1px solid rgb(0, 0, 0)\n"
"}")
self.cancelButton.setCheckable(False)
self.label = QLabel(self.frame)
self.label.setObjectName(u"label")
self.label.setGeometry(QRect(480, 220, 101, 16))
self.label.setStyleSheet(u"color: rgb(39, 34, 54)")
MainWindow.setCentralWidget(self.centralwidget)
self.frame.raise_()
self.ServerList.raise_()
self.retranslateUi(MainWindow)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
    def retranslateUi(self, MainWindow):
        """Install the user-visible strings on the widgets built by setupUi.

        Auto-generated by the Qt User Interface Compiler (pyside2-uic); kept
        separate from setupUi so the UI text can be re-applied at runtime
        after a language change.
        """
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"MainWindow", None))
        # Header text for the single column of the server table.
        ___qtablewidgetitem = self.ServerList.horizontalHeaderItem(0)
        ___qtablewidgetitem.setText(QCoreApplication.translate("MainWindow", u"Server", None));
        self.startButton.setText(QCoreApplication.translate("MainWindow", u"Start", None))
        self.cancelButton.setText(QCoreApplication.translate("MainWindow", u"Cancel", None))
        self.label.setText(QCoreApplication.translate("MainWindow", u"Created by: Henfox", None))
    # retranslateUi
################################################################################
## Form generated from reading UI file 'UIiEDzGo.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
    """Auto-generated UI scaffold (pyside2-uic output) for the main window.

    Builds a 600x240 frameless-styled window with a server table
    (``ServerList``), Start/Cancel buttons and a credit label.
    NOTE: generated code — edits here are lost when the .ui is recompiled.
    """
    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        if not MainWindow.objectName():
            MainWindow.setObjectName(u"MainWindow")
        # Window geometry and global font/cursor/background.
        MainWindow.resize(600, 240)
        MainWindow.setMinimumSize(QSize(500, 200))
        MainWindow.setMaximumSize(QSize(1231241, 1231241))
        font = QFont()
        font.setFamily(u"Arial Black")
        font.setPointSize(8)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        MainWindow.setFont(font)
        MainWindow.setCursor(QCursor(Qt.ArrowCursor))
        MainWindow.setStyleSheet(u"background-color:rgba(19, 14, 34,0);")
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        self.centralwidget.setEnabled(True)
        sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        # --- ServerList: single-column table listing servers ---
        self.ServerList = QTableWidget(self.centralwidget)
        if (self.ServerList.columnCount() < 1):
            self.ServerList.setColumnCount(1)
        brush = QBrush(QColor(19, 14, 13, 255))
        brush.setStyle(Qt.SolidPattern)
        font1 = QFont()
        font1.setFamily(u"Arial Black")
        font1.setBold(False)
        font1.setItalic(False)
        font1.setUnderline(False)
        font1.setWeight(50)
        font1.setStrikeOut(False)
        font1.setKerning(True)
        __qtablewidgetitem = QTableWidgetItem()
        __qtablewidgetitem.setFont(font1);
        __qtablewidgetitem.setBackground(QColor(89, 254, 149));
        __qtablewidgetitem.setForeground(brush);
        self.ServerList.setHorizontalHeaderItem(0, __qtablewidgetitem)
        self.ServerList.setObjectName(u"ServerList")
        self.ServerList.setEnabled(True)
        self.ServerList.setGeometry(QRect(10, 10, 432, 216))
        sizePolicy1 = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
        sizePolicy1.setHorizontalStretch(0)
        sizePolicy1.setVerticalStretch(0)
        sizePolicy1.setHeightForWidth(self.ServerList.sizePolicy().hasHeightForWidth())
        self.ServerList.setSizePolicy(sizePolicy1)
        self.ServerList.setMaximumSize(QSize(16777215, 16777215))
        font2 = QFont()
        font2.setFamily(u"Arial Black")
        font2.setPointSize(10)
        font2.setBold(False)
        font2.setItalic(False)
        font2.setWeight(10)
        self.ServerList.setFont(font2)
        self.ServerList.viewport().setProperty("cursor", QCursor(Qt.PointingHandCursor))
        self.ServerList.setFocusPolicy(Qt.NoFocus)
        self.ServerList.setContextMenuPolicy(Qt.NoContextMenu)
        self.ServerList.setAutoFillBackground(False)
        self.ServerList.setStyleSheet(u"QTableWidget {\n"
"    border-radius: 5px;\n"
"    background-color:rgba(255, 255, 255, 0);\n"
"    font: 87 10pt \"Arial Black\";\n"
"\n"
"}\n"
"\n"
"QTableWidget::item {\n"
"    padding: 5px;\n"
"    margin-top:5px;\n"
"    border-radius: 5px;\n"
"    background-color:rgb(39, 34, 54);\n"
"    color:rgb(141, 123, 195);\n"
"\n"
"}\n"
"\n"
"QHeaderView::section {\n"
"    background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(255, 137, 64, 255), stop:1 rgba(210, 64, 255, 255));\n"
"    border-radius: 5px;\n"
"}\n"
"\n"
"QTableWidget::item:selected {\n"
"    border: 1px solid qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(255, 137, 64, 255), stop:1 rgba(210, 64, 255, 255));\n"
"}")
        self.ServerList.setLineWidth(0)
        self.ServerList.setMidLineWidth(0)
        self.ServerList.setAutoScrollMargin(1)
        self.ServerList.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.ServerList.setProperty("showDropIndicator", False)
        self.ServerList.setDragDropOverwriteMode(False)
        self.ServerList.setAlternatingRowColors(False)
        self.ServerList.setSelectionMode(QAbstractItemView.SingleSelection)
        self.ServerList.setSelectionBehavior(QAbstractItemView.SelectItems)
        self.ServerList.setTextElideMode(Qt.ElideMiddle)
        self.ServerList.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel)
        self.ServerList.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
        self.ServerList.setShowGrid(False)
        self.ServerList.setSortingEnabled(False)
        self.ServerList.setWordWrap(False)
        self.ServerList.setCornerButtonEnabled(False)
        self.ServerList.setRowCount(0)
        self.ServerList.setColumnCount(1)
        self.ServerList.horizontalHeader().setVisible(True)
        self.ServerList.horizontalHeader().setCascadingSectionResizes(True)
        self.ServerList.horizontalHeader().setMinimumSectionSize(0)
        self.ServerList.horizontalHeader().setDefaultSectionSize(115)
        self.ServerList.horizontalHeader().setHighlightSections(False)
        self.ServerList.horizontalHeader().setProperty("showSortIndicator", False)
        self.ServerList.horizontalHeader().setStretchLastSection(True)
        self.ServerList.verticalHeader().setVisible(False)
        self.ServerList.verticalHeader().setDefaultSectionSize(30)
        self.ServerList.verticalHeader().setHighlightSections(False)
        # --- frame: rounded dark background panel behind everything ---
        self.frame = QFrame(self.centralwidget)
        self.frame.setObjectName(u"frame")
        self.frame.setEnabled(True)
        self.frame.setGeometry(QRect(0, 0, 600, 240))
        self.frame.setStyleSheet(u"border-radius: 25px;\n"
"background-color:rgb(19, 14, 34);")
        self.frame.setFrameShape(QFrame.StyledPanel)
        self.frame.setFrameShadow(QFrame.Raised)
        # --- startButton: green gradient, disabled until enabled by app logic ---
        self.startButton = QPushButton(self.frame)
        self.startButton.setObjectName(u"startButton")
        self.startButton.setEnabled(False)
        self.startButton.setGeometry(QRect(480, 50, 90, 30))
        self.startButton.setStyleSheet(u"QPushButton{background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(121, 194, 27, 255), stop:1 rgba(38, 194, 27, 255));\n"
"border-radius: 5px;\n"
"font: 87 8pt \"Arial Black\";\n"
"}\n"
"QPushButton::hover{\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(133, 214, 30, 255), stop:1 rgba(48, 244, 34, 255));\n"
"border: 1px solid rgb(207, 207, 207)\n"
"\n"
"}\n"
"QPushButton::pressed{\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(110, 176, 24, 255), stop:1 rgba(38, 196, 27, 255));\n"
"border: 1px solid rgb(0, 0, 0)\n"
"}\n"
"QPushButton::disabled{\n"
"color: rgba(0, 0, 0,150);\n"
"background: qlineargradient(spread:pad, x1:1, y1:1, x2:0, y2:0, stop:0 rgba(121, 194, 27, 150), stop:1 rgba(38, 194, 27, 150))\n"
"}")
        # --- cancelButton: red/pink gradient ---
        self.cancelButton = QPushButton(self.frame)
        self.cancelButton.setObjectName(u"cancelButton")
        self.cancelButton.setGeometry(QRect(480, 160, 90, 30))
        self.cancelButton.setStyleSheet(u"QPushButton{background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(212, 33, 27, 255), stop:1 rgba(212, 27, 107, 255));\n"
"border-radius: 5px;\n"
"font: 87 8pt \"Arial Black\";\n"
"}\n"
"QPushButton::hover{\n"
"background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(244, 43, 36, 255), stop:1 rgba(251, 32, 127, 255));\n"
"border: 1px solid rgb(207, 207, 207)\n"
"\n"
"}\n"
"QPushButton::pressed{\n"
"background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(178, 28, 23, 255), stop:1 rgba(167, 21, 84, 255));\n"
"border: 1px solid rgb(0, 0, 0)\n"
"}")
        self.cancelButton.setCheckable(False)
        self.label = QLabel(self.frame)
        self.label.setObjectName(u"label")
        self.label.setGeometry(QRect(480, 220, 101, 16))
        self.label.setStyleSheet(u"color: rgb(39, 34, 54)")
        MainWindow.setCentralWidget(self.centralwidget)
        # Raise order: frame behind, table on top.
        self.frame.raise_()
        self.ServerList.raise_()
        self.retranslateUi(MainWindow)
        QMetaObject.connectSlotsByName(MainWindow)
    # setupUi
    def retranslateUi(self, MainWindow):
        """Install the user-visible strings on the widgets built by setupUi."""
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"MainWindow", None))
        ___qtablewidgetitem = self.ServerList.horizontalHeaderItem(0)
        ___qtablewidgetitem.setText(QCoreApplication.translate("MainWindow", u"Server", None));
        self.startButton.setText(QCoreApplication.translate("MainWindow", u"Start", None))
        self.cancelButton.setText(QCoreApplication.translate("MainWindow", u"Cancel", None))
        self.label.setText(QCoreApplication.translate("MainWindow", u"Created by: Henfox", None))
    # retranslateUi
import logging
import cv2
import numpy as np
import pandas as pd
from torchvision import datasets, transforms
import torch
import random
from facenet_pytorch import MTCNN
from PIL import Image
from multiprocessing import cpu_count
class MTCNN_Model:
    """Face detector wrapping facenet_pytorch's pretrained MTCNN.

    Runs batched inference over a list of image paths and collects, per
    detected face, the bounding box (mapped back to the original image
    resolution) and its confidence score into a pandas DataFrame.
    """

    def __init__(self, model_parameters, inference_parameters):
        """
        Parameters
        ----------
        model_parameters : dict
            Keys: 'image_size', 'margin', 'min_face_size', 'thresholds',
            'factor', 'keep_all', 'device' ("cuda" or "cpu") and 'seed'.
        inference_parameters : dict
            Keys: 'inference_batch_size' and
            'input_square_transformation_size' (side length of the square
            the inputs are resized to before detection).
        """
        #---------dataset_infos
        self.X = None
        self.input_images = None
        self.subfolders = None
        #---------model_parameters
        self.image_size = model_parameters['image_size']
        self.margin = model_parameters['margin']
        self.min_face_size = model_parameters['min_face_size']
        self.thresholds = model_parameters['thresholds']
        self.factor = model_parameters['factor']
        self.keep_all = model_parameters['keep_all']
        # Fall back to CPU when CUDA was requested but is unavailable.
        self.device = 'cuda:0' if (model_parameters['device'] == "cuda" and torch.cuda.is_available()) else 'cpu'
        self.seed = model_parameters['seed']
        self.post_process = False
        #---------Inference_parameters
        self.inference_batch_size = inference_parameters['inference_batch_size']
        self.input_square_transformation_size = inference_parameters['input_square_transformation_size']
        #------- Other
        self.num_workers = cpu_count()
        #------- MTCNN
        self.mtcnn = MTCNN(image_size=self.image_size,
                           margin=self.margin,
                           min_face_size=self.min_face_size,
                           thresholds=self.thresholds,
                           factor=self.factor,
                           post_process=self.post_process,
                           keep_all=self.keep_all,
                           device=self.device)
        #------- Reproducibility
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.random.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        #------- Results
        self.df_result = None

    def predict(self, img_arr: np.ndarray):
        """Detect faces in an image or a batch of images.

        Parameters
        ----------
        img_arr : np.ndarray, list
            A 3D image array or a list/array of 3D images (a batch).

        Returns
        -------
        tuple
            ``(batch_bboxes, batch_probs)``: per image, the detected bboxes
            (shape (N, B, 4)) rescaled back to the original image resolution
            and the matching probabilities (shape (N, B)).  For an image with
            no detection the entries are ``None`` and ``[None]``.
        """
        if isinstance(img_arr, list):
            img_arr = np.array(img_arr)
        if len(img_arr.shape) == 3:
            # Single image: add the batch dimension.
            img_arr = np.expand_dims(img_arr, 0)
        # Resize every image to the square network input size, remembering
        # the original shape so bboxes can be mapped back afterwards.
        side = self.input_square_transformation_size
        original_image_shapes = []
        reshaped_images = []
        for img in img_arr:
            original_image_shapes.append(img.shape)
            reshaped_images.append(cv2.resize(img, (side, side)))
        reshaped_images = np.array(reshaped_images)
        # MTCNN may emit a warning when no bbox is found.
        batch_bboxes, batch_probs = self.mtcnn.detect(reshaped_images, landmarks=False)
        for i, bboxes in enumerate(batch_bboxes):
            if bboxes is None:
                continue
            original_shape = original_image_shapes[i]
            batch_bboxes[i] = [self._bbox_to_original_shape(bbox, original_shape) for bbox in bboxes]
        return (batch_bboxes, batch_probs)

    def _post_process_results(self, batch_results, image_paths):
        """Flatten one batch's predictions into a DataFrame.

        Parameters
        ----------
        batch_results : tuple
            ``(batch_bboxes, batch_probs)`` as returned by :meth:`predict`.
        image_paths : sequence
            Path of each image actually present in the batch, aligned
            one-to-one with ``batch_results`` (BUGFIX: previously the full
            index slice was used, so any unreadable image shifted every
            following path onto the wrong prediction).

        Returns
        -------
        pd.DataFrame
            Columns "image", "coords(x_min,y_min,x_max,y_max)" and
            "probability"; one row per detected face.
        """
        paths, bboxes, probs = [], [], []
        zipped_loop = zip(batch_results[0], batch_results[1], image_paths)
        for bboxes_data, probs_data, image_path in zipped_loop:
            if bboxes_data is None:
                # No face found: keep one row so the image still appears.
                paths.append(image_path)
                bboxes.append(None)
                probs.append(None)
                continue
            bboxes_data = np.array(bboxes_data)
            for bbox_id in range(bboxes_data.shape[0]):
                paths.append(image_path)
                bboxes.append(bboxes_data[bbox_id])
                probs.append(probs_data[bbox_id])
        df = pd.DataFrame()
        df["image"] = paths
        df["coords(x_min,y_min,x_max,y_max)"] = bboxes
        df["probability"] = probs
        # Ordering column names
        return df[["image", "coords(x_min,y_min,x_max,y_max)", "probability"]]

    def _bbox_to_original_shape(self, bbox, original_shape):
        """Rescale a bbox from network-input coordinates to the original image.

        Parameters
        ----------
        bbox : np.ndarray, list
            Bbox in input-square coordinates ``[x1, y1, x2, y2]``.
        original_shape : tuple
            Original image shape ``(height, width, *)``.

        Returns
        -------
        np.ndarray
            Integer bbox ``[x1', y1', x2', y2']`` in original coordinates.
        """
        side = self.input_square_transformation_size
        x1 = bbox[0] * original_shape[1] / side
        x2 = bbox[2] * original_shape[1] / side
        y1 = bbox[1] * original_shape[0] / side
        y2 = bbox[3] * original_shape[0] / side
        return np.array([x1, y1, x2, y2]).astype(int)

    def _build_batch(self, index_range):
        """Load the images for one batch.

        Returns
        -------
        tuple
            ``(batch, valid_paths)`` — the RGB images that loaded
            successfully and the corresponding paths, kept aligned so
            downstream post-processing never mixes paths and predictions.
        """
        batch, valid_paths = [], []
        for image_path in self.X[index_range]:
            v_cap = cv2.VideoCapture(image_path)
            success, frame = v_cap.read()
            # BUGFIX: release the capture handle (was leaked before).
            v_cap.release()
            # If image is not read correctly, skip it
            if not success:
                continue
            batch.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            valid_paths.append(image_path)
        return np.array(batch), valid_paths

    def _construct_result_dataframe(self):
        """Populate ``self.df_result`` with bboxes/probabilities for every
        input image, running inference in batches of
        ``self.inference_batch_size``."""
        self.df_result = pd.DataFrame(columns=["image", "coords(x_min,y_min,x_max,y_max)", "probability"])
        max_size = len(self.X)
        if max_size == 0:
            # BUGFIX: range() with step 0 raised ValueError for empty input.
            return
        step = min(max_size, self.inference_batch_size)
        for i in range(0, max_size, step):
            index_range = range(i, min(i + step, max_size))
            batch, batch_paths = self._build_batch(index_range)
            if len(batch) == 0:
                # Every image in this slice failed to load; nothing to infer.
                continue
            batch_results = self.predict(batch)
            batch_df = self._post_process_results(batch_results, batch_paths)
            self.df_result = pd.concat((self.df_result, batch_df))
        self.df_result = self.df_result.reset_index(drop=True)

    def get_result_dataframe(self, X):
        """Run inference over image paths and return the detection table.

        Parameters
        ----------
        X : np.ndarray, list
            Array or list of image file paths.

        Returns
        -------
        pd.DataFrame
            Columns "image", "coords(x_min,y_min,x_max,y_max)" and
            "probability"; images with no detection get a single row with
            ``None`` values.
        """
        # BUGFIX: plain lists cannot be indexed with a range(); normalize to
        # an ndarray so self.X[index_range] works for both input kinds.
        self.X = np.asarray(X)
        self._construct_result_dataframe()
        return self.df_result
import cv2
import numpy as np
import pandas as pd
from torchvision import datasets, transforms
import torch
import random
from facenet_pytorch import MTCNN
from PIL import Image
from multiprocessing import cpu_count
class MTCNN_Model:
    """Face detector wrapping facenet_pytorch's pretrained MTCNN.

    Runs batched inference over a list of image paths and collects, per
    detected face, the bounding box (mapped back to the original image
    resolution) and its confidence score into a pandas DataFrame.
    """

    def __init__(self, model_parameters, inference_parameters):
        """
        Parameters
        ----------
        model_parameters : dict
            Keys: 'image_size', 'margin', 'min_face_size', 'thresholds',
            'factor', 'keep_all', 'device' ("cuda" or "cpu") and 'seed'.
        inference_parameters : dict
            Keys: 'inference_batch_size' and
            'input_square_transformation_size' (side length of the square
            the inputs are resized to before detection).
        """
        #---------dataset_infos
        self.X = None
        self.input_images = None
        self.subfolders = None
        #---------model_parameters
        self.image_size = model_parameters['image_size']
        self.margin = model_parameters['margin']
        self.min_face_size = model_parameters['min_face_size']
        self.thresholds = model_parameters['thresholds']
        self.factor = model_parameters['factor']
        self.keep_all = model_parameters['keep_all']
        # Fall back to CPU when CUDA was requested but is unavailable.
        self.device = 'cuda:0' if (model_parameters['device'] == "cuda" and torch.cuda.is_available()) else 'cpu'
        self.seed = model_parameters['seed']
        self.post_process = False
        #---------Inference_parameters
        self.inference_batch_size = inference_parameters['inference_batch_size']
        self.input_square_transformation_size = inference_parameters['input_square_transformation_size']
        #------- Other
        self.num_workers = cpu_count()
        #------- MTCNN
        self.mtcnn = MTCNN(image_size=self.image_size,
                           margin=self.margin,
                           min_face_size=self.min_face_size,
                           thresholds=self.thresholds,
                           factor=self.factor,
                           post_process=self.post_process,
                           keep_all=self.keep_all,
                           device=self.device)
        #------- Reproducibility
        random.seed(self.seed)
        np.random.seed(self.seed)
        torch.random.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        #------- Results
        self.df_result = None

    def predict(self, img_arr: np.ndarray):
        """Detect faces in an image or a batch of images.

        Parameters
        ----------
        img_arr : np.ndarray, list
            A 3D image array or a list/array of 3D images (a batch).

        Returns
        -------
        tuple
            ``(batch_bboxes, batch_probs)``: per image, the detected bboxes
            (shape (N, B, 4)) rescaled back to the original image resolution
            and the matching probabilities (shape (N, B)).  For an image with
            no detection the entries are ``None`` and ``[None]``.
        """
        if isinstance(img_arr, list):
            img_arr = np.array(img_arr)
        if len(img_arr.shape) == 3:
            # Single image: add the batch dimension.
            img_arr = np.expand_dims(img_arr, 0)
        # Resize every image to the square network input size, remembering
        # the original shape so bboxes can be mapped back afterwards.
        side = self.input_square_transformation_size
        original_image_shapes = []
        reshaped_images = []
        for img in img_arr:
            original_image_shapes.append(img.shape)
            reshaped_images.append(cv2.resize(img, (side, side)))
        reshaped_images = np.array(reshaped_images)
        # MTCNN may emit a warning when no bbox is found.
        batch_bboxes, batch_probs = self.mtcnn.detect(reshaped_images, landmarks=False)
        for i, bboxes in enumerate(batch_bboxes):
            if bboxes is None:
                continue
            original_shape = original_image_shapes[i]
            batch_bboxes[i] = [self._bbox_to_original_shape(bbox, original_shape) for bbox in bboxes]
        return (batch_bboxes, batch_probs)

    def _post_process_results(self, batch_results, image_paths):
        """Flatten one batch's predictions into a DataFrame.

        Parameters
        ----------
        batch_results : tuple
            ``(batch_bboxes, batch_probs)`` as returned by :meth:`predict`.
        image_paths : sequence
            Path of each image actually present in the batch, aligned
            one-to-one with ``batch_results`` (BUGFIX: previously the full
            index slice was used, so any unreadable image shifted every
            following path onto the wrong prediction).

        Returns
        -------
        pd.DataFrame
            Columns "image", "coords(x_min,y_min,x_max,y_max)" and
            "probability"; one row per detected face.
        """
        paths, bboxes, probs = [], [], []
        zipped_loop = zip(batch_results[0], batch_results[1], image_paths)
        for bboxes_data, probs_data, image_path in zipped_loop:
            if bboxes_data is None:
                # No face found: keep one row so the image still appears.
                paths.append(image_path)
                bboxes.append(None)
                probs.append(None)
                continue
            bboxes_data = np.array(bboxes_data)
            for bbox_id in range(bboxes_data.shape[0]):
                paths.append(image_path)
                bboxes.append(bboxes_data[bbox_id])
                probs.append(probs_data[bbox_id])
        df = pd.DataFrame()
        df["image"] = paths
        df["coords(x_min,y_min,x_max,y_max)"] = bboxes
        df["probability"] = probs
        # Ordering column names
        return df[["image", "coords(x_min,y_min,x_max,y_max)", "probability"]]

    def _bbox_to_original_shape(self, bbox, original_shape):
        """Rescale a bbox from network-input coordinates to the original image.

        Parameters
        ----------
        bbox : np.ndarray, list
            Bbox in input-square coordinates ``[x1, y1, x2, y2]``.
        original_shape : tuple
            Original image shape ``(height, width, *)``.

        Returns
        -------
        np.ndarray
            Integer bbox ``[x1', y1', x2', y2']`` in original coordinates.
        """
        side = self.input_square_transformation_size
        x1 = bbox[0] * original_shape[1] / side
        x2 = bbox[2] * original_shape[1] / side
        y1 = bbox[1] * original_shape[0] / side
        y2 = bbox[3] * original_shape[0] / side
        return np.array([x1, y1, x2, y2]).astype(int)

    def _build_batch(self, index_range):
        """Load the images for one batch.

        Returns
        -------
        tuple
            ``(batch, valid_paths)`` — the RGB images that loaded
            successfully and the corresponding paths, kept aligned so
            downstream post-processing never mixes paths and predictions.
        """
        batch, valid_paths = [], []
        for image_path in self.X[index_range]:
            v_cap = cv2.VideoCapture(image_path)
            success, frame = v_cap.read()
            # BUGFIX: release the capture handle (was leaked before).
            v_cap.release()
            # If image is not read correctly, skip it
            if not success:
                continue
            batch.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            valid_paths.append(image_path)
        return np.array(batch), valid_paths

    def _construct_result_dataframe(self):
        """Populate ``self.df_result`` with bboxes/probabilities for every
        input image, running inference in batches of
        ``self.inference_batch_size``."""
        self.df_result = pd.DataFrame(columns=["image", "coords(x_min,y_min,x_max,y_max)", "probability"])
        max_size = len(self.X)
        if max_size == 0:
            # BUGFIX: range() with step 0 raised ValueError for empty input.
            return
        step = min(max_size, self.inference_batch_size)
        for i in range(0, max_size, step):
            index_range = range(i, min(i + step, max_size))
            batch, batch_paths = self._build_batch(index_range)
            if len(batch) == 0:
                # Every image in this slice failed to load; nothing to infer.
                continue
            batch_results = self.predict(batch)
            batch_df = self._post_process_results(batch_results, batch_paths)
            self.df_result = pd.concat((self.df_result, batch_df))
        self.df_result = self.df_result.reset_index(drop=True)

    def get_result_dataframe(self, X):
        """Run inference over image paths and return the detection table.

        Parameters
        ----------
        X : np.ndarray, list
            Array or list of image file paths.

        Returns
        -------
        pd.DataFrame
            Columns "image", "coords(x_min,y_min,x_max,y_max)" and
            "probability"; images with no detection get a single row with
            ``None`` values.
        """
        # BUGFIX: plain lists cannot be indexed with a range(); normalize to
        # an ndarray so self.X[index_range] works for both input kinds.
        self.X = np.asarray(X)
        self._construct_result_dataframe()
        return self.df_result
import tensorflow as tf
from ...data import fields
from ...layers import Layer
from ...structures import ImageList, box_list
from ..backbone import build_backbone
from ..necks import build_neck
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from ..postprocessing import detector_postprocess
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(Layer):
    """
    Generalized R-CNN. Any models that contains the following three components:
    1. Per-image feature extraction (aka backbone)
    2. Region proposal generation
    3. Per-region feature extraction and prediction
    """
    def __init__(self, cfg):
        super().__init__()
        # Feature pipeline: backbone -> neck (feature aggregation) ->
        # proposal generator -> ROI heads.
        self.backbone = build_backbone(cfg, scope="backbone")
        self.neck = build_neck(cfg, self.backbone.output_shape(), scope="neck")
        self.proposal_generator = build_proposal_generator(
            cfg, self.neck.output_shape(), scope="proposal_generator")
        self.roi_heads = build_roi_heads(cfg, self.neck.output_shape(), scope="roi_heads")
        # Per-channel normalization constants must pair up.
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        pixel_mean = tf.convert_to_tensor(cfg.MODEL.PIXEL_MEAN, tf.float32)
        pixel_std = tf.convert_to_tensor(cfg.MODEL.PIXEL_STD, tf.float32)
        self.input_format = cfg.MODEL.INPUT_FORMAT
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
        # Mask output mode: "raw", "fixed" (fixed square resolution) or
        # "conventional" (padded input resolution) — see inference().
        self.segmentation_output_format = cfg.MODEL.SEGMENTATION_OUTPUT.FORMAT
        self.segmentation_output_resolution = cfg.MODEL.SEGMENTATION_OUTPUT.FIXED_RESOLUTION
    def call(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
            Each item in the list contains the inputs for one image.
            For now, each item in the list is a dict that contains:
            * image: Tensor, image in (C, H, W) format.
            * instances (optional): groundtruth :class:`Instances`
            * proposals (optional): :class:`Instances`, precomputed proposals.
            Other information that's included in the original dicts, such as:
            * "height", "width" (int): the output resolution of the model, used in inference.
            See :meth:`postprocess` for details.
        Returns:
            dict:
            In training: the merged proposal and detector losses.
            In inference (delegated to :meth:`inference`): one key
            "instances" whose value is a dict with keys
            "boxes", "classes", "scores", "masks".
        """
        # Inference path is handled separately.
        if not self.training:
            return self.inference(batched_inputs)
        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs:
            gt_instances = batched_inputs["instances"]
        elif "targets" in batched_inputs:
            # Legacy key, kept for backward compatibility.
            tf.logging.warn(
                "'targets' in the model inputs is now renamed to 'instances'!"
            )
            gt_instances = batched_inputs["targets"]
        else:
            gt_instances = None
        features = self.neck(self.backbone(images.tensor))
        if self.proposal_generator:
            proposals, proposal_losses, _ = self.proposal_generator(images, features, gt_instances)
        else:
            # No RPN configured: rely on precomputed proposals from the input.
            assert "proposals" in batched_inputs
            proposals = batched_inputs["proposals"]
            proposal_losses = {}
        _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
        # Merge both loss dicts into a single flat dict.
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses
    def inference(self, batched_inputs, detected_instances=None):
        """
        Run inference on the given inputs.
        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or BoxList): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.
        Returns:
            same as in :meth:`call`.
        """
        assert not self.training
        images = self.preprocess_image(batched_inputs)
        features = self.neck(self.backbone(images.tensor))
        if detected_instances is None:
            if self.proposal_generator:
                proposals, *_ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs
                proposals = batched_inputs["proposals"]
            results, _ = self.roi_heads(images, features, proposals, None)
        else:
            # Boxes are given: only compute the per-ROI outputs (e.g. masks).
            detected_instances = box_list.SparseBoxList.from_dense(detected_instances)
            results, _ = self.roi_heads.forward_with_given_boxes(
                features, detected_instances, tf.shape(images.tensor)[1:3]
            )
        if self.segmentation_output_format != "raw":
            if self.segmentation_output_format == "fixed":
                output_shape = [
                    self.segmentation_output_resolution, self.segmentation_output_resolution
                ]
            elif self.segmentation_output_format == "conventional":
                output_shape = tf.shape(images.tensor)[1:3]
            # NOTE(review): any other non-"raw" format leaves output_shape
            # unbound and would raise NameError here — confirm the config
            # validates SEGMENTATION_OUTPUT.FORMAT upstream.
            results = detector_postprocess(
                results, output_shape, self.segmentation_output_format, images.image_shapes
            )
        result_fields = fields.ResultFields
        detected_results = {
            result_fields.boxes: results.boxes,
            result_fields.classes: results.get_field("pred_classes"),
            result_fields.scores: results.get_field("scores"),
            result_fields.is_valid: results.get_field("is_valid"),
        }
        # Masks are only present when the ROI heads predict them.
        if results.has_field("pred_masks"):
            detected_results[result_fields.masks] = results.get_field("pred_masks")
        return {"instances": detected_results}
    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = batched_inputs["image"]
        images = self.normalizer(images)
        # Channel order is flipped here when inputs arrive as BGR.
        if self.input_format == "BGR": images = images[..., ::-1]
        image_shapes = batched_inputs["image_shape"]
        images = ImageList.from_tensors(
            images, image_shapes, self.neck.size_divisibility
        )
        return images
@META_ARCH_REGISTRY.register()
class ProposalNetwork(Layer):
    """Region-proposal-only model: backbone + neck + proposal generator.

    Mirrors :class:`GeneralizedRCNN` without the ROI heads; useful for
    training or evaluating an RPN in isolation.
    """
    def __init__(self, cfg):
        super().__init__()
        # NOTE(review): unlike GeneralizedRCNN, no scope="backbone" is
        # passed here — confirm whether variable scoping should match.
        self.backbone = build_backbone(cfg)
        self.neck = build_neck(cfg, self.backbone.output_shape(), scope="neck")
        self.proposal_generator = build_proposal_generator(
            cfg, self.neck.output_shape(), scope="proposal_generator")
        # Consistency with GeneralizedRCNN: mean/std must pair per channel.
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        pixel_mean = tf.convert_to_tensor(cfg.MODEL.PIXEL_MEAN, tf.float32)
        pixel_std = tf.convert_to_tensor(cfg.MODEL.PIXEL_STD, tf.float32)
        self.input_format = cfg.MODEL.INPUT_FORMAT
        self.normalizer = lambda x: (x - pixel_mean) / pixel_std
    def call(self, batched_inputs):
        """
        Args:
            Same as in :class:`GeneralizedRCNN.call`
        Returns:
            dict: in training, the proposal losses; in inference, a result
            dict keyed by :class:`fields.ResultFields` holding the proposal
            boxes, validity mask, all-zero classes and sigmoid objectness
            scores.
        """
        images = self.preprocess_image(batched_inputs)
        features = self.neck(self.backbone(images.tensor))
        if "instances" in batched_inputs:
            gt_instances = batched_inputs["instances"]
        elif "targets" in batched_inputs:
            # Legacy key, kept for backward compatibility.
            tf.logging.warn(
                "'targets' in the model inputs is now renamed to 'instances'!"
            )
            gt_instances = batched_inputs["targets"]
        else:
            gt_instances = None
        proposals, proposal_losses, _ = self.proposal_generator(images, features, gt_instances)
        # In training, the proposals are not useful at all but we generate them anyway.
        # This makes RPN-only models about 5% slower.
        if self.training:
            return proposal_losses
        result_fields = fields.ResultFields
        results = {
            result_fields.boxes: proposals.boxes,
            result_fields.is_valid: proposals.get_field("is_valid"),
        }
        scores = proposals.get_field("objectness_logits")
        # Proposals are class-agnostic: report every box as class 0.
        classes = tf.zeros_like(scores, dtype=tf.int64)
        results[result_fields.classes] = classes
        results[result_fields.scores] = tf.nn.sigmoid(scores)
        return results
    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = batched_inputs["image"]
        images = self.normalizer(images)
        # Channel order is flipped here when inputs arrive as BGR.
        if self.input_format == "BGR": images = images[..., ::-1]
        image_shapes = batched_inputs["image_shape"]
        images = ImageList.from_tensors(
            images, image_shapes, self.neck.size_divisibility
        )
        return images
from ...data import fields
from ...layers import Layer
from ...structures import ImageList, box_list
from ..backbone import build_backbone
from ..necks import build_neck
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from ..postprocessing import detector_postprocess
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork"]
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(Layer):
"""
Generalized R-CNN. Any models that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg, scope="backbone")
self.neck = build_neck(cfg, self.backbone.output_shape(), scope="neck")
self.proposal_generator = build_proposal_generator(
cfg, self.neck.output_shape(), scope="proposal_generator")
self.roi_heads = build_roi_heads(cfg, self.neck.output_shape(), scope="roi_heads")
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
pixel_mean = tf.convert_to_tensor(cfg.MODEL.PIXEL_MEAN, tf.float32)
pixel_std = tf.convert_to_tensor(cfg.MODEL.PIXEL_STD, tf.float32)
self.input_format = cfg.MODEL.INPUT_FORMAT
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.segmentation_output_format = cfg.MODEL.SEGMENTATION_OUTPUT.FORMAT
self.segmentation_output_resolution = cfg.MODEL.SEGMENTATION_OUTPUT.FIXED_RESOLUTION
def call(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
dict:
Dict is the outputs for input images.
The dict contains one key "instances" whose value is a :class:`Dict`.
The :class:`Dict` object has the following keys:
"boxes", "classes", "scores", "masks",
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs:
gt_instances = batched_inputs["instances"]
elif "targets" in batched_inputs:
tf.logging.warn(
"'targets' in the model inputs is now renamed to 'instances'!"
)
gt_instances = batched_inputs["targets"]
else:
gt_instances = None
features = self.neck(self.backbone(images.tensor))
if self.proposal_generator:
proposals, proposal_losses, _ = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs
proposals = batched_inputs["proposals"]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(self, batched_inputs, detected_instances=None):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or BoxList): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
Returns:
same as in :meth:`call`.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.neck(self.backbone(images.tensor))
if detected_instances is None:
if self.proposal_generator:
proposals, *_ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs
proposals = batched_inputs["proposals"]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = box_list.SparseBoxList.from_dense(detected_instances)
results, _ = self.roi_heads.forward_with_given_boxes(
features, detected_instances, tf.shape(images.tensor)[1:3]
)
if self.segmentation_output_format != "raw":
if self.segmentation_output_format == "fixed":
output_shape = [
self.segmentation_output_resolution, self.segmentation_output_resolution
]
elif self.segmentation_output_format == "conventional":
output_shape = tf.shape(images.tensor)[1:3]
results = detector_postprocess(
results, output_shape, self.segmentation_output_format, images.image_shapes
)
result_fields = fields.ResultFields
detected_results = {
result_fields.boxes: results.boxes,
result_fields.classes: results.get_field("pred_classes"),
result_fields.scores: results.get_field("scores"),
result_fields.is_valid: results.get_field("is_valid"),
}
if results.has_field("pred_masks"):
detected_results[result_fields.masks] = results.get_field("pred_masks")
return {"instances": detected_results}
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = batched_inputs["image"]
images = self.normalizer(images)
if self.input_format == "BGR": images = images[..., ::-1]
image_shapes = batched_inputs["image_shape"]
images = ImageList.from_tensors(
images, image_shapes, self.neck.size_divisibility
)
return images
@META_ARCH_REGISTRY.register()
class ProposalNetwork(Layer):
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
self.neck = build_neck(cfg, self.backbone.output_shape(), scope="neck")
self.proposal_generator = build_proposal_generator(
cfg, self.neck.output_shape(), scope="proposal_generator")
pixel_mean = tf.convert_to_tensor(cfg.MODEL.PIXEL_MEAN, tf.float32)
pixel_std = tf.convert_to_tensor(cfg.MODEL.PIXEL_STD, tf.float32)
self.input_format = cfg.MODEL.INPUT_FORMAT
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
def call(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]: Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = self.preprocess_image(batched_inputs)
features = self.neck(self.backbone(images.tensor))
if "instances" in batched_inputs:
gt_instances = batched_inputs["instances"]
elif "targets" in batched_inputs:
tf.logging.warn(
"'targets' in the model inputs is now renamed to 'instances'!"
)
gt_instances = batched_inputs["targets"]
else:
gt_instances = None
proposals, proposal_losses, _ = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
result_fields = fields.ResultFields
results = {
result_fields.boxes: proposals.boxes,
result_fields.is_valid: proposals.get_field("is_valid"),
}
scores = proposals.get_field("objectness_logits")
classes = tf.zeros_like(scores, dtype=tf.int64)
results[result_fields.classes] = classes
results[result_fields.scores] = tf.nn.sigmoid(scores)
return results
def preprocess_image(self, batched_inputs):
"""
Normalize, pad and batch the input images.
"""
images = batched_inputs["image"]
images = self.normalizer(images)
if self.input_format == "BGR": images = images[..., ::-1]
image_shapes = batched_inputs["image_shape"]
images = ImageList.from_tensors(
images, image_shapes, self.neck.size_divisibility
)
return images | 0.892445 | 0.366165 |
import tkinter as tk
window = tk.Tk()
window.title('Claculator')
numbers = ['0']
action = ['null']
### FUNCTIONS ###
# OUTPUT
def output_update(text):
value = lbl_output['text']
if len(numbers)==1:
if numbers[0]=='0':
value = value[:-1]
numbers.clear()
elif numbers[0]=='clear':
value = ''
numbers.clear()
numbers.append(text)
lbl_output['text'] = value+str(text)
def output_clear():
numbers.clear()
numbers.append('0')
action[0] = 'null'
lbl_output['text'] = '0'
lbl_holder['text'] = ''
def switch_prefix():
value = lbl_output['text']
if numbers[0] == 'clear':
value = '0'
numbers.clear()
numbers.append('0')
try:
if value[0] == '-': value = value[1:]
else: value = '-'+value
except: value = '-'
lbl_output['text'] = value
def dot():
value = lbl_output['text']+'.'
numbers.append('.')
c=0
for x in value:
if x == '.': c+=1
if c>1: value = value[:-1]
else:
try:
null=str(value[len(value)-2])
try: null=float(value[len(value)-2])
except: value = value[:-1]+'0.'
except: value = '0.'
lbl_output['text'] = value
def backspace():
value = lbl_output['text']
value = value[:-1]
if numbers[0] == 'clear':
numbers.clear()
numbers.append('0')
value = '0'
else: numbers.pop()
if value=='' or value=='-':value+='0';numbers.append('0')
lbl_output['text'] = value
def pi():
numbers.clear()
for i in "3.1415926535897932384626433832795":
numbers.append(i)
lbl_output['text'] = "3.1415926535897932384626433832795"
# OPERATORS
def equals():
value = lbl_output['text']
holder = lbl_holder['text']
if holder[0] == '√':
holder = holder[1:]
holder = holder.split()
try: number = holder[0]
except: return
if action[0] == 'add':
try: value = float(number) + float(value)
except: return
if action[0] == 'sub':
try: value = float(number) - float(value)
except: return
if action[0] == 'mul':
try: value = float(number) * float(value)
except: return
if action[0] == 'div':
try: value = float(number) / float(value)
except: return
if action[0] == 'root':
try: value = float(number)**(1/float(value))
except: return
if action[0] == 'pow':
try: value = float(number)**(float(value))
except: return
action[0] = 'null'
holder = ''
numbers.clear()
numbers.append('clear')
value = str(value)
if value[len((value))-2]+value[len(value)-1] == '.0':
value = value[:-2]
lbl_output['text'] = str(value)
lbl_holder['text'] = str(holder)
def addition():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'add'
holder = f"{value} +"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def subtraction():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'sub'
holder = f"{value} -"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def multiplication():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'mul'
holder = f"{value} ×"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def division():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'div'
holder = f"{value} ÷"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def root():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'root'
holder = f"√{value}"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def power():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'pow'
holder = f"{value} ^"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def factorial():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
n = 1
for i in range(int(value)):
i+=1
n = n*i
n = n+(float(value)-int(value))
value = str(n)
holder = ''
numbers.clear()
numbers.append('clear')
if value[len((value))-2]+value[len(value)-1] == '.0':
value = value[:-2]
lbl_output['text'] = str(value)
lbl_holder['text'] = str(holder)
### WINDOW ###
output = tk.Frame(
master=window
)
panel = tk.Frame(
master=window
)
lbl_holder = tk.Label(
master=output,
text='',
width=20,
height=4,
bg='gray'
)
lbl_holder.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
lbl_output = tk.Label(
master=output,
text='0',
width=20,
height=4,
bg='silver'
)
lbl_output.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
panel.columnconfigure([1, 2, 3, 4], weight=1, minsize=50)
panel.rowconfigure([1, 2, 3, 4, 5, 6], weight=1, minsize=50)
### BUTTONS ###
# NUMBERS
class panel_button:
def __init__(self, text, row, column):
self.text = text
self.frame = tk.Frame(
master=panel
)
self.frame.grid(row=row, column=column, padx=5, pady=5, sticky='nsew')
self.button = tk.Button(
master=self.frame,
text=text,
width=20,
height=4,
command=self.method
)
self.button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
def method(self):
output_update(self.text)
button1 = panel_button('1', 3, 1)
button2 = panel_button('2', 3, 2)
button3 = panel_button('3', 3, 3)
button4 = panel_button('4', 4, 1)
button5 = panel_button('5', 4, 2)
button6 = panel_button('6', 4, 3)
button7 = panel_button('7', 5, 1)
button8 = panel_button('8', 5, 2)
button9 = panel_button('9', 5, 3)
button0 = panel_button('0', 6, 2)
dot_frame = tk.Frame(
master=panel
)
dot_frame.grid(row=6, column=3, padx=5, pady=5, sticky='nsew')
dot_button = tk.Button(
master=dot_frame,
text='.',
width=20,
height=4,
command=dot
)
dot_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
# UTILITY
clear_frame = tk.Frame(
master=panel
)
clear_frame.grid(row=1, column=3, padx=5, pady=5, sticky='nsew')
clear_button = tk.Button(
master=clear_frame,
text='Clear',
width=20,
height=4,
command=output_clear
)
clear_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
backspace_frame = tk.Frame(
master=panel
)
backspace_frame.grid(row=1, column=4, padx=5, pady=5, sticky='nsew')
backspace_button = tk.Button(
master=backspace_frame,
text='Back',
width=20,
height=4,
command=backspace
)
backspace_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
switch_prefix_frame = tk.Frame(
master=panel
)
switch_prefix_frame.grid(row=6, column=1, padx=5, pady=5, sticky='nsew')
switch_prefix_button = tk.Button(
master=switch_prefix_frame,
text='+/-',
width=20,
height=4,
command=switch_prefix
)
switch_prefix_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
# OPERATORS
equals_frame = tk.Frame(
master=panel
)
equals_frame.grid(row=6, column=4, padx=5, pady=5, sticky='nsew')
equals_button = tk.Button(
master=equals_frame,
text='=',
width=20,
height=4,
command=equals
)
equals_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
addition_frame = tk.Frame(
master=panel
)
addition_frame.grid(row=5, column=4, padx=5, pady=5, sticky='nsew')
addition_button = tk.Button(
master=addition_frame,
text='+',
width=20,
height=4,
command=addition
)
addition_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
subtraction_frame = tk.Frame(
master=panel
)
subtraction_frame.grid(row=4, column=4, padx=5, pady=5, sticky='nsew')
subtraction_button = tk.Button(
master=subtraction_frame,
text='-',
width=20,
height=4,
command=subtraction
)
subtraction_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
multiplication_frame = tk.Frame(
master=panel
)
multiplication_frame.grid(row=3, column=4, padx=5, pady=5, sticky='nsew')
multiplication_button = tk.Button(
master=multiplication_frame,
text='×',
width=20,
height=4,
command=multiplication
)
multiplication_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
division_frame = tk.Frame(
master=panel
)
division_frame.grid(row=2, column=4, padx=5, pady=5, sticky='nsew')
division_button = tk.Button(
master=division_frame,
text='÷',
width=20,
height=4,
command=division
)
division_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
root_frame = tk.Frame(
master=panel
)
root_frame.grid(row=2, column=1, padx=5, pady=5, sticky='nsew')
root_button = tk.Button(
master=root_frame,
text='√',
width=20,
height=4,
command=root
)
root_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
power_frame = tk.Frame(
master=panel
)
power_frame.grid(row=2, column=2, padx=5, pady=5, sticky='nsew')
power_button = tk.Button(
master=power_frame,
text='^',
width=20,
height=4,
command=power
)
power_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
factorial_frame = tk.Frame(
master=panel
)
factorial_frame.grid(row=2, column=3, padx=5, pady=5, sticky='nsew')
factorial_button = tk.Button(
master=factorial_frame,
text='!n',
width=20,
height=4,
command=factorial
)
factorial_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
pi_frame = tk.Frame(
master=panel
)
pi_frame.grid(row=1, column=1, padx=5, pady=5, sticky='nsew')
pi_button = tk.Button(
master=pi_frame,
text='π',
width=20,
height=4,
command=pi
)
pi_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
output.pack(fill=tk.BOTH, expand=True)
panel.pack(fill=tk.BOTH, expand=True)
window.mainloop() | tkinter calculator v1.py | import tkinter as tk
window = tk.Tk()
window.title('Claculator')
numbers = ['0']
action = ['null']
### FUNCTIONS ###
# OUTPUT
def output_update(text):
value = lbl_output['text']
if len(numbers)==1:
if numbers[0]=='0':
value = value[:-1]
numbers.clear()
elif numbers[0]=='clear':
value = ''
numbers.clear()
numbers.append(text)
lbl_output['text'] = value+str(text)
def output_clear():
numbers.clear()
numbers.append('0')
action[0] = 'null'
lbl_output['text'] = '0'
lbl_holder['text'] = ''
def switch_prefix():
value = lbl_output['text']
if numbers[0] == 'clear':
value = '0'
numbers.clear()
numbers.append('0')
try:
if value[0] == '-': value = value[1:]
else: value = '-'+value
except: value = '-'
lbl_output['text'] = value
def dot():
value = lbl_output['text']+'.'
numbers.append('.')
c=0
for x in value:
if x == '.': c+=1
if c>1: value = value[:-1]
else:
try:
null=str(value[len(value)-2])
try: null=float(value[len(value)-2])
except: value = value[:-1]+'0.'
except: value = '0.'
lbl_output['text'] = value
def backspace():
value = lbl_output['text']
value = value[:-1]
if numbers[0] == 'clear':
numbers.clear()
numbers.append('0')
value = '0'
else: numbers.pop()
if value=='' or value=='-':value+='0';numbers.append('0')
lbl_output['text'] = value
def pi():
numbers.clear()
for i in "3.1415926535897932384626433832795":
numbers.append(i)
lbl_output['text'] = "3.1415926535897932384626433832795"
# OPERATORS
def equals():
value = lbl_output['text']
holder = lbl_holder['text']
if holder[0] == '√':
holder = holder[1:]
holder = holder.split()
try: number = holder[0]
except: return
if action[0] == 'add':
try: value = float(number) + float(value)
except: return
if action[0] == 'sub':
try: value = float(number) - float(value)
except: return
if action[0] == 'mul':
try: value = float(number) * float(value)
except: return
if action[0] == 'div':
try: value = float(number) / float(value)
except: return
if action[0] == 'root':
try: value = float(number)**(1/float(value))
except: return
if action[0] == 'pow':
try: value = float(number)**(float(value))
except: return
action[0] = 'null'
holder = ''
numbers.clear()
numbers.append('clear')
value = str(value)
if value[len((value))-2]+value[len(value)-1] == '.0':
value = value[:-2]
lbl_output['text'] = str(value)
lbl_holder['text'] = str(holder)
def addition():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'add'
holder = f"{value} +"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def subtraction():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'sub'
holder = f"{value} -"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def multiplication():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'mul'
holder = f"{value} ×"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def division():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'div'
holder = f"{value} ÷"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def root():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'root'
holder = f"√{value}"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def power():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
action[0] = 'pow'
holder = f"{value} ^"
value = '0'
numbers.clear()
numbers.append('0')
lbl_output['text'] = value
lbl_holder['text'] = holder
def factorial():
value = lbl_output['text']
holder = lbl_holder['text']
if len(holder) > 0:
equals()
value = lbl_output['text']
n = 1
for i in range(int(value)):
i+=1
n = n*i
n = n+(float(value)-int(value))
value = str(n)
holder = ''
numbers.clear()
numbers.append('clear')
if value[len((value))-2]+value[len(value)-1] == '.0':
value = value[:-2]
lbl_output['text'] = str(value)
lbl_holder['text'] = str(holder)
### WINDOW ###
output = tk.Frame(
master=window
)
panel = tk.Frame(
master=window
)
lbl_holder = tk.Label(
master=output,
text='',
width=20,
height=4,
bg='gray'
)
lbl_holder.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
lbl_output = tk.Label(
master=output,
text='0',
width=20,
height=4,
bg='silver'
)
lbl_output.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
panel.columnconfigure([1, 2, 3, 4], weight=1, minsize=50)
panel.rowconfigure([1, 2, 3, 4, 5, 6], weight=1, minsize=50)
### BUTTONS ###
# NUMBERS
class panel_button:
def __init__(self, text, row, column):
self.text = text
self.frame = tk.Frame(
master=panel
)
self.frame.grid(row=row, column=column, padx=5, pady=5, sticky='nsew')
self.button = tk.Button(
master=self.frame,
text=text,
width=20,
height=4,
command=self.method
)
self.button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
def method(self):
output_update(self.text)
button1 = panel_button('1', 3, 1)
button2 = panel_button('2', 3, 2)
button3 = panel_button('3', 3, 3)
button4 = panel_button('4', 4, 1)
button5 = panel_button('5', 4, 2)
button6 = panel_button('6', 4, 3)
button7 = panel_button('7', 5, 1)
button8 = panel_button('8', 5, 2)
button9 = panel_button('9', 5, 3)
button0 = panel_button('0', 6, 2)
dot_frame = tk.Frame(
master=panel
)
dot_frame.grid(row=6, column=3, padx=5, pady=5, sticky='nsew')
dot_button = tk.Button(
master=dot_frame,
text='.',
width=20,
height=4,
command=dot
)
dot_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
# UTILITY
clear_frame = tk.Frame(
master=panel
)
clear_frame.grid(row=1, column=3, padx=5, pady=5, sticky='nsew')
clear_button = tk.Button(
master=clear_frame,
text='Clear',
width=20,
height=4,
command=output_clear
)
clear_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
backspace_frame = tk.Frame(
master=panel
)
backspace_frame.grid(row=1, column=4, padx=5, pady=5, sticky='nsew')
backspace_button = tk.Button(
master=backspace_frame,
text='Back',
width=20,
height=4,
command=backspace
)
backspace_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
switch_prefix_frame = tk.Frame(
master=panel
)
switch_prefix_frame.grid(row=6, column=1, padx=5, pady=5, sticky='nsew')
switch_prefix_button = tk.Button(
master=switch_prefix_frame,
text='+/-',
width=20,
height=4,
command=switch_prefix
)
switch_prefix_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
# OPERATORS
equals_frame = tk.Frame(
master=panel
)
equals_frame.grid(row=6, column=4, padx=5, pady=5, sticky='nsew')
equals_button = tk.Button(
master=equals_frame,
text='=',
width=20,
height=4,
command=equals
)
equals_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
addition_frame = tk.Frame(
master=panel
)
addition_frame.grid(row=5, column=4, padx=5, pady=5, sticky='nsew')
addition_button = tk.Button(
master=addition_frame,
text='+',
width=20,
height=4,
command=addition
)
addition_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
subtraction_frame = tk.Frame(
master=panel
)
subtraction_frame.grid(row=4, column=4, padx=5, pady=5, sticky='nsew')
subtraction_button = tk.Button(
master=subtraction_frame,
text='-',
width=20,
height=4,
command=subtraction
)
subtraction_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
multiplication_frame = tk.Frame(
master=panel
)
multiplication_frame.grid(row=3, column=4, padx=5, pady=5, sticky='nsew')
multiplication_button = tk.Button(
master=multiplication_frame,
text='×',
width=20,
height=4,
command=multiplication
)
multiplication_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
division_frame = tk.Frame(
master=panel
)
division_frame.grid(row=2, column=4, padx=5, pady=5, sticky='nsew')
division_button = tk.Button(
master=division_frame,
text='÷',
width=20,
height=4,
command=division
)
division_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
root_frame = tk.Frame(
master=panel
)
root_frame.grid(row=2, column=1, padx=5, pady=5, sticky='nsew')
root_button = tk.Button(
master=root_frame,
text='√',
width=20,
height=4,
command=root
)
root_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
power_frame = tk.Frame(
master=panel
)
power_frame.grid(row=2, column=2, padx=5, pady=5, sticky='nsew')
power_button = tk.Button(
master=power_frame,
text='^',
width=20,
height=4,
command=power
)
power_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
factorial_frame = tk.Frame(
master=panel
)
factorial_frame.grid(row=2, column=3, padx=5, pady=5, sticky='nsew')
factorial_button = tk.Button(
master=factorial_frame,
text='!n',
width=20,
height=4,
command=factorial
)
factorial_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
pi_frame = tk.Frame(
master=panel
)
pi_frame.grid(row=1, column=1, padx=5, pady=5, sticky='nsew')
pi_button = tk.Button(
master=pi_frame,
text='π',
width=20,
height=4,
command=pi
)
pi_button.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)
output.pack(fill=tk.BOTH, expand=True)
panel.pack(fill=tk.BOTH, expand=True)
window.mainloop() | 0.081095 | 0.147893 |
from numpy import *
from pytrain.lib import convert
from pytrain.lib import ptmath
import operator
class HierarchicalClustering:
def __init__(self, mat_data, K, dist_func):
self.mat_data = convert.list2npfloat(mat_data)
self.dist_func = ptmath.distfunc(dist_func)
self.K = K
self.col_len = len(self.mat_data[0])
self.row_len = len(self.mat_data)
self.unique_idx = 0
self.group_list = []
self.dist_list = []
self.cluster_points = []
def fit(self):
return self.cluster()
unique_idx = 0
group_map = {}
class Group:
def __init__(self, vt, didx):
self.unique_idx = HierarchicalClustering.unique_idx
self.vector = vt
self.data_idx = didx
HierarchicalClustering.group_map[self.unique_idx] = self
HierarchicalClustering.unique_idx += 1
def log(self):
print "[ Group", self.unique_idx, "] vt : ", self.vector, ", data_idx : ", str(self.data_idx)
class Dist:
def __init__(self, s_grp, t_grp, dist_func):
self.src_idx = s_grp.unique_idx
self.trg_idx = t_grp.unique_idx
self.distance = dist_func(s_grp.vector, t_grp.vector)
def log(self):
print "[ Dist ]", self.src_idx, "-", self.trg_idx, " = ", self.distance
def remove_from_dist_list(self, grp_idx):
self.dist_list = [dist_obj for dist_obj in self.dist_list \
if (dist_obj.src_idx != grp_idx and dist_obj.trg_idx != grp_idx)]
def remove_from_group_list(self, grp_idx):
self.group_list = [grp_obj for grp_obj in self.group_list \
if (grp_obj.unique_idx != grp_idx) ]
def insert_new_group(self, grp):
for oth in self.group_list:
new_dis = self.Dist(grp, oth, self.dist_func)
for idx, old_dis in enumerate(self.dist_list):
if new_dis.distance >= old_dis.distance:
self.dist_list.insert(idx, new_dis)
break
self.group_list.append(grp)
def merge_group(self, grp_1_idx, grp_2_idx):
grp_1 = self.group_map[grp_1_idx]
grp_2 = self.group_map[grp_2_idx]
mgd_vt = ( (grp_1.vector * len(grp_1.data_idx)) \
+ (grp_2.vector * len(grp_2.data_idx)) ) \
/ ( len(grp_1.data_idx) + len(grp_2.data_idx))
mgd_didx = []
mgd_didx.extend(grp_1.data_idx)
mgd_didx.extend(grp_2.data_idx)
mgd_grp = self.Group(mgd_vt, mgd_didx)
return mgd_grp
def cluster(self):
# make initial groups
for idx, vt in enumerate(self.mat_data):
self.group_list.append(self.Group(vt, [idx]))
# make dist_list
for i, src_g in enumerate(self.group_list):
for j in range(i+1,len(self.group_list)):
trg_g = self.group_list[j]
self.dist_list.append(self.Dist(src_g, trg_g, self.dist_func))
# merge group until length of group list less than K
self.dist_list.sort(key=lambda x : x.distance,reverse = True)
while len(self.group_list) > self.K :
selected_dist = self.dist_list.pop()
new_group = self.merge_group(selected_dist.src_idx, selected_dist.trg_idx)
self.remove_from_dist_list(selected_dist.src_idx)
self.remove_from_dist_list(selected_dist.trg_idx)
self.remove_from_group_list(selected_dist.src_idx)
self.remove_from_group_list(selected_dist.trg_idx)
self.insert_new_group(new_group)
# loop group list & fill label data
self.label_data = [-1 for x in range(len(self.mat_data))]
for grp_idx, grp in enumerate(self.group_list):
self.cluster_points.append(grp.vector)
for idx in grp.data_idx:
self.label_data[idx] = grp_idx
return self.label_data
# assign input array to cluster
def predict(self, input_array):
input_array = convert.list2npfloat(input_array)
return self.assign_row(self.cluster_points, input_array)
def assign_row(self, cluster_points, row):
min_idx = -1
min_dist = None
for i, cp in enumerate(cluster_points):
cp_dist = self.dist_func(row, cp)
if min_dist == None or min_dist > cp_dist:
min_dist = cp_dist
min_idx = i
return min_idx | pytrain/HierarchicalClustering/HierarchicalClustering.py |
from numpy import *
from pytrain.lib import convert
from pytrain.lib import ptmath
import operator
class HierarchicalClustering:
def __init__(self, mat_data, K, dist_func):
self.mat_data = convert.list2npfloat(mat_data)
self.dist_func = ptmath.distfunc(dist_func)
self.K = K
self.col_len = len(self.mat_data[0])
self.row_len = len(self.mat_data)
self.unique_idx = 0
self.group_list = []
self.dist_list = []
self.cluster_points = []
def fit(self):
return self.cluster()
unique_idx = 0
group_map = {}
class Group:
def __init__(self, vt, didx):
self.unique_idx = HierarchicalClustering.unique_idx
self.vector = vt
self.data_idx = didx
HierarchicalClustering.group_map[self.unique_idx] = self
HierarchicalClustering.unique_idx += 1
def log(self):
print "[ Group", self.unique_idx, "] vt : ", self.vector, ", data_idx : ", str(self.data_idx)
class Dist:
def __init__(self, s_grp, t_grp, dist_func):
self.src_idx = s_grp.unique_idx
self.trg_idx = t_grp.unique_idx
self.distance = dist_func(s_grp.vector, t_grp.vector)
def log(self):
print "[ Dist ]", self.src_idx, "-", self.trg_idx, " = ", self.distance
def remove_from_dist_list(self, grp_idx):
self.dist_list = [dist_obj for dist_obj in self.dist_list \
if (dist_obj.src_idx != grp_idx and dist_obj.trg_idx != grp_idx)]
def remove_from_group_list(self, grp_idx):
self.group_list = [grp_obj for grp_obj in self.group_list \
if (grp_obj.unique_idx != grp_idx) ]
def insert_new_group(self, grp):
for oth in self.group_list:
new_dis = self.Dist(grp, oth, self.dist_func)
for idx, old_dis in enumerate(self.dist_list):
if new_dis.distance >= old_dis.distance:
self.dist_list.insert(idx, new_dis)
break
self.group_list.append(grp)
def merge_group(self, grp_1_idx, grp_2_idx):
grp_1 = self.group_map[grp_1_idx]
grp_2 = self.group_map[grp_2_idx]
mgd_vt = ( (grp_1.vector * len(grp_1.data_idx)) \
+ (grp_2.vector * len(grp_2.data_idx)) ) \
/ ( len(grp_1.data_idx) + len(grp_2.data_idx))
mgd_didx = []
mgd_didx.extend(grp_1.data_idx)
mgd_didx.extend(grp_2.data_idx)
mgd_grp = self.Group(mgd_vt, mgd_didx)
return mgd_grp
def cluster(self):
# make initial groups
for idx, vt in enumerate(self.mat_data):
self.group_list.append(self.Group(vt, [idx]))
# make dist_list
for i, src_g in enumerate(self.group_list):
for j in range(i+1,len(self.group_list)):
trg_g = self.group_list[j]
self.dist_list.append(self.Dist(src_g, trg_g, self.dist_func))
# merge group until length of group list less than K
self.dist_list.sort(key=lambda x : x.distance,reverse = True)
while len(self.group_list) > self.K :
selected_dist = self.dist_list.pop()
new_group = self.merge_group(selected_dist.src_idx, selected_dist.trg_idx)
self.remove_from_dist_list(selected_dist.src_idx)
self.remove_from_dist_list(selected_dist.trg_idx)
self.remove_from_group_list(selected_dist.src_idx)
self.remove_from_group_list(selected_dist.trg_idx)
self.insert_new_group(new_group)
# loop group list & fill label data
self.label_data = [-1 for x in range(len(self.mat_data))]
for grp_idx, grp in enumerate(self.group_list):
self.cluster_points.append(grp.vector)
for idx in grp.data_idx:
self.label_data[idx] = grp_idx
return self.label_data
# assign input array to cluster
def predict(self, input_array):
input_array = convert.list2npfloat(input_array)
return self.assign_row(self.cluster_points, input_array)
def assign_row(self, cluster_points, row):
min_idx = -1
min_dist = None
for i, cp in enumerate(cluster_points):
cp_dist = self.dist_func(row, cp)
if min_dist == None or min_dist > cp_dist:
min_dist = cp_dist
min_idx = i
return min_idx | 0.458591 | 0.226217 |
from django.contrib.auth import get_user_model
from django.db import models, transaction
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from rules.contrib.models import RulesModel
from booking import rules
from booking.models import PartOfDay, Event
from users.models import Group
from users.models.user import get_sentinel_user
class Game(RulesModel):
    """A game planned for a group on a given day(part) of an event.

    Games that share the same (day, part_of_day, group, event) form a
    sibling set with an explicit ``order``; :meth:`up`, :meth:`down` and
    :meth:`swap` rearrange that ordering.  Permissions are rule-based
    (django-rules) rather than Django's default model permissions.
    """

    # User who created the game.  Not cascaded on user deletion:
    creator = models.ForeignKey(
        get_user_model(),
        # The receiver "_user_delete" replaces the creator with a sentinel user in all
        # related games before deleting a user to ensure that the game is still
        # associated to the group.
        on_delete=models.DO_NOTHING,
        verbose_name=_("creator"),
        related_name="games",
        editable=False,
    )
    name = models.CharField(verbose_name=_("Game name"), max_length=250)
    # Calendar day on which the game takes place / materials are needed.
    day = models.DateField(
        verbose_name=_("day"), help_text=_("On what day are the materials needed?")
    )
    group = models.ForeignKey(
        Group,
        on_delete=models.CASCADE,
        verbose_name=_("group"),
    )
    event = models.ForeignKey(Event, on_delete=models.CASCADE, verbose_name=_("event"))
    # Two-letter daypart code drawn from PartOfDay's choices (morning by default).
    part_of_day = models.CharField(
        verbose_name=_("daypart"),
        max_length=2,
        choices=PartOfDay.PART_OF_DAY_CHOICES,
        default=PartOfDay.MORNING,
        help_text=_("At what daypart are the materials needed?"),
    )
    # Optional delivery-location override; when left empty the part of day's
    # location applies (per the help text).
    location = models.CharField(
        verbose_name=_("location"),
        max_length=250,
        null=True,
        blank=True,
        help_text=_(
            "Where do you need the materials for this game to be delivered? This "
            "defaults to the location of the part of day."
        ),
    )
    # Position within the sibling set (same day, part_of_day, group, event).
    # Not user-editable: it is maintained via swap/up/down and a pre_save
    # receiver ("_game_manage_order") elsewhere in this module.
    order = models.PositiveIntegerField(
        verbose_name=_("order"),
        help_text=_("Defines an ordering for the games within a day(part)"),
        editable=False,
        default=0,
    )

    class Meta:
        verbose_name = _("game")
        verbose_name_plural = _("games")
        # Default queryset ordering; previous/next below rely on this.
        ordering = ["day", "part_of_day", "order"]
        default_permissions = []  # Removed default permissions as we don't check them
        rules_permissions = {
            "change": rules.change_game,
            "book_on": rules.book_on_game,
            "add_group": rules.add_game_to_group,
        }

    def __str__(self):
        """Return the human-readable game name."""
        return self.name

    @property
    def _siblings(self):
        """
        QuerySet of all games sharing this game's day, part of day, group and
        event — including this game itself.  Ordered per ``Meta.ordering``.
        """
        return Game.objects.filter(
            day=self.day,
            part_of_day=self.part_of_day,
            group=self.group,
            event=self.event,
        )

    @property
    def previous(self):
        """
        Returns the Game that goes before this game in order.

        Relies on ``Meta.ordering``: among siblings with a smaller ``order``,
        ``last()`` picks the closest one.  ``None`` if this game is first.

        :return Game: previous Game
        """
        return self._siblings.filter(order__lt=self.order).last()

    @property
    def next(self):
        """
        Returns the Game that goes after this game in order.

        Relies on ``Meta.ordering``: among siblings with a larger ``order``,
        ``first()`` picks the closest one.  ``None`` if this game is last.

        :return Game: next Game
        """
        return self._siblings.filter(order__gt=self.order).first()

    def swap(self, replacement):
        """
        Swaps the order of the supplied Game and this Game.

        Both saves happen inside one transaction so the ordering can never be
        observed half-swapped.

        :param Game replacement: Game for this Game to swap order with
        :return: None
        """
        with transaction.atomic():
            self_order, replacement_order = self.order, replacement.order
            self.order = replacement_order
            replacement.order = self_order
            self.save()
            replacement.save()

    # Tell Django templates this method has side effects.
    swap.alters_data = True

    def up(self):
        """
        Move Game up one position.

        No-op when the game is already first among its siblings.

        :return: None
        """
        previous = self.previous
        if previous:
            self.swap(previous)

    up.alters_data = True

    def down(self):
        """
        Move Game down one position.

        No-op when the game is already last among its siblings.

        :return: None
        """
        _next = self.next
        if _next:
            self.swap(_next)

    down.alters_data = True

    @property
    def form(self):
        """Edit form bound to this game, with ids namespaced by game pk."""
        # NOTE(review): imported locally, presumably to avoid an import cycle
        # between booking.forms and this model module — confirm.
        from booking.forms import GameForm

        return GameForm(instance=self, auto_id="id_game_%s_" + str(self.id))

    @property
    def booking_form(self):
        """Blank booking form pre-bound to this game, ids namespaced by pk."""
        # NOTE(review): local import for the same presumed cycle-avoidance
        # reason as in `form` — confirm.
        from booking.forms import BookingForm

        return BookingForm(
            initial={"game": self}, auto_id="id_game_booking_%s_" + str(self.id)
        )
@receiver(pre_save, sender=Game)
def _game_manage_order(sender, instance, **kwargs):
"""
If a game is added newly or the part of day of the game has been changed,
we change the order of the game such that it is the last sibling.
:param Game sender:
:param Game instance:
:param kwargs:
:return: None
"""
def append_order(game):
"""
Returns the order for appending a game to a day part.
:param Game game: instance
:return int: the order for appending
"""
last_sibling = game._siblings.last()
if last_sibling:
return last_sibling.order + 1
else:
return 1
try:
current_game = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
# Game is new
instance.order = append_order(instance)
else:
if not current_game.part_of_day == instance.part_of_day:
# The part of day of the game has been changed
instance.order = append_order(instance)
else:
pass # Keep game order as-is
@receiver(pre_delete, sender=get_user_model(), dispatch_uid="user_delete_signal_game")
def _user_delete(sender, instance, using, **kwargs):
"""
Changes games of a user that gets deleted such that the creator becomes a
sentinel user associated to the same group.
"""
Game.objects.filter(creator=instance).update(
creator=get_sentinel_user(instance.group)
) | booking/models/game.py | from django.contrib.auth import get_user_model
from django.db import models, transaction
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from rules.contrib.models import RulesModel
from booking import rules
from booking.models import PartOfDay, Event
from users.models import Group
from users.models.user import get_sentinel_user
class Game(RulesModel):
creator = models.ForeignKey(
get_user_model(),
# The receiver "_user_delete" replaces the creator with a sentinel user in all
# related games before deleting a user to ensure that the game is still
# associated to the group.
on_delete=models.DO_NOTHING,
verbose_name=_("creator"),
related_name="games",
editable=False,
)
name = models.CharField(verbose_name=_("Game name"), max_length=250)
day = models.DateField(
verbose_name=_("day"), help_text=_("On what day are the materials needed?")
)
group = models.ForeignKey(
Group,
on_delete=models.CASCADE,
verbose_name=_("group"),
)
event = models.ForeignKey(Event, on_delete=models.CASCADE, verbose_name=_("event"))
part_of_day = models.CharField(
verbose_name=_("daypart"),
max_length=2,
choices=PartOfDay.PART_OF_DAY_CHOICES,
default=PartOfDay.MORNING,
help_text=_("At what daypart are the materials needed?"),
)
location = models.CharField(
verbose_name=_("location"),
max_length=250,
null=True,
blank=True,
help_text=_(
"Where do you need the materials for this game to be delivered? This "
"defaults to the location of the part of day."
),
)
order = models.PositiveIntegerField(
verbose_name=_("order"),
help_text=_("Defines an ordering for the games within a day(part)"),
editable=False,
default=0,
)
class Meta:
verbose_name = _("game")
verbose_name_plural = _("games")
ordering = ["day", "part_of_day", "order"]
default_permissions = [] # Removed default permissions as we don't check them
rules_permissions = {
"change": rules.change_game,
"book_on": rules.book_on_game,
"add_group": rules.add_game_to_group,
}
def __str__(self):
return self.name
@property
def _siblings(self):
return Game.objects.filter(
day=self.day,
part_of_day=self.part_of_day,
group=self.group,
event=self.event,
)
@property
def previous(self):
"""
Returns the Game that goes before this game in order.
:return Game: previous Game
"""
return self._siblings.filter(order__lt=self.order).last()
@property
def next(self):
"""
Returns the Game that goes after this game in order.
:return Game: next Game
"""
return self._siblings.filter(order__gt=self.order).first()
def swap(self, replacement):
"""
Swaps the order of the supplied Game and this Game.
:param Game replacement: Game for this Game to swap order with
:return: None
"""
with transaction.atomic():
self_order, replacement_order = self.order, replacement.order
self.order = replacement_order
replacement.order = self_order
self.save()
replacement.save()
swap.alters_data = True
def up(self):
"""
Move Game up one position.
:return: None
"""
previous = self.previous
if previous:
self.swap(previous)
up.alters_data = True
def down(self):
"""
Move Game down one position.
:return: None
"""
_next = self.next
if _next:
self.swap(_next)
down.alters_data = True
@property
def form(self):
from booking.forms import GameForm
return GameForm(instance=self, auto_id="id_game_%s_" + str(self.id))
@property
def booking_form(self):
from booking.forms import BookingForm
return BookingForm(
initial={"game": self}, auto_id="id_game_booking_%s_" + str(self.id)
)
@receiver(pre_save, sender=Game)
def _game_manage_order(sender, instance, **kwargs):
"""
If a game is added newly or the part of day of the game has been changed,
we change the order of the game such that it is the last sibling.
:param Game sender:
:param Game instance:
:param kwargs:
:return: None
"""
def append_order(game):
"""
Returns the order for appending a game to a day part.
:param Game game: instance
:return int: the order for appending
"""
last_sibling = game._siblings.last()
if last_sibling:
return last_sibling.order + 1
else:
return 1
try:
current_game = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
# Game is new
instance.order = append_order(instance)
else:
if not current_game.part_of_day == instance.part_of_day:
# The part of day of the game has been changed
instance.order = append_order(instance)
else:
pass # Keep game order as-is
@receiver(pre_delete, sender=get_user_model(), dispatch_uid="user_delete_signal_game")
def _user_delete(sender, instance, using, **kwargs):
"""
Changes games of a user that gets deleted such that the creator becomes a
sentinel user associated to the same group.
"""
Game.objects.filter(creator=instance).update(
creator=get_sentinel_user(instance.group)
) | 0.688049 | 0.179279 |
import unittest
from ..pygraph import (UndirectedGraph, find_biconnected_components, find_articulation_vertices, merge_graphs,
build_triangle_graph, build_square_graph, build_diamond_graph,
build_tetrahedral_graph, build_5_cycle_graph, build_gem_graph)
from . import utility_functions
class BiconnectedComponentsTest(unittest.TestCase):
def test_empty_graph(self):
"""Does the ''find_biconnected_components'' function return an empty set of edges for an empty graph?"""
graph = UndirectedGraph()
expected = []
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_single_node_graph(self):
"""Does the ''find_biconnected_components'' function return an empty set of edges for a graph with 1 node?"""
graph = utility_functions.build_single_node_graph()
expected = []
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_2_node_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a 2-node connected graph?"""
graph = utility_functions.build_2_node_graph()
expected = [[1]]
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_triangle_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a triangle graph?"""
graph = build_triangle_graph()
expected = [[1, 2, 3]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_square_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a square graph?"""
graph = build_square_graph()
expected = [[1, 2, 3, 4]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_diamond_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a diamond graph?"""
graph = build_diamond_graph()
expected = [[1, 2, 3, 4, 5]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_tetrahedral_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a tetrahedral graph?"""
graph = build_tetrahedral_graph()
expected = [[1, 2, 3, 4, 5, 6]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_5_cycle_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a 5-cycle graph?"""
graph = build_5_cycle_graph()
expected = [[1, 2, 3, 4, 5]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_gem_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a gem graph?"""
graph = build_gem_graph()
expected = [[1, 2, 3, 4, 5, 6, 7]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_fully_biconnected_graph(self):
"""Does the ''find_biconnected_components'' function correctly return
the entire graph for a fully biconnected graph?"""
graph = utility_functions.build_fully_biconnected_test_graph()
expected_edges = list(range(1, 20)) # There are 19 edges in the test graph, so their IDs go from 1-19
calculated_edges = find_biconnected_components(graph)
# Verify that there is only a single component in the calculated edge list
self.assertEqual(1, len(calculated_edges))
# Verify all edges exist within the calculated edge list
component = calculated_edges[0]
for edge_id in expected_edges:
self.assertIn(edge_id, component)
# Verify that there are precisely the number of edges expected in the calculated edge list
self.assertEqual(19, len(component))
def test_biconnected_graph(self):
"""Does the ''find_biconnected_components'' function correctly identify the
components in a graph with multiple biconnected components?"""
graph = utility_functions.build_biconnected_test_graph()
component_a = [1, 2, 3]
component_b = [4, 5, 6, 7, 8]
component_c = [9, 10, 11, 12, 13, 14, 15, 16]
known_components = [component_a, component_b, component_c]
calculated_components = find_biconnected_components(graph)
# Verify that there are the expected number of components
self.assertEqual(3, len(calculated_components))
# --Verify each known component exists and has the correct number of edges
found_components_count = 0
for kc in known_components:
found_known_component = False
for c in calculated_components:
# --Determine if the current component is a superset of known component
# --(it might have more edges than the known component)
superset_match = True
for e in kc:
if e not in c:
# --This is not the correct component, go to the next one
superset_match = False
break
if superset_match:
# --Determine if the current component has precisely the same number of
# --edges in it as the known component
found_known_component = (len(kc) == len(c))
if found_known_component:
found_components_count += 1
break
if not found_known_component:
# --We know the current component was not found in the connected components
# --list, fail with an error message
msg = 'Component {} not found in {}'.format(kc, calculated_components)
self.fail(msg)
# --This verifies that we found all three known components in the calculated components
# --Prior tests should stop things before we get this far if there are errors,
# --but it's a simple sanity check test
self.assertEqual(3, found_components_count)
def test_disconnected_graph(self):
"""Does the ''find_biconnected_components'' function return components for each connected component?"""
graph = utility_functions.build_biconnected_test_graph()
addition_graph = build_triangle_graph()
node_map, edge_map = merge_graphs(graph, addition_graph)
calculated_components = find_biconnected_components(graph)
# Verify that there are the expected number of components
self.assertEqual(4, len(calculated_components))
class ArticulationVerticesTest(unittest.TestCase):
def test_articulation_vertices_empty_graph(self):
"""Does the ''find_articulation_vertices'' function return an empty list when run on an empty graph?"""
graph = UndirectedGraph()
expected = []
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_fully_biconnected_graph(self):
"""Does the ''find_articulation_vertices'' function return an empty list
when run on a fully biconnected graph?"""
graph = utility_functions.build_fully_biconnected_test_graph()
expected = []
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_single_cut_vertex(self):
"""Does the ''find_articulation_vertices'' function return a single
articulation vertex for a graph with a single cut vertex?"""
graph = utility_functions.build_3_node_line_graph()
expected = [2]
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_single_cut_vertex_is_root(self):
"""Does the ''find_articulation_vertices'' function return a single
articulation vertex for a graph where the root node is the single cut vertex?"""
graph = utility_functions.build_3_node_line_root_articulation_graph()
expected = [1]
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_dual_cut_vertices(self):
"""Does the ''find_articulation_vertices'' function return a pair of
articulation vertices for a graph where there are two?"""
graph = utility_functions.build_simple_test_graph()
expected = [1, 2]
calculated = find_articulation_vertices(graph)
calculated.sort()
self.assertEqual(expected, calculated)
def test_articulation_vertices_biconnected_graph(self):
"""Does the ''find_articulation_vertices'' function return the correct list
of articulation vertices for a graph with multiple biconnected components?"""
graph = utility_functions.build_biconnected_test_graph()
expected = [2, 5, 7, 8]
calculated = find_articulation_vertices(graph)
calculated.sort()
self.assertEqual(expected, calculated) | tests/test_biconnected_components.py |
import unittest
from ..pygraph import (UndirectedGraph, find_biconnected_components, find_articulation_vertices, merge_graphs,
build_triangle_graph, build_square_graph, build_diamond_graph,
build_tetrahedral_graph, build_5_cycle_graph, build_gem_graph)
from . import utility_functions
class BiconnectedComponentsTest(unittest.TestCase):
def test_empty_graph(self):
"""Does the ''find_biconnected_components'' function return an empty set of edges for an empty graph?"""
graph = UndirectedGraph()
expected = []
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_single_node_graph(self):
"""Does the ''find_biconnected_components'' function return an empty set of edges for a graph with 1 node?"""
graph = utility_functions.build_single_node_graph()
expected = []
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_2_node_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a 2-node connected graph?"""
graph = utility_functions.build_2_node_graph()
expected = [[1]]
calculated = find_biconnected_components(graph)
self.assertEqual(expected, calculated)
def test_triangle_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a triangle graph?"""
graph = build_triangle_graph()
expected = [[1, 2, 3]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_square_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a square graph?"""
graph = build_square_graph()
expected = [[1, 2, 3, 4]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_diamond_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a diamond graph?"""
graph = build_diamond_graph()
expected = [[1, 2, 3, 4, 5]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_tetrahedral_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a tetrahedral graph?"""
graph = build_tetrahedral_graph()
expected = [[1, 2, 3, 4, 5, 6]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_5_cycle_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a 5-cycle graph?"""
graph = build_5_cycle_graph()
expected = [[1, 2, 3, 4, 5]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_gem_graph(self):
"""Does the ''find_biconnected_components'' function return a single edge list for a gem graph?"""
graph = build_gem_graph()
expected = [[1, 2, 3, 4, 5, 6, 7]]
calculated = find_biconnected_components(graph)
calculated[0].sort()
self.assertEqual(expected, calculated)
def test_fully_biconnected_graph(self):
"""Does the ''find_biconnected_components'' function correctly return
the entire graph for a fully biconnected graph?"""
graph = utility_functions.build_fully_biconnected_test_graph()
expected_edges = list(range(1, 20)) # There are 19 edges in the test graph, so their IDs go from 1-19
calculated_edges = find_biconnected_components(graph)
# Verify that there is only a single component in the calculated edge list
self.assertEqual(1, len(calculated_edges))
# Verify all edges exist within the calculated edge list
component = calculated_edges[0]
for edge_id in expected_edges:
self.assertIn(edge_id, component)
# Verify that there are precisely the number of edges expected in the calculated edge list
self.assertEqual(19, len(component))
def test_biconnected_graph(self):
"""Does the ''find_biconnected_components'' function correctly identify the
components in a graph with multiple biconnected components?"""
graph = utility_functions.build_biconnected_test_graph()
component_a = [1, 2, 3]
component_b = [4, 5, 6, 7, 8]
component_c = [9, 10, 11, 12, 13, 14, 15, 16]
known_components = [component_a, component_b, component_c]
calculated_components = find_biconnected_components(graph)
# Verify that there are the expected number of components
self.assertEqual(3, len(calculated_components))
# --Verify each known component exists and has the correct number of edges
found_components_count = 0
for kc in known_components:
found_known_component = False
for c in calculated_components:
# --Determine if the current component is a superset of known component
# --(it might have more edges than the known component)
superset_match = True
for e in kc:
if e not in c:
# --This is not the correct component, go to the next one
superset_match = False
break
if superset_match:
# --Determine if the current component has precisely the same number of
# --edges in it as the known component
found_known_component = (len(kc) == len(c))
if found_known_component:
found_components_count += 1
break
if not found_known_component:
# --We know the current component was not found in the connected components
# --list, fail with an error message
msg = 'Component {} not found in {}'.format(kc, calculated_components)
self.fail(msg)
# --This verifies that we found all three known components in the calculated components
# --Prior tests should stop things before we get this far if there are errors,
# --but it's a simple sanity check test
self.assertEqual(3, found_components_count)
def test_disconnected_graph(self):
"""Does the ''find_biconnected_components'' function return components for each connected component?"""
graph = utility_functions.build_biconnected_test_graph()
addition_graph = build_triangle_graph()
node_map, edge_map = merge_graphs(graph, addition_graph)
calculated_components = find_biconnected_components(graph)
# Verify that there are the expected number of components
self.assertEqual(4, len(calculated_components))
class ArticulationVerticesTest(unittest.TestCase):
def test_articulation_vertices_empty_graph(self):
"""Does the ''find_articulation_vertices'' function return an empty list when run on an empty graph?"""
graph = UndirectedGraph()
expected = []
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_fully_biconnected_graph(self):
"""Does the ''find_articulation_vertices'' function return an empty list
when run on a fully biconnected graph?"""
graph = utility_functions.build_fully_biconnected_test_graph()
expected = []
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_single_cut_vertex(self):
"""Does the ''find_articulation_vertices'' function return a single
articulation vertex for a graph with a single cut vertex?"""
graph = utility_functions.build_3_node_line_graph()
expected = [2]
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_single_cut_vertex_is_root(self):
"""Does the ''find_articulation_vertices'' function return a single
articulation vertex for a graph where the root node is the single cut vertex?"""
graph = utility_functions.build_3_node_line_root_articulation_graph()
expected = [1]
calculated = find_articulation_vertices(graph)
self.assertEqual(expected, calculated)
def test_articulation_vertices_dual_cut_vertices(self):
"""Does the ''find_articulation_vertices'' function return a pair of
articulation vertices for a graph where there are two?"""
graph = utility_functions.build_simple_test_graph()
expected = [1, 2]
calculated = find_articulation_vertices(graph)
calculated.sort()
self.assertEqual(expected, calculated)
def test_articulation_vertices_biconnected_graph(self):
"""Does the ''find_articulation_vertices'' function return the correct list
of articulation vertices for a graph with multiple biconnected components?"""
graph = utility_functions.build_biconnected_test_graph()
expected = [2, 5, 7, 8]
calculated = find_articulation_vertices(graph)
calculated.sort()
self.assertEqual(expected, calculated) | 0.757436 | 0.64777 |
from __future__ import print_function
# PyDSTool imports
from PyDSTool import *
from PyDSTool.Toolbox.ParamEst import BoundMin, L2_feature_1D
from PyDSTool.common import metric_float_1D
import HH_model, IF_squarespike_model
# ----------------------------------------------------------------
trange = [0, 15]
par_args_HH = {'gna': 100, 'gk': 80, 'gl': 0.1,
'vna': 50, 'vk': -100, 'vl': -67,
'Iapp': 1.35, 'C': 1.0}
# deliberately set Iapp not quite 1.3, as used for IF neuron
ic_args_HH = {'v':-70.0, 'm': 0, 'h': 1, 'n': 0}
HH = HH_model.makeHHneuron('goalHH', par_args_HH, ic_args_HH)
HH.set(tdata=trange)
HH_traj = HH.compute('test')
HH_sampleData = {}
HH_sampleData['t'] = []
HH_sampleData['v'] = []
sample_dt = 0.06
count = 0
countlim = 5
print("Generating non-uniform samples from HH orbit...")
tsamples = arange(0, 14, sample_dt)
vsamples = HH_traj(tsamples, ['v']).toarray()
for i in range(len(tsamples)):
t = tsamples[i]
v = vsamples[i]
if v > -57:
HH_sampleData['t'].append(t)
HH_sampleData['v'].append(v)
else:
# reduce sample rate for non-spiking region
count += 1
if count == countlim:
HH_sampleData['t'].append(t)
HH_sampleData['v'].append(v)
count = 0
print("... done")
tableArgs = {'tdata': HH_sampleData['t'],
'ics': {'v': HH_sampleData['v']},
'name': 'HH_data'}
HH_DataTable = Generator.LookupTable(tableArgs)
tmesh_par = HH_sampleData['t']
par_args_linear = {'Iapp': 1.3, 'gl': 0.1, 'vl': -67, 'threshval': -60, 'C': 1.0}
par_args_spike = {'splen': 1.0}
## Parameter estimation for firing threshold
icdict = {'v': -70.0, 'excited': 0}
IFmodel_thr = IF_squarespike_model.makeIFneuron('IF_thr_fit', par_args_linear,
par_args_spike, icdict=icdict)
# un-fitted IF trajectory
IFmodel_thr.compute(trajname='orig', tdata=[0, tmesh_par[-1]],
ics={'v':-70, 'excited':0}, verboselevel=2)
orig_pdata = IFmodel_thr.sample('orig', ['v'], 0.1)
HH_event_args = {'name': 'HH_zerothresh',
'eventtol': 1e-2,
'eventdelay': 1e-3,
'starttime': 0,
'active': True}
HH_thresh_ev = Events.makePythonStateZeroCrossEvent('v', 0, 1, HH_event_args,
HH_traj.variables['v'])
result = HH_thresh_ev.searchForEvents((0, 15))
HH_spike_t = result[0][0]
print("HH spike time found at ", HH_spike_t)
class IF_spike_feat(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float_1D()
self.metric_len = 1
def evaluate(self, target):
tparts = target.test_traj.timePartitions
if len(tparts) == 1:
spike_t = 1000
else:
spike_t = tparts[1][1]
return self.metric(self.ref_traj.sample()[0], spike_t)
# set tdata here so that it persists beyond any one call to compute
IFmodel_thr.set(tdata=[0, 15])
feat = IF_spike_feat('t_similar', pars=args(debug=True))
pest_condition = condition({feat: True})
class ext_iface(extModelInterface):
# holds the data (external from the model)
pass
class int_iface(intModelInterface):
# holds the test model
pass
pest_data_interface = ext_iface(numeric_to_traj([[HH_spike_t]], 'ref', ['st'],
indepvarname='ix',
indepvar=[0]),
pest_condition)
pest_context = context([ (pest_data_interface, int_iface) ])
pest_thr = BoundMin(freeParams=['threshval'],
testModel=IFmodel_thr,
context=pest_context
)
pestData_thr = pest_thr.run(parConstraints=[-65,-57],
xtol=5e-3,
verbose=True)
## Parameter estimation for spike length
print("\nParam est. for spike length ...")
if not pestData_thr['success']:
raise RuntimeError("Failure: will not continue")
thresh_fit = pestData_thr['pars_sol']['threshval']
par_args_linear = {'Iapp': 1.3, 'gl': 0.1, 'vl': -67, 'threshval': thresh_fit,
'C': 1.0}
par_args_spike = {'splen': 1.0}
HH_datatable_traj = HH_DataTable.compute('goaltraj')
# find closest (t, v) point for i.c. near spike
ic_not_found = True
tmesh_ic = []
for t in HH_sampleData['t']:
if t >= 7.0 and t < 11:
tmesh_ic.append(t)
if ic_not_found:
t_ic = t
v_ic = HH_datatable_traj(t, ['v'])
ic_not_found = False
if t >= 11:
break
IFmodel_splen = IF_squarespike_model.makeIFneuron('IF_splen_fit', par_args_linear,
par_args_spike, icdict={'v':-70, 'excited':0})
## test IF trajectory
IFmodel_splen.compute(trajname='test', tdata=[0, t_ic])
IF_ic = IFmodel_splen('test', t_ic, ['v'])
IFmodel_splen.set(tdata=[t_ic, 12])
print("\n----------------------")
IFmodel_splen.set(ics={'v': IF_ic})
splen_feat = L2_feature_1D('splen', pars=args(t_samples=tmesh_ic,
coord='v',
tol=1e-3))
splen_condition = condition({splen_feat: True})
splen_data_interface = ext_iface(HH_datatable_traj,
splen_condition)
splen_context = context([ (splen_data_interface, int_iface) ])
pest_splen = BoundMin(freeParams=['splen'],
testModel=IFmodel_splen,
context=splen_context
)
pestData_splen = pest_splen.run(xtol=0.01, parConstraints=[0.2,1.0],
verbose=True)
IFmodel_splen.set(pars={'splen': pestData_splen['pars_sol']['splen'],
'threshval': thresh_fit})
IFmodel_splen.compute(trajname='disp',
tdata=[0,15],
ics={'v':-70, 'excited':0})
## Plot data
print("Acquiring plot data")
origline=plot(orig_pdata['t'], orig_pdata['v'])
origleg = "Un-fitted IF orbit"
IF_sampleData = []
for t in HH_sampleData['t']:
IF_sampleData.append(IFmodel_splen('disp', t, ['v']))
plt.ylabel('w')
plt.xlabel('t')
goalline=plot(HH_sampleData['t'], HH_sampleData['v'], 'bo')
goalleg = 'HH reference'
estline_splen = plot(HH_sampleData['t'], IF_sampleData, 'k-',
linewidth=2)
estleg_splen = 'IF spike thresh \& width fitted'
plt.legend([goalline, estline_splen, origline],
[goalleg, estleg_splen, origleg],
'lower left')
show() | examples/pest_test2.py | from __future__ import print_function
# PyDSTool imports
from PyDSTool import *
from PyDSTool.Toolbox.ParamEst import BoundMin, L2_feature_1D
from PyDSTool.common import metric_float_1D
import HH_model, IF_squarespike_model
# ----------------------------------------------------------------
trange = [0, 15]
par_args_HH = {'gna': 100, 'gk': 80, 'gl': 0.1,
'vna': 50, 'vk': -100, 'vl': -67,
'Iapp': 1.35, 'C': 1.0}
# deliberately set Iapp not quite 1.3, as used for IF neuron
ic_args_HH = {'v':-70.0, 'm': 0, 'h': 1, 'n': 0}
HH = HH_model.makeHHneuron('goalHH', par_args_HH, ic_args_HH)
HH.set(tdata=trange)
HH_traj = HH.compute('test')
HH_sampleData = {}
HH_sampleData['t'] = []
HH_sampleData['v'] = []
sample_dt = 0.06
count = 0
countlim = 5
print("Generating non-uniform samples from HH orbit...")
tsamples = arange(0, 14, sample_dt)
vsamples = HH_traj(tsamples, ['v']).toarray()
for i in range(len(tsamples)):
t = tsamples[i]
v = vsamples[i]
if v > -57:
HH_sampleData['t'].append(t)
HH_sampleData['v'].append(v)
else:
# reduce sample rate for non-spiking region
count += 1
if count == countlim:
HH_sampleData['t'].append(t)
HH_sampleData['v'].append(v)
count = 0
print("... done")
tableArgs = {'tdata': HH_sampleData['t'],
'ics': {'v': HH_sampleData['v']},
'name': 'HH_data'}
HH_DataTable = Generator.LookupTable(tableArgs)
tmesh_par = HH_sampleData['t']
par_args_linear = {'Iapp': 1.3, 'gl': 0.1, 'vl': -67, 'threshval': -60, 'C': 1.0}
par_args_spike = {'splen': 1.0}
## Parameter estimation for firing threshold
icdict = {'v': -70.0, 'excited': 0}
IFmodel_thr = IF_squarespike_model.makeIFneuron('IF_thr_fit', par_args_linear,
par_args_spike, icdict=icdict)
# un-fitted IF trajectory
IFmodel_thr.compute(trajname='orig', tdata=[0, tmesh_par[-1]],
ics={'v':-70, 'excited':0}, verboselevel=2)
orig_pdata = IFmodel_thr.sample('orig', ['v'], 0.1)
HH_event_args = {'name': 'HH_zerothresh',
'eventtol': 1e-2,
'eventdelay': 1e-3,
'starttime': 0,
'active': True}
HH_thresh_ev = Events.makePythonStateZeroCrossEvent('v', 0, 1, HH_event_args,
HH_traj.variables['v'])
result = HH_thresh_ev.searchForEvents((0, 15))
HH_spike_t = result[0][0]
print("HH spike time found at ", HH_spike_t)
class IF_spike_feat(qt_feature_leaf):
def _local_init(self):
self.metric = metric_float_1D()
self.metric_len = 1
def evaluate(self, target):
tparts = target.test_traj.timePartitions
if len(tparts) == 1:
spike_t = 1000
else:
spike_t = tparts[1][1]
return self.metric(self.ref_traj.sample()[0], spike_t)
# set tdata here so that it persists beyond any one call to compute
IFmodel_thr.set(tdata=[0, 15])
feat = IF_spike_feat('t_similar', pars=args(debug=True))
pest_condition = condition({feat: True})
class ext_iface(extModelInterface):
# holds the data (external from the model)
pass
class int_iface(intModelInterface):
# holds the test model
pass
pest_data_interface = ext_iface(numeric_to_traj([[HH_spike_t]], 'ref', ['st'],
indepvarname='ix',
indepvar=[0]),
pest_condition)
pest_context = context([ (pest_data_interface, int_iface) ])
pest_thr = BoundMin(freeParams=['threshval'],
testModel=IFmodel_thr,
context=pest_context
)
pestData_thr = pest_thr.run(parConstraints=[-65,-57],
xtol=5e-3,
verbose=True)
## Parameter estimation for spike length
print("\nParam est. for spike length ...")
if not pestData_thr['success']:
raise RuntimeError("Failure: will not continue")
thresh_fit = pestData_thr['pars_sol']['threshval']
par_args_linear = {'Iapp': 1.3, 'gl': 0.1, 'vl': -67, 'threshval': thresh_fit,
'C': 1.0}
par_args_spike = {'splen': 1.0}
HH_datatable_traj = HH_DataTable.compute('goaltraj')
# find closest (t, v) point for i.c. near spike
ic_not_found = True
tmesh_ic = []
for t in HH_sampleData['t']:
if t >= 7.0 and t < 11:
tmesh_ic.append(t)
if ic_not_found:
t_ic = t
v_ic = HH_datatable_traj(t, ['v'])
ic_not_found = False
if t >= 11:
break
IFmodel_splen = IF_squarespike_model.makeIFneuron('IF_splen_fit', par_args_linear,
par_args_spike, icdict={'v':-70, 'excited':0})
## test IF trajectory
IFmodel_splen.compute(trajname='test', tdata=[0, t_ic])
IF_ic = IFmodel_splen('test', t_ic, ['v'])
IFmodel_splen.set(tdata=[t_ic, 12])
print("\n----------------------")
IFmodel_splen.set(ics={'v': IF_ic})
splen_feat = L2_feature_1D('splen', pars=args(t_samples=tmesh_ic,
coord='v',
tol=1e-3))
splen_condition = condition({splen_feat: True})
splen_data_interface = ext_iface(HH_datatable_traj,
splen_condition)
splen_context = context([ (splen_data_interface, int_iface) ])
pest_splen = BoundMin(freeParams=['splen'],
testModel=IFmodel_splen,
context=splen_context
)
pestData_splen = pest_splen.run(xtol=0.01, parConstraints=[0.2,1.0],
verbose=True)
IFmodel_splen.set(pars={'splen': pestData_splen['pars_sol']['splen'],
'threshval': thresh_fit})
IFmodel_splen.compute(trajname='disp',
tdata=[0,15],
ics={'v':-70, 'excited':0})
## Plot data
print("Acquiring plot data")
origline=plot(orig_pdata['t'], orig_pdata['v'])
origleg = "Un-fitted IF orbit"
IF_sampleData = []
for t in HH_sampleData['t']:
IF_sampleData.append(IFmodel_splen('disp', t, ['v']))
plt.ylabel('w')
plt.xlabel('t')
goalline=plot(HH_sampleData['t'], HH_sampleData['v'], 'bo')
goalleg = 'HH reference'
estline_splen = plot(HH_sampleData['t'], IF_sampleData, 'k-',
linewidth=2)
estleg_splen = 'IF spike thresh \& width fitted'
plt.legend([goalline, estline_splen, origline],
[goalleg, estleg_splen, origleg],
'lower left')
show() | 0.474631 | 0.335759 |
""" Presence detection (determine if an occupant is present in the house) """
import time
import wifi
from server.ping import async_ping
from server.notifier import Notifier
from server.server import Server
from tools import useful, jsonconfig, lang, tasking
class PresenceConfig(jsonconfig.JsonConfig):
""" Configuration class of presence detection """
def __init__(self):
jsonconfig.JsonConfig.__init__(self)
# Indicates if the presence detection is activated
self.activated = False
# Ip addresses of smartphones
self.smartphones = [b"",b"",b"",b"",b""]
# Notify presence
self.notify = True
class Presence:
""" Presence detection of smartphones """
ABSENCE_TIMEOUT = 1201
NO_ANSWER_TIMEOUT = 607
FAST_POLLING = 2.
SLOW_POLLING = 53
DNS_POLLING = 67
PING_TIMEOUT = 0.5
PING_COUNT = 4
detected = [False]
@staticmethod
def is_detected():
""" Indicates if presence detected """
return Presence.detected[0]
@staticmethod
def set_detection(state):
""" Force presence detection """
Presence.detected[0] = state
@staticmethod
def init():
""" Initialize the task """
Presence.readConfig = 0.
Presence.pollingDuration = Presence.FAST_POLLING
Presence.config = PresenceConfig()
Presence.activated = None
Presence.lastTime = 0
Presence.lastDnsTime = 0
Presence.detected[0] = False
Presence.configRefreshCounter = 0
@staticmethod
async def task():
""" Run the task """
# If configuration must be read
if Presence.config:
if Presence.configRefreshCounter % 7 == 0 or Presence.pollingDuration == Presence.SLOW_POLLING:
if Presence.config.is_changed():
if Presence.config.load() is False:
Presence.config.save()
useful.syslog("Change presence config %s"%Presence.config.to_string(), display=False)
Presence.configRefreshCounter += 1
if Presence.config.activated is True and wifi.Wifi.is_lan_available():
if Presence.lastDnsTime + Presence.DNS_POLLING < time.time():
Presence.lastDnsTime = time.time()
sent,received,success = await async_ping(wifi.Wifi.get_dns(), count=Presence.PING_COUNT, timeout=Presence.PING_TIMEOUT, quiet=True)
if received == 0:
wifi.Wifi.lan_disconnected()
else:
wifi.Wifi.lan_connected()
if Presence.config.activated is True and wifi.Wifi.is_lan_available():
presents = []
currentDetected = None
smartphoneInList = False
for smartphone in Presence.config.smartphones:
# If smartphone present
if smartphone != b"":
smartphoneInList = True
# Ping smartphone
sent,received,success = await async_ping(smartphone, count=Presence.PING_COUNT, timeout=Presence.PING_TIMEOUT, quiet=True)
# If a response received from smartphone
if received > 0:
presents.append(smartphone)
Presence.lastTime = time.time()
currentDetected = True
wifi.Wifi.lan_connected()
# If no smartphones detected during a very long time
if Presence.lastTime + Presence.ABSENCE_TIMEOUT < time.time():
# Nobody in the house
currentDetected = False
# If smartphone detected
if currentDetected is True:
# If no smartphone previously detected
if Presence.is_detected() != currentDetected:
# Notify the house is not empty
msg = b""
for present in presents:
msg += b"%s "%present
if Presence.config.notify:
await Notifier.notify(lang.presence_of_s%(msg))
Presence.set_detection(True)
# If no smartphone detected
elif currentDetected is False:
# If smartphone previously detected
if Presence.is_detected() != currentDetected:
# Notify the house in empty
if Presence.config.notify:
await Notifier.notify(lang.empty_house)
Presence.set_detection(False)
# If all smartphones not responded during a long time
if Presence.lastTime + Presence.NO_ANSWER_TIMEOUT < time.time() and smartphoneInList is True:
# Set fast polling rate
Presence.pollingDuration = Presence.FAST_POLLING
else:
# Reduce polling rate
Presence.pollingDuration = Presence.SLOW_POLLING
else:
Presence.pollingDuration = Presence.SLOW_POLLING
Presence.set_detection(False)
# If the presence detection change
if Presence.activated != Presence.config.activated:
if Presence.config.notify:
if Presence.config.activated:
await Notifier.notify(lang.presence_detection_on)
else:
await Notifier.notify(lang.presence_detection_off)
Presence.activated = Presence.config.activated
# Wait before new ping
await Server.wait_resume(Presence.pollingDuration)
return True
async def detect_presence():
""" Detect the presence of occupants of the housing and automatically suspend the detection (ping the ip of occupants smartphones) """
Presence.init()
await tasking.task_monitoring(Presence.task) | modules/lib/server/presence.py | """ Presence detection (determine if an occupant is present in the house) """
import time
import wifi
from server.ping import async_ping
from server.notifier import Notifier
from server.server import Server
from tools import useful, jsonconfig, lang, tasking
class PresenceConfig(jsonconfig.JsonConfig):
""" Configuration class of presence detection """
def __init__(self):
jsonconfig.JsonConfig.__init__(self)
# Indicates if the presence detection is activated
self.activated = False
# Ip addresses of smartphones
self.smartphones = [b"",b"",b"",b"",b""]
# Notify presence
self.notify = True
class Presence:
""" Presence detection of smartphones """
ABSENCE_TIMEOUT = 1201
NO_ANSWER_TIMEOUT = 607
FAST_POLLING = 2.
SLOW_POLLING = 53
DNS_POLLING = 67
PING_TIMEOUT = 0.5
PING_COUNT = 4
detected = [False]
@staticmethod
def is_detected():
""" Indicates if presence detected """
return Presence.detected[0]
@staticmethod
def set_detection(state):
""" Force presence detection """
Presence.detected[0] = state
@staticmethod
def init():
""" Initialize the task """
Presence.readConfig = 0.
Presence.pollingDuration = Presence.FAST_POLLING
Presence.config = PresenceConfig()
Presence.activated = None
Presence.lastTime = 0
Presence.lastDnsTime = 0
Presence.detected[0] = False
Presence.configRefreshCounter = 0
@staticmethod
async def task():
""" Run the task """
# If configuration must be read
if Presence.config:
if Presence.configRefreshCounter % 7 == 0 or Presence.pollingDuration == Presence.SLOW_POLLING:
if Presence.config.is_changed():
if Presence.config.load() is False:
Presence.config.save()
useful.syslog("Change presence config %s"%Presence.config.to_string(), display=False)
Presence.configRefreshCounter += 1
if Presence.config.activated is True and wifi.Wifi.is_lan_available():
if Presence.lastDnsTime + Presence.DNS_POLLING < time.time():
Presence.lastDnsTime = time.time()
sent,received,success = await async_ping(wifi.Wifi.get_dns(), count=Presence.PING_COUNT, timeout=Presence.PING_TIMEOUT, quiet=True)
if received == 0:
wifi.Wifi.lan_disconnected()
else:
wifi.Wifi.lan_connected()
if Presence.config.activated is True and wifi.Wifi.is_lan_available():
presents = []
currentDetected = None
smartphoneInList = False
for smartphone in Presence.config.smartphones:
# If smartphone present
if smartphone != b"":
smartphoneInList = True
# Ping smartphone
sent,received,success = await async_ping(smartphone, count=Presence.PING_COUNT, timeout=Presence.PING_TIMEOUT, quiet=True)
# If a response received from smartphone
if received > 0:
presents.append(smartphone)
Presence.lastTime = time.time()
currentDetected = True
wifi.Wifi.lan_connected()
# If no smartphones detected during a very long time
if Presence.lastTime + Presence.ABSENCE_TIMEOUT < time.time():
# Nobody in the house
currentDetected = False
# If smartphone detected
if currentDetected is True:
# If no smartphone previously detected
if Presence.is_detected() != currentDetected:
# Notify the house is not empty
msg = b""
for present in presents:
msg += b"%s "%present
if Presence.config.notify:
await Notifier.notify(lang.presence_of_s%(msg))
Presence.set_detection(True)
# If no smartphone detected
elif currentDetected is False:
# If smartphone previously detected
if Presence.is_detected() != currentDetected:
# Notify the house in empty
if Presence.config.notify:
await Notifier.notify(lang.empty_house)
Presence.set_detection(False)
# If all smartphones not responded during a long time
if Presence.lastTime + Presence.NO_ANSWER_TIMEOUT < time.time() and smartphoneInList is True:
# Set fast polling rate
Presence.pollingDuration = Presence.FAST_POLLING
else:
# Reduce polling rate
Presence.pollingDuration = Presence.SLOW_POLLING
else:
Presence.pollingDuration = Presence.SLOW_POLLING
Presence.set_detection(False)
# If the presence detection change
if Presence.activated != Presence.config.activated:
if Presence.config.notify:
if Presence.config.activated:
await Notifier.notify(lang.presence_detection_on)
else:
await Notifier.notify(lang.presence_detection_off)
Presence.activated = Presence.config.activated
# Wait before new ping
await Server.wait_resume(Presence.pollingDuration)
return True
async def detect_presence():
""" Detect the presence of occupants of the housing and automatically suspend the detection (ping the ip of occupants smartphones) """
Presence.init()
await tasking.task_monitoring(Presence.task) | 0.384219 | 0.144662 |
from __future__ import annotations
import os
import typing
from ..blockchain.network_type import OptionalNetworkType
from ... import util
__all__ = ['MosaicNonce']
RawNonceType = typing.Union[int, bytes, str]
def nonce_as_bytes(nonce: RawNonceType):
"""Convert nonce to underlying byte array."""
if isinstance(nonce, int):
return util.u32_to_catbuffer(nonce)
elif isinstance(nonce, str):
return util.unhexlify(nonce)
elif isinstance(nonce, bytes):
return nonce
else:
raise TypeError(f"Invalid nonce type, got {type(nonce)}.")
# TODO(ahuszagh) Change to an object, not an actual Model.
@util.inherit_doc
@util.dataclass(frozen=True)
class MosaicNonce(util.Model):
"""
Nonce for a mosaic.
:param nonce: Mosaic nonce.
"""
nonce: bytes
CATBUFFER_SIZE = util.U32_BYTES
def __init__(self, nonce: typing.Union[int, str, bytes]) -> None:
self._set('nonce', nonce_as_bytes(nonce))
if len(self.nonce) != 4:
raise ValueError("Nonce length is incorrect.")
def __int__(self) -> int:
return util.u32_from_catbuffer(self.nonce)
@classmethod
def create_random(cls, entropy=os.urandom):
"""
Create new mosaic nonce from random bytes.
:param entropy: (Optional) Callback to generate random bytes.
"""
nonce: bytes = entropy(4)
return cls(nonce)
@classmethod
def create_from_hex(cls, data: str):
"""
Create mosaic nonce from hex-encoded nonce.
:param data: Hex-encoded nonce data.
"""
return cls(util.unhexlify(data))
@classmethod
def create_from_int(cls, nonce: int):
"""
Create mosaic nonce from 32-bit integer.
:param nonce: Nonce as 32-bit unsigned integer.
"""
return cls(nonce)
@classmethod
def validate_dto(cls, data: int) -> bool:
"""Validate the data-transfer object."""
return isinstance(data, int) and 0 <= data < (1 << 32)
def to_dto(
self,
network_type: OptionalNetworkType = None,
) -> int:
return int(self)
@classmethod
def create_from_dto(
cls,
data: int,
network_type: OptionalNetworkType = None,
):
# Rest api returns negative number but it should be unsigned. Anyway, the size
# stays 4B so this mask should be OK
data &= 0xFFFFFFFF
if not cls.validate_dto(data):
raise ValueError('Invalid data-transfer object.')
return cls(data)
def to_catbuffer(
self,
network_type: OptionalNetworkType = None,
fee_strategy: typing.Optional[util.FeeCalculationStrategy] = util.FeeCalculationStrategy.MEDIUM,
) -> bytes:
return util.u32_to_catbuffer(int(self))
@classmethod
def create_from_catbuffer(
cls,
data: bytes,
network_type: OptionalNetworkType = None,
):
size = cls.CATBUFFER_SIZE
return cls(util.u32_from_catbuffer(data[:size])) | xpxchain/models/mosaic/mosaic_nonce.py | from __future__ import annotations
import os
import typing
from ..blockchain.network_type import OptionalNetworkType
from ... import util
__all__ = ['MosaicNonce']
RawNonceType = typing.Union[int, bytes, str]
def nonce_as_bytes(nonce: RawNonceType):
"""Convert nonce to underlying byte array."""
if isinstance(nonce, int):
return util.u32_to_catbuffer(nonce)
elif isinstance(nonce, str):
return util.unhexlify(nonce)
elif isinstance(nonce, bytes):
return nonce
else:
raise TypeError(f"Invalid nonce type, got {type(nonce)}.")
# TODO(ahuszagh) Change to an object, not an actual Model.
@util.inherit_doc
@util.dataclass(frozen=True)
class MosaicNonce(util.Model):
"""
Nonce for a mosaic.
:param nonce: Mosaic nonce.
"""
nonce: bytes
CATBUFFER_SIZE = util.U32_BYTES
def __init__(self, nonce: typing.Union[int, str, bytes]) -> None:
self._set('nonce', nonce_as_bytes(nonce))
if len(self.nonce) != 4:
raise ValueError("Nonce length is incorrect.")
def __int__(self) -> int:
return util.u32_from_catbuffer(self.nonce)
@classmethod
def create_random(cls, entropy=os.urandom):
"""
Create new mosaic nonce from random bytes.
:param entropy: (Optional) Callback to generate random bytes.
"""
nonce: bytes = entropy(4)
return cls(nonce)
@classmethod
def create_from_hex(cls, data: str):
"""
Create mosaic nonce from hex-encoded nonce.
:param data: Hex-encoded nonce data.
"""
return cls(util.unhexlify(data))
@classmethod
def create_from_int(cls, nonce: int):
"""
Create mosaic nonce from 32-bit integer.
:param nonce: Nonce as 32-bit unsigned integer.
"""
return cls(nonce)
@classmethod
def validate_dto(cls, data: int) -> bool:
"""Validate the data-transfer object."""
return isinstance(data, int) and 0 <= data < (1 << 32)
def to_dto(
self,
network_type: OptionalNetworkType = None,
) -> int:
return int(self)
@classmethod
def create_from_dto(
cls,
data: int,
network_type: OptionalNetworkType = None,
):
# Rest api returns negative number but it should be unsigned. Anyway, the size
# stays 4B so this mask should be OK
data &= 0xFFFFFFFF
if not cls.validate_dto(data):
raise ValueError('Invalid data-transfer object.')
return cls(data)
def to_catbuffer(
self,
network_type: OptionalNetworkType = None,
fee_strategy: typing.Optional[util.FeeCalculationStrategy] = util.FeeCalculationStrategy.MEDIUM,
) -> bytes:
return util.u32_to_catbuffer(int(self))
@classmethod
def create_from_catbuffer(
cls,
data: bytes,
network_type: OptionalNetworkType = None,
):
size = cls.CATBUFFER_SIZE
return cls(util.u32_from_catbuffer(data[:size])) | 0.773302 | 0.375134 |
import pprint
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class ShellActions(HookBaseClass):
"""
Stub implementation of the shell actions, used for testing.
"""
def generate_actions(self, sg_publish_data, actions, ui_area):
"""
Return a list of action instances for a particular publish.
This method is called each time a user clicks a publish somewhere in the UI.
The data returned from this hook will be used to populate the actions menu for a publish.
The mapping between Publish types and actions are kept in a different place
(in the configuration) so at the point when this hook is called, the loader app
has already established *which* actions are appropriate for this object.
The hook should return at least one action for each item passed in via the
actions parameter.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
Because you are operating on a particular publish, you may tailor the output
(caption, tooltip etc) to contain custom information suitable for this publish.
The ui_area parameter is a string and indicates where the publish is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
- If it will be shown in the history area, "history" is passed.
Please note that it is perfectly possible to create more than one action "instance" for
an action! You can for example do scene introspection - if the action passed in
is "character_attachment" you may for example scan the scene, figure out all the nodes
where this object can be attached and return a list of action instances:
"attach to left hand", "attach to right hand" etc. In this case, when more than
one object is returned for an action, use the params key to pass additional
data into the run_action hook.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
:returns List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Publish Data: %s" % (ui_area, actions, sg_publish_data))
action_instances = []
# For the sake of easy test, we'll reuse Maya publish types.
if "debug_action_1" in actions:
action_instances.append({"name": "debug_action_1",
"params": "Debug Action 1 'params'",
"caption": "Debug Action 1",
"description": "Executes Debug Action 1."})
if "debug_action_2" in actions:
action_instances.append({"name": "debug_action_2",
"params": "Debug Action 2 'params'",
"caption": "Debug Action 2",
"description": "Executes Debug Action 2."})
if "debug_action_3" in actions:
action_instances.append({"name": "debug_action_3",
"params": "Debug Action 3 'params'",
"caption": "Debug Action 3",
"description": "Executes Debug Action 3."})
if "debug_action_4" in actions:
action_instances.append({"name": "debug_action_4",
"params": "Debug Action 4 'params'",
"caption": "Debug Action 4",
"description": "Executes Debug Action 4."})
return action_instances
def execute_multiple_actions(self, actions):
"""
Executes the specified action on a list of items.
The default implementation dispatches each item from ``actions`` to
the ``execute_action`` method.
The ``actions`` is a list of dictionaries holding all the actions to execute.
Each entry will have the following values:
name: Name of the action to execute
sg_publish_data: Publish information coming from Shotgun
params: Parameters passed down from the generate_actions hook.
.. note::
This is the default entry point for the hook. It reuses the ``execute_action``
method for backward compatibility with hooks written for the previous
version of the loader.
.. note::
The hook will stop applying the actions on the selection if an error
is raised midway through.
:param list actions: Action dictionaries.
"""
app = self.parent
app.log_info("Executing action '%s' on the selection")
# Helps to visually scope selections
# Execute each action.
for single_action in actions:
name = single_action["name"]
sg_publish_data = single_action["sg_publish_data"]
params = single_action["params"]
self.execute_action(name, params, sg_publish_data)
def execute_action(self, name, params, sg_publish_data):
"""
Print out all actions. The data sent to this be method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:returns: No return value expected.
"""
app = self.parent
app.log_info("Action Name: %s" % name)
app.log_info("Parameters:")
for l in pprint.pformat(params, indent=4).split("\n"):
app.log_info(l)
app.log_info("Publish data:")
for l in pprint.pformat(sg_publish_data, indent=4).split("\n"):
app.log_info(l)
app.log_info("=" * 20) | install/app_store/tk-multi-loader2/v1.18.0/hooks/tk-shell_actions.py | import pprint
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class ShellActions(HookBaseClass):
"""
Stub implementation of the shell actions, used for testing.
"""
def generate_actions(self, sg_publish_data, actions, ui_area):
"""
Return a list of action instances for a particular publish.
This method is called each time a user clicks a publish somewhere in the UI.
The data returned from this hook will be used to populate the actions menu for a publish.
The mapping between Publish types and actions are kept in a different place
(in the configuration) so at the point when this hook is called, the loader app
has already established *which* actions are appropriate for this object.
The hook should return at least one action for each item passed in via the
actions parameter.
This method needs to return detailed data for those actions, in the form of a list
of dictionaries, each with name, params, caption and description keys.
Because you are operating on a particular publish, you may tailor the output
(caption, tooltip etc) to contain custom information suitable for this publish.
The ui_area parameter is a string and indicates where the publish is to be shown.
- If it will be shown in the main browsing area, "main" is passed.
- If it will be shown in the details area, "details" is passed.
- If it will be shown in the history area, "history" is passed.
Please note that it is perfectly possible to create more than one action "instance" for
an action! You can for example do scene introspection - if the action passed in
is "character_attachment" you may for example scan the scene, figure out all the nodes
where this object can be attached and return a list of action instances:
"attach to left hand", "attach to right hand" etc. In this case, when more than
one object is returned for an action, use the params key to pass additional
data into the run_action hook.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:param actions: List of action strings which have been defined in the app configuration.
:param ui_area: String denoting the UI Area (see above).
:returns List of dictionaries, each with keys name, params, caption and description
"""
app = self.parent
app.log_debug("Generate actions called for UI element %s. "
"Actions: %s. Publish Data: %s" % (ui_area, actions, sg_publish_data))
action_instances = []
# For the sake of easy test, we'll reuse Maya publish types.
if "debug_action_1" in actions:
action_instances.append({"name": "debug_action_1",
"params": "Debug Action 1 'params'",
"caption": "Debug Action 1",
"description": "Executes Debug Action 1."})
if "debug_action_2" in actions:
action_instances.append({"name": "debug_action_2",
"params": "Debug Action 2 'params'",
"caption": "Debug Action 2",
"description": "Executes Debug Action 2."})
if "debug_action_3" in actions:
action_instances.append({"name": "debug_action_3",
"params": "Debug Action 3 'params'",
"caption": "Debug Action 3",
"description": "Executes Debug Action 3."})
if "debug_action_4" in actions:
action_instances.append({"name": "debug_action_4",
"params": "Debug Action 4 'params'",
"caption": "Debug Action 4",
"description": "Executes Debug Action 4."})
return action_instances
def execute_multiple_actions(self, actions):
"""
Executes the specified action on a list of items.
The default implementation dispatches each item from ``actions`` to
the ``execute_action`` method.
The ``actions`` is a list of dictionaries holding all the actions to execute.
Each entry will have the following values:
name: Name of the action to execute
sg_publish_data: Publish information coming from Shotgun
params: Parameters passed down from the generate_actions hook.
.. note::
This is the default entry point for the hook. It reuses the ``execute_action``
method for backward compatibility with hooks written for the previous
version of the loader.
.. note::
The hook will stop applying the actions on the selection if an error
is raised midway through.
:param list actions: Action dictionaries.
"""
app = self.parent
app.log_info("Executing action '%s' on the selection")
# Helps to visually scope selections
# Execute each action.
for single_action in actions:
name = single_action["name"]
sg_publish_data = single_action["sg_publish_data"]
params = single_action["params"]
self.execute_action(name, params, sg_publish_data)
def execute_action(self, name, params, sg_publish_data):
"""
Print out all actions. The data sent to this be method will
represent one of the actions enumerated by the generate_actions method.
:param name: Action name string representing one of the items returned by generate_actions.
:param params: Params data, as specified by generate_actions.
:param sg_publish_data: Shotgun data dictionary with all the standard publish fields.
:returns: No return value expected.
"""
app = self.parent
app.log_info("Action Name: %s" % name)
app.log_info("Parameters:")
for l in pprint.pformat(params, indent=4).split("\n"):
app.log_info(l)
app.log_info("Publish data:")
for l in pprint.pformat(sg_publish_data, indent=4).split("\n"):
app.log_info(l)
app.log_info("=" * 20) | 0.724188 | 0.466359 |
import pycurl
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import json
# Version 1.0.0
# This class is written to be compatible with Python 2 and Python 3
class CDNsunCdnApiClient(object):
_URL_PREFIX = 'https://cdnsun.com/api/'
_TIMEOUT = 60
_username = None
_password = None
# The options are listed in accordance with
# http://php.net/manual/ru/function.curl-getinfo.php
# As the tests showed that not all of these were in a response info object
_CURL_RESPONSE_INFO_OPTIONS = {
'EFFECTIVE_URL': 'url', 'CONTENT_TYPE': 'content_type',
'RESPONSE_CODE': 'http_code', 'HEADER_SIZE': 'header_size',
'REQUEST_SIZE': 'request_size', 'INFO_FILETIME': 'filetime',
'SSL_VERIFYRESULT': 'ssl_verify_result',
'REDIRECT_COUNT': 'redirect_count', 'TOTAL_TIME': 'total_time',
'NAMELOOKUP_TIME': 'namelookup_time', 'CONNECT_TIME': 'connect_time',
'PRETRANSFER_TIME': 'pretransfer_time', 'SIZE_UPLOAD': 'size_upload',
'SIZE_DOWNLOAD': 'size_download', 'SPEED_DOWNLOAD': 'speed_download',
'SPEED_UPLOAD': 'speed_upload',
'CONTENT_LENGTH_DOWNLOAD': 'download_content_length',
'CONTENT_LENGTH_UPLOAD': 'upload_content_length',
'STARTTRANSFER_TIME': 'starttransfer_time',
'REDIRECT_TIME': 'redirect_time', 'INFO_CERTINFO': 'certinfo',
'PRIMARY_IP': 'primary_ip', 'PRIMARY_PORT': 'primary_port',
'LOCAL_IP': 'local_ip', 'LOCAL_PORT': 'local_port',
'REDIRECT_URL': 'redirect_url'
}
def __init__(self, options={}):
if not options:
raise Exception('empty options')
elif not 'username' in options:
raise Exception('empty options[username]')
elif not 'password' in options:
raise Exception('empty options[password]')
self._username = options['username']
self._password = options['password']
def get(self, options={}):
if not options:
raise Exception('empty options')
options['method'] = 'GET'
return self._request(options)
def post(self, options={}):
if not options:
raise Exception('empty options')
options['method'] = 'POST'
return self._request(options)
def put(self, options={}):
if not options:
raise Exception('empty options')
options['method'] = 'PUT'
return self._request(options)
def delete(self, options={}):
if not options:
raise Exception('empty options')
options['method'] = 'DELETE'
return self._request(options)
def _request(self, options={}):
if not options:
raise Exception('empty options')
elif not 'url' in options:
raise Exception('empty options[url]')
elif not 'method' in options:
raise Exception('empty options[method]')
c = pycurl.Curl()
method = options['method']
url = options['url']
if (method == 'POST' or method == 'post'):
c.setopt(c.POST, 1)
if 'data' in options:
c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
elif (method == 'PUT' or method == 'put'):
c.setopt(pycurl.CUSTOMREQUEST, 'PUT')
if 'data' in options:
c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
elif (method == 'DELETE' or method == 'delete'):
c.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
if 'data' in options:
c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
elif (method == 'GET' or method == 'get'):
if 'data' in options:
url = ('%s?%s' % (url, urlencode(options['data'])))
else:
raise Exception('Unsupported method: ' + method)
# Set headers for JSON format
headers = [
'Accept: application/json',
'Content-Type: application/json'
]
c.setopt(pycurl.HTTPHEADER, headers)
# Authentication:
c.setopt(pycurl.HTTPAUTH, c.HTTPAUTH_BASIC)
c.setopt(pycurl.USERPWD, self._username + ':' + self._password)
# API endpoint
if url[:len(self._URL_PREFIX)] != self._URL_PREFIX:
url = self._URL_PREFIX + url
c.setopt(pycurl.URL, url)
c.setopt(pycurl.TIMEOUT, self._TIMEOUT)
# API call
buffer = BytesIO()
c.setopt(pycurl.WRITEDATA, buffer)
response_info_str = ''
response_error = ''
try:
c.perform()
except pycurl.error:
# If have an issue with errstr() then there is also errstr_raw()
response_error = c.errstr()
try:
response_info_str = self._get_response_info_line(c)
except pycurl.error:
# If have an issue with errstr() then there is also errstr_raw()
if response_error == '':
response_error = c.errstr()
finally:
c.close()
# Body is a string on Python 2 and a byte string on Python 3.
# If we know the encoding, we can always decode the body and
# end up with a Unicode string.
response_body = buffer.getvalue().decode("utf-8")
if not response_body or response_error:
raise Exception('curl error. response_body: ' + response_body +
', response_info: ' + response_info_str +
', response_error: ' + response_error)
response_body_decoded = None
try:
response_body_decoded = json.loads(response_body)
except Exception:
raise Exception('json_decode response_body error' +
'. response_body: ' + response_body +
', response_info: ' + response_info_str +
', response_error: ' + response_error)
return response_body_decoded
def _get_response_info_line(self, pycurl_instance):
    """Collect available transfer metadata from the curl handle.

    Returns a JSON object string mapping PHP-curl_getinfo-style keys to
    stringified values; options this pycurl build cannot report are omitted.
    """
    collected = {}
    for curl_opt, resp_opt in self._CURL_RESPONSE_INFO_OPTIONS.items():
        value = self._get_curl_info_value(pycurl_instance, curl_opt, None)
        if value is not None:
            collected[resp_opt] = value
    return json.dumps(collected)
def _get_curl_info_value(self, pycurl_instance, curl_opt, option_code):
option_value = None
option_str_value = None
try:
option_code = getattr(pycurl_instance, curl_opt, '')
if type(option_code) == int:
option_value = pycurl_instance.getinfo_raw(option_code)
option_str_value = str(option_value)
except AttributeError:
pass
return option_str_value | cdn_api_client.py | import pycurl
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import json
# Version 1.0.0
# This class is written to be compatible with Python 2 and Python 3
class CDNsunCdnApiClient(object):
    """Minimal JSON client for the CDNsun REST API (Python 2/3 compatible).

    Requests are sent via pycurl with HTTP basic auth; request bodies and
    responses are JSON. All public methods take an options dict with at
    least 'url' and optionally 'data'.
    """

    _URL_PREFIX = 'https://cdnsun.com/api/'
    _TIMEOUT = 60
    _username = None
    _password = None
    # The options are listed in accordance with
    # http://php.net/manual/ru/function.curl-getinfo.php
    # Tests showed that not all of these appear in a response info object.
    _CURL_RESPONSE_INFO_OPTIONS = {
        'EFFECTIVE_URL': 'url', 'CONTENT_TYPE': 'content_type',
        'RESPONSE_CODE': 'http_code', 'HEADER_SIZE': 'header_size',
        'REQUEST_SIZE': 'request_size', 'INFO_FILETIME': 'filetime',
        'SSL_VERIFYRESULT': 'ssl_verify_result',
        'REDIRECT_COUNT': 'redirect_count', 'TOTAL_TIME': 'total_time',
        'NAMELOOKUP_TIME': 'namelookup_time', 'CONNECT_TIME': 'connect_time',
        'PRETRANSFER_TIME': 'pretransfer_time', 'SIZE_UPLOAD': 'size_upload',
        'SIZE_DOWNLOAD': 'size_download', 'SPEED_DOWNLOAD': 'speed_download',
        'SPEED_UPLOAD': 'speed_upload',
        'CONTENT_LENGTH_DOWNLOAD': 'download_content_length',
        'CONTENT_LENGTH_UPLOAD': 'upload_content_length',
        'STARTTRANSFER_TIME': 'starttransfer_time',
        'REDIRECT_TIME': 'redirect_time', 'INFO_CERTINFO': 'certinfo',
        'PRIMARY_IP': 'primary_ip', 'PRIMARY_PORT': 'primary_port',
        'LOCAL_IP': 'local_ip', 'LOCAL_PORT': 'local_port',
        'REDIRECT_URL': 'redirect_url'
    }

    def __init__(self, options=None):
        """options must carry 'username' and 'password'.

        Defaults changed from the mutable `options={}` to None; behavior is
        unchanged because an empty dict raised 'empty options' anyway.
        """
        if not options:
            raise Exception('empty options')
        elif 'username' not in options:
            raise Exception('empty options[username]')
        elif 'password' not in options:
            raise Exception('empty options[password]')
        self._username = options['username']
        self._password = options['password']

    def get(self, options=None):
        """GET request; options['data'] (if any) becomes the query string."""
        if not options:
            raise Exception('empty options')
        options['method'] = 'GET'
        return self._request(options)

    def post(self, options=None):
        """POST request; options['data'] (if any) is sent as a JSON body."""
        if not options:
            raise Exception('empty options')
        options['method'] = 'POST'
        return self._request(options)

    def put(self, options=None):
        """PUT request; options['data'] (if any) is sent as a JSON body."""
        if not options:
            raise Exception('empty options')
        options['method'] = 'PUT'
        return self._request(options)

    def delete(self, options=None):
        """DELETE request; options['data'] (if any) is sent as a JSON body."""
        if not options:
            raise Exception('empty options')
        options['method'] = 'DELETE'
        return self._request(options)

    def _request(self, options=None):
        """Perform the HTTP call described by options and decode the JSON reply.

        options: dict with 'url', 'method' ('GET'/'POST'/'PUT'/'DELETE',
        either case) and optionally 'data'. Raises Exception on unsupported
        methods, transport errors, empty bodies, or invalid JSON.
        """
        if not options:
            raise Exception('empty options')
        elif 'url' not in options:
            raise Exception('empty options[url]')
        elif 'method' not in options:
            raise Exception('empty options[method]')
        c = pycurl.Curl()
        method = options['method']
        url = options['url']
        if method in ('POST', 'post'):
            c.setopt(c.POST, 1)
            if 'data' in options:
                c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
        elif method in ('PUT', 'put'):
            c.setopt(pycurl.CUSTOMREQUEST, 'PUT')
            if 'data' in options:
                c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
        elif method in ('DELETE', 'delete'):
            c.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
            if 'data' in options:
                c.setopt(pycurl.POSTFIELDS, json.dumps(options['data']))
        elif method in ('GET', 'get'):
            if 'data' in options:
                url = '%s?%s' % (url, urlencode(options['data']))
        else:
            raise Exception('Unsupported method: ' + method)
        # Announce and request JSON payloads.
        headers = [
            'Accept: application/json',
            'Content-Type: application/json'
        ]
        c.setopt(pycurl.HTTPHEADER, headers)
        # Authentication:
        c.setopt(pycurl.HTTPAUTH, c.HTTPAUTH_BASIC)
        c.setopt(pycurl.USERPWD, self._username + ':' + self._password)
        # Prepend the API endpoint when a relative URL was given.
        if url[:len(self._URL_PREFIX)] != self._URL_PREFIX:
            url = self._URL_PREFIX + url
        c.setopt(pycurl.URL, url)
        c.setopt(pycurl.TIMEOUT, self._TIMEOUT)
        # API call
        buffer = BytesIO()
        c.setopt(pycurl.WRITEDATA, buffer)
        response_info_str = ''
        response_error = ''
        try:
            c.perform()
        except pycurl.error:
            # If there is an issue with errstr() there is also errstr_raw().
            response_error = c.errstr()
        try:
            response_info_str = self._get_response_info_line(c)
        except pycurl.error:
            if response_error == '':
                response_error = c.errstr()
        finally:
            c.close()
        # Body is str on Python 2 and bytes on Python 3; decode once here.
        response_body = buffer.getvalue().decode("utf-8")
        if not response_body or response_error:
            raise Exception('curl error. response_body: ' + response_body +
                            ', response_info: ' + response_info_str +
                            ', response_error: ' + response_error)
        try:
            return json.loads(response_body)
        except Exception:
            raise Exception('json_decode response_body error' +
                            '. response_body: ' + response_body +
                            ', response_info: ' + response_info_str +
                            ', response_error: ' + response_error)

    def _get_response_info_line(self, pycurl_instance):
        """Collect available transfer metadata from the handle as a JSON line."""
        info_options = {}
        for (curl_opt, resp_opt) in self._CURL_RESPONSE_INFO_OPTIONS.items():
            option_value = self._get_curl_info_value(pycurl_instance,
                                                     curl_opt, None)
            if option_value is not None:
                info_options[resp_opt] = option_value
        return json.dumps(info_options)

    def _get_curl_info_value(self, pycurl_instance, curl_opt, option_code):
        """Return str(getinfo_raw(...)) for the named constant, or None.

        option_code is ignored; kept for signature compatibility.
        """
        option_str_value = None
        # Missing constants resolve to '' (non-int) and are skipped; the
        # old `except AttributeError` was dead since getattr has a default.
        option_code = getattr(pycurl_instance, curl_opt, '')
        if isinstance(option_code, int):
            option_str_value = str(pycurl_instance.getinfo_raw(option_code))
        return option_str_value
#<NAME>
'''Takes a base oligo or generates one. Chains mutations in sequentially, have any number of forks'''
#Imports
import argparse
import random
#Functions
def generate_random_seq(alpha, l=20):
    """Return a random sequence of length l drawn from the alphabet alpha."""
    return ''.join(random.choice(alpha) for _ in range(l))
def add_snp(seq, choices):
    """Return seq with one random position replaced by a different symbol.

    The replacement is drawn from choices, excluding the current symbol.
    """
    chars = list(seq)
    pos = random.randint(0, len(seq) - 1)
    current = chars[pos]
    alternatives = [c for c in choices if c != current]
    chars[pos] = random.choice(alternatives)
    return ''.join(chars)
def chain_iterate(sequence, stepz, alphabet, I=1):
    """Walk a chain of stepz successive single-SNP mutations from sequence.

    Each accepted step mutates the previous one; candidates already seen
    (including the seed) are redrawn, so there are no backsteps. Returns a
    dict of 'chain_000N' -> sequence; the seed itself is excluded.
    """
    results = {'delirium': sequence}  # placeholder keeps the seed in the dedup set
    current = sequence
    while len(results) <= stepz:
        candidate = add_snp(current, alphabet)
        if candidate in results.values():
            continue  # duplicate of an earlier step: redraw
        results['_'.join(['chain', str(I).zfill(4)])] = candidate
        current = candidate
        I += 1
    del results['delirium']
    return results
def add_snp_restrict(seq, banned, choices):
    """Like add_snp, but never mutates a position listed in banned."""
    chars = list(seq)
    pos = random.randint(0, len(seq) - 1)
    # Re-draw until we land on a position that may be permuted.
    while pos in banned:
        pos = random.randint(0, len(seq) - 1)
    current = chars[pos]
    chars[pos] = random.choice([c for c in choices if c != current])
    return ''.join(chars)
def chain_iterate_restrict(sequence, stepz, nonobases, alphabet, I=1):
    """Chain stepz single-SNP mutations, never touching positions in nonobases.

    Same contract as chain_iterate: no backsteps, seed excluded from the
    returned {'chain_000N': sequence} dict.
    """
    results = {'delirium': sequence}  # placeholder keeps the seed in the dedup set
    current = sequence
    while len(results) <= stepz:
        candidate = add_snp_restrict(current, nonobases, alphabet)
        if candidate in results.values():
            continue  # duplicate of an earlier step: redraw
        results['_'.join(['chain', str(I).zfill(4)])] = candidate
        current = candidate
        I += 1
    del results['delirium']
    return results
def run_forks(sequence, n_forks, n_chain, alpha, restrict_bases=None):
    """Run n_forks independent mutation chains of length n_chain from sequence.

    When restrict_bases is given, those positions are never mutated.
    Returns a dict of 'fork_XXXX_chain_YYYY' -> sequence plus the
    'original' seed; identical sequences produced by different forks are
    kept only once.
    """
    master = {}
    for i in range(n_forks):
        if restrict_bases is None:
            sub = chain_iterate(sequence, n_chain, alpha)
        else:
            sub = chain_iterate_restrict(sequence, n_chain, restrict_bases, alpha)
        for key, value in sorted(sub.items()):
            # BUG FIX: the original tested `value not in master.items()`,
            # comparing a string to (key, value) tuples -- always true, so
            # cross-fork duplicates were never removed. Check values().
            if value not in master.values():
                zkey = '_'.join(['fork', str(i + 1).zfill(4), key])
                master[zkey] = value
    master['original'] = sequence
    return master
def write_out_fasta(info, outfyle='out.fasta', LW=80):
    """Write a <.fasta> file; names are the dict keys, sorted.

    info maps record names to sequences; sequences are wrapped at LW
    characters per line.
    """
    with open(outfyle, 'w') as g:
        for name, seq in sorted(info.items()):
            g.write('>' + name + '\n')
            # BUG FIX: range, not xrange -- xrange does not exist on
            # Python 3 and the rest of the script is 2/3-agnostic.
            for i in range(0, len(seq), LW):
                g.write(seq[i:i + LW] + '\n')
#Main Function
def main():
    """CLI entry point: build the variant set and write it to a FASTA file."""
    parser = argparse.ArgumentParser(description='Creates <.fasta> of sequence variants.')
    parser.add_argument('-prior', type=str, default=None,
                        help='[default = random] Nucleotide Sequence to iterate on')
    parser.add_argument('-length', type=int, default=20,
                        help='[default = 20] Number of bases for random prior')
    parser.add_argument('-chain', type=int, default=20,
                        help='[default = 20] Number of iterations from base seq')
    parser.add_argument('-fork', type=int, default=8,
                        help='[default = 8] Number of forks from base seq')
    parser.add_argument('-name', type=str, default='out.fa',
                        help='[default = out.fa] Name the output')
    parser.add_argument('-alphabet', type=str, default='ACGT',
                        help='[default = ACGT] Alphabet to use')
    parser.add_argument('-r', type=int, default=None, nargs='+', dest='same',
                        help='Bases numbers which may not be permuted')
    args = parser.parse_args()
    # Use the supplied sequence (upper-cased) or fall back to a random seed.
    if args.prior is None:
        seed = generate_random_seq(args.alphabet, args.length)
    else:
        seed = args.prior.upper()
    fasta_dict = run_forks(seed, args.fork, args.chain, args.alphabet, args.same)
    write_out_fasta(fasta_dict, args.name)
if __name__ == '__main__':
main() | oligo_permutations.py |
#<NAME>
'''Takes a base oligo or generates one. Chains mutations in sequentially, have any number of forks'''
#Imports
import argparse
import random
#Functions
def generate_random_seq(alpha, l=20):
    """Return a random sequence of length l drawn from the alphabet alpha."""
    return ''.join(random.choice(alpha) for _ in range(l))
def add_snp(seq, choices):
    """Return seq with one random position replaced by a different symbol.

    The replacement is drawn from choices, excluding the current symbol.
    """
    chars = list(seq)
    pos = random.randint(0, len(seq) - 1)
    current = chars[pos]
    alternatives = [c for c in choices if c != current]
    chars[pos] = random.choice(alternatives)
    return ''.join(chars)
def chain_iterate(sequence, stepz, alphabet, I=1):
    """Walk a chain of stepz successive single-SNP mutations from sequence.

    Each accepted step mutates the previous one; candidates already seen
    (including the seed) are redrawn, so there are no backsteps. Returns a
    dict of 'chain_000N' -> sequence; the seed itself is excluded.
    """
    results = {'delirium': sequence}  # placeholder keeps the seed in the dedup set
    current = sequence
    while len(results) <= stepz:
        candidate = add_snp(current, alphabet)
        if candidate in results.values():
            continue  # duplicate of an earlier step: redraw
        results['_'.join(['chain', str(I).zfill(4)])] = candidate
        current = candidate
        I += 1
    del results['delirium']
    return results
def add_snp_restrict(seq, banned, choices):
    """Like add_snp, but never mutates a position listed in banned."""
    chars = list(seq)
    pos = random.randint(0, len(seq) - 1)
    # Re-draw until we land on a position that may be permuted.
    while pos in banned:
        pos = random.randint(0, len(seq) - 1)
    current = chars[pos]
    chars[pos] = random.choice([c for c in choices if c != current])
    return ''.join(chars)
def chain_iterate_restrict(sequence, stepz, nonobases, alphabet, I=1):
    """Chain stepz single-SNP mutations, never touching positions in nonobases.

    Same contract as chain_iterate: no backsteps, seed excluded from the
    returned {'chain_000N': sequence} dict.
    """
    results = {'delirium': sequence}  # placeholder keeps the seed in the dedup set
    current = sequence
    while len(results) <= stepz:
        candidate = add_snp_restrict(current, nonobases, alphabet)
        if candidate in results.values():
            continue  # duplicate of an earlier step: redraw
        results['_'.join(['chain', str(I).zfill(4)])] = candidate
        current = candidate
        I += 1
    del results['delirium']
    return results
def run_forks(sequence, n_forks, n_chain, alpha, restrict_bases=None):
    """Run n_forks independent mutation chains of length n_chain from sequence.

    When restrict_bases is given, those positions are never mutated.
    Returns a dict of 'fork_XXXX_chain_YYYY' -> sequence plus the
    'original' seed; identical sequences produced by different forks are
    kept only once.
    """
    master = {}
    for i in range(n_forks):
        if restrict_bases is None:
            sub = chain_iterate(sequence, n_chain, alpha)
        else:
            sub = chain_iterate_restrict(sequence, n_chain, restrict_bases, alpha)
        for key, value in sorted(sub.items()):
            # BUG FIX: the original tested `value not in master.items()`,
            # comparing a string to (key, value) tuples -- always true, so
            # cross-fork duplicates were never removed. Check values().
            if value not in master.values():
                zkey = '_'.join(['fork', str(i + 1).zfill(4), key])
                master[zkey] = value
    master['original'] = sequence
    return master
def write_out_fasta(info, outfyle='out.fasta', LW=80):
    """Write a <.fasta> file; names are the dict keys, sorted.

    info maps record names to sequences; sequences are wrapped at LW
    characters per line.
    """
    with open(outfyle, 'w') as g:
        for name, seq in sorted(info.items()):
            g.write('>' + name + '\n')
            # BUG FIX: range, not xrange -- xrange does not exist on
            # Python 3 and the rest of the script is 2/3-agnostic.
            for i in range(0, len(seq), LW):
                g.write(seq[i:i + LW] + '\n')
#Main Function
def main():
    """CLI entry point: build the variant set and write it to a FASTA file."""
    parser = argparse.ArgumentParser(description='Creates <.fasta> of sequence variants.')
    parser.add_argument('-prior', type=str, default=None,
                        help='[default = random] Nucleotide Sequence to iterate on')
    parser.add_argument('-length', type=int, default=20,
                        help='[default = 20] Number of bases for random prior')
    parser.add_argument('-chain', type=int, default=20,
                        help='[default = 20] Number of iterations from base seq')
    parser.add_argument('-fork', type=int, default=8,
                        help='[default = 8] Number of forks from base seq')
    parser.add_argument('-name', type=str, default='out.fa',
                        help='[default = out.fa] Name the output')
    parser.add_argument('-alphabet', type=str, default='ACGT',
                        help='[default = ACGT] Alphabet to use')
    parser.add_argument('-r', type=int, default=None, nargs='+', dest='same',
                        help='Bases numbers which may not be permuted')
    args = parser.parse_args()
    # Use the supplied sequence (upper-cased) or fall back to a random seed.
    if args.prior is None:
        seed = generate_random_seq(args.alphabet, args.length)
    else:
        seed = args.prior.upper()
    fasta_dict = run_forks(seed, args.fork, args.chain, args.alphabet, args.same)
    write_out_fasta(fasta_dict, args.name)
if __name__ == '__main__':
main() | 0.345989 | 0.230422 |
from molmodmt.utils.exceptions import *
from os.path import basename as _basename
from mdtraj.core.topology import Topology as _mdtraj_Topology
form_name=_basename(__file__).split('.')[0].replace('api_','').replace('_','.')
is_form={
_mdtraj_Topology : form_name,
'mdtraj.Topology': form_name
}
def to_aminoacids3_seq(item, selection=None, syntaxis='mdtraj'):
    """Concatenate the three-letter residue names of an mdtraj Topology.

    selection/syntaxis are accepted for API symmetry but not used here.
    """
    return ''.join(residue.name for residue in item.residues)
def to_aminoacids1_seq(item, selection=None, syntaxis='mdtraj'):
    """Return the one-letter amino-acid sequence of an mdtraj Topology."""
    from molmodmt.forms.seqs.api_aminoacids3 import to_aminoacids1_seq as _three_to_one
    seq3 = to_aminoacids3_seq(item)
    return _three_to_one(seq3)
def to_openmm_Topology(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to an openmm Topology.

    Delegates to mdtraj's native converter; selection/syntaxis are
    accepted for API symmetry but not used here.
    """
    return item.to_openmm()
def to_yank_Topography(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to a yank Topography (via openmm)."""
    from .api_openmm_Topology import to_yank_Topography as _convert
    openmm_top = to_openmm_Topology(item)
    return _convert(openmm_top)
def to_parmed_Structure(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to a parmed Structure (via openmm)."""
    from .api_openmm_Topology import to_parmed_Structure as _convert
    openmm_top = to_openmm_Topology(item)
    return _convert(openmm_top)
def to_parmed_GromacsTopologyFile(item):
    """Convert an mdtraj Topology into a parmed GromacsTopologyFile."""
    from parmed.gromacs import GromacsTopologyFile as _GromacsTopologyFile
    tmp_form = to_parmed_Structure(item)
    # BUG FIX: the original passed `item` (the mdtraj Topology) to
    # from_structure, discarding the parmed Structure it had just built.
    return _GromacsTopologyFile.from_structure(tmp_form)
def to_top(item, filename):
    """Write an mdtraj Topology to a Gromacs <.top> file via parmed."""
    from .api_parmed_GromacsTopologyFile import to_top as _write_top
    gmx_top = to_parmed_GromacsTopologyFile(item)
    return _write_top(gmx_top, filename)
def select_with_mdtraj(item, selection):
    """Evaluate an mdtraj atom-selection expression on the Topology.

    Returns whatever Topology.select returns (atom indices, per mdtraj).
    """
    return item.select(selection)
def extract_atom_indices(item, atoms_selection):
    """Restrict the Topology to the given atom indices.

    Delegates to Topology.subset; presumably returns a new Topology
    rather than mutating item -- confirm against the mdtraj API.
    """
    return item.subset(atoms_selection)
def merge_two_items(item1, item2, in_place=False):
    """Merge two mdtraj Topologies.

    With in_place=True, item1.join(item2) is invoked and None is returned;
    otherwise a copy of item1 joined with item2 is returned.
    NOTE(review): mdtraj's Topology.join appears to return a *new*
    topology, so the in_place branch may not actually modify item1 --
    confirm against the mdtraj API.
    """
    if in_place:
        item1.join(item2)
        return None
    merged = item1.copy()
    return merged.join(item2)
from os.path import basename as _basename
from mdtraj.core.topology import Topology as _mdtraj_Topology
form_name=_basename(__file__).split('.')[0].replace('api_','').replace('_','.')
is_form={
_mdtraj_Topology : form_name,
'mdtraj.Topology': form_name
}
def to_aminoacids3_seq(item, selection=None, syntaxis='mdtraj'):
    """Concatenate the three-letter residue names of an mdtraj Topology.

    selection/syntaxis are accepted for API symmetry but not used here.
    """
    return ''.join(residue.name for residue in item.residues)
def to_aminoacids1_seq(item, selection=None, syntaxis='mdtraj'):
    """Return the one-letter amino-acid sequence of an mdtraj Topology."""
    from molmodmt.forms.seqs.api_aminoacids3 import to_aminoacids1_seq as _three_to_one
    seq3 = to_aminoacids3_seq(item)
    return _three_to_one(seq3)
def to_openmm_Topology(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to an openmm Topology.

    Delegates to mdtraj's native converter; selection/syntaxis are
    accepted for API symmetry but not used here.
    """
    return item.to_openmm()
def to_yank_Topography(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to a yank Topography (via openmm)."""
    from .api_openmm_Topology import to_yank_Topography as _convert
    openmm_top = to_openmm_Topology(item)
    return _convert(openmm_top)
def to_parmed_Structure(item, selection=None, syntaxis='mdtraj'):
    """Convert an mdtraj Topology to a parmed Structure (via openmm)."""
    from .api_openmm_Topology import to_parmed_Structure as _convert
    openmm_top = to_openmm_Topology(item)
    return _convert(openmm_top)
def to_parmed_GromacsTopologyFile(item):
    """Convert an mdtraj Topology into a parmed GromacsTopologyFile."""
    from parmed.gromacs import GromacsTopologyFile as _GromacsTopologyFile
    tmp_form = to_parmed_Structure(item)
    # BUG FIX: the original passed `item` (the mdtraj Topology) to
    # from_structure, discarding the parmed Structure it had just built.
    return _GromacsTopologyFile.from_structure(tmp_form)
def to_top(item, filename):
    """Write an mdtraj Topology to a Gromacs <.top> file via parmed."""
    from .api_parmed_GromacsTopologyFile import to_top as _write_top
    gmx_top = to_parmed_GromacsTopologyFile(item)
    return _write_top(gmx_top, filename)
def select_with_mdtraj(item, selection):
    """Evaluate an mdtraj atom-selection expression on the Topology.

    Returns whatever Topology.select returns (atom indices, per mdtraj).
    """
    return item.select(selection)
def extract_atom_indices(item, atoms_selection):
    """Restrict the Topology to the given atom indices.

    Delegates to Topology.subset; presumably returns a new Topology
    rather than mutating item -- confirm against the mdtraj API.
    """
    return item.subset(atoms_selection)
def merge_two_items(item1, item2, in_place=False):
    """Merge two mdtraj Topologies.

    With in_place=True, item1.join(item2) is invoked and None is returned;
    otherwise a copy of item1 joined with item2 is returned.
    NOTE(review): mdtraj's Topology.join appears to return a *new*
    topology, so the in_place branch may not actually modify item1 --
    confirm against the mdtraj API.
    """
    if in_place:
        item1.join(item2)
        return None
    merged = item1.copy()
    return merged.join(item2)
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from logger.LoggerClass import LoggerFileClass
logger = LoggerFileClass("boosting_model")
class BoostingModelReg:
    """Builds boosted regression models via randomized hyper-parameter search.

    Each builder method cross-validates a parameter grid with
    RandomizedSearchCV (10 folds, r2 scoring, 10 candidates), refits the
    best estimator on the training data, prints its feature importances
    and returns it. On failure the exception is logged and the method
    returns None, so callers must check the result.

    References:
    https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
    https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
    https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html

    parameters
    --------------------------------
    x_train: Training data frame containing the independent features.
    y_train: Training dataframe containing the dependent or target feature.
    x_test: Testing dataframe containing the independent features.
    y_test: Testing dataframe containing the dependent or target feature.
    """

    def __init__(self, x_train, y_train, x_test, y_test):
        self.x_train = x_train
        self.y_train = y_train
        # The test split is stored but not used by the builders in view;
        # presumably consumed by evaluation code elsewhere.
        self.x_test = x_test
        self.y_test = y_test

    def adaboost_regressor(self):
        """Cross-validate, refit and return an AdaBoostRegressor.

        Prints the best estimator and its feature importances.
        Returns None (after logging) if anything fails.
        """
        logger.add_info_log(
            "Enter class BoostingModelReg : adaboost_regressor function")
        try:
            adb = AdaBoostRegressor()
            params = {'n_estimators': [5, 10, 20, 40, 80, 100, 200],
                      'learning_rate': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1],
                      'loss': ['linear', 'square', 'exponential']
                      }  # parameter grid
            rcv = RandomizedSearchCV(estimator=adb, param_distributions=params,
                                     n_iter=10, scoring='r2', n_jobs=-1, cv=10,
                                     verbose=5, random_state=42,
                                     return_train_score=True)
            print('Cross validation process for the Adaboost regressor')
            rcv.fit(self.x_train, self.y_train)
            print()
            print('The best estimator for the Adaboost regressor is',
                  rcv.best_estimator_)
            # Refit the winning configuration on the full training set.
            adb = rcv.best_estimator_
            adb.fit(self.x_train, self.y_train)
            adb_feature_imp = pd.DataFrame(adb.feature_importances_,
                                           index=self.x_train.columns,
                                           columns=['Feature_importance'])
            adb_feature_imp.sort_values(by='Feature_importance',
                                        ascending=False, inplace=True)
            print()
            print('Feature importance by the Adaboost regressor: ', adb_feature_imp)
            print()
            logger.add_info_log("class BoostingModelReg : adaboost_regressor. Model "
                                "Build successfully")
            return adb
        except Exception as e:
            # NOTE: failure is swallowed here; the method returns None.
            logger.add_exception_log(f'class BoostingModelReg : adaboost_regressor. Model '
                                     f'Build failed. Exception {str(e)}')

    def gradientboosting_regressor(self):
        """Cross-validate, refit and return a GradientBoostingRegressor.

        Prints the best estimator and its feature importances.
        Returns None (after logging) if anything fails.
        """
        logger.add_info_log(
            "Enter class BoostingModelReg : gradientboosting_regressor function")
        try:
            gbr = GradientBoostingRegressor()
            params = {'n_estimators': [5, 10, 20, 40, 80, 100, 200],
                      'learning_rate': [0.1, 0.2, 0.5, 0.8, 1],
                      # BUG FIX: 'lr' is not a valid GradientBoostingRegressor
                      # loss; the legacy name for squared-error loss is 'ls'.
                      'loss': ['ls', 'lad', 'huber'],
                      'subsample': [0.001, 0.009, 0.01, 0.09, 0.1, 0.4, 0.9, 1],
                      'criterion': ['friedman_mse', 'mse'],
                      'min_samples_split': [2, 4, 8, 10],
                      'min_samples_leaf': [1, 10, 20, 50]
                      }  # Parameter grid
            rcv = RandomizedSearchCV(estimator=gbr, param_distributions=params,
                                     n_iter=10, scoring='r2', n_jobs=-1, cv=10,
                                     verbose=5, random_state=42,
                                     return_train_score=True)
            print('Cross validation process for the Gradient Boosting Regressor')
            rcv.fit(self.x_train, self.y_train)
            print()
            print('The best estimator for the GradientBoosting regressor is',
                  rcv.best_estimator_)
            # Refit the winning configuration on the full training set.
            gbr = rcv.best_estimator_
            gbr.fit(self.x_train, self.y_train)
            gbr_feature_imp = pd.DataFrame(gbr.feature_importances_,
                                           index=self.x_train.columns,
                                           columns=['Feature_importance'])
            gbr_feature_imp.sort_values(by='Feature_importance',
                                        ascending=False, inplace=True)
            print()
            print('Feature importance by the Gradient boosting regressor: ', gbr_feature_imp)
            print()
            logger.add_info_log("class BoostingModelReg : gradientboosting_regressor. Model "
                                "Build successfully")
            return gbr
        except Exception as e:
            logger.add_exception_log(f'class BoostingModelReg : gradientboosting_regressor. Model '
                                     f'Build failed. Exception {str(e)}')

    def xgb_regressor(self):
        """Cross-validate, refit and return an XGBRegressor.

        Prints the best estimator and its feature importances.
        Returns None (after logging) if anything fails.
        """
        logger.add_info_log(
            "Enter class BoostingModelReg : xgb_regressor function")
        try:
            xgbr = XGBRegressor()
            params = {
                'learning_rate': [0.1, 0.2, 0.5, 0.8, 1],
                'max_depth': [2, 3, 4, 5, 6, 7, 8, 10],
                'subsample': [0.001, 0.009, 0.01, 0.09, 0.1, 0.4, 0.9, 1],
                'min_child_weight': [1, 2, 4, 5, 8],
                'gamma': [0.0, 0.1, 0.2, 0.3],
                # NOTE(review): colsample_bytree is documented as (0, 1];
                # the 1.4 entry may be rejected by xgboost -- confirm.
                'colsample_bytree': [0.3, 0.5, 0.7, 1.0, 1.4],
                'reg_alpha': [0, 0.1, 0.2, 0.4, 0.5, 0.7, 0.9, 1, 4, 8, 10, 50, 100],
                'reg_lambda': [1, 4, 5, 10, 20, 50, 100, 200, 500, 800, 1000]
            }  # Parameter grid
            rcv = RandomizedSearchCV(estimator=xgbr, param_distributions=params,
                                     n_iter=10, scoring='r2', cv=10, verbose=2,
                                     random_state=42, n_jobs=-1,
                                     return_train_score=True)
            print('Cross validation process for the XGBoost regressor')
            rcv.fit(self.x_train, self.y_train)
            print()
            print('The best estimator for the XGBoost regressor is',
                  rcv.best_estimator_)
            # Refit the winning configuration on the full training set.
            xgbr = rcv.best_estimator_
            xgbr.fit(self.x_train, self.y_train)
            xgbr_feature_imp = pd.DataFrame(xgbr.feature_importances_,
                                            index=self.x_train.columns,
                                            columns=['Feature_importance'])
            xgbr_feature_imp.sort_values(by='Feature_importance',
                                         ascending=False, inplace=True)
            print()
            print('Feature importance by the XGBoost regressor: ', xgbr_feature_imp)
            print()
            logger.add_info_log("class BoostingModelReg : xgb_regressor. Model "
                                "Build successfully")
            return xgbr
        except Exception as e:
            logger.add_exception_log(f'class BoostingModelReg : xgb_regressor. Model '
                                     f'Build failed. Exception {str(e)}')

    def model_predict(self, model, X):
        """Predict the target for X with the given fitted model.

        Returns the prediction array, or None (after logging) on failure.
        """
        try:
            logger.add_info_log(
                "Enter class BoostingModelReg : model_predict function")
            pred = model.predict(X)
            return pred
        except Exception as e:
            logger.add_exception_log(f'class BoostingModelReg : model_predict. Model '
                                     f'Build failed. Exception {str(e)}')
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from logger.LoggerClass import LoggerFileClass
logger = LoggerFileClass("boosting_model")
class BoostingModelReg:
"""This class is used to build regression models using different ensemble techniques.
Author: <NAME>
References I referred:
Reference 1 - https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html?highlight=adaboost%20regressor#sklearn.ensemble.AdaBoostRegressor
reference 2 - https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html?highlight=gradient%20boost%20regressor#sklearn.ensemble.GradientBoostingRegressor
reference 3 - https://xgboost.readthedocs.io/en/latest/get_started.html
reference 4 - https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html
parameters:
--------------------------------
x_train: Training data frame containing the independent features.
y_train: Training dataframe containing the dependent or target feature.
x_test: Testing dataframe containing the independent features.
y_test: Testing dataframe containing the dependent or target feature.
"""
def __init__(self, x_train, y_train, x_test, y_test):
    """Store the train/test splits used by the model-builder methods."""
    self.x_train = x_train  # training features
    self.y_train = y_train  # training target
    self.x_test = x_test    # testing features (kept for callers)
    self.y_test = y_test    # testing target (kept for callers)
def adaboost_regressor(self):
    """Description: This method builds a model using AdaBoostRegressor algorithm, a type of ensemble technique imported
    from the sci-kit learn library. It uses cross validation technique and chooses the best estimator with the
    best hyper parameters.
    Raises an exception if it fails
    returns
    ----------------------------------
    The Adaboost regressor model and prints the importance of each feature
    """
    logger.add_info_log(
        "Enter class BoostingModelReg : adaboost_regressor function")
    try:
        adb = AdaBoostRegressor()  # instantiating the AdaBoostRegressor object
        params = {'n_estimators': [5, 10, 20, 40, 80, 100, 200],
                  'learning_rate': [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1],
                  'loss': ['linear', 'square', 'exponential']
                  }  # parameter grid
        # 10-fold CV over 10 random candidates, scored by r2, all cores.
        rcv = RandomizedSearchCV(estimator=adb, param_distributions=params, n_iter=10, scoring='r2',
                                 n_jobs=-1, cv=10, verbose=5, random_state=42, return_train_score=True)
        # instantiating RandomizedSearchCV
        print('Cross validation process for the Adaboost regressor')
        rcv.fit(self.x_train, self.y_train)  # fitting on the train data
        print()
        print('The best estimator for the Adaboost regressor is',
              rcv.best_estimator_)  # displaying the best estimator
        adb = rcv.best_estimator_  # Building the best estimator recommended by the randomized search CV
        # as the final Adaboost regressor.
        adb.fit(self.x_train, self.y_train)  # fitting on the train data
        # Feature importance by the Adaboost regressor
        adb_feature_imp = pd.DataFrame(adb.feature_importances_, index=self.x_train.columns,
                                       columns=['Feature_importance'])
        adb_feature_imp.sort_values(by='Feature_importance', ascending=False, inplace=True)
        print()
        print('Feature importance by the Adaboost regressor: ', adb_feature_imp)
        print()
        logger.add_info_log("class BoostingModelReg : adaboost_regressor. Model "
                            "Build successfully")
        return adb
    except Exception as e:
        # NOTE: the failure is swallowed; callers receive None.
        logger.add_exception_log(f'class BoostingModelReg : adaboost_regressor. Model '
                                 f'Build failed. Exception {str(e)}')
def gradientboosting_regressor(self):
    """Cross-validate, refit and return a GradientBoostingRegressor.

    Runs a RandomizedSearchCV (10 folds, r2 scoring, 10 candidates),
    refits the best estimator on the training data and prints its
    feature importances. Returns None (after logging) if anything fails.
    """
    logger.add_info_log(
        "Enter class BoostingModelReg : gradientboosting_regressor function")
    try:
        gbr = GradientBoostingRegressor()
        params = {'n_estimators': [5, 10, 20, 40, 80, 100, 200],
                  'learning_rate': [0.1, 0.2, 0.5, 0.8, 1],
                  # BUG FIX: 'lr' is not a valid GradientBoostingRegressor
                  # loss; the legacy name for squared-error loss is 'ls'.
                  'loss': ['ls', 'lad', 'huber'],
                  'subsample': [0.001, 0.009, 0.01, 0.09, 0.1, 0.4, 0.9, 1],
                  'criterion': ['friedman_mse', 'mse'],
                  'min_samples_split': [2, 4, 8, 10],
                  'min_samples_leaf': [1, 10, 20, 50]
                  }  # Parameter grid
        rcv = RandomizedSearchCV(estimator=gbr, param_distributions=params,
                                 n_iter=10, scoring='r2', n_jobs=-1, cv=10,
                                 verbose=5, random_state=42,
                                 return_train_score=True)
        print('Cross validation process for the Gradient Boosting Regressor')
        rcv.fit(self.x_train, self.y_train)
        print()
        print('The best estimator for the GradientBoosting regressor is',
              rcv.best_estimator_)
        # Refit the winning configuration on the full training set.
        gbr = rcv.best_estimator_
        gbr.fit(self.x_train, self.y_train)
        gbr_feature_imp = pd.DataFrame(gbr.feature_importances_,
                                       index=self.x_train.columns,
                                       columns=['Feature_importance'])
        gbr_feature_imp.sort_values(by='Feature_importance',
                                    ascending=False, inplace=True)
        print()
        print('Feature importance by the Gradient boosting regressor: ', gbr_feature_imp)
        print()
        logger.add_info_log("class BoostingModelReg : gradientboosting_regressor. Model "
                            "Build successfully")
        return gbr
    except Exception as e:
        # NOTE: the failure is swallowed; callers receive None.
        logger.add_exception_log(f'class BoostingModelReg : gradientboosting_regressor. Model '
                                 f'Build failed. Exception {str(e)}')
def xgb_regressor(self):
"""Description: This method builds a model using XGBRegressor algorithm, a type of ensemble technique imported from the
xgboost library.It uses cross validation technique and chooses the best estimator with the
best hyper parameters.
Raises an exception if it fails
returns
-----------------------------
The XGBoost regressor model and prints the importance of each feature
"""
logger.add_info_log(
"Enter class BoostingModelReg : xgb_regressor function")
try:
xgbr = XGBRegressor() # instantiating the XGBRegressor object
params = {
'learning_rate': [0.1, 0.2, 0.5, 0.8, 1],
'max_depth': [2, 3, 4, 5, 6, 7, 8, 10],
'subsample': [0.001, 0.009, 0.01, 0.09, 0.1, 0.4, 0.9, 1],
'min_child_weight': [1, 2, 4, 5, 8],
'gamma': [0.0, 0.1, 0.2, 0.3],
'colsample_bytree': [0.3, 0.5, 0.7, 1.0, 1.4],
'reg_alpha': [0, 0.1, 0.2, 0.4, 0.5, 0.7, 0.9, 1, 4, 8, 10, 50, 100],
'reg_lambda': [1, 4, 5, 10, 20, 50, 100, 200, 500, 800, 1000]
} # Parameter grid
rcv = RandomizedSearchCV(estimator=xgbr, param_distributions=params, n_iter=10,
scoring='r2', cv=10, verbose=2,
random_state=42, n_jobs=-1,
return_train_score=True) # instantiating RandomizedSearchCV
print('Cross validation process for the XGBoost regressor')
rcv.fit(self.x_train, self.y_train) # Fitting on the train data
print()
print('The best estimator for the XGBoost regressor is',
rcv.best_estimator_) # displaying the best estimator
xgbr = rcv.best_estimator_ # Building the best estimator recommended by the randomized search CV
# as the final XGBoosting regressor.
xgbr.fit(self.x_train, self.y_train) # fitting on the train data
# Feature importance by the XGBoosting regressor
xgbr_feature_imp = pd.DataFrame(xgbr.feature_importances_, index=self.x_train.columns,
columns=['Feature_importance'])
xgbr_feature_imp.sort_values(by='Feature_importance', ascending=False, inplace=True)
print()
print('Feature importance by the XGBoost regressor: ', xgbr_feature_imp)
print()
logger.add_info_log("class BoostingModelReg : xgb_regressor. Model "
"Build successfully")
return xgbr
except Exception as e:
logger.add_exception_log(f'class BoostingModelReg : xgb_regressor. Model '
f'Build failed. Exception {str(e)}')
def model_predict(self, model, X):
"""Description: This method makes predictions using the given model
raises an exception if it fails
parameters
----------------------------------
model:- model to be used for making predictions
X = A pandas dataframe with independent features
returns
----------------------------------
The predictions of the target variable.
"""
try:
logger.add_info_log(
"Enter class BoostingModelReg : model_predict function")
pred = model.predict(X)
return pred
except Exception as e:
logger.add_exception_log(f'class BoostingModelReg : model_predict. Model '
f'Build failed. Exception {str(e)}') | 0.860823 | 0.618176 |
# External Modules
import logging
import os
logger = logging.getLogger('logger')
current_dir = os.path.dirname(os.path.realpath(__file__))
class FileProvider(object):
def __init__(self, directory_to_search):
self.exclude_files = [
"ccf_ctl.v",
"design_error.inc",
"flop_ccf.sv",
"flop_fifo_2.sv",
"flop_fifo_lu.sv",
"flop_fifo.sv",
"ft_fifo_p2.v",
"ft_fifo_p.v",
"ft_fifo.v",
"gray.inc",
"lib_pipe.sv",
"push_2_fifo_double_pop.sv",
"push_2_fifo_guts.inc",
"push_2_fifo.sv",
"ram_2p_bit_en.v",
"ram_2p_dc.v",
"ram_2p_syn.v",
"ram_2p_trial_synth.v",
"ram_2p.v",
"rr_arb.sv",
"sync.v",
"README",
".gitignore",
"supported_vivado_versions.txt",
"hdk_version.txt",
"dest_register_slice.v",
"src_register_slice.v",
]
self.exclude_extensions = [
".md",
".pdf",
".jpg",
".csv",
".xdc",
".txt",
".f",
".pptx",
".PNG",
".xlsx",
".png"
]
self.exclude_paths = {
"/common/shell_v032117d7/design/ip",
"/common/shell_v04151701/design/ip"
}
self.directory = directory_to_search
self.exclude_paths = [self.directory + s for s in self.exclude_paths]
def get_files(self):
file_list = []
valid_file_list = []
invalid_file_list = []
for root, dirs, files in os.walk(self.directory, topdown=True):
# Removing the excluded paths from os.walk search
for exclude_path in self.exclude_paths:
dir_name = os.path.basename(exclude_path)
parent_path = os.path.dirname(exclude_path)
if parent_path == root:
if dir_name in dirs:
dirs.remove(dir_name)
for file_name in files:
file_path = os.path.join(root, file_name)
file_list.append(file_path)
for file_path in file_list:
file_basename = os.path.basename(file_path)
file_name, file_extension = os.path.splitext(file_path)
if file_basename in self.exclude_files:
logger.debug("Excluded File: " + file_path)
invalid_file_list.append(file_path)
continue
elif file_extension in self.exclude_extensions:
logger.debug("Excluded Extension: " + file_path)
invalid_file_list.append(file_path)
continue
else:
valid_file_list.append(file_path)
return valid_file_list | hdk/tests/validate_file_headers/fileprovider.py |
# External Modules
import logging
import os
logger = logging.getLogger('logger')
current_dir = os.path.dirname(os.path.realpath(__file__))
class FileProvider(object):
def __init__(self, directory_to_search):
self.exclude_files = [
"ccf_ctl.v",
"design_error.inc",
"flop_ccf.sv",
"flop_fifo_2.sv",
"flop_fifo_lu.sv",
"flop_fifo.sv",
"ft_fifo_p2.v",
"ft_fifo_p.v",
"ft_fifo.v",
"gray.inc",
"lib_pipe.sv",
"push_2_fifo_double_pop.sv",
"push_2_fifo_guts.inc",
"push_2_fifo.sv",
"ram_2p_bit_en.v",
"ram_2p_dc.v",
"ram_2p_syn.v",
"ram_2p_trial_synth.v",
"ram_2p.v",
"rr_arb.sv",
"sync.v",
"README",
".gitignore",
"supported_vivado_versions.txt",
"hdk_version.txt",
"dest_register_slice.v",
"src_register_slice.v",
]
self.exclude_extensions = [
".md",
".pdf",
".jpg",
".csv",
".xdc",
".txt",
".f",
".pptx",
".PNG",
".xlsx",
".png"
]
self.exclude_paths = {
"/common/shell_v032117d7/design/ip",
"/common/shell_v04151701/design/ip"
}
self.directory = directory_to_search
self.exclude_paths = [self.directory + s for s in self.exclude_paths]
def get_files(self):
file_list = []
valid_file_list = []
invalid_file_list = []
for root, dirs, files in os.walk(self.directory, topdown=True):
# Removing the excluded paths from os.walk search
for exclude_path in self.exclude_paths:
dir_name = os.path.basename(exclude_path)
parent_path = os.path.dirname(exclude_path)
if parent_path == root:
if dir_name in dirs:
dirs.remove(dir_name)
for file_name in files:
file_path = os.path.join(root, file_name)
file_list.append(file_path)
for file_path in file_list:
file_basename = os.path.basename(file_path)
file_name, file_extension = os.path.splitext(file_path)
if file_basename in self.exclude_files:
logger.debug("Excluded File: " + file_path)
invalid_file_list.append(file_path)
continue
elif file_extension in self.exclude_extensions:
logger.debug("Excluded Extension: " + file_path)
invalid_file_list.append(file_path)
continue
else:
valid_file_list.append(file_path)
return valid_file_list | 0.324342 | 0.08617 |
class Operation(object):
# no doc
@staticmethod
def AddToPourUnit(inputPour,objectsToBeAdded):
""" AddToPourUnit(inputPour: PourObject,objectsToBeAdded: List[ModelObject]) -> bool """
pass
@staticmethod
def CreateBasePoint(basePoint):
""" CreateBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def DeleteBasePoint(basePoint):
""" DeleteBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def DeleteMacro(fileName,macroLocation):
""" DeleteMacro(fileName: str,macroLocation: MacroLocationEnum) -> bool """
pass
@staticmethod
def dotAutoSaveModel(Comment,User):
""" dotAutoSaveModel(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotCheckBoltAssemblyDefinitionsModified(ModStamp):
""" dotCheckBoltAssemblyDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckBoltDefinitionsModified(ModStamp):
""" dotCheckBoltDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckCustomPropertiesModified(ModStamp):
""" dotCheckCustomPropertiesModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckDrawingOptionsModified(ModStamp):
""" dotCheckDrawingOptionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckDrawingsModified(ModStamp):
""" dotCheckDrawingsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckMaterialDefinitionsModified(ModStamp):
""" dotCheckMaterialDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckModelOptionsModified(ModStamp):
""" dotCheckModelOptionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckObjectModifiedAfterStamp(objectGuid,ModStamp):
""" dotCheckObjectModifiedAfterStamp(objectGuid: Guid,ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckProfileDefinitionsModified(ModStamp):
""" dotCheckProfileDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCleanDrawingFiles(Silent,BackupPath):
""" dotCleanDrawingFiles(Silent: bool,BackupPath: str) -> bool """
pass
@staticmethod
def dotClearUndoLog():
""" dotClearUndoLog() """
pass
@staticmethod
def dotConnectToNewMultiUserServerAndOpenModel(ModelFolder,ServerName):
""" dotConnectToNewMultiUserServerAndOpenModel(ModelFolder: str,ServerName: str) -> bool """
pass
@staticmethod
def dotConvertAndOpenAsMultiUserModel(ModelFolder,ServerName):
""" dotConvertAndOpenAsMultiUserModel(ModelFolder: str,ServerName: str) -> bool """
pass
@staticmethod
def dotConvertAndOpenAsSingleUserModel(ModelFolder):
""" dotConvertAndOpenAsSingleUserModel(ModelFolder: str) -> bool """
pass
@staticmethod
def dotCreateNewMultiUserModel(ModelName,ModelPath,ServerName):
""" dotCreateNewMultiUserModel(ModelName: str,ModelPath: str,ServerName: str) -> bool """
pass
@staticmethod
def dotCreateNewSharedModel(ModelName,ModelPath):
""" dotCreateNewSharedModel(ModelName: str,ModelPath: str) -> bool """
pass
@staticmethod
def dotCreateNewSingleUserModel(ModelName,ModelPath):
""" dotCreateNewSingleUserModel(ModelName: str,ModelPath: str) -> bool """
pass
@staticmethod
def dotCreateNewSingleUserModelFromTemplate(ModelName,ModelPath,ModelTemplateName):
""" dotCreateNewSingleUserModelFromTemplate(ModelName: str,ModelPath: str,ModelTemplateName: str) -> bool """
pass
@staticmethod
def dotDisplayAutoDefaultSettings(type,componentNumber,componentName):
""" dotDisplayAutoDefaultSettings(type: ModelObjectEnum,componentNumber: int,componentName: str) -> bool """
pass
@staticmethod
def dotDisplayComponentHelp(type,componentNumber,componentName):
""" dotDisplayComponentHelp(type: ModelObjectEnum,componentNumber: int,componentName: str) -> bool """
pass
@staticmethod
def dotExcludeFromSharingAndOpen(ModelFolder):
""" dotExcludeFromSharingAndOpen(ModelFolder: str) -> bool """
pass
@staticmethod
def dotExportGetColorRepresentationForObject(ID,color):
""" dotExportGetColorRepresentationForObject(ID: int,color: Color) -> (bool,Color) """
pass
@staticmethod
def dotExportShadowRegion(PartIdentifiers):
""" dotExportShadowRegion(PartIdentifiers: ArrayList) -> ArrayList """
pass
@staticmethod
def dotExportShadowRegionComplement(PartIdentifiers):
""" dotExportShadowRegionComplement(PartIdentifiers: ArrayList) -> ArrayList """
pass
@staticmethod
def dotGetCurrentModificationStampGuid():
""" dotGetCurrentModificationStampGuid() -> str """
pass
@staticmethod
def dotGetDatabaseVersion():
""" dotGetDatabaseVersion() -> int """
pass
@staticmethod
def dotGetDataBaseVersionInfoFromModel(ModelName,ModelPath,ModelVersion,CurrentVersion):
""" dotGetDataBaseVersionInfoFromModel(ModelName: str,ModelPath: str,ModelVersion: int,CurrentVersion: int) -> (bool,int,int) """
pass
@staticmethod
def dotGetDeletedObjecs(ModStamp,ObjectTypes,returnAlsoIfObjectIsCreatedAndDeletedAfterEvent):
""" dotGetDeletedObjecs(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum],returnAlsoIfObjectIsCreatedAndDeletedAfterEvent: bool) -> ModelObjectEnumerator """
pass
@staticmethod
def dotGetModifications(ModStamp,ObjectTypes,returnAlsoIfObjectIsCreatedAndDeletedAfterEvent):
""" dotGetModifications(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum],returnAlsoIfObjectIsCreatedAndDeletedAfterEvent: bool) -> ModificationInfo """
pass
@staticmethod
def dotGetModificationsByFilter(ModStamp,FilterName):
""" dotGetModificationsByFilter(ModStamp: str,FilterName: str) -> ModelObjectEnumerator """
pass
@staticmethod
def dotGetObjectsWithAnyModification(ModStamp,ObjectTypes):
""" dotGetObjectsWithAnyModification(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum]) -> ModelObjectEnumerator """
pass
@staticmethod
def dotIsModelSaved(ModelFolder):
""" dotIsModelSaved(ModelFolder: str) -> bool """
pass
@staticmethod
def dotModelImportIsEnabled():
""" dotModelImportIsEnabled() -> bool """
pass
@staticmethod
def dotModelSharingLicenseInfo(ProfileId):
""" dotModelSharingLicenseInfo(ProfileId: str) -> bool """
pass
@staticmethod
def dotQuitProgram(Comment,User):
""" dotQuitProgram(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotRedo():
""" dotRedo() """
pass
@staticmethod
def dotResetUserOptionToDefaultValue(VariableName):
""" dotResetUserOptionToDefaultValue(VariableName: str) -> bool """
pass
@staticmethod
def dotSaveAsModel(path,Comment,User):
""" dotSaveAsModel(path: str,Comment: str,User: str) -> bool """
pass
@staticmethod
def dotSaveModel(Comment,User):
""" dotSaveModel(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotSetAdvancedOption(VariableName,Value):
"""
dotSetAdvancedOption(VariableName: str,Value: str) -> bool
dotSetAdvancedOption(VariableName: str,Value: float) -> bool
dotSetAdvancedOption(VariableName: str,Value: bool) -> bool
dotSetAdvancedOption(VariableName: str,Value: int) -> bool
"""
pass
@staticmethod
def dotSetUserModelRole(modelId,modelFolder,userId,role):
""" dotSetUserModelRole(modelId: Guid,modelFolder: str,userId: Guid,role: DotSharingPrivilegeEnum) -> bool """
pass
@staticmethod
def dotSharingCommandResult(commandId,success,ErrorCode,ErrorDetail):
""" dotSharingCommandResult(commandId: int,success: bool,ErrorCode: DotSharingErrorCodeEnum,ErrorDetail: str) -> bool """
pass
@staticmethod
def dotSharingCreateEmptyModel(modelName,modelPath):
""" dotSharingCreateEmptyModel(modelName: str,modelPath: str) -> bool """
pass
@staticmethod
def dotSharingCreateNewModel(modelName,modelPath):
""" dotSharingCreateNewModel(modelName: str,modelPath: str) -> bool """
pass
@staticmethod
def dotSharingCreateStartSharingBackup(backupFolder):
""" dotSharingCreateStartSharingBackup(backupFolder: str) -> bool """
pass
@staticmethod
def dotSharingGetVersionGuid(versionGuid):
""" dotSharingGetVersionGuid(versionGuid: Guid) -> (bool,Guid) """
pass
@staticmethod
def dotSharingIsEnabled():
""" dotSharingIsEnabled() -> bool """
pass
@staticmethod
def dotSharingLogPrint(type,message):
""" dotSharingLogPrint(type: DotSharingLogTypeEnum,message: str) """
pass
@staticmethod
def dotSharingMakeModelShareable(xml):
""" dotSharingMakeModelShareable(xml: str) -> bool """
pass
@staticmethod
def dotSharingOpenModelForJoin(modelFolder):
""" dotSharingOpenModelForJoin(modelFolder: str) -> bool """
pass
@staticmethod
def dotSharingReadIn(packetFolder,packetNumber,errorCode,errorDetail,moduleBaselines):
""" dotSharingReadIn(packetFolder: str,packetNumber: int) -> (bool,DotSharingErrorCodeEnum,str,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotSharingReadInCommit(success,joiningSharing):
""" dotSharingReadInCommit(success: bool,joiningSharing: bool) -> bool """
pass
@staticmethod
def dotSharingReadInStarting(joiningSharing):
""" dotSharingReadInStarting(joiningSharing: bool) -> bool """
pass
@staticmethod
def dotSharingRegisterPlugin(name,asynchronous):
""" dotSharingRegisterPlugin(name: str,asynchronous: bool) -> bool """
pass
@staticmethod
def dotSharingRestoreStartSharingBackup(backupFolder):
""" dotSharingRestoreStartSharingBackup(backupFolder: str) -> bool """
pass
@staticmethod
def dotSharingSaveVersionGuid(versionGuid,packetNumber,baselines):
""" dotSharingSaveVersionGuid(versionGuid: Guid,packetNumber: int,baselines: Dictionary[str,str]) -> bool """
pass
@staticmethod
def dotSharingSetMenu(privilege):
""" dotSharingSetMenu(privilege: DotSharingPrivilegeEnum) -> bool """
pass
@staticmethod
def dotSharingShowReadInChanges():
""" dotSharingShowReadInChanges() -> bool """
pass
@staticmethod
def dotSharingWriteOut(permission,packetFolder,mode,revisionInfo,errorCode,errorDetail,moduleBaselines):
""" dotSharingWriteOut(permission: DotSharingPrivilegeEnum,packetFolder: str,mode: DotSharingWriteOutModeEnum,revisionInfo: str) -> (bool,DotSharingErrorCodeEnum,str,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotSharingWriteOutCommit(success,packetFolder,packetNumber,moduleBaselines):
""" dotSharingWriteOutCommit(success: bool,packetFolder: str,packetNumber: int) -> (bool,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotStartAction(ActionName,Parameters):
""" dotStartAction(ActionName: str,Parameters: str) -> bool """
pass
@staticmethod
def dotStartCommand(CommandName,Parameters):
""" dotStartCommand(CommandName: str,Parameters: str) -> bool """
pass
@staticmethod
def dotStartCustomComponentCreation(ComponentName):
""" dotStartCustomComponentCreation(ComponentName: str) -> bool """
pass
@staticmethod
def dotStartPluginCreation(ComponentName):
""" dotStartPluginCreation(ComponentName: str) -> bool """
pass
@staticmethod
def dotUndo():
""" dotUndo() """
pass
@staticmethod
def dotWriteToSessionLog(Message):
""" dotWriteToSessionLog(Message: str) -> bool """
pass
@staticmethod
def ExportIFCFromAll(ModelName,FullFileName,ViewType,PropertySets,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromAll(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromFilteredObjects(ModelName,FullFileName,ViewType,PropertySets,FilterName,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromFilteredObjects(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],FilterName: str,BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromObjects(ModelName,FullFileName,ViewType,PropertySets,ModelObjects,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromObjects(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],ModelObjects: List[ModelObject],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromSelected(ModelName,FullFileName,ViewType,PropertySets,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromSelected(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def GetBasePointByGuid(guid):
""" GetBasePointByGuid(guid: Guid) -> BasePoint """
pass
@staticmethod
def GetBasePointByName(name):
""" GetBasePointByName(name: str) -> BasePoint """
pass
@staticmethod
def GetBasePoints():
""" GetBasePoints() -> List[BasePoint] """
pass
@staticmethod
def ModifyBasePoint(basePoint):
""" ModifyBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def RollbackToTestSavePoint():
""" RollbackToTestSavePoint() """
pass
@staticmethod
def SetTestSavePoint():
""" SetTestSavePoint() """
pass
DotSharingErrorCodeEnum=None
DotSharingLogTypeEnum=None
DotSharingPrivilegeEnum=None
DotSharingWriteOutModeEnum=None
IFCExportBasePoint=None
IFCExportViewTypeEnum=None
MacroLocationEnum=None
OperationsMaxMessageLength=2000
SaveOperationEnum=None
SharingOperationEnum=None
UndoOperationEnum=None
__all__=[
'__reduce_ex__',
'AddToPourUnit',
'CreateBasePoint',
'DeleteBasePoint',
'DeleteMacro',
'dotAutoSaveModel',
'dotCheckBoltAssemblyDefinitionsModified',
'dotCheckBoltDefinitionsModified',
'dotCheckCustomPropertiesModified',
'dotCheckDrawingOptionsModified',
'dotCheckDrawingsModified',
'dotCheckMaterialDefinitionsModified',
'dotCheckModelOptionsModified',
'dotCheckObjectModifiedAfterStamp',
'dotCheckProfileDefinitionsModified',
'dotCleanDrawingFiles',
'dotClearUndoLog',
'dotConnectToNewMultiUserServerAndOpenModel',
'dotConvertAndOpenAsMultiUserModel',
'dotConvertAndOpenAsSingleUserModel',
'dotCreateNewMultiUserModel',
'dotCreateNewSharedModel',
'dotCreateNewSingleUserModel',
'dotCreateNewSingleUserModelFromTemplate',
'dotDisplayAutoDefaultSettings',
'dotDisplayComponentHelp',
'dotExcludeFromSharingAndOpen',
'dotExportGetColorRepresentationForObject',
'dotExportShadowRegion',
'dotExportShadowRegionComplement',
'dotGetCurrentModificationStampGuid',
'dotGetDatabaseVersion',
'dotGetDataBaseVersionInfoFromModel',
'dotGetDeletedObjecs',
'dotGetModifications',
'dotGetModificationsByFilter',
'dotGetObjectsWithAnyModification',
'dotIsModelSaved',
'dotModelImportIsEnabled',
'dotModelSharingLicenseInfo',
'dotQuitProgram',
'dotRedo',
'dotResetUserOptionToDefaultValue',
'dotSaveAsModel',
'dotSaveModel',
'dotSetAdvancedOption',
'dotSetUserModelRole',
'dotSharingCommandResult',
'dotSharingCreateEmptyModel',
'dotSharingCreateNewModel',
'dotSharingCreateStartSharingBackup',
'DotSharingErrorCodeEnum',
'dotSharingGetVersionGuid',
'dotSharingIsEnabled',
'dotSharingLogPrint',
'DotSharingLogTypeEnum',
'dotSharingMakeModelShareable',
'dotSharingOpenModelForJoin',
'DotSharingPrivilegeEnum',
'dotSharingReadIn',
'dotSharingReadInCommit',
'dotSharingReadInStarting',
'dotSharingRegisterPlugin',
'dotSharingRestoreStartSharingBackup',
'dotSharingSaveVersionGuid',
'dotSharingSetMenu',
'dotSharingShowReadInChanges',
'dotSharingWriteOut',
'dotSharingWriteOutCommit',
'DotSharingWriteOutModeEnum',
'dotStartAction',
'dotStartCommand',
'dotStartCustomComponentCreation',
'dotStartPluginCreation',
'dotUndo',
'dotWriteToSessionLog',
'ExportIFCFromAll',
'ExportIFCFromFilteredObjects',
'ExportIFCFromObjects',
'ExportIFCFromSelected',
'GetBasePointByGuid',
'GetBasePointByName',
'GetBasePoints',
'IFCExportBasePoint',
'IFCExportViewTypeEnum',
'MacroLocationEnum',
'ModifyBasePoint',
'OperationsMaxMessageLength',
'RollbackToTestSavePoint',
'SaveOperationEnum',
'SetTestSavePoint',
'SharingOperationEnum',
'UndoOperationEnum',
] | release/stubs.min/Tekla/Structures/ModelInternal_parts/Operation.py | class Operation(object):
# no doc
@staticmethod
def AddToPourUnit(inputPour,objectsToBeAdded):
""" AddToPourUnit(inputPour: PourObject,objectsToBeAdded: List[ModelObject]) -> bool """
pass
@staticmethod
def CreateBasePoint(basePoint):
""" CreateBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def DeleteBasePoint(basePoint):
""" DeleteBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def DeleteMacro(fileName,macroLocation):
""" DeleteMacro(fileName: str,macroLocation: MacroLocationEnum) -> bool """
pass
@staticmethod
def dotAutoSaveModel(Comment,User):
""" dotAutoSaveModel(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotCheckBoltAssemblyDefinitionsModified(ModStamp):
""" dotCheckBoltAssemblyDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckBoltDefinitionsModified(ModStamp):
""" dotCheckBoltDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckCustomPropertiesModified(ModStamp):
""" dotCheckCustomPropertiesModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckDrawingOptionsModified(ModStamp):
""" dotCheckDrawingOptionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckDrawingsModified(ModStamp):
""" dotCheckDrawingsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckMaterialDefinitionsModified(ModStamp):
""" dotCheckMaterialDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckModelOptionsModified(ModStamp):
""" dotCheckModelOptionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckObjectModifiedAfterStamp(objectGuid,ModStamp):
""" dotCheckObjectModifiedAfterStamp(objectGuid: Guid,ModStamp: str) -> bool """
pass
@staticmethod
def dotCheckProfileDefinitionsModified(ModStamp):
""" dotCheckProfileDefinitionsModified(ModStamp: str) -> bool """
pass
@staticmethod
def dotCleanDrawingFiles(Silent,BackupPath):
""" dotCleanDrawingFiles(Silent: bool,BackupPath: str) -> bool """
pass
@staticmethod
def dotClearUndoLog():
""" dotClearUndoLog() """
pass
@staticmethod
def dotConnectToNewMultiUserServerAndOpenModel(ModelFolder,ServerName):
""" dotConnectToNewMultiUserServerAndOpenModel(ModelFolder: str,ServerName: str) -> bool """
pass
@staticmethod
def dotConvertAndOpenAsMultiUserModel(ModelFolder,ServerName):
""" dotConvertAndOpenAsMultiUserModel(ModelFolder: str,ServerName: str) -> bool """
pass
@staticmethod
def dotConvertAndOpenAsSingleUserModel(ModelFolder):
""" dotConvertAndOpenAsSingleUserModel(ModelFolder: str) -> bool """
pass
@staticmethod
def dotCreateNewMultiUserModel(ModelName,ModelPath,ServerName):
""" dotCreateNewMultiUserModel(ModelName: str,ModelPath: str,ServerName: str) -> bool """
pass
@staticmethod
def dotCreateNewSharedModel(ModelName,ModelPath):
""" dotCreateNewSharedModel(ModelName: str,ModelPath: str) -> bool """
pass
@staticmethod
def dotCreateNewSingleUserModel(ModelName,ModelPath):
""" dotCreateNewSingleUserModel(ModelName: str,ModelPath: str) -> bool """
pass
@staticmethod
def dotCreateNewSingleUserModelFromTemplate(ModelName,ModelPath,ModelTemplateName):
""" dotCreateNewSingleUserModelFromTemplate(ModelName: str,ModelPath: str,ModelTemplateName: str) -> bool """
pass
@staticmethod
def dotDisplayAutoDefaultSettings(type,componentNumber,componentName):
""" dotDisplayAutoDefaultSettings(type: ModelObjectEnum,componentNumber: int,componentName: str) -> bool """
pass
@staticmethod
def dotDisplayComponentHelp(type,componentNumber,componentName):
""" dotDisplayComponentHelp(type: ModelObjectEnum,componentNumber: int,componentName: str) -> bool """
pass
@staticmethod
def dotExcludeFromSharingAndOpen(ModelFolder):
""" dotExcludeFromSharingAndOpen(ModelFolder: str) -> bool """
pass
@staticmethod
def dotExportGetColorRepresentationForObject(ID,color):
""" dotExportGetColorRepresentationForObject(ID: int,color: Color) -> (bool,Color) """
pass
@staticmethod
def dotExportShadowRegion(PartIdentifiers):
""" dotExportShadowRegion(PartIdentifiers: ArrayList) -> ArrayList """
pass
@staticmethod
def dotExportShadowRegionComplement(PartIdentifiers):
""" dotExportShadowRegionComplement(PartIdentifiers: ArrayList) -> ArrayList """
pass
@staticmethod
def dotGetCurrentModificationStampGuid():
""" dotGetCurrentModificationStampGuid() -> str """
pass
@staticmethod
def dotGetDatabaseVersion():
""" dotGetDatabaseVersion() -> int """
pass
@staticmethod
def dotGetDataBaseVersionInfoFromModel(ModelName,ModelPath,ModelVersion,CurrentVersion):
""" dotGetDataBaseVersionInfoFromModel(ModelName: str,ModelPath: str,ModelVersion: int,CurrentVersion: int) -> (bool,int,int) """
pass
@staticmethod
def dotGetDeletedObjecs(ModStamp,ObjectTypes,returnAlsoIfObjectIsCreatedAndDeletedAfterEvent):
""" dotGetDeletedObjecs(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum],returnAlsoIfObjectIsCreatedAndDeletedAfterEvent: bool) -> ModelObjectEnumerator """
pass
@staticmethod
def dotGetModifications(ModStamp,ObjectTypes,returnAlsoIfObjectIsCreatedAndDeletedAfterEvent):
""" dotGetModifications(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum],returnAlsoIfObjectIsCreatedAndDeletedAfterEvent: bool) -> ModificationInfo """
pass
@staticmethod
def dotGetModificationsByFilter(ModStamp,FilterName):
""" dotGetModificationsByFilter(ModStamp: str,FilterName: str) -> ModelObjectEnumerator """
pass
@staticmethod
def dotGetObjectsWithAnyModification(ModStamp,ObjectTypes):
""" dotGetObjectsWithAnyModification(ModStamp: str,ObjectTypes: IEnumerable[ModelObjectEnum]) -> ModelObjectEnumerator """
pass
@staticmethod
def dotIsModelSaved(ModelFolder):
""" dotIsModelSaved(ModelFolder: str) -> bool """
pass
@staticmethod
def dotModelImportIsEnabled():
""" dotModelImportIsEnabled() -> bool """
pass
@staticmethod
def dotModelSharingLicenseInfo(ProfileId):
""" dotModelSharingLicenseInfo(ProfileId: str) -> bool """
pass
@staticmethod
def dotQuitProgram(Comment,User):
""" dotQuitProgram(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotRedo():
""" dotRedo() """
pass
@staticmethod
def dotResetUserOptionToDefaultValue(VariableName):
""" dotResetUserOptionToDefaultValue(VariableName: str) -> bool """
pass
@staticmethod
def dotSaveAsModel(path,Comment,User):
""" dotSaveAsModel(path: str,Comment: str,User: str) -> bool """
pass
@staticmethod
def dotSaveModel(Comment,User):
""" dotSaveModel(Comment: str,User: str) -> bool """
pass
@staticmethod
def dotSetAdvancedOption(VariableName,Value):
"""
dotSetAdvancedOption(VariableName: str,Value: str) -> bool
dotSetAdvancedOption(VariableName: str,Value: float) -> bool
dotSetAdvancedOption(VariableName: str,Value: bool) -> bool
dotSetAdvancedOption(VariableName: str,Value: int) -> bool
"""
pass
@staticmethod
def dotSetUserModelRole(modelId,modelFolder,userId,role):
""" dotSetUserModelRole(modelId: Guid,modelFolder: str,userId: Guid,role: DotSharingPrivilegeEnum) -> bool """
pass
@staticmethod
def dotSharingCommandResult(commandId,success,ErrorCode,ErrorDetail):
""" dotSharingCommandResult(commandId: int,success: bool,ErrorCode: DotSharingErrorCodeEnum,ErrorDetail: str) -> bool """
pass
@staticmethod
def dotSharingCreateEmptyModel(modelName,modelPath):
""" dotSharingCreateEmptyModel(modelName: str,modelPath: str) -> bool """
pass
@staticmethod
def dotSharingCreateNewModel(modelName,modelPath):
""" dotSharingCreateNewModel(modelName: str,modelPath: str) -> bool """
pass
@staticmethod
def dotSharingCreateStartSharingBackup(backupFolder):
""" dotSharingCreateStartSharingBackup(backupFolder: str) -> bool """
pass
@staticmethod
def dotSharingGetVersionGuid(versionGuid):
""" dotSharingGetVersionGuid(versionGuid: Guid) -> (bool,Guid) """
pass
@staticmethod
def dotSharingIsEnabled():
""" dotSharingIsEnabled() -> bool """
pass
@staticmethod
def dotSharingLogPrint(type,message):
""" dotSharingLogPrint(type: DotSharingLogTypeEnum,message: str) """
pass
@staticmethod
def dotSharingMakeModelShareable(xml):
""" dotSharingMakeModelShareable(xml: str) -> bool """
pass
@staticmethod
def dotSharingOpenModelForJoin(modelFolder):
""" dotSharingOpenModelForJoin(modelFolder: str) -> bool """
pass
@staticmethod
def dotSharingReadIn(packetFolder,packetNumber,errorCode,errorDetail,moduleBaselines):
""" dotSharingReadIn(packetFolder: str,packetNumber: int) -> (bool,DotSharingErrorCodeEnum,str,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotSharingReadInCommit(success,joiningSharing):
""" dotSharingReadInCommit(success: bool,joiningSharing: bool) -> bool """
pass
@staticmethod
def dotSharingReadInStarting(joiningSharing):
""" dotSharingReadInStarting(joiningSharing: bool) -> bool """
pass
@staticmethod
def dotSharingRegisterPlugin(name,asynchronous):
""" dotSharingRegisterPlugin(name: str,asynchronous: bool) -> bool """
pass
@staticmethod
def dotSharingRestoreStartSharingBackup(backupFolder):
""" dotSharingRestoreStartSharingBackup(backupFolder: str) -> bool """
pass
@staticmethod
def dotSharingSaveVersionGuid(versionGuid,packetNumber,baselines):
""" dotSharingSaveVersionGuid(versionGuid: Guid,packetNumber: int,baselines: Dictionary[str,str]) -> bool """
pass
@staticmethod
def dotSharingSetMenu(privilege):
""" dotSharingSetMenu(privilege: DotSharingPrivilegeEnum) -> bool """
pass
@staticmethod
def dotSharingShowReadInChanges():
""" dotSharingShowReadInChanges() -> bool """
pass
@staticmethod
def dotSharingWriteOut(permission,packetFolder,mode,revisionInfo,errorCode,errorDetail,moduleBaselines):
""" dotSharingWriteOut(permission: DotSharingPrivilegeEnum,packetFolder: str,mode: DotSharingWriteOutModeEnum,revisionInfo: str) -> (bool,DotSharingErrorCodeEnum,str,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotSharingWriteOutCommit(success,packetFolder,packetNumber,moduleBaselines):
""" dotSharingWriteOutCommit(success: bool,packetFolder: str,packetNumber: int) -> (bool,Dictionary[str,Tuple[str,str]]) """
pass
@staticmethod
def dotStartAction(ActionName,Parameters):
""" dotStartAction(ActionName: str,Parameters: str) -> bool """
pass
@staticmethod
def dotStartCommand(CommandName,Parameters):
""" dotStartCommand(CommandName: str,Parameters: str) -> bool """
pass
@staticmethod
def dotStartCustomComponentCreation(ComponentName):
""" dotStartCustomComponentCreation(ComponentName: str) -> bool """
pass
@staticmethod
def dotStartPluginCreation(ComponentName):
""" dotStartPluginCreation(ComponentName: str) -> bool """
pass
@staticmethod
def dotUndo():
""" dotUndo() """
pass
@staticmethod
def dotWriteToSessionLog(Message):
""" dotWriteToSessionLog(Message: str) -> bool """
pass
@staticmethod
def ExportIFCFromAll(ModelName,FullFileName,ViewType,PropertySets,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromAll(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromFilteredObjects(ModelName,FullFileName,ViewType,PropertySets,FilterName,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromFilteredObjects(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],FilterName: str,BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromObjects(ModelName,FullFileName,ViewType,PropertySets,ModelObjects,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromObjects(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],ModelObjects: List[ModelObject],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def ExportIFCFromSelected(ModelName,FullFileName,ViewType,PropertySets,BasePoint,UseTimer,CreateReport):
""" ExportIFCFromSelected(ModelName: str,FullFileName: str,ViewType: IFCExportViewTypeEnum,PropertySets: List[str],BasePoint: IFCExportBasePoint,UseTimer: bool,CreateReport: bool) -> bool """
pass
@staticmethod
def GetBasePointByGuid(guid):
""" GetBasePointByGuid(guid: Guid) -> BasePoint """
pass
@staticmethod
def GetBasePointByName(name):
""" GetBasePointByName(name: str) -> BasePoint """
pass
@staticmethod
def GetBasePoints():
""" GetBasePoints() -> List[BasePoint] """
pass
@staticmethod
def ModifyBasePoint(basePoint):
""" ModifyBasePoint(basePoint: BasePoint) -> bool """
pass
@staticmethod
def RollbackToTestSavePoint():
""" RollbackToTestSavePoint() """
pass
@staticmethod
def SetTestSavePoint():
""" SetTestSavePoint() """
pass
DotSharingErrorCodeEnum=None
DotSharingLogTypeEnum=None
DotSharingPrivilegeEnum=None
DotSharingWriteOutModeEnum=None
IFCExportBasePoint=None
IFCExportViewTypeEnum=None
MacroLocationEnum=None
OperationsMaxMessageLength=2000
SaveOperationEnum=None
SharingOperationEnum=None
UndoOperationEnum=None
__all__=[
'__reduce_ex__',
'AddToPourUnit',
'CreateBasePoint',
'DeleteBasePoint',
'DeleteMacro',
'dotAutoSaveModel',
'dotCheckBoltAssemblyDefinitionsModified',
'dotCheckBoltDefinitionsModified',
'dotCheckCustomPropertiesModified',
'dotCheckDrawingOptionsModified',
'dotCheckDrawingsModified',
'dotCheckMaterialDefinitionsModified',
'dotCheckModelOptionsModified',
'dotCheckObjectModifiedAfterStamp',
'dotCheckProfileDefinitionsModified',
'dotCleanDrawingFiles',
'dotClearUndoLog',
'dotConnectToNewMultiUserServerAndOpenModel',
'dotConvertAndOpenAsMultiUserModel',
'dotConvertAndOpenAsSingleUserModel',
'dotCreateNewMultiUserModel',
'dotCreateNewSharedModel',
'dotCreateNewSingleUserModel',
'dotCreateNewSingleUserModelFromTemplate',
'dotDisplayAutoDefaultSettings',
'dotDisplayComponentHelp',
'dotExcludeFromSharingAndOpen',
'dotExportGetColorRepresentationForObject',
'dotExportShadowRegion',
'dotExportShadowRegionComplement',
'dotGetCurrentModificationStampGuid',
'dotGetDatabaseVersion',
'dotGetDataBaseVersionInfoFromModel',
'dotGetDeletedObjecs',
'dotGetModifications',
'dotGetModificationsByFilter',
'dotGetObjectsWithAnyModification',
'dotIsModelSaved',
'dotModelImportIsEnabled',
'dotModelSharingLicenseInfo',
'dotQuitProgram',
'dotRedo',
'dotResetUserOptionToDefaultValue',
'dotSaveAsModel',
'dotSaveModel',
'dotSetAdvancedOption',
'dotSetUserModelRole',
'dotSharingCommandResult',
'dotSharingCreateEmptyModel',
'dotSharingCreateNewModel',
'dotSharingCreateStartSharingBackup',
'DotSharingErrorCodeEnum',
'dotSharingGetVersionGuid',
'dotSharingIsEnabled',
'dotSharingLogPrint',
'DotSharingLogTypeEnum',
'dotSharingMakeModelShareable',
'dotSharingOpenModelForJoin',
'DotSharingPrivilegeEnum',
'dotSharingReadIn',
'dotSharingReadInCommit',
'dotSharingReadInStarting',
'dotSharingRegisterPlugin',
'dotSharingRestoreStartSharingBackup',
'dotSharingSaveVersionGuid',
'dotSharingSetMenu',
'dotSharingShowReadInChanges',
'dotSharingWriteOut',
'dotSharingWriteOutCommit',
'DotSharingWriteOutModeEnum',
'dotStartAction',
'dotStartCommand',
'dotStartCustomComponentCreation',
'dotStartPluginCreation',
'dotUndo',
'dotWriteToSessionLog',
'ExportIFCFromAll',
'ExportIFCFromFilteredObjects',
'ExportIFCFromObjects',
'ExportIFCFromSelected',
'GetBasePointByGuid',
'GetBasePointByName',
'GetBasePoints',
'IFCExportBasePoint',
'IFCExportViewTypeEnum',
'MacroLocationEnum',
'ModifyBasePoint',
'OperationsMaxMessageLength',
'RollbackToTestSavePoint',
'SaveOperationEnum',
'SetTestSavePoint',
'SharingOperationEnum',
'UndoOperationEnum',
] | 0.542136 | 0.179495 |
import matplotlib.pyplot as plt
import seaborn as sns
from particle.plotting import (
plot_averaged_convergence_from_clusters,
plot_averaged_avg_vel,
# plot_avg_vel,
)
# rc("text", usetex=True)
sns.set(style="white", context="talk")
search_parameters = {
"particle_count": 480,
"G": "Smooth",
"scaling": "Local",
"phi": "Gamma",
"gamma": 0.05,
# "initial_dist_x": "one_cluster",
"initial_dist_v": "pos_normal_dn",
"T_end": 2000.0,
"dt": 0.01,
"D": 1.0,
}
yaml_path = "../Experiments/cutoff_phi_no_of_clusters_ten_runs_higher_noise_smaller_gamma_long_run"
is_log_scale = True
plot_all = True
fn = "_smaller_gamma_"
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(15, 5), sharex=True)
# ax2 = plot_avg_vel(ax2, search_parameters, logx=is_log_scale, exp_yaml=yaml_path)
ax2 = plot_averaged_avg_vel(
ax2, search_parameters, logx=is_log_scale, include_traj=plot_all, exp_yaml=yaml_path
)
ax1 = plot_averaged_convergence_from_clusters(
ax1, search_parameters, yaml_path, logx=is_log_scale
)
ax1.plot([0, search_parameters["T_end"]], [7.5, 7.5], "k--", alpha=0.2)
ax2.set(xlabel="Time", ylabel=r"$M^N(t) $")
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
fig.savefig(f"img/CutOffPhiConvergence{fn}logged.jpg", dpi=300)
plt.show()
is_log_scale = False
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(14, 5), sharex=True)
# ax2 = plot_avg_vel(ax2, search_parameters, logx=is_log_scale, exp_yaml=yaml_path)
ax2 = plot_averaged_avg_vel(
ax2, search_parameters, logx=is_log_scale, include_traj=plot_all, exp_yaml=yaml_path
)
ax1 = plot_averaged_convergence_from_clusters(
ax1, search_parameters, yaml_path, logx=is_log_scale
)
ax1.plot([0, search_parameters["T_end"]], [7.5, 7.5], "k--", alpha=0.2)
ax2.set(xlabel="Time", ylabel=r"$M^N(t) $")
plt.tight_layout()
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
fig.savefig(f"img/CutOffPhiConvergence{fn}linear.jpg", dpi=300)
plt.show() | noisysystem_temp/Analysis/CutoffPhiAnalysis.py | import matplotlib.pyplot as plt
import seaborn as sns
from particle.plotting import (
plot_averaged_convergence_from_clusters,
plot_averaged_avg_vel,
# plot_avg_vel,
)
# rc("text", usetex=True)
sns.set(style="white", context="talk")
search_parameters = {
"particle_count": 480,
"G": "Smooth",
"scaling": "Local",
"phi": "Gamma",
"gamma": 0.05,
# "initial_dist_x": "one_cluster",
"initial_dist_v": "pos_normal_dn",
"T_end": 2000.0,
"dt": 0.01,
"D": 1.0,
}
yaml_path = "../Experiments/cutoff_phi_no_of_clusters_ten_runs_higher_noise_smaller_gamma_long_run"
is_log_scale = True
plot_all = True
fn = "_smaller_gamma_"
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(15, 5), sharex=True)
# ax2 = plot_avg_vel(ax2, search_parameters, logx=is_log_scale, exp_yaml=yaml_path)
ax2 = plot_averaged_avg_vel(
ax2, search_parameters, logx=is_log_scale, include_traj=plot_all, exp_yaml=yaml_path
)
ax1 = plot_averaged_convergence_from_clusters(
ax1, search_parameters, yaml_path, logx=is_log_scale
)
ax1.plot([0, search_parameters["T_end"]], [7.5, 7.5], "k--", alpha=0.2)
ax2.set(xlabel="Time", ylabel=r"$M^N(t) $")
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
fig.savefig(f"img/CutOffPhiConvergence{fn}logged.jpg", dpi=300)
plt.show()
is_log_scale = False
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(14, 5), sharex=True)
# ax2 = plot_avg_vel(ax2, search_parameters, logx=is_log_scale, exp_yaml=yaml_path)
ax2 = plot_averaged_avg_vel(
ax2, search_parameters, logx=is_log_scale, include_traj=plot_all, exp_yaml=yaml_path
)
ax1 = plot_averaged_convergence_from_clusters(
ax1, search_parameters, yaml_path, logx=is_log_scale
)
ax1.plot([0, search_parameters["T_end"]], [7.5, 7.5], "k--", alpha=0.2)
ax2.set(xlabel="Time", ylabel=r"$M^N(t) $")
plt.tight_layout()
plt.subplots_adjust(left=0.07, right=0.97, bottom=0.15, top=0.9, wspace=0.23)
fig.savefig(f"img/CutOffPhiConvergence{fn}linear.jpg", dpi=300)
plt.show() | 0.580352 | 0.561395 |
import numpy as np
import pandas as pd
import rdt
from sklearn.model_selection import train_test_split
from sdmetrics.goal import Goal
from sdmetrics.timeseries.base import TimeSeriesMetric
class TimeSeriesEfficacyMetric(TimeSeriesMetric):
"""Base class for Machine Learning Efficacy based metrics on time series.
These metrics build a Machine Learning Classifier that learns to tell the synthetic
data apart from the real data, which later on is evaluated using Cross Validation.
The output of the metric is one minus the average ROC AUC score obtained.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = 'TimeSeries Efficacy'
goal = Goal.MAXIMIZE
min_value = 0.0
max_value = np.inf
@classmethod
def _validate_inputs(cls, real_data, synthetic_data, metadata, entity_columns, target):
metadata, entity_columns = super()._validate_inputs(
real_data, synthetic_data, metadata, entity_columns)
if 'target' in metadata:
target = metadata['target']
elif target is None:
raise TypeError('`target` must be passed either directly or inside `metadata`')
return entity_columns, target
@staticmethod
def _build_xy(transformer, data, entity_columns, target_column):
X = pd.DataFrame()
y = pd.Series()
for entity_id, group in data.groupby(entity_columns):
y = y.append(pd.Series({entity_id: group.pop(target_column).iloc[0]}))
entity_data = group.drop(entity_columns, axis=1)
entity_data = transformer.transform(entity_data)
entity_data = pd.Series({
column: entity_data[column].to_numpy()
for column in entity_data.columns
}, name=entity_id)
X = X.append(entity_data)
return X, y
@classmethod
def _compute_score(cls, real_data, synthetic_data, entity_columns, target):
transformer = rdt.HyperTransformer(dtype_transformers={
'O': 'one_hot_encoding',
'M': rdt.transformers.DatetimeTransformer(strip_constant=True),
})
transformer.fit(real_data.drop(entity_columns + [target], axis=1))
real_x, real_y = cls._build_xy(transformer, real_data, entity_columns, target)
synt_x, synt_y = cls._build_xy(transformer, synthetic_data, entity_columns, target)
train, test = train_test_split(real_x.index, shuffle=True)
real_x_train, real_x_test = real_x.loc[train], real_x.loc[test]
real_y_train, real_y_test = real_y.loc[train], real_y.loc[test]
real_acc = cls._scorer(real_x_train, real_x_test, real_y_train, real_y_test)
synt_acc = cls._scorer(synt_x, real_x_test, synt_y, real_y_test)
return synt_acc / real_acc
@classmethod
def compute(cls, real_data, synthetic_data, metadata=None, entity_columns=None, target=None):
"""Compute this metric.
Args:
real_data (pandas.DataFrame):
The values from the real dataset, passed as a pandas.DataFrame.
synthetic_data (pandas.DataFrame):
The values from the synthetic dataset, passed as a pandas.DataFrame.
metadata (dict):
TimeSeries metadata dict. If not passed, it is build based on the
real_data fields and dtypes.
entity_columns (list[str]):
Names of the columns which identify different time series
sequences.
target (str):
Name of the column to use as the target.
Returns:
Union[float, tuple[float]]:
Metric output.
"""
entity_columns, target = cls._validate_inputs(
real_data, synthetic_data, metadata, entity_columns, target)
return cls._compute_score(real_data, synthetic_data, entity_columns, target) | sdmetrics/timeseries/efficacy/base.py |
import numpy as np
import pandas as pd
import rdt
from sklearn.model_selection import train_test_split
from sdmetrics.goal import Goal
from sdmetrics.timeseries.base import TimeSeriesMetric
class TimeSeriesEfficacyMetric(TimeSeriesMetric):
"""Base class for Machine Learning Efficacy based metrics on time series.
These metrics build a Machine Learning Classifier that learns to tell the synthetic
data apart from the real data, which later on is evaluated using Cross Validation.
The output of the metric is one minus the average ROC AUC score obtained.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = 'TimeSeries Efficacy'
goal = Goal.MAXIMIZE
min_value = 0.0
max_value = np.inf
@classmethod
def _validate_inputs(cls, real_data, synthetic_data, metadata, entity_columns, target):
metadata, entity_columns = super()._validate_inputs(
real_data, synthetic_data, metadata, entity_columns)
if 'target' in metadata:
target = metadata['target']
elif target is None:
raise TypeError('`target` must be passed either directly or inside `metadata`')
return entity_columns, target
@staticmethod
def _build_xy(transformer, data, entity_columns, target_column):
X = pd.DataFrame()
y = pd.Series()
for entity_id, group in data.groupby(entity_columns):
y = y.append(pd.Series({entity_id: group.pop(target_column).iloc[0]}))
entity_data = group.drop(entity_columns, axis=1)
entity_data = transformer.transform(entity_data)
entity_data = pd.Series({
column: entity_data[column].to_numpy()
for column in entity_data.columns
}, name=entity_id)
X = X.append(entity_data)
return X, y
@classmethod
def _compute_score(cls, real_data, synthetic_data, entity_columns, target):
transformer = rdt.HyperTransformer(dtype_transformers={
'O': 'one_hot_encoding',
'M': rdt.transformers.DatetimeTransformer(strip_constant=True),
})
transformer.fit(real_data.drop(entity_columns + [target], axis=1))
real_x, real_y = cls._build_xy(transformer, real_data, entity_columns, target)
synt_x, synt_y = cls._build_xy(transformer, synthetic_data, entity_columns, target)
train, test = train_test_split(real_x.index, shuffle=True)
real_x_train, real_x_test = real_x.loc[train], real_x.loc[test]
real_y_train, real_y_test = real_y.loc[train], real_y.loc[test]
real_acc = cls._scorer(real_x_train, real_x_test, real_y_train, real_y_test)
synt_acc = cls._scorer(synt_x, real_x_test, synt_y, real_y_test)
return synt_acc / real_acc
@classmethod
def compute(cls, real_data, synthetic_data, metadata=None, entity_columns=None, target=None):
"""Compute this metric.
Args:
real_data (pandas.DataFrame):
The values from the real dataset, passed as a pandas.DataFrame.
synthetic_data (pandas.DataFrame):
The values from the synthetic dataset, passed as a pandas.DataFrame.
metadata (dict):
TimeSeries metadata dict. If not passed, it is build based on the
real_data fields and dtypes.
entity_columns (list[str]):
Names of the columns which identify different time series
sequences.
target (str):
Name of the column to use as the target.
Returns:
Union[float, tuple[float]]:
Metric output.
"""
entity_columns, target = cls._validate_inputs(
real_data, synthetic_data, metadata, entity_columns, target)
return cls._compute_score(real_data, synthetic_data, entity_columns, target) | 0.943712 | 0.592254 |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AccessControl',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField(db_index=True)),
('read', models.NullBooleanField()),
('write', models.NullBooleanField()),
('manage', models.NullBooleanField()),
('restrictions_repr', models.TextField(default=b'', blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'access_accesscontrol',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('attribute', models.CharField(max_length=255)),
],
options={
'db_table': 'access_attribute',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AttributeValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=255)),
('attribute', models.ForeignKey(to='access.Attribute')),
],
options={
'db_table': 'access_attributevalue',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtendedGroup',
fields=[
('group_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='auth.Group')),
('type', models.CharField(max_length=1, choices=[(b'A', b'Authenticated'), (b'I', b'IP Address based'), (b'P', b'Attribute based'), (b'E', b'Everybody')])),
],
options={
'db_table': 'access_extendedgroup',
},
bases=('auth.group',),
),
migrations.CreateModel(
name='Subnet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subnet', models.CharField(max_length=80)),
('group', models.ForeignKey(to='access.ExtendedGroup')),
],
options={
'db_table': 'access_subnet',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='attribute',
name='group',
field=models.ForeignKey(to='access.ExtendedGroup'),
preserve_default=True,
),
migrations.AddField(
model_name='accesscontrol',
name='usergroup',
field=models.ForeignKey(blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='accesscontrol',
unique_together=set([('content_type', 'object_id', 'user', 'usergroup')]),
),
] | mdid3/core/access/migrations/0001_initial.py | from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AccessControl',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField(db_index=True)),
('read', models.NullBooleanField()),
('write', models.NullBooleanField()),
('manage', models.NullBooleanField()),
('restrictions_repr', models.TextField(default=b'', blank=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'access_accesscontrol',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Attribute',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('attribute', models.CharField(max_length=255)),
],
options={
'db_table': 'access_attribute',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AttributeValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=255)),
('attribute', models.ForeignKey(to='access.Attribute')),
],
options={
'db_table': 'access_attributevalue',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ExtendedGroup',
fields=[
('group_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='auth.Group')),
('type', models.CharField(max_length=1, choices=[(b'A', b'Authenticated'), (b'I', b'IP Address based'), (b'P', b'Attribute based'), (b'E', b'Everybody')])),
],
options={
'db_table': 'access_extendedgroup',
},
bases=('auth.group',),
),
migrations.CreateModel(
name='Subnet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subnet', models.CharField(max_length=80)),
('group', models.ForeignKey(to='access.ExtendedGroup')),
],
options={
'db_table': 'access_subnet',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='attribute',
name='group',
field=models.ForeignKey(to='access.ExtendedGroup'),
preserve_default=True,
),
migrations.AddField(
model_name='accesscontrol',
name='usergroup',
field=models.ForeignKey(blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='accesscontrol',
unique_together=set([('content_type', 'object_id', 'user', 'usergroup')]),
),
] | 0.607314 | 0.154153 |
from PIL import Image
import os
import glob
class CleanImages:
def __init__(self, path: str, output_path: str) -> None:
"""Creates the CleanImages object and sets the path to the images.
Args:
path (str): path to the images.
"""
self.path = path
self.output_path = output_path
self.images = glob.glob(self.path + '/*.jpg')
self.resized = glob.glob(self.output_path + '/*.jpg')
def clean(self, size: int=224) -> None:
"""Resizes the images to the given size.
Adds black borders to maintain aspect ratio.
Args:
size (int, optional): dimension of image w and h. Defaults to 128.
"""
final_size = (size, size)
print("Looking for items to resize")
print(f"Found {len(self.images)} items to resize")
list_of_processed_files = [new.split("/")[-1] for new in self.resized]
for image in self.images:
print("Next image:")
if str(image.split("/")[-1]) not in list_of_processed_files:
print(f'{image.split("/")[-1]} not in resized')
print("Resizing image")
image_name = os.path.basename(image)
print(f'image_name: {image_name}')
black_image = Image.new('RGB', final_size, color='black')
img = Image.open(image)
img = img.convert('RGB')
max_dimension = max(img.width, img.height)
print(f'Max dimension: {max_dimension}')
ratio = final_size[0] / max_dimension
new_image_size = (int(img.width * ratio), int(img.height * ratio))
img = img.resize(new_image_size)
print(f'New image size: {new_image_size}')
black_image.paste(
img,
(int((final_size[0] - new_image_size[0]) / 2),
int((final_size[1] - new_image_size[1]) / 2)))
print(f'Saving image: {self.output_path}/{image_name}')
black_image.save(f'{self.output_path}/{image_name}')
else:
print(f'Image already resized: {image}')
if __name__ == "__main__":
size = 224
clean_images = CleanImages('../images', '../resized'+str(size))
clean_images.clean(size) | classes/clean_images.py | from PIL import Image
import os
import glob
class CleanImages:
def __init__(self, path: str, output_path: str) -> None:
"""Creates the CleanImages object and sets the path to the images.
Args:
path (str): path to the images.
"""
self.path = path
self.output_path = output_path
self.images = glob.glob(self.path + '/*.jpg')
self.resized = glob.glob(self.output_path + '/*.jpg')
def clean(self, size: int=224) -> None:
"""Resizes the images to the given size.
Adds black borders to maintain aspect ratio.
Args:
size (int, optional): dimension of image w and h. Defaults to 128.
"""
final_size = (size, size)
print("Looking for items to resize")
print(f"Found {len(self.images)} items to resize")
list_of_processed_files = [new.split("/")[-1] for new in self.resized]
for image in self.images:
print("Next image:")
if str(image.split("/")[-1]) not in list_of_processed_files:
print(f'{image.split("/")[-1]} not in resized')
print("Resizing image")
image_name = os.path.basename(image)
print(f'image_name: {image_name}')
black_image = Image.new('RGB', final_size, color='black')
img = Image.open(image)
img = img.convert('RGB')
max_dimension = max(img.width, img.height)
print(f'Max dimension: {max_dimension}')
ratio = final_size[0] / max_dimension
new_image_size = (int(img.width * ratio), int(img.height * ratio))
img = img.resize(new_image_size)
print(f'New image size: {new_image_size}')
black_image.paste(
img,
(int((final_size[0] - new_image_size[0]) / 2),
int((final_size[1] - new_image_size[1]) / 2)))
print(f'Saving image: {self.output_path}/{image_name}')
black_image.save(f'{self.output_path}/{image_name}')
else:
print(f'Image already resized: {image}')
if __name__ == "__main__":
size = 224
clean_images = CleanImages('../images', '../resized'+str(size))
clean_images.clean(size) | 0.63861 | 0.317876 |
import json
import time
import requests
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# zabbix认证信息
zabbix_url = "http://52.80.127.55/zabbix/api_jsonrpc.php"
zabbix_username = "Admin"
zabbix_password = "<PASSWORD>"
#全局变量定义
local_path = os.path.split(os.path.realpath(__file__))[0]
log_file = local_path + os.path.sep + "log_zabbixapi.log"
headers = {"Content-Type": "application/json"}
#记录日志模块
def log(data):
file = open(log_file, 'a+')
date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
try:
file.write("%s %s" %(date,data)+'\n')
finally:
file.close()
#zabbix登陆
def zabbix_login():
try:
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "user.login",
"params": {
"user": zabbix_username,
"password": <PASSWORD>
},
"id": 0
})
request_data = requests.post(zabbix_url, data=data, headers=headers)
return json.loads(request_data.text)['result']
except BaseException,e:
log("zabbix_login: %s" %e)
return "error"
#zabbix退出
def zabbix_logout(token):
try:
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "user.logout",
"params": [],
"id": 0,
"auth": token
})
request_data = requests.post(zabbix_url, data=data, headers=headers)
result = json.loads(request_data.text)['result']
if result:
return "ok"
else:
log("登出失败,原因:%s" %e)
return "error"
except BaseException,e:
log("zabbix_logout: %s" %e)
return "error"
#获取主机组id
def get_group_id(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"filter": {
"name": [
group_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
group_id = json.loads(request.text)['result']
if len(group_id) == 0:
return "null"
else:
return group_id[0]['groupid']
except BaseException,e:
log("get_group_id: %s" %e)
return "error"
finally:
zabbix_logout(token)
#创建服务器组
def create_group(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.create",
"params": {
"name": group_name
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
group_id = json.loads(request.text)['result']['groupids'][0]
return group_id
except BaseException,e:
log("create_group: %s" %e)
return "error"
finally:
zabbix_logout(token)
#获取模板id
def get_template_id(template_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "template.get",
"params": {
"output": "extend",
"filter": {
"host": [
template_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
template_id = json.loads(request.text)['result'][0]['templateid']
return template_id
except BaseException,e:
log('get_template_id: %s' %e)
return "error"
finally:
zabbix_logout(token)
#创建主机
def create_host(host_name,group_name,host_ip,host_port,template_name):
try:
token = zabbix_login()
template_id = get_template_id(template_name)
if template_id == "error":
return "error"
group_id = get_group_id(group_name)
if group_id == "error":
return "error"
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.create",
"params": {
"host": host_name,
"interfaces": [
{
"type": 1,
"main": 1,
"useip": 1,
"ip": host_ip,
"dns": "",
"port": host_port
}
],
"groups": [
{
"groupid": group_id
}
],
"templates": [
{
"templateid": template_id
}
],
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_id = json.loads(request.text)['result']['hostids'][0]
return host_id
except BaseException,e:
log('create_host: %s' %e)
return "error"
finally:
zabbix_logout(token)
#删除主机
def delete_host(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.delete",
"params": [
host_id
],
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_id_deleted = json.loads(request.text)['result']['hostids'][0]
if host_id_deleted == host_id:
return "ok"
else:
log('delete_host: failed %s' %request.text)
return "failed"
except BaseException,e:
log('delete_host: %s' %e)
return "error"
#获取主机状态(监控状态是否正常)
def get_host_status(hostid):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.get",
"params": {
"output": ["available"],
"hostids": hostid
},
"id": 0,
"auth": token
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_status = json.loads(request.text)['result'][0]['available']
if host_status == '1':
return "available"
else:
return "unavailable"
except BaseException,e:
log('get_host_status: %s' %e)
return "error"
finally:
zabbix_logout(token)
#根据监控名获取监控项最新值
def get_item_value_name(host_id, item_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"output": "extend",
"hostids": host_id,
"search": {
"name": item_name
},
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
last_value = json.loads(request.text)['result'][0]['lastvalue']
return last_value
except BaseException,e:
log('get_item_value_name: %s' %e)
return "error"
finally:
zabbix_logout(token)
#根据监控项键值获取监控项最新值
def get_item_value_key(host_id, item_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"output": "extend",
"hostids": host_id,
"search": {
"key_": item_name
},
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
last_value = json.loads(request.text)['result'][0]['lastvalue']
return last_value
except BaseException,e:
log('get_item_value_key: %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取某个主机组下所有主机id
def get_group_hosts_id(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"selectHosts": "hostid",
"filter": {
"name": [
group_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
hosts = json.loads(request.text)['result'][0]['hosts']
host_id_list = []
for host_id in hosts:
host_id_list.append(host_id)
return host_id_list
except BaseException,e:
log('get_group_hosts_id %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取主机的监控项数
def get_host_item_num(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"hostids": host_id,
"countOutput": "true",
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_num = json.loads(request.text)['result']
return item_num
except BaseException,e:
log('get_item_num: %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取主机的自发现规则id列表
def get_LLD_ids(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.get",
"params": {
"output": "extend",
"hostids": host_id
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_ids = json.loads(request.text)['result']
lld_id_list = []
for item_id in item_ids:
lld_id_list.append(item_id['itemid'])
return lld_id_list
except BaseException,e:
log('get_LLD_ids: %s' %e)
return "error"
finally:
zabbix_logout(token)
#开启某个主机的自发现规则
def LLD_on(item_id, host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.update",
"params": {
"itemid": item_id,
"hostids": host_id,
"status": 0
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_result = json.loads(request.text)['result']['itemids']
if len(item_result) != 0:
return "ok"
else:
return "failed"
except BaseException,e:
log('LLD_on: %s' %e)
return "error"
finally:
zabbix_logout(token)
#关闭某个主机的自发现规则
def LLD_off(item_id, host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.update",
"params": {
"itemid": item_id,
"hostids": host_id,
"status": 1
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
lld_result = json.loads(request.text)['result']['itemids']
if len(lld_result) != 0:
return "ok"
else:
return "failed"
except BaseException,e:
log('LLD_off: %s' %e)
return "error"
finally:
zabbix_logout(token) | zabbixapi.py | import json
import time
import requests
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# zabbix认证信息
zabbix_url = "http://52.80.127.55/zabbix/api_jsonrpc.php"
zabbix_username = "Admin"
zabbix_password = "<PASSWORD>"
#全局变量定义
local_path = os.path.split(os.path.realpath(__file__))[0]
log_file = local_path + os.path.sep + "log_zabbixapi.log"
headers = {"Content-Type": "application/json"}
#记录日志模块
def log(data):
file = open(log_file, 'a+')
date = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
try:
file.write("%s %s" %(date,data)+'\n')
finally:
file.close()
#zabbix登陆
def zabbix_login():
try:
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "user.login",
"params": {
"user": zabbix_username,
"password": <PASSWORD>
},
"id": 0
})
request_data = requests.post(zabbix_url, data=data, headers=headers)
return json.loads(request_data.text)['result']
except BaseException,e:
log("zabbix_login: %s" %e)
return "error"
#zabbix退出
def zabbix_logout(token):
try:
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "user.logout",
"params": [],
"id": 0,
"auth": token
})
request_data = requests.post(zabbix_url, data=data, headers=headers)
result = json.loads(request_data.text)['result']
if result:
return "ok"
else:
log("登出失败,原因:%s" %e)
return "error"
except BaseException,e:
log("zabbix_logout: %s" %e)
return "error"
#获取主机组id
def get_group_id(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"output": "extend",
"filter": {
"name": [
group_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
group_id = json.loads(request.text)['result']
if len(group_id) == 0:
return "null"
else:
return group_id[0]['groupid']
except BaseException,e:
log("get_group_id: %s" %e)
return "error"
finally:
zabbix_logout(token)
#创建服务器组
def create_group(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.create",
"params": {
"name": group_name
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
group_id = json.loads(request.text)['result']['groupids'][0]
return group_id
except BaseException,e:
log("create_group: %s" %e)
return "error"
finally:
zabbix_logout(token)
#获取模板id
def get_template_id(template_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "template.get",
"params": {
"output": "extend",
"filter": {
"host": [
template_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
template_id = json.loads(request.text)['result'][0]['templateid']
return template_id
except BaseException,e:
log('get_template_id: %s' %e)
return "error"
finally:
zabbix_logout(token)
#创建主机
def create_host(host_name,group_name,host_ip,host_port,template_name):
try:
token = zabbix_login()
template_id = get_template_id(template_name)
if template_id == "error":
return "error"
group_id = get_group_id(group_name)
if group_id == "error":
return "error"
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.create",
"params": {
"host": host_name,
"interfaces": [
{
"type": 1,
"main": 1,
"useip": 1,
"ip": host_ip,
"dns": "",
"port": host_port
}
],
"groups": [
{
"groupid": group_id
}
],
"templates": [
{
"templateid": template_id
}
],
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_id = json.loads(request.text)['result']['hostids'][0]
return host_id
except BaseException,e:
log('create_host: %s' %e)
return "error"
finally:
zabbix_logout(token)
#删除主机
def delete_host(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.delete",
"params": [
host_id
],
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_id_deleted = json.loads(request.text)['result']['hostids'][0]
if host_id_deleted == host_id:
return "ok"
else:
log('delete_host: failed %s' %request.text)
return "failed"
except BaseException,e:
log('delete_host: %s' %e)
return "error"
#获取主机状态(监控状态是否正常)
def get_host_status(hostid):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "host.get",
"params": {
"output": ["available"],
"hostids": hostid
},
"id": 0,
"auth": token
})
request = requests.post(zabbix_url, data=data, headers=headers)
host_status = json.loads(request.text)['result'][0]['available']
if host_status == '1':
return "available"
else:
return "unavailable"
except BaseException,e:
log('get_host_status: %s' %e)
return "error"
finally:
zabbix_logout(token)
#根据监控名获取监控项最新值
def get_item_value_name(host_id, item_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"output": "extend",
"hostids": host_id,
"search": {
"name": item_name
},
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
last_value = json.loads(request.text)['result'][0]['lastvalue']
return last_value
except BaseException,e:
log('get_item_value_name: %s' %e)
return "error"
finally:
zabbix_logout(token)
#根据监控项键值获取监控项最新值
def get_item_value_key(host_id, item_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"output": "extend",
"hostids": host_id,
"search": {
"key_": item_name
},
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
last_value = json.loads(request.text)['result'][0]['lastvalue']
return last_value
except BaseException,e:
log('get_item_value_key: %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取某个主机组下所有主机id
def get_group_hosts_id(group_name):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "hostgroup.get",
"params": {
"selectHosts": "hostid",
"filter": {
"name": [
group_name
]
}
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
hosts = json.loads(request.text)['result'][0]['hosts']
host_id_list = []
for host_id in hosts:
host_id_list.append(host_id)
return host_id_list
except BaseException,e:
log('get_group_hosts_id %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取主机的监控项数
def get_host_item_num(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "item.get",
"params": {
"hostids": host_id,
"countOutput": "true",
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_num = json.loads(request.text)['result']
return item_num
except BaseException,e:
log('get_item_num: %s' %e)
return "error"
finally:
zabbix_logout(token)
#获取主机的自发现规则id列表
def get_LLD_ids(host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.get",
"params": {
"output": "extend",
"hostids": host_id
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_ids = json.loads(request.text)['result']
lld_id_list = []
for item_id in item_ids:
lld_id_list.append(item_id['itemid'])
return lld_id_list
except BaseException,e:
log('get_LLD_ids: %s' %e)
return "error"
finally:
zabbix_logout(token)
#开启某个主机的自发现规则
def LLD_on(item_id, host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.update",
"params": {
"itemid": item_id,
"hostids": host_id,
"status": 0
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
item_result = json.loads(request.text)['result']['itemids']
if len(item_result) != 0:
return "ok"
else:
return "failed"
except BaseException,e:
log('LLD_on: %s' %e)
return "error"
finally:
zabbix_logout(token)
#关闭某个主机的自发现规则
def LLD_off(item_id, host_id):
try:
token = zabbix_login()
data = json.dumps(
{
"jsonrpc": "2.0",
"method": "discoveryrule.update",
"params": {
"itemid": item_id,
"hostids": host_id,
"status": 1
},
"auth": token,
"id": 0
})
request = requests.post(zabbix_url, data=data, headers=headers)
lld_result = json.loads(request.text)['result']['itemids']
if len(lld_result) != 0:
return "ok"
else:
return "failed"
except BaseException,e:
log('LLD_off: %s' %e)
return "error"
finally:
zabbix_logout(token) | 0.118742 | 0.118105 |
import docx
import re
import pandas as pd
import os
def get_filelist(dir_path):
filelist = []
for file in os.scandir(dir_path):
filelist.append(file.path)
return filelist
def read_tplt(tplt_word):
dic_fill = {} # 记录信息位置与要填写位置的映射字典
pattern = '{\d+}' # 设定模式
document = docx.Document(tplt_word)
tbobj_list = document.tables # 返回列表类型
tbobj = tbobj_list[0] # 文件中第一个表格对象
row_num = len(tbobj.rows)
col_num = len(tbobj.columns)
for row_index in range(row_num):
for col_index in range(col_num):
cell = tbobj.cell(row_index,col_index) # 单元格
search_obj = re.search(pattern,cell.text)
if search_obj: # 查找不到则为False
dic_fill.setdefault(search_obj.group()[1:-1],(row_index,col_index)) # 键为去除{}提取其中数字,值为cell位置
# dic_fill = dict(zip(dic_fill.values(),dic_fill.keys())) # zip打包为元组,这里使字典键值互换
return dic_fill
def read_format_excel(format_excel):
df = pd.read_excel(format_excel)
return df
def add_data(filelist,dic_fill,df):
dic_length = len(dic_fill)
for file in filelist:
ls_data = ['NaN']*dic_length # 初始化行数据
document = docx.Document(file)
tbobj_list = document.tables # 返回列表类型
tbobj = tbobj_list[0] # 文件中第一个表格对象
for key,value in dic_fill.items():
row,col = value[0],value[1]
ls_data[int(key)-1] = tbobj.cell(row,col).text
df.loc[len(df)] = ls_data
return df
def write_excel(df,save_path='result.xlsx'):
try:
df.to_excel(save_path,index=False)
print("当前路径是{}".format(os.getcwd()))
print("{} 存储成功".format(save_path))
except Exception as err:
print(err)
print("存储失败")
def main():
print("请输入word模板文件路径:")
tplt_word = input().replace('"','') # word模板
print("请输入excel模板文件路径:")
format_excel = input().replace('"','') # excel模板
print("请输入要进行汇总的word文件夹路径:")
dir_path = input().replace('"','') # 数据文件夹
# 模板表中每个字段对应的位置,键是字段,值是所在的位置
try:
dic_fill = read_tplt(tplt_word)
df = read_format_excel(format_excel)
filelist = get_filelist(dir_path)
df = add_data(filelist, dic_fill, df)
write_excel(df)
except IndexError:
print("请检查word模板")
except Exception as err:
print(err)
print("请检查输入的文件以及文件夹路径")
if __name__ == "__main__":
main() | CoolTurnProject/WordToExcel/word_to_excel.py | import docx
import re
import pandas as pd
import os
def get_filelist(dir_path):
filelist = []
for file in os.scandir(dir_path):
filelist.append(file.path)
return filelist
def read_tplt(tplt_word):
dic_fill = {} # 记录信息位置与要填写位置的映射字典
pattern = '{\d+}' # 设定模式
document = docx.Document(tplt_word)
tbobj_list = document.tables # 返回列表类型
tbobj = tbobj_list[0] # 文件中第一个表格对象
row_num = len(tbobj.rows)
col_num = len(tbobj.columns)
for row_index in range(row_num):
for col_index in range(col_num):
cell = tbobj.cell(row_index,col_index) # 单元格
search_obj = re.search(pattern,cell.text)
if search_obj: # 查找不到则为False
dic_fill.setdefault(search_obj.group()[1:-1],(row_index,col_index)) # 键为去除{}提取其中数字,值为cell位置
# dic_fill = dict(zip(dic_fill.values(),dic_fill.keys())) # zip打包为元组,这里使字典键值互换
return dic_fill
def read_format_excel(format_excel):
df = pd.read_excel(format_excel)
return df
def add_data(filelist,dic_fill,df):
dic_length = len(dic_fill)
for file in filelist:
ls_data = ['NaN']*dic_length # 初始化行数据
document = docx.Document(file)
tbobj_list = document.tables # 返回列表类型
tbobj = tbobj_list[0] # 文件中第一个表格对象
for key,value in dic_fill.items():
row,col = value[0],value[1]
ls_data[int(key)-1] = tbobj.cell(row,col).text
df.loc[len(df)] = ls_data
return df
def write_excel(df,save_path='result.xlsx'):
try:
df.to_excel(save_path,index=False)
print("当前路径是{}".format(os.getcwd()))
print("{} 存储成功".format(save_path))
except Exception as err:
print(err)
print("存储失败")
def main():
print("请输入word模板文件路径:")
tplt_word = input().replace('"','') # word模板
print("请输入excel模板文件路径:")
format_excel = input().replace('"','') # excel模板
print("请输入要进行汇总的word文件夹路径:")
dir_path = input().replace('"','') # 数据文件夹
# 模板表中每个字段对应的位置,键是字段,值是所在的位置
try:
dic_fill = read_tplt(tplt_word)
df = read_format_excel(format_excel)
filelist = get_filelist(dir_path)
df = add_data(filelist, dic_fill, df)
write_excel(df)
except IndexError:
print("请检查word模板")
except Exception as err:
print(err)
print("请检查输入的文件以及文件夹路径")
if __name__ == "__main__":
main() | 0.10179 | 0.165526 |
import pickle as pkl
from collections import Iterable
import os
import logging
import io
import numpy as np
try:
xrange
except NameError: # python3
xrange = range
logger = logging.getLogger('LineCache')
class LineCache(object):
'''
LineCache caches the line position of a file in the memory. Everytime it access a line, it will seek to the related postion and readline().
Noticing that it may cost some time when you first cache lines of a file.
Usage:
from linecache_ligth import LineCache
linecache = LineCache('a.txt', cache_suffix='.cache')
num_lines = len(linecache)
line_0 = linecache[0]
line_100 = linecache[100]
'''
def __init__(self, filename, cache_suffix='.cache.npy', encoding="utf-8"):
self.filename = filename
self.encoding = encoding
if os.path.exists(self.filename + cache_suffix):
self.line_seek = np.load(self.filename + cache_suffix, mmap_mode="r")
self.num_lines = len(self.line_seek)
else:
self._build_seek_index(cache_suffix)
def _build_seek_index(self, cache_suffix):
logger.info("Caching lines informaiton to %s" % (self.filename + cache_suffix))
with io.open(self.filename, 'r', encoding=self.encoding, errors="ignore") as f:
self.line_seek = []
while True:
seek_pos = f.tell()
line = f.readline()
if not line:
break
self.line_seek.append(seek_pos)
self.line_seek = np.array(self.line_seek)
np.save(self.filename + cache_suffix, self.line_seek)
# Reload
self.line_seek = np.load(self.filename + cache_suffix, mmap_mode="r")
self.num_lines = len(self.line_seek)
def __getitem__(self, line_no):
if isinstance(line_no, slice):
return [self[ii] for ii in xrange(*line_no.indices(len(self)))]
elif isinstance(line_no, Iterable):
return [self[ii] for ii in line_no]
else:
if line_no >= self.num_lines:
raise IndexError("Out of index: line_no:%s num_lines: %s" % (line_no, self.num_lines))
with io.open(self.filename, 'r', encoding=self.encoding, errors="ignore") as fhandle:
fhandle.seek(self.line_seek[line_no])
line = fhandle.readline()
return line
def __len__(self):
return self.num_lines | linecache_light/linecache_light.py | import pickle as pkl
from collections import Iterable
import os
import logging
import io
import numpy as np
try:
xrange
except NameError: # python3
xrange = range
logger = logging.getLogger('LineCache')
class LineCache(object):
'''
LineCache caches the line position of a file in the memory. Everytime it access a line, it will seek to the related postion and readline().
Noticing that it may cost some time when you first cache lines of a file.
Usage:
from linecache_ligth import LineCache
linecache = LineCache('a.txt', cache_suffix='.cache')
num_lines = len(linecache)
line_0 = linecache[0]
line_100 = linecache[100]
'''
def __init__(self, filename, cache_suffix='.cache.npy', encoding="utf-8"):
self.filename = filename
self.encoding = encoding
if os.path.exists(self.filename + cache_suffix):
self.line_seek = np.load(self.filename + cache_suffix, mmap_mode="r")
self.num_lines = len(self.line_seek)
else:
self._build_seek_index(cache_suffix)
def _build_seek_index(self, cache_suffix):
logger.info("Caching lines informaiton to %s" % (self.filename + cache_suffix))
with io.open(self.filename, 'r', encoding=self.encoding, errors="ignore") as f:
self.line_seek = []
while True:
seek_pos = f.tell()
line = f.readline()
if not line:
break
self.line_seek.append(seek_pos)
self.line_seek = np.array(self.line_seek)
np.save(self.filename + cache_suffix, self.line_seek)
# Reload
self.line_seek = np.load(self.filename + cache_suffix, mmap_mode="r")
self.num_lines = len(self.line_seek)
def __getitem__(self, line_no):
if isinstance(line_no, slice):
return [self[ii] for ii in xrange(*line_no.indices(len(self)))]
elif isinstance(line_no, Iterable):
return [self[ii] for ii in line_no]
else:
if line_no >= self.num_lines:
raise IndexError("Out of index: line_no:%s num_lines: %s" % (line_no, self.num_lines))
with io.open(self.filename, 'r', encoding=self.encoding, errors="ignore") as fhandle:
fhandle.seek(self.line_seek[line_no])
line = fhandle.readline()
return line
def __len__(self):
return self.num_lines | 0.392104 | 0.087175 |
from rest_framework import generics, views
from rest_framework import status
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from src.apps.core.views import BaseModelViewSet
from src.apps.core.utilities.response_utils import ResponseHandler
from src.apps.user_profile.api.serializers import (UserProfileSerializer,
PassportSerializer)
from src.services.file_uploads import FileUpload
class UserProfileUpdate(generics.RetrieveUpdateAPIView):
"""Class representing the view for getting and updating a user profile"""
serializer_class = UserProfileSerializer
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs) -> object:
"""
Getting the profile of a logged in user.
"""
instance = self.get_queryset()
serializer = self.get_serializer(instance)
response = ResponseHandler.response(serializer.data)
return Response(response)
def patch(self, request, *args, **kwargs) -> object:
"""
Updates user profile.
"""
instance = self.get_queryset()
serializer = self.get_serializer(instance,
data=request.data,
partial=True)
if serializer.is_valid():
self.perform_update(serializer)
response = ResponseHandler.response(serializer.data)
return Response(response)
error = ResponseHandler.response(serializer.errors,
key='USR_O3',
status='error')
return Response(error, status=status.HTTP_400_BAD_REQUEST)
def get_queryset(self):
"""
Default query set.
"""
user = self.request.user
return user.user_profile
class PassportViewSet(BaseModelViewSet):
"""
View set for Passport.
"""
serializer_class = PassportSerializer
permission_classes = [IsAuthenticated]
BaseModelViewSet.http_method_names += ['delete']
def create(self, request, *args, **kwargs):
"""
Add passport.
"""
return super(self.__class__, self).create(request, key='PASSPORT')
def get_queryset(self):
"""
Default query set
"""
user = self.request.user
return user.user_profile.passports.filter(deleted=False)
class ImageUpload(views.APIView):
"""Passport photograph upload."""
permission_classes = [IsAuthenticated]
parser_classes = [MultiPartParser]
def put(self, request, *args, **kwargs):
"""Put method to upload passport."""
file_obj = request.FILES.get('photo', None)
user_profile_qs = self.get_queryset()
FileUpload.image(file_obj, user_profile_qs)
response = ResponseHandler.response(data=[], key='PHOTO_UPLOAD')
return Response(response)
def get_queryset(self):
"""
Default query set.
"""
user = self.request.user
return user.user_profile | src/apps/user_profile/api/views.py |
from rest_framework import generics, views
from rest_framework import status
from rest_framework.parsers import MultiPartParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from src.apps.core.views import BaseModelViewSet
from src.apps.core.utilities.response_utils import ResponseHandler
from src.apps.user_profile.api.serializers import (UserProfileSerializer,
PassportSerializer)
from src.services.file_uploads import FileUpload
class UserProfileUpdate(generics.RetrieveUpdateAPIView):
"""Class representing the view for getting and updating a user profile"""
serializer_class = UserProfileSerializer
permission_classes = [IsAuthenticated]
def get(self, request, *args, **kwargs) -> object:
"""
Getting the profile of a logged in user.
"""
instance = self.get_queryset()
serializer = self.get_serializer(instance)
response = ResponseHandler.response(serializer.data)
return Response(response)
def patch(self, request, *args, **kwargs) -> object:
"""
Updates user profile.
"""
instance = self.get_queryset()
serializer = self.get_serializer(instance,
data=request.data,
partial=True)
if serializer.is_valid():
self.perform_update(serializer)
response = ResponseHandler.response(serializer.data)
return Response(response)
error = ResponseHandler.response(serializer.errors,
key='USR_O3',
status='error')
return Response(error, status=status.HTTP_400_BAD_REQUEST)
def get_queryset(self):
"""
Default query set.
"""
user = self.request.user
return user.user_profile
class PassportViewSet(BaseModelViewSet):
"""
View set for Passport.
"""
serializer_class = PassportSerializer
permission_classes = [IsAuthenticated]
BaseModelViewSet.http_method_names += ['delete']
def create(self, request, *args, **kwargs):
"""
Add passport.
"""
return super(self.__class__, self).create(request, key='PASSPORT')
def get_queryset(self):
"""
Default query set
"""
user = self.request.user
return user.user_profile.passports.filter(deleted=False)
class ImageUpload(views.APIView):
"""Passport photograph upload."""
permission_classes = [IsAuthenticated]
parser_classes = [MultiPartParser]
def put(self, request, *args, **kwargs):
"""Put method to upload passport."""
file_obj = request.FILES.get('photo', None)
user_profile_qs = self.get_queryset()
FileUpload.image(file_obj, user_profile_qs)
response = ResponseHandler.response(data=[], key='PHOTO_UPLOAD')
return Response(response)
def get_queryset(self):
"""
Default query set.
"""
user = self.request.user
return user.user_profile | 0.767167 | 0.118742 |
from thinglang.utils.exception_utils import ThinglangException
class TargetNotCallable(ThinglangException):
"""
An attempt was made to call a target which is not a method, or is not callable.
For example, attempting to call a class member
"""
class CapturedVoidMethod(ThinglangException):
"""
An attempt was made to use the result of a void method.
"""
class NoMatchingOverload(ThinglangException):
"""
The method was not called with the expected number of arguments
"""
def __init__(self, methods, arguments, exact_matches, inheritance_matches, cast_matches, source_ref):
super().__init__()
self.methods, self.arguments, self.exact_matches, self.inheritance_matches, self.cast_matches, self.source_ref = \
methods, arguments, exact_matches, inheritance_matches, cast_matches, source_ref
def __str__(self):
return f'No matching overload for {self.methods[0].name} using arguments {[x.type for x in self.arguments]} was found.\n' + \
f'Allowable overloads: {", ".join(str(method.arguments) for method in self.methods)}.\n' + \
f'At {self.source_ref}'
class DuplicateHandlerError(ThinglangException):
"""
Multiple handlers of the same exception type were registered
"""
def __init__(self, handler_types):
super().__init__()
self.handler_types = handler_types
def __str__(self):
return f'Duplicate handlers were registered ({", ".join(str(handler_type) for handler_type in self.handler_types)})'
class NoExceptionHandlers(ThinglangException):
"""
No exception handlers were registered for a try blck
"""
def __init__(self, node):
super().__init__()
self.node = node
def __str__(self):
return f'No exception handling blocks were registered (at {self.node.source_ref})'
class ExceptionSpecificityError(ThinglangException):
"""
A handler for an exception was registered after a handler that also catches it.
"""
def __init__(self, specified_exception, prior):
super().__init__()
self.specified_exception, self.prior = specified_exception, prior
def __str__(self):
return f'The exception handler for {self.specified_exception} cannot be reached. ' \
f'Exceptions of this type will be handled by the handler for {self.prior}.'
class InvalidReference(ThinglangException):
"""
Reference to an invalid entity - e.g., missing member or method
"""
def __init__(self, target, search, original_target):
super().__init__()
self.target, self.search, self.original_target = target, search, original_target
def __str__(self):
return f'Cannot find reference {self.search.name}.{self.target} (at {self.original_target.source_ref})'
class SelfInStaticMethod(ThinglangException):
"""
Reference to `self` in a static method
"""
def __init__(self, target):
super().__init__()
self.target = target
def __str__(self):
return f'Usage of self in static method (at {self.target.source_ref})'
class UnfilledGenericParameters(ThinglangException):
"""
A generic symbol map was selected without specifying type parameters
"""
def __init__(self, target, container, element):
super().__init__()
self.container, self.element, self.target = container, element, target
def __str__(self):
return f'Usage of generic class {self.container.name}.{self.element.name if self.element else ""} without specifying parameter types (at {self.target.source_ref})'
class CalledInstanceMethodOnClass(ThinglangException):
"""
An instance method was called on a class
"""
def __init__(self, reference, source_ref):
super().__init__()
self.reference, self.source_ref = reference, source_ref
def __str__(self):
return f'Cannot call instance method on class {self.reference.type} (at {self.source_ref}' | thinglang/compiler/errors.py | from thinglang.utils.exception_utils import ThinglangException
class TargetNotCallable(ThinglangException):
"""
An attempt was made to call a target which is not a method, or is not callable.
For example, attempting to call a class member
"""
class CapturedVoidMethod(ThinglangException):
"""
An attempt was made to use the result of a void method.
"""
class NoMatchingOverload(ThinglangException):
"""
The method was not called with the expected number of arguments
"""
def __init__(self, methods, arguments, exact_matches, inheritance_matches, cast_matches, source_ref):
super().__init__()
self.methods, self.arguments, self.exact_matches, self.inheritance_matches, self.cast_matches, self.source_ref = \
methods, arguments, exact_matches, inheritance_matches, cast_matches, source_ref
def __str__(self):
return f'No matching overload for {self.methods[0].name} using arguments {[x.type for x in self.arguments]} was found.\n' + \
f'Allowable overloads: {", ".join(str(method.arguments) for method in self.methods)}.\n' + \
f'At {self.source_ref}'
class DuplicateHandlerError(ThinglangException):
"""
Multiple handlers of the same exception type were registered
"""
def __init__(self, handler_types):
super().__init__()
self.handler_types = handler_types
def __str__(self):
return f'Duplicate handlers were registered ({", ".join(str(handler_type) for handler_type in self.handler_types)})'
class NoExceptionHandlers(ThinglangException):
"""
No exception handlers were registered for a try blck
"""
def __init__(self, node):
super().__init__()
self.node = node
def __str__(self):
return f'No exception handling blocks were registered (at {self.node.source_ref})'
class ExceptionSpecificityError(ThinglangException):
"""
A handler for an exception was registered after a handler that also catches it.
"""
def __init__(self, specified_exception, prior):
super().__init__()
self.specified_exception, self.prior = specified_exception, prior
def __str__(self):
return f'The exception handler for {self.specified_exception} cannot be reached. ' \
f'Exceptions of this type will be handled by the handler for {self.prior}.'
class InvalidReference(ThinglangException):
"""
Reference to an invalid entity - e.g., missing member or method
"""
def __init__(self, target, search, original_target):
super().__init__()
self.target, self.search, self.original_target = target, search, original_target
def __str__(self):
return f'Cannot find reference {self.search.name}.{self.target} (at {self.original_target.source_ref})'
class SelfInStaticMethod(ThinglangException):
"""
Reference to `self` in a static method
"""
def __init__(self, target):
super().__init__()
self.target = target
def __str__(self):
return f'Usage of self in static method (at {self.target.source_ref})'
class UnfilledGenericParameters(ThinglangException):
"""
A generic symbol map was selected without specifying type parameters
"""
def __init__(self, target, container, element):
super().__init__()
self.container, self.element, self.target = container, element, target
def __str__(self):
return f'Usage of generic class {self.container.name}.{self.element.name if self.element else ""} without specifying parameter types (at {self.target.source_ref})'
class CalledInstanceMethodOnClass(ThinglangException):
"""
An instance method was called on a class
"""
def __init__(self, reference, source_ref):
super().__init__()
self.reference, self.source_ref = reference, source_ref
def __str__(self):
return f'Cannot call instance method on class {self.reference.type} (at {self.source_ref}' | 0.901487 | 0.278649 |
import shutil
import subprocess
import webbrowser
from pathlib import Path
from typing import Union
import typer
from rich import print
from rich.table import Table
from clumper import Clumper
from skedulord import __version__ as lord_version
from skedulord.job import JobRunner
from skedulord.common import SKEDULORD_PATH, heartbeat_path
from skedulord.cron import Cron, clean_cron, parse_job_from_settings
from skedulord.dashboard import Dashboard, generate_color_link_to_log
# Top-level Typer application: each function decorated with @app.command()
# below is exposed as a `skedulord <command>` CLI subcommand. Shell
# completion is disabled deliberately.
app = typer.Typer(
    name="SKEDULORD",
    add_completion=False,
    help="SKEDULORD: helps with cronjobs and logs.",
)
@app.command()
def version():
    """Show the version."""
    # Echo the installed package version; rich's print handles a plain
    # string the same way builtin print does.
    current = lord_version
    print(current)
@app.command()
def run(
    name: str = typer.Argument(..., help="The name you want to assign to the run."),
    command: str = typer.Argument(
        None, help="The command you want to run (in parentheses)."
    ),
    settings_path: Union[Path, None] = typer.Option(None, help="Schedule config to reference."),
    retry: int = typer.Option(2, help="The number of tries, should a job fail."),
    wait: int = typer.Option(60, help="The number of seconds between tries."),
):
    """Run a single command, which is logged by skedulord."""
    if settings_path:
        # The command comes from the schedule config instead of the CLI:
        # pick the entry whose `name` matches this run.
        settings = (
            Clumper.read_yaml(settings_path)
            .unpack("schedule")
            .keep(lambda d: d["name"] == name)
            .collect()
        )
        command = parse_job_from_settings(settings, name)
        print(f"retrieved command: {command}")  # was misspelled "retreived"
    if command is None:
        # Previously a missing command was passed as None to the runner and
        # failed obscurely; fail loudly with a usage error instead.
        raise typer.BadParameter(
            "Provide a command or a --settings-path that defines one."
        )
    runner = JobRunner(retry=retry, wait=wait)
    runner.cmd(name=name, command=command)
@app.command()
def schedule(
    config: Path = typer.Argument(
        ..., help="The config file containing the schedule.", exists=True
    )
):
    """Set (or reset) cron jobs based on config."""
    # Build the cron wrapper from the config file, then replace the
    # current crontab entries with the ones it describes.
    cron = Cron(config)
    cron.set_new_cron()
@app.command()
def wipe(
    what: str = typer.Argument(..., help="What to wipe. Either `disk` or `schedule`."),
    yes: bool = typer.Option(False, is_flag=True, prompt=True, help="Are you sure?"),
    really: bool = typer.Option(False, is_flag=True, prompt=True, help="Really sure?"),
    user: str = typer.Option(None, help="The name of the user. Default: current user."),
):
    """Wipe the disk or schedule state."""
    # Destructive action: require both confirmations.
    if not (yes and really):
        print("Crisis averted.")
        return
    if what == "disk":
        # Remove skedulord's on-disk state directory if it exists; the
        # success message is printed either way (nothing left = clean).
        if Path(SKEDULORD_PATH).exists():
            shutil.rmtree(SKEDULORD_PATH)
        print("Disk state has been cleaned.")
    elif what == "schedule":
        if not user:
            # Default to the invoking OS user, matching whose crontab
            # the schedule was installed under.
            whoami = subprocess.run(["whoami"], stdout=subprocess.PIPE)
            user = whoami.stdout.decode("utf8").strip()
        clean_cron(user=user)
        print("Cron state has been cleaned.")
    else:
        # Previously an unrecognised target silently did nothing after
        # the user had confirmed twice; say so instead.
        print(f"Unknown target {what!r}. Choose `disk` or `schedule`.")
@app.command()
def summary(n: int = typer.Option(10, help="Max number of icons in `recent runs` column."),):
    """Shows a summary of all jobs."""
    clump = Clumper.read_jsonl(heartbeat_path())
    # Per job name: total runs, failed runs, most recent end timestamp.
    # Typo fix: internal key "n_succes" -> "n_success" (renamed consistently below).
    summary = (
        clump
        .group_by("name")
        .mutate(fail=lambda _: _["status"] == "fail")
        .agg(n_total=("id", "count"), n_fail=("fail", "sum"), max_date=("end", "max"))
        .mutate(n_success=lambda _: _["n_total"] - _["n_fail"])
    )
    table = Table(title=None)
    table.add_column("name")
    table.add_column("recent runs")
    table.add_column("last run")
    table.add_column("fail")
    table.add_column("success")  # typo fix: "succes" -> "success"
    table.add_column("total")
    for d in summary:
        job_data = clump.keep(lambda _: _["name"] == d["name"]).head(n).collect()
        recent = " ".join([generate_color_link_to_log(_) for _ in job_data])
        # Bug fix: the icon string and the last-run date were passed in the
        # wrong order, so "recent runs" showed the date and "last run" the icons.
        table.add_row(
            d["name"],
            recent,
            d["max_date"],
            f"[red]{d['n_fail']}[/]",
            f"[green]{d['n_success']}[/]",
            f"{d['n_total']}",
        )
    print(table)
@app.command()
def history(
    n: int = typer.Option(10, help="How many rows should the table show."),
    only_failures: bool = typer.Option(False, is_flag=True, help="Only show failures."),
    # NOTE(review): is_flag=True on the str options `date`/`name` looks
    # unintended (they take values) -- confirm against the typer docs.
    date: str = typer.Option(None, is_flag=True, help="Only show specific date."),
    name: str = typer.Option(
        None, is_flag=True, help="Only show jobs with specific name."
    ),
):
    """Shows a table with job status."""
    # Newest runs first.
    clump = Clumper.read_jsonl(heartbeat_path()).sort(
        lambda _: _["start"], reverse=True
    )
    # Optional filters: failures only, substring match on name / start date.
    if only_failures:
        clump = clump.keep(lambda _: _["status"] != "success")
    if name:
        clump = clump.keep(lambda _: name in _["name"])
    if date:
        clump = clump.keep(lambda _: date in _["start"])
    table = Table(title=None)
    table.add_column("status")
    table.add_column("date")
    table.add_column("name")
    table.add_column("logfile")
    for d in clump.head(n).collect():
        table.add_row(
            f"[{'red' if d['status'] == 'fail' else 'green'}]{d['status']}[/]",
            d["start"],
            d["name"],
            d["logpath"],
        )
    print(table)
@app.command(name="build")
def build_site():
    """
    Builds static html files so you may view a dashboard.
    """
    # Render every heartbeat record into the static dashboard pages.
    data = Clumper.read_jsonl(heartbeat_path()).collect()
    Dashboard(data).build()
@app.command()
def serve(
    build: bool = typer.Option(
        True, is_flag=True, help="Build the dashboard before opening it."
    )
):
    """
    Opens the dashboard in a browser.
    """
    if build:
        build_site()
    # The dashboard index.html lives next to the heartbeat file.
    webbrowser.open_new_tab(f"file://{heartbeat_path().parent / 'index.html'}")
if __name__ == "__main__":
app() | skedulord/__main__.py | import shutil
import subprocess
import webbrowser
from pathlib import Path
from typing import Union
import typer
from rich import print
from rich.table import Table
from clumper import Clumper
from skedulord import __version__ as lord_version
from skedulord.job import JobRunner
from skedulord.common import SKEDULORD_PATH, heartbeat_path
from skedulord.cron import Cron, clean_cron, parse_job_from_settings
from skedulord.dashboard import Dashboard, generate_color_link_to_log
app = typer.Typer(
name="SKEDULORD",
add_completion=False,
help="SKEDULORD: helps with cronjobs and logs.",
)
@app.command()
def version():
"""Show the version."""
print(lord_version)
@app.command()
def run(
name: str = typer.Argument(..., help="The name you want to assign to the run."),
command: str = typer.Argument(
None, help="The command you want to run (in parentheses)."
),
settings_path: Union[Path, None] = typer.Option(None, help="Schedule config to reference."),
retry: int = typer.Option(2, help="The number of tries, should a job fail."),
wait: int = typer.Option(60, help="The number of seconds between tries."),
):
"""Run a single command, which is logged by skedulord."""
runner = JobRunner(retry=retry, wait=wait)
if settings_path:
settings = Clumper.read_yaml(settings_path).unpack("schedule").keep(lambda d: d['name'] == name).collect()
command = parse_job_from_settings(settings, name)
print(f"retreived command: {command}")
runner.cmd(name=name, command=command)
@app.command()
def schedule(
config: Path = typer.Argument(
..., help="The config file containing the schedule.", exists=True
)
):
"""Set (or reset) cron jobs based on config."""
Cron(config).set_new_cron()
@app.command()
def wipe(
what: str = typer.Argument(..., help="What to wipe. Either `disk` or `schedule`."),
yes: bool = typer.Option(False, is_flag=True, prompt=True, help="Are you sure?"),
really: bool = typer.Option(False, is_flag=True, prompt=True, help="Really sure?"),
user: str = typer.Option(None, help="The name of the user. Default: curent user."),
):
"""Wipe the disk or schedule state."""
if yes and really:
if what == "disk":
if Path(SKEDULORD_PATH).exists():
shutil.rmtree(SKEDULORD_PATH)
print("Disk state has been cleaned.")
if what == "schedule":
if not user:
name = subprocess.run(["whoami"], stdout=subprocess.PIPE)
user = name.stdout.decode("utf8").strip()
clean_cron(user=user)
print("Cron state has been cleaned.")
else:
print("Crisis averted.")
@app.command()
def summary(n: int = typer.Option(10, help="Max number of icons in `last run` column."),):
"""Shows a summary of all jobs."""
clump = Clumper.read_jsonl(heartbeat_path())
summary = (
clump
.group_by("name")
.mutate(fail=lambda _: _["status"] == "fail")
.agg(n_total=("id", "count"), n_fail=("fail", "sum"), max_date=("end", "max"))
.mutate(n_succes=lambda _: _["n_total"] - _["n_fail"])
)
table = Table(title=None)
table.add_column("name")
table.add_column("recent runs")
table.add_column("last run")
table.add_column("fail")
table.add_column("succes")
table.add_column("total")
for d in summary:
job_data = clump.keep(lambda _: _["name"] == d["name"]).head(n).collect()
recent = " ".join([generate_color_link_to_log(_) for _ in job_data])
table.add_row(
d["name"],
d["max_date"],
recent,
f"[red]{d['n_fail']}[/]",
f"[green]{d['n_succes']}[/]",
f"{d['n_total']}",
)
print(table)
@app.command()
def history(
n: int = typer.Option(10, help="How many rows should the table show."),
only_failures: bool = typer.Option(False, is_flag=True, help="Only show failures."),
date: str = typer.Option(None, is_flag=True, help="Only show specific date."),
name: str = typer.Option(
None, is_flag=True, help="Only show jobs with specific name."
),
):
"""Shows a table with job status."""
clump = Clumper.read_jsonl(heartbeat_path()).sort(
lambda _: _["start"], reverse=True
)
if only_failures:
clump = clump.keep(lambda _: _["status"] != "success")
if name:
clump = clump.keep(lambda _: name in _["name"])
if date:
clump = clump.keep(lambda _: date in _["start"])
table = Table(title=None)
table.add_column("status")
table.add_column("date")
table.add_column("name")
table.add_column("logfile")
for d in clump.head(n).collect():
table.add_row(
f"[{'red' if d['status'] == 'fail' else 'green'}]{d['status']}[/]",
d["start"],
d["name"],
d["logpath"],
)
print(table)
@app.command(name="build")
def build_site():
"""
Builds static html files so you may view a dashboard.
"""
data = Clumper.read_jsonl(heartbeat_path()).collect()
Dashboard(data).build()
@app.command()
def serve(
build: bool = typer.Option(
True, is_flag=True, help="Build the dashboard before opening it."
)
):
"""
Opens the dashboard in a browser.
"""
if build:
build_site()
webbrowser.open_new_tab(f"file://{heartbeat_path().parent / 'index.html'}")
if __name__ == "__main__":
app() | 0.691602 | 0.154312 |
from __future__ import print_function
import os
import getopt
import sys
import subprocess
import re
def exit_with_usage(error=0, msg=""):
    """Print an optional error message plus the full usage text, then exit
    the process with the given status code."""
    if error != 0:
        print("Error: " + msg)
    print("usage: ./skrm [OPTIONS] [COMMANDS] [TAGS]")
    print("skrm stands for simple keyring manager, it stores keys with tags into a file encrypted using gpg.")
    print("skrm will ask for the master password to encrypt/decrypt the storing file.")
    print("OPTIONS:")
    print("\t-h, --help: Print usage.")
    print("\t-g, --get: Return keyrings matching strictly the given tags. This option is used by default. If a keyId is selected, a get or a search return only the keyring matching the keyId.")
    print("\t-s, --search: Return keyrings matching the given tags (tags are interpreted as a regex expression).")
    print("\t-c, --clip: Copy the key of the last matched keyring from a get or a search into the clipboard using xclip. Nothing will be printed out to the shell.")
    print("COMMANDS:")
    print("\t--file=[FILENAME]: use the given file to read/store keyrings.")
    print("\t--recipient=[USER_ID_NAME]: set the user id name for gpg to get the key and encrypt the file.")
    print("\t--pass=[MASTER_PASS]: set the master pass to use when encrypting or decrypting the file.")
    print("\t--add=[KEY]: add a key to the file with the specified tags.")
    print("\t--select=[KEYID]: select a keyring using its key id. To use with a command like \"remove\" or \"update\".")
    print("\t--remove: remove the selected key.")
    print("\t--update=[KEY]: update the selected key.")
    print("\t--backup=[HOSTDEST]: scp the bdd file to the given host destination.")
    print("TAGS:")
    print("\tA list of strings to define tags you want to use for any commands keyring related management.")
    sys.exit(error)
class KeyringManager:
    """Manage "keyrings" (a list of tags followed by the key itself) stored
    in a gpg-encrypted file.

    Serialization format: fields within a keyring are separated by \\x02,
    keyrings are separated by \\x03 (see parse_raw / parse_bdd).
    """

    def __init__(self, user_pref_path, bdd_path, argv):
        """Load defaults from the user prefs file, then let command-line
        options in `argv` override them."""
        self.read_user_prefs(user_pref_path, bdd_path)
        try:
            opts, args = getopt.getopt(argv, "hgsc", ["help", "file=", "get", "search", "pass=", "add=", "select=", "remove", "update=", "recipient=", "backup=", "clip"])
        except getopt.GetoptError:
            exit_with_usage(1, "Bad arguments.")
        for opt, arg in opts:
            if opt in ("-h", "--help"):
                exit_with_usage()
            elif opt == "--file":
                self.filename = os.path.expanduser(arg)
            elif opt in ("-g", "--get"):
                self.command = "get"
            elif opt in ("-s", "--search"):
                self.command = "search"
            elif opt == "--add":
                self.command = "add"
                self.key = arg
            elif opt == "--select":
                if arg.isdigit():
                    self.keyId = int(arg)
                else:
                    exit_with_usage(1, "The given keyid is not a number.")
            elif opt == "--remove":
                self.command = "remove"
            elif opt == "--update":
                self.command = "update"
                self.key = arg
            elif opt == "--pass":
                self.passphrase = arg
            elif opt == "--recipient":
                self.recipient = arg
            elif opt == "--backup":
                self.command = "backup"
                self.hostdest = arg
            elif opt in ("-c", "--clip"):
                self.clip = 1
        # Remaining positional arguments are the tags for the chosen command.
        for arg in args:
            self.tags.append(arg)

    def read_user_prefs(self, user_pref_path, bdd_path):
        """Initialise all defaults, then overlay `file`/`recipient` values
        found in the prefs file (simple `key=value` lines, `#` comments)."""
        user_pref_file = user_pref_path
        self.filename = bdd_path
        self.command = "get"  # default command
        self.passphrase = ""
        self.tags = []
        self.key = ""
        self.keyId = -1  # -1 means "no keyring selected"
        self.recipient = ""
        self.clip = 0
        try:
            with open(user_pref_file, "r") as f:
                for line in f:
                    option = line.split("=")
                    option[1] = option[1].rstrip('\n')
                    if option[0][0] != '#':  # skip comment lines
                        if option[0] == "file":
                            self.filename = option[1]
                        elif option[0] == "recipient":
                            self.recipient = option[1]
        except IOError:  # user prefs not found: rely on command-line arguments only
            pass

    def load_raw_bdd(self):
        """Decrypt the gpg file and return its raw (serialized) content."""
        args = ["gpg", "-dq"]
        if self.passphrase:
            # Non-interactive decryption with the supplied master passphrase.
            args.append("--no-use-agent")
            args.append("--passphrase")
            args.append(self.passphrase)
        args.append(self.filename)
        p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
        stdout, stderr = p.communicate(None)
        # NOTE(review): `stdout == "" and stdout != ""` is always False, so gpg
        # failures are never reported -- the second operand was probably meant
        # to be `stderr != ""`. Also, stdout is bytes on Python 3, so the ""
        # comparison never matches anyway. TODO confirm and fix.
        if stdout == "" and stdout != "":
            print(stderr)
            exit(1)
        return stdout.rstrip()

    def save_raw_bdd(self, raw):
        """Encrypt `raw` with gpg for the configured recipient, overwriting
        the storage file."""
        args = ["gpg", "--yes", "-e", "-r", self.recipient, "-o", self.filename]
        p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
        stdout, stderr = p.communicate(raw)
        stdout = stdout.rstrip()
        stderr = stderr.rstrip()
        # NOTE(review): on Python 3 stdout/stderr are bytes, so `!= ""` is
        # always True and empty byte strings get printed -- verify.
        if stdout != "":
            print(stdout)
        if stderr != "":
            print(stderr)

    def parse_raw(self, raw):
        """Deserialize raw bytes into a list of keyrings (lists of fields)."""
        bdd = []
        if raw:
            keyrings = raw.split(b"\x03")  # \x03 separates keyrings
            for keyring in keyrings:
                bdd.append(keyring.split(b"\x02"))  # \x02 separates fields
        return bdd

    def parse_bdd(self, bdd):
        """Serialize the in-memory database back to bytes (inverse of
        parse_raw)."""
        raw = b""
        bddLen = len(bdd)
        for i, keyring in enumerate(bdd):
            keyringLen = len(keyring)
            for j, tag in enumerate(keyring):
                if isinstance(tag, str):
                    # Newly added fields are str; fields loaded from disk are bytes.
                    tag = bytes(tag, 'utf8')
                raw += tag
                if j < (keyringLen - 1):
                    raw += b"\x02"
            if i < (bddLen - 1):
                raw += b"\x03"
        return raw

    def save_bdd(self, bdd):
        """Serialize and encrypt the whole database."""
        raw = self.parse_bdd(bdd)
        self.save_raw_bdd(raw)

    def get_fonctor(self, keyring, tag):
        """Return 1 if `tag` equals one of the keyring's tags
        (case-insensitive); the last field (the key itself) is excluded."""
        keyringLen = len(keyring)
        for i, t in enumerate(keyring):
            if i < (keyringLen - 1):  # skip the key (last field)
                if tag.upper() == t.upper().decode('utf8'):
                    return 1
        return 0

    def search_fonctor(self, keyring, tag):
        """Return 1 if `tag`, treated as a regex, matches any of the keyring's
        tags (case-insensitive); the last field (the key) is excluded."""
        keyringLen = len(keyring)
        p = re.compile(tag.upper())
        for i, t in enumerate(keyring):
            if i < (keyringLen - 1):
                if p.search(t.upper().decode('utf8')) != None:
                    return 1
        return 0

    def print_keyring(self, i, keyring):
        """Print keyring `i`, or copy its key to the clipboard when --clip
        was given (platform-dependent helper binary)."""
        if self.clip == 0: # print the keyring
            print(i, end='')
            print(":", end='')
            print(keyring)
        else: # copy the keyring to the clipboard
            from sys import platform as _platform
            if _platform == "linux" or _platform == "linux2": # linux
                args = ["xclip"]
                p = subprocess.Popen(args, stdin = subprocess.PIPE)
                p.communicate(keyring[len(keyring) - 1])  # last field = the key
            elif _platform == "darwin": # OS X
                args = ["pbcopy"]
                p = subprocess.Popen(args, stdin = subprocess.PIPE)
                p.communicate(keyring[len(keyring) - 1])
            elif _platform == "win32": # Windows
                print("Can't copy on clipboard under windows, method not implemented!")

    def print_matching_keyrings(self, bdd, Functor):
        """Print the keyring chosen via --select, or every keyring for which
        `Functor` matches all requested tags (all keyrings when no tags)."""
        if self.keyId >= 0:
            print(self.keyId, end='')
            print(":", end='')
            print(bdd[self.keyId])
        else:
            for i, keyring in enumerate(bdd):
                if len(self.tags) == 0:
                    print(i, end='')
                    print(":", end='')
                    print(keyring)
                else:
                    # All tags must match for the keyring to be shown.
                    foundAll = 1
                    for tag in self.tags:
                        if Functor(keyring, tag) == 0:
                            foundAll = 0
                    if foundAll == 1:
                        self.print_keyring(i, keyring)

    def command_get(self, bdd):
        """Exact (strict) tag matching."""
        print("GET")
        self.print_matching_keyrings(bdd, self.get_fonctor)

    def command_search(self, bdd):
        """Regex tag matching."""
        print("SEARCH")
        self.print_matching_keyrings(bdd, self.search_fonctor)

    def command_add(self, bdd):
        """Append a new keyring (tags + key) and persist the database."""
        newKeyring = self.tags
        newKeyring.append(self.key)
        bdd.append(newKeyring)
        self.save_bdd(bdd)
        print("Add OK")

    def command_remove(self, bdd):
        """Delete the keyring selected with --select and persist."""
        if (self.keyId < 0 or self.keyId >= len(bdd)):
            exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
        print("Removing: ", end='')
        print(bdd[self.keyId])
        del bdd[self.keyId];
        self.save_bdd(bdd)
        print("Remove OK")

    def command_update(self, bdd):
        """Replace the key (last field) of the selected keyring and persist."""
        if (self.keyId < 0 or self.keyId >= len(bdd)):
            exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
        bdd[self.keyId][len(bdd[self.keyId]) - 1] = self.key;
        print("New keyring: ", end='')
        print(bdd[self.keyId])
        self.save_bdd(bdd)
        print("Update OK")

    def command_backup(self):
        """Copy the encrypted file to the --backup host destination via scp."""
        args = ["scp", self.filename, self.hostdest]
        p = subprocess.Popen(args, stdin = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
        stdout, stderr = p.communicate(None)
        stderr = stderr.rstrip()
        # NOTE(review): stderr is bytes on Python 3, so `stderr != ""` is
        # always True and the backup is always reported as failed -- verify.
        if stderr != "":
            print(stderr)
            print("Backup Failed!")
            exit(1)
        print("Backup OK")

    def run(self):
        """Dispatch to the command selected on the command line."""
        if self.command == "backup":
            # Backup operates on the encrypted file directly; no decryption.
            self.command_backup()
        else:
            raw_bdd = self.load_raw_bdd()
            bdd = self.parse_raw(raw_bdd)
            if self.command == "get":
                self.command_get(bdd)
            elif self.command == "search":
                self.command_search(bdd)
            elif self.command == "add":
                self.command_add(bdd)
            elif self.command == "remove":
                self.command_remove(bdd)
            elif self.command == "update":
                self.command_update(bdd)
import os
import getopt
import sys
import subprocess
import re
def exit_with_usage(error=0, msg=""):
if error != 0:
print("Error: " + msg)
print("usage: ./skrm [OPTIONS] [COMMANDS] [TAGS]")
print("skrm stands for simple keyring manager, it stores keys with tags into a file encrypted using gpg.")
print("skrm will ask for the master password to encrypt/decrypt the storing file.")
print("OPTIONS:")
print("\t-h, --help: Print usage.")
print("\t-g, --get: Return keyrings matching strictly the given tags. This option is used by default. If a keyId is selected, a get or a search return only the keyring matching the keyId.")
print("\t-s, --search: Return keyrings matching the given tags (tags are interpreted as a regex expression).")
print("\t-c, --clip: Copy the key of the last matched keyring from a get or a search into the clipboard using xclip. Nothing will be printed out to the shell.")
print("COMMANDS:")
print("\t--file=[FILENAME]: use the given file to read/store keyrings.")
print("\t--recipient=[USER_ID_NAME]: set the user id name for gpg to get the key and encrypt the file.")
print("\t--pass=[MASTER_PASS]: set the master pass to use when encrypting or decrypting the file.")
print("\t--add=[KEY]: add a key to the file with the specified tags.")
print("\t--select=[KEYID]: select a keyring using its key id. To use with a command like \"remove\" or \"update\".")
print("\t--remove: remove the selected key.")
print("\t--update=[KEY]: update the selected key.")
print("\t--backup=[HOSTDEST]: scp the bdd file to the given host destination.")
print("TAGS:")
print("\tA list of strings to define tags you want to use for any commands keyring related management.")
sys.exit(error)
class KeyringManager:
def __init__(self, user_pref_path, bdd_path, argv):
self.read_user_prefs(user_pref_path, bdd_path)
try:
opts, args = getopt.getopt(argv, "hgsc", ["help", "file=", "get", "search", "pass=", "add=", "select=", "remove", "update=", "recipient=", "backup=", "clip"])
except getopt.GetoptError:
exit_with_usage(1, "Bad arguments.")
for opt, arg in opts:
if opt in ("-h", "--help"):
exit_with_usage()
elif opt == "--file":
self.filename = os.path.expanduser(arg)
elif opt in ("-g", "--get"):
self.command = "get"
elif opt in ("-s", "--search"):
self.command = "search"
elif opt == "--add":
self.command = "add"
self.key = arg
elif opt == "--select":
if arg.isdigit():
self.keyId = int(arg)
else:
exit_with_usage(1, "The given keyid is not a number.")
elif opt == "--remove":
self.command = "remove"
elif opt == "--update":
self.command = "update"
self.key = arg
elif opt == "--pass":
self.passphrase = arg
elif opt == "--recipient":
self.recipient = arg
elif opt == "--backup":
self.command = "backup"
self.hostdest = arg
elif opt in ("-c", "--clip"):
self.clip = 1
for arg in args:
self.tags.append(arg)
def read_user_prefs(self, user_pref_path, bdd_path):
user_pref_file = user_pref_path
self.filename = bdd_path
self.command = "get"
self.passphrase = ""
self.tags = []
self.key = ""
self.keyId = -1
self.recipient = ""
self.clip = 0
try:
with open(user_pref_file, "r") as f:
for line in f:
option = line.split("=")
option[1] = option[1].rstrip('\n')
if option[0][0] != '#':
if option[0] == "file":
self.filename = option[1]
elif option[0] == "recipient":
self.recipient = option[1]
except IOError: # use preffs not found, do nothing. args must be defined in command line arguments.
pass
def load_raw_bdd(self):
""" Decript gpg file and return the content """
args = ["gpg", "-dq"]
if self.passphrase:
args.append("--no-use-agent")
args.append("--passphrase")
args.append(self.passphrase)
args.append(self.filename)
p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(None)
if stdout == "" and stdout != "":
print(stderr)
exit(1)
return stdout.rstrip()
def save_raw_bdd(self, raw):
""" Encript gpg file """
args = ["gpg", "--yes", "-e", "-r", self.recipient, "-o", self.filename]
p = subprocess.Popen(args, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(raw)
stdout = stdout.rstrip()
stderr = stderr.rstrip()
if stdout != "":
print(stdout)
if stderr != "":
print(stderr)
def parse_raw(self, raw):
bdd = []
if raw:
keyrings = raw.split(b"\x03")
for keyring in keyrings:
bdd.append(keyring.split(b"\x02"))
return bdd
def parse_bdd(self, bdd):
raw = b""
bddLen = len(bdd)
for i, keyring in enumerate(bdd):
keyringLen = len(keyring)
for j, tag in enumerate(keyring):
if isinstance(tag, str):
tag = bytes(tag, 'utf8')
raw += tag
if j < (keyringLen - 1):
raw += b"\x02"
if i < (bddLen - 1):
raw += b"\x03"
return raw
def save_bdd(self, bdd):
raw = self.parse_bdd(bdd)
self.save_raw_bdd(raw)
def get_fonctor(self, keyring, tag):
keyringLen = len(keyring)
for i, t in enumerate(keyring):
if i < (keyringLen - 1):
if tag.upper() == t.upper().decode('utf8'):
return 1
return 0
def search_fonctor(self, keyring, tag):
keyringLen = len(keyring)
p = re.compile(tag.upper())
for i, t in enumerate(keyring):
if i < (keyringLen - 1):
if p.search(t.upper().decode('utf8')) != None:
return 1
return 0
def print_keyring(self, i, keyring):
if self.clip == 0: # print the keyring
print(i, end='')
print(":", end='')
print(keyring)
else: # copy the keyring to the clipboard
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2": # linux
args = ["xclip"]
p = subprocess.Popen(args, stdin = subprocess.PIPE)
p.communicate(keyring[len(keyring) - 1])
elif _platform == "darwin": # OS X
args = ["pbcopy"]
p = subprocess.Popen(args, stdin = subprocess.PIPE)
p.communicate(keyring[len(keyring) - 1])
elif _platform == "win32": # Windows
print("Can't copy on clipboard under windows, method not implemented!")
def print_matching_keyrings(self, bdd, Functor):
if self.keyId >= 0:
print(self.keyId, end='')
print(":", end='')
print(bdd[self.keyId])
else:
for i, keyring in enumerate(bdd):
if len(self.tags) == 0:
print(i, end='')
print(":", end='')
print(keyring)
else:
foundAll = 1
for tag in self.tags:
if Functor(keyring, tag) == 0:
foundAll = 0
if foundAll == 1:
self.print_keyring(i, keyring)
def command_get(self, bdd):
print("GET")
self.print_matching_keyrings(bdd, self.get_fonctor)
def command_search(self, bdd):
print("SEARCH")
self.print_matching_keyrings(bdd, self.search_fonctor)
def command_add(self, bdd):
newKeyring = self.tags
newKeyring.append(self.key)
bdd.append(newKeyring)
self.save_bdd(bdd)
print("Add OK")
def command_remove(self, bdd):
if (self.keyId < 0 or self.keyId >= len(bdd)):
exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
print("Removing: ", end='')
print(bdd[self.keyId])
del bdd[self.keyId];
self.save_bdd(bdd)
print("Remove OK")
def command_update(self, bdd):
if (self.keyId < 0 or self.keyId >= len(bdd)):
exit_with_usage(1, "Wrong argument, the given key id must be a valid number.")
bdd[self.keyId][len(bdd[self.keyId]) - 1] = self.key;
print("New keyring: ", end='')
print(bdd[self.keyId])
self.save_bdd(bdd)
print("Update OK")
def command_backup(self):
args = ["scp", self.filename, self.hostdest]
p = subprocess.Popen(args, stdin = subprocess.PIPE, stderr = subprocess.PIPE, close_fds = True)
stdout, stderr = p.communicate(None)
stderr = stderr.rstrip()
if stderr != "":
print(stderr)
print("Backup Failed!")
exit(1)
print("Backup OK")
def run(self):
if self.command == "backup":
self.command_backup()
else:
raw_bdd = self.load_raw_bdd()
bdd = self.parse_raw(raw_bdd)
if self.command == "get":
self.command_get(bdd)
elif self.command == "search":
self.command_search(bdd)
elif self.command == "add":
self.command_add(bdd)
elif self.command == "remove":
self.command_remove(bdd)
elif self.command == "update":
self.command_update(bdd) | 0.343782 | 0.167968 |
from __future__ import print_function, division
from pathlib import Path
import numpy as np
import random
import json
import sys
import os
import argparse
from shutil import copyfile
import networkx as nx
from networkx.readwrite import json_graph
import pdb
def parse_args():
    """Define and parse the CLI arguments for subgraph extraction."""
    parser = argparse.ArgumentParser(description="Generate subgraphs of a network.")
    parser.add_argument('--input', default="../dataspace/graph/fq-tw-data/foursquare", help='Path to load data')
    parser.add_argument('--output', default="../dataspace/graph/fq-tw-data/foursquare/subgraphs", help='Path to save data')
    parser.add_argument('--prefix', default="ppi", help='Dataset prefix')
    parser.add_argument('--min_node', type=int, default=100, help='minimum node for subgraph to be kept')
    return parser.parse_args()
def main(args):
    """Load the graphsage JSON graph, split it into connected components and
    save every component with more than --min_node nodes as a subgraph."""
    G_data = json.load(open(args.input + "/graphsage/" + "G.json"))
    G = json_graph.node_link_graph(G_data)
    # Relabel every node id to its string form so later id2idx lookups match.
    # NOTE(review): indexing `G.nodes()[0]` and `nx.connected_component_subgraphs`
    # only exist in old networkx (< 2.4) -- this module assumes that API.
    if isinstance(G.nodes()[0], int):
        def conversion(n): return int(n)
    else:
        def conversion(n): return n
    mapping = {conversion(G.nodes()[i]):str(G.nodes()[i]) for i in range(len(G.nodes()))}
    G = nx.relabel_nodes(G, mapping)
    print("Original graph info: ")
    print(nx.info(G))
    print("Start extracting sub graph")
    max_num_nodes = 0  # NOTE(review): never used
    all_subgraphs = list(nx.connected_component_subgraphs(G))
    # Keep only components strictly larger than the threshold.
    subgraphs = []
    for graph in all_subgraphs:
        if len(graph.nodes()) > args.min_node:
            subgraphs.append(graph)
    i = 0
    for G in subgraphs:
        save_new_graph(G, args.input, args.output + "/subgraph" + str(i) + "/" , args.prefix)
        i += 1
    return
def save_new_graph(G, input_dir, output_dir, prefix):
    """Persist one extracted subgraph: edgelist, id map, reordered feature
    matrix, and a graphsage-style G.json with val/test flags."""
    nodes = G.nodes()
    if not os.path.exists(output_dir+ "/edgelist/"):
        os.makedirs(output_dir+ "/edgelist/")
    if not os.path.exists(output_dir+ "/graphsage/"):
        os.makedirs(output_dir+ "/graphsage/")
    nx.write_edgelist(G, path = output_dir + "/edgelist/" + ".edgelist" , delimiter=" ", data=['weight'])
    output_prefix = output_dir + "/graphsage/"
    print("Saving new class map")
    input_dir += "/graphsage/"
    id2idx_file = Path(input_dir + "id2idx.json")
    if id2idx_file.is_file():
        id2idx = json.load(open(input_dir + "id2idx.json")) # id to class
        new_id2idx = {node: id2idx[node] for node in nodes}
        # NOTE(review): this write targets id2idx.json and is clobbered by the
        # new id map written just below -- the filename here looks wrong
        # (probably meant a class-map file). TODO confirm.
        with open(output_prefix + 'id2idx.json', 'w') as outfile:
            json.dump(new_id2idx, outfile)
    print("Saving new id map")
    # Fresh contiguous indexing for the subgraph's nodes.
    new_idmap = {node: i for i, node in enumerate(nodes)}
    with open(output_prefix + 'id2idx.json', 'w') as outfile:
        json.dump(new_idmap, outfile)
    print("Saving features")
    old_idmap = json.load(open(input_dir + "id2idx.json"))
    feature_file = Path(input_dir + 'feats.npy')
    features = None
    if feature_file.is_file():
        features = np.load(feature_file)
        # Reorder the original feature rows to follow the new node indexing.
        new_idxs = np.zeros(len(nodes)).astype(int)
        for node in nodes:
            new_idx = new_idmap[node]
            old_idx = old_idmap[node]
            new_idxs[new_idx] = old_idx
        features = features[new_idxs]
        np.save(output_prefix + "feats.npy", features)
    print("Saving new graph")
    num_nodes = len(G.nodes())
    # Random 81/9/10 split; `train` is implicit (not flagged) and unused below.
    rand_indices = np.random.permutation(num_nodes)
    train = rand_indices[:int(num_nodes * 0.81)]
    val = rand_indices[int(num_nodes * 0.81):int(num_nodes * 0.9)]
    test = rand_indices[int(num_nodes * 0.9):]
    id2idx = new_idmap
    res = json_graph.node_link_data(G)
    res['nodes'] = [
        {
            'id': str(node['id']),
            'val': id2idx[str(node['id'])] in val,
            'test': id2idx[str(node['id'])] in test
        }
        for node in res['nodes']]
    res['links'] = [
        {
            'source': link['source'],
            'target': link['target']
        }
        for link in res['links']]
    with open(output_prefix + "G.json", 'w') as outfile:
        json.dump(res, outfile)
    print("DONE!")
if __name__ == "__main__":
args = parse_args()
print(args)
seed = 123
random.seed(seed)
np.random.seed(seed)
main(args) | utils/get_sub_graph.py | from __future__ import print_function, division
from pathlib import Path
import numpy as np
import random
import json
import sys
import os
import argparse
from shutil import copyfile
import networkx as nx
from networkx.readwrite import json_graph
import pdb
def parse_args():
parser = argparse.ArgumentParser(description="Generate subgraphs of a network.")
parser.add_argument('--input', default="../dataspace/graph/fq-tw-data/foursquare", help='Path to load data')
parser.add_argument('--output', default="../dataspace/graph/fq-tw-data/foursquare/subgraphs", help='Path to save data')
parser.add_argument('--prefix', default="ppi", help='Dataset prefix')
parser.add_argument('--min_node', type=int, default=100, help='minimum node for subgraph to be kept')
return parser.parse_args()
def main(args):
G_data = json.load(open(args.input + "/graphsage/" + "G.json"))
G = json_graph.node_link_graph(G_data)
if isinstance(G.nodes()[0], int):
def conversion(n): return int(n)
else:
def conversion(n): return n
mapping = {conversion(G.nodes()[i]):str(G.nodes()[i]) for i in range(len(G.nodes()))}
G = nx.relabel_nodes(G, mapping)
print("Original graph info: ")
print(nx.info(G))
print("Start extracting sub graph")
max_num_nodes = 0
all_subgraphs = list(nx.connected_component_subgraphs(G))
subgraphs = []
for graph in all_subgraphs:
if len(graph.nodes()) > args.min_node:
subgraphs.append(graph)
i = 0
for G in subgraphs:
save_new_graph(G, args.input, args.output + "/subgraph" + str(i) + "/" , args.prefix)
i += 1
return
def save_new_graph(G, input_dir, output_dir, prefix):
nodes = G.nodes()
if not os.path.exists(output_dir+ "/edgelist/"):
os.makedirs(output_dir+ "/edgelist/")
if not os.path.exists(output_dir+ "/graphsage/"):
os.makedirs(output_dir+ "/graphsage/")
nx.write_edgelist(G, path = output_dir + "/edgelist/" + ".edgelist" , delimiter=" ", data=['weight'])
output_prefix = output_dir + "/graphsage/"
print("Saving new class map")
input_dir += "/graphsage/"
id2idx_file = Path(input_dir + "id2idx.json")
if id2idx_file.is_file():
id2idx = json.load(open(input_dir + "id2idx.json")) # id to class
new_id2idx = {node: id2idx[node] for node in nodes}
with open(output_prefix + 'id2idx.json', 'w') as outfile:
json.dump(new_id2idx, outfile)
print("Saving new id map")
new_idmap = {node: i for i, node in enumerate(nodes)}
with open(output_prefix + 'id2idx.json', 'w') as outfile:
json.dump(new_idmap, outfile)
print("Saving features")
old_idmap = json.load(open(input_dir + "id2idx.json"))
feature_file = Path(input_dir + 'feats.npy')
features = None
if feature_file.is_file():
features = np.load(feature_file)
new_idxs = np.zeros(len(nodes)).astype(int)
for node in nodes:
new_idx = new_idmap[node]
old_idx = old_idmap[node]
new_idxs[new_idx] = old_idx
features = features[new_idxs]
np.save(output_prefix + "feats.npy", features)
print("Saving new graph")
num_nodes = len(G.nodes())
rand_indices = np.random.permutation(num_nodes)
train = rand_indices[:int(num_nodes * 0.81)]
val = rand_indices[int(num_nodes * 0.81):int(num_nodes * 0.9)]
test = rand_indices[int(num_nodes * 0.9):]
id2idx = new_idmap
res = json_graph.node_link_data(G)
res['nodes'] = [
{
'id': str(node['id']),
'val': id2idx[str(node['id'])] in val,
'test': id2idx[str(node['id'])] in test
}
for node in res['nodes']]
res['links'] = [
{
'source': link['source'],
'target': link['target']
}
for link in res['links']]
with open(output_prefix + "G.json", 'w') as outfile:
json.dump(res, outfile)
print("DONE!")
if __name__ == "__main__":
args = parse_args()
print(args)
seed = 123
random.seed(seed)
np.random.seed(seed)
main(args) | 0.291283 | 0.159872 |
import os
from unittest.mock import patch, mock_open
import psutil
from rucio_jupyterlab.rucio.download import RucioFileDownloader
def test_rucio_file_downloader_is_downloading__lockfile_not_exists__should_return_false(mocker):
    """No .lockfile on disk -> nothing is currently downloading."""
    mocker.patch.object(os.path, 'isfile', return_value=False)
    assert not RucioFileDownloader.is_downloading('/path'), 'Invalid return value'
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_not_exists__should_return_false(mocker):
    """Lockfile exists but the PID stored in it is dead -> not downloading."""
    mocker.patch.object(os.path, 'isfile', return_value=True)
    mocker.patch.object(psutil, 'pid_exists', return_value=False)
    # The lockfile content ("123") is read back as the downloader's PID.
    with patch("builtins.open", mock_open(read_data="123")) as mock_file:
        result = RucioFileDownloader.is_downloading('/path')
        assert not result, 'Invalid return value'
        mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_not_running__should_return_false(mocker):
    """PID from the lockfile exists but the process is not running -> not downloading."""
    mocker.patch.object(os.path, 'isfile', return_value=True)
    mocker.patch.object(psutil, 'pid_exists', return_value=True)

    # Stand-in for psutil.Process: alive check fails, status irrelevant.
    class MockProcess:
        def __init__(self, pid):
            pass

        def is_running(self):  # pylint: disable=no-self-use
            return False

        def status(self):  # pylint: disable=no-self-use
            return 'running'

    mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)

    with patch("builtins.open", mock_open(read_data="123")) as mock_file:
        result = RucioFileDownloader.is_downloading('/path')
        assert not result, 'Invalid return value'
        mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_running__status_running__should_return_true(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=True)
class MockProcess:
def __init__(self, pid):
pass
def is_running(self): # pylint: disable=no-self-use
return True
def status(self): # pylint: disable=no-self-use
return 'running'
mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_running__status_zombie__should_return_false(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=True)
class MockProcess:
def __init__(self, pid):
pass
def is_running(self): # pylint: disable=no-self-use
return True
def status(self): # pylint: disable=no-self-use
return 'zombie'
mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert not result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_write_lockfile__should_write_pid(mocker):
mocker.patch.object(os, 'getpid', return_value=123)
with patch("builtins.open", mock_open()) as mock_file:
RucioFileDownloader.write_lockfile('/path')
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'w')
mock_file.return_value.write.assert_called_once_with('123') | rucio_jupyterlab/tests/test_rucio_file_downloader.py |
import os
from unittest.mock import patch, mock_open
import psutil
from rucio_jupyterlab.rucio.download import RucioFileDownloader
def test_rucio_file_downloader_is_downloading__lockfile_not_exists__should_return_false(mocker):
mocker.patch.object(os.path, 'isfile', return_value=False)
result = RucioFileDownloader.is_downloading('/path')
assert not result, 'Invalid return value'
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_not_exists__should_return_false(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=False)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert not result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_not_running__should_return_false(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=True)
class MockProcess:
def __init__(self, pid):
pass
def is_running(self): # pylint: disable=no-self-use
return False
def status(self): # pylint: disable=no-self-use
return 'running'
mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert not result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_running__status_running__should_return_true(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=True)
class MockProcess:
def __init__(self, pid):
pass
def is_running(self): # pylint: disable=no-self-use
return True
def status(self): # pylint: disable=no-self-use
return 'running'
mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_is_downloading__lockfile_exists__pid_exists__process_running__status_zombie__should_return_false(mocker):
mocker.patch.object(os.path, 'isfile', return_value=True)
mocker.patch.object(psutil, 'pid_exists', return_value=True)
class MockProcess:
def __init__(self, pid):
pass
def is_running(self): # pylint: disable=no-self-use
return True
def status(self): # pylint: disable=no-self-use
return 'zombie'
mocker.patch('rucio_jupyterlab.rucio.download.psutil.Process', MockProcess)
with patch("builtins.open", mock_open(read_data="123")) as mock_file:
result = RucioFileDownloader.is_downloading('/path')
assert not result, 'Invalid return value'
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'r')
def test_rucio_file_downloader_write_lockfile__should_write_pid(mocker):
mocker.patch.object(os, 'getpid', return_value=123)
with patch("builtins.open", mock_open()) as mock_file:
RucioFileDownloader.write_lockfile('/path')
mock_file.assert_called_with(os.path.join('/path', '.lockfile'), 'w')
mock_file.return_value.write.assert_called_once_with('123') | 0.491456 | 0.205695 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from contextlib import contextmanager
import logging
import re
import subprocess
from octoeb.utils.config import get_config
from octoeb.utils.config import get_config_value
logger = logging.getLogger(__name__)
class GitError(Exception):
pass
def fetch(remote_name):
return subprocess.call(['git', 'fetch', remote_name])
def checkout(branch_name):
return subprocess.call(['git', 'checkout', branch_name])
def update(base_branch):
return subprocess.call(['git', 'pull', '-r', base_branch])
staticfiles_re = re.compile(r'^[AMD].*static.*', re.I)
pip_re = re.compile(r'M.*requirements.*', re.I)
migrations_re = re.compile(r'A.*migrations.*', re.I)
integrations_re = re.compile(r'M.*integrations', re.I)
def log_messages(base='develop', head='', number=None):
"""Return the log messages of the current branch, since base."""
cmd = ['git', 'log', '--format=%B', ]
cmd.append('{base}...'.format(base=base))
try:
logger.debug(u'Running: {}'.format(cmd))
return subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise ValueError('Can not generate log messages.')
def log(base='master', head='', directory=None, merges=False):
"""Retrun simple git log.
Args:
base (str): base branch or sha to compare with.
head (str): branch or sha with most recent changes.
directory (str): directory of the git repo, if None, we assume the
cwd.
merges (bool): default False, when true the git log will be the minimal
oneline log with merges shown. When false, the log is the more
vervose log with file changes included.
Return:
str
"""
try:
cmd = ['git', 'log', ]
if merges:
cmd.append('--oneline')
cmd.append('--merges')
else:
cmd.append('--name-status')
cmd.append('{base}..{head}'.format(base=base, head=head))
logger.debug(u'Running: {}'.format(cmd))
return subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise ValueError(
'Can not find the git log, directory may not be a repo')
def find_staticfile_changes(log):
return staticfiles_re.findall(log)
def find_migrations_changes(log):
return migrations_re.findall(log)
def find_bower_changes(log):
return re.findall(r'^[AMD].*bower.*', log, flags=re.M)
def find_requirements_changes(log):
return re.findall(r'^M.*requirements.*', log, flags=re.M)
def changelog(log, ticket_ids=False):
"""Generate changelog from a git log.
Args:
log (str): A string containing a gitlog, as from the `log` method.
ticket_ids (bool): default False, when True we return a tuple that
of the form `(ticket_ids, str_changelog)`.
Returns:
str or tuple.
"""
config = get_config()
changelog_re_pattern = get_config_value(
config, 'repo', 'changelog_re',
"merge pull request #\d+ from .*(?:[/-]([a-z]{2,4}-\d+)-(.*))"
)
issue_re_pattern = get_config_value(
config, 'repo', 'issue_re',
"merge pull request #\d+ from .*(?:[/-]([a-z]+-\d+))")
issue_re = re.compile(issue_re_pattern, re.I)
changelog_re = re.compile(changelog_re_pattern, re.I)
try:
jira_issues = issue_re.findall(log)
changelog = changelog_re.findall(log)
except subprocess.CalledProcessError:
jira_issues = []
changelog = []
else:
jira_issues = set(jira_issues)
for i, m in enumerate(changelog):
logger.debug('Changloe: {}, {}'.format(i, m))
# m[0] is the issue id
# m[1] is the issue title
changelog[i] = u'* {} : {}'.format(
m[0].upper(),
m[1].replace(u'-', u' ').replace(u'_', u' ').title()
)
changelog = u'\n'.join(sorted(set(changelog)))
if ticket_ids:
return jira_issues, changelog
return changelog
def get_deploy_relavent_changes(base, head):
log_str = log(base, head)
staticfile_changes = find_staticfile_changes(log_str)
migration_changes = find_migrations_changes(log_str)
bower_changes = find_bower_changes(log_str)
pip_changes = find_requirements_changes(log_str)
if staticfile_changes:
staticfile_msg = 'Staticfile changes:\n{}'.format(
u'\n'.join(staticfile_changes))
else:
staticfile_msg = 'No staticfile changes'
if bower_changes:
bower_msg = 'Bower chagnes:\n{}'.format(
u'\n'.join(bower_changes))
else:
bower_msg = 'No bower changes'
if pip_changes:
pip_msg = 'Pip changes:\n{}'.format(
u'\n'.join(pip_changes))
else:
pip_msg = 'No pip changes'
return (staticfile_msg, bower_msg, pip_msg), migration_changes
@contextmanager
def on_branch(name, remote_name='mainline'):
"""Quickly out a branch and then revert to the orignal state.
The `on_branch` context manager allows you to store the user's current
branch info, including any staged or unstaged changes. It will then
checkout the named branch, update it from the remote, and then do
the work inside the context manager. When finished it will go back to
the original branch and pop any stashed work.
"""
# store the current branch info
org_branch = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'
])
org_branch = org_branch.strip()
logger.debug('current branch name: {}'.format(org_branch))
logger.debug('stashing current branch')
stash_ref = subprocess.check_output(['git', 'stash', 'create', '-q'])
stash_ref = stash_ref.strip()
if stash_ref:
logger.debug('stash_ref: {}'.format(stash_ref))
subprocess.call(['git', 'stash', 'store', '-q', stash_ref])
subprocess.call(['git', 'reset', '--hard'])
# go to the new branch
subprocess.call(['git', 'checkout', '-q', name])
# update the branch from the remote
subprocess.call(['git', 'pull', '-q', remote_name, name])
# do work inside the context manager here
yield
# go back to the original branch state
logger.debug('checkout the original branch: {}'.format(org_branch))
subprocess.call(['git', 'checkout', '-q', org_branch])
if stash_ref:
subprocess.call(['git', 'stash', 'pop', '-q']) | octoeb/utils/git.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from contextlib import contextmanager
import logging
import re
import subprocess
from octoeb.utils.config import get_config
from octoeb.utils.config import get_config_value
logger = logging.getLogger(__name__)
class GitError(Exception):
pass
def fetch(remote_name):
return subprocess.call(['git', 'fetch', remote_name])
def checkout(branch_name):
return subprocess.call(['git', 'checkout', branch_name])
def update(base_branch):
return subprocess.call(['git', 'pull', '-r', base_branch])
staticfiles_re = re.compile(r'^[AMD].*static.*', re.I)
pip_re = re.compile(r'M.*requirements.*', re.I)
migrations_re = re.compile(r'A.*migrations.*', re.I)
integrations_re = re.compile(r'M.*integrations', re.I)
def log_messages(base='develop', head='', number=None):
"""Return the log messages of the current branch, since base."""
cmd = ['git', 'log', '--format=%B', ]
cmd.append('{base}...'.format(base=base))
try:
logger.debug(u'Running: {}'.format(cmd))
return subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise ValueError('Can not generate log messages.')
def log(base='master', head='', directory=None, merges=False):
"""Retrun simple git log.
Args:
base (str): base branch or sha to compare with.
head (str): branch or sha with most recent changes.
directory (str): directory of the git repo, if None, we assume the
cwd.
merges (bool): default False, when true the git log will be the minimal
oneline log with merges shown. When false, the log is the more
vervose log with file changes included.
Return:
str
"""
try:
cmd = ['git', 'log', ]
if merges:
cmd.append('--oneline')
cmd.append('--merges')
else:
cmd.append('--name-status')
cmd.append('{base}..{head}'.format(base=base, head=head))
logger.debug(u'Running: {}'.format(cmd))
return subprocess.check_output(cmd)
except subprocess.CalledProcessError:
raise ValueError(
'Can not find the git log, directory may not be a repo')
def find_staticfile_changes(log):
return staticfiles_re.findall(log)
def find_migrations_changes(log):
return migrations_re.findall(log)
def find_bower_changes(log):
return re.findall(r'^[AMD].*bower.*', log, flags=re.M)
def find_requirements_changes(log):
return re.findall(r'^M.*requirements.*', log, flags=re.M)
def changelog(log, ticket_ids=False):
"""Generate changelog from a git log.
Args:
log (str): A string containing a gitlog, as from the `log` method.
ticket_ids (bool): default False, when True we return a tuple that
of the form `(ticket_ids, str_changelog)`.
Returns:
str or tuple.
"""
config = get_config()
changelog_re_pattern = get_config_value(
config, 'repo', 'changelog_re',
"merge pull request #\d+ from .*(?:[/-]([a-z]{2,4}-\d+)-(.*))"
)
issue_re_pattern = get_config_value(
config, 'repo', 'issue_re',
"merge pull request #\d+ from .*(?:[/-]([a-z]+-\d+))")
issue_re = re.compile(issue_re_pattern, re.I)
changelog_re = re.compile(changelog_re_pattern, re.I)
try:
jira_issues = issue_re.findall(log)
changelog = changelog_re.findall(log)
except subprocess.CalledProcessError:
jira_issues = []
changelog = []
else:
jira_issues = set(jira_issues)
for i, m in enumerate(changelog):
logger.debug('Changloe: {}, {}'.format(i, m))
# m[0] is the issue id
# m[1] is the issue title
changelog[i] = u'* {} : {}'.format(
m[0].upper(),
m[1].replace(u'-', u' ').replace(u'_', u' ').title()
)
changelog = u'\n'.join(sorted(set(changelog)))
if ticket_ids:
return jira_issues, changelog
return changelog
def get_deploy_relavent_changes(base, head):
log_str = log(base, head)
staticfile_changes = find_staticfile_changes(log_str)
migration_changes = find_migrations_changes(log_str)
bower_changes = find_bower_changes(log_str)
pip_changes = find_requirements_changes(log_str)
if staticfile_changes:
staticfile_msg = 'Staticfile changes:\n{}'.format(
u'\n'.join(staticfile_changes))
else:
staticfile_msg = 'No staticfile changes'
if bower_changes:
bower_msg = 'Bower chagnes:\n{}'.format(
u'\n'.join(bower_changes))
else:
bower_msg = 'No bower changes'
if pip_changes:
pip_msg = 'Pip changes:\n{}'.format(
u'\n'.join(pip_changes))
else:
pip_msg = 'No pip changes'
return (staticfile_msg, bower_msg, pip_msg), migration_changes
@contextmanager
def on_branch(name, remote_name='mainline'):
"""Quickly out a branch and then revert to the orignal state.
The `on_branch` context manager allows you to store the user's current
branch info, including any staged or unstaged changes. It will then
checkout the named branch, update it from the remote, and then do
the work inside the context manager. When finished it will go back to
the original branch and pop any stashed work.
"""
# store the current branch info
org_branch = subprocess.check_output([
'git', 'rev-parse', '--abbrev-ref', 'HEAD'
])
org_branch = org_branch.strip()
logger.debug('current branch name: {}'.format(org_branch))
logger.debug('stashing current branch')
stash_ref = subprocess.check_output(['git', 'stash', 'create', '-q'])
stash_ref = stash_ref.strip()
if stash_ref:
logger.debug('stash_ref: {}'.format(stash_ref))
subprocess.call(['git', 'stash', 'store', '-q', stash_ref])
subprocess.call(['git', 'reset', '--hard'])
# go to the new branch
subprocess.call(['git', 'checkout', '-q', name])
# update the branch from the remote
subprocess.call(['git', 'pull', '-q', remote_name, name])
# do work inside the context manager here
yield
# go back to the original branch state
logger.debug('checkout the original branch: {}'.format(org_branch))
subprocess.call(['git', 'checkout', '-q', org_branch])
if stash_ref:
subprocess.call(['git', 'stash', 'pop', '-q']) | 0.740362 | 0.08218 |
from matplotlib.pyplot import draw
import numpy as np
import pandas as pd
import skfuzzy as fz
from skfuzzy import control as ctrl
import matplotlib.pyplot as plt
class FIS:
def __init__(self):
evoparation = ctrl.Antecedent(np.arange(0, 15, 0.2), 'evoparation') # chia độ bốc hơi từ 0-15 với khoảng cách 0.1
humidity = ctrl.Antecedent(np.arange(0, 100, 0.2), 'humidity') # chia độ ẩm từ 0-100(%) với khoảng cách 0.2
pressure = ctrl.Antecedent(np.arange(990, 1030, 0.1), 'pressure') # chia áp suất từ 990-1020(.10^2 Pa) với khoảng cách 0.1
cloud = ctrl.Antecedent(np.arange(0, 8, 1), 'cloud') # chia mây từ 0-8 với khoảng cách 1
temp = ctrl.Antecedent(np.arange(15, 40, 0.1), 'temp') # chia nhiệt độ từ 15-38(độ C) với khoảng cách 0.1
rainfall = ctrl.Consequent(np.arange(0, 120, 0.2), 'rainfall') # chia lượng mưa từ 0-120(mm) với khoảng cách 0.2
evoparation['low'] = fz.trapmf(evoparation.universe, [0, 0, 3, 4]) # độ bốc hơi thấp => mưa nhiều
evoparation['medium'] = fz.trapmf(evoparation.universe, [3.4, 4, 7, 10])
evoparation['high'] = fz.trapmf(evoparation.universe, [8, 12, 15, 15]) # độ bốc hơi quá cao => mưa nhiều
humidity['low'] = fz.trapmf(humidity.universe, [0, 0, 60, 75])
humidity['high'] = fz.trapmf(humidity.universe, [65, 80, 100, 100]) # độ ẩm cao mưa nhiều
pressure['low'] = fz.trapmf(pressure.universe, [990, 990, 1009, 1012]) # áp suất thấp mưa nhiều
pressure['high'] = fz.trapmf(pressure.universe, [1009, 1012, 1030, 1030])
cloud['low'] = fz.trapmf(cloud.universe, [0, 0, 5, 7])
cloud['high'] = fz.trapmf(cloud.universe, [6, 7, 8, 8]) # nhiều mây mưa nhiều
temp['low'] = fz.trapmf(temp.universe, [15, 15, 20, 24.2])
temp['medium'] = fz.trapmf(temp.universe, [23, 25, 29, 32]) # nhiệt độ TB thì mưa cao hơn
temp['high'] = fz.trapmf(temp.universe, [28.5, 35, 40, 40])
rainfall['very_low'] = fz.trapmf(rainfall.universe, [0, 0, 2, 4]) # không mưa
rainfall['low'] = fz.trapmf(rainfall.universe, [3, 5, 8, 12]) # mưa ít
rainfall['medium'] = fz.trapmf(rainfall.universe, [10, 15, 35, 40])
rainfall['high'] = fz.trapmf(rainfall.universe, [35, 45, 120, 120]) # mưa nhiều
rules = [
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi thấp
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ thấp
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi TB
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ thấp
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi cao
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ thấp
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['low'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi thấp
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ TB
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['low']),
ctrl.Rule(evoparation['low'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['medium']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi TB
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ TB
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['low']),
ctrl.Rule(evoparation['medium'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi cao
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ TB
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['low']),
ctrl.Rule(evoparation['high'] & temp['medium'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['high']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi thấp
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ cao
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['low'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi TB
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ cao
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['medium'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['low'] , rainfall['very_low']), # tại bốc hơi cao
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['low'] & pressure['high'] & cloud['high'] , rainfall['very_low']), # nhiệt độ cao
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['low'] & pressure['low'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['high'] & pressure['high'] & cloud['high'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['low'] , rainfall['very_low']),
ctrl.Rule(evoparation['high'] & temp['high'] & humidity['high'] & pressure['low'] & cloud['high'] , rainfall['low']),
]
CT = ctrl.ControlSystem(rules)
self.Defz = ctrl.ControlSystemSimulation( CT )
def predict(self, evoparation, humidity, pressure, cloud, temp):
'''
:param np.array evoparation: value evoparation
:param np.array humidity: value humidity
:param np.array pressure: value pressure
:param np.array cloud: value cloud
:param np.array temp: value temp
:return int: predict value
'''
self.Defz.input['evoparation'] = evoparation
self.Defz.input['humidity'] = humidity
self.Defz.input['pressure'] = pressure
self.Defz.input['cloud'] = cloud
self.Defz.input['temp'] = temp
self.Defz.compute()
return self.Defz.output['rainfall'] | src/FuzzyLogic.py | from matplotlib.pyplot import draw
import numpy as np
import pandas as pd
import skfuzzy as fz
from skfuzzy import control as ctrl
import matplotlib.pyplot as plt
class FIS:
    """Mamdani fuzzy inference system predicting rainfall (mm) from
    evaporation, temperature, humidity, pressure and cloud cover.

    The original 72-rule table is encoded data-driven: every antecedent
    combination defaults to 'very_low' rainfall except the explicit
    exceptions listed in ``__init__``.
    """

    def __init__(self):
        # Antecedent/consequent universes (unit and resolution in comments).
        evoparation = ctrl.Antecedent(np.arange(0, 15, 0.2), 'evoparation')  # evaporation 0-15, step 0.2
        humidity = ctrl.Antecedent(np.arange(0, 100, 0.2), 'humidity')       # humidity 0-100 %, step 0.2
        pressure = ctrl.Antecedent(np.arange(990, 1030, 0.1), 'pressure')    # pressure 990-1030 hPa, step 0.1
        cloud = ctrl.Antecedent(np.arange(0, 8, 1), 'cloud')                 # cloud cover 0-8 oktas, step 1
        temp = ctrl.Antecedent(np.arange(15, 40, 0.1), 'temp')               # temperature 15-40 C, step 0.1
        rainfall = ctrl.Consequent(np.arange(0, 120, 0.2), 'rainfall')       # rainfall 0-120 mm, step 0.2

        # Trapezoidal membership functions.
        evoparation['low'] = fz.trapmf(evoparation.universe, [0, 0, 3, 4])       # low evaporation -> more rain
        evoparation['medium'] = fz.trapmf(evoparation.universe, [3.4, 4, 7, 10])
        evoparation['high'] = fz.trapmf(evoparation.universe, [8, 12, 15, 15])
        humidity['low'] = fz.trapmf(humidity.universe, [0, 0, 60, 75])
        humidity['high'] = fz.trapmf(humidity.universe, [65, 80, 100, 100])      # high humidity -> more rain
        pressure['low'] = fz.trapmf(pressure.universe, [990, 990, 1009, 1012])   # low pressure -> more rain
        pressure['high'] = fz.trapmf(pressure.universe, [1009, 1012, 1030, 1030])
        cloud['low'] = fz.trapmf(cloud.universe, [0, 0, 5, 7])
        cloud['high'] = fz.trapmf(cloud.universe, [6, 7, 8, 8])                  # heavy cloud -> more rain
        temp['low'] = fz.trapmf(temp.universe, [15, 15, 20, 24.2])
        temp['medium'] = fz.trapmf(temp.universe, [23, 25, 29, 32])              # medium temperature -> more rain
        temp['high'] = fz.trapmf(temp.universe, [28.5, 35, 40, 40])
        rainfall['very_low'] = fz.trapmf(rainfall.universe, [0, 0, 2, 4])        # no rain
        rainfall['low'] = fz.trapmf(rainfall.universe, [3, 5, 8, 12])            # light rain
        rainfall['medium'] = fz.trapmf(rainfall.universe, [10, 15, 35, 40])
        rainfall['high'] = fz.trapmf(rainfall.universe, [35, 45, 120, 120])      # heavy rain

        # Rule base: (evoparation, temp, humidity, pressure, cloud) -> rainfall.
        # Default consequent is 'very_low'; only these combinations differ.
        exceptions = {}
        for e in ('low', 'medium', 'high'):
            # High humidity + low pressure + heavy cloud gives light rain
            # at both low and high temperature, regardless of evaporation.
            exceptions[(e, 'low', 'high', 'low', 'high')] = 'low'
            exceptions[(e, 'high', 'high', 'low', 'high')] = 'low'
        exceptions.update({
            # Medium temperature with high humidity rains more readily.
            ('low', 'medium', 'high', 'high', 'high'): 'low',
            ('low', 'medium', 'high', 'low', 'low'): 'low',
            ('low', 'medium', 'high', 'low', 'high'): 'medium',
            ('medium', 'medium', 'high', 'high', 'high'): 'low',
            ('medium', 'medium', 'high', 'low', 'low'): 'low',
            ('medium', 'medium', 'high', 'low', 'high'): 'low',
            ('high', 'medium', 'high', 'high', 'high'): 'low',
            ('high', 'medium', 'high', 'low', 'low'): 'low',
            ('high', 'medium', 'high', 'low', 'high'): 'high',
        })
        rules = []
        for e in ('low', 'medium', 'high'):
            for t in ('low', 'medium', 'high'):
                for h in ('low', 'high'):
                    for p in ('high', 'low'):
                        for c in ('low', 'high'):
                            out = exceptions.get((e, t, h, p, c), 'very_low')
                            rules.append(ctrl.Rule(
                                evoparation[e] & temp[t] & humidity[h] & pressure[p] & cloud[c],
                                rainfall[out]))
        CT = ctrl.ControlSystem(rules)
        self.Defz = ctrl.ControlSystemSimulation(CT)

    def predict(self, evoparation, humidity, pressure, cloud, temp):
        """Run the fuzzy controller for one observation.

        :param evoparation: evaporation reading
        :param humidity: relative-humidity reading
        :param pressure: atmospheric-pressure reading
        :param cloud: cloud-cover reading
        :param temp: temperature reading
        :return: defuzzified rainfall estimate (float, mm)
        """
        self.Defz.input['evoparation'] = evoparation
        self.Defz.input['humidity'] = humidity
        self.Defz.input['pressure'] = pressure
        self.Defz.input['cloud'] = cloud
        self.Defz.input['temp'] = temp
        self.Defz.compute()
        return self.Defz.output['rainfall']
from typing import Callable, Tuple, Union, Optional, Dict
import numpy as np
from inspect import signature
class Op:
    """A differentiable operation.

    Pairs a forward callable with one partial-derivative callable per
    forward parameter; each partial receives the forward arguments
    followed by the upstream gradient ``c``.
    """

    def __init__(self, name: str, description: str, op: Callable, partial_difs: Tuple[Callable, ...]):
        # Tuple[Callable, ...] (variadic) instead of Tuple[Callable], which
        # would mean "a tuple of exactly one callable".
        # One partial derivative is required per forward parameter.
        assert len(signature(op).parameters) == len(partial_difs)
        self._name = name
        self._desc = description
        self._op = op
        self._partials = partial_difs

    def __call__(self, *args):
        """Apply the forward operation to ``args``."""
        return self._op(*args)

    def __str__(self):
        return f"{self._name}: {self._desc}"

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return self._desc

    def partial(self, i: int) -> Callable:
        """Return the partial-derivative callable for parameter ``i``."""
        return self._partials[i]
class OpTable:
    """Name-indexed registry of Op instances."""

    def __init__(self, *ops: Op):
        # Index each operation under its own name.
        self._ops = {}
        for operation in ops:
            self._ops[operation.name] = operation

    def __getitem__(self, op_name: str) -> Op:
        return self._ops[op_name]

    def __len__(self):
        return len(self._ops)

    def op_descriptions(self) -> Dict[str, str]:
        """Map every registered op name to its description."""
        descriptions = {}
        for operation in self._ops.values():
            descriptions[operation.name] = operation.description
        return descriptions
# Built-in differentiable operations.  By the convention established by
# ``add``/``smul``/``mmul``, each partial derivative receives the forward
# arguments followed by the upstream gradient ``c`` and returns ``c``
# multiplied by the local Jacobian term.

add = Op(
    "add",
    "Scalar or vector addition. If one arg is a matrix then the other arg "
    "must be a matrix of the same shape",
    lambda x, y: x + y,
    (
        lambda x, y, c: c,
        lambda x, y, c: c,
    ),
)

smul = Op(
    "smul",
    "Scalar multiplication. The first arg must be a scalar, the second arg "
    "may be a scalar or a matrix",
    lambda x, y: x * y,
    (
        lambda x, y, c: (c * y).sum(),
        lambda x, y, c: c * x * np.ones_like(y),
    ),
)

mmul = Op(
    "mmul",
    "Matrix multiplication. Both args must be matrices and have compatible "
    "shapes.",
    lambda x, y: x @ y,
    (
        lambda x, y, c: c @ y.T,
        lambda x, y, c: x.T @ c,
    ),
)

relu = Op(
    "relu",
    "For each element x in a matrix set x = max(x, 0)",
    lambda x: np.maximum(x, 0.0),
    (
        # BUG FIX: the original returned the bare 0/1 mask and ignored the
        # upstream gradient ``c``, unlike every other op's partials.
        lambda x, c: c * np.where(x > 0, 1.0, 0.0),
    ),
)

loss = Op(
    "loss",
    "Calculate the RMS loss between a target and observed values",
    lambda target, actual: np.sqrt(np.mean(np.square(target - actual))),
    (
        # NOTE(review): these are not the exact gradients of the RMS forward
        # expression above (d/da sqrt(mean((t-a)^2)) = (a-t)/(n*rms)); they
        # look like a scaled squared-error surrogate -- confirm intent
        # before changing.
        lambda t, a, c: c * 0.5 * (t - a) * t.size,
        lambda t, a, c: c * 0.5 * (a - t) * a.size,
    ),
)

default_op_table = OpTable(add, smul, mmul, relu, loss)
import numpy as np
from inspect import signature
class Op:
    """A differentiable operation.

    Pairs a forward callable with one partial-derivative callable per
    forward parameter; each partial receives the forward arguments
    followed by the upstream gradient ``c``.
    """

    def __init__(self, name: str, description: str, op: Callable, partial_difs: Tuple[Callable, ...]):
        # Tuple[Callable, ...] (variadic) instead of Tuple[Callable], which
        # would mean "a tuple of exactly one callable".
        # One partial derivative is required per forward parameter.
        assert len(signature(op).parameters) == len(partial_difs)
        self._name = name
        self._desc = description
        self._op = op
        self._partials = partial_difs

    def __call__(self, *args):
        """Apply the forward operation to ``args``."""
        return self._op(*args)

    def __str__(self):
        return f"{self._name}: {self._desc}"

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return self._desc

    def partial(self, i: int) -> Callable:
        """Return the partial-derivative callable for parameter ``i``."""
        return self._partials[i]
class OpTable:
    """Name-indexed registry of Op instances."""

    def __init__(self, *ops: Op):
        # Index each operation under its own name.
        self._ops = {}
        for operation in ops:
            self._ops[operation.name] = operation

    def __getitem__(self, op_name: str) -> Op:
        return self._ops[op_name]

    def __len__(self):
        return len(self._ops)

    def op_descriptions(self) -> Dict[str, str]:
        """Map every registered op name to its description."""
        descriptions = {}
        for operation in self._ops.values():
            descriptions[operation.name] = operation.description
        return descriptions
# Built-in differentiable operations.  By the convention established by
# ``add``/``smul``/``mmul``, each partial derivative receives the forward
# arguments followed by the upstream gradient ``c`` and returns ``c``
# multiplied by the local Jacobian term.

add = Op(
    "add",
    "Scalar or vector addition. If one arg is a matrix then the other arg "
    "must be a matrix of the same shape",
    lambda x, y: x + y,
    (
        lambda x, y, c: c,
        lambda x, y, c: c,
    ),
)

smul = Op(
    "smul",
    "Scalar multiplication. The first arg must be a scalar, the second arg "
    "may be a scalar or a matrix",
    lambda x, y: x * y,
    (
        lambda x, y, c: (c * y).sum(),
        lambda x, y, c: c * x * np.ones_like(y),
    ),
)

mmul = Op(
    "mmul",
    "Matrix multiplication. Both args must be matrices and have compatible "
    "shapes.",
    lambda x, y: x @ y,
    (
        lambda x, y, c: c @ y.T,
        lambda x, y, c: x.T @ c,
    ),
)

relu = Op(
    "relu",
    "For each element x in a matrix set x = max(x, 0)",
    lambda x: np.maximum(x, 0.0),
    (
        # BUG FIX: the original returned the bare 0/1 mask and ignored the
        # upstream gradient ``c``, unlike every other op's partials.
        lambda x, c: c * np.where(x > 0, 1.0, 0.0),
    ),
)

loss = Op(
    "loss",
    "Calculate the RMS loss between a target and observed values",
    lambda target, actual: np.sqrt(np.mean(np.square(target - actual))),
    (
        # NOTE(review): these are not the exact gradients of the RMS forward
        # expression above (d/da sqrt(mean((t-a)^2)) = (a-t)/(n*rms)); they
        # look like a scaled squared-error surrogate -- confirm intent
        # before changing.
        lambda t, a, c: c * 0.5 * (t - a) * t.size,
        lambda t, a, c: c * 0.5 * (a - t) * a.size,
    ),
)

default_op_table = OpTable(add, smul, mmul, relu, loss)
__version__ = '0.8.1'
__date__ = '$Date: 16 Dec 2011 $'
no_blue = False;
try:
import bluetooth
except ImportError:
no_blue = True;
no_serial = False;
try:
import serial
except ImportError:
no_serial = True;
import math
import time
uuid = "00001101-0000-1000-8000-00805F9B34FB"
class Emant300:
    """Driver for the EMANT300 (serial) / EMANT380 (Bluetooth) DAQ module.

    The device speaks an ASCII request/response protocol: each command is
    '>' + payload + a two-hex-digit mod-256 checksum; each reply starts
    with 'A' on success and is terminated by '\r'.
    """

    # Analog input multiplexer codes.
    AIN0, AIN1, AIN2, AIN3, AIN4, AIN5, COM, DIODE = (0, 1, 2, 3, 4, 5, 8, 15)
    # ADC polarity selectors.
    Unipolar, Bipolar = (1, 0)
    # Voltage-reference selectors (2.5 V / 1.25 V).
    V2_5, V1_25 = (1, 0)
    # Counter block mode: event counter or PWM generator.
    POC_Count, POC_PWM = (0, 1)
    # Counter trigger: timed interval or event driven.
    EOT_Timed, EOT_Event = (0, 1)

    def __init__(self):
        # Cached hardware/protocol state; refreshed by Open()/Config*().
        self._HwId = ""
        self._CommOpen = False
        self._DIO_Config = 8
        self._DIO = 255
        self._Polarity = self.Unipolar
        self._Gain = 0
        self._VRef = self.V2_5
        self._ODAC = 0
        self._ADCON0 = 0x30
        self._ADCON1 = 0x41
        self._Decimation = 0x07F1
        self._ACLK = 0x10
        self._sock = None
        self._MSINT = 100
        self._EventOrTimed = self.EOT_Timed
        self._PWMOrCnt = self.POC_PWM
        self._Counter = 0

    def HwId(self):
        """Return the hardware-id string read during Open()."""
        return self._HwId

    def Open(self, Comm_Port, reset=True, dev='380'):
        """Open the communication channel and read the hardware id.

        Comm_Port is a Bluetooth address for dev='380' or a serial port
        name for dev='300'.  Returns True when the channel is open.
        """
        self._CommPort = Comm_Port
        self._device = dev
        if (self._device == '380'):
            service_matches = bluetooth.find_service(uuid=uuid, address=Comm_Port)
            if len(service_matches) == 0:
                self._CommOpen = False
                return self._CommOpen
            first_match = service_matches[0]
            port = first_match["port"]
            name = first_match["name"]
            host = first_match["host"]
            # Create the RFCOMM client socket.
            self._sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self._sock.connect((host, port))
            self._CommOpen = True
        if (self._device == '300'):
            self._serial = serial.Serial(Comm_Port, 115200, timeout=5)
            # BUG FIX: the original stored the bound method object (always
            # truthy) instead of calling it.
            self._CommOpen = self._serial.isOpen()
        c = '>' + self._checksum('i')
        r = self._TransactCommand(c)
        (st, hw_id) = self._checksum_OK(r)
        self._HwId = hw_id
        if (reset):
            self.Reset()
        return self._CommOpen

    def Close(self):
        """Close whichever channel Open() established."""
        if (self._device == '380'):
            self._sock.close()
        if (self._device == '300'):
            self._serial.close()

    def ConfigDIO(self, Value):
        """Configure the 8 DIO bits: 1 = input, 0 = output.  True on success."""
        if (Value <= 255):
            self._DIO_Config = Value
            return True
        else:
            return False

    def ConfigAnalog(self, InputLimit, Polarity, SampleFreq):
        """Configure ADC gain, polarity and sample rate.

        InputLimit is clipped to the selected reference voltage; the PGA
        gain is the largest power of two that still covers InputLimit.
        """
        if (self._VRef == self.V2_5):
            reflimit = 2.5
        else:
            reflimit = 1.25
        if (InputLimit > reflimit):
            InputLimit = reflimit
        Gain = int(math.log(reflimit / InputLimit) / math.log(2))
        self._ADCON0 = self._ADCON0 & 0xF8
        self._ADCON0 = int(Gain | self._ADCON0)
        self._Gain = Gain
        self._ADCON1 = self._ADCON1 & 0xBF
        self._ADCON1 = int(Polarity << 6) | self._ADCON1
        self._Polarity = Polarity
        temp = (170 / SampleFreq) - 1
        if (temp < 1):
            temp = 1
        self._ACLK = int(temp)
        # 345606.25 = 22118800 / 64 (crystal frequency / fixed divider)
        self._Decimation = int(345606.25 / ((self._ACLK + 1) * SampleFreq))
        # Work around the 2047 decimation-register limit by slowing ACLK.
        if (self._Decimation > 2047):
            self._ACLK = self._ACLK + 1
            self._Decimation = int(345606.25 / ((self._ACLK + 1) * SampleFreq))
        return self.ConfigAnalogAdvance()

    def ConfigAnalogAdvance(self):
        """Push the raw ADC registers; return (ok, actual_sample_freq)."""
        c = ("03F" + ("%02x" % self._ODAC).upper() +
             ("%02x" % self._ADCON0).upper() + ("%02x" % self._ADCON1).upper() +
             ("%04x" % self._Decimation).upper() + ("%02x" % self._ACLK).upper())
        actSampFreq = 22118800 / ((self._ACLK + 1) * self._Decimation * 64)
        return (self._writecmd(c), actSampFreq)

    def ReadAnalogWaveform(self, PIn, NIn, NumberOfSamples):
        """Read NumberOfSamples 16-bit samples between pins PIn and NIn.

        Returns a list of voltages (empty on a checksum failure).
        """
        wavefm = []
        ai = (PIn * 16 + NIn) % 256
        c = ">" + self._checksum("v" + ("%02x" % ai).upper() + ("%04x" % NumberOfSamples).upper())
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            for i in range(NumberOfSamples):
                hexstr = result[i * 4: (i + 1) * 4]
                rawdata = int(hexstr, 16)
                if (self._Polarity == self.Bipolar):
                    # Sign-extend the 16-bit two's-complement sample.
                    if rawdata > 0x7FFF:
                        rawdata = rawdata - 0x10000
                    rawdata = rawdata * 2
                g = 1 << self._Gain
                rawdata = rawdata // g  # floor division, matching the original Python 2 '/'
                volt = (rawdata * 1.25 * (1 + self._VRef)) / 0xFFFF
                wavefm.append(volt)
        return wavefm

    def ReadAnalog(self, PIn, NIn):
        """Single 24-bit reading between pins PIn and NIn -> (volts, raw)."""
        ai = (PIn * 16 + NIn) % 256
        c = '>' + self._checksum('t' + ("%02x" % ai).upper())
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            rawdata = int(result, 16)
            if (self._Polarity == self.Bipolar):
                # Sign-extend the 24-bit two's-complement sample.
                if rawdata > 0x7FFFFF:
                    rawdata = rawdata - 0x1000000
                rawdata = rawdata * 2
            g = 1 << self._Gain
            rawdata = rawdata // g  # floor division, matching the original Python 2 '/'
            volt = (rawdata * 1.25 * (1 + self._VRef)) / 0xFFFFFF
            RawData = rawdata
        else:
            # BUG FIX: the original left ``volt`` unassigned on this path,
            # raising NameError at the return below.
            volt = 0.0
            RawData = 0
        return (volt, RawData)

    def WriteAnalog(self, Value):
        """Write 0..1 of full scale to the IDAC.  False when out of range."""
        if Value > 1:
            return False
        if Value < 0:
            return False
        temp = int(Value * 255)
        c = ('E01' + ("%02x" % temp).upper())
        return self._writecmd(c)

    def ConfigPWMCounter(self, PWMOrCnt, EventOrTimed=EOT_Timed, MSInt=100, SetCount=0):
        """Configure the counter/PWM block; SetCount applies to event counting only."""
        self._MSINT = MSInt
        self._PWMOrCnt = PWMOrCnt
        self._EventOrTimed = EventOrTimed
        temp = PWMOrCnt + EventOrTimed * 2
        if (self._PWMOrCnt == self.POC_Count) and (self._EventOrTimed == self.EOT_Event):
            c = "133" + ("%02x" % self._MSINT).upper() + ("%02x" % temp).upper() + ("%04x" % SetCount).upper()
            return self._writecmd(c)
        else:
            c = "130" + ("%02x" % self._MSINT).upper() + ("%02x" % temp).upper()
            return self._writecmd(c)

    def WritePWM(self, Period, DutyCycle):
        """Program the PWM output; DutyCycle is a percentage."""
        Per1 = Period * 1.8432  # convert to clock ticks (1.8432 ticks per period unit)
        Dut1 = DutyCycle / 100.0
        PerH = self._DeadTimeComp(Per1 * (1 - Dut1))
        PerL = self._DeadTimeComp(Per1 * Dut1)
        c = ("EF0" + ("%04x" % PerH).upper() + ("%04x" % PerL).upper())
        return self._writecmd(c)

    def ReadCounter(self):
        """Read the 16-bit counter; return (count, period_s) or (-1, 0) on error."""
        c = ">" + self._checksum("h")
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            self._Counter = int(result, 16)
            if (self._Counter == 0):
                Period = 0.0000000001  # tiny sentinel avoids div-by-zero downstream
            else:
                Period = (float(self._MSINT + 1) / self._Counter) / 1000
            return (self._Counter, Period)
        else:
            return (-1, 0)

    def ReadDigitalBit(self, Address):
        """Return True when DIO bit ``Address`` is set (refreshes the port first)."""
        mask = 1
        self.ReadDigitalPort()
        maskresult = self._DIO & (mask << Address)
        return (maskresult != 0)  # '!=' replaces the Python-2-only '<>'

    def ReadDigitalPort(self):
        """Read the 8-bit DIO port; returns 1 on a checksum failure."""
        c = '>' + self._checksum('d')
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            self._DIO = int(result, 16)
            return self._DIO
        else:
            return 1

    def WriteDigitalBit(self, Address, State):
        """Set or clear a single DIO bit, preserving the cached port value."""
        mask = 1
        mask = mask << Address
        if State:
            maskresult = self._DIO | mask
        else:
            maskresult = self._DIO & (mask ^ 255)
        return self.WriteDigitalPort(maskresult)

    def WriteDigitalPort(self, Value):
        """Write an 8-bit value to the DIO port (input-configured bits forced high)."""
        Value = Value | self._DIO_Config
        c = 'D' + ("%02x" % Value).upper()
        if self._writecmd(c):
            self._DIO = Value
            return True
        else:
            return False

    def Reset(self):
        """Reset the DAQ module; waits 0.5 s for it to come back up."""
        if (self._writecmd('R')):
            time.sleep(0.5)
            return True
        else:
            return False

    def _TransactCommand(self, sendstring):
        # Send one raw command over the open channel and collect the reply.
        if (self._device == '380'):
            self._sock.send(sendstring)
            return self._bt_receive_data()
        if (self._device == '300'):
            self._serial.write(sendstring)
            return self._serial_receive_data()

    def _serial_receive_data(self):
        # Accumulate from the serial port until the '\r' terminator.
        # NOTE(review): assumes str-mode reads (Python 2 / text-mode serial).
        reply = ""
        while 1:
            data = self._serial.read(1)
            reply = reply + data
            if data == '\r':
                return reply

    def _bt_receive_data(self):
        # Accumulate from the RFCOMM socket until the '\r' terminator.
        reply = ""
        while 1:
            data = self._sock.recv(1)
            reply = reply + data
            if data == '\r':
                return reply

    def _DeadTimeComp(self, RawValue):
        # Invert the tick count (65535 + 11 compensates switching dead time)
        # and clamp the result to the 16-bit register range.
        temp = 65535 + 11 - int(RawValue)
        if temp < 0:
            return 0
        elif temp > 65535:
            return 65535
        else:
            return temp

    def _writecmd(self, str_input):
        # Send a checksummed command; True when the device acknowledges ('A').
        c = '>' + self._checksum(str_input)
        r = self._TransactCommand(c)
        if r[0:1] == 'A':
            return True
        else:
            return False

    def _checksum(self, str_input):
        """Append the two-hex-digit mod-256 checksum to ``str_input``."""
        _cs = 0
        for ch in str_input:  # direct iteration replaces the Python-2 xrange loop
            _cs = (_cs + ord(ch)) % 256
        return str_input + ("%02x" % _cs).upper()

    def _checksum_OK(self, str_input):
        """Validate a device reply: (True, payload) or (False, "Err")."""
        str_input = str_input[0:len(str_input) - 1]  # drop the trailing '\r'
        str_output = ''
        if str_input[0:1] == 'A':
            str_output = str_input[1:len(str_input) - 2]
            chksum = str_input[len(str_input) - 2:len(str_input)]
            _cs = 0
            for ch in str_output:
                _cs = (_cs + ord(ch)) % 256
            if ("%02x" % _cs).upper() == chksum:
                return (True, str_output)
        return (False, "Err")
__date__ = '$Date: 16 Dec 2011 $'
no_blue = False;
try:
import bluetooth
except ImportError:
no_blue = True;
no_serial = False;
try:
import serial
except ImportError:
no_serial = True;
import math
import time
uuid = "00001101-0000-1000-8000-00805F9B34FB"
class Emant300:
    """Driver for the EMANT300 (serial) / EMANT380 (Bluetooth) DAQ module.

    The device speaks an ASCII request/response protocol: each command is
    '>' + payload + a two-hex-digit mod-256 checksum; each reply starts
    with 'A' on success and is terminated by '\r'.
    """

    # Analog input multiplexer codes.
    AIN0, AIN1, AIN2, AIN3, AIN4, AIN5, COM, DIODE = (0, 1, 2, 3, 4, 5, 8, 15)
    # ADC polarity selectors.
    Unipolar, Bipolar = (1, 0)
    # Voltage-reference selectors (2.5 V / 1.25 V).
    V2_5, V1_25 = (1, 0)
    # Counter block mode: event counter or PWM generator.
    POC_Count, POC_PWM = (0, 1)
    # Counter trigger: timed interval or event driven.
    EOT_Timed, EOT_Event = (0, 1)

    def __init__(self):
        # Cached hardware/protocol state; refreshed by Open()/Config*().
        self._HwId = ""
        self._CommOpen = False
        self._DIO_Config = 8
        self._DIO = 255
        self._Polarity = self.Unipolar
        self._Gain = 0
        self._VRef = self.V2_5
        self._ODAC = 0
        self._ADCON0 = 0x30
        self._ADCON1 = 0x41
        self._Decimation = 0x07F1
        self._ACLK = 0x10
        self._sock = None
        self._MSINT = 100
        self._EventOrTimed = self.EOT_Timed
        self._PWMOrCnt = self.POC_PWM
        self._Counter = 0

    def HwId(self):
        """Return the hardware-id string read during Open()."""
        return self._HwId

    def Open(self, Comm_Port, reset=True, dev='380'):
        """Open the communication channel and read the hardware id.

        Comm_Port is a Bluetooth address for dev='380' or a serial port
        name for dev='300'.  Returns True when the channel is open.
        """
        self._CommPort = Comm_Port
        self._device = dev
        if (self._device == '380'):
            service_matches = bluetooth.find_service(uuid=uuid, address=Comm_Port)
            if len(service_matches) == 0:
                self._CommOpen = False
                return self._CommOpen
            first_match = service_matches[0]
            port = first_match["port"]
            name = first_match["name"]
            host = first_match["host"]
            # Create the RFCOMM client socket.
            self._sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self._sock.connect((host, port))
            self._CommOpen = True
        if (self._device == '300'):
            self._serial = serial.Serial(Comm_Port, 115200, timeout=5)
            # BUG FIX: the original stored the bound method object (always
            # truthy) instead of calling it.
            self._CommOpen = self._serial.isOpen()
        c = '>' + self._checksum('i')
        r = self._TransactCommand(c)
        (st, hw_id) = self._checksum_OK(r)
        self._HwId = hw_id
        if (reset):
            self.Reset()
        return self._CommOpen

    def Close(self):
        """Close whichever channel Open() established."""
        if (self._device == '380'):
            self._sock.close()
        if (self._device == '300'):
            self._serial.close()

    def ConfigDIO(self, Value):
        """Configure the 8 DIO bits: 1 = input, 0 = output.  True on success."""
        if (Value <= 255):
            self._DIO_Config = Value
            return True
        else:
            return False

    def ConfigAnalog(self, InputLimit, Polarity, SampleFreq):
        """Configure ADC gain, polarity and sample rate.

        InputLimit is clipped to the selected reference voltage; the PGA
        gain is the largest power of two that still covers InputLimit.
        """
        if (self._VRef == self.V2_5):
            reflimit = 2.5
        else:
            reflimit = 1.25
        if (InputLimit > reflimit):
            InputLimit = reflimit
        Gain = int(math.log(reflimit / InputLimit) / math.log(2))
        self._ADCON0 = self._ADCON0 & 0xF8
        self._ADCON0 = int(Gain | self._ADCON0)
        self._Gain = Gain
        self._ADCON1 = self._ADCON1 & 0xBF
        self._ADCON1 = int(Polarity << 6) | self._ADCON1
        self._Polarity = Polarity
        temp = (170 / SampleFreq) - 1
        if (temp < 1):
            temp = 1
        self._ACLK = int(temp)
        # 345606.25 = 22118800 / 64 (crystal frequency / fixed divider)
        self._Decimation = int(345606.25 / ((self._ACLK + 1) * SampleFreq))
        # Work around the 2047 decimation-register limit by slowing ACLK.
        if (self._Decimation > 2047):
            self._ACLK = self._ACLK + 1
            self._Decimation = int(345606.25 / ((self._ACLK + 1) * SampleFreq))
        return self.ConfigAnalogAdvance()

    def ConfigAnalogAdvance(self):
        """Push the raw ADC registers; return (ok, actual_sample_freq)."""
        c = ("03F" + ("%02x" % self._ODAC).upper() +
             ("%02x" % self._ADCON0).upper() + ("%02x" % self._ADCON1).upper() +
             ("%04x" % self._Decimation).upper() + ("%02x" % self._ACLK).upper())
        actSampFreq = 22118800 / ((self._ACLK + 1) * self._Decimation * 64)
        return (self._writecmd(c), actSampFreq)

    def ReadAnalogWaveform(self, PIn, NIn, NumberOfSamples):
        """Read NumberOfSamples 16-bit samples between pins PIn and NIn.

        Returns a list of voltages (empty on a checksum failure).
        """
        wavefm = []
        ai = (PIn * 16 + NIn) % 256
        c = ">" + self._checksum("v" + ("%02x" % ai).upper() + ("%04x" % NumberOfSamples).upper())
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            for i in range(NumberOfSamples):
                hexstr = result[i * 4: (i + 1) * 4]
                rawdata = int(hexstr, 16)
                if (self._Polarity == self.Bipolar):
                    # Sign-extend the 16-bit two's-complement sample.
                    if rawdata > 0x7FFF:
                        rawdata = rawdata - 0x10000
                    rawdata = rawdata * 2
                g = 1 << self._Gain
                rawdata = rawdata // g  # floor division, matching the original Python 2 '/'
                volt = (rawdata * 1.25 * (1 + self._VRef)) / 0xFFFF
                wavefm.append(volt)
        return wavefm

    def ReadAnalog(self, PIn, NIn):
        """Single 24-bit reading between pins PIn and NIn -> (volts, raw)."""
        ai = (PIn * 16 + NIn) % 256
        c = '>' + self._checksum('t' + ("%02x" % ai).upper())
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            rawdata = int(result, 16)
            if (self._Polarity == self.Bipolar):
                # Sign-extend the 24-bit two's-complement sample.
                if rawdata > 0x7FFFFF:
                    rawdata = rawdata - 0x1000000
                rawdata = rawdata * 2
            g = 1 << self._Gain
            rawdata = rawdata // g  # floor division, matching the original Python 2 '/'
            volt = (rawdata * 1.25 * (1 + self._VRef)) / 0xFFFFFF
            RawData = rawdata
        else:
            # BUG FIX: the original left ``volt`` unassigned on this path,
            # raising NameError at the return below.
            volt = 0.0
            RawData = 0
        return (volt, RawData)

    def WriteAnalog(self, Value):
        """Write 0..1 of full scale to the IDAC.  False when out of range."""
        if Value > 1:
            return False
        if Value < 0:
            return False
        temp = int(Value * 255)
        c = ('E01' + ("%02x" % temp).upper())
        return self._writecmd(c)

    def ConfigPWMCounter(self, PWMOrCnt, EventOrTimed=EOT_Timed, MSInt=100, SetCount=0):
        """Configure the counter/PWM block; SetCount applies to event counting only."""
        self._MSINT = MSInt
        self._PWMOrCnt = PWMOrCnt
        self._EventOrTimed = EventOrTimed
        temp = PWMOrCnt + EventOrTimed * 2
        if (self._PWMOrCnt == self.POC_Count) and (self._EventOrTimed == self.EOT_Event):
            c = "133" + ("%02x" % self._MSINT).upper() + ("%02x" % temp).upper() + ("%04x" % SetCount).upper()
            return self._writecmd(c)
        else:
            c = "130" + ("%02x" % self._MSINT).upper() + ("%02x" % temp).upper()
            return self._writecmd(c)

    def WritePWM(self, Period, DutyCycle):
        """Program the PWM output; DutyCycle is a percentage."""
        Per1 = Period * 1.8432  # convert to clock ticks (1.8432 ticks per period unit)
        Dut1 = DutyCycle / 100.0
        PerH = self._DeadTimeComp(Per1 * (1 - Dut1))
        PerL = self._DeadTimeComp(Per1 * Dut1)
        c = ("EF0" + ("%04x" % PerH).upper() + ("%04x" % PerL).upper())
        return self._writecmd(c)

    def ReadCounter(self):
        """Read the 16-bit counter; return (count, period_s) or (-1, 0) on error."""
        c = ">" + self._checksum("h")
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            self._Counter = int(result, 16)
            if (self._Counter == 0):
                Period = 0.0000000001  # tiny sentinel avoids div-by-zero downstream
            else:
                Period = (float(self._MSINT + 1) / self._Counter) / 1000
            return (self._Counter, Period)
        else:
            return (-1, 0)

    def ReadDigitalBit(self, Address):
        """Return True when DIO bit ``Address`` is set (refreshes the port first)."""
        mask = 1
        self.ReadDigitalPort()
        maskresult = self._DIO & (mask << Address)
        return (maskresult != 0)  # '!=' replaces the Python-2-only '<>'

    def ReadDigitalPort(self):
        """Read the 8-bit DIO port; returns 1 on a checksum failure."""
        c = '>' + self._checksum('d')
        r = self._TransactCommand(c)
        (resbool, result) = self._checksum_OK(r)
        if resbool:
            self._DIO = int(result, 16)
            return self._DIO
        else:
            return 1

    def WriteDigitalBit(self, Address, State):
        """Set or clear a single DIO bit, preserving the cached port value."""
        mask = 1
        mask = mask << Address
        if State:
            maskresult = self._DIO | mask
        else:
            maskresult = self._DIO & (mask ^ 255)
        return self.WriteDigitalPort(maskresult)

    def WriteDigitalPort(self, Value):
        """Write an 8-bit value to the DIO port (input-configured bits forced high)."""
        Value = Value | self._DIO_Config
        c = 'D' + ("%02x" % Value).upper()
        if self._writecmd(c):
            self._DIO = Value
            return True
        else:
            return False

    def Reset(self):
        """Reset the DAQ module; waits 0.5 s for it to come back up."""
        if (self._writecmd('R')):
            time.sleep(0.5)
            return True
        else:
            return False

    def _TransactCommand(self, sendstring):
        # Send one raw command over the open channel and collect the reply.
        if (self._device == '380'):
            self._sock.send(sendstring)
            return self._bt_receive_data()
        if (self._device == '300'):
            self._serial.write(sendstring)
            return self._serial_receive_data()

    def _serial_receive_data(self):
        # Accumulate from the serial port until the '\r' terminator.
        # NOTE(review): assumes str-mode reads (Python 2 / text-mode serial).
        reply = ""
        while 1:
            data = self._serial.read(1)
            reply = reply + data
            if data == '\r':
                return reply

    def _bt_receive_data(self):
        # Accumulate from the RFCOMM socket until the '\r' terminator.
        reply = ""
        while 1:
            data = self._sock.recv(1)
            reply = reply + data
            if data == '\r':
                return reply

    def _DeadTimeComp(self, RawValue):
        # Invert the tick count (65535 + 11 compensates switching dead time)
        # and clamp the result to the 16-bit register range.
        temp = 65535 + 11 - int(RawValue)
        if temp < 0:
            return 0
        elif temp > 65535:
            return 65535
        else:
            return temp

    def _writecmd(self, str_input):
        # Send a checksummed command; True when the device acknowledges ('A').
        c = '>' + self._checksum(str_input)
        r = self._TransactCommand(c)
        if r[0:1] == 'A':
            return True
        else:
            return False

    def _checksum(self, str_input):
        """Append the two-hex-digit mod-256 checksum to ``str_input``."""
        _cs = 0
        for ch in str_input:  # direct iteration replaces the Python-2 xrange loop
            _cs = (_cs + ord(ch)) % 256
        return str_input + ("%02x" % _cs).upper()

    def _checksum_OK(self, str_input):
        """Validate a device reply: (True, payload) or (False, "Err")."""
        str_input = str_input[0:len(str_input) - 1]  # drop the trailing '\r'
        str_output = ''
        if str_input[0:1] == 'A':
            str_output = str_input[1:len(str_input) - 2]
            chksum = str_input[len(str_input) - 2:len(str_input)]
            _cs = 0
            for ch in str_output:
                _cs = (_cs + ord(ch)) % 256
            if ("%02x" % _cs).upper() == chksum:
                return (True, str_output)
        return (False, "Err")
import time
import os
import argparse
import cv2
import glob
def calculate_flow(use_gpu=True, device_id=None, vid_file=None, flow_x=None, flow_y=None, image=None, boundary=20, opt_type=1, step=1, out_type='dir'):
    """Build and run the optical-flow extraction command line.

    The parameters mirror the flags of the ./extract_gpu / ./extract_cpu
    binaries. Prints a message and returns early (None) when a required
    argument is missing or invalid; otherwise executes the command.
    """
    if use_gpu:
        command = './extract_gpu '
    else:
        command = './extract_cpu '
    if device_id is not None:
        command = command + '-d='+str(device_id)+' '
    if vid_file is not None:
        command = command + '-f='+vid_file+' '
    else:
        print('No video file informed')
        return
    if flow_x is not None:
        command = command + '-x='+flow_x+' '
    else:
        print('No flow_x destination informed')
        return
    if flow_y is not None:
        command = command + '-y='+flow_y+' '
    else:
        print('No flow_y destination informed')
        # BUG FIX: previously fell through and ran the command without -y
        return
    if image is not None:
        command = command + '-i='+image+' '
    if boundary is not None:
        command = command + '-b='+str(boundary)+' '
    else:
        print('Boundary is not defined')
        return
    if opt_type is not None:
        command = command + '-t='+str(opt_type)+' '
    else:
        print('Algorithm is not defined')
        return
    if step is not None:
        command = command + '-s='+str(step)+' '
    else:
        print('Step is not defined')
        return
    if out_type is not None and out_type in ['dir', 'zip']:
        command = command + '-o='+out_type
    else:
        print('Output type is not defined or is invalid')
        return
    # NOTE(review): arguments are interpolated into a shell string; paths
    # containing shell metacharacters will be mis-handled (consider
    # subprocess.run with an argument list instead of os.system).
    os.system(command)
def create_flow_video(directory=None, filter=None, video_path=None, frame_rate=25, dimension=None, delete_image_files=True):
    """Assemble the JPEG frames in *directory* matching *filter* into an
    MJPG video at *video_path*.

    dimension: optional (width, height) for the output video. BUG FIX:
    this parameter was previously ignored and the size hard-coded; it now
    defaults to the old hard-coded (342, 256) when not given.
    delete_image_files: when True, each frame file is removed after it is
    written to the video.
    """
    size = (342, 256) if dimension is None else tuple(dimension)
    files = glob.glob(directory + '/' + filter + '*.jpg')
    files.sort()
    writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(frame_rate), size)
    try:
        for frame_path in files:
            writer.write(cv2.imread(frame_path))
            if delete_image_files:
                # os.remove instead of shelling out to `rm` (safer, portable)
                os.remove(frame_path)
    finally:
        writer.release()
if __name__ == '__main__':
    # Command-line front end for calculate_flow().
    parser = argparse.ArgumentParser(description='OpticalFlow estimator for CPU or GPU')
    # BUG FIX: argparse's type=bool returns True for ANY non-empty string
    # (including "False"); parse the text explicitly so "-g False" works.
    parser.add_argument('-g', '--use_gpu', type=lambda s: str(s).lower() in ('1', 'true', 'yes'), help='True to execute with GPU and False to execute with CPU')
    parser.add_argument('-d', '--device_id', type=int, help='GPU id')
    parser.add_argument('-f', '--file', help='path to the original video')
    parser.add_argument('-x', '--flow_x', help='path to the x direction of the flows')
    parser.add_argument('-y', '--flow_y', help='path to the y direction of the flows')
    parser.add_argument('-i', '--image', help='path to the image of the frame')
    parser.add_argument('-b', '--boundary', type=int, help='Optical flow value upper and lower limit: values outside of (-bound, bound) are truncated. (Default = 20)')
    parser.add_argument('-t', '--type', type=int, help='optical flow algorithm (0 = Farneback, 1 = TVL1, 2 = Brox). (Default = 1)')
    parser.add_argument('-s', '--step', type=int, help='number of frames to skip when saving optical flow and rgb frames (Default = 1)')
    parser.add_argument('-o', '--out_type', help='output type - dir = images saved in directories -- zip = images saved in zip files')
    args = parser.parse_args()
calculate_flow(use_gpu=args.use_gpu, device_id=args.device_id, vid_file=args.file, flow_x=args.flow_x, flow_y=args.flow_y, image=args.image, boundary=args.boundary, opt_type=args.type, step=args.step, out_type=args.out_type) | extract_flow.py | import time
import os
import argparse
import cv2
import glob
def calculate_flow(use_gpu=True, device_id=None, vid_file=None, flow_x=None, flow_y=None, image=None, boundary=20, opt_type=1, step=1, out_type='dir'):
    """Build and run the optical-flow extraction command line.

    The parameters mirror the flags of the ./extract_gpu / ./extract_cpu
    binaries. Prints a message and returns early (None) when a required
    argument is missing or invalid; otherwise executes the command.
    """
    if use_gpu:
        command = './extract_gpu '
    else:
        command = './extract_cpu '
    if device_id is not None:
        command = command + '-d='+str(device_id)+' '
    if vid_file is not None:
        command = command + '-f='+vid_file+' '
    else:
        print('No video file informed')
        return
    if flow_x is not None:
        command = command + '-x='+flow_x+' '
    else:
        print('No flow_x destination informed')
        return
    if flow_y is not None:
        command = command + '-y='+flow_y+' '
    else:
        print('No flow_y destination informed')
        # BUG FIX: previously fell through and ran the command without -y
        return
    if image is not None:
        command = command + '-i='+image+' '
    if boundary is not None:
        command = command + '-b='+str(boundary)+' '
    else:
        print('Boundary is not defined')
        return
    if opt_type is not None:
        command = command + '-t='+str(opt_type)+' '
    else:
        print('Algorithm is not defined')
        return
    if step is not None:
        command = command + '-s='+str(step)+' '
    else:
        print('Step is not defined')
        return
    if out_type is not None and out_type in ['dir', 'zip']:
        command = command + '-o='+out_type
    else:
        print('Output type is not defined or is invalid')
        return
    # NOTE(review): arguments are interpolated into a shell string; paths
    # containing shell metacharacters will be mis-handled (consider
    # subprocess.run with an argument list instead of os.system).
    os.system(command)
def create_flow_video(directory=None, filter=None, video_path=None, frame_rate=25, dimension=None, delete_image_files=True):
    """Assemble the JPEG frames in *directory* matching *filter* into an
    MJPG video at *video_path*.

    dimension: optional (width, height) for the output video. BUG FIX:
    this parameter was previously ignored and the size hard-coded; it now
    defaults to the old hard-coded (342, 256) when not given.
    delete_image_files: when True, each frame file is removed after it is
    written to the video.
    """
    size = (342, 256) if dimension is None else tuple(dimension)
    files = glob.glob(directory + '/' + filter + '*.jpg')
    files.sort()
    writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), int(frame_rate), size)
    try:
        for frame_path in files:
            writer.write(cv2.imread(frame_path))
            if delete_image_files:
                # os.remove instead of shelling out to `rm` (safer, portable)
                os.remove(frame_path)
    finally:
        writer.release()
if __name__ == '__main__':
    # Command-line front end for calculate_flow().
    parser = argparse.ArgumentParser(description='OpticalFlow estimator for CPU or GPU')
    # BUG FIX: argparse's type=bool returns True for ANY non-empty string
    # (including "False"); parse the text explicitly so "-g False" works.
    parser.add_argument('-g', '--use_gpu', type=lambda s: str(s).lower() in ('1', 'true', 'yes'), help='True to execute with GPU and False to execute with CPU')
    parser.add_argument('-d', '--device_id', type=int, help='GPU id')
    parser.add_argument('-f', '--file', help='path to the original video')
    parser.add_argument('-x', '--flow_x', help='path to the x direction of the flows')
    parser.add_argument('-y', '--flow_y', help='path to the y direction of the flows')
    parser.add_argument('-i', '--image', help='path to the image of the frame')
    parser.add_argument('-b', '--boundary', type=int, help='Optical flow value upper and lower limit: values outside of (-bound, bound) are truncated. (Default = 20)')
    parser.add_argument('-t', '--type', type=int, help='optical flow algorithm (0 = Farneback, 1 = TVL1, 2 = Brox). (Default = 1)')
    parser.add_argument('-s', '--step', type=int, help='number of frames to skip when saving optical flow and rgb frames (Default = 1)')
    parser.add_argument('-o', '--out_type', help='output type - dir = images saved in directories -- zip = images saved in zip files')
    args = parser.parse_args()
calculate_flow(use_gpu=args.use_gpu, device_id=args.device_id, vid_file=args.file, flow_x=args.flow_x, flow_y=args.flow_y, image=args.image, boundary=args.boundary, opt_type=args.type, step=args.step, out_type=args.out_type) | 0.353205 | 0.078961 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
"""Create the mail app's schema: Attachment, MessageId and MailMessage
tables, the six many-to-many join tables hanging off MailMessage
(to/cc/bcc/attachments/replies/references), and MailBox with its
messages join table. Order matters: referenced tables are created
before the join tables that point at them."""
# Adding model 'Attachment'
db.create_table('mail_attachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=255)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('mail', ['Attachment'])
# Adding model 'MessageId'
db.create_table('mail_messageid', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('idd', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('mail', ['MessageId'])
# Adding model 'MailMessage'
db.create_table('mail_mailmessage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_from', self.gf('django.db.models.fields.EmailField')(max_length=256)),
('reply_to', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('body', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('request', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['requests.Request'], null=True, blank=True)),
('dated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, populate_from=('subject',), overwrite=False)),
('direction', self.gf('django.db.models.fields.CharField')(max_length=1)),
('message_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('received_header', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('deprecated', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('was_fwded', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('mail', ['MailMessage'])
# Adding M2M table for field to on 'MailMessage'
db.create_table('mail_mailmessage_to', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_to', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field cc on 'MailMessage'
db.create_table('mail_mailmessage_cc', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_cc', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field bcc on 'MailMessage'
db.create_table('mail_mailmessage_bcc', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_bcc', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field attachments on 'MailMessage'
db.create_table('mail_mailmessage_attachments', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('attachment', models.ForeignKey(orm['mail.attachment'], null=False))
))
db.create_unique('mail_mailmessage_attachments', ['mailmessage_id', 'attachment_id'])
# Adding M2M table for field replies on 'MailMessage'
# (self-referential join table, hence the from_/to_ column names)
db.create_table('mail_mailmessage_replies', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('to_mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False))
))
db.create_unique('mail_mailmessage_replies', ['from_mailmessage_id', 'to_mailmessage_id'])
# Adding M2M table for field references on 'MailMessage'
db.create_table('mail_mailmessage_references', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('messageid', models.ForeignKey(orm['mail.messageid'], null=False))
))
db.create_unique('mail_mailmessage_references', ['mailmessage_id', 'messageid_id'])
# Adding model 'MailBox'
db.create_table('mail_mailbox', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('usr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('provisioned_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal('mail', ['MailBox'])
# Adding M2M table for field messages on 'MailBox'
db.create_table('mail_mailbox_messages', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailbox', models.ForeignKey(orm['mail.mailbox'], null=False)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False))
))
db.create_unique('mail_mailbox_messages', ['mailbox_id', 'mailmessage_id'])
def backwards(self, orm):
"""Reverse the forwards() migration by dropping every table it created.
Mirrors the creation list one-for-one (join tables included)."""
# Deleting model 'Attachment'
db.delete_table('mail_attachment')
# Deleting model 'MessageId'
db.delete_table('mail_messageid')
# Deleting model 'MailMessage'
db.delete_table('mail_mailmessage')
# Removing M2M table for field to on 'MailMessage'
db.delete_table('mail_mailmessage_to')
# Removing M2M table for field cc on 'MailMessage'
db.delete_table('mail_mailmessage_cc')
# Removing M2M table for field bcc on 'MailMessage'
db.delete_table('mail_mailmessage_bcc')
# Removing M2M table for field attachments on 'MailMessage'
db.delete_table('mail_mailmessage_attachments')
# Removing M2M table for field replies on 'MailMessage'
db.delete_table('mail_mailmessage_replies')
# Removing M2M table for field references on 'MailMessage'
db.delete_table('mail_mailmessage_references')
# Deleting model 'MailBox'
db.delete_table('mail_mailbox')
# Removing M2M table for field messages on 'MailBox'
db.delete_table('mail_mailbox_messages')
models = {
'agency.agency': {
'Meta': {'object_name': 'Agency'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'agency_related_contacts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts.Contact']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'government': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Government']"}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contacts.address': {
'Meta': {'object_name': 'Address'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.contact': {
'Meta': {'object_name': 'Contact'},
'addresses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Address']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.EmailAddress']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Note']", 'null': 'True', 'blank': 'True'}),
'phone_numbers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Phone']", 'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Title']", 'null': 'True', 'blank': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.note': {
'Meta': {'object_name': 'Note'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.phone': {
'Meta': {'object_name': 'Phone'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.title': {
'Meta': {'object_name': 'Title'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'content': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'doccloud.document': {
'Meta': {'ordering': "['created_at']", 'object_name': 'Document'},
'access_level': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True', 'blank': 'True'}),
'dc_properties': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doccloud.DocumentCloudProperties']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'doccloud.documentcloudproperties': {
'Meta': {'object_name': 'DocumentCloudProperties'},
'dc_id': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'dc_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'government.adminname': {
'Meta': {'object_name': 'AdminName'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.feeexemptionother': {
'Meta': {'object_name': 'FeeExemptionOther'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'typee': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.government': {
'Meta': {'object_name': 'Government'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'holidays': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Holiday']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Nation']", 'null': 'True', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'statutes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_statutes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['government.Statute']"}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.holiday': {
'Meta': {'object_name': 'Holiday'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.language': {
'Meta': {'object_name': 'Language'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.nation': {
'Meta': {'object_name': 'Nation'},
'admin_0_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_0_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_1_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_1_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_2_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_2_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_3_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_3_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'foi_languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Language']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'primary_language': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_language_nations'", 'null': 'True', 'to': "orm['government.Language']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.statute': {
'Meta': {'object_name': 'Statute'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_till_due': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'designator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fees_exemptions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.FeeExemptionOther']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('short_title',)", 'overwrite': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Update']", 'null': 'True', 'blank': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.update': {
'Meta': {'object_name': 'Update'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'headline': ('django.db.models.fields.CharField', [], {'default': "'The latest'", 'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'pubbed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'mail.attachment': {
'Meta': {'object_name': 'Attachment'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'mail.mailbox': {
'Meta': {'object_name': 'MailBox'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'messages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mailbox_messages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.MailMessage']"}),
'provisioned_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'usr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'mail.mailmessage': {
'Meta': {'object_name': 'MailMessage'},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_attachments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.Attachment']"}),
'bcc': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_bcc'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cc': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_cc'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'received_header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_references'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.MessageId']"}),
'replies': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'replies_rel_+'", 'null': 'True', 'to': "orm['mail.MailMessage']"}),
'reply_to': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requests.Request']", 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('subject',)", 'overwrite': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_to'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'was_fwded': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'mail.messageid': {
'Meta': {'object_name': 'MessageId'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idd': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'requests.recordtype': {
'Meta': {'object_name': 'RecordType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'True'})
},
'requests.request': {
'Meta': {'object_name': 'Request'},
'acceptable_responses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['requests.ResponseFormat']", 'null': 'True', 'blank': 'True'}),
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['agency.Agency']", 'null': 'True', 'blank': 'True'}),
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['mail.Attachment']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_contacts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts.Contact']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_fulfilled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'days_outstanding': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_docs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['doccloud.Document']"}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fee_waiver': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_response_time': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_edit_body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'government': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Government']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keep_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_contact_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'max_cost': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'official_stats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phone_contact': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'prefer_electornic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'printed': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'printed_request'", 'null': 'True', 'to': "orm['mail.Attachment']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'record_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['requests.RecordType']", 'null': 'True', 'blank': 'True'}),
'request_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'request_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response_overdue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scheduled_send_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'supporters': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'supporter'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'thread_lookup': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'requests.responseformat': {
'Meta': {'object_name': 'ResponseFormat'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file_extension': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['mail'] | foiamachine/apps/mail/migrations/0001_initial.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Attachment'
db.create_table('mail_attachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=255)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('mail', ['Attachment'])
# Adding model 'MessageId'
db.create_table('mail_messageid', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('idd', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('mail', ['MessageId'])
# Adding model 'MailMessage'
db.create_table('mail_mailmessage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_from', self.gf('django.db.models.fields.EmailField')(max_length=256)),
('reply_to', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('body', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('request', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['requests.Request'], null=True, blank=True)),
('dated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=50, separator=u'-', blank=True, populate_from=('subject',), overwrite=False)),
('direction', self.gf('django.db.models.fields.CharField')(max_length=1)),
('message_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('received_header', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('deprecated', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('was_fwded', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('mail', ['MailMessage'])
# Adding M2M table for field to on 'MailMessage'
db.create_table('mail_mailmessage_to', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_to', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field cc on 'MailMessage'
db.create_table('mail_mailmessage_cc', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_cc', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field bcc on 'MailMessage'
db.create_table('mail_mailmessage_bcc', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('emailaddress', models.ForeignKey(orm['core.emailaddress'], null=False))
))
db.create_unique('mail_mailmessage_bcc', ['mailmessage_id', 'emailaddress_id'])
# Adding M2M table for field attachments on 'MailMessage'
db.create_table('mail_mailmessage_attachments', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('attachment', models.ForeignKey(orm['mail.attachment'], null=False))
))
db.create_unique('mail_mailmessage_attachments', ['mailmessage_id', 'attachment_id'])
# Adding M2M table for field replies on 'MailMessage'
db.create_table('mail_mailmessage_replies', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('to_mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False))
))
db.create_unique('mail_mailmessage_replies', ['from_mailmessage_id', 'to_mailmessage_id'])
# Adding M2M table for field references on 'MailMessage'
db.create_table('mail_mailmessage_references', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False)),
('messageid', models.ForeignKey(orm['mail.messageid'], null=False))
))
db.create_unique('mail_mailmessage_references', ['mailmessage_id', 'messageid_id'])
# Adding model 'MailBox'
db.create_table('mail_mailbox', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('usr', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('provisioned_email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal('mail', ['MailBox'])
# Adding M2M table for field messages on 'MailBox'
db.create_table('mail_mailbox_messages', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mailbox', models.ForeignKey(orm['mail.mailbox'], null=False)),
('mailmessage', models.ForeignKey(orm['mail.mailmessage'], null=False))
))
db.create_unique('mail_mailbox_messages', ['mailbox_id', 'mailmessage_id'])
def backwards(self, orm):
# Deleting model 'Attachment'
db.delete_table('mail_attachment')
# Deleting model 'MessageId'
db.delete_table('mail_messageid')
# Deleting model 'MailMessage'
db.delete_table('mail_mailmessage')
# Removing M2M table for field to on 'MailMessage'
db.delete_table('mail_mailmessage_to')
# Removing M2M table for field cc on 'MailMessage'
db.delete_table('mail_mailmessage_cc')
# Removing M2M table for field bcc on 'MailMessage'
db.delete_table('mail_mailmessage_bcc')
# Removing M2M table for field attachments on 'MailMessage'
db.delete_table('mail_mailmessage_attachments')
# Removing M2M table for field replies on 'MailMessage'
db.delete_table('mail_mailmessage_replies')
# Removing M2M table for field references on 'MailMessage'
db.delete_table('mail_mailmessage_references')
# Deleting model 'MailBox'
db.delete_table('mail_mailbox')
# Removing M2M table for field messages on 'MailBox'
db.delete_table('mail_mailbox_messages')
models = {
'agency.agency': {
'Meta': {'object_name': 'Agency'},
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'agency_related_contacts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts.Contact']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'government': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Government']"}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contacts.address': {
'Meta': {'object_name': 'Address'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.contact': {
'Meta': {'object_name': 'Contact'},
'addresses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Address']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.EmailAddress']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Note']", 'null': 'True', 'blank': 'True'}),
'phone_numbers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Phone']", 'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contacts.Title']", 'null': 'True', 'blank': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.note': {
'Meta': {'object_name': 'Note'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.phone': {
'Meta': {'object_name': 'Phone'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contacts.title': {
'Meta': {'object_name': 'Title'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'content': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'doccloud.document': {
'Meta': {'ordering': "['created_at']", 'object_name': 'Document'},
'access_level': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True', 'blank': 'True'}),
'dc_properties': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doccloud.DocumentCloudProperties']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'doccloud.documentcloudproperties': {
'Meta': {'object_name': 'DocumentCloudProperties'},
'dc_id': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'dc_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'government.adminname': {
'Meta': {'object_name': 'AdminName'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.feeexemptionother': {
'Meta': {'object_name': 'FeeExemptionOther'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'typee': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.government': {
'Meta': {'object_name': 'Government'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'holidays': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Holiday']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Nation']", 'null': 'True', 'blank': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'statutes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_statutes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['government.Statute']"}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.holiday': {
'Meta': {'object_name': 'Holiday'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.language': {
'Meta': {'object_name': 'Language'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.nation': {
'Meta': {'object_name': 'Nation'},
'admin_0_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_0_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_1_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_1_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_2_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_2_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'admin_3_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_3_nations'", 'null': 'True', 'to': "orm['government.AdminName']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'foi_languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Language']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'primary_language': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_language_nations'", 'null': 'True', 'to': "orm['government.Language']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'False'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.statute': {
'Meta': {'object_name': 'Statute'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_till_due': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'designator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fees_exemptions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.FeeExemptionOther']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('short_title',)", 'overwrite': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['government.Update']", 'null': 'True', 'blank': 'True'}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'government.update': {
'Meta': {'object_name': 'Update'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'headline': ('django.db.models.fields.CharField', [], {'default': "'The latest'", 'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'pubbed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'yay_votes': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'mail.attachment': {
'Meta': {'object_name': 'Attachment'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'mail.mailbox': {
'Meta': {'object_name': 'MailBox'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'messages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mailbox_messages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.MailMessage']"}),
'provisioned_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'usr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'mail.mailmessage': {
'Meta': {'object_name': 'MailMessage'},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_attachments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.Attachment']"}),
'bcc': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_bcc'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'cc': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_cc'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deprecated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'direction': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'received_header': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_references'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mail.MessageId']"}),
'replies': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'replies_rel_+'", 'null': 'True', 'to': "orm['mail.MailMessage']"}),
'reply_to': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['requests.Request']", 'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('subject',)", 'overwrite': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'to': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_to'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.EmailAddress']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'was_fwded': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'mail.messageid': {
'Meta': {'object_name': 'MessageId'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idd': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'requests.recordtype': {
'Meta': {'object_name': 'RecordType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'True'})
},
'requests.request': {
'Meta': {'object_name': 'Request'},
'acceptable_responses': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['requests.ResponseFormat']", 'null': 'True', 'blank': 'True'}),
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['agency.Agency']", 'null': 'True', 'blank': 'True'}),
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['mail.Attachment']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_contacts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['contacts.Contact']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_fulfilled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'days_outstanding': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_docs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['doccloud.Document']"}),
'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fee_waiver': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_response_time': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'free_edit_body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'government': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['government.Government']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keep_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_contact_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'lifetime': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'max_cost': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'official_stats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phone_contact': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'prefer_electornic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'printed': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'printed_request'", 'null': 'True', 'to': "orm['mail.Attachment']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'record_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['requests.RecordType']", 'null': 'True', 'blank': 'True'}),
'request_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'request_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response_overdue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scheduled_send_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('title',)", 'overwrite': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'supporters': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'supporter'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'thread_lookup': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'requests.responseformat': {
'Meta': {'object_name': 'ResponseFormat'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file_extension': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "('name',)", 'overwrite': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['mail'] | 0.446012 | 0.087175 |
import logging as log
log.basicConfig(level=log.INFO)
try:
from .speech2text import Transcriber
except:
log.warning("Transcriber not imported!")
from .ontology import FoodOntology
from .analysis import Analyzer
from .dialog_management import DialogManager, Utterance, State
from .generation import Generator, ResponseType
from .text2speech import Synthesizer
class Pipeline:
def __init__(self, ontology_filepath):
self.sessions = {'default': State()}
try:
self.transcriber = Transcriber()
except:
log.warning("Transcriber not loaded!")
log.info("Loading ontology...")
self.ontology = FoodOntology(ontology_filepath)
log.info("Loading analyzer...")
self.analyzer = Analyzer(self.ontology)
log.info("Loading dialog manager...")
self.dialog_manager = DialogManager(self.ontology)
log.info("Loading generator...")
self.generator = Generator()
log.info("Loading synthesizer...")
self.synthesizer = Synthesizer()
def process_audio(self, audio_file, generate_audio=False, session_id='default'):
text = self.transcriber.transcribe(audio_file)
return self.process_text(text, generate_audio, session_id)
def process_text(self, text, generate_audio=False, session_id='default'):
try:
if not text:
response = self.generator.generate_utterance(Utterance(ResponseType.CLARIFY))
else:
concepts = self.analyzer.analyze(text)
#print("concepts = ",concepts)
utterance = self.dialog_manager.evaluate(self.sessions[session_id], concepts)
response = self.generator.generate(utterance)
if generate_audio:
response = self.synthesizer.synthesize(response)
except Exception as e:
log.error(e)
response = self.generator.generate_utterance(Utterance(ResponseType.ERROR))
raise
return response
def new_session(self, bot_initiative=False, session_id='default', session_type='cmd'):
self.sessions[session_id] = State()
self.sessions[session_id].session_type = session_type
self.sessions[session_id].has_initiative = bot_initiative
if self.sessions[session_id].has_initiative:
response = self.generator.generate_utterance(Utterance(ResponseType.GREETING_INITIATIVE))
else:
response = None
return session_id, response
def is_finished(self, session_id='default'):
return self.sessions[session_id].finished | src/pipeline.py | import logging as log
log.basicConfig(level=log.INFO)
try:
from .speech2text import Transcriber
except:
log.warning("Transcriber not imported!")
from .ontology import FoodOntology
from .analysis import Analyzer
from .dialog_management import DialogManager, Utterance, State
from .generation import Generator, ResponseType
from .text2speech import Synthesizer
class Pipeline:
def __init__(self, ontology_filepath):
self.sessions = {'default': State()}
try:
self.transcriber = Transcriber()
except:
log.warning("Transcriber not loaded!")
log.info("Loading ontology...")
self.ontology = FoodOntology(ontology_filepath)
log.info("Loading analyzer...")
self.analyzer = Analyzer(self.ontology)
log.info("Loading dialog manager...")
self.dialog_manager = DialogManager(self.ontology)
log.info("Loading generator...")
self.generator = Generator()
log.info("Loading synthesizer...")
self.synthesizer = Synthesizer()
def process_audio(self, audio_file, generate_audio=False, session_id='default'):
text = self.transcriber.transcribe(audio_file)
return self.process_text(text, generate_audio, session_id)
def process_text(self, text, generate_audio=False, session_id='default'):
try:
if not text:
response = self.generator.generate_utterance(Utterance(ResponseType.CLARIFY))
else:
concepts = self.analyzer.analyze(text)
#print("concepts = ",concepts)
utterance = self.dialog_manager.evaluate(self.sessions[session_id], concepts)
response = self.generator.generate(utterance)
if generate_audio:
response = self.synthesizer.synthesize(response)
except Exception as e:
log.error(e)
response = self.generator.generate_utterance(Utterance(ResponseType.ERROR))
raise
return response
def new_session(self, bot_initiative=False, session_id='default', session_type='cmd'):
self.sessions[session_id] = State()
self.sessions[session_id].session_type = session_type
self.sessions[session_id].has_initiative = bot_initiative
if self.sessions[session_id].has_initiative:
response = self.generator.generate_utterance(Utterance(ResponseType.GREETING_INITIATIVE))
else:
response = None
return session_id, response
def is_finished(self, session_id='default'):
return self.sessions[session_id].finished | 0.188212 | 0.044974 |
import numpy
from numpy import *
class ProcessingSequence:
def __init__(self,
mimo_demodulation,
array_preprocessing,
fourier_transformations,
noise_power_estimation,
peak_finding,
Tsl_r,Tsl_v,Tsl_a,Tsl_e,
scaling,
mimo_post_Fourier=True):
self.mimo_post_Fourier = mimo_post_Fourier
self.mimo_demodulation = mimo_demodulation
self.array_preprocessing = array_preprocessing
self.fourier_transformations = fourier_transformations
self.noise_power_estimation = noise_power_estimation
self.sidelobe_thresholding = SidelobeThresholding(Tsl_r,Tsl_v,Tsl_a,Tsl_e)
self.peak_finding = peak_finding
self.peak_finding.set_validate_peaks([self.noise_power_estimation.validate_peaks,
self.sidelobe_thresholding.validate_peaks,
self.mimo_demodulation.validate_peaks])
self.Lf = self.fourier_transformations.Lf
self.Mf = self.fourier_transformations.Mf
self.Nf = self.fourier_transformations.Nf
self.Of = self.fourier_transformations.Of
self.w_r = self.fourier_transformations.w_r
self.w_v = self.fourier_transformations.w_v
self.w_a = self.fourier_transformations.w_a
self.w_e = self.fourier_transformations.w_e
Ls,Ms,Ns,Os = self.w_r.shape[0],self.w_v.shape[0],self.w_a.shape[0],self.w_e.shape[0]
self.L,self.M,self.N,self.O = self.peak_finding.L,self.peak_finding.M,self.peak_finding.N,self.peak_finding.O
self.scaling = scaling
self.ura_channels = self.array_preprocessing.ura_channels
def __call__(self,x):
print("Fast-time preprocessing")
print(" Transpose")
x = swapaxes(x,0,1)
print(" DFT fast time")
X = self.fourier_transformations.dft_fast_time(x)[0:(self.fourier_transformations.Lf//2),:,:]
print("Preprocessing per range cell")
if not self.mimo_post_Fourier:
print(" MIMO Demodulation")
X = self.mimo_demodulation.pre_fourier(X)
print(" DFT slow time")
X = self.fourier_transformations.dft_slow_time(X)[:,:,self.ura_channels]
else:
print(" Doppler DFTs")
X = self.fourier_transformations.dft_slow_time(X)
print(" MIMO demodulation")
X = self.mimo_demodulation.post_fourier(X)[:,:,self.ura_channels]
print(" Array preprocessing")
X = self.array_preprocessing(X)
print(" DFT y-channels")
X = self.fourier_transformations.dft_y_channels(X)
print(" DFT z-channels")
X = self.fourier_transformations.dft_z_channels(X)
X *= self.fourier_transformations.scale_dft_noise
X *= self.scaling
print(" Power spectrum")
P = 20*log10(abs(X))
print(" Noise power estimation")
self.noise_power_estimation(P)
print(" 4D Peak Finding")
peak_list_4d = self.peak_finding(X,P)
return X,P,self.noise_power_estimation.P_noise,peak_list_4d
class SlowTimeMimoDemodulation:
def __init__(self,phase_codes,phase_increments=None):
self.set_phase_codes(phase_codes)
if phase_increments is not None:
self.phase_increments = phase_increments
def set_phase_codes(self,phase_codes):
self.phase_codes = phase_codes
self.Nt = self.phase_codes.shape[1]
self.M = self.phase_codes.shape[0]
self._determine_blocked_frequencies()
def _determine_blocked_frequencies(self):
self.fv_b = empty(0)
A,S,Pe = exp(1j*self.phase_codes), abs(eye(self.Nt)-1),zeros(self.M+2)
for nt in arange(self.Nt):
P = abs(fft.fft(dot(A*outer(A[:,nt],ones(self.Nt)).conj(),S[:,nt])))**2
Pe[1:-1],Pe[0],Pe[-1] = P,P[-1],P[0]
peak_mask = ((diff(sign(diff(Pe,1)),1) < 0) & (P > (P.max()*10**(-20/10))))
self.fv_b = append(self.fv_b,2*pi*arange(self.M)[peak_mask]/float(self.M))
self.fv_b = unique(self.fv_b)
def validate_peaks(self,P,lp,mp,np,op,
sz_fr,sz_fv,sz_fa,sz_fe,
fr_a=0,fv_a=0,fa_a=0,fe_a=0):
valid_mask = ones(lp.shape,bool)
Lr,Mr,Nr,Or = P.shape
P = reshape(P,(Lr,Mr,Nr*Or))
Pm=P.max(axis=2)
for fv_b in self.fv_b:
mb = numpy.round((( fv_a + sz_fv*mp + fv_b) % (2*pi))/sz_fv-fv_a).astype(int)
mb[mb<0],mb[mb>=Mr] = mp[mb<0],mp[mb>=Mr]
valid_mask &= Pm[lp,mp] >= Pm[lp,mb]
return lp[valid_mask],mp[valid_mask],np[valid_mask],op[valid_mask]
#Demodulation with arbitrary phase sequence
def pre_fourier(self,x):
Ls,Ms,Nr = x.shape
x_mimo,A = zeros((Ls,Ms,(Nr*self.Nt)),complex_),exp(-1j*self.phase_codes)
for nt in arange(self.Nt):
rc_nt = x*reshape(kronv(kronv(ones(Ls),A[:,nt]),ones(Nr)),(Ls,Ms,Nr))
x_mimo[:,:,nt*Nr:(nt+1)*Nr] = rc_nt
return x_mimo
def post_fourier(self,x):
Ls,Ms,Nr = x.shape
x_mimo = zeros((Ls,Ms,(Nr*self.Nt)),complex_)
for nt in arange(self.Nt):
shift = np.round((Ms/(2*pi))*self.phase_increments[nt]).astype(int)
x_mimo[:,:,nt*Nr:(nt+1)*Nr] = roll(x,-shift,axis=1)
return x_mimo
class ArrayPreprocessing:
def __init__(self,p_channels,dy_ura,dz_ura,ura_channels,sub_channels,Ns,Os,PC,C):
self.Nc,self.Nvc = PC.shape
self.PC,self.C = PC,C
self.Ns,self.Os = Ns,Os
p_ura = dot((self.PC/sum(self.PC,axis=0)).T,p_channels[ura_channels])
self.ns,self.os = self._calculate_ura_mapping(p_ura,dy_ura,dz_ura)
self.p_channels = p_channels
self.dy_ura,self.dz_ura = dy_ura,dz_ura
self.ura_channels,self.sub_channels = ura_channels,sub_channels
def __call__(self,x):
Lr,Mr,Nv = x.shape
x = self.map_on_ura(dot(dot(reshape(x,(Lr*Mr,self.Nc)),self.C),self.PC))
return reshape(x,(Lr,Mr,self.Ns,self.Os))
def _calculate_ura_mapping(self,p_ura,dy_ura,dz_ura):
"""
Calculate indices for mapping channel positions to URA grid.
"""
if dy_ura is None:
dy_ura=abs(diff(unique(sort(p_ura[:,1])))).min()
if dz_ura is None:
dz_ura=abs(diff(unique(sort(p_ura[:,2])))).min()
#Regular grid indices
ns=np.round((p_ura[:,1]-p_ura[:,1].min())/dy_ura).astype(int)
os=np.round((p_ura[:,2]-p_ura[:,2].min())/dz_ura).astype(int)
return ns,os
def map_on_ura(self,x):
"""
Map channel positions to URA grid based on precalculated indices.
"""
x_ura = zeros((x.shape[0],self.Ns,self.Os),complex_)
x_ura[:,self.ns,self.os] = x
return x_ura
class FourierTransformations:
def __init__(self,
Lf,Mf,Nf,Of,
w_r,w_v,w_a,w_e):
self.Lf,self.Mf,self.Nf,self.Of = Lf,Mf,Nf,Of
self.w_r,self.w_v,self.w_a,self.w_e = w_r,w_v,w_a,w_e
self.Ls,self.Ms,self.Ns,self.Os = w_r.shape[0],w_v.shape[0],w_a.shape[0],w_e.shape[0]
self.w = reshape(kronv(kronv(self.w_r,self.w_v),self.w_a),(self.Ls,self.Ms,self.Ns))
self.output_scale = 1.
self.output_scale_fast_time = 1.
self.output_scale_slow_time = 1.
self.output_scale_y_channels = 1.
self.output_scale_z_channels = 1.
self.scale_dft_noise = 1/sqrt(sum(w_r**2)*sum(w_v**2)*sum(w_a**2)*sum(w_e**2))
def __call__(self,x):
return fft.fftn(self.w*x,(self.Lf,self.Mf,self.Nf))/self.output_scale
def dft_fast_time(self,x):
Lr,Mr,Nr = x.shape
w = reshape(kronv(kronv(self.w_r,ones(Mr)),ones(Nr)),(self.Ls,Mr,Nr))
return fft.fft(w*x,self.Lf,axis=0)/self.output_scale_fast_time
def dft_slow_time(self,x):
Lr,Mr,Nr = x.shape
w = reshape(kronv(kronv(ones(Lr),self.w_v),ones(Nr)),(Lr,self.Ms,Nr))
return fft.fft(w*x,self.Mf,axis=1)/self.output_scale_slow_time
def dft_y_channels(self,x):
Lr,Mr,Ns,Os = x.shape
w = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),self.w_a),ones(Os)),(Lr,Mr,self.Ns,Os))
return fft.fft(w*x,self.Nf,axis=2)/self.output_scale_y_channels
def dft_z_channels(self,x):
Lr,Mr,Nr,Os = x.shape
w = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),ones(Nr)),self.w_e),(Lr,Mr,Nr,Os))
return fft.fft(w*x,self.Of,axis=3)/self.output_scale_z_channels
class NoisePowerEstimation:
def __init__(self,order,T_power=12):
self.order = order
self.T_power=T_power
def __call__(self,P):
self.P_noise = zeros(P.shape)
P_3d = sort(P,axis=1)[:,self.order,:,:]
for m in arange(P.shape[1]):
self.P_noise[:,m,:,:] = P_3d
def validate_peaks(self,P,lp,mp,np,op,
sz_fr,sz_fv,sz_fa,sz_fe,
fr_a=0,fv_a=0,fa_a=0,fe_=0):
valid_mask = P[lp,mp,np,op] >= (self.P_noise[lp,mp,np,op] + self.T_power)
return lp[valid_mask],mp[valid_mask],np[valid_mask],op[valid_mask]
class SidelobeThresholding:
def __init__(self,Tsl_r=100,Tsl_v=100,Tsl_a=100,Tsl_e=100):
self.Tsl_r,self.Tsl_v,self.Tsl_a,self.Tsl_e = Tsl_r,Tsl_v,Tsl_a,Tsl_e
def validate_peaks(self,P,lp,mp,np,op,
sz_fr,sz_fv,sz_fa,sz_fe,
fr_a=0,fv_a=0,fa_a=0,fe_a=0):
valid_mask = P[lp,mp,np,op] >= (P[:,mp,np,op].max(axis=0) - self.Tsl_r)
valid_mask &= P[lp,mp,np,op] >= (P[lp,:,np,op].max(axis=1) - self.Tsl_v)
valid_mask &= P[lp,mp,np,op] >= (P[lp,mp,:,op].max(axis=1) - self.Tsl_a)
valid_mask &= P[lp,mp,np,op] >= (P[lp,mp,np,:].max(axis=1) - self.Tsl_e)
return lp[valid_mask],mp[valid_mask],np[valid_mask],op[valid_mask]
class PeakFinding:
def __init__(self,
sz_fr,sz_fv,sz_fa,sz_fe,
L=5,M=5,N=5,O=5,
fr_a=0,fv_a=0,fa_a=0,fe_a=0):
self.L,self.M,self.N,self.O = L,M,N,O
self.lm,self.mm,self.nm,self.om = self.L//2,self.M//2,self.N//2,self.O//2
self.sz_fr,self.sz_fv,self.sz_fa,self.sz_fe = sz_fr,sz_fv,sz_fa,sz_fe
self.validate_peaks = []
self.fr_a,self.fv_a,self.fa_a,self.fe_a = fr_a,fv_a,fa_a,fe_a
def set_validate_peaks(self,validate_peaks):
self.validate_peaks = validate_peaks
def __call__(self,X,P,Y=None):
Lr,Mr,Nr,Or = X.shape
wrap_data = array([(self.fr_a + self.sz_fr*Lr) == (2*pi),
(self.fv_a + self.sz_fv*Mr) == (2*pi),
(self.fa_a + self.sz_fa*Nr) == (2*pi),
((self.fe_a + self.sz_fe*Or) == (2*pi) and (Or > 2))])
Pe = extend_data(P,1,1,1,1,wrap_data,-500)
#Peak mask
peaks_r = (diff(sign(diff(Pe,1,axis=0)),1,axis=0) < 0)
peaks_v = (diff(sign(diff(Pe,1,axis=1)),1,axis=1) < 0)
peaks_a = (diff(sign(diff(Pe,1,axis=2)),1,axis=2) < 0)
peaks_e = (diff(sign(diff(Pe,1,axis=3)),1,axis=3) < 0)
peaks_4d = (peaks_r[:,1:-1,1:-1,1:-1] &
peaks_v[1:-1,:,1:-1,1:-1] &
peaks_a[1:-1,1:-1,:,1:-1] &
peaks_e[1:-1,1:-1,1:-1,:])
#Peak indices
lp = reshape(kronv(kronv(kronv(arange(Lr),ones(Mr)),ones(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
mp = reshape(kronv(kronv(kronv(ones(Lr),arange(Mr)),ones(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
np = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),arange(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
op = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),ones(Nr)),arange(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
for vp in self.validate_peaks:
lp,mp,np,op = vp(P,lp,mp,np,op,
self.sz_fr,self.sz_fv,self.sz_fa,self.sz_fe,
self.fr_a,self.fv_a,self.fa_a,self.fe_a)
K = lp.shape[0]
fr_ap,fv_ap,fa_ap,fe_ap = zeros(K),zeros(K),zeros(K),zeros(K)
Xp = zeros((K,self.L,self.M,self.N,self.O),complex_)
Xe = extend_data(X,self.L//2,self.M//2,self.N//2,self.O//2,wrap_data)
if Y is not None:
Yp = zeros((K,self.L,self.M,Y.shape[2]),complex_)
Ye = extend_data(Y,self.L//2,self.M//2,self.N//2,self.O//2,wrap_data)
else:
Yp = None
for i,(l_pc,m_pc,n_pc,o_pc) in enumerate(zip(lp,mp,np,op)):
l_ab = l_pc + arange(-(self.L//2),self.L//2+1)
m_ab = m_pc + arange(-(self.M//2),self.M//2+1)
n_ab = n_pc + arange(-(self.N//2),self.N//2+1)
o_ab = o_pc + arange(-(self.O//2),self.O//2+1)
fr_ap[i] = self.fr_a + self.sz_fr*l_ab[0]
fv_ap[i] = self.fv_a + self.sz_fv*m_ab[0]
fa_ap[i] = self.fa_a + self.sz_fa*n_ab[0]
fe_ap[i] = self.fe_a + self.sz_fe*o_ab[0]
Xp[i,:] = (((Xe[l_ab+self.L//2,:,:,:])[:,m_ab+self.M//2,:,:])[:,:,n_ab+self.N//2,:])[:,:,:,o_ab+self.O//2]
if Y != None:
Yp[i,:] = ((Ye[l_ab+self.L//2,:,:])[:,m_ab+self.M//2,:])[:,:,:]
fp = {"Range" : fr_ap,
"Velocity" : fv_ap,
"Azimuth angle" : fa_ap,
"Elevation angle" : fe_ap}
#4D peak list
peak_list_4d = {"Peak neighborhood" : Xp,
"MIMO channels" : Yp,
"Peak frequencies" : fp}
return peak_list_4d
def extend_data(x,le,me,ne,oe,wrap=array([True,True,True,True]),min_value=0):
L,M,N,O = x.shape
xe = min_value*ones((L+2*le,M+2*me,N+2*ne,O+2*oe),dtype=x.dtype)
xe[le:-le,me:-me,ne:-ne,oe:-oe] = x
if wrap[0]:
xe[:le,me:-me,ne:-ne,oe:-oe] = x[-le:,:,:,:]
xe[-le:,me:-me,ne:-ne,oe:-oe] = x[:le,:,:,:]
if wrap[1]:
xe[le:-le,:me,ne:-ne,oe:-oe] = x[:,-me:,:,:]
xe[le:-le,-me:,ne:-ne,oe:-oe] = x[:,:me,:,:]
if wrap[2]:
xe[le:-le,me:-me,:ne,oe:-oe] = x[:,:,-ne:,:]
xe[le:-le,me:-me,-ne:,oe:-oe] = x[:,:,:ne,:]
if wrap[3]:
xe[le:-le,me:-me,ne:-ne,:oe] = x[:,:,:,-oe:]
xe[le:-le,me:-me,ne:-ne,-oe:] = x[:,:,:,:oe]
return xe
def kronv(v1,v2):
return outer(v1,v2).ravel() | pre_processing.py | import numpy
from numpy import *
class ProcessingSequence:
def __init__(self,
mimo_demodulation,
array_preprocessing,
fourier_transformations,
noise_power_estimation,
peak_finding,
Tsl_r,Tsl_v,Tsl_a,Tsl_e,
scaling,
mimo_post_Fourier=True):
self.mimo_post_Fourier = mimo_post_Fourier
self.mimo_demodulation = mimo_demodulation
self.array_preprocessing = array_preprocessing
self.fourier_transformations = fourier_transformations
self.noise_power_estimation = noise_power_estimation
self.sidelobe_thresholding = SidelobeThresholding(Tsl_r,Tsl_v,Tsl_a,Tsl_e)
self.peak_finding = peak_finding
self.peak_finding.set_validate_peaks([self.noise_power_estimation.validate_peaks,
self.sidelobe_thresholding.validate_peaks,
self.mimo_demodulation.validate_peaks])
self.Lf = self.fourier_transformations.Lf
self.Mf = self.fourier_transformations.Mf
self.Nf = self.fourier_transformations.Nf
self.Of = self.fourier_transformations.Of
self.w_r = self.fourier_transformations.w_r
self.w_v = self.fourier_transformations.w_v
self.w_a = self.fourier_transformations.w_a
self.w_e = self.fourier_transformations.w_e
Ls,Ms,Ns,Os = self.w_r.shape[0],self.w_v.shape[0],self.w_a.shape[0],self.w_e.shape[0]
self.L,self.M,self.N,self.O = self.peak_finding.L,self.peak_finding.M,self.peak_finding.N,self.peak_finding.O
self.scaling = scaling
self.ura_channels = self.array_preprocessing.ura_channels
def __call__(self,x):
print("Fast-time preprocessing")
print(" Transpose")
x = swapaxes(x,0,1)
print(" DFT fast time")
X = self.fourier_transformations.dft_fast_time(x)[0:(self.fourier_transformations.Lf//2),:,:]
print("Preprocessing per range cell")
if not self.mimo_post_Fourier:
print(" MIMO Demodulation")
X = self.mimo_demodulation.pre_fourier(X)
print(" DFT slow time")
X = self.fourier_transformations.dft_slow_time(X)[:,:,self.ura_channels]
else:
print(" Doppler DFTs")
X = self.fourier_transformations.dft_slow_time(X)
print(" MIMO demodulation")
X = self.mimo_demodulation.post_fourier(X)[:,:,self.ura_channels]
print(" Array preprocessing")
X = self.array_preprocessing(X)
print(" DFT y-channels")
X = self.fourier_transformations.dft_y_channels(X)
print(" DFT z-channels")
X = self.fourier_transformations.dft_z_channels(X)
X *= self.fourier_transformations.scale_dft_noise
X *= self.scaling
print(" Power spectrum")
P = 20*log10(abs(X))
print(" Noise power estimation")
self.noise_power_estimation(P)
print(" 4D Peak Finding")
peak_list_4d = self.peak_finding(X,P)
return X,P,self.noise_power_estimation.P_noise,peak_list_4d
class SlowTimeMimoDemodulation:
    """Separate the transmit channels of a slow-time phase-coded MIMO radar.

    ``phase_codes`` is an (M x Nt) matrix of per-chirp phases (radians) for
    the Nt transmitters; ``phase_increments`` (one linear phase slope per
    transmitter) is only required for the post-Fourier demodulation path.
    """
    def __init__(self,phase_codes,phase_increments=None):
        self.set_phase_codes(phase_codes)
        if phase_increments is not None:
            # Only needed by post_fourier(); pre_fourier() works without it.
            self.phase_increments = phase_increments
    def set_phase_codes(self,phase_codes):
        """Install the (M x Nt) phase-code matrix and update derived data."""
        self.phase_codes = phase_codes
        self.Nt = self.phase_codes.shape[1]
        self.M = self.phase_codes.shape[0]
        self._determine_blocked_frequencies()
    def _determine_blocked_frequencies(self):
        """Collect normalized Doppler frequencies occupied by cross-transmitter leakage.

        For each transmitter the power spectrum of the summed interfering
        codes is scanned for local maxima within 20 dB of the strongest one;
        the resulting frequencies are stored (deduplicated) in ``self.fv_b``.
        """
        self.fv_b = empty(0)
        # A: complex code sequences; S: selector summing over the *other*
        # transmitters; Pe: spectrum padded cyclically for peak detection.
        A,S,Pe = exp(1j*self.phase_codes), abs(eye(self.Nt)-1),zeros(self.M+2)
        for nt in arange(self.Nt):
            P = abs(fft.fft(dot(A*outer(A[:,nt],ones(self.Nt)).conj(),S[:,nt])))**2
            Pe[1:-1],Pe[0],Pe[-1] = P,P[-1],P[0]
            # Local maxima (sign change of first difference) above -20 dB.
            peak_mask = ((diff(sign(diff(Pe,1)),1) < 0) & (P > (P.max()*10**(-20/10))))
            self.fv_b = append(self.fv_b,2*pi*arange(self.M)[peak_mask]/float(self.M))
        self.fv_b = unique(self.fv_b)
    def validate_peaks(self,P,lp,mp,np,op,
                       sz_fr,sz_fv,sz_fa,sz_fe,
                       fr_a=0,fv_a=0,fa_a=0,fe_a=0):
        """Reject candidate peaks dominated by a blocked-frequency ghost.

        A candidate survives only if its power (maximized over the angle
        dimensions) is at least the power at each blocked Doppler offset.
        """
        valid_mask = ones(lp.shape,bool)
        Lr,Mr,Nr,Or = P.shape
        P = reshape(P,(Lr,Mr,Nr*Or))
        Pm=P.max(axis=2)
        for fv_b in self.fv_b:
            # Doppler bin of the potential ghost image of each candidate.
            # NOTE(review): fv_a is subtracted *after* dividing by sz_fv,
            # which looks like it assumes fv_a is given in bins -- confirm.
            mb = numpy.round((( fv_a + sz_fv*mp + fv_b) % (2*pi))/sz_fv-fv_a).astype(int)
            # Out-of-range ghost bins cannot veto their candidate.
            mb[mb<0],mb[mb>=Mr] = mp[mb<0],mp[mb>=Mr]
            valid_mask &= Pm[lp,mp] >= Pm[lp,mb]
        return lp[valid_mask],mp[valid_mask],np[valid_mask],op[valid_mask]
    # Demodulation with arbitrary phase sequence.
    def pre_fourier(self,x):
        """Demodulate before the Doppler DFT; returns (Ls x Ms x Nr*Nt).

        Mixes each transmitter's conjugate code onto the slow-time axis;
        assumes Ms equals the code length M -- TODO confirm.
        """
        Ls,Ms,Nr = x.shape
        x_mimo,A = zeros((Ls,Ms,(Nr*self.Nt)),complex_),exp(-1j*self.phase_codes)
        for nt in arange(self.Nt):
            rc_nt = x*reshape(kronv(kronv(ones(Ls),A[:,nt]),ones(Nr)),(Ls,Ms,Nr))
            x_mimo[:,:,nt*Nr:(nt+1)*Nr] = rc_nt
        return x_mimo
    def post_fourier(self,x):
        """Demodulate after the Doppler DFT by cyclically shifting the spectrum.

        Requires ``phase_increments``; raises AttributeError if the instance
        was constructed without them.
        """
        Ls,Ms,Nr = x.shape
        x_mimo = zeros((Ls,Ms,(Nr*self.Nt)),complex_)
        for nt in arange(self.Nt):
            shift = np.round((Ms/(2*pi))*self.phase_increments[nt]).astype(int)
            x_mimo[:,:,nt*Nr:(nt+1)*Nr] = roll(x,-shift,axis=1)
        return x_mimo
class ArrayPreprocessing:
    """Combine physical channels and place them on a uniform rectangular array (URA) grid.

    ``PC`` (Nc x Nvc) combines channels into virtual elements and ``C`` is a
    mixing/calibration matrix applied beforehand.  Virtual element positions
    are derived from ``p_channels`` and quantized onto an ``Ns`` x ``Os`` grid
    with spacings ``dy_ura``/``dz_ura``.
    """
    def __init__(self, p_channels, dy_ura, dz_ura, ura_channels, sub_channels, Ns, Os, PC, C):
        self.Nc, self.Nvc = PC.shape
        self.PC, self.C = PC, C
        self.Ns, self.Os = Ns, Os
        # Virtual element positions: columns of PC (normalized to unit column
        # sum) weight the physical positions of the URA channels.
        column_weights = (self.PC / sum(self.PC, axis=0)).T
        p_ura = dot(column_weights, p_channels[ura_channels])
        self.ns, self.os = self._calculate_ura_mapping(p_ura, dy_ura, dz_ura)
        self.p_channels = p_channels
        self.dy_ura, self.dz_ura = dy_ura, dz_ura
        self.ura_channels, self.sub_channels = ura_channels, sub_channels

    def __call__(self, x):
        """Combine the channels of ``x`` (Lr x Mr x Nc) and grid the result to (Lr x Mr x Ns x Os)."""
        Lr, Mr, Nv = x.shape
        combined = dot(dot(reshape(x, (Lr * Mr, self.Nc)), self.C), self.PC)
        return reshape(self.map_on_ura(combined), (Lr, Mr, self.Ns, self.Os))

    def _calculate_ura_mapping(self, p_ura, dy_ura, dz_ura):
        """
        Calculate indices for mapping channel positions to URA grid.

        Missing grid spacings default to the smallest distinct coordinate gap.
        """
        if dy_ura is None:
            dy_ura = abs(diff(unique(sort(p_ura[:, 1])))).min()
        if dz_ura is None:
            dz_ura = abs(diff(unique(sort(p_ura[:, 2])))).min()
        # Regular grid indices, measured from the smallest coordinate.
        ns = np.round((p_ura[:, 1] - p_ura[:, 1].min()) / dy_ura).astype(int)
        os = np.round((p_ura[:, 2] - p_ura[:, 2].min()) / dz_ura).astype(int)
        return ns, os

    def map_on_ura(self, x):
        """
        Map channel positions to URA grid based on precalculated indices.

        Cells without a virtual element remain zero.
        """
        x_ura = zeros((x.shape[0], self.Ns, self.Os), complex)
        x_ura[:, self.ns, self.os] = x
        return x_ura
class FourierTransformations:
    """Windowed DFTs along the range, Doppler, azimuth and elevation axes.

    ``Lf/Mf/Nf/Of`` are the FFT lengths and ``w_r/w_v/w_a/w_e`` the window
    functions of the four dimensions; the window lengths define the supported
    input sizes ``Ls/Ms/Ns/Os``.
    """
    def __init__(self,
                 Lf, Mf, Nf, Of,
                 w_r, w_v, w_a, w_e):
        self.Lf, self.Mf, self.Nf, self.Of = Lf, Mf, Nf, Of
        self.w_r, self.w_v, self.w_a, self.w_e = w_r, w_v, w_a, w_e
        self.Ls, self.Ms, self.Ns, self.Os = (
            w_r.shape[0], w_v.shape[0], w_a.shape[0], w_e.shape[0])
        # Separable 3-D window (range x Doppler x azimuth) via broadcasting.
        self.w = self.w_r[:, None, None] * self.w_v[None, :, None] * self.w_a[None, None, :]
        # Per-stage output scaling factors (unity by default).
        self.output_scale = 1.
        self.output_scale_fast_time = 1.
        self.output_scale_slow_time = 1.
        self.output_scale_y_channels = 1.
        self.output_scale_z_channels = 1.
        # Normalizes the noise floor for the chosen window functions.
        self.scale_dft_noise = 1/sqrt(sum(w_r**2)*sum(w_v**2)*sum(w_a**2)*sum(w_e**2))

    def __call__(self, x):
        """Windowed 3-D FFT over range/Doppler/azimuth."""
        return fft.fftn(self.w * x, (self.Lf, self.Mf, self.Nf)) / self.output_scale

    def dft_fast_time(self, x):
        """Windowed FFT along axis 0 (fast time / range)."""
        windowed = self.w_r[:, None, None] * x
        return fft.fft(windowed, self.Lf, axis=0) / self.output_scale_fast_time

    def dft_slow_time(self, x):
        """Windowed FFT along axis 1 (slow time / Doppler)."""
        windowed = self.w_v[None, :, None] * x
        return fft.fft(windowed, self.Mf, axis=1) / self.output_scale_slow_time

    def dft_y_channels(self, x):
        """Windowed FFT along axis 2 (horizontal array axis)."""
        windowed = self.w_a[None, None, :, None] * x
        return fft.fft(windowed, self.Nf, axis=2) / self.output_scale_y_channels

    def dft_z_channels(self, x):
        """Windowed FFT along axis 3 (vertical array axis)."""
        windowed = self.w_e[None, None, None, :] * x
        return fft.fft(windowed, self.Of, axis=3) / self.output_scale_z_channels
class NoisePowerEstimation:
    """Per-cell noise-floor estimate based on an order statistic over slow time.

    For every (range, azimuth, elevation) cell the power values along the
    Doppler axis are sorted and the ``order``-th smallest is taken as the
    noise level; ``T_power`` (dB) is the detection margin above that level.
    """
    def __init__(self, order, T_power=12):
        self.order = order      # index into the sorted Doppler powers
        self.T_power = T_power  # required margin above the noise floor [dB]
    def __call__(self, P):
        """Estimate the noise power for every cell of the 4-D power spectrum ``P``.

        Stores the result in ``self.P_noise`` (same shape as ``P``); the
        estimate is constant along the Doppler axis.
        """
        P_3d = sort(P, axis=1)[:, self.order, :, :]
        self.P_noise = zeros(P.shape)
        # Broadcast the per-cell estimate to every Doppler bin (replaces the
        # original per-bin assignment loop; identical result).
        self.P_noise[:, :, :, :] = P_3d[:, None, :, :]
    def validate_peaks(self, P, lp, mp, np, op,
                       sz_fr, sz_fv, sz_fa, sz_fe,
                       fr_a=0, fv_a=0, fa_a=0, fe_a=0):
        """Keep only peaks exceeding the noise estimate by at least ``T_power`` dB.

        The trailing keyword was renamed from the typo ``fe_`` to ``fe_a`` for
        consistency with the sibling ``validate_peaks`` implementations; all
        callers pass these arguments positionally.
        """
        valid_mask = P[lp, mp, np, op] >= (self.P_noise[lp, mp, np, op] + self.T_power)
        return lp[valid_mask], mp[valid_mask], np[valid_mask], op[valid_mask]
class SidelobeThresholding:
    """Reject peaks lying too far below the per-axis maximum (likely sidelobes).

    A candidate survives only if its power is within ``Tsl_*`` dB of the
    strongest cell along each of the four axes through its position.  The
    100 dB defaults effectively disable the individual tests.
    """
    def __init__(self, Tsl_r=100, Tsl_v=100, Tsl_a=100, Tsl_e=100):
        self.Tsl_r, self.Tsl_v, self.Tsl_a, self.Tsl_e = Tsl_r, Tsl_v, Tsl_a, Tsl_e
    def validate_peaks(self, P, lp, mp, np, op,
                       sz_fr, sz_fv, sz_fa, sz_fe,
                       fr_a=0, fv_a=0, fa_a=0, fe_a=0):
        """Filter candidate indices; returns the surviving (lp, mp, np, op)."""
        peak_power = P[lp, mp, np, op]
        keep = peak_power >= (P[:, mp, np, op].max(axis=0) - self.Tsl_r)
        keep &= peak_power >= (P[lp, :, np, op].max(axis=1) - self.Tsl_v)
        keep &= peak_power >= (P[lp, mp, :, op].max(axis=1) - self.Tsl_a)
        keep &= peak_power >= (P[lp, mp, np, :].max(axis=1) - self.Tsl_e)
        return lp[keep], mp[keep], np[keep], op[keep]
class PeakFinding:
    """Find local maxima in the 4-D power spectrum and cut out their neighborhoods.

    ``L/M/N/O`` are the (odd) neighborhood sizes per axis, ``sz_f*`` the
    frequency bin widths and ``f*_a`` the frequency offsets of bin 0.
    Callbacks registered via ``set_validate_peaks`` filter the raw candidates.
    """
    def __init__(self,
                 sz_fr,sz_fv,sz_fa,sz_fe,
                 L=5,M=5,N=5,O=5,
                 fr_a=0,fv_a=0,fa_a=0,fe_a=0):
        self.L,self.M,self.N,self.O = L,M,N,O
        # Half-widths of the neighborhood windows.
        self.lm,self.mm,self.nm,self.om = self.L//2,self.M//2,self.N//2,self.O//2
        self.sz_fr,self.sz_fv,self.sz_fa,self.sz_fe = sz_fr,sz_fv,sz_fa,sz_fe
        self.validate_peaks = []
        self.fr_a,self.fv_a,self.fa_a,self.fe_a = fr_a,fv_a,fa_a,fe_a
    def set_validate_peaks(self,validate_peaks):
        """Register the list of peak-validation callbacks applied in order."""
        self.validate_peaks = validate_peaks
    def __call__(self,X,P,Y=None):
        """Detect peaks in ``P`` and return their neighborhoods cut from ``X``.

        ``Y`` (optional) is a second data cube from which 3-D neighborhoods
        are extracted at the same peak positions.  Returns a dict with the
        complex neighborhoods, the optional MIMO-channel cuts and the
        absolute peak frequencies.
        """
        Lr,Mr,Nr,Or = X.shape
        # An axis is treated cyclically when its bins span a full 2*pi.
        wrap_data = array([(self.fr_a + self.sz_fr*Lr) == (2*pi),
                           (self.fv_a + self.sz_fv*Mr) == (2*pi),
                           (self.fa_a + self.sz_fa*Nr) == (2*pi),
                           ((self.fe_a + self.sz_fe*Or) == (2*pi) and (Or > 2))])
        Pe = extend_data(P,1,1,1,1,wrap_data,-500)
        # Peak mask: a sign change of the first difference marks a local
        # maximum per axis; a 4-D peak is a maximum along all four axes.
        peaks_r = (diff(sign(diff(Pe,1,axis=0)),1,axis=0) < 0)
        peaks_v = (diff(sign(diff(Pe,1,axis=1)),1,axis=1) < 0)
        peaks_a = (diff(sign(diff(Pe,1,axis=2)),1,axis=2) < 0)
        peaks_e = (diff(sign(diff(Pe,1,axis=3)),1,axis=3) < 0)
        peaks_4d = (peaks_r[:,1:-1,1:-1,1:-1] &
                    peaks_v[1:-1,:,1:-1,1:-1] &
                    peaks_a[1:-1,1:-1,:,1:-1] &
                    peaks_e[1:-1,1:-1,1:-1,:])
        # Peak indices per axis.
        lp = reshape(kronv(kronv(kronv(arange(Lr),ones(Mr)),ones(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
        mp = reshape(kronv(kronv(kronv(ones(Lr),arange(Mr)),ones(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
        np = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),arange(Nr)),ones(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
        op = reshape(kronv(kronv(kronv(ones(Lr),ones(Mr)),ones(Nr)),arange(Or)),(Lr,Mr,Nr,Or))[peaks_4d].astype(int)
        for vp in self.validate_peaks:
            lp,mp,np,op = vp(P,lp,mp,np,op,
                             self.sz_fr,self.sz_fv,self.sz_fa,self.sz_fe,
                             self.fr_a,self.fv_a,self.fa_a,self.fe_a)
        K = lp.shape[0]
        fr_ap,fv_ap,fa_ap,fe_ap = zeros(K),zeros(K),zeros(K),zeros(K)
        Xp = zeros((K,self.L,self.M,self.N,self.O),complex_)
        Xe = extend_data(X,self.L//2,self.M//2,self.N//2,self.O//2,wrap_data)
        if Y is not None:
            Yp = zeros((K,self.L,self.M,Y.shape[2]),complex_)
            Ye = extend_data(Y,self.L//2,self.M//2,self.N//2,self.O//2,wrap_data)
        else:
            Yp = None
        for i,(l_pc,m_pc,n_pc,o_pc) in enumerate(zip(lp,mp,np,op)):
            l_ab = l_pc + arange(-(self.L//2),self.L//2+1)
            m_ab = m_pc + arange(-(self.M//2),self.M//2+1)
            n_ab = n_pc + arange(-(self.N//2),self.N//2+1)
            o_ab = o_pc + arange(-(self.O//2),self.O//2+1)
            # Absolute frequency of the first neighborhood bin per axis.
            fr_ap[i] = self.fr_a + self.sz_fr*l_ab[0]
            fv_ap[i] = self.fv_a + self.sz_fv*m_ab[0]
            fa_ap[i] = self.fa_a + self.sz_fa*n_ab[0]
            fe_ap[i] = self.fe_a + self.sz_fe*o_ab[0]
            Xp[i,:] = (((Xe[l_ab+self.L//2,:,:,:])[:,m_ab+self.M//2,:,:])[:,:,n_ab+self.N//2,:])[:,:,:,o_ab+self.O//2]
            # BUGFIX: 'Y != None' is an elementwise array comparison whose
            # truth value is ambiguous (raises whenever Y is an array); the
            # identity test matches the check above.
            if Y is not None:
                Yp[i,:] = ((Ye[l_ab+self.L//2,:,:])[:,m_ab+self.M//2,:])[:,:,:]
        fp = {"Range" : fr_ap,
              "Velocity" : fv_ap,
              "Azimuth angle" : fa_ap,
              "Elevation angle" : fe_ap}
        # 4-D peak list.
        peak_list_4d = {"Peak neighborhood" : Xp,
                        "MIMO channels" : Yp,
                        "Peak frequencies" : fp}
        return peak_list_4d
def extend_data(x,le,me,ne,oe,wrap=array([True,True,True,True]),min_value=0):
    """Pad a 4-D array by (le, me, ne, oe) cells on both sides of each axis.

    Padded cells hold ``min_value``; for axes flagged in ``wrap`` the border
    of the interior region is filled cyclically from the opposite edge
    (corners are never wrapped).
    """
    pads = (le, me, ne, oe)
    padded_shape = tuple(s + 2 * p for s, p in zip(x.shape, pads))
    xe = min_value * ones(padded_shape, dtype=x.dtype)
    interior = tuple(slice(p, -p) for p in pads)
    xe[interior] = x
    for axis, (p, do_wrap) in enumerate(zip(pads, wrap)):
        if not do_wrap:
            continue
        # Destination borders: interior slices everywhere except this axis.
        low_dst = list(interior)
        high_dst = list(interior)
        low_dst[axis] = slice(None, p)
        high_dst[axis] = slice(-p, None)
        # Sources: the opposite edge of the original array.
        high_src = [slice(None)] * 4
        low_src = [slice(None)] * 4
        high_src[axis] = slice(-p, None)
        low_src[axis] = slice(None, p)
        xe[tuple(low_dst)] = x[tuple(high_src)]
        xe[tuple(high_dst)] = x[tuple(low_src)]
    return xe
def kronv(v1,v2):
    """Kronecker product of two 1-D arrays: every element of v1 scaled by all of v2, flattened."""
    return outer(v1, v2).reshape(-1)
import functools
from absl import app
from absl import flags
from acme.agents.jax import pwil
from acme.agents.jax import sac
from acme.datasets import tfds
import helpers
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_string('task', 'HalfCheetah-v2', 'GYM environment task (str).')
flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v0-medium', 'What dataset to use. '
'See the TFDS catalog for possible values.')
flags.DEFINE_integer(
'num_transitions_rb', 50000,
'Number of demonstration transitions to put into the '
'replay buffer.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
def make_unbatched_demonstration_iterator(
    dataset_name: str) -> pwil.PWILDemonstrations:
  """Loads a demonstrations dataset and computes average episode length.

  Episode boundaries are detected as transitions with discount == 0; when no
  boundary is seen the total step count is used as the episode length.
  """
  dataset = tfds.get_tfds_dataset(dataset_name)
  # Note: PWIL is not intended for large demonstration datasets.
  num_steps = 0
  num_episodes = 0
  for transition in dataset.as_numpy_iterator():
    num_steps += 1
    num_episodes += int(transition.discount == 0.0)
  episode_length = num_steps / num_episodes if num_episodes else num_steps
  # Hand PWIL a fresh iterator; the first one was consumed by the count above.
  return pwil.PWILDemonstrations(dataset.as_numpy_iterator(), episode_length)
def main(_):
  """Builds and launches a distributed PWIL agent with a SAC learner."""
  task = FLAGS.task
  environment_factory = lambda is_eval: helpers.make_environment(is_eval, task)
  # Underlying RL agent: SAC with batched SGD updates.
  sac_config = sac.SACConfig(num_sgd_steps_per_step=64)
  sac_builder = sac.SACBuilder(sac_config)
  pwil_config = pwil.PWILConfig(num_transitions_rb=FLAGS.num_transitions_rb)
  agent = pwil.DistributedPWIL(
      environment_factory=environment_factory,
      rl_agent=sac_builder,
      config=pwil_config,
      network_factory=sac.make_networks,
      seed=FLAGS.seed,
      demonstrations_fn=functools.partial(
          make_unbatched_demonstration_iterator,
          dataset_name=FLAGS.dataset_name,
      ),
      policy_network=sac.apply_policy_and_sample,
      evaluator_policy_network=(
          lambda n: sac.apply_policy_and_sample(n, eval_mode=True)),
      num_actors=4,
      max_number_of_steps=1000000)
  # Launch experiment.
  lp.launch(agent.build(), lp.LaunchType.LOCAL_MULTI_PROCESSING)
if __name__ == '__main__':
app.run(main) | examples/gym/lp_local_pwil_jax.py | import functools
from absl import app
from absl import flags
from acme.agents.jax import pwil
from acme.agents.jax import sac
from acme.datasets import tfds
import helpers
import launchpad as lp
FLAGS = flags.FLAGS
flags.DEFINE_string('task', 'HalfCheetah-v2', 'GYM environment task (str).')
flags.DEFINE_string(
'dataset_name', 'd4rl_mujoco_halfcheetah/v0-medium', 'What dataset to use. '
'See the TFDS catalog for possible values.')
flags.DEFINE_integer(
'num_transitions_rb', 50000,
'Number of demonstration transitions to put into the '
'replay buffer.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
def make_unbatched_demonstration_iterator(
    dataset_name: str) -> pwil.PWILDemonstrations:
  """Loads a demonstrations dataset and computes average episode length.

  Episode boundaries are detected as transitions with discount == 0; when no
  boundary is seen the total step count is used as the episode length.
  """
  dataset = tfds.get_tfds_dataset(dataset_name)
  # Note: PWIL is not intended for large demonstration datasets.
  # Single pass: count transitions and episode terminations simultaneously.
  num_steps, num_episodes = functools.reduce(
      lambda accu, t: (accu[0] + 1, accu[1] + int(t.discount == 0.0)),
      dataset.as_numpy_iterator(), (0, 0))
  episode_length = num_steps / num_episodes if num_episodes else num_steps
  # A fresh iterator is handed to PWIL; the first one was consumed above.
  return pwil.PWILDemonstrations(dataset.as_numpy_iterator(), episode_length)
def main(_):
  """Builds and launches a distributed PWIL agent with a SAC learner."""
  task = FLAGS.task
  environment_factory = lambda is_eval: helpers.make_environment(is_eval, task)
  # Underlying RL agent: SAC with batched SGD updates.
  sac_config = sac.SACConfig(num_sgd_steps_per_step=64)
  sac_builder = sac.SACBuilder(sac_config)
  pwil_config = pwil.PWILConfig(num_transitions_rb=FLAGS.num_transitions_rb)
  agent = pwil.DistributedPWIL(
      environment_factory=environment_factory,
      rl_agent=sac_builder,
      config=pwil_config,
      network_factory=sac.make_networks,
      seed=FLAGS.seed,
      demonstrations_fn=functools.partial(
          make_unbatched_demonstration_iterator,
          dataset_name=FLAGS.dataset_name,
      ),
      policy_network=sac.apply_policy_and_sample,
      evaluator_policy_network=(
          lambda n: sac.apply_policy_and_sample(n, eval_mode=True)),
      num_actors=4,
      max_number_of_steps=1000000)
  # Launch experiment.
  lp.launch(agent.build(), lp.LaunchType.LOCAL_MULTI_PROCESSING)
if __name__ == '__main__':
app.run(main) | 0.666171 | 0.257193 |
import numpy as np
import operator
import json
import os
import tree_plotter
def create_test_dataset():
    """Return the toy fish dataset (features + labels) and its feature names."""
    feature_names = ['no surfacing', 'flippers']
    dataset = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    return dataset, feature_names
def calc_shannon_ent(dataset):
    """Shannon entropy (bits) of the class labels in the last column of ``dataset``."""
    total = len(dataset)
    counts = {}
    for row in dataset:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    # H = -sum(p * log2(p)) over the label distribution.
    return -sum((c / float(total)) * np.log2(c / float(total))
                for c in counts.values())
def split_dataset(dataset, feature_index, feature_value):
    """Rows whose ``feature_index`` column equals ``feature_value``, with that column removed."""
    return [row[:feature_index] + row[feature_index + 1:]
            for row in dataset
            if row[feature_index] == feature_value]
def choose_best_feature_to_split(dataset):
    """Index of the feature with the highest information gain (ID3 criterion).

    Ties are broken in favor of the lowest feature index.
    """
    base_entropy = calc_shannon_ent(dataset)
    num_features = len(dataset[0]) - 1
    best_gain, best_index = -1, -1
    for idx in range(num_features):
        values = {row[idx] for row in dataset}
        # Entropy after splitting on this feature, weighted by subset size.
        conditional_entropy = 0
        for value in values:
            subset = split_dataset(dataset, idx, value)
            weight = float(len(subset)) / float(len(dataset))
            conditional_entropy += weight * calc_shannon_ent(subset)
        gain = base_entropy - conditional_entropy
        if gain > best_gain:
            best_gain, best_index = gain, idx
    return best_index
def majority_cnt(class_list):
    """Most common class label in ``class_list`` (majority vote).

    Used as the leaf value when no features are left to split on.
    """
    class_count = {}
    for label in class_list:
        # Count the label itself. The original counted label[-1] (only the
        # last character of the label) and used the Python-2-only
        # dict.iteritems(), which raises AttributeError on Python 3.
        class_count[label] = class_count.get(label, 0) + 1
    # Sort (label, count) pairs by descending count and return the top label.
    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]
def create_tree(dataset, feature_names):
    """Recursively build an ID3 decision tree as nested dicts.

    Leaves are class labels; internal nodes have the form
    ``{feature_name: {feature_value: subtree}}``.
    """
    class_list = [row[-1] for row in dataset]
    # All rows share a single class -> leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # No features left -> majority vote.  (The original called majority_cnt()
    # without its required argument, a TypeError whenever this path was hit.)
    if len(dataset[0]) == 1:
        return majority_cnt(class_list)
    best_index = choose_best_feature_to_split(dataset)
    best_name = feature_names[best_index]
    tree = {best_name: {}}
    for value in set(row[best_index] for row in dataset):
        sub_dataset = split_dataset(dataset, best_index, value)
        # Drop the consumed feature name for the recursive call.
        sub_names = feature_names[:best_index] + feature_names[best_index + 1:]
        tree[best_name][value] = create_tree(sub_dataset, sub_names)
    return tree
def classify(my_tree, feature_lables, to_classify_vec):  # type:(dict,list,list)->str
    """Walk the decision tree and return the predicted label for one sample.

    Returns '' when the sample's feature value has no matching branch.
    """
    root_feature = next(iter(my_tree))
    branches = my_tree[root_feature]  # type: dict
    feature_index = feature_lables.index(root_feature)
    result = ''
    for branch_value, subtree in branches.items():
        if to_classify_vec[feature_index] != branch_value:
            continue
        if isinstance(subtree, dict):
            # Internal node: descend into the matching subtree.
            result = classify(subtree, feature_lables, to_classify_vec)
        else:
            # Leaf: the subtree is the predicted label.
            result = subtree
    return result
def load_tree(filename):
    """Load a decision tree previously saved with ``write_tree``.

    The original passed ``ensure_ascii=True`` to ``json.load``, which accepts
    no such parameter (it belongs to ``json.dump``) and raised a TypeError on
    every call.
    """
    with open(filename, 'r') as file_obj:
        load_dict = json.load(file_obj)
    print(load_dict)
    return load_dict
def write_tree(my_tree, filename):
    """Serialize the decision tree to ``filename`` as JSON."""
    with open(filename, 'w') as handle:
        json.dump(my_tree, handle)
def read_data_set():
    """Read the lenses dataset (tab-separated rows) relative to the current directory."""
    file_name = os.path.join(os.path.abspath(os.curdir), "data/ch03/lenses.txt")
    with open(file_name, 'r') as file_obj:
        return [line.strip().split('\t') for line in file_obj]
if __name__ == '__main__':
    # Train a decision tree on the lenses dataset and compute the
    # resubstitution (training-set) error rate.
    #dataset,feature_names = create_test_dataset()
    #print calc_shannon_ent(dataset)
    #print choose_best_feature_to_split(dataset)
    #tree = create_tree(dataset,feature_names)
    #write_tree(tree,'d:\\tree.json')
    #load_tree('d:\\tree.json')
    #print tree
    data_set = read_data_set()
    data_lables = ['age','prescript','astigmatic','tearrate']
    my_tree = create_tree(data_set,data_lables)
    #tree_plotter.create_plot(my_tree)
    # Count how many training samples the tree misclassifies.
    error_count = 0
    for data_vec in data_set:
        ret = classify(my_tree,data_lables,data_vec)
        ret_right = data_vec[-1]
        if(ret != ret_right):
            error_count += 1
    error_percentage = float(error_count) / float(len(data_set));
print(error_percentage) | trees.py | import numpy as np
import operator
import json
import os
import tree_plotter
def create_test_dataset():
dataset = [[1,1,'yes'],
[1,1,'yes'],
[1,0,'no'],
[0,1,'no'],
[0,1,'no']];
feature_names = ['no surfacing','flippers']
return dataset,feature_names
def calc_shannon_ent(dataset):
label_count_map = {}
for data_vec in dataset:
label_count = label_count_map.get(data_vec[-1],0)
label_count_map[data_vec[-1]] = label_count + 1
num_entries = len(dataset)
shannon_ent = 0
for key in label_count_map:
prop = float(label_count_map[key]) / num_entries
shannon_ent -= prop * np.log2(prop)
return shannon_ent
def split_dataset(dataset,feature_index,feature_value):
ret_data_set = []
for data_vec in dataset:
if(data_vec[feature_index]==feature_value):
ret_vec = data_vec[0:feature_index] #type: list
ret_vec.extend(data_vec[feature_index+1:])
ret_data_set.append(ret_vec)
return ret_data_set
def choose_best_feature_to_split(dataset):
# 这是决策树算法的关键,选择信息增益最大的
shannon_ent = calc_shannon_ent(dataset)
num_feature = len(dataset[0]) - 1
best_info_gain = -1
best_feature_index = -1
for feature_index in range(0,num_feature):
feature_values = [row_vec[feature_index] for row_vec in dataset]
feature_values_set = set(feature_values)
shannon_ent_split = 0
for feature_value in feature_values_set:
dataset_split = split_dataset(dataset,feature_index,feature_value)
dataset_split_prop = float(len(dataset_split)) / float(len(dataset))
shannon_ent_split += ( dataset_split_prop * calc_shannon_ent(dataset_split) )
info_gain = shannon_ent - shannon_ent_split
if info_gain > best_info_gain:
best_info_gain = info_gain
best_feature_index = feature_index
return best_feature_index
def majority_cnt(class_list):
    """Most common class label in ``class_list`` (majority vote).

    Used as the leaf value when no features are left to split on.
    """
    class_count = {}
    for label in class_list:
        # Count the label itself. The original counted label[-1] (only the
        # last character of the label) and used the Python-2-only
        # dict.iteritems(), which raises AttributeError on Python 3.
        class_count[label] = class_count.get(label, 0) + 1
    # Sort (label, count) pairs by descending count and return the top label.
    sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_class_count[0][0]
def create_tree(dataset, feature_names):
    """Recursively build an ID3 decision tree as nested dicts.

    Leaves are class labels; internal nodes have the form
    ``{feature_name: {feature_value: subtree}}``.
    """
    class_list = [row[-1] for row in dataset]
    # All rows share a single class -> leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # No features left -> majority vote.  (The original called majority_cnt()
    # without its required argument, a TypeError whenever this path was hit.)
    if len(dataset[0]) == 1:
        return majority_cnt(class_list)
    best_index = choose_best_feature_to_split(dataset)
    best_name = feature_names[best_index]
    tree = {best_name: {}}
    for value in set(row[best_index] for row in dataset):
        sub_dataset = split_dataset(dataset, best_index, value)
        # Drop the consumed feature name for the recursive call.
        sub_names = feature_names[:best_index] + feature_names[best_index + 1:]
        tree[best_name][value] = create_tree(sub_dataset, sub_names)
    return tree
def classify(my_tree,feature_lables,to_classify_vec): #type:(dict,list,list)->str
#first_str = my_tree.keys()[0]
first_str = next(iter(my_tree))
search_dict = my_tree[first_str] #type:dict
feature_index = feature_lables.index(first_str)
classify_label = ''
for key,node in search_dict.items():
if to_classify_vec[feature_index] == key:
if type(node).__name__=='dict':
classify_label = classify(node,feature_lables,to_classify_vec)
else:
classify_label = node
return classify_label
def load_tree(filename):
    """Load a decision tree previously saved with ``write_tree``.

    The original passed ``ensure_ascii=True`` to ``json.load``, which accepts
    no such parameter (it belongs to ``json.dump``) and raised a TypeError on
    every call.
    """
    with open(filename, 'r') as file_obj:
        load_dict = json.load(file_obj)
    print(load_dict)
    return load_dict
def write_tree(my_tree,filename):
with open(filename,'w') as file_obj:
json.dump(my_tree,file_obj)
def read_data_set():
abs_cur_dir = os.path.abspath(os.curdir)
file_name = os.path.join(abs_cur_dir,"data/ch03/lenses.txt")
data_set = []
with open(file_name,'r') as file_obj:
for line in file_obj:
data_vec = line.strip().split('\t')
data_set.append(data_vec)
return data_set
if __name__ == '__main__':
#dataset,feature_names = create_test_dataset()
#print calc_shannon_ent(dataset)
#print choose_best_feature_to_split(dataset)
#tree = create_tree(dataset,feature_names)
#write_tree(tree,'d:\\tree.json')
#load_tree('d:\\tree.json')
#print tree
data_set = read_data_set()
data_lables = ['age','prescript','astigmatic','tearrate']
my_tree = create_tree(data_set,data_lables)
#tree_plotter.create_plot(my_tree)
error_count = 0
for data_vec in data_set:
ret = classify(my_tree,data_lables,data_vec)
ret_right = data_vec[-1]
if(ret != ret_right):
error_count += 1
error_percentage = float(error_count) / float(len(data_set));
print(error_percentage) | 0.147371 | 0.342681 |
from sap.pet_impl import *
from sap.battle import Battle
from test_helpers import dummy_pet, TestRandom, DummyPlayer
import logging
class TestPetImplBattle:
def test_solo_mosquito(self):
b = Battle(
[Mosquito.spawn()], [dummy_pet(toughness=1)]
)
b.battle()
assert len(b.team_1) == 1
assert b.team_2 == []
def test_flamingo(self):
b = Battle(
[Flamingo.spawn(), Pet.spawn(), Pet.spawn()], [Pet.spawn()]
)
b.battle()
team = b.team_1
assert team[0].power, b.team_1[0].toughness == (2, 2)
assert team[1].power, b.team_1[1].toughness == (2, 2)
def test_hedgehog(self):
last_pet_standing = Pet(power=1, toughness=3, symbol="T")
b = Battle(
[Hedgehog.spawn(), Pet.spawn()],
[Pet.spawn(), Pet.spawn(), Pet.spawn(), Pet.spawn(), last_pet_standing]
)
b.battle()
last_pet_standing.take_damage(2)
assert b.team_1 == []
assert b.team_2 == [last_pet_standing]
def test_double_hedgehog_with_summons(self):
b = Battle(
[Hedgehog.spawn()],
[Hedgehog.spawn(), Cricket.spawn()]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert type(b.team_2[0]) == ZombieCricket
def test_hedgehog_badger_summons(self):
b = Battle(
[Dodo.spawn(), Cricket.spawn(), Hedgehog.spawn()],
[Hedgehog.spawn(), Cricket.spawn()],
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_hedgehog_flamingo(self):
b = Battle(
[Pet(power=2, toughness=1, symbol="P"), Flamingo.spawn(), Hedgehog.spawn(), Cricket.spawn()],
[Hedgehog.spawn()]
)
b.battle()
assert b.team_2 == []
assert len(b.team_1) == 1
assert type(b.team_1[0]) == ZombieCricket
assert b.team_1[0].power, b.team_1[0].toughness == (1, 1)
def test_peacock(self):
b = Battle(
[Peacock.spawn()],
[dummy_pet(toughness=1), dummy_pet(toughness=3), dummy_pet(toughness=5), dummy_pet(toughness=7),
dummy_pet(toughness=9)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_rat(self):
b = Battle(
[Rat.spawn(), dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=5, toughness=6)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_dog(self):
r = TestRandom()
r.choices = [True, True]
b = Battle(
[Cricket.spawn(), Cricket.spawn(), Dog(symbol="D", power=2, toughness=2, random_gen=r)],
[dummy_pet(power=10, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_spider(self):
r = TestRandom()
r.choices = [Dog]
spider = Spider(power=2, toughness=2, symbol="S", random_gen=r)
b = Battle(
[spider],
[dummy_pet(power=2, toughness=4)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_badger_hedgehog_clusterfuck(self):
b = Battle(
[Hedgehog.spawn(), Hedgehog.spawn(), dummy_pet(toughness=9), Badger.spawn(), dummy_pet(toughness=9)],
[dummy_pet(power=2, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_badger_other_team(self):
b = Battle(
[Badger.spawn(), dummy_pet(toughness=5)],
[dummy_pet(power=4, toughness=11)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_badger(self):
b = Battle(
[Badger.spawn(), dummy_pet(toughness=5)],
[dummy_pet(power=4, toughness=1), dummy_pet(toughness=6)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_blowfish(self):
b = Battle(
[Hedgehog.spawn(), Blowfish(power=3, toughness=7, symbol="Blowfish")],
[dummy_pet(power=3, toughness=16)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_camel(self):
b = Battle(
[Camel.spawn(), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=1, toughness=56)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_giraffe(self):
giraffe = Giraffe.spawn()
team_1 = [dummy_pet(power=1, toughness=1), giraffe]
team_2 = [dummy_pet(power=5, toughness=5)]
giraffe.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_kangaroo(self):
b = Battle(
[Cricket.spawn(), Kangaroo.spawn()],
[dummy_pet(power=6, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_sheep(self):
b = Battle(
[Sheep.spawn(), Sheep.spawn(),
dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=2, toughness=2 * 2 + 2 * 3 + 1 * 3 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snail_lost(self):
player = DummyPlayer()
player.won_last = False
snail = Snail.spawn()
team_1 = [snail, dummy_pet(power=1, toughness=1)]
team_2 = [dummy_pet(power=2, toughness=6)]
snail.apply_trigger(Trigger(TriggerType.PET_BOUGHT, snail, player=player), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snail_won(self):
player = DummyPlayer()
player.won_last = True
snail = Snail.spawn()
team_1 = [snail, dummy_pet(power=1, toughness=1)]
team_2 = [dummy_pet(power=2, toughness=4)]
snail.apply_trigger(Trigger(TriggerType.PET_BOUGHT, snail, player=player), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_whale(self):
whale = Whale.spawn()
whale.experience = 3 # level 2
b = Battle(
[Sheep.spawn(), whale],
[dummy_pet(power=6, toughness=2 * 2 + 2 + 3 * 4 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_bison(self):
bison = Bison.spawn()
team_1 = [dummy_pet(power=1, toughness=1, experience=6), bison]
team_2 = [dummy_pet(power=8, toughness=10)]
bison.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_dolphin(self):
b = Battle(
[Dolphin.spawn()],
[dummy_pet(power=6, toughness=6), dummy_pet(power=100, toughness=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 2
def test_hippo(self):
b = Battle(
[Hippo.spawn()],
[Cricket.spawn(), dummy_pet(power=6, toughness=9)]
)
b.battle()
print(b)
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_penguin(self):
penguin = Penguin.spawn()
team_1 = [dummy_pet(power=1, toughness=1, experience=7), penguin]
team_2 = [dummy_pet(power=2, toughness=4)]
penguin.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_rooster(self):
b = Battle(
[Rooster.spawn()],
[dummy_pet(power=3, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_skunk(self):
b = Battle(
[Skunk(symbol="S", power=3, toughness=6, experience=7)],
[dummy_pet(power=100, toughness=100), dummy_pet(power=6, toughness=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_monkey(self):
monkey = Monkey.spawn()
team_1 = [dummy_pet(power=1, toughness=1), monkey]
team_2 = [dummy_pet(power=4, toughness=6)]
monkey.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_crocodile(self):
b = Battle(
[Crocodile.spawn()],
[dummy_pet(power=4, toughness=9), dummy_pet(power=100, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_rhino(self):
b = Battle(
[Rhino.spawn()],
[Cricket.spawn(), Rooster.spawn(), dummy_pet(power=7, toughness=9), dummy_pet(power=1, toughness=1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_shark(self):
b = Battle(
[Cricket.spawn(), Shark.spawn()],
[dummy_pet(toughness=1 + 1 + 8 + 1, power=6)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_turkey(self):
b = Battle(
[Cricket.spawn(), Turkey.spawn()],
[dummy_pet(toughness=1 + 4 + 3 + 1, power=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_boar(self):
b = Battle(
[Boar.spawn()],
[Cricket.spawn(), dummy_pet(toughness=15, power=10)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_dragon(self):
dragon = Dragon.spawn()
team_1 = [Cricket.spawn(), dragon]
team_2 = [dummy_pet(toughness=2 + 1 + 7 + 1, power=9)]
dragon.apply_trigger(Trigger(TriggerType.PET_BOUGHT, team_1[0]), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_fly(self):
b = Battle(
[Cricket.spawn(), Cricket.spawn(), Fly.spawn()],
[dummy_pet(toughness=1 + 5 + 1 + 5 + 1 + 5 + 1 + 5 + 1, power=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_leopard(self):
b = Battle(
[Leopard.spawn()],
[dummy_pet(toughness=16, power=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_mammoth(self):
b = Battle(
[Mammoth.spawn(), Cricket.spawn()],
[dummy_pet(toughness=3 + 3 + 1 + 1, power=10)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snake(self):
b = Battle(
[dummy_pet(toughness=1, power=1), dummy_pet(toughness=1, power=1), Snake.spawn()],
[dummy_pet(power=6, toughness=1 + 1 + 5 + 6 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_tiger(self):
b = Battle(
[dummy_pet(1, 1), Snake.spawn(), Tiger.spawn()],
[dummy_pet(power=6, toughness=1 + 5 + 5 + 6 + 4 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_honey(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=Honey.spawn())],
[dummy_pet(power=1, toughness=3)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_meat_bone(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=MeatBone.spawn())],
[dummy_pet(power=1, toughness=7)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_chilli(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=Chili.spawn())],
[dummy_pet(power=1, toughness=2), dummy_pet(power=100, toughness=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_melon(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=2, equipped_food=Melon.spawn())],
[dummy_pet(power=21, toughness=1), dummy_pet(power=2, toughness=2)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_mushroom(self):
cricket = Cricket.spawn()
cricket.equipped_food = Mushroom.spawn()
b = Battle(
[cricket],
[dummy_pet(power=2, toughness=1+1+1+1+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_ox(self):
b = Battle(
[Cricket.spawn(), Ox.spawn()],
[dummy_pet(power=20, toughness=1+1+5+5+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_turtle(self):
b = Battle(
[Turtle.spawn(), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=20, toughness=1+1+1+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_deer(self):
b = Battle(
[Deer.spawn()],
[dummy_pet(toughness=2, power=5),dummy_pet(power=100, toughness=5), dummy_pet(power=100, toughness=1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
# TODO: test tiger whale
def test_scorpion(self):
b = Battle(
[Scorpion.spawn(), Scorpion.spawn(), Scorpion.spawn()],
[Pet(symbol="P", toughness=100, power=100, equipped_food=Garlic.spawn()), # garlic reduces to 1, so dies
Pet(symbol="P", toughness=100, power=100, equipped_food=Melon.spawn()) # should take a hit though
]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_gorilla(self):
b = Battle(
[Gorilla.spawn()],
[dummy_pet(power=8, toughness=6), dummy_pet(power=100, toughness=6), dummy_pet(power=1, toughness=7)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_whale_with_fainted_pet(self):
b = Battle(
[Sheep.spawn(), Whale.spawn()],
[Dolphin.spawn()]
)
b.battle()
assert b.team_2 == []
assert len(b.team_1) == 1
assert b.team_1[0].toughness == 2
def test_skunk_with_fainted_pet(self):
b = Battle(
[Skunk.spawn()],
[Cricket(power=1, toughness=100, symbol="C"), Whale.spawn()]
)
b.battle() | tests/test_pet_impl_battle.py | from sap.pet_impl import *
from sap.battle import Battle
from test_helpers import dummy_pet, TestRandom, DummyPlayer
import logging
class TestPetImplBattle:
def test_solo_mosquito(self):
    """A lone Mosquito's start-of-battle shot finishes a 1-toughness enemy."""
    battle = Battle([Mosquito.spawn()], [dummy_pet(toughness=1)])
    battle.battle()
    assert len(battle.team_1) == 1
    assert battle.team_2 == []
def test_flamingo(self):
    """Flamingo's faint should buff the two pets behind it to (2, 2).

    BUG FIX: the original assertions were written as
    `assert team[0].power, b.team_1[0].toughness == (2, 2)`, which Python
    parses as `assert team[0].power` with the comparison as the assert
    *message* -- the buff was never actually verified. The tuples below
    check what the original clearly intended.
    """
    b = Battle(
        [Flamingo.spawn(), Pet.spawn(), Pet.spawn()], [Pet.spawn()]
    )
    b.battle()
    team = b.team_1
    # NOTE(review): expected (2, 2) taken from the original message operand;
    # confirm base Pet.spawn() stats are (1, 1) so the test now passes.
    assert (team[0].power, team[0].toughness) == (2, 2)
    assert (team[1].power, team[1].toughness) == (2, 2)
def test_hedgehog(self):
    # Survivor sized so the hedgehog's faint damage leaves it at 1 toughness.
    last_pet_standing = Pet(power=1, toughness=3, symbol="T")
    b = Battle(
        [Hedgehog.spawn(), Pet.spawn()],
        [Pet.spawn(), Pet.spawn(), Pet.spawn(), Pet.spawn(), last_pet_standing]
    )
    b.battle()
    # NOTE(review): this mutates the *expected* object after the battle; the
    # list equality below then holds because team_2[0] is the same instance
    # (identity comparison) -- presumably intentional, but fragile if Pet ever
    # defines value-based __eq__. Confirm take_damage(2) mirrors the battle's
    # remaining damage on the survivor.
    last_pet_standing.take_damage(2)
    assert b.team_1 == []
    assert b.team_2 == [last_pet_standing]
def test_double_hedgehog_with_summons(self):
    # Both hedgehogs faint; only the cricket's zombie summon should survive
    # the overlapping faint damage.
    b = Battle(
        [Hedgehog.spawn()],
        [Hedgehog.spawn(), Cricket.spawn()]
    )
    b.battle()
    assert b.team_1 == []
    assert len(b.team_2) == 1
    assert type(b.team_2[0]) == ZombieCricket
def test_hedgehog_badger_summons(self):
    # Mixed summoners and hedgehogs on both sides: everything -- including
    # any summons -- should be wiped out by the chained faint damage.
    b = Battle(
        [Dodo.spawn(), Cricket.spawn(), Hedgehog.spawn()],
        [Hedgehog.spawn(), Cricket.spawn()],
    )
    b.battle()
    assert b.team_1 == []
    assert b.team_2 == []
def test_hedgehog_flamingo(self):
    """Flamingo buff plus hedgehog faint damage leaves only the zombie cricket.

    BUG FIX: the last assertion was `assert b.team_1[0].power,
    b.team_1[0].toughness == (1, 1)` -- the assert-with-message form, which
    only checked that power was truthy. Rewritten as a real tuple comparison.
    """
    b = Battle(
        [Pet(power=2, toughness=1, symbol="P"), Flamingo.spawn(), Hedgehog.spawn(), Cricket.spawn()],
        [Hedgehog.spawn()]
    )
    b.battle()
    assert b.team_2 == []
    assert len(b.team_1) == 1
    # exact-type check is intentional (subclasses would hide a regression)
    assert type(b.team_1[0]) is ZombieCricket
    # NOTE(review): expected (1, 1) taken from the original message operand;
    # confirm the zombie's stats before relying on this now-active check.
    assert (b.team_1[0].power, b.team_1[0].toughness) == (1, 1)
def test_peacock(self):
b = Battle(
[Peacock.spawn()],
[dummy_pet(toughness=1), dummy_pet(toughness=3), dummy_pet(toughness=5), dummy_pet(toughness=7),
dummy_pet(toughness=9)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_rat(self):
b = Battle(
[Rat.spawn(), dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=5, toughness=6)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_dog(self):
    # Deterministic RNG stub: presumably each True drives the dog's
    # random buff choice when a friend is summoned -- confirm against
    # Dog's implementation.
    r = TestRandom()
    r.choices = [True, True]
    b = Battle(
        [Cricket.spawn(), Cricket.spawn(), Dog(symbol="D", power=2, toughness=2, random_gen=r)],
        [dummy_pet(power=10, toughness=8)]
    )
    b.battle()
    assert b.team_1 == []
    assert b.team_2 == []
def test_spider(self):
r = TestRandom()
r.choices = [Dog]
spider = Spider(power=2, toughness=2, symbol="S", random_gen=r)
b = Battle(
[spider],
[dummy_pet(power=2, toughness=4)]
)
b.battle()
assert b.team_1 == []
assert b.team_2 == []
def test_badger_hedgehog_clusterfuck(self):
b = Battle(
[Hedgehog.spawn(), Hedgehog.spawn(), dummy_pet(toughness=9), Badger.spawn(), dummy_pet(toughness=9)],
[dummy_pet(power=2, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_badger_other_team(self):
b = Battle(
[Badger.spawn(), dummy_pet(toughness=5)],
[dummy_pet(power=4, toughness=11)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_badger(self):
b = Battle(
[Badger.spawn(), dummy_pet(toughness=5)],
[dummy_pet(power=4, toughness=1), dummy_pet(toughness=6)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_blowfish(self):
b = Battle(
[Hedgehog.spawn(), Blowfish(power=3, toughness=7, symbol="Blowfish")],
[dummy_pet(power=3, toughness=16)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_camel(self):
b = Battle(
[Camel.spawn(), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=1, toughness=56)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_giraffe(self):
giraffe = Giraffe.spawn()
team_1 = [dummy_pet(power=1, toughness=1), giraffe]
team_2 = [dummy_pet(power=5, toughness=5)]
giraffe.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_kangaroo(self):
b = Battle(
[Cricket.spawn(), Kangaroo.spawn()],
[dummy_pet(power=6, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_sheep(self):
b = Battle(
[Sheep.spawn(), Sheep.spawn(),
dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=2, toughness=2 * 2 + 2 * 3 + 1 * 3 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snail_lost(self):
player = DummyPlayer()
player.won_last = False
snail = Snail.spawn()
team_1 = [snail, dummy_pet(power=1, toughness=1)]
team_2 = [dummy_pet(power=2, toughness=6)]
snail.apply_trigger(Trigger(TriggerType.PET_BOUGHT, snail, player=player), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snail_won(self):
player = DummyPlayer()
player.won_last = True
snail = Snail.spawn()
team_1 = [snail, dummy_pet(power=1, toughness=1)]
team_2 = [dummy_pet(power=2, toughness=4)]
snail.apply_trigger(Trigger(TriggerType.PET_BOUGHT, snail, player=player), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_whale(self):
whale = Whale.spawn()
whale.experience = 3 # level 2
b = Battle(
[Sheep.spawn(), whale],
[dummy_pet(power=6, toughness=2 * 2 + 2 + 3 * 4 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_bison(self):
bison = Bison.spawn()
team_1 = [dummy_pet(power=1, toughness=1, experience=6), bison]
team_2 = [dummy_pet(power=8, toughness=10)]
bison.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_dolphin(self):
b = Battle(
[Dolphin.spawn()],
[dummy_pet(power=6, toughness=6), dummy_pet(power=100, toughness=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 2
def test_hippo(self):
b = Battle(
[Hippo.spawn()],
[Cricket.spawn(), dummy_pet(power=6, toughness=9)]
)
b.battle()
print(b)
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_penguin(self):
penguin = Penguin.spawn()
team_1 = [dummy_pet(power=1, toughness=1, experience=7), penguin]
team_2 = [dummy_pet(power=2, toughness=4)]
penguin.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_rooster(self):
b = Battle(
[Rooster.spawn()],
[dummy_pet(power=3, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_skunk(self):
b = Battle(
[Skunk(symbol="S", power=3, toughness=6, experience=7)],
[dummy_pet(power=100, toughness=100), dummy_pet(power=6, toughness=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_monkey(self):
monkey = Monkey.spawn()
team_1 = [dummy_pet(power=1, toughness=1), monkey]
team_2 = [dummy_pet(power=4, toughness=6)]
monkey.apply_trigger(Trigger(TriggerType.TURN_ENDED), team_1, team_2)
b = Battle(
team_1,
team_2
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_crocodile(self):
b = Battle(
[Crocodile.spawn()],
[dummy_pet(power=4, toughness=9), dummy_pet(power=100, toughness=8)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_rhino(self):
b = Battle(
[Rhino.spawn()],
[Cricket.spawn(), Rooster.spawn(), dummy_pet(power=7, toughness=9), dummy_pet(power=1, toughness=1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_shark(self):
b = Battle(
[Cricket.spawn(), Shark.spawn()],
[dummy_pet(toughness=1 + 1 + 8 + 1, power=6)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_turkey(self):
b = Battle(
[Cricket.spawn(), Turkey.spawn()],
[dummy_pet(toughness=1 + 4 + 3 + 1, power=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_boar(self):
b = Battle(
[Boar.spawn()],
[Cricket.spawn(), dummy_pet(toughness=15, power=10)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_dragon(self):
dragon = Dragon.spawn()
team_1 = [Cricket.spawn(), dragon]
team_2 = [dummy_pet(toughness=2 + 1 + 7 + 1, power=9)]
dragon.apply_trigger(Trigger(TriggerType.PET_BOUGHT, team_1[0]), team_1, team_2)
b = Battle(team_1, team_2)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_fly(self):
b = Battle(
[Cricket.spawn(), Cricket.spawn(), Fly.spawn()],
[dummy_pet(toughness=1 + 5 + 1 + 5 + 1 + 5 + 1 + 5 + 1, power=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_leopard(self):
b = Battle(
[Leopard.spawn()],
[dummy_pet(toughness=16, power=4)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_mammoth(self):
b = Battle(
[Mammoth.spawn(), Cricket.spawn()],
[dummy_pet(toughness=3 + 3 + 1 + 1, power=10)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_snake(self):
b = Battle(
[dummy_pet(toughness=1, power=1), dummy_pet(toughness=1, power=1), Snake.spawn()],
[dummy_pet(power=6, toughness=1 + 1 + 5 + 6 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_tiger(self):
b = Battle(
[dummy_pet(1, 1), Snake.spawn(), Tiger.spawn()],
[dummy_pet(power=6, toughness=1 + 5 + 5 + 6 + 4 + 1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_honey(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=Honey.spawn())],
[dummy_pet(power=1, toughness=3)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_meat_bone(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=MeatBone.spawn())],
[dummy_pet(power=1, toughness=7)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_chilli(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=1, equipped_food=Chili.spawn())],
[dummy_pet(power=1, toughness=2), dummy_pet(power=100, toughness=5)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_melon(self):
b = Battle(
[Pet(symbol="P", power=1, toughness=2, equipped_food=Melon.spawn())],
[dummy_pet(power=21, toughness=1), dummy_pet(power=2, toughness=2)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_mushroom(self):
cricket = Cricket.spawn()
cricket.equipped_food = Mushroom.spawn()
b = Battle(
[cricket],
[dummy_pet(power=2, toughness=1+1+1+1+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_ox(self):
b = Battle(
[Cricket.spawn(), Ox.spawn()],
[dummy_pet(power=20, toughness=1+1+5+5+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_turtle(self):
b = Battle(
[Turtle.spawn(), dummy_pet(power=1, toughness=1)],
[dummy_pet(power=20, toughness=1+1+1+1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_deer(self):
b = Battle(
[Deer.spawn()],
[dummy_pet(toughness=2, power=5),dummy_pet(power=100, toughness=5), dummy_pet(power=100, toughness=1)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
# TODO: test tiger whale
def test_scorpion(self):
    # Scorpions one-shot high-stat enemies (poison), except where the
    # equipped food intervenes -- NOTE(review): exact Garlic/Melon
    # interaction with poison inferred from the inline comments; verify
    # against the food implementations.
    b = Battle(
        [Scorpion.spawn(), Scorpion.spawn(), Scorpion.spawn()],
        [Pet(symbol="P", toughness=100, power=100, equipped_food=Garlic.spawn()), # garlic reduces to 1, so dies
        Pet(symbol="P", toughness=100, power=100, equipped_food=Melon.spawn()) # should take a hit though
        ]
    )
    b.battle()
    assert b.team_1 == []
    assert b.team_2 == []
def test_gorilla(self):
b = Battle(
[Gorilla.spawn()],
[dummy_pet(power=8, toughness=6), dummy_pet(power=100, toughness=6), dummy_pet(power=1, toughness=7)]
)
b.battle()
assert b.team_1 == []
assert len(b.team_2) == 1
assert b.team_2[0].toughness == 1
def test_whale_with_fainted_pet(self):
b = Battle(
[Sheep.spawn(), Whale.spawn()],
[Dolphin.spawn()]
)
b.battle()
assert b.team_2 == []
assert len(b.team_1) == 1
assert b.team_1[0].toughness == 2
def test_skunk_with_fainted_pet(self):
b = Battle(
[Skunk.spawn()],
[Cricket(power=1, toughness=100, symbol="C"), Whale.spawn()]
)
b.battle() | 0.608129 | 0.65736 |
import time
import json
import logging
import threading
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
class Observer:
def __init__(self, host, event_callback):
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(threadName)s\t%(levelname)-8s\t%(message)s')
self.host = host.replace('rtmp://', '').replace('http://', '').replace('https://', '')
if self.host and self.host[-1] == '/': # remove if string ends with dash
self.host = self.host[:-1]
self.thread = None
self.is_running = False
self.event_callback = event_callback
self.polling_interval = 5 # seconds
def run(self):
    """Polling loop (runs on the observer thread).

    Fetches http://<host>/v1/states every `polling_interval` seconds and
    forwards the reported state type to handle_status(); returns once
    `is_running` is cleared by stop().
    """
    logging.info('observer started')
    while True:
        try:
            # NOTE(review): a malformed/unexpected JSON body raises
            # JSONDecodeError/KeyError here, which is not caught and would
            # kill the thread -- confirm the /v1/states schema.
            with urlopen('http://' + self.host + '/v1/states') as res:
                response = json.loads(res.read().decode())
                status = response['repeat_to_local_nginx']['type']
                self.handle_status(status)
        except HTTPError as e:
            # server answered, but with an HTTP error status
            self.handle_status('http_error')
            logging.error(e)
        except URLError as e:
            # distinguish "nothing listening" from other transport failures
            if 'Connection refused' in str(e.reason):
                self.handle_status('server_not_reachable')
            else:
                self.handle_status('url_error')
            logging.error(e)
        # check if loop should be exited
        # (sleep in 0.5 s slices so stop() takes effect within ~0.5 s instead
        # of blocking for the whole polling interval)
        start_time = time.time()
        while (time.time()-start_time) < self.polling_interval:
            if not self.is_running:
                return
            time.sleep(0.5)
'''
This function gets called for each request, status contains a string identifier,
which gets passed to the event_callback method. The id is one of the following:
[ 'connected', 'connecting', 'disconnected', 'stopped', 'error', 'server_not_reachable', 'http_error', 'url_error' ]
'''
def handle_status(self, status):
if self.is_running:
self.event_callback(status)
else:
logging.debug('Not sending \'{}\' event, as observer is stopping'.format(status))
def start(self):
if not self.is_running:
self.is_running = True
self.thread = threading.Thread(target=self.run, name="ObserverThread")
self.thread.start()
else:
logging.debug('observer already running')
def stop(self):
logging.info('stopping observer ...')
self.is_running = False
self.thread.join() | observer.py | import time
import json
import logging
import threading
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
class Observer:
def __init__(self, host, event_callback):
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(threadName)s\t%(levelname)-8s\t%(message)s')
self.host = host.replace('rtmp://', '').replace('http://', '').replace('https://', '')
if self.host and self.host[-1] == '/': # remove if string ends with dash
self.host = self.host[:-1]
self.thread = None
self.is_running = False
self.event_callback = event_callback
self.polling_interval = 5 # seconds
def run(self):
    """Polling loop (runs on the observer thread).

    Fetches http://<host>/v1/states every `polling_interval` seconds and
    forwards the reported state type to handle_status(); returns once
    `is_running` is cleared by stop().
    """
    logging.info('observer started')
    while True:
        try:
            # NOTE(review): a malformed/unexpected JSON body raises
            # JSONDecodeError/KeyError here, which is not caught and would
            # kill the thread -- confirm the /v1/states schema.
            with urlopen('http://' + self.host + '/v1/states') as res:
                response = json.loads(res.read().decode())
                status = response['repeat_to_local_nginx']['type']
                self.handle_status(status)
        except HTTPError as e:
            # server answered, but with an HTTP error status
            self.handle_status('http_error')
            logging.error(e)
        except URLError as e:
            # distinguish "nothing listening" from other transport failures
            if 'Connection refused' in str(e.reason):
                self.handle_status('server_not_reachable')
            else:
                self.handle_status('url_error')
            logging.error(e)
        # check if loop should be exited
        # (sleep in 0.5 s slices so stop() takes effect within ~0.5 s instead
        # of blocking for the whole polling interval)
        start_time = time.time()
        while (time.time()-start_time) < self.polling_interval:
            if not self.is_running:
                return
            time.sleep(0.5)
'''
This function gets called for each request, status contains a string identifier,
which gets passed to the event_callback method. The id is one of the following:
[ 'connected', 'connecting', 'disconnected', 'stopped', 'error', 'server_not_reachable', 'http_error', 'url_error' ]
'''
def handle_status(self, status):
if self.is_running:
self.event_callback(status)
else:
logging.debug('Not sending \'{}\' event, as observer is stopping'.format(status))
def start(self):
if not self.is_running:
self.is_running = True
self.thread = threading.Thread(target=self.run, name="ObserverThread")
self.thread.start()
else:
logging.debug('observer already running')
def stop(self):
logging.info('stopping observer ...')
self.is_running = False
self.thread.join() | 0.322633 | 0.075756 |
from .UartStream import *
from ..Exceptions import *
from ..Manager import *
from ..StreamDevice import *
from ..StreamParserGenerator import *
from ..StreamProtocol import *
class UartManager(Manager):
"""Serial device manager for abstracting stream and parser management.
This class implements a comprehensive management layer on top of devices,
streams, protocols, and parser/generator instances. While parent application
code can manage these things independently, this code wraps everything into
a single interface to handle filtered device connection monitoring, data
stream control, packet parsing based on an externally defined protocol, and
various types of error detection.
For many applications, the manager layer is the only one that will have to
be configured during initialization, and all lower-level interaction can be
left to the manager instance."""
AUTO_OPEN_NONE = 0
AUTO_OPEN_SINGLE = 1
AUTO_OPEN_ALL = 2
def __init__(self,
             device_class=StreamDevice,
             stream_class=UartStream,
             parser_generator_class=StreamParserGenerator,
             protocol_class=StreamProtocol):
    """Initializes a serial manager instance.

    :param device_class: Class instantiated for each newly connected device
    :type device_class: SerialDevice
    :param stream_class: Class instantiated for each new data stream
    :type stream_class: UartStream
    :param parser_generator_class: Class instantiated for the parser/generator
        attached to each new stream
    :type parser_generator_class: StreamParserGenerator
    :param protocol_class: Protocol assigned to new parser/generator objects
    :type protocol_class: StreamProtocol

    The manager wires devices, streams, and parser/generators together; in
    this Python implementation it also monitors device connection and
    disconnection (e.g. hot-plugged USB adapters) using PySerial for
    detection. Unlike most overridden methods in this child class, this one
    runs the parent (super) class method first.
    """
    # run parent constructor first
    super().__init__()

    # factory classes used when new hardware appears
    self.device_class = device_class
    self.stream_class = stream_class
    self.parser_generator_class = parser_generator_class
    self.protocol_class = protocol_class

    # application-assignable hooks; all optional, all unset by default
    self.port_info_filter = None
    for callback_name in (
            "on_connect_device", "on_disconnect_device",
            "on_open_stream", "on_close_stream", "on_open_error",
            "on_rx_data", "on_tx_data",
            "on_rx_packet", "on_tx_packet", "on_rx_error",
            "on_incoming_packet_timeout", "on_waiting_packet_timeout"):
        setattr(self, callback_name, None)

    # automatic stream-opening policy
    self.auto_open = UartManager.AUTO_OPEN_NONE

    # read-only from the application's perspective
    self.streams = {}

    # private bookkeeping
    self._recently_disconnected_devices = []
def _get_connected_devices(self) -> dict:
    """Gets a collection of all currently connected serial devices.

    :returns: Dictionary of connected devices (keys are device names)
    :rtype: dict

    Detected ports are merged with previously known devices, so devices that
    already carry state (e.g. an attached/open stream) keep it. Previously
    unknown ports are run through port_info_filter (if set) and get a fresh
    stream/device pair.

    Fixes: `== None` comparison replaced with `is None` (PEP 8 E711), and
    `del list[:]` replaced with the clearer `list.clear()`.
    """
    connected_devices = {}
    for port_info in serial.tools.list_ports.comports():
        if port_info.device in self._recently_disconnected_devices:
            # skip reporting this device for one iteration (works around rare
            # but observed case where Windows shows a device as being still
            # connected when a serial read operation has already thrown an
            # exception due to an unavailable pipe)
            continue

        if port_info.device in self.devices:
            # use existing device instance
            connected_devices[port_info.device] = self.devices[port_info.device]
            continue

        # new hardware: apply the application filter, skip if it doesn't pass
        if self.port_info_filter is not None and not self.port_info_filter(port_info):
            continue

        # make sure the application provided everything necessary
        if self.stream_class is None:
            raise PerilibHalException("Manager cannot attach stream without defined stream_class attribute")

        # create and configure data stream object
        stream = self.stream_class()
        stream.on_disconnect_device = self._on_disconnect_device  # use internal disconnection callback
        stream.on_open_stream = self.on_open_stream
        stream.on_close_stream = self.on_close_stream
        stream.on_open_error = self.on_open_error
        stream.on_rx_data = self.on_rx_data
        stream.on_tx_data = self.on_tx_data

        # create and attach PySerial port instance to stream (not opened yet)
        stream.port = serial.Serial()
        stream.port.port = port_info.device
        stream.port_info = port_info

        # create device with stream attached, and link back for convenience
        device = self.device_class(port_info.device, stream)
        stream.device = device

        # add device and stream to internal tables for management
        self.streams[port_info.device] = stream
        connected_devices[port_info.device] = device

    # clean out list of recently disconnected devices
    self._recently_disconnected_devices.clear()

    # send back the list of currently connected devices
    return connected_devices
def _on_connect_device(self, device) -> None:
    """Handles serial device connections.

    :param device: Device that has just been connected
    :type device: SerialDevice

    Triggers the app-level connection callback first; unless that callback
    returns False, the configured auto-open policy (single/all) decides
    whether to open the new device's stream, creating and attaching a
    parser/generator when a protocol class is configured.

    Fixes: `!= None` replaced with `is not None`, `== True` test dropped,
    and the two-step open_stream decision condensed into one expression.
    """
    run_builtin = True
    if self.on_connect_device is not None:
        # trigger the app-level connection callback
        run_builtin = self.on_connect_device(device)

    # NOTE(review): `!= False` (rather than `is not False`) is kept on
    # purpose so a callback returning a falsy 0 still suppresses the
    # built-in logic, matching the original behavior.
    if run_builtin != False and self.auto_open != UartManager.AUTO_OPEN_NONE and self.stream_class is not None:
        # AUTO_OPEN_ALL opens every connection; AUTO_OPEN_SINGLE opens only
        # the first connected device
        open_stream = (
            self.auto_open == UartManager.AUTO_OPEN_ALL
            or (self.auto_open == UartManager.AUTO_OPEN_SINGLE and len(self.devices) == 1)
        )

        if open_stream:
            # create and configure parser/generator object if protocol is available
            if self.protocol_class is not None:
                parser_generator = self.parser_generator_class(protocol_class=self.protocol_class, stream=self.streams[device.id])
                parser_generator.on_rx_packet = self.on_rx_packet
                parser_generator.on_tx_packet = self.on_tx_packet
                parser_generator.on_rx_error = self.on_rx_error
                parser_generator.on_incoming_packet_timeout = self.on_incoming_packet_timeout
                parser_generator.on_waiting_packet_timeout = self.on_waiting_packet_timeout
                self.streams[device.id].parser_generator = parser_generator
            try:
                # open the data stream
                self.streams[device.id].open()
            except serial.serialutil.SerialException:
                # unable to open the port, but don't crash (deliberate
                # best-effort open; the device stays listed without a stream)
                pass
def _on_disconnect_device(self, device) -> None:
"""Handles device disconnections.
:param device: Device that has just been disconnected
:type device: SerialDevice
When the connection watcher method detects a removed device, that device
is passed to this method for processing. This implementation handles
automatic closing and removal of a data stream (if one is attached), and
resumes monitoring in the case of auto-open-first configuration."""
# mark as recently disconnected
self._recently_disconnected_devices.append(device.id)
# close and remove stream if it is open and/or just present
if device.id in self.streams:
self.streams[device.id].close()
del self.streams[device.id]
run_builtin = True
if self.on_disconnect_device is not None:
# trigger the app-level disconnection callback
run_builtin = self.on_disconnect_device(device)
# remove the device itself from our list
del self.devices[device.id] | perilib/hal/UartManager.py | from .UartStream import *
from ..Exceptions import *
from ..Manager import *
from ..StreamDevice import *
from ..StreamParserGenerator import *
from ..StreamProtocol import *
class UartManager(Manager):
"""Serial device manager for abstracting stream and parser management.
This class implements a comprehensive management layer on top of devices,
streams, protocols, and parser/generator instances. While parent application
code can manage these things independently, this code wraps everything into
a single interface to handle filtered device connection monitoring, data
stream control, packet parsing based on an externally defined protocol, and
various types of error detection.
For many applications, the manager layer is the only one that will have to
be configured during initialization, and all lower-level interaction can be
left to the manager instance."""
AUTO_OPEN_NONE = 0
AUTO_OPEN_SINGLE = 1
AUTO_OPEN_ALL = 2
def __init__(self,
             device_class=StreamDevice,
             stream_class=UartStream,
             parser_generator_class=StreamParserGenerator,
             protocol_class=StreamProtocol):
    """Initializes a serial manager instance.

    :param device_class: Class instantiated for each newly connected device
    :type device_class: SerialDevice
    :param stream_class: Class instantiated for each new data stream
    :type stream_class: UartStream
    :param parser_generator_class: Class instantiated for the parser/generator
        attached to each new stream
    :type parser_generator_class: StreamParserGenerator
    :param protocol_class: Protocol assigned to new parser/generator objects
    :type protocol_class: StreamProtocol

    The manager wires devices, streams, and parser/generators together; in
    this Python implementation it also monitors device connection and
    disconnection (e.g. hot-plugged USB adapters) using PySerial for
    detection. Unlike most overridden methods in this child class, this one
    runs the parent (super) class method first.
    """
    # run parent constructor first
    super().__init__()

    # factory classes used when new hardware appears
    self.device_class = device_class
    self.stream_class = stream_class
    self.parser_generator_class = parser_generator_class
    self.protocol_class = protocol_class

    # application-assignable hooks; all optional, all unset by default
    self.port_info_filter = None
    for callback_name in (
            "on_connect_device", "on_disconnect_device",
            "on_open_stream", "on_close_stream", "on_open_error",
            "on_rx_data", "on_tx_data",
            "on_rx_packet", "on_tx_packet", "on_rx_error",
            "on_incoming_packet_timeout", "on_waiting_packet_timeout"):
        setattr(self, callback_name, None)

    # automatic stream-opening policy
    self.auto_open = UartManager.AUTO_OPEN_NONE

    # read-only from the application's perspective
    self.streams = {}

    # private bookkeeping
    self._recently_disconnected_devices = []
def _get_connected_devices(self) -> dict:
    """Get a collection of all currently connected serial devices.

    :returns: Dictionary of connected devices (keys are device names)
    :rtype: dict

    Ports already present in self.devices keep their existing instances so
    any attached/opened state survives the rescan, while previously unknown
    ports get a fresh stream/device pair. Devices that disconnected since
    the last scan are skipped exactly once (see inline note below).
    """
    connected_devices = {}
    for port_info in serial.tools.list_ports.comports():
        if port_info.device in self._recently_disconnected_devices:
            # Skip reporting this device for one iteration: works around a
            # rare but observed Windows case where the port is still listed
            # although a serial read already failed on an unavailable pipe.
            continue
        if port_info.device in self.devices:
            # Known port: reuse the existing device instance.
            connected_devices[port_info.device] = self.devices[port_info.device]
            continue
        # New port: honor the application-supplied filter first.
        if self.port_info_filter is not None and not self.port_info_filter(port_info):
            continue
        # Make sure the application provided everything necessary.
        # (Fixed: identity comparison instead of '== None'.)
        if self.stream_class is None:
            raise PerilibHalException("Manager cannot attach stream without defined stream_class attribute")
        # Create and configure the data stream object.
        stream = self.stream_class()
        stream.on_disconnect_device = self._on_disconnect_device  # internal disconnection hook
        stream.on_open_stream = self.on_open_stream
        stream.on_close_stream = self.on_close_stream
        stream.on_open_error = self.on_open_error
        stream.on_rx_data = self.on_rx_data
        stream.on_tx_data = self.on_tx_data
        # Create and attach a PySerial port instance to the stream (not opened yet).
        stream.port = serial.Serial()
        stream.port.port = port_info.device
        stream.port_info = port_info
        # Create the device with the stream attached; link back for convenience.
        device = self.device_class(port_info.device, stream)
        stream.device = device
        # Register both in the internal tables for management.
        self.streams[port_info.device] = stream
        connected_devices[port_info.device] = device
    # The one-iteration skip above has served its purpose; clear the list.
    del self._recently_disconnected_devices[:]
    # Send back the list of currently connected devices.
    return connected_devices
def _on_connect_device(self, device) -> None:
    """Handle a newly connected serial device.

    :param device: Device that has just been connected
    :type device: SerialDevice

    Runs the app-level on_connect_device callback first (returning False
    from it suppresses the built-in handling), then — depending on the
    auto_open policy — attaches a parser/generator (when a protocol class
    is configured) and opens the device's stream.
    """
    run_builtin = True
    if self.on_connect_device is not None:
        # Trigger the app-level connection callback.
        run_builtin = self.on_connect_device(device)
    # NOTE: '!= False' (rather than 'is not False') is kept deliberately so
    # that a callback returning a falsy 0 also suppresses the built-in
    # handling, exactly as before.
    if run_builtin != False and self.auto_open != UartManager.AUTO_OPEN_NONE and self.stream_class is not None:
        # Decide whether this particular connection should be auto-opened.
        open_stream = False
        if self.auto_open == UartManager.AUTO_OPEN_ALL:
            # Every connection opens a new stream.
            open_stream = True
        elif self.auto_open == UartManager.AUTO_OPEN_SINGLE and len(self.devices) == 1:
            # Only the first (single) connected device opens a stream.
            open_stream = True
        if open_stream:
            # Create and configure a parser/generator when a protocol is
            # available. (Fixed: identity comparison instead of '!= None'.)
            if self.protocol_class is not None:
                parser_generator = self.parser_generator_class(protocol_class=self.protocol_class, stream=self.streams[device.id])
                parser_generator.on_rx_packet = self.on_rx_packet
                parser_generator.on_tx_packet = self.on_tx_packet
                parser_generator.on_rx_error = self.on_rx_error
                parser_generator.on_incoming_packet_timeout = self.on_incoming_packet_timeout
                parser_generator.on_waiting_packet_timeout = self.on_waiting_packet_timeout
                self.streams[device.id].parser_generator = parser_generator
            try:
                # Open the data stream.
                self.streams[device.id].open()
            except serial.serialutil.SerialException:
                # Deliberate best-effort: the port may be claimed elsewhere
                # or vanish between detection and open; don't crash.
                # NOTE(review): presumably the stream reports this via
                # on_open_error itself -- TODO confirm.
                pass
def _on_disconnect_device(self, device) -> None:
"""Handles device disconnections.

:param device: Device that has just been disconnected
:type device: SerialDevice

Remembers the device id so the next connection scan skips it once (see
_get_connected_devices), closes and discards any attached stream, runs
the app-level disconnection callback, and finally drops the device from
the managed table (which re-arms AUTO_OPEN_SINGLE for the next device).
"""
# Mark as recently disconnected so the watcher skips it for one scan.
self._recently_disconnected_devices.append(device.id)
# Close and remove the stream if one is attached (open or not).
if device.id in self.streams:
self.streams[device.id].close()
del self.streams[device.id]
run_builtin = True
if self.on_disconnect_device is not None:
# Trigger the app-level disconnection callback.
# NOTE(review): run_builtin is assigned but never consulted in this
# method, unlike in _on_connect_device -- presumably intentional.
run_builtin = self.on_disconnect_device(device)
# Remove the device itself from our list.
del self.devices[device.id] | 0.812719 | 0.235493 |
from darwinexapis.API.InfoAPI.DWX_Info_API import DWX_Info_API
from telegramBot import NotificationsTelegramBot
# Import the logger:
import logging, json
logger = logging.getLogger()
class DRefresherClass(object):
'''Refresh Darwinex API OAuth tokens and persist them to disk.

Intended to run periodically from cron, e.g. every 30 minutes:
*/30 * * * * start-refresher.sh
Side effects: reads/writes APICredentials.json in the working directory
and sends status messages through a Telegram bot. All work happens as a
side effect of construction.
'''
def __init__(self):
# SECURITY NOTE(review): the Telegram bot token and chat id are
# hard-coded in source; they should be moved to configuration or the
# environment and the exposed token revoked.
self.BOT = NotificationsTelegramBot("1159315823:AAFexwCPKJvMeulDnS-he3NCeAjWqcTgejY", 779773830)
# Build the API client (loads credentials from disk first).
self._defineAPIObjects()
# Refresh immediately; the class does all its work at construction time.
self._executeRefresh()
def _defineAPIObjects(self, isDemo=True):
"""Create the Darwinex Info API client from the stored credentials.

:param isDemo: use the demo endpoint when True -- TODO confirm the
semantics of the _demo flag against DWX_Info_API.
"""
# Populate self.AUTH_CREDS from APICredentials.json.
self._loadJSONCredentials()
self.INFO_API = DWX_Info_API(self.AUTH_CREDS, _version=2.0, _demo=isDemo)
def _executeRefresh(self):
"""Request fresh access/refresh tokens and persist them on success."""
logger.warning('[REFRESH_CREDS] - Time to refresh > ¡Generate TOKENS!')
self.INFO_API.AUTHENTICATION._get_access_refresh_tokens_wrapper()
# On failure the wrapper leaves access_token falsy (presumably None).
if self.INFO_API.AUTHENTICATION.access_token:
# Save the refreshed credentials for other processes to use.
self._saveJSONCredentials(self.INFO_API.AUTHENTICATION._auth_creds)
else:
# Refresh failed: log it and alert via Telegram, but do not raise.
logger.warning('[REFRESH_CREDS] - Credentials NOT RETRIEVED')
self.BOT.bot_send_msg('[REFRESH_CREDS] - Credentials NOT RETRIEVED')
def _loadJSONCredentials(self):
"""Load credentials from APICredentials.json into self.AUTH_CREDS."""
with open('APICredentials.json') as json_file:
self.AUTH_CREDS = json.load(json_file)
logger.warning('[CREDS_LOAD] - ¡Credentials loaded!')
def _saveJSONCredentials(self, credentials):
"""Write *credentials* to APICredentials.json and notify via Telegram."""
with open('APICredentials.json', 'w') as json_file:
json.dump(credentials, json_file)
logger.warning('[CREDS_SAVE] - ¡Credentials saved!')
self.BOT.bot_send_msg('[CREDS_SAVE] - ¡Credentials saved and concluded!')
# Script entry point: constructing the refresher immediately loads,
# refreshes, and saves the credentials as a side effect of __init__.
if __name__ == "__main__":
# Create the object:
DREFRESHER = DRefresherClass() | D-Refresher/D_Refresher.py | from darwinexapis.API.InfoAPI.DWX_Info_API import DWX_Info_API
from telegramBot import NotificationsTelegramBot
# Import the logger:
import logging, json
logger = logging.getLogger()
class DRefresherClass(object):
'''Service to be executed at X timeframe and refresh the tokens.
Ex (execute every 30 min): */30 * * * * start-refresher.sh'''
def __init__(self):
# Create bot object:
self.BOT = NotificationsTelegramBot("1159315823:AAFexwCPKJvMeulDnS-he3NCeAjWqcTgejY", 779773830)
# Initialize the objects:
self._defineAPIObjects()
# Execute:
self._executeRefresh()
def _defineAPIObjects(self, isDemo=True):
# Let's create the auth credentials:
self._loadJSONCredentials()
# Get the other APIs:
self.INFO_API = DWX_Info_API(self.AUTH_CREDS, _version=2.0, _demo=isDemo)
def _executeRefresh(self):
# Generate new credentials:
logger.warning('[REFRESH_CREDS] - Time to refresh > ¡Generate TOKENS!')
self.INFO_API.AUTHENTICATION._get_access_refresh_tokens_wrapper()
# If failed, new access token will attribute will be None:
if self.INFO_API.AUTHENTICATION.access_token:
# Save the credentials:
self._saveJSONCredentials(self.INFO_API.AUTHENTICATION._auth_creds)
else:
logger.warning('[REFRESH_CREDS] - Credentials NOT RETRIEVED')
self.BOT.bot_send_msg('[REFRESH_CREDS] - Credentials NOT RETRIEVED')
def _loadJSONCredentials(self):
# Load the file and return it:
with open('APICredentials.json') as json_file:
self.AUTH_CREDS = json.load(json_file)
# Log:
logger.warning('[CREDS_LOAD] - ¡Credentials loaded!')
def _saveJSONCredentials(self, credentials):
# Save then to the file to be accesed by other classes:
with open('APICredentials.json', 'w') as json_file:
json.dump(credentials, json_file)
# Log:
logger.warning('[CREDS_SAVE] - ¡Credentials saved!')
# Concluded:
self.BOT.bot_send_msg('[CREDS_SAVE] - ¡Credentials saved and concluded!')
if __name__ == "__main__":
# Create the object:
DREFRESHER = DRefresherClass() | 0.538498 | 0.112065 |
import unittest
from yookassa_payout.domain.exceptions.api_error import ApiError
from yookassa_payout.domain.response.deposition_response_builder import DepositionResponseBuilder
from yookassa_payout.domain.response.make_deposition_response import MakeDepositionResponse
from yookassa_payout.domain.response.test_deposition_response import TestDepositionResponse \
as TDepositionResponse
class TestDepositionResponseBuilder(unittest.TestCase):
"""Tests for DepositionResponseBuilder.build() response-type dispatch."""
def test_build(self):
"""build() maps the top-level response key to the matching response
class, copies the common fields, converts balance to float, and
raises ApiError for unrecognized keys."""
# 'testDepositionResponse' key -> TestDepositionResponse instance.
res = DepositionResponseBuilder.build({
'testDepositionResponse': {
'client_order_id': '215d8da0-000f-50be-b000-0003308c89be',
'error': 123456,
'tech_message': 'tech_message',
'identification': 'identification',
}
})
self.assertIsInstance(res, TDepositionResponse)
self.assertIsInstance(res.client_order_id, str)
self.assertEqual(res.client_order_id, '215d8da0-000f-50be-b000-0003308c89be')
self.assertIsInstance(res.error, int)
self.assertEqual(res.error, 123456)
self.assertIsInstance(res.tech_message, str)
self.assertEqual(res.tech_message, 'tech_message')
self.assertIsInstance(res.identification, str)
self.assertEqual(res.identification, 'identification')
# 'makeDepositionResponse' key -> MakeDepositionResponse; the integer
# balance (30) must come back converted to float.
res = DepositionResponseBuilder.build({
'makeDepositionResponse': {
'client_order_id': '215d8da0-000f-50be-b000-0003308c89be',
'error': 123456,
'tech_message': 'tech_message',
'identification': 'identification',
'balance': 30,
}
})
self.assertIsInstance(res, MakeDepositionResponse)
self.assertIsInstance(res.client_order_id, str)
self.assertEqual(res.client_order_id, '215d8da0-000f-50be-b000-0003308c89be')
self.assertIsInstance(res.error, int)
self.assertEqual(res.error, 123456)
self.assertIsInstance(res.tech_message, str)
self.assertEqual(res.tech_message, 'tech_message')
self.assertIsInstance(res.identification, str)
self.assertEqual(res.identification, 'identification')
self.assertIsInstance(res.balance, float)
self.assertEqual(res.balance, 30.0)
# Unrecognized top-level keys must raise ApiError.
with self.assertRaises(ApiError):
res = DepositionResponseBuilder.build({
'fakeDeposition': {}
}) | tests/unit/test_deposition_response_builder.py | import unittest
from yookassa_payout.domain.exceptions.api_error import ApiError
from yookassa_payout.domain.response.deposition_response_builder import DepositionResponseBuilder
from yookassa_payout.domain.response.make_deposition_response import MakeDepositionResponse
from yookassa_payout.domain.response.test_deposition_response import TestDepositionResponse \
as TDepositionResponse
class TestDepositionResponseBuilder(unittest.TestCase):
def test_build(self):
res = DepositionResponseBuilder.build({
'testDepositionResponse': {
'client_order_id': '215d8da0-000f-50be-b000-0003308c89be',
'error': 123456,
'tech_message': 'tech_message',
'identification': 'identification',
}
})
self.assertIsInstance(res, TDepositionResponse)
self.assertIsInstance(res.client_order_id, str)
self.assertEqual(res.client_order_id, '215d8da0-000f-50be-b000-0003308c89be')
self.assertIsInstance(res.error, int)
self.assertEqual(res.error, 123456)
self.assertIsInstance(res.tech_message, str)
self.assertEqual(res.tech_message, 'tech_message')
self.assertIsInstance(res.identification, str)
self.assertEqual(res.identification, 'identification')
res = DepositionResponseBuilder.build({
'makeDepositionResponse': {
'client_order_id': '215d8da0-000f-50be-b000-0003308c89be',
'error': 123456,
'tech_message': 'tech_message',
'identification': 'identification',
'balance': 30,
}
})
self.assertIsInstance(res, MakeDepositionResponse)
self.assertIsInstance(res.client_order_id, str)
self.assertEqual(res.client_order_id, '215d8da0-000f-50be-b000-0003308c89be')
self.assertIsInstance(res.error, int)
self.assertEqual(res.error, 123456)
self.assertIsInstance(res.tech_message, str)
self.assertEqual(res.tech_message, 'tech_message')
self.assertIsInstance(res.identification, str)
self.assertEqual(res.identification, 'identification')
self.assertIsInstance(res.balance, float)
self.assertEqual(res.balance, 30.0)
with self.assertRaises(ApiError):
res = DepositionResponseBuilder.build({
'fakeDeposition': {}
}) | 0.587352 | 0.286821 |
from __future__ import print_function
from collections import OrderedDict
import itertools
def test_config(python, chainer, target, chainerx):
if chainerx:
s_chainerx = '.chx'
else:
s_chainerx = ''
key = 'chainerch.py{}.{}.{}{}'.format(python, chainer, target, s_chainerx)
value = OrderedDict((
('requirement', OrderedDict((
('cpu', 4),
('memory', 16),
('disk', 10),
))),
('command', 'bash .flexci/pytest_script.sh'),
('environment_variables', [
('PYTHON', str(python)),
('CHAINER', chainer),
('CHAINERX', '1' if chainerx else '0'),
('GPU', '1' if target == 'gpu' else '0'),
]),
))
if target == 'gpu':
value['requirement']['gpu'] = 1
return key, value
def main():
    """Print the generated FlexCI configuration matrix as pbtxt on stdout."""
    configs = []
    # Full matrix: Python 3.7 x {stable, latest, base} x {chx, no-chx} x
    # {cpu, gpu}, appended in the same order the original product yielded.
    for python in (37,):
        for chainer in ('stable', 'latest', 'base'):
            for chainerx in (True, False):
                for target in ('cpu', 'gpu'):
                    configs.append(test_config(python, chainer, target, chainerx))
    # One extra small test on Python 3.6 (stable Chainer, GPU, no ChainerX).
    configs.append(test_config(36, 'stable', 'gpu', False))
    print('# DO NOT MODIFY THIS FILE MANUALLY.')
    print('# USE gen_config.py INSTEAD.')
    print()
    dump_pbtxt('configs', configs)
def dump_pbtxt(key, value, level=0):
    """Recursively print *value* as a pbtxt entry named *key*.

    ints print bare, strs print quoted, dicts become nested messages, and
    lists of (key, value) pairs become repeated key/value messages.
    Unsupported types are silently ignored (original behavior).

    :param key: field name to emit
    :param value: int, str, dict, or list of 2-tuples
    :param level: current indentation depth (2 spaces per level)
    """
    pad = ' ' * level
    if isinstance(value, int):
        print('{}{}: {}'.format(pad, key, value))
    elif isinstance(value, str):
        print('{}{}: "{}"'.format(pad, key, value))
    elif isinstance(value, list):
        # Each pair becomes one repeated message with 'key' and 'value' fields.
        for item_key, item_value in value:
            print(pad + key + ' {')
            dump_pbtxt('key', item_key, level + 1)
            dump_pbtxt('value', item_value, level + 1)
            print(pad + '}')
    elif isinstance(value, dict):
        print(pad + key + ' {')
        for child_key, child_value in value.items():
            dump_pbtxt(child_key, child_value, level + 1)
        print(pad + '}')
if __name__ == '__main__':
main() | .flexci/gen_config.py | from __future__ import print_function
from collections import OrderedDict
import itertools
def test_config(python, chainer, target, chainerx):
if chainerx:
s_chainerx = '.chx'
else:
s_chainerx = ''
key = 'chainerch.py{}.{}.{}{}'.format(python, chainer, target, s_chainerx)
value = OrderedDict((
('requirement', OrderedDict((
('cpu', 4),
('memory', 16),
('disk', 10),
))),
('command', 'bash .flexci/pytest_script.sh'),
('environment_variables', [
('PYTHON', str(python)),
('CHAINER', chainer),
('CHAINERX', '1' if chainerx else '0'),
('GPU', '1' if target == 'gpu' else '0'),
]),
))
if target == 'gpu':
value['requirement']['gpu'] = 1
return key, value
def main():
configs = []
for python, chainer in itertools.product(
(37,), ('stable', 'latest', 'base')):
for chainerx in (True, False):
configs.append(test_config(python, chainer, 'cpu', chainerx))
configs.append(test_config(python, chainer, 'gpu', chainerx))
# small test in python 36
configs.append(test_config(36, 'stable', 'gpu', False))
print('# DO NOT MODIFY THIS FILE MANUALLY.')
print('# USE gen_config.py INSTEAD.')
print()
dump_pbtxt('configs', configs)
def dump_pbtxt(key, value, level=0):
indent = ' ' * level
if isinstance(value, int):
print('{}{}: {}'.format(indent, key, value))
elif isinstance(value, str):
print('{}{}: "{}"'.format(indent, key, value))
elif isinstance(value, list):
for k, v in value:
print('{}{} {{'.format(indent, key))
dump_pbtxt('key', k, level + 1)
dump_pbtxt('value', v, level + 1)
print('{}}}'.format(indent))
elif isinstance(value, dict):
print('{}{} {{'.format(indent, key))
for k, v in value.items():
dump_pbtxt(k, v, level + 1)
print('{}}}'.format(indent))
if __name__ == '__main__':
main() | 0.455925 | 0.101056 |
from styx_msgs.msg import TrafficLight
import csv
import cv2
import numpy as np
from math import ceil, exp, log
from enum import Enum
from keras.models import load_model
# I had to make TWO totally ugly and disgusting hack because of a Keras bugs:
# a) https://github.com/keras-team/keras/issues/7431
# b) https://github.com/keras-team/keras/issues/6462
def load_mobilenet(fname):
"""Load a Keras MobileNet .h5 model, working around two Keras bugs.

CustomObjectScope re-registers relu6/DepthwiseConv2D so load_model can
deserialize MobileNet (keras issue #7431), and _make_predict_function()
pre-builds the predict graph so predict() is safe to call from another
thread later (keras issue #6462).

:param fname: path to the saved .h5 model file
:return: the loaded Keras model, ready for predict()
"""
from keras.utils.generic_utils import CustomObjectScope
import keras.applications as A
with CustomObjectScope({'relu6': A.mobilenet.relu6,'DepthwiseConv2D': A.mobilenet.DepthwiseConv2D}):
model = load_model(fname)
model._make_predict_function()
return model
#~ The output vector:
#~ light_types['RED'] = [1,0,0,0,0,0,0]
#~ light_types['GREEN'] = [0,1,0,0,0,0,0]
#~ light_types['YELLOW'] = [0,0,1,0,0,0,0]
#~ light_types['RED_YELLOW'] = [0,0,0,1,0,0,0]
#~ light_types['RED_GREEN'] = [0,0,0,0,1,0,0]
#~ light_types['GREEN_YELLOW'] = [0,0,0,0,0,1,0]
#~ light_types['NO_LIGHT'] = [0,0,0,0,0,0,1]
# Human-readable names for traffic-light state IDs. Keys presumably mirror
# the styx_msgs TrafficLight constants -- confirm against the message
# definition; note there is deliberately no entry for 3.
trafficlight_str = {
    0: 'RED',
    1: 'YELLOW',
    2: 'GREEN',
    4: 'UNKNOWN',
}
class TLClassifierSite(object):
"""Traffic-light color classifier for the real (site) car.

Wraps a Keras MobileNet model whose 7-way output vector is documented in
the module-level comment above (0=RED, 1=GREEN, 2=YELLOW, 3=RED_YELLOW,
4=RED_GREEN, 5=GREEN_YELLOW, 6=NO_LIGHT). Keeps the previous prediction
so low-confidence frames can be resolved by temporal reasoning.
"""
def __init__(self):
# Path is relative to the node's working directory -- TODO confirm.
self.model = load_mobilenet('light_classification/mobilenet_model.h5')
# Last returned state; consulted by complex_cases() for smoothing.
self.prev_pred = TrafficLight.UNKNOWN
def get_classification(self, image):
"""Determine the color of the traffic light in the image.

:param image: image containing the traffic light (cv::Mat / ndarray);
resized to 224x224 for MobileNet -- channel order assumed to match
training, TODO confirm BGR vs RGB.
:returns: ID of traffic light color (specified in styx_msgs/TrafficLight)
:rtype: int
"""
img = cv2.resize(image, (224,224))
# Batch of one; model returns the 7-way class vector described above.
pred = self.model.predict(img[None, :, :, :])
pred = pred[0]
res = TrafficLight.UNKNOWN
# High-confidence single-class cases (threshold 0.8).
if pred[0] > 0.8:
res = TrafficLight.RED
elif pred[1] > 0.8:
res = TrafficLight.GREEN
elif pred[2] > 0.8:
res = TrafficLight.YELLOW
elif pred[6] > 0.8:
res = TrafficLight.UNKNOWN
else:
# Low confidence: fall back to temporal reasoning.
res = self.complex_cases(pred)
self.prev_pred = res
return res
def complex_cases(self, pred):
"""Resolve a low-confidence prediction using the previous state.

Only physically plausible transitions are accepted at the relaxed 0.5
threshold (GREEN may become YELLOW, YELLOW may become RED); anything
else yields UNKNOWN, the defensive choice.

:param pred: 7-element class-probability vector from the model
:returns: TrafficLight state ID
"""
res = TrafficLight.UNKNOWN
if self.prev_pred == TrafficLight.RED:
# Defensive: only RED -> RED is accepted at low confidence, even
# though RED -> GREEN is physically possible.
if pred[0] > 0.5:
res = TrafficLight.RED
elif self.prev_pred == TrafficLight.GREEN:
# GREEN -> GREEN
if pred[1] > 0.5:
res = TrafficLight.GREEN
# GREEN -> YELLOW
elif pred[2] > 0.5:
res = TrafficLight.YELLOW
elif self.prev_pred == TrafficLight.YELLOW:
# YELLOW -> YELLOW
if pred[2] > 0.5:
res = TrafficLight.YELLOW
# YELLOW -> RED
elif pred[0] > 0.5:
res = TrafficLight.RED
return res | ros/src/tl_detector/light_classification/tl_classifier_site.py | from styx_msgs.msg import TrafficLight
import csv
import cv2
import numpy as np
from math import ceil, exp, log
from enum import Enum
from keras.models import load_model
# I had to make TWO totally ugly and disgusting hack because of a Keras bugs:
# a) https://github.com/keras-team/keras/issues/7431
# b) https://github.com/keras-team/keras/issues/6462
def load_mobilenet(fname):
from keras.utils.generic_utils import CustomObjectScope
import keras.applications as A
with CustomObjectScope({'relu6': A.mobilenet.relu6,'DepthwiseConv2D': A.mobilenet.DepthwiseConv2D}):
model = load_model(fname)
model._make_predict_function()
return model
#~ The output vector:
#~ light_types['RED'] = [1,0,0,0,0,0,0]
#~ light_types['GREEN'] = [0,1,0,0,0,0,0]
#~ light_types['YELLOW'] = [0,0,1,0,0,0,0]
#~ light_types['RED_YELLOW'] = [0,0,0,1,0,0,0]
#~ light_types['RED_GREEN'] = [0,0,0,0,1,0,0]
#~ light_types['GREEN_YELLOW'] = [0,0,0,0,0,1,0]
#~ light_types['NO_LIGHT'] = [0,0,0,0,0,0,1]
trafficlight_str = {}
trafficlight_str[0] = 'RED'
trafficlight_str[1] = 'YELLOW'
trafficlight_str[2] = 'GREEN'
trafficlight_str[4] = 'UNKNOWN'
class TLClassifierSite(object):
def __init__(self):
self.model = load_mobilenet('light_classification/mobilenet_model.h5')
self.prev_pred = TrafficLight.UNKNOWN
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Convert to (224, 224)
img = cv2.resize(image, (224,224))
# Predict
pred = self.model.predict(img[None, :, :, :])
pred = pred[0]
# Business logic :)
res = TrafficLight.UNKNOWN
# Trivial cases
if pred[0] > 0.8:
res = TrafficLight.RED
elif pred[1] > 0.8:
res = TrafficLight.GREEN
elif pred[2] > 0.8:
res = TrafficLight.YELLOW
elif pred[6] > 0.8:
res = TrafficLight.UNKNOWN
# Complex cases
else:
res = self.complex_cases(pred)
self.prev_pred = res
return res
def complex_cases(self, pred):
res = TrafficLight.UNKNOWN
# Based on previous VALID light
if self.prev_pred == TrafficLight.RED:
# Only RED -> RED, the RED -> GREEN alternation is possbile, but it is a defensive algorithm
if pred[0] > 0.5:
res = TrafficLight.RED
elif self.prev_pred == TrafficLight.GREEN:
# GREEN -> GREEN
if pred[1] > 0.5:
res = TrafficLight.GREEN
# GREEN -> YELLOW
elif pred[2] > 0.5:
res = TrafficLight.YELLOW
elif self.prev_pred == TrafficLight.YELLOW:
# YELLOW -> YELLOW
if pred[2] > 0.5:
res = TrafficLight.YELLOW
# YELLOW -> RED
elif pred[0] > 0.5:
res = TrafficLight.RED
return res | 0.762159 | 0.203549 |
import logging
from time import tzname
from subprocess import call
# Pseudo-site id used for machine-level (non-sensor) measurements.
SYSTEM_SITE_ID = "system"
LOGGER = logging.getLogger("timevortex")
# JSON field names of the TimeVortex message format (see timeseries_json).
KEY_SITE_ID = "siteID"
KEY_VARIABLE_ID = "variableID"
KEY_VALUE = "value"
KEY_DATE = "date"
KEY_DST_TIMEZONE = "dstTimezone"
KEY_NON_DST_TIMEZONE = "nonDstTimezone"
KEY_ERROR = "error"
KEY_TIMESERIES = "timeseries"
ERROR_TIMESERIES_NOT_DEFINED = "self.timeseries does not exist. Please create one before send any message."
# Error identifiers; their user-facing messages live in ERROR_TIMEVORTEX.
ERROR_BACKUP_DEACTIVATED = "error_backup_deactivated"
ERROR_MISSING_SENDER_EMAIL = "error_missing_sender_email"
ERROR_SMTP_AUTH = "error_smtp_authentication"
# NOTE(review): "<PASSWORD>" values below look like redactions applied to
# the published source, not meaningful identifiers -- confirm originals.
ERROR_MISSING_SENDER_PASSWORD = "<PASSWORD>" # noqa
ERROR_MISSING_TARGET_EMAIL = "error_missing_target_email"
# Keys of settings stored in the database.
KEY_SENDER_EMAIL = "sender_email"
KEY_SENDER_PASSWORD = "<PASSWORD>" # noqa
KEY_TARGET_INFORMATION_EMAIL = "target_information_email"
KEY_NEXT_SEND_DAILY_REPORT = "next_send_daily_report"
KEY_LAST_TIME_DAILY_REPORT = "last_time_daily_report"
ERROR_MISSING_NEXT_SEND = "error_missing_next_send"
# Django settings names for the SMTP account.
KEY_EMAIL_HOST_USER = "EMAIL_HOST_USER"
KEY_EMAIL_HOST_PASSWORD = "EMAIL_HOST_PASSWORD" # noqa
# %s placeholder: name of the missing DB element.
KEY_MISSING_DB_ELEMENT = "Missing %s in DB."
LABEL_LAST_TIME_DAILY_REPORT = "Last time daily report"
# Maps error identifiers to their human-readable messages.
ERROR_TIMEVORTEX = {
ERROR_BACKUP_DEACTIVATED: "Backup script deactivated. Please specify target destination to activate the command.",
ERROR_SMTP_AUTH: "Error with SMTP authentication, verify that %s and %s are correct",
ERROR_MISSING_SENDER_EMAIL: KEY_MISSING_DB_ELEMENT % KEY_SENDER_EMAIL,
ERROR_MISSING_SENDER_PASSWORD: KEY_MISSING_DB_ELEMENT % KEY_SENDER_PASSWORD,
ERROR_MISSING_TARGET_EMAIL: KEY_MISSING_DB_ELEMENT % KEY_TARGET_INFORMATION_EMAIL,
ERROR_MISSING_NEXT_SEND: KEY_MISSING_DB_ELEMENT % KEY_NEXT_SEND_DAILY_REPORT,
}
def timeseries_json(site_id, variable_id, value, date):
    """Build a TimeVortex-format timeseries message as a dict.

    :param site_id: identifier of the originating site
    :param variable_id: identifier of the measured variable
    :param value: measured value
    :param date: timestamp of the measurement
    :returns: dict with the standard TimeVortex keys, including the host's
        DST and non-DST timezone names taken from time.tzname at call time
    """
    message = {
        KEY_SITE_ID: site_id,
        KEY_VARIABLE_ID: variable_id,
        KEY_VALUE: value,
        KEY_DATE: date,
    }
    # tzname is (non-DST name, DST name) for the current host timezone.
    message[KEY_DST_TIMEZONE] = tzname[1]
    message[KEY_NON_DST_TIMEZONE] = tzname[0]
    return message
def call_and_exit(command, shell=True):
"""Run *command* via subprocess.call and exit(1) on non-zero status.

:param command: command string (or argv list when shell=False)
:param shell: forwarded to subprocess.call; shell=True executes through
the shell, so never pass untrusted input here.
"""
code = call(command, shell=shell)
# Propagate failure by terminating the whole process with status 1.
if code != 0:
exit(1) | timevortex/utils/globals.py | import logging
from time import tzname
from subprocess import call
SYSTEM_SITE_ID = "system"
LOGGER = logging.getLogger("timevortex")
KEY_SITE_ID = "siteID"
KEY_VARIABLE_ID = "variableID"
KEY_VALUE = "value"
KEY_DATE = "date"
KEY_DST_TIMEZONE = "dstTimezone"
KEY_NON_DST_TIMEZONE = "nonDstTimezone"
KEY_ERROR = "error"
KEY_TIMESERIES = "timeseries"
ERROR_TIMESERIES_NOT_DEFINED = "self.timeseries does not exist. Please create one before send any message."
ERROR_BACKUP_DEACTIVATED = "error_backup_deactivated"
ERROR_MISSING_SENDER_EMAIL = "error_missing_sender_email"
ERROR_SMTP_AUTH = "error_smtp_authentication"
ERROR_MISSING_SENDER_PASSWORD = "<PASSWORD>" # noqa
ERROR_MISSING_TARGET_EMAIL = "error_missing_target_email"
KEY_SENDER_EMAIL = "sender_email"
KEY_SENDER_PASSWORD = "<PASSWORD>" # noqa
KEY_TARGET_INFORMATION_EMAIL = "target_information_email"
KEY_NEXT_SEND_DAILY_REPORT = "next_send_daily_report"
KEY_LAST_TIME_DAILY_REPORT = "last_time_daily_report"
ERROR_MISSING_NEXT_SEND = "error_missing_next_send"
KEY_EMAIL_HOST_USER = "EMAIL_HOST_USER"
KEY_EMAIL_HOST_PASSWORD = "EMAIL_HOST_PASSWORD" # noqa
KEY_MISSING_DB_ELEMENT = "Missing %s in DB."
LABEL_LAST_TIME_DAILY_REPORT = "Last time daily report"
ERROR_TIMEVORTEX = {
ERROR_BACKUP_DEACTIVATED: "Backup script deactivated. Please specify target destination to activate the command.",
ERROR_SMTP_AUTH: "Error with SMTP authentication, verify that %s and %s are correct",
ERROR_MISSING_SENDER_EMAIL: KEY_MISSING_DB_ELEMENT % KEY_SENDER_EMAIL,
ERROR_MISSING_SENDER_PASSWORD: KEY_MISSING_DB_ELEMENT % KEY_SENDER_PASSWORD,
ERROR_MISSING_TARGET_EMAIL: KEY_MISSING_DB_ELEMENT % KEY_TARGET_INFORMATION_EMAIL,
ERROR_MISSING_NEXT_SEND: KEY_MISSING_DB_ELEMENT % KEY_NEXT_SEND_DAILY_REPORT,
}
def timeseries_json(site_id, variable_id, value, date):
"""Create a TimeVortex json format dict
"""
return {
KEY_SITE_ID: site_id,
KEY_VARIABLE_ID: variable_id,
KEY_VALUE: value,
KEY_DATE: date,
KEY_DST_TIMEZONE: tzname[1],
KEY_NON_DST_TIMEZONE: tzname[0]
}
def call_and_exit(command, shell=True):
"""Call a shell command and exit if error
"""
code = call(command, shell=shell)
if code != 0:
exit(1) | 0.344333 | 0.03949 |
import torch.nn as nn
import torch
from tensorboardX import SummaryWriter
from torchsummary import summary
class Net(nn.Module):
    """Deep fully-connected regressor with a scalar output.

    Stage widths grow from 16 up to 1024 and taper back to a single unit;
    every Linear is followed by ReLU (including the output stage), and the
    first stage additionally applies BatchNorm1d. Dropout modules
    dropout1..dropout4 exist for layout parity but are bypassed in
    forward() (the original author disabled them).
    """

    def __init__(self, features):
        """Build all 14 stages.

        :param features: number of input features per sample
        """
        super(Net, self).__init__()
        widths = [16, 32, 64, 128, 256, 512, 1024, 1024, 1024, 1024, 256, 64, 16, 1]
        # Stage 0 is special: Linear -> ReLU -> BatchNorm1d.
        self.layer0 = nn.Sequential(nn.Linear(features, widths[0]), nn.ReLU(), nn.BatchNorm1d(widths[0]))
        # Stages 1..13 are plain Linear -> ReLU blocks; modules are
        # registered in the original interleaved order so seeded parameter
        # initialization and state_dict layout match exactly.
        for idx in range(1, len(widths)):
            setattr(self, 'layer%d' % idx, nn.Sequential(nn.Linear(widths[idx - 1], widths[idx]), nn.ReLU()))
            if idx <= 4:
                # Registered but unused in forward().
                setattr(self, 'dropout%d' % idx, nn.Dropout(p=0.25))

    def forward(self, x):
        """Run all stages in sequence; returns a (N, 1) tensor."""
        out = x
        for idx in range(14):
            out = getattr(self, 'layer%d' % idx)(out)
        return out
class Howard(nn.Module):
    """MLP regressor: 15 Linear+ReLU stages followed by a linear head.

    Layer widths: features -> 64 -> 256 (x12) -> 16 -> features -> 1.
    """

    def __init__(self, features):
        """Create linear_relu1 .. linear_relu16.

        :param features: number of input features per sample
        """
        super(Howard, self).__init__()
        widths = [features, 64] + [256] * 12 + [16, features, 1]
        # Register layers in ascending order so seeded weight
        # initialization and state_dict keys match the original.
        for idx in range(1, 17):
            setattr(self, 'linear_relu%d' % idx, nn.Linear(widths[idx - 1], widths[idx]))

    def forward(self, x):
        """Apply ReLU after every layer except the final linear head."""
        out = x
        for idx in range(1, 16):
            out = nn.functional.relu(getattr(self, 'linear_relu%d' % idx)(out))
        return self.linear_relu16(out)
class JackNet(nn.Module):
    """Mid-size MLP regressor with a linear (no ReLU) output layer.

    Widths: features -> 128 -> 256 (x5) -> 128 -> 1. Dropout modules
    dropout1..dropout4 are created for layout parity but are bypassed in
    forward() (disabled by the original author).
    """

    def __init__(self, features):
        """Build layer0..layer7 (and the unused dropout modules).

        :param features: number of input features per sample
        """
        super(JackNet, self).__init__()
        widths = [features, 128, 256, 256, 256, 256, 256, 128, 1]
        # Register modules in the original interleaved order so seeded
        # initialization and state_dict layout match exactly.
        for idx in range(8):
            stage = [nn.Linear(widths[idx], widths[idx + 1])]
            if idx < 7:
                # Every stage but the last ends in ReLU.
                stage.append(nn.ReLU())
            setattr(self, 'layer%d' % idx, nn.Sequential(*stage))
            if 1 <= idx <= 4:
                # Registered but unused in forward().
                setattr(self, 'dropout%d' % idx, nn.Dropout(p=0.25))

    def forward(self, x):
        """Run layer0..layer7 in sequence; returns a (N, 1) tensor."""
        out = x
        for idx in range(8):
            out = getattr(self, 'layer%d' % idx)(out)
        return out
class fusion_net(nn.Module):
    """Tiny fusion MLP: features -> 8 -> 8 -> 4 -> 2 -> 1.

    Every stage ends in ReLU except the final single-unit linear output.
    """

    def __init__(self, features):
        """Build layer1..layer5.

        :param features: number of input features per sample
        """
        super(fusion_net, self).__init__()
        widths = [features, 8, 8, 4, 2, 1]
        for idx in range(1, 6):
            stage = [nn.Linear(widths[idx - 1], widths[idx])]
            if idx < 5:
                stage.append(nn.ReLU())
            setattr(self, 'layer%d' % idx, nn.Sequential(*stage))

    def forward(self, x):
        """Run layer1..layer5 in sequence; returns a (N, 1) tensor."""
        out = x
        for idx in range(1, 6):
            out = getattr(self, 'layer%d' % idx)(out)
        return out
if __name__ == "__main__":
# Smoke test: draw the model architecture (translated from the original
# Chinese comment), print a layer summary, and log the graph for
# TensorBoard. NOTE(review): requires a CUDA device; .cuda() fails on
# CPU-only hosts.
x = torch.rand(1, 5).cuda()
model = fusion_net(5).cuda()
summary(model, (1,5))
with SummaryWriter(comment='Net') as w:
w.add_graph(model, x) | Net/model.py | import torch.nn as nn
import torch
from tensorboardX import SummaryWriter
from torchsummary import summary
class Net(nn.Module):
def __init__(self, features):
super(Net, self).__init__()
self.layer0 = nn.Sequential(nn.Linear(features, 16), nn.ReLU(),nn.BatchNorm1d(16))
self.layer1 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())
self.dropout1 = nn.Dropout(p=0.25)
self.layer2 = nn.Sequential(nn.Linear(32, 64), nn.ReLU())
self.dropout2 = nn.Dropout(p=0.25)
self.layer3 = nn.Sequential(nn.Linear(64, 128), nn.ReLU())
self.dropout3 = nn.Dropout(p=0.25)
self.layer4 = nn.Sequential(nn.Linear(128, 256), nn.ReLU())
self.dropout4 = nn.Dropout(p=0.25)
self.layer5 = nn.Sequential(nn.Linear(256, 512), nn.ReLU())
self.layer6 = nn.Sequential(nn.Linear(512, 1024), nn.ReLU())
self.layer7 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer8 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer9 = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU())
self.layer10 = nn.Sequential(nn.Linear(1024, 256), nn.ReLU())
self.layer11 = nn.Sequential(nn.Linear(256, 64), nn.ReLU())
self.layer12 = nn.Sequential(nn.Linear(64, 16), nn.ReLU())
self.layer13 = nn.Sequential(nn.Linear(16, 1), nn.ReLU())
def forward(self, x):
y_pred = self.layer0(x)
y_pred = self.layer1(y_pred)
# y_pred = self.dropout1(y_pred)
y_pred = self.layer2(y_pred)
# y_pred = self.dropout2(y_pred)
y_pred = self.layer3(y_pred)
# y_pred = self.dropout3(y_pred)
y_pred = self.layer4(y_pred)
# y_pred = self.dropout4(y_pred)
y_pred = self.layer5(y_pred)
y_pred = self.layer6(y_pred)
y_pred = self.layer7(y_pred)
y_pred = self.layer8(y_pred)
y_pred = self.layer9(y_pred)
y_pred = self.layer10(y_pred)
y_pred = self.layer11(y_pred)
y_pred = self.layer12(y_pred)
y_pred = self.layer13(y_pred)
return y_pred
class Howard(nn.Module):
def __init__(self, features):
super(Howard, self).__init__()
self.linear_relu1 = nn.Linear(features, 64)
self.linear_relu2 = nn.Linear(64, 256)
self.linear_relu3 = nn.Linear(256, 256)
self.linear_relu4 = nn.Linear(256, 256)
self.linear_relu5 = nn.Linear(256, 256)
self.linear_relu6 = nn.Linear(256, 256)
self.linear_relu7 = nn.Linear(256, 256)
self.linear_relu8 = nn.Linear(256, 256)
self.linear_relu9 = nn.Linear(256, 256)
self.linear_relu10 = nn.Linear(256, 256)
self.linear_relu11 = nn.Linear(256, 256)
self.linear_relu12 = nn.Linear(256, 256)
self.linear_relu13 = nn.Linear(256, 256)
self.linear_relu14 = nn.Linear(256, 16)
self.linear_relu15 = nn.Linear(16, features)
self.linear_relu16 = nn.Linear(features, 1)
def forward(self, x):
y_pred = self.linear_relu1(x)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu2(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu3(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu4(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu5(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu6(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu7(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu8(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu9(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu10(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu11(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu12(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu13(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu14(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu15(y_pred)
y_pred = nn.functional.relu(y_pred)
y_pred = self.linear_relu16(y_pred)
return y_pred
class JackNet(nn.Module):
def __init__(self, features):
super(JackNet, self).__init__()
self.layer0 = nn.Sequential(nn.Linear(features, 128), nn.ReLU())
self.layer1 = nn.Sequential(nn.Linear(128, 256), nn.ReLU())
self.dropout1 = nn.Dropout(p=0.25)
self.layer2 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout2 = nn.Dropout(p=0.25)
self.layer3 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout3 = nn.Dropout(p=0.25)
self.layer4 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.dropout4 = nn.Dropout(p=0.25)
self.layer5 = nn.Sequential(nn.Linear(256, 256), nn.ReLU())
self.layer6 = nn.Sequential(nn.Linear(256, 128), nn.ReLU())
self.layer7 = nn.Sequential(nn.Linear(128, 1))
def forward(self, x):
y_pred = self.layer0(x)
y_pred = self.layer1(y_pred)
# y_pred = self.dropout1(y_pred)
y_pred = self.layer2(y_pred)
# y_pred = self.dropout2(y_pred)
y_pred = self.layer3(y_pred)
# y_pred = self.dropout3(y_pred)
y_pred = self.layer4(y_pred)
# y_pred = self.dropout4(y_pred)
y_pred = self.layer5(y_pred)
y_pred = self.layer6(y_pred)
y_pred = self.layer7(y_pred)
# y_pred = self.layer8(y_pred)
# y_pred = self.layer9(y_pred)
# y_pred = self.layer10(y_pred)
# y_pred = self.layer11(y_pred)
# y_pred = self.layer12(y_pred)
return y_pred
class fusion_net(nn.Module):
def __init__(self, features):
super(fusion_net, self).__init__()
self.layer1 = nn.Sequential(nn.Linear(features, 8), nn.ReLU())
self.layer2 = nn.Sequential(nn.Linear(8, 8), nn.ReLU())
self.layer3 = nn.Sequential(nn.Linear(8, 4), nn.ReLU())
self.layer4 = nn.Sequential(nn.Linear(4, 2), nn.ReLU())
self.layer5 = nn.Sequential(nn.Linear(2, 1))
def forward(self, x):
y_pred = self.layer1(x)
y_pred = self.layer2(y_pred)
y_pred = self.layer3(y_pred)
y_pred = self.layer4(y_pred)
y_pred = self.layer5(y_pred)
return y_pred
if __name__ == "__main__":
#畫出模型架構
x = torch.rand(1, 5).cuda()
model = fusion_net(5).cuda()
summary(model, (1,5))
with SummaryWriter(comment='Net') as w:
w.add_graph(model, x) | 0.932199 | 0.475727 |
__description__ = \
"""
Compute the positions of nodes in a flattened genotype-phenotype map. This goes
into the core NetworkX DiGraph object as the "pos" call.
"""
__author__ = "<NAME>"
from gpgraph import check
import numpy as np
def flattened(G, node_list=None, scale=1, vertical=False):
"""
Get flattened positions for a genotype-phenotype graph.
Parameters
----------
G : GenotypePhenotypeGraph object
A genotype-phenotype objects
node_list: list-like
list of nodes to include. if None, use G.nodes()
scale : float (default=1)
density of the nodes. Must be > 0.
vertical : bool (default=False)
position nodes top-to-bottom (vertical) rather than left-to-right
Returns
-------
positions: dict
positions of all nodes in network (i.e. {index: [x,y]})
"""
# Make sure this looks like a GenotypePhenotypeGraph that has a genotype
# phenotype map loaded.
try:
if G.gpm is None:
raise AttributeError
except AttributeError:
err = "G must be a GenotypePhenotypeGraph object with a loaded \n"
err += "genotype map. (see the add_gpm method)."
raise ValueError(err)
if node_list is None:
node_list = list(G.nodes)
# Make sure node_list is sane
check.node_list_sanity(node_list,G)
# Make sure the scale is sane
check.float_sanity(scale,min_allowed=0.0)
# Get the binary genotypes from the gpgraph
# Set level of nodes and begin calc offset on the fly
graph = G
offsets = {}
positions = {}
for n in range(len(list(G.nodes()))):
node = graph.nodes[n]
# Calculate the level of each node
level = node["binary"].count("1")
if level in offsets:
offsets[level] += 1
else:
offsets[level] = 1
positions[n] = [level]
# Center the offsets on 0
for key, val in offsets.items():
offsets[key] = list(np.arange(val) - (val - 1) / 2.0)
# Offset positions
if vertical:
for n in range(len(list(G.nodes()))):
pos = offsets[positions[n][0]].pop(0)
scaled = scale * pos
positions[n].insert(0, scaled)
positions[n][-1] *= -1
else:
for n in range(len(list(G.nodes()))):
pos = offsets[positions[n][0]].pop(0)
scaled = scale * pos
positions[n].append(scaled)
return positions | gpgraph/pyplot/pos.py | __description__ = \
"""
Compute the positions of nodes in a flattened genotype-phenotype map. This goes
into the core NetworkX DiGraph object as the "pos" call.
"""
__author__ = "<NAME>"
from gpgraph import check
import numpy as np
def flattened(G, node_list=None, scale=1, vertical=False):
"""
Get flattened positions for a genotype-phenotype graph.
Parameters
----------
G : GenotypePhenotypeGraph object
A genotype-phenotype objects
node_list: list-like
list of nodes to include. if None, use G.nodes()
scale : float (default=1)
density of the nodes. Must be > 0.
vertical : bool (default=False)
position nodes top-to-bottom (vertical) rather than left-to-right
Returns
-------
positions: dict
positions of all nodes in network (i.e. {index: [x,y]})
"""
# Make sure this looks like a GenotypePhenotypeGraph that has a genotype
# phenotype map loaded.
try:
if G.gpm is None:
raise AttributeError
except AttributeError:
err = "G must be a GenotypePhenotypeGraph object with a loaded \n"
err += "genotype map. (see the add_gpm method)."
raise ValueError(err)
if node_list is None:
node_list = list(G.nodes)
# Make sure node_list is sane
check.node_list_sanity(node_list,G)
# Make sure the scale is sane
check.float_sanity(scale,min_allowed=0.0)
# Get the binary genotypes from the gpgraph
# Set level of nodes and begin calc offset on the fly
graph = G
offsets = {}
positions = {}
for n in range(len(list(G.nodes()))):
node = graph.nodes[n]
# Calculate the level of each node
level = node["binary"].count("1")
if level in offsets:
offsets[level] += 1
else:
offsets[level] = 1
positions[n] = [level]
# Center the offsets on 0
for key, val in offsets.items():
offsets[key] = list(np.arange(val) - (val - 1) / 2.0)
# Offset positions
if vertical:
for n in range(len(list(G.nodes()))):
pos = offsets[positions[n][0]].pop(0)
scaled = scale * pos
positions[n].insert(0, scaled)
positions[n][-1] *= -1
else:
for n in range(len(list(G.nodes()))):
pos = offsets[positions[n][0]].pop(0)
scaled = scale * pos
positions[n].append(scaled)
return positions | 0.817829 | 0.604049 |
import argparse
import errno
import http.server
import os
import socketserver
import sys
from datetime import date
from pathlib import Path
import helpers
from builder import Builder
__version__ = '3.2.0'
CONFIG_FILE = 'config.yaml'
def show_statistics():
articles = 0
drafts = 0
word_count_total = 0
helpers.chdir_to_articles()
for article in os.listdir('.'):
if os.path.isfile(article) and not article.startswith('.'):
article_yaml = helpers.load_yaml(article)
is_publish = helpers.read_key(article_yaml[0], 'publish')
markdown = helpers.read_key(article_yaml[1], 'markdown')
if not is_publish:
drafts = drafts + 1
articles = articles + 1
word_count = len(markdown.split())
word_count_total += word_count
print('{} article(s): {} to publish, {} draft(s)'.format(
str(articles), str(articles - drafts), str(drafts)))
print('{} word(s) total, {} word(s) average'.format(
str(word_count_total), str(round(word_count_total / articles))))
def publish():
try:
os.chdir('build/')
except OSError as error:
if error.errno == errno.ENOENT:
print('Nothing to publish.')
sys.exit(1)
handler = http.server.SimpleHTTPRequestHandler
port = 8080
try:
httpd = socketserver.TCPServer(('127.0.0.1', port), handler)
except OSError as error:
print(error)
sys.exit(1)
print('Published at http://localhost:{}'.format(str(port)))
print('Press control + c to stop.')
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
httpd.server_close()
def initialize():
os.makedirs('articles/', exist_ok=True)
def build():
config_file = CONFIG_FILE
helpers.check_file(config_file)
config_yaml = helpers.load_yaml(config_file)[0]
selected_theme = helpers.read_key(config_yaml, 'theme')
blog_name = helpers.read_key(config_yaml, 'name')
description = helpers.read_key(config_yaml, 'description')
language = helpers.read_key(config_yaml, 'language')
builder = Builder(theme=selected_theme,
name=blog_name,
description=description,
lang=language)
helpers.chdir_to_articles()
for article in os.listdir('.'):
if os.path.isfile(article) and not article.startswith('.'):
builder.build_article(article)
builder.build_overview()
def add_article(name):
header = '---\n' \
+ 'title: {}\n'.format(name) \
+ 'date: {} #(YYYY-MM-DD)\n'.format(date.today()) \
+ 'publish: no #(yes/no)\n' \
+ '---\n' \
+ 'markdown: |\n'
helpers.chdir_to_articles()
if Path('{}.yaml'.format(name)).is_file():
print('Article already exists.')
else:
with open('{}.yaml'.format(name), 'w') as article:
article.write(header)
def main():
parser = argparse.ArgumentParser(
description='create and publish blog articles',
epilog='further help: https://github.com/schdav/blogy')
group = parser.add_mutually_exclusive_group()
group.add_argument('-a',
'--add',
help='add article with given name',
metavar=('NAME'))
group.add_argument('-b', '--build', help='build blog', action='store_true')
group.add_argument('-i',
'--init',
help='initialize environment',
action='store_true')
group.add_argument('-p',
'--publish',
help='publish blog locally',
action='store_true')
group.add_argument('-s',
'--stats',
help='show statistics',
action='store_true')
parser.add_argument('-v',
'--version',
action='version',
version=__version__)
args = parser.parse_args()
if args.add:
add_article(args.add)
elif args.build:
build()
elif args.init:
initialize()
elif args.publish:
publish()
elif args.stats:
show_statistics()
if __name__ == '__main__':
main() | blogy.py | import argparse
import errno
import http.server
import os
import socketserver
import sys
from datetime import date
from pathlib import Path
import helpers
from builder import Builder
__version__ = '3.2.0'
CONFIG_FILE = 'config.yaml'
def show_statistics():
articles = 0
drafts = 0
word_count_total = 0
helpers.chdir_to_articles()
for article in os.listdir('.'):
if os.path.isfile(article) and not article.startswith('.'):
article_yaml = helpers.load_yaml(article)
is_publish = helpers.read_key(article_yaml[0], 'publish')
markdown = helpers.read_key(article_yaml[1], 'markdown')
if not is_publish:
drafts = drafts + 1
articles = articles + 1
word_count = len(markdown.split())
word_count_total += word_count
print('{} article(s): {} to publish, {} draft(s)'.format(
str(articles), str(articles - drafts), str(drafts)))
print('{} word(s) total, {} word(s) average'.format(
str(word_count_total), str(round(word_count_total / articles))))
def publish():
try:
os.chdir('build/')
except OSError as error:
if error.errno == errno.ENOENT:
print('Nothing to publish.')
sys.exit(1)
handler = http.server.SimpleHTTPRequestHandler
port = 8080
try:
httpd = socketserver.TCPServer(('127.0.0.1', port), handler)
except OSError as error:
print(error)
sys.exit(1)
print('Published at http://localhost:{}'.format(str(port)))
print('Press control + c to stop.')
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
httpd.server_close()
def initialize():
os.makedirs('articles/', exist_ok=True)
def build():
config_file = CONFIG_FILE
helpers.check_file(config_file)
config_yaml = helpers.load_yaml(config_file)[0]
selected_theme = helpers.read_key(config_yaml, 'theme')
blog_name = helpers.read_key(config_yaml, 'name')
description = helpers.read_key(config_yaml, 'description')
language = helpers.read_key(config_yaml, 'language')
builder = Builder(theme=selected_theme,
name=blog_name,
description=description,
lang=language)
helpers.chdir_to_articles()
for article in os.listdir('.'):
if os.path.isfile(article) and not article.startswith('.'):
builder.build_article(article)
builder.build_overview()
def add_article(name):
header = '---\n' \
+ 'title: {}\n'.format(name) \
+ 'date: {} #(YYYY-MM-DD)\n'.format(date.today()) \
+ 'publish: no #(yes/no)\n' \
+ '---\n' \
+ 'markdown: |\n'
helpers.chdir_to_articles()
if Path('{}.yaml'.format(name)).is_file():
print('Article already exists.')
else:
with open('{}.yaml'.format(name), 'w') as article:
article.write(header)
def main():
parser = argparse.ArgumentParser(
description='create and publish blog articles',
epilog='further help: https://github.com/schdav/blogy')
group = parser.add_mutually_exclusive_group()
group.add_argument('-a',
'--add',
help='add article with given name',
metavar=('NAME'))
group.add_argument('-b', '--build', help='build blog', action='store_true')
group.add_argument('-i',
'--init',
help='initialize environment',
action='store_true')
group.add_argument('-p',
'--publish',
help='publish blog locally',
action='store_true')
group.add_argument('-s',
'--stats',
help='show statistics',
action='store_true')
parser.add_argument('-v',
'--version',
action='version',
version=__version__)
args = parser.parse_args()
if args.add:
add_article(args.add)
elif args.build:
build()
elif args.init:
initialize()
elif args.publish:
publish()
elif args.stats:
show_statistics()
if __name__ == '__main__':
main() | 0.157622 | 0.071138 |
import numpy as np
from scipy import ndimage
from loguru import logger
from skimage.morphology import skeletonize as skeletonize_skimage
def erode(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_erosion(I, structure=struct2).astype(I.dtype)
I = I.astype('int')
print(I)
I = np.nan_to_num(I)
return I.astype('int')
def dilate(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_dilation(I, structure=struct2).astype(I.dtype)
return np.nan_to_num(I)
def opening(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_opening(I, structure=struct2).astype(I.dtype)
return I
def closing(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_closing(I, structure=struct2).astype(I.dtype)
return I
def distance_transform_edt(I, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
I = ndimage.distance_transform_edt(I)
return I
def median(I, median_size, num_iter, thresh=0.5):
"""Median filter, using ndimage implementation.
Parameters
----------
data : np.ndarray (D,H,W)
Input image
median_size : int
Median size
num_iter : int
Returns
-------
np.ndarray (D,H,W)
Median filtered image
"""
I = I * 1.0
for i in range(num_iter):
I = ndimage.median_filter(I, median_size).astype(I.dtype)
return I
def skeletonize(I, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
skeleton = skeletonize_skimage(I) # returns 0-255
skeleton = (skeleton > 0) * 1.0
return skeleton
def watershed(I, markers, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
from skimage import img_as_ubyte
I = img_as_ubyte(I)
# xm, ym, zm = np.ogrid[0:I.shape[0]:10, 0:I.shape[1]:10, 0:I.shape[2]:10]
markers = ((markers > 0) * 1.0).astype(np.int16)
markers = ndimage.label(markers)[0]
# markers[xm, ym, zm]= np.arange(xm.size*ym.size*zm.size).reshape((xm.size,ym.size, zm.size))
ws = ndimage.watershed_ift(I, markers)
return ws | survos2/server/filtering/morph.py | import numpy as np
from scipy import ndimage
from loguru import logger
from skimage.morphology import skeletonize as skeletonize_skimage
def erode(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_erosion(I, structure=struct2).astype(I.dtype)
I = I.astype('int')
print(I)
I = np.nan_to_num(I)
return I.astype('int')
def dilate(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_dilation(I, structure=struct2).astype(I.dtype)
return np.nan_to_num(I)
def opening(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_opening(I, structure=struct2).astype(I.dtype)
return I
def closing(I, num_iter, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
struct2 = ndimage.generate_binary_structure(3, 2)
for i in range(num_iter):
I = ndimage.binary_closing(I, structure=struct2).astype(I.dtype)
return I
def distance_transform_edt(I, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
I = ndimage.distance_transform_edt(I)
return I
def median(I, median_size, num_iter, thresh=0.5):
"""Median filter, using ndimage implementation.
Parameters
----------
data : np.ndarray (D,H,W)
Input image
median_size : int
Median size
num_iter : int
Returns
-------
np.ndarray (D,H,W)
Median filtered image
"""
I = I * 1.0
for i in range(num_iter):
I = ndimage.median_filter(I, median_size).astype(I.dtype)
return I
def skeletonize(I, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
I = (I >= thresh) * 1.0
skeleton = skeletonize_skimage(I) # returns 0-255
skeleton = (skeleton > 0) * 1.0
return skeleton
def watershed(I, markers, thresh=0.5):
I -= np.min(I)
I = I / np.max(I)
from skimage import img_as_ubyte
I = img_as_ubyte(I)
# xm, ym, zm = np.ogrid[0:I.shape[0]:10, 0:I.shape[1]:10, 0:I.shape[2]:10]
markers = ((markers > 0) * 1.0).astype(np.int16)
markers = ndimage.label(markers)[0]
# markers[xm, ym, zm]= np.arange(xm.size*ym.size*zm.size).reshape((xm.size,ym.size, zm.size))
ws = ndimage.watershed_ift(I, markers)
return ws | 0.514156 | 0.493531 |
from datetime import datetime
import os
import socket
import json
from sqlite3 import Timestamp
from celery import shared_task
from progressui.backend import ProgressSend
from git.repo.base import Repo
from tools import shell, git, bbcommand, patch, bbfile, dishes
from tools import migration
from tools.migration import Migration
from .models import MetaLayer, MyMachine, MyPackages, Project
# TODO: improve robustness later so that the system can recover automatically from any misoperation
# Mapping from a Project.project_version value to the matching
# (Yocto release branch, bitbake release branch) pair.
_YOCTO_RELEASES = {
    'HARDKNOTT': ('hardknott', '1.50'),
    'GATESGARTH': ('gatesgarth', '1.48'),
    'DUNFELL': ('dunfell', '1.46'),
    'ZEUS': ('zeus', '1.44'),
}


def _clone_submodule_with_progress(server, progress_send, target_path, name,
                                   url, branch, percentage, description,
                                   max_attempts=3):
    """Clone *url* as git submodule *name* of *target_path*, relaying progress.

    The clone runs in a worker thread (tools.git.git_submodule) that reports
    its progress as JSON datagrams to the UDP socket *server*; each datagram
    is forwarded to the UI via *progress_send* under the given top-level
    *percentage*/*description*.  The clone is retried up to *max_attempts*
    times; raises Exception if the submodule directory still does not exist
    afterwards (the original looped forever on persistent failure).
    """
    path = os.path.join(target_path, name)
    for _attempt in range(max_attempts):
        if os.path.exists(path):
            return
        submodule = git.git_submodule(target_path, name, url, branch)
        submodule.start()
        while submodule.is_alive():
            try:
                server.settimeout(5)
                byte, _addr = server.recvfrom(1024)
            except OSError:
                # recvfrom timed out (clone thread quiet) -- poll again.
                continue
            git_message = json.loads(byte.decode('ascii'))
            sub = [{'percentage': int(git_message['cur_count'] * 100
                                      / git_message['max_count']),
                    'description': git_message['message']}]
            progress_send.send_progress(percentage=percentage,
                                        subProgress=sub,
                                        description=description)
        if os.path.exists(path):
            return
    raise Exception('git clone failed: %s' % url)


@shared_task(bind=True)
def project_initial_task(self, project_id, project_path, project_version, project_name):
    """Create a fresh Yocto project tree for *project_id*.

    Copies the project template into ``project_path/project_name``, registers
    the default 'dianshao' machine, initializes a git repository, and clones
    the bitbake / openembedded-core / meta-yocto / meta-openembedded /
    meta-rauc layers matching *project_version*, streaming progress to the UI
    over a local UDP socket (port 8866).  Raises Exception on any failed step.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))
    try:
        progress_send = ProgressSend(self)
        template_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     'project_template')
        target_path = os.path.join(project_path, project_name)
        r, err = shell.shell_cmd('cp -rp %s/. %s' % (template_path, target_path),
                                 os.curdir)
        if err == True:
            raise Exception("project template build error: %s" % (r))
        project = Project.objects.get(id=project_id)
        MyMachine.objects.create(project=project, name='dianshao', base='none',
                                 initial_method='Systemd', flash='SDCard',
                                 distro_version='1.0.0',
                                 description='my machine generate by dianshao',
                                 machine_include='{}', distro_include='{}')
        # TODO: auto-generate distro, image, machine, bblayer and conf.sample
        # files from the project name.
        Repo.init(target_path)
        try:
            yocto_version, bitbake_version = _YOCTO_RELEASES[project_version]
        except KeyError:
            # The original left yocto_version unbound here (NameError later).
            raise Exception('unsupported project version: %s' % project_version)

        progress_send.send_progress(percentage='10',
                                    description='Add Bitbake Submodule')
        _clone_submodule_with_progress(server, progress_send, target_path,
                                       'bitbake',
                                       'https://github.com/openembedded/bitbake.git',
                                       bitbake_version, '10',
                                       'Add Bitbake Submodule')
        # Overlay the bundled (patched) bitbake copy on top of the clone.
        bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                    'bitbake')
        r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, target_path)),
                                 cwd=target_path)
        if err == True:
            raise Exception("project template build error: %s" % (r))

        progress_send.send_progress(percentage='30',
                                    description='Add Openembedded-Core Submodule')
        _clone_submodule_with_progress(server, progress_send, target_path,
                                       'openembedded-core',
                                       'https://github.com/openembedded/openembedded-core.git',
                                       yocto_version, '30',
                                       'Add Openembedded-Core Submodule')
        MetaLayer.objects.create(project=project,
                                 name='openembedded-core',
                                 url='https://github.com/openembedded/openembedded-core.git',
                                 remote_or_local='remote')

        progress_send.send_progress(percentage='50',
                                    description='Add Meta-Yocto Submodule')
        _clone_submodule_with_progress(server, progress_send, target_path,
                                       'meta-yocto',
                                       'https://git.yoctoproject.org/meta-yocto.git',
                                       yocto_version, '50',
                                       'Add Meta-Yocto Submodule')
        for sub_layer in ('meta-poky', 'meta-yocto-bsp'):
            MetaLayer.objects.create(project=project,
                                     name='meta-yocto',
                                     url='https://git.yoctoproject.org/meta-yocto.git',
                                     remote_or_local='remote',
                                     sub=sub_layer)

        progress_send.send_progress(percentage='70',
                                    description='Add Meta-Openembedded Submodule')
        _clone_submodule_with_progress(server, progress_send, target_path,
                                       'meta-openembedded',
                                       'https://github.com/openembedded/meta-openembedded.git',
                                       yocto_version, '70',
                                       'Add Meta-Openembedded Submodule')
        for sub_layer in ('meta-oe', 'meta-python', 'meta-networking'):
            MetaLayer.objects.create(project=project,
                                     name='meta-openembedded',
                                     url='https://github.com/openembedded/meta-openembedded.git',
                                     remote_or_local='remote',
                                     sub=sub_layer)

        progress_send.send_progress(percentage='90',
                                    description='Add Meta-Rauc Submodule')
        _clone_submodule_with_progress(server, progress_send, target_path,
                                       'meta-rauc',
                                       'https://github.com/rauc/meta-rauc.git',
                                       yocto_version, '90',
                                       'Add Meta-Rauc Submodule')
        MetaLayer.objects.create(project=project,
                                 name='meta-rauc',
                                 url='https://github.com/rauc/meta-rauc.git',
                                 remote_or_local='remote')

        # Source oe-init-build-env once so the build/conf files get generated.
        ret, err = shell.shell_cmd(
            command=('unset BBPATH; bash -c \"source %s %s;\"'
                     % (os.path.join(target_path, 'oe-init-build-env'),
                        os.path.join(target_path, 'build'))),
            cwd=target_path)
        if err == True:
            raise Exception("auto create configure file error: %s" % (ret))
    finally:
        # Always release the UDP progress port, even on failure (the original
        # leaked it on early raises).
        server.close()
    for repo_dir in ('bitbake', 'openembedded-core', 'meta-yocto'):
        if os.path.exists(os.path.join(project_path, project_name, repo_dir)) == False:
            raise Exception('Project is not complete')
    return "Project Create Success"
@shared_task(bind=True)
def project_import_task(self, project_path, project_name, url):
    """Import an existing project by cloning *url* into *project_path*.

    Streams git clone progress to the UI over a local UDP socket (port 8866),
    overlays the bundled bitbake copy on top of the clone, and runs the
    database migration for the imported tree.  Raises Exception if the clone
    (after 3 attempts) or the copy fails.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))
    progress_send = ProgressSend(self)
    path = os.path.join(project_path, project_name)
    try:
        progress_send.send_progress(percentage=50, description='Clone Project')
        for _attempt in range(3):
            if os.path.exists(path):
                break
            # Start a fresh clone thread per attempt (the original started it
            # only once, so its retry loop could never actually re-clone).
            project_repo = git.git_clone(url, project_path, project_name)
            project_repo.start()
            while project_repo.is_alive():
                try:
                    server.settimeout(5)
                    byte, _addr = server.recvfrom(1024)
                except OSError:
                    # recvfrom timed out; poll the clone thread again.
                    continue
                git_message = json.loads(byte.decode('ascii'))
                sub = [{'percentage': int(git_message['cur_count'] * 100
                                          / git_message['max_count']),
                        'description': git_message['message']}]
                progress_send.send_progress(percentage=50, subProgress=sub,
                                            description='Start Clone Project')
            if os.path.exists(path):
                break
        else:
            raise Exception('git clone error')
    finally:
        # Close the UDP progress port on every path (the original leaked it
        # on the success path and on the clone-error raise).
        server.close()
    # Overlay the bundled (patched) bitbake copy on top of the clone.
    bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bitbake')
    r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, path)), cwd=path)
    if err == True:
        raise Exception("project template build error: %s" % (r))
    Migration().project_import(project_path, project_name)
@shared_task(bind=True)
def meta_clone_task(self, name, url, remote_or_local, subd, project_id):
    """Add meta layer *name* (optional sub-layer *subd*) to project *project_id*.

    For a 'remote' layer the repository at *url* is cloned as a git submodule
    (up to 3 attempts) on the branch matching the project's Yocto version,
    streaming progress to the UI over a local UDP socket (port 8866).  The
    layer is then recorded as a MetaLayer row and registered with
    bitbake-layers.  Raises Exception on duplicates, unknown versions or
    clone/model failures.
    """
    # TODO: meta add sub directory, meta add without download
    progress_send = ProgressSend(self)
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind(('', 8866))
    try:
        progress_send.send_progress(percentage=0, description='Check the Exist Meta')
        project = Project.objects.get(id=project_id)
        for existing in MetaLayer.objects.filter(project__id=project_id):
            if existing.name == name and existing.sub == subd:
                raise Exception("meta is already exist")
        progress_send.send_progress(33, description='Meta Adding...')
        version_map = {'HARDKNOTT': 'hardknott', 'GATESGARTH': 'gatesgarth',
                       'DUNFELL': 'dunfell', 'ZEUS': 'zeus'}
        try:
            yocto_version = version_map[project.project_version]
        except KeyError:
            # The original left yocto_version unbound here (NameError later).
            raise Exception('unsupported project version: %s'
                            % project.project_version)
        if remote_or_local == 'remote':
            project_dir = os.path.join(project.project_path, project.project_name)
            path = os.path.join(project_dir, name)
            for _attempt in range(3):
                if os.path.exists(path):
                    break
                submodule = git.git_submodule(project_dir, name, url, yocto_version)
                submodule.start()
                while submodule.is_alive():
                    try:
                        server.settimeout(5)
                        byte, _addr = server.recvfrom(1024)
                    except OSError:
                        # recvfrom timed out; poll the clone thread again.
                        continue
                    git_message = json.loads(byte.decode('ascii'))
                    sub = [{'percentage': int(git_message['cur_count'] * 100
                                              / git_message['max_count']),
                            'description': git_message['message']}]
                    progress_send.send_progress(subProgress=sub)
                if os.path.exists(path):
                    break
            else:
                raise Exception('git clone error')
        progress_send.send_progress(66, description='Save Meta-Layer')
        try:
            MetaLayer.objects.create(project=project, name=name, url=url,
                                     remote_or_local=remote_or_local, sub=subd)
        except Exception as exc:
            # Chain the real error instead of discarding it (original used
            # a bare except that hid the cause).
            raise Exception("meta model create err") from exc
        meta_name = name if subd == '' else name + '/' + subd
        bbcommand.bitbake_addlayer(
            os.path.join(project.project_path, project.project_name),
            os.path.join(project.project_path, project.project_name, meta_name))
    finally:
        # Always release the UDP progress port (the original leaked it on
        # the clone-error raise).
        server.close()
    return 'meta add success'
@shared_task(bind=True)
def bitbake_progress(self, project_path, project_name, target, command):
# TODO: 增加一个锁,确保同一个时刻只有一个 Bitbake 进程
# TODO: 任务恢复,每次进入任务查询是否有 Bitbake 任务在进行中, 并默认不显示,点击按钮后显示任务进度
progress_send = ProgressSend(self)
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 6688))
bitbake = bbcommand.BitbakeThread(os.path.join(project_path, project_name), target, command)
bitbake.start()
progress_send = ProgressSend(self)
lock_file = os.path.join(project_path, project_path, 'build/bitbake.lock')
if os.path.exists(lock_file):
raise Exception('Another Bitbake Process')
start_time = datetime.now().timestamp()
while True:
bbprogress_byte, addr = server.recvfrom(8192)
bbprogress = json.loads(bbprogress_byte.decode('ascii'))
if bbprogress['event_type'] == 'dianshao_ui_start':
print('dianshao ui has already started')
if bbprogress['event_type'] == 'TaskList':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
sub = []
# TODO: 处理 progress < 0
for task in bbprogress['tasks']:
if task['progress'] < 0:
sub.append({'percentage': 0, 'description': ('%s pending' % task['title'])})
else:
sub.append({'percentage': task['progress'], 'description': (('%s:%s') %(task['title'], task['rate']))})
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s), subProgress=sub)
continue
if bbprogress['event_type'] == 'Ping':
# TODO: Server Command
# TODO: ping interval
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s))
if bbprogress['event_type'] == 'End':
if bbprogress['total_error'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_error'])
elif bbprogress['total_task_failures'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_task_failures'])
else:
progress_send.send_progress(header='Bitbake Success', description=bbprogress['summary'])
return ('Bitbake Success with %s Warnings' % bbprogress['total_warning'])
if bbprogress['event_type'] == 'CommandFailed':
raise Exception('Bitbake Failed, Please Find Details in dianshao_bitbake.log')
if bbprogress['event_type'] == 'CommandExit':
break
if bbprogress['event_type'] == 'CommandCompleted':
break
if bbprogress['event_type'] == 'logging.LogRecord':
print(bbprogress['msg'])
if bbprogress['event_type'] == 'CacheLoadStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='cache data load started')
if bbprogress['event_type'] == 'CacheLoadProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='cache data loading')
if bbprogress['event_type'] == 'CacheLoadCompleted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='cache data load succes with %d retry times' % bbprogress['num_entries'])
if bbprogress['event_type'] == 'ProcessStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='%s process started' % bbprogress['processname'])
if bbprogress['event_type'] == 'ProcessProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(bbprogress['progress']), description='%s process excuting' % bbprogress['processname'])
# TODO: Add Parse Progress
if bbprogress['event_type'] == 'ProcessFinished':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='%s process finished' % bbprogress['processname'])
if bbprogress['event_type'] == 'runQueueTaskStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='%s scene queue task started' % bbprogress['taskstring'])
if bbprogress['event_type'] == 'ParseStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='Parse started')
if bbprogress['event_type'] == 'ParseProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='Parsing')
if bbprogress['event_type'] == 'ParseCompleted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='Parse Completed')
# TODO: ParseFailed 处理
# TODO: TaskBase 消息显示
if bbprogress['event_type'] == 'TaskBase':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
description=bbprogress['message'])
# TODO: bitbake 错误处理
if bbprogress['event_type'] == 'CommandFailed':
server.close()
raise Exception('bitbake target failed with err CommandFailed')
server.close()
return 'bitbake target success'
@shared_task(bind=True)
def bbfile_task_create(self, name, version, type, project_path, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_bbfile(mypackage_id)
@shared_task(bind=True)
def bbfile_localfile_import_task(self, name, version, type, project_path, file_name, file_path, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_local_file(file_path, file_name)
@shared_task(bind=True)
def bbfile_localfile_create_task(self, name, version, type, project_path, file_name, content, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_local_file(file_name, content)
@shared_task(bind=True)
def machinefile_create_task(self, mymachine_id):
machine_file = bbfile.DianshaoMachineFile(mymachine_id)
machine_file.create_machine_file()
machine_file.create_distro_file()
@shared_task(bind=True)
def imagefile_create_task(self, myimage_id):
imagefile = bbfile.DianshaoImageFile(myimage_id)
imagefile.create_image_file()
@shared_task(bind=True)
def updatefile_create_task(self, myimage_id):
imagefile = bbfile.DianshaoImageFile(myimage_id)
imagefile.create_update_file()
@shared_task(bind=True)
def imagefile_upload_task(self, myimage_id):
imageupload = dishes.DishesAgent(myimage_id)
imageupload.upload_package()
@shared_task(bind=True)
def config_set_task(self, project_id, machine, distro, pm, pt):
conf = bbfile.DianshaoConfFile(project_id)
conf.set_config_file(machine, distro, pm, pt)
@shared_task(bind=True)
def patch_generator_task(self, name, file_path, project_path, package_name,
package_version, package_type, catagory, text1, text2):
patch.patch_generator(name, file_path, project_path, package_name,
package_version, package_type, catagory, text1, text2)
@shared_task(bind=True)
def shell_cmd_task(self, cmd, cwd):
ret, error = shell.shell_cmd(command=cmd, cwd=cwd)
if error:
raise Exception(ret)
@shared_task(bind=True)
def project_export_task(self, project_id):
progress_send = ProgressSend(self)
progress_send.send_progress(percentage=0, description='project exporting')
m = Migration()
m.project_export(project_id)
@shared_task(bind=True)
def create_wks_file(self, project_id, name, content):
wks = bbfile.DianshaoWksFile(project_id)
wks.create(name, content) | src/projects/tasks.py | from datetime import datetime
import os
import socket
import json
from sqlite3 import Timestamp
from celery import shared_task
from progressui.backend import ProgressSend
from git.repo.base import Repo
from tools import shell, git, bbcommand, patch, bbfile, dishes
from tools import migration
from tools.migration import Migration
from .models import MetaLayer, MyMachine, MyPackages, Project
# TODO:后续提高稳定性,无论如何误操作可自恢复
@shared_task(bind=True)
def project_initial_task(self, project_id, project_path, project_version, project_name):
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 8866))
progress_send = ProgressSend(self)
template_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'project_template')
target_path = os.path.join(project_path, project_name)
r, err = shell.shell_cmd('cp -rp %s/. %s' % (template_path, target_path), os.curdir)
if err == True:
raise Exception("project template build error: %s" % (r))
project=Project.objects.get(id=project_id)
MyMachine.objects.create(project=project, name='dianshao', base='none', initial_method='Systemd', flash='SDCard',
distro_version='1.0.0', description='my machine generate by dianshao',
machine_include='{}', distro_include='{}')
# TODO: 根据项目名自动生成 distro, image, machine, bblayer, conf.sample 等文件
Repo.init(target_path)
progress_send.send_progress(percentage='10', description='Add Bitbake Submodule')
if project_version == 'HARDKNOTT':
yocto_version = 'hardknott'
bitbake_version = '1.50'
elif project_version == 'GATESGARTH':
yocto_version = 'gatesgarth'
bitbake_version = '1.48'
elif project_version == 'DUNFELL':
yocto_version = 'dunfell'
bitbake_version = '1.46'
elif project_version == 'ZEUS':
yocto_version = 'zeus'
bitbake_version = '1.44'
path = os.path.join(project_path, project_name, 'bitbake')
while(os.path.exists(path) == False):
submodule = git.git_submodule(target_path, 'bitbake',
'https://github.com/openembedded/bitbake.git',
bitbake_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage='10',subProgress=sub, description='Add Bitbake Submodule')
if os.path.exists(path):
break
bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bitbake')
r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, target_path)), cwd = target_path)
if err == True:
server.close()
raise Exception("project template build error: %s" % (r))
progress_send.send_progress(percentage='30', description='Add Openembedded-Core Submodule')
path = os.path.join(project_path, project_name, 'openembedded-core')
while(os.path.exists(path) == False):
submodule = git.git_submodule(target_path, 'openembedded-core',
'https://github.com/openembedded/openembedded-core.git',
yocto_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage='30', subProgress=sub, description='Add Openembedded-Core Submodule')
if os.path.exists(path):
break
else:
print("git clone failed\n")
project = Project.objects.get(id=project_id)
MetaLayer.objects.create(project=project,
name='openembedded-core',
url='https://github.com/openembedded/openembedded-core.git',
remote_or_local = 'remote')
progress_send.send_progress(percentage='50', description='Add Meta-Yocto Submodule')
path = os.path.join(project_path, project_name, 'meta-yocto')
while(os.path.exists(path) == False):
submodule = git.git_submodule(target_path, 'meta-yocto',
'https://git.yoctoproject.org/meta-yocto.git',
yocto_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage='50', subProgress=sub, description='Add Meta-Yocto Submodule')
if os.path.exists(path):
break
else:
print("git clone failed\n")
MetaLayer.objects.create(project=project,
name='meta-yocto',
url='https://git.yoctoproject.org/meta-yocto.git',
remote_or_local = 'remote',
sub = 'meta-poky')
MetaLayer.objects.create(project=project,
name='meta-yocto',
url='https://git.yoctoproject.org/meta-yocto.git',
remote_or_local = 'remote',
sub = 'meta-yocto-bsp')
progress_send.send_progress(percentage='70', description='Add Meta-Yocto Submodule')
path = os.path.join(project_path, project_name, 'meta-openembedded')
while(os.path.exists(path) == False):
submodule = git.git_submodule(target_path, 'meta-openembedded',
'https://github.com/openembedded/meta-openembedded.git',
yocto_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage='70', subProgress=sub, description='Add Meta-Openembedded Submodule')
if os.path.exists(path):
break
else:
print("git clone failed\n")
MetaLayer.objects.create(project=project,
name='meta-openembedded',
url='https://github.com/openembedded/meta-openembedded.git',
remote_or_local = 'remote',
sub = 'meta-oe')
MetaLayer.objects.create(project=project,
name='meta-openembedded',
url='https://github.com/openembedded/meta-openembedded.git',
remote_or_local = 'remote',
sub = 'meta-python')
MetaLayer.objects.create(project=project,
name='meta-openembedded',
url='https://github.com/openembedded/meta-openembedded.git',
remote_or_local = 'remote',
sub = 'meta-networking')
progress_send.send_progress(percentage='90', description='Add Meta-Rauc Submodule')
path = os.path.join(project_path, project_name, 'meta-rauc')
while(os.path.exists(path) == False):
submodule = git.git_submodule(target_path, 'meta-rauc',
'https://github.com/rauc/meta-rauc.git',
yocto_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage='90', subProgress=sub, description='Add Meta-Openembedded Submodule')
if os.path.exists(path):
break
else:
print("git clone failed\n")
MetaLayer.objects.create(project=project,
name='meta-rauc',
url='https://github.com/rauc/meta-rauc.git',
remote_or_local = 'remote')
ret, err = shell.shell_cmd(command=('unset BBPATH; bash -c \"source %s %s;\"'
% (os.path.join(target_path, 'oe-init-build-env'), os.path.join(target_path, 'build'))),
cwd=target_path)
if err == True:
server.close()
raise Exception("auto create configure file error: %s" % (ret))
server.close()
bb_path = os.path.join(project_path, project_name, 'bitbake')
oe_path = os.path.join(project_path, project_name, 'openembedded-core')
yocto_path = os.path.join(project_path, project_name, 'meta-yocto')
if os.path.exists(bb_path) == False or os.path.exists(oe_path) == False or os.path.exists(yocto_path) == False:
raise Exception('Project is not complete')
return "Project Create Success"
@shared_task(bind=True)
def project_import_task(self, project_path, project_name, url):
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 8866))
progress_send = ProgressSend(self)
progress_send.send_progress(percentage=50, description='Clone Project')
project_repo = git.git_clone(url, project_path, project_name)
project_repo.start()
path = os.path.join(project_path, project_name)
i = 0
while(os.path.exists(path) == False and i < 3):
while project_repo.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(percentage=50, subProgress=sub, description='Start Clone Project')
if os.path.exists(path):
break
else:
i += 1
if i == 3:
raise Exception('git clone error')
target_path = os.path.join(project_path, project_name)
bitbake_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'bitbake')
r, err = shell.shell_cmd(command=('cp -r %s %s' % (bitbake_path, target_path)), cwd = target_path)
if err == True:
server.close()
raise Exception("project template build error: %s" % (r))
m = Migration()
m.project_import(project_path, project_name)
@shared_task(bind=True)
def meta_clone_task(self, name, url, remote_or_local, subd, project_id):
# TODO: meta add sub directory, meta add without donwload
progress_send = ProgressSend(self)
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 8866))
progress_send.send_progress(percentage=0, description='Check the Exist Meta')
metas = MetaLayer.objects.filter(project__id=project_id)
project = Project.objects.get(id=project_id)
for m in metas:
if m.name == name and m.sub == subd:
server.close()
raise Exception("meta is already exist")
progress_send.send_progress(33, description='Meta Adding...')
if project.project_version == 'HARDKNOTT':
yocto_version = 'hardknott'
elif project.project_version == 'GATESGARTH':
yocto_version = 'gatesgarth'
elif project.project_version == 'DUNFELL':
yocto_version = 'dunfell'
elif project.project_version == 'ZEUS':
yocto_version = 'zeus'
if remote_or_local == 'remote':
path = os.path.join(project.project_path, project.project_name, name)
i = 0
while(os.path.exists(path) == False and i < 3):
submodule = git.git_submodule(os.path.join(project.project_path, project.project_name),
name, url, yocto_version)
submodule.start()
while submodule.is_alive():
try:
server.settimeout(5)
byte, addr = server.recvfrom(1024)
except:
continue
gitMessage = json.loads(byte.decode('ascii'))
sub = [{'percentage': int(gitMessage['cur_count']*100/gitMessage['max_count']), 'description': gitMessage['message']}]
progress_send.send_progress(subProgress=sub)
if os.path.exists(path):
break
else:
i += 1
if i == 3:
raise Exception('git clone error')
progress_send.send_progress(66, description='Save Meta-Layer')
try:
MetaLayer.objects.create(project=project,
name=name, url=url, remote_or_local=remote_or_local, sub=subd)
except:
server.close()
raise Exception("meta model create err")
if subd != '':
meta_name = name + '/' + subd
else:
meta_name = name
bbcommand.bitbake_addlayer(os.path.join(project.project_path, project.project_name),
os.path.join(project.project_path, project.project_name, meta_name))
server.close()
return 'meta add success'
@shared_task(bind=True)
def bitbake_progress(self, project_path, project_name, target, command):
# TODO: 增加一个锁,确保同一个时刻只有一个 Bitbake 进程
# TODO: 任务恢复,每次进入任务查询是否有 Bitbake 任务在进行中, 并默认不显示,点击按钮后显示任务进度
progress_send = ProgressSend(self)
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(('', 6688))
bitbake = bbcommand.BitbakeThread(os.path.join(project_path, project_name), target, command)
bitbake.start()
progress_send = ProgressSend(self)
lock_file = os.path.join(project_path, project_path, 'build/bitbake.lock')
if os.path.exists(lock_file):
raise Exception('Another Bitbake Process')
start_time = datetime.now().timestamp()
while True:
bbprogress_byte, addr = server.recvfrom(8192)
bbprogress = json.loads(bbprogress_byte.decode('ascii'))
if bbprogress['event_type'] == 'dianshao_ui_start':
print('dianshao ui has already started')
if bbprogress['event_type'] == 'TaskList':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
sub = []
# TODO: 处理 progress < 0
for task in bbprogress['tasks']:
if task['progress'] < 0:
sub.append({'percentage': 0, 'description': ('%s pending' % task['title'])})
else:
sub.append({'percentage': task['progress'], 'description': (('%s:%s') %(task['title'], task['rate']))})
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s), subProgress=sub)
continue
if bbprogress['event_type'] == 'Ping':
# TODO: Server Command
# TODO: ping interval
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s))
if bbprogress['event_type'] == 'End':
if bbprogress['total_error'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_error'])
elif bbprogress['total_task_failures'] > 0:
progress_send.send_progress(header='Bitbake Failed', description=bbprogress['summary'])
raise Exception('Bitbake Failed, With %s errors' % bbprogress['total_task_failures'])
else:
progress_send.send_progress(header='Bitbake Success', description=bbprogress['summary'])
return ('Bitbake Success with %s Warnings' % bbprogress['total_warning'])
if bbprogress['event_type'] == 'CommandFailed':
raise Exception('Bitbake Failed, Please Find Details in dianshao_bitbake.log')
if bbprogress['event_type'] == 'CommandExit':
break
if bbprogress['event_type'] == 'CommandCompleted':
break
if bbprogress['event_type'] == 'logging.LogRecord':
print(bbprogress['msg'])
if bbprogress['event_type'] == 'CacheLoadStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='cache data load started')
if bbprogress['event_type'] == 'CacheLoadProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='cache data loading')
if bbprogress['event_type'] == 'CacheLoadCompleted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='cache data load succes with %d retry times' % bbprogress['num_entries'])
if bbprogress['event_type'] == 'ProcessStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='%s process started' % bbprogress['processname'])
if bbprogress['event_type'] == 'ProcessProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(bbprogress['progress']), description='%s process excuting' % bbprogress['processname'])
# TODO: Add Parse Progress
if bbprogress['event_type'] == 'ProcessFinished':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='%s process finished' % bbprogress['processname'])
if bbprogress['event_type'] == 'runQueueTaskStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='%s scene queue task started' % bbprogress['taskstring'])
if bbprogress['event_type'] == 'ParseStarted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=0, description='Parse started')
if bbprogress['event_type'] == 'ParseProgress':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=int(int(bbprogress['current'])*100/int(bbprogress['total'])), description='Parsing')
if bbprogress['event_type'] == 'ParseCompleted':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
percentage=100, description='Parse Completed')
# TODO: ParseFailed 处理
# TODO: TaskBase 消息显示
if bbprogress['event_type'] == 'TaskBase':
period = datetime.utcnow().timestamp() - start_time
period_s = "{:.1f}".format(period)
progress_send.send_progress(header=('Bitbaking... %s seconds' % period_s),
description=bbprogress['message'])
# TODO: bitbake 错误处理
if bbprogress['event_type'] == 'CommandFailed':
server.close()
raise Exception('bitbake target failed with err CommandFailed')
server.close()
return 'bitbake target success'
@shared_task(bind=True)
def bbfile_task_create(self, name, version, type, project_path, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_bbfile(mypackage_id)
@shared_task(bind=True)
def bbfile_localfile_import_task(self, name, version, type, project_path, file_name, file_path, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_local_file(file_path, file_name)
@shared_task(bind=True)
def bbfile_localfile_create_task(self, name, version, type, project_path, file_name, content, mypackage_id):
package = MyPackages.objects.get(id=mypackage_id)
bb = bbfile.DianshaoBBFile(name, version, type)
bb.create_folder(project_path, package.catagory)
bb.create_local_file(file_name, content)
@shared_task(bind=True)
def machinefile_create_task(self, mymachine_id):
machine_file = bbfile.DianshaoMachineFile(mymachine_id)
machine_file.create_machine_file()
machine_file.create_distro_file()
@shared_task(bind=True)
def imagefile_create_task(self, myimage_id):
imagefile = bbfile.DianshaoImageFile(myimage_id)
imagefile.create_image_file()
@shared_task(bind=True)
def updatefile_create_task(self, myimage_id):
imagefile = bbfile.DianshaoImageFile(myimage_id)
imagefile.create_update_file()
@shared_task(bind=True)
def imagefile_upload_task(self, myimage_id):
imageupload = dishes.DishesAgent(myimage_id)
imageupload.upload_package()
@shared_task(bind=True)
def config_set_task(self, project_id, machine, distro, pm, pt):
conf = bbfile.DianshaoConfFile(project_id)
conf.set_config_file(machine, distro, pm, pt)
@shared_task(bind=True)
def patch_generator_task(self, name, file_path, project_path, package_name,
package_version, package_type, catagory, text1, text2):
patch.patch_generator(name, file_path, project_path, package_name,
package_version, package_type, catagory, text1, text2)
@shared_task(bind=True)
def shell_cmd_task(self, cmd, cwd):
ret, error = shell.shell_cmd(command=cmd, cwd=cwd)
if error:
raise Exception(ret)
@shared_task(bind=True)
def project_export_task(self, project_id):
progress_send = ProgressSend(self)
progress_send.send_progress(percentage=0, description='project exporting')
m = Migration()
m.project_export(project_id)
@shared_task(bind=True)
def create_wks_file(self, project_id, name, content):
wks = bbfile.DianshaoWksFile(project_id)
wks.create(name, content) | 0.109135 | 0.066327 |
import unittest
import mock
from google.api_core import exceptions
from google.cloud import datacatalog
from google.datacatalog_connectors.commons_test import utils
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors import commons
class DataCatalogFacadeTestCase(unittest.TestCase):
    """Unit tests for ``commons.DataCatalogFacade`` with a mocked client."""

    # Root package path used to build mock.patch target strings.
    __COMMONS_PACKAGE = 'google.datacatalog_connectors.commons'
    # Patch target for the facade's search method (used by tests below,
    # presumably outside this view — confirm against the full file).
    __SEARCH_CATALOG_METHOD = '{}.DataCatalogFacade.search_catalog'.format(
        __COMMONS_PACKAGE)

    # Shorthand aliases for Data Catalog tag-template field primitive types.
    __BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
    __DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
    __STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
    __TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
    __NON_PRIMITIVE_TYPE = datacatalog.FieldType.PrimitiveType.\
        PRIMITIVE_TYPE_UNSPECIFIED
@mock.patch('{}.datacatalog_facade.datacatalog.DataCatalogClient'.format(
__COMMONS_PACKAGE))
def setUp(self, mock_datacatalog_client):
self.__datacatalog_facade = commons \
.DataCatalogFacade('test-project')
# Shortcut for the object assigned
# to self.__datacatalog_facade.__datacatalog
self.__datacatalog_client = mock_datacatalog_client.return_value
def test_constructor_should_set_instance_attributes(self):
attrs = self.__datacatalog_facade.__dict__
self.assertIsNotNone(attrs['_DataCatalogFacade__datacatalog'])
self.assertEqual('test-project',
attrs['_DataCatalogFacade__project_id'])
def test_create_entry_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.create_entry('entry_group_name', 'entry_id',
entry)
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_create_entry_should_raise_on_permission_denied(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.create_entry.side_effect = \
exceptions.PermissionDenied('Permission denied')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.assertRaises(exceptions.PermissionDenied,
self.__datacatalog_facade.create_entry,
'entry_group_name', 'entry_id', entry)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_get_entry_should_succeed(self):
self.__datacatalog_facade.get_entry('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.get_entry.call_count)
def test_lookup_entry_should_return_datacatalog_client_result(self):
fake_entry = datacatalog.Entry()
fake_entry.linked_resource = 'linked_resource'
datacatalog_client = self.__datacatalog_client
datacatalog_client.lookup_entry.return_value = fake_entry
entry = self.__datacatalog_facade.lookup_entry('linked_resource')
self.assertEqual(fake_entry, entry)
def test_lookup_entry_should_fulfill_linked_resource_request_field(self):
self.__datacatalog_facade.lookup_entry('linked_resource')
fake_request = datacatalog.LookupEntryRequest()
fake_request.linked_resource = 'linked_resource'
datacatalog_client = self.__datacatalog_client
datacatalog_client.lookup_entry.assert_called_once_with(
request=fake_request)
def test_update_entry_should_succeed(self):
self.__datacatalog_facade.update_entry({})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.update_entry.call_count)
def test_upsert_entry_nonexistent_should_create(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.side_effect = \
exceptions.PermissionDenied('Entry not found')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_upsert_entry_nonexistent_on_failed_precondition_should_raise(
self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.side_effect = \
exceptions.PermissionDenied('Entry not found')
datacatalog_client.create_entry.side_effect = \
exceptions.FailedPrecondition('Failed precondition')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.assertRaises(exceptions.FailedPrecondition,
self.__datacatalog_facade.upsert_entry,
'entry_group_name', 'entry_id', entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_upsert_entry_changed_should_update(self):
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_2', 11, 22)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_columns_equal_should_not_call_api(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.update_entry.assert_not_called()
datacatalog_client.create_entry.assert_not_called()
def test_upsert_entry_columns_changed_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'int',
'description')
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_column_deleted_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
cols_2 = [col_1]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_column_added_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_3', 'string',
'description')
cols_2 = [col_1, col_2, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_subcolumn_added_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_3.subcolumns = [{}]
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_subcolumn_deleted_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_2.subcolumns = [{}, {}]
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_3.subcolumns = [{}]
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_should_raise_on_failed_precondition(self):
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
datacatalog_client.update_entry.side_effect = \
exceptions.FailedPrecondition('Failed precondition')
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_2', 11, 22)
self.assertRaises(exceptions.FailedPrecondition,
self.__datacatalog_facade.upsert_entry,
'entry_group_name', 'entry_id', entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.update_entry.call_count)
def test_upsert_entry_unchanged_should_not_update(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.update_entry.assert_not_called()
def test_delete_entry_should_succeed(self):
self.__datacatalog_facade.delete_entry('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_entry.call_count)
def test_delete_entry_error_should_be_ignored(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.delete_entry.side_effect = \
Exception('Error when deleting entry')
self.__datacatalog_facade.delete_entry('entry_name')
self.assertEqual(1, datacatalog_client.delete_entry.call_count)
def test_create_entry_group_should_succeed(self):
self.__datacatalog_facade.create_entry_group('location-id',
'entry_group_id')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_entry_group.call_count)
def test_delete_entry_group_should_succeed(self):
self.__datacatalog_facade.delete_entry_group('entry_group_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_entry_group.call_count)
def test_create_tag_template_should_succeed(self):
self.__datacatalog_facade.create_tag_template('location-id',
'tag_template_id', {})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_tag_template.call_count)
def test_get_tag_template_should_succeed(self):
self.__datacatalog_facade.get_tag_template('tag_template_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.get_tag_template.call_count)
def test_delete_tag_template_should_succeed(self):
self.__datacatalog_facade.delete_tag_template('tag_template_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_tag_template.call_count)
def test_create_tag_should_succeed(self):
self.__datacatalog_facade.create_tag('entry_name', {})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_tag.call_count)
def test_delete_tag_should_succeed(self):
self.__datacatalog_facade.delete_tag(self.__create_tag())
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_tag.call_count)
def test_list_tags_should_succeed(self):
self.__datacatalog_facade.list_tags('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.list_tags.call_count)
def test_update_tag_should_succeed(self):
self.__datacatalog_facade.update_tag({})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_nonexistent_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = []
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.upsert_tags(entry, [self.__create_tag()])
self.assertEqual(1, datacatalog_client.create_tag.call_count)
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_changed_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [self.__create_tag()]
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
changed_tag = self.__create_tag()
changed_tag.fields['bool-field'].bool_value = False
self.__datacatalog_facade.upsert_tags(entry, [changed_tag])
datacatalog_client.create_tag.assert_not_called()
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_changed_column_uppercase_should_succeed(self):
datacatalog_client = self.__datacatalog_client
current_tag = self.__create_tag()
current_tag.column = 'ABC'
datacatalog_client.list_tags.return_value = [current_tag]
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
changed_tag = self.__create_tag()
changed_tag.column = 'abc'
changed_tag.fields['bool-field'].bool_value = False
self.__datacatalog_facade.upsert_tags(entry, [changed_tag])
datacatalog_client.create_tag.assert_not_called()
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_unchanged_column_uppercase_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
current_tag = self.__create_tag()
current_tag.column = 'ABC'
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [current_tag]
# column name is case insensitive, so it's the same column.
tag = self.__create_tag()
tag.column = 'abc'
self.__datacatalog_facade.upsert_tags(entry, [tag])
datacatalog_client.create_tag.assert_not_called()
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_unchanged_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.upsert_tags(entry, [tag])
datacatalog_client.create_tag.assert_not_called()
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_should_handle_empty_list(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
try:
self.__datacatalog_facade.upsert_tags(entry, None)
except exceptions.GoogleAPICallError as e:
super(DataCatalogFacadeTestCase, self).fail(e)
def test_delete_tags_nonexistent_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = []
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.delete_tags(entry, [self.__create_tag()],
'template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_nonexistent_template_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.delete_tags(entry, [tag],
'nonexistent-template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_unchanged_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.delete_tags(entry, [tag], 'template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_deleted_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
deleted_tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [deleted_tag]
new_tag = self.__create_tag()
new_tag.template = 'new_template_2'
self.__datacatalog_facade.delete_tags(entry, [new_tag], 'template')
self.assertEqual(1, datacatalog_client.delete_tag.call_count)
def test_delete_tags_should_handle_empty_list(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
try:
self.__datacatalog_facade.delete_tags(entry, [], 'template')
except exceptions.GoogleAPICallError as e:
super(DataCatalogFacadeTestCase, self).fail(e)
def test_search_results_should_return_values(self):
expected_return_value = [
self.__create_search_result('localhost//asset_1'),
self.__create_search_result('localhost//asset_2')
]
datacatalog_client = self.__datacatalog_client
datacatalog_client.search_catalog.return_value = expected_return_value
return_value = self.__datacatalog_facade.search_catalog('query')
self.assertEqual(1, datacatalog_client.search_catalog.call_count)
self.assertEqual(expected_return_value, return_value)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_search_catalog_relative_resource_name_should_return_names(
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
resource_names = self.__datacatalog_facade \
.search_catalog_relative_resource_name(
'system=bigquery')
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(expected_resource_names, resource_names)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_string_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
string_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'string-field',
self.__STRING_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(string_value,
['Test String Value', 'Test String Value'])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_double_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
double_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'double-field',
self.__DOUBLE_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(double_value, [1.0, 1.0])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_bool_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
bool_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'bool-field',
self.__BOOL_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(bool_value, [True, True])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_timestamp_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
timestamp_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'timestamp-field',
self.__TIMESTAMP_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(timestamp_value[0].timestamp(), 1567778400)
self.assertEqual(timestamp_value[1].timestamp(), 1567778400)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_enum_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
bool_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'enum-field',
self.__NON_PRIMITIVE_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(bool_value, ['Test ENUM Value', 'Test ENUM Value'])
@classmethod
def __create_tag(cls):
tag = datacatalog.Tag()
tag.name = 'tag_template'
tag.template = 'template'
bool_field = datacatalog.TagField()
bool_field.bool_value = True
tag.fields['bool-field'] = bool_field
double_field = datacatalog.TagField()
double_field.double_value = 1
tag.fields['double-field'] = double_field
string_field = datacatalog.TagField()
string_field.string_value = 'Test String Value'
tag.fields['string-field'] = string_field
timestamp = timestamp_pb2.Timestamp()
timestamp.FromJsonString('2019-09-06T11:00:00-03:00')
timestamp_field = datacatalog.TagField()
timestamp_field.timestamp_value = timestamp
tag.fields['timestamp-field'] = timestamp_field
enum_field = datacatalog.TagField()
enum_field.enum_value.display_name = 'Test ENUM Value'
tag.fields['enum-field'] = enum_field
return tag
@classmethod
def __create_search_result(cls, relative_resource_name):
search_result = datacatalog.SearchCatalogResult()
search_result.relative_resource_name = relative_resource_name
return search_result | google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/datacatalog_facade_test.py |
import unittest
import mock
from google.api_core import exceptions
from google.cloud import datacatalog
from google.datacatalog_connectors.commons_test import utils
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors import commons
class DataCatalogFacadeTestCase(unittest.TestCase):
    """Unit tests for DataCatalogFacade with the Data Catalog client mocked."""
    # Package prefix used to build mock.patch target strings.
    __COMMONS_PACKAGE = 'google.datacatalog_connectors.commons'
    # Dotted path of the facade's search_catalog method, patched in tests.
    __SEARCH_CATALOG_METHOD = '{}.DataCatalogFacade.search_catalog'.format(
        __COMMONS_PACKAGE)
    # Primitive tag-field types exercised by the value-extraction tests.
    __BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
    __DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
    __STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
    __TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
    __NON_PRIMITIVE_TYPE = datacatalog.FieldType.PrimitiveType.\
        PRIMITIVE_TYPE_UNSPECIFIED
    # Patch the DataCatalogClient class so the facade under test never
    # reaches the real API; the mock instance is captured for assertions.
    @mock.patch('{}.datacatalog_facade.datacatalog.DataCatalogClient'.format(
        __COMMONS_PACKAGE))
    def setUp(self, mock_datacatalog_client):
        self.__datacatalog_facade = commons \
            .DataCatalogFacade('test-project')
        # Shortcut for the object assigned
        # to self.__datacatalog_facade.__datacatalog
        self.__datacatalog_client = mock_datacatalog_client.return_value
def test_constructor_should_set_instance_attributes(self):
attrs = self.__datacatalog_facade.__dict__
self.assertIsNotNone(attrs['_DataCatalogFacade__datacatalog'])
self.assertEqual('test-project',
attrs['_DataCatalogFacade__project_id'])
def test_create_entry_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.create_entry('entry_group_name', 'entry_id',
entry)
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_create_entry_should_raise_on_permission_denied(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.create_entry.side_effect = \
exceptions.PermissionDenied('Permission denied')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.assertRaises(exceptions.PermissionDenied,
self.__datacatalog_facade.create_entry,
'entry_group_name', 'entry_id', entry)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_get_entry_should_succeed(self):
self.__datacatalog_facade.get_entry('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.get_entry.call_count)
def test_lookup_entry_should_return_datacatalog_client_result(self):
fake_entry = datacatalog.Entry()
fake_entry.linked_resource = 'linked_resource'
datacatalog_client = self.__datacatalog_client
datacatalog_client.lookup_entry.return_value = fake_entry
entry = self.__datacatalog_facade.lookup_entry('linked_resource')
self.assertEqual(fake_entry, entry)
def test_lookup_entry_should_fulfill_linked_resource_request_field(self):
self.__datacatalog_facade.lookup_entry('linked_resource')
fake_request = datacatalog.LookupEntryRequest()
fake_request.linked_resource = 'linked_resource'
datacatalog_client = self.__datacatalog_client
datacatalog_client.lookup_entry.assert_called_once_with(
request=fake_request)
def test_update_entry_should_succeed(self):
self.__datacatalog_facade.update_entry({})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.update_entry.call_count)
def test_upsert_entry_nonexistent_should_create(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.side_effect = \
exceptions.PermissionDenied('Entry not found')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_upsert_entry_nonexistent_on_failed_precondition_should_raise(
self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.side_effect = \
exceptions.PermissionDenied('Entry not found')
datacatalog_client.create_entry.side_effect = \
exceptions.FailedPrecondition('Failed precondition')
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.assertRaises(exceptions.FailedPrecondition,
self.__datacatalog_facade.upsert_entry,
'entry_group_name', 'entry_id', entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.create_entry.call_count)
def test_upsert_entry_changed_should_update(self):
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_2', 11, 22)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_columns_equal_should_not_call_api(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.update_entry.assert_not_called()
datacatalog_client.create_entry.assert_not_called()
def test_upsert_entry_columns_changed_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'int',
'description')
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_column_deleted_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
cols_2 = [col_1]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_column_added_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_3', 'string',
'description')
cols_2 = [col_1, col_2, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_subcolumn_added_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_3.subcolumns = [{}]
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_subcolumn_deleted_should_update(self):
col_1 = utils.Utils.create_column_schema('column_1', 'int',
'description')
col_2 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_2.subcolumns = [{}, {}]
cols = [col_1, col_2]
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
col_3 = utils.Utils.create_column_schema('column_2', 'string',
'description')
col_3.subcolumns = [{}]
cols_2 = [col_1, col_3]
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22, cols_2)
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.create_entry.assert_not_called()
self.assertEqual(1, datacatalog_client.update_entry.call_count)
datacatalog_client.update_entry.assert_called_with(entry=entry_2,
update_mask=None)
def test_upsert_entry_should_raise_on_failed_precondition(self):
entry_1 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_1', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry_1
datacatalog_client.update_entry.side_effect = \
exceptions.FailedPrecondition('Failed precondition')
entry_2 = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource_2', 11, 22)
self.assertRaises(exceptions.FailedPrecondition,
self.__datacatalog_facade.upsert_entry,
'entry_group_name', 'entry_id', entry_2)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
self.assertEqual(1, datacatalog_client.update_entry.call_count)
def test_upsert_entry_unchanged_should_not_update(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
datacatalog_client = self.__datacatalog_client
datacatalog_client.get_entry.return_value = entry
self.__datacatalog_facade.upsert_entry('entry_group_name', 'entry_id',
entry)
self.assertEqual(1, datacatalog_client.get_entry.call_count)
datacatalog_client.update_entry.assert_not_called()
def test_delete_entry_should_succeed(self):
self.__datacatalog_facade.delete_entry('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_entry.call_count)
def test_delete_entry_error_should_be_ignored(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.delete_entry.side_effect = \
Exception('Error when deleting entry')
self.__datacatalog_facade.delete_entry('entry_name')
self.assertEqual(1, datacatalog_client.delete_entry.call_count)
def test_create_entry_group_should_succeed(self):
self.__datacatalog_facade.create_entry_group('location-id',
'entry_group_id')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_entry_group.call_count)
def test_delete_entry_group_should_succeed(self):
self.__datacatalog_facade.delete_entry_group('entry_group_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_entry_group.call_count)
def test_create_tag_template_should_succeed(self):
self.__datacatalog_facade.create_tag_template('location-id',
'tag_template_id', {})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_tag_template.call_count)
def test_get_tag_template_should_succeed(self):
self.__datacatalog_facade.get_tag_template('tag_template_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.get_tag_template.call_count)
def test_delete_tag_template_should_succeed(self):
self.__datacatalog_facade.delete_tag_template('tag_template_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_tag_template.call_count)
def test_create_tag_should_succeed(self):
self.__datacatalog_facade.create_tag('entry_name', {})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.create_tag.call_count)
def test_delete_tag_should_succeed(self):
self.__datacatalog_facade.delete_tag(self.__create_tag())
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.delete_tag.call_count)
def test_list_tags_should_succeed(self):
self.__datacatalog_facade.list_tags('entry_name')
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.list_tags.call_count)
def test_update_tag_should_succeed(self):
self.__datacatalog_facade.update_tag({})
datacatalog_client = self.__datacatalog_client
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_nonexistent_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = []
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.upsert_tags(entry, [self.__create_tag()])
self.assertEqual(1, datacatalog_client.create_tag.call_count)
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_changed_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [self.__create_tag()]
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
changed_tag = self.__create_tag()
changed_tag.fields['bool-field'].bool_value = False
self.__datacatalog_facade.upsert_tags(entry, [changed_tag])
datacatalog_client.create_tag.assert_not_called()
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_changed_column_uppercase_should_succeed(self):
datacatalog_client = self.__datacatalog_client
current_tag = self.__create_tag()
current_tag.column = 'ABC'
datacatalog_client.list_tags.return_value = [current_tag]
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
changed_tag = self.__create_tag()
changed_tag.column = 'abc'
changed_tag.fields['bool-field'].bool_value = False
self.__datacatalog_facade.upsert_tags(entry, [changed_tag])
datacatalog_client.create_tag.assert_not_called()
self.assertEqual(1, datacatalog_client.update_tag.call_count)
def test_upsert_tags_unchanged_column_uppercase_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
current_tag = self.__create_tag()
current_tag.column = 'ABC'
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [current_tag]
# column name is case insensitive, so it's the same column.
tag = self.__create_tag()
tag.column = 'abc'
self.__datacatalog_facade.upsert_tags(entry, [tag])
datacatalog_client.create_tag.assert_not_called()
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_unchanged_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.upsert_tags(entry, [tag])
datacatalog_client.create_tag.assert_not_called()
datacatalog_client.update_tag.assert_not_called()
def test_upsert_tags_should_handle_empty_list(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
try:
self.__datacatalog_facade.upsert_tags(entry, None)
except exceptions.GoogleAPICallError as e:
super(DataCatalogFacadeTestCase, self).fail(e)
def test_delete_tags_nonexistent_should_succeed(self):
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = []
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
self.__datacatalog_facade.delete_tags(entry, [self.__create_tag()],
'template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_nonexistent_template_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.delete_tags(entry, [tag],
'nonexistent-template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_unchanged_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
self.__datacatalog_facade.delete_tags(entry, [tag], 'template')
datacatalog_client.delete_tag.assert_not_called()
def test_delete_tags_deleted_should_succeed(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
deleted_tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [deleted_tag]
new_tag = self.__create_tag()
new_tag.template = 'new_template_2'
self.__datacatalog_facade.delete_tags(entry, [new_tag], 'template')
self.assertEqual(1, datacatalog_client.delete_tag.call_count)
def test_delete_tags_should_handle_empty_list(self):
entry = utils.Utils.create_entry_user_defined_type(
'type', 'system', 'display_name', 'name', 'description',
'linked_resource', 11, 22)
try:
self.__datacatalog_facade.delete_tags(entry, [], 'template')
except exceptions.GoogleAPICallError as e:
super(DataCatalogFacadeTestCase, self).fail(e)
def test_search_results_should_return_values(self):
expected_return_value = [
self.__create_search_result('localhost//asset_1'),
self.__create_search_result('localhost//asset_2')
]
datacatalog_client = self.__datacatalog_client
datacatalog_client.search_catalog.return_value = expected_return_value
return_value = self.__datacatalog_facade.search_catalog('query')
self.assertEqual(1, datacatalog_client.search_catalog.call_count)
self.assertEqual(expected_return_value, return_value)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_search_catalog_relative_resource_name_should_return_names(
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
resource_names = self.__datacatalog_facade \
.search_catalog_relative_resource_name(
'system=bigquery')
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(expected_resource_names, resource_names)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_string_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
string_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'string-field',
self.__STRING_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(string_value,
['Test String Value', 'Test String Value'])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_double_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
double_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'double-field',
self.__DOUBLE_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(double_value, [1.0, 1.0])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_bool_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
bool_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'bool-field',
self.__BOOL_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(bool_value, [True, True])
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_timestamp_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
timestamp_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'timestamp-field',
self.__TIMESTAMP_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(timestamp_value[0].timestamp(), 1567778400)
self.assertEqual(timestamp_value[1].timestamp(), 1567778400)
@mock.patch(__SEARCH_CATALOG_METHOD)
def test_get_tag_field_values_for_search_results_enum_field_should_return_values( # noqa: E501
self, mock_search_catalog): # noqa: E125
expected_resource_names = ['localhost//asset_1', 'localhost//asset_2']
search_return_values = [
self.__create_search_result(resource_name)
for resource_name in expected_resource_names
]
mock_search_catalog.return_value = search_return_values
tag = self.__create_tag()
datacatalog_client = self.__datacatalog_client
datacatalog_client.list_tags.return_value = [tag]
bool_value = self.__datacatalog_facade \
.get_tag_field_values_for_search_results(
'system=bigquery', 'template', 'enum-field',
self.__NON_PRIMITIVE_TYPE)
self.assertEqual(1, mock_search_catalog.call_count)
self.assertEqual(2, datacatalog_client.list_tags.call_count)
self.assertEqual(bool_value, ['Test ENUM Value', 'Test ENUM Value'])
@classmethod
def __create_tag(cls):
tag = datacatalog.Tag()
tag.name = 'tag_template'
tag.template = 'template'
bool_field = datacatalog.TagField()
bool_field.bool_value = True
tag.fields['bool-field'] = bool_field
double_field = datacatalog.TagField()
double_field.double_value = 1
tag.fields['double-field'] = double_field
string_field = datacatalog.TagField()
string_field.string_value = 'Test String Value'
tag.fields['string-field'] = string_field
timestamp = timestamp_pb2.Timestamp()
timestamp.FromJsonString('2019-09-06T11:00:00-03:00')
timestamp_field = datacatalog.TagField()
timestamp_field.timestamp_value = timestamp
tag.fields['timestamp-field'] = timestamp_field
enum_field = datacatalog.TagField()
enum_field.enum_value.display_name = 'Test ENUM Value'
tag.fields['enum-field'] = enum_field
return tag
@classmethod
def __create_search_result(cls, relative_resource_name):
search_result = datacatalog.SearchCatalogResult()
search_result.relative_resource_name = relative_resource_name
return search_result | 0.512693 | 0.14734 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'HookAuthArgs',
'HookChannelArgs',
'HookHeaderArgs',
]
@pulumi.input_type
class HookAuthArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Key to use for authentication, usually the header name, for example `"Authorization"`.
:param pulumi.Input[str] type: The type of hook to trigger. Currently only `"HTTP"` is supported.
:param pulumi.Input[str] value: Authentication secret.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key to use for authentication, usually the header name, for example `"Authorization"`.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Authentication secret.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class HookChannelArgs:
def __init__(__self__, *,
uri: pulumi.Input[str],
version: pulumi.Input[str],
method: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] uri: The URI the hook will hit.
:param pulumi.Input[str] version: The version of the endpoint.
:param pulumi.Input[str] method: The request method to use. Default is `"POST"`.
:param pulumi.Input[bool] type: The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
pulumi.set(__self__, "uri", uri)
pulumi.set(__self__, "version", version)
if method is not None:
pulumi.set(__self__, "method", method)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
The URI the hook will hit.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
The version of the endpoint.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter
def method(self) -> Optional[pulumi.Input[str]]:
"""
The request method to use. Default is `"POST"`.
"""
return pulumi.get(self, "method")
@method.setter
def method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "method", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[bool]]:
"""
The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class HookHeaderArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Key to use for authentication, usually the header name, for example `"Authorization"`.
:param pulumi.Input[str] value: Authentication secret.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key to use for authentication, usually the header name, for example `"Authorization"`.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Authentication secret.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value) | sdk/python/pulumi_okta/inline/_inputs.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'HookAuthArgs',
'HookChannelArgs',
'HookHeaderArgs',
]
@pulumi.input_type
class HookAuthArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Key to use for authentication, usually the header name, for example `"Authorization"`.
:param pulumi.Input[str] type: The type of hook to trigger. Currently only `"HTTP"` is supported.
:param pulumi.Input[str] value: Authentication secret.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key to use for authentication, usually the header name, for example `"Authorization"`.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Authentication secret.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class HookChannelArgs:
def __init__(__self__, *,
uri: pulumi.Input[str],
version: pulumi.Input[str],
method: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] uri: The URI the hook will hit.
:param pulumi.Input[str] version: The version of the endpoint.
:param pulumi.Input[str] method: The request method to use. Default is `"POST"`.
:param pulumi.Input[bool] type: The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
pulumi.set(__self__, "uri", uri)
pulumi.set(__self__, "version", version)
if method is not None:
pulumi.set(__self__, "method", method)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def uri(self) -> pulumi.Input[str]:
"""
The URI the hook will hit.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: pulumi.Input[str]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
The version of the endpoint.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter
def method(self) -> Optional[pulumi.Input[str]]:
"""
The request method to use. Default is `"POST"`.
"""
return pulumi.get(self, "method")
@method.setter
def method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "method", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[bool]]:
"""
The type of hook to trigger. Currently only `"HTTP"` is supported.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class HookHeaderArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] key: Key to use for authentication, usually the header name, for example `"Authorization"`.
:param pulumi.Input[str] value: Authentication secret.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key to use for authentication, usually the header name, for example `"Authorization"`.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Authentication secret.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value) | 0.881558 | 0.091951 |
import logging
import os
from collections import Counter
from typing import Dict, List, Optional
import click
import mlflow
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold, StratifiedShuffleSplit
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
TrainingArguments,
)
from stonkgs.constants import (
CELL_LINE_DIR,
CORRECT_DIR,
DEEPSPEED_CONFIG_PATH,
DISEASE_DIR,
EMBEDDINGS_PATH,
LOCATION_DIR,
MLFLOW_FINETUNING_TRACKING_URI,
NLP_BL_OUTPUT_DIR,
NLP_MODEL_TYPE,
RELATION_TYPE_DIR,
SPECIES_DIR,
STONKGS_OUTPUT_DIR,
)
from stonkgs.data.indra_for_pretraining import prepare_df
# Initialize logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Disable alembic info
logging.getLogger("alembic").setLevel(logging.WARNING)
class INDRAEvidenceDataset(torch.utils.data.Dataset):
"""Custom Dataset class for INDRA data."""
def __init__(self, encodings, labels):
"""Initialize INDRA Dataset based on token embeddings for each text evidence."""
# Assumes that the labels are numerically encoded
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
"""Return data entries (text evidences) for given indices."""
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
"""Return the length of the dataset."""
return len(self.labels)
def get_train_test_splits(
data: pd.DataFrame,
max_dataset_size: int = 100000,
label_column_name: str = "class",
random_seed: int = 42,
n_splits: int = 5,
) -> List:
"""Return deterministic train/test indices for n_splits based on the fine-tuning dataset that is passed."""
# Leave out the label in the dataset
data_no_labels = data.drop(label_column_name, axis=1)
labels = data[label_column_name]
# Cut the dataset down to max_dataset_size (deterministically!) using StratifiedShuffleSplit if needed:
# (this is not an actual train/test split, this is just for getting a dataset of size max_dataset_size in a
# stratified and deterministic manner)
if len(data) > max_dataset_size:
splitter = StratifiedShuffleSplit(
n_splits=1,
train_size=max_dataset_size,
random_state=random_seed,
)
for train_index, _ in splitter.split(data_no_labels, labels):
data_no_labels = data_no_labels.iloc[train_index, :].reset_index(drop=True)
labels = labels.iloc[train_index].reset_index(drop=True)
# Generate the actual train/test splits here:
# Implement non-stratified train/test splits with no validation split
# It is shuffled deterministically (determined by random_seed)
skf = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True)
return [
{"train_idx": train_idx, "test_idx": test_idx}
for train_idx, test_idx in skf.split(data_no_labels, labels)
]
def run_nlp_baseline_classification_cv(
train_data_path: str,
sep: Optional[str] = "\t",
model_type: str = NLP_MODEL_TYPE,
output_dir: Optional[str] = NLP_BL_OUTPUT_DIR,
logging_uri_mlflow: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,
label_column_name: str = "class",
text_data_column_name: str = "evidence",
epochs: Optional[int] = 10,
log_steps: int = 500,
lr: float = 5e-5,
batch_size: int = 16,
gradient_accumulation: int = 1,
task_name: str = "",
embedding_path: str = EMBEDDINGS_PATH,
deepspeed: bool = True,
max_dataset_size: int = 100000,
) -> Dict:
"""Run cross-validation for the sequence classification task."""
# Get data splits
indra_data = pd.read_csv(train_data_path, sep=sep)
# TODO: leave it out later on?
# Filter out any triples that contain a node that is not in the embeddings_dict
embeddings_dict = prepare_df(embedding_path)
original_length = len(indra_data)
indra_data = indra_data[
indra_data["source"].isin(embeddings_dict.keys())
& indra_data["target"].isin(embeddings_dict.keys())
].reset_index(drop=True)
new_length = len(indra_data)
logger.info(
f"{original_length - new_length} out of {original_length} triples are left out because they contain "
f"nodes which are not present in the pre-training data"
)
train_test_splits = get_train_test_splits(
indra_data,
label_column_name=label_column_name,
max_dataset_size=max_dataset_size,
)
# Get text evidences and labels
evidences_text, labels_str = indra_data[text_data_column_name], indra_data[label_column_name]
# Numerically encode labels
unique_tags = set(label for label in labels_str)
tag2id = {label: number for number, label in enumerate(unique_tags)}
id2tag = {value: key for key, value in tag2id.items()}
labels = pd.Series([int(tag2id[label]) for label in labels_str])
# Initialize the f1-score
f1_scores = []
# End previous run
mlflow.end_run()
# Initialize mlflow run, set tracking URI to use the same experiment for all runs,
# so that one can compare them
mlflow.set_tracking_uri(logging_uri_mlflow)
mlflow.set_experiment("NLP Baseline for STonKGs")
# Start a parent run so that all CV splits are tracked as nested runs
# mlflow.start_run(run_name='Parent Run')
# Initialize a dataframe for all the predicted labels
result_df = pd.DataFrame()
for idx, indices in enumerate(train_test_splits):
# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_type)
model = AutoModelForSequenceClassification.from_pretrained(
model_type, num_labels=len(unique_tags)
)
# Encode all text evidences, pad and truncate to max_seq_len
train_evidences = tokenizer(
evidences_text[indices["train_idx"]].tolist(), truncation=True, padding=True
)
test_evidences = tokenizer(
evidences_text[indices["test_idx"]].tolist(), truncation=True, padding=True
)
train_labels = labels[indices["train_idx"]].tolist()
test_labels = labels[indices["test_idx"]].tolist()
train_dataset = INDRAEvidenceDataset(encodings=train_evidences, labels=train_labels)
test_dataset = INDRAEvidenceDataset(encodings=test_evidences, labels=test_labels)
# Note that due to the randomization in the batches, the training/evaluation is slightly
# different every time
training_args = TrainingArguments(
# label_names
output_dir=output_dir,
num_train_epochs=epochs, # total number of training epochs
logging_steps=log_steps,
learning_rate=lr,
# Use deepspeed with a specified config file for speedup
deepspeed=DEEPSPEED_CONFIG_PATH if deepspeed else None,
report_to=["mlflow"], # log via mlflow
do_train=True,
do_predict=True,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=gradient_accumulation,
)
# Initialize Trainer based on the training dataset
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
# Train
trainer.train()
# Log some details about the datasets used in training and testing
mlflow.log_param("label dict", str(tag2id))
mlflow.log_param("training dataset size", str(len(train_labels)))
mlflow.log_param("training class dist", str(Counter(train_labels)))
mlflow.log_param("test dataset size", str(len(test_labels)))
mlflow.log_param("test class dist", str(Counter(test_labels)))
# Make predictions for the test dataset
predictions = trainer.predict(test_dataset=test_dataset).predictions
predicted_labels = np.argmax(predictions, axis=1)
logger.info(f"Predicted labels: {predicted_labels}")
# Save the predicted + true labels
partial_result_df = pd.DataFrame(
{
"split": idx,
"index": indices["test_idx"].tolist(),
"predicted_label": predicted_labels.tolist(),
"true_label": test_labels,
"evidence": evidences_text[indices["test_idx"]].tolist(),
},
)
result_df = result_df.append(
partial_result_df,
ignore_index=True,
)
# Use weighted average
f1_sc = f1_score(test_labels, predicted_labels, average="weighted")
f1_scores.append(f1_sc)
# Log the final f1 score of the split
mlflow.log_metric("f1_score_weighted", f1_sc)
# Log mean and std f1-scores from the cross validation procedure (average and std across all splits) to the
# standard logger
logger.info(f"Mean f1-score: {np.mean(f1_scores)}")
logger.info(f"Std f1-score: {np.std(f1_scores)}")
# Map the labels in the result df back to their original names
result_df = result_df.replace({"predicted_label": id2tag, "true_label": id2tag})
# Save the result_df
result_df.to_csv(
os.path.join(NLP_BL_OUTPUT_DIR, "predicted_labels_nlp_" + task_name + "df.tsv"),
index=False,
sep="\t",
)
# Save the last model
trainer.save_model(output_dir=NLP_BL_OUTPUT_DIR)
# End the previous run
mlflow.end_run()
# Log the mean and std f1 score from the cross validation procedure to mlflow
with mlflow.start_run():
# Log the task name as well
mlflow.log_param("task name", task_name)
mlflow.log_metric("f1_score_mean", np.mean(f1_scores))
mlflow.log_metric("f1_score_std", np.std(f1_scores))
# End parent run
# mlflow.end_run()
return {"f1_score_mean": np.mean(f1_scores), "f1_score_std": np.std(f1_scores)}
@click.command()
@click.option("-e", "--epochs", default=5, help="Number of epochs", type=int)
@click.option("--lr", default=5e-5, help="Learning rate", type=float)
@click.option(
"--logging_dir",
default=MLFLOW_FINETUNING_TRACKING_URI,
help="Mlflow logging/tracking URI",
type=str,
)
@click.option("--log_steps", default=500, help="Number of steps between each log", type=int)
@click.option("--output_dir", default=STONKGS_OUTPUT_DIR, help="Output directory", type=str)
@click.option("--batch_size", default=8, help="Batch size used in fine-tuning", type=int)
@click.option(
"--gradient_accumulation_steps", default=1, help="Gradient accumulation steps", type=int
)
@click.option("--deepspeed", default=True, help="Whether to use deepspeed or not", type=bool)
@click.option(
"--max_dataset_size",
default=100000,
help="Maximum dataset size of the fine-tuning datasets",
type=int,
)
@click.option("--local_rank", default=-1, help="THIS PARAMETER IS IGNORED", type=int)
def run_all_fine_tuning_tasks(
epochs: int = 5,
log_steps: int = 500,
lr: float = 5e-5,
output_dir: str = STONKGS_OUTPUT_DIR,
logging_dir: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,
batch_size: int = 8,
gradient_accumulation_steps: int = 1,
deepspeed: bool = True,
max_dataset_size: int = 100000, # effectively removes the max dataset size restriction
local_rank: int = -1,
):
"""Run all fine-tuning tasks at once."""
# Specify all directories and file names
directories = [
CELL_LINE_DIR,
CORRECT_DIR,
CORRECT_DIR,
DISEASE_DIR,
LOCATION_DIR,
SPECIES_DIR,
RELATION_TYPE_DIR,
RELATION_TYPE_DIR,
]
file_names = [
"cell_line_no_duplicates.tsv",
"correct_incorrect_binary_no_duplicates.tsv",
"correct_incorrect_multiclass_no_duplicates.tsv",
"disease_no_duplicates.tsv",
"location_no_duplicates.tsv",
"species_no_duplicates.tsv",
"relation_type_no_duplicates.tsv",
"relation_type_no_duplicates.tsv",
]
task_names = [
"cell_line",
"correct_binary",
"correct_multiclass",
"disease",
"location",
"species",
"interaction",
"polarity",
]
# Specify the column names of the target variable
column_names = ["class"] * 6 + ["interaction"] + ["polarity"]
for directory, file, column_name, task_name in zip(
directories,
file_names,
column_names,
task_names,
):
# Run each of the six classification tasks
run_nlp_baseline_classification_cv(
train_data_path=os.path.join(directory, file),
output_dir=output_dir,
logging_uri_mlflow=logging_dir,
epochs=epochs,
log_steps=log_steps,
lr=lr,
batch_size=batch_size,
gradient_accumulation=gradient_accumulation_steps,
label_column_name=column_name,
task_name=task_name,
deepspeed=deepspeed,
max_dataset_size=max_dataset_size,
)
logger.info(f"Finished the {task_name} task")
if __name__ == "__main__":
# Set the huggingface environment variable for tokenizer parallelism to false
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Run all classification tasks
run_all_fine_tuning_tasks() | src/stonkgs/models/nlp_baseline_model.py | import logging
import os
from collections import Counter
from typing import Dict, List, Optional
import click
import mlflow
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold, StratifiedShuffleSplit
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
TrainingArguments,
)
from stonkgs.constants import (
CELL_LINE_DIR,
CORRECT_DIR,
DEEPSPEED_CONFIG_PATH,
DISEASE_DIR,
EMBEDDINGS_PATH,
LOCATION_DIR,
MLFLOW_FINETUNING_TRACKING_URI,
NLP_BL_OUTPUT_DIR,
NLP_MODEL_TYPE,
RELATION_TYPE_DIR,
SPECIES_DIR,
STONKGS_OUTPUT_DIR,
)
from stonkgs.data.indra_for_pretraining import prepare_df
# Initialize logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Disable alembic info
logging.getLogger("alembic").setLevel(logging.WARNING)
class INDRAEvidenceDataset(torch.utils.data.Dataset):
"""Custom Dataset class for INDRA data."""
def __init__(self, encodings, labels):
"""Initialize INDRA Dataset based on token embeddings for each text evidence."""
# Assumes that the labels are numerically encoded
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
"""Return data entries (text evidences) for given indices."""
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
"""Return the length of the dataset."""
return len(self.labels)
def get_train_test_splits(
data: pd.DataFrame,
max_dataset_size: int = 100000,
label_column_name: str = "class",
random_seed: int = 42,
n_splits: int = 5,
) -> List:
"""Return deterministic train/test indices for n_splits based on the fine-tuning dataset that is passed."""
# Leave out the label in the dataset
data_no_labels = data.drop(label_column_name, axis=1)
labels = data[label_column_name]
# Cut the dataset down to max_dataset_size (deterministically!) using StratifiedShuffleSplit if needed:
# (this is not an actual train/test split, this is just for getting a dataset of size max_dataset_size in a
# stratified and deterministic manner)
if len(data) > max_dataset_size:
splitter = StratifiedShuffleSplit(
n_splits=1,
train_size=max_dataset_size,
random_state=random_seed,
)
for train_index, _ in splitter.split(data_no_labels, labels):
data_no_labels = data_no_labels.iloc[train_index, :].reset_index(drop=True)
labels = labels.iloc[train_index].reset_index(drop=True)
# Generate the actual train/test splits here:
# Implement non-stratified train/test splits with no validation split
# It is shuffled deterministically (determined by random_seed)
skf = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True)
return [
{"train_idx": train_idx, "test_idx": test_idx}
for train_idx, test_idx in skf.split(data_no_labels, labels)
]
def run_nlp_baseline_classification_cv(
train_data_path: str,
sep: Optional[str] = "\t",
model_type: str = NLP_MODEL_TYPE,
output_dir: Optional[str] = NLP_BL_OUTPUT_DIR,
logging_uri_mlflow: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,
label_column_name: str = "class",
text_data_column_name: str = "evidence",
epochs: Optional[int] = 10,
log_steps: int = 500,
lr: float = 5e-5,
batch_size: int = 16,
gradient_accumulation: int = 1,
task_name: str = "",
embedding_path: str = EMBEDDINGS_PATH,
deepspeed: bool = True,
max_dataset_size: int = 100000,
) -> Dict:
"""Run cross-validation for the sequence classification task."""
# Get data splits
indra_data = pd.read_csv(train_data_path, sep=sep)
# TODO: leave it out later on?
# Filter out any triples that contain a node that is not in the embeddings_dict
embeddings_dict = prepare_df(embedding_path)
original_length = len(indra_data)
indra_data = indra_data[
indra_data["source"].isin(embeddings_dict.keys())
& indra_data["target"].isin(embeddings_dict.keys())
].reset_index(drop=True)
new_length = len(indra_data)
logger.info(
f"{original_length - new_length} out of {original_length} triples are left out because they contain "
f"nodes which are not present in the pre-training data"
)
train_test_splits = get_train_test_splits(
indra_data,
label_column_name=label_column_name,
max_dataset_size=max_dataset_size,
)
# Get text evidences and labels
evidences_text, labels_str = indra_data[text_data_column_name], indra_data[label_column_name]
# Numerically encode labels
unique_tags = set(label for label in labels_str)
tag2id = {label: number for number, label in enumerate(unique_tags)}
id2tag = {value: key for key, value in tag2id.items()}
labels = pd.Series([int(tag2id[label]) for label in labels_str])
# Initialize the f1-score
f1_scores = []
# End previous run
mlflow.end_run()
# Initialize mlflow run, set tracking URI to use the same experiment for all runs,
# so that one can compare them
mlflow.set_tracking_uri(logging_uri_mlflow)
mlflow.set_experiment("NLP Baseline for STonKGs")
# Start a parent run so that all CV splits are tracked as nested runs
# mlflow.start_run(run_name='Parent Run')
# Initialize a dataframe for all the predicted labels
result_df = pd.DataFrame()
for idx, indices in enumerate(train_test_splits):
# Initialize tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_type)
model = AutoModelForSequenceClassification.from_pretrained(
model_type, num_labels=len(unique_tags)
)
# Encode all text evidences, pad and truncate to max_seq_len
train_evidences = tokenizer(
evidences_text[indices["train_idx"]].tolist(), truncation=True, padding=True
)
test_evidences = tokenizer(
evidences_text[indices["test_idx"]].tolist(), truncation=True, padding=True
)
train_labels = labels[indices["train_idx"]].tolist()
test_labels = labels[indices["test_idx"]].tolist()
train_dataset = INDRAEvidenceDataset(encodings=train_evidences, labels=train_labels)
test_dataset = INDRAEvidenceDataset(encodings=test_evidences, labels=test_labels)
# Note that due to the randomization in the batches, the training/evaluation is slightly
# different every time
training_args = TrainingArguments(
# label_names
output_dir=output_dir,
num_train_epochs=epochs, # total number of training epochs
logging_steps=log_steps,
learning_rate=lr,
# Use deepspeed with a specified config file for speedup
deepspeed=DEEPSPEED_CONFIG_PATH if deepspeed else None,
report_to=["mlflow"], # log via mlflow
do_train=True,
do_predict=True,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=gradient_accumulation,
)
# Initialize Trainer based on the training dataset
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
# Train
trainer.train()
# Log some details about the datasets used in training and testing
mlflow.log_param("label dict", str(tag2id))
mlflow.log_param("training dataset size", str(len(train_labels)))
mlflow.log_param("training class dist", str(Counter(train_labels)))
mlflow.log_param("test dataset size", str(len(test_labels)))
mlflow.log_param("test class dist", str(Counter(test_labels)))
# Make predictions for the test dataset
predictions = trainer.predict(test_dataset=test_dataset).predictions
predicted_labels = np.argmax(predictions, axis=1)
logger.info(f"Predicted labels: {predicted_labels}")
# Save the predicted + true labels
partial_result_df = pd.DataFrame(
{
"split": idx,
"index": indices["test_idx"].tolist(),
"predicted_label": predicted_labels.tolist(),
"true_label": test_labels,
"evidence": evidences_text[indices["test_idx"]].tolist(),
},
)
result_df = result_df.append(
partial_result_df,
ignore_index=True,
)
# Use weighted average
f1_sc = f1_score(test_labels, predicted_labels, average="weighted")
f1_scores.append(f1_sc)
# Log the final f1 score of the split
mlflow.log_metric("f1_score_weighted", f1_sc)
# Log mean and std f1-scores from the cross validation procedure (average and std across all splits) to the
# standard logger
logger.info(f"Mean f1-score: {np.mean(f1_scores)}")
logger.info(f"Std f1-score: {np.std(f1_scores)}")
# Map the labels in the result df back to their original names
result_df = result_df.replace({"predicted_label": id2tag, "true_label": id2tag})
# Save the result_df
result_df.to_csv(
os.path.join(NLP_BL_OUTPUT_DIR, "predicted_labels_nlp_" + task_name + "df.tsv"),
index=False,
sep="\t",
)
# Save the last model
trainer.save_model(output_dir=NLP_BL_OUTPUT_DIR)
# End the previous run
mlflow.end_run()
# Log the mean and std f1 score from the cross validation procedure to mlflow
with mlflow.start_run():
# Log the task name as well
mlflow.log_param("task name", task_name)
mlflow.log_metric("f1_score_mean", np.mean(f1_scores))
mlflow.log_metric("f1_score_std", np.std(f1_scores))
# End parent run
# mlflow.end_run()
return {"f1_score_mean": np.mean(f1_scores), "f1_score_std": np.std(f1_scores)}
@click.command()
@click.option("-e", "--epochs", default=5, help="Number of epochs", type=int)
@click.option("--lr", default=5e-5, help="Learning rate", type=float)
@click.option(
"--logging_dir",
default=MLFLOW_FINETUNING_TRACKING_URI,
help="Mlflow logging/tracking URI",
type=str,
)
@click.option("--log_steps", default=500, help="Number of steps between each log", type=int)
@click.option("--output_dir", default=STONKGS_OUTPUT_DIR, help="Output directory", type=str)
@click.option("--batch_size", default=8, help="Batch size used in fine-tuning", type=int)
@click.option(
"--gradient_accumulation_steps", default=1, help="Gradient accumulation steps", type=int
)
@click.option("--deepspeed", default=True, help="Whether to use deepspeed or not", type=bool)
@click.option(
"--max_dataset_size",
default=100000,
help="Maximum dataset size of the fine-tuning datasets",
type=int,
)
@click.option("--local_rank", default=-1, help="THIS PARAMETER IS IGNORED", type=int)
def run_all_fine_tuning_tasks(
epochs: int = 5,
log_steps: int = 500,
lr: float = 5e-5,
output_dir: str = STONKGS_OUTPUT_DIR,
logging_dir: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,
batch_size: int = 8,
gradient_accumulation_steps: int = 1,
deepspeed: bool = True,
max_dataset_size: int = 100000, # effectively removes the max dataset size restriction
local_rank: int = -1,
):
"""Run all fine-tuning tasks at once."""
# Specify all directories and file names
directories = [
CELL_LINE_DIR,
CORRECT_DIR,
CORRECT_DIR,
DISEASE_DIR,
LOCATION_DIR,
SPECIES_DIR,
RELATION_TYPE_DIR,
RELATION_TYPE_DIR,
]
file_names = [
"cell_line_no_duplicates.tsv",
"correct_incorrect_binary_no_duplicates.tsv",
"correct_incorrect_multiclass_no_duplicates.tsv",
"disease_no_duplicates.tsv",
"location_no_duplicates.tsv",
"species_no_duplicates.tsv",
"relation_type_no_duplicates.tsv",
"relation_type_no_duplicates.tsv",
]
task_names = [
"cell_line",
"correct_binary",
"correct_multiclass",
"disease",
"location",
"species",
"interaction",
"polarity",
]
# Specify the column names of the target variable
column_names = ["class"] * 6 + ["interaction"] + ["polarity"]
for directory, file, column_name, task_name in zip(
directories,
file_names,
column_names,
task_names,
):
# Run each of the six classification tasks
run_nlp_baseline_classification_cv(
train_data_path=os.path.join(directory, file),
output_dir=output_dir,
logging_uri_mlflow=logging_dir,
epochs=epochs,
log_steps=log_steps,
lr=lr,
batch_size=batch_size,
gradient_accumulation=gradient_accumulation_steps,
label_column_name=column_name,
task_name=task_name,
deepspeed=deepspeed,
max_dataset_size=max_dataset_size,
)
logger.info(f"Finished the {task_name} task")
if __name__ == "__main__":
# Set the huggingface environment variable for tokenizer parallelism to false
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Run all classification tasks
run_all_fine_tuning_tasks() | 0.839142 | 0.336195 |
import pytest
from silene.crawl_request import CrawlRequest
from silene.crawler_configuration import CrawlerConfiguration
def test_constructor_should_raise_value_error_when_invalid_domain_in_allowed_domains() -> None:
with pytest.raises(ValueError) as exc_info:
CrawlerConfiguration([], allowed_domains=['example.invalid'])
assert str(exc_info.value) == 'Could not extract a valid domain from example.invalid'
def test_seed_requests_should_return_seed_requests() -> None:
seed_requests = [CrawlRequest('https://example.com')]
crawler_configuration = CrawlerConfiguration(seed_requests)
assert crawler_configuration.seed_requests is seed_requests
def test_filter_duplicate_requests_should_return_default_value_when_not_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.filter_duplicate_requests is True
def test_filter_duplicate_requests_should_return_specified_value_when_specified() -> None:
crawler_configuration = CrawlerConfiguration([], filter_duplicate_requests=False)
assert crawler_configuration.filter_duplicate_requests is False
def test_filter_offsite_requests_should_return_default_value_when_not_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.filter_offsite_requests is False
def test_filter_offsite_requests_should_return_specified_value_when_specified() -> None:
crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True)
assert crawler_configuration.filter_offsite_requests is True
def test_allowed_domains_should_return_empty_list_when_no_allowed_domains_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.allowed_domains == []
def test_allowed_domains_should_return_domains_only() -> None:
crawler_configuration = CrawlerConfiguration([], allowed_domains=['https://www.example.com:80/'])
assert crawler_configuration.allowed_domains == ['www.example.com']
def test_str_should_return_string_representation() -> None:
crawler_configuration = CrawlerConfiguration([CrawlRequest('https://example.com')],
filter_offsite_requests=True,
allowed_domains=['example.com'])
assert str(crawler_configuration) == 'CrawlerConfiguration(seed_requests=1 requests, ' \
'filter_duplicate_requests=True, ' \
'filter_offsite_requests=True, ' \
'allowed_domains=1 domains)' | tests/unit/test_crawler_configuration.py |
import pytest
from silene.crawl_request import CrawlRequest
from silene.crawler_configuration import CrawlerConfiguration
def test_constructor_should_raise_value_error_when_invalid_domain_in_allowed_domains() -> None:
with pytest.raises(ValueError) as exc_info:
CrawlerConfiguration([], allowed_domains=['example.invalid'])
assert str(exc_info.value) == 'Could not extract a valid domain from example.invalid'
def test_seed_requests_should_return_seed_requests() -> None:
seed_requests = [CrawlRequest('https://example.com')]
crawler_configuration = CrawlerConfiguration(seed_requests)
assert crawler_configuration.seed_requests is seed_requests
def test_filter_duplicate_requests_should_return_default_value_when_not_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.filter_duplicate_requests is True
def test_filter_duplicate_requests_should_return_specified_value_when_specified() -> None:
crawler_configuration = CrawlerConfiguration([], filter_duplicate_requests=False)
assert crawler_configuration.filter_duplicate_requests is False
def test_filter_offsite_requests_should_return_default_value_when_not_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.filter_offsite_requests is False
def test_filter_offsite_requests_should_return_specified_value_when_specified() -> None:
crawler_configuration = CrawlerConfiguration([], filter_offsite_requests=True)
assert crawler_configuration.filter_offsite_requests is True
def test_allowed_domains_should_return_empty_list_when_no_allowed_domains_specified() -> None:
crawler_configuration = CrawlerConfiguration([])
assert crawler_configuration.allowed_domains == []
def test_allowed_domains_should_return_domains_only() -> None:
crawler_configuration = CrawlerConfiguration([], allowed_domains=['https://www.example.com:80/'])
assert crawler_configuration.allowed_domains == ['www.example.com']
def test_str_should_return_string_representation() -> None:
crawler_configuration = CrawlerConfiguration([CrawlRequest('https://example.com')],
filter_offsite_requests=True,
allowed_domains=['example.com'])
assert str(crawler_configuration) == 'CrawlerConfiguration(seed_requests=1 requests, ' \
'filter_duplicate_requests=True, ' \
'filter_offsite_requests=True, ' \
'allowed_domains=1 domains)' | 0.79956 | 0.554229 |
from __future__ import unicode_literals
# To use a consistent encoding
import codecs
from setuptools import setup, find_packages
import sys, os.path
def parse_reqs(req_path="./requirements.txt"):
"""Recursively parse requirements from nested pip files."""
install_requires = []
with codecs.open(req_path, "r") as handle:
# remove comments and empty lines
lines = (
line.strip() for line in handle if line.strip() and not line.startswith("#")
)
for line in lines:
# check for nested requirements files
if line.startswith("-r"):
# recursively call this function
install_requires += parse_reqs(req_path=line[3:])
else:
# add the line as a new requirement
install_requires.append(line)
return install_requires
setup(
name="pdf_stuff",
version="0.0.1",
url="https://github.com/rldotai/pdf-stuff",
license="BSD",
author="rldotai",
author_email="<EMAIL>",
description="Scripts and such for working with PDFs.",
long_description=__doc__,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=False,
platforms="any",
install_requires=parse_reqs(),
entry_points={
"console_scripts": [
"pdfdiff.py = pdf_stuff.pdfdiff:main",
"pdf2text.py = pdf_stuff.pdf2text:main",
"pdfmeta.py = pdf_stuff.pdfmeta:main",
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
) | setup.py | from __future__ import unicode_literals
# To use a consistent encoding
import codecs
from setuptools import setup, find_packages
import sys, os.path
def parse_reqs(req_path="./requirements.txt"):
"""Recursively parse requirements from nested pip files."""
install_requires = []
with codecs.open(req_path, "r") as handle:
# remove comments and empty lines
lines = (
line.strip() for line in handle if line.strip() and not line.startswith("#")
)
for line in lines:
# check for nested requirements files
if line.startswith("-r"):
# recursively call this function
install_requires += parse_reqs(req_path=line[3:])
else:
# add the line as a new requirement
install_requires.append(line)
return install_requires
setup(
name="pdf_stuff",
version="0.0.1",
url="https://github.com/rldotai/pdf-stuff",
license="BSD",
author="rldotai",
author_email="<EMAIL>",
description="Scripts and such for working with PDFs.",
long_description=__doc__,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
zip_safe=False,
platforms="any",
install_requires=parse_reqs(),
entry_points={
"console_scripts": [
"pdfdiff.py = pdf_stuff.pdfdiff:main",
"pdf2text.py = pdf_stuff.pdf2text:main",
"pdfmeta.py = pdf_stuff.pdfmeta:main",
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
) | 0.481698 | 0.140248 |
from user import User
from credentials import Credentials
def create_user(login_username, password):
'''
Function to create new user
'''
new_user = User(login_username,password)
return new_user
def create_credentials(account_name,account_username, account_password):
'''
Function to create new credential
'''
new_credential = Credentials(account_name,account_username, account_password)
return new_credential
def save_user(user):
'''
Function to save user
'''
user.save_user()
def user_authenticate(name,password):
'''
Function to authenticate user
'''
return User.authenticate_user(name,password)
def save_credentials(credential):
'''
Function to save credential
'''
credential.save_credential()
def del_credential(credential):
'''
Function to delete credential
'''
credential.delete_credential()
def display_credentials():
'''
Function that returns all saved credentials
'''
return Credentials.display_credential()
def generate_pass():
'''
Function that generates random password
'''
return Credentials.generate_password()
def find_credential(name):
'''
Function that finds credentials using the name of the account
'''
return Credentials.find_by_name(name)
def credential_exist(name):
'''
Function to check that credential exist
'''
return Credentials.name_exist(name)
def main():
print("PASSWORD LOCKER")
print("--"*30)
print("An application that saves your account Details")
print("\n")
print("What is your name?")
user_name = input()
print("\n")
print(f"Hello {user_name}. Are you a new user or would you like to create an account")
print("\n")
while True:
print("Use these short codes:\n -nu: new user \n -li:log in \n -ex: exit the password locker")
authentication_short_code = input().lower()
if authentication_short_code =='nu':
print("New account")
print("-"*30)
print("Username")
login_username = input()
print("Password")
password = input()
save_user(create_user(login_username,password))
print("\n")
print("-"*30)
print(f"Account for {login_username} created. Proceed to log in")
print("-"*30)
print("\n")
elif authentication_short_code == 'li':
print("Enter your user name")
login_username = input()
print("Enter your password")
password = input()
print("\n")
authenticated_password = user_authenticate(login_username,password)
if authenticated_password == password:
print("-"*30)
print("You have successfully logged in")
print("-"*30)
print("\n")
print("What would you like to do?")
while True:
print("Use the following short codes: \n -cc: create new credentials \n -fc: find a specific credential/delete a credential, \n -dc: display all your accounts \n -lo: log-out")
credentials_short_code = input().lower()
print("-"*30)
if credentials_short_code == 'cc':
print("New Credentials")
print("-"*30)
print("Account Name(eg Twitter)...")
account_name = input()
print(f"What is your username for {account_name}")
account_username = input()
print("\n")
print("Would you like a generated password? (y/n)?")
gen_pass = input().lower()
if gen_pass == 'y':
account_password = generate_pass()
print(f"Password generated is {account_password}")
save_credentials(create_credentials(account_name,account_username,account_password))
else:
print("Enter the account password, (should be longer than 7 characters long)")
account_password = input()
if len(account_password) >= 7:
save_credentials(create_credentials(account_name,account_username,account_password))
print(f"Account details for {account_name} have been saved")
print("\n")
else:
print("Password is too short. Try again")
elif credentials_short_code == "dc":
if display_credentials():
print("Here is a list of all your accounts and there credentials")
print('\n')
for credential in display_credentials():
print(f"{credential.account_name} ,username: {credential.account_username}, password: {credential.account_password}")
print("\n")
else:
print("You don't seem to have any credentials saved")
print("\n")
elif credentials_short_code == "fc":
print("Enter the name of account you are looking for e.g Twitter...")
searched_name = input()
if credential_exist(searched_name):
searched_credential = find_credential(searched_name)
print(f"{searched_credential.account_name} , username: {searched_credential.account_username}, password: {searched_credential.account_password} ")
print(f"Would you like to delete credentials for {searched_credential.account_name}? (y/n)")
delete_credential = input().lower()
if delete_credential == 'y':
del_credential(searched_credential)
print("Credentials have been deleted")
else:
print("Credentials have not been deleted")
else:
print("The credentials for that name do not exist")
elif credentials_short_code == 'lo':
print("You have successfully logged out..")
break
else:
print("I really didn't get that. Please use the short codes")
else:
print("Invalid username and password,try again")
elif authentication_short_code == 'ex':
print("Bye....")
break
else:
print("Invalid option, please use the short code")
if __name__ == '__main__':
main() | run.py |
from user import User
from credentials import Credentials
def create_user(login_username, password):
    """Create and return a new User built from the given login credentials."""
    return User(login_username, password)
def create_credentials(account_name, account_username, account_password):
    """Create and return a new Credentials record for one account."""
    return Credentials(account_name, account_username, account_password)
def save_user(user):
    """Persist *user* by delegating to its own save_user method."""
    user.save_user()
def user_authenticate(name, password):
    """Delegate authentication to User.authenticate_user and return its result."""
    return User.authenticate_user(name, password)
def save_credentials(credential):
    """Persist *credential* by delegating to its own save_credential method."""
    credential.save_credential()
def del_credential(credential):
    """Remove *credential* by delegating to its own delete_credential method."""
    credential.delete_credential()
def display_credentials():
    """Return every saved credential via Credentials.display_credential."""
    return Credentials.display_credential()
def generate_pass():
    """Return a random password produced by Credentials.generate_password."""
    return Credentials.generate_password()
def find_credential(name):
    """Look up and return the credential stored under *name*."""
    return Credentials.find_by_name(name)
def credential_exist(name):
    """Return True if a credential is stored under *name*."""
    return Credentials.name_exist(name)
def main():
    """Interactive CLI entry point for the password locker.

    Outer loop handles authentication: 'nu' creates an account, 'li' logs
    in, 'ex' quits.  After a successful login an inner loop manages stored
    credentials: 'cc' create, 'fc' find/delete, 'dc' display, 'lo' log out.
    """
    print("PASSWORD LOCKER")
    print("--"*30)
    print("An application that saves your account Details")
    print("\n")
    print("What is your name?")
    user_name = input()
    print("\n")
    print(f"Hello {user_name}. Are you a new user or would you like to create an account")
    print("\n")
    # --- authentication loop ---
    while True:
        print("Use these short codes:\n -nu: new user \n -li:log in \n -ex: exit the password locker")
        authentication_short_code = input().lower()
        if authentication_short_code =='nu':
            print("New account")
            print("-"*30)
            print("Username")
            login_username = input()
            print("Password")
            password = input()
            save_user(create_user(login_username,password))
            print("\n")
            print("-"*30)
            print(f"Account for {login_username} created. Proceed to log in")
            print("-"*30)
            print("\n")
        elif authentication_short_code == 'li':
            print("Enter your user name")
            login_username = input()
            print("Enter your password")
            password = input()
            print("\n")
            # NOTE(review): user_authenticate appears to return the stored
            # password on success -- verify against the User implementation.
            authenticated_password = user_authenticate(login_username,password)
            if authenticated_password == password:
                print("-"*30)
                print("You have successfully logged in")
                print("-"*30)
                print("\n")
                print("What would you like to do?")
                # --- credentials-management loop (runs until 'lo') ---
                while True:
                    print("Use the following short codes: \n -cc: create new credentials \n -fc: find a specific credential/delete a credential, \n -dc: display all your accounts \n -lo: log-out")
                    credentials_short_code = input().lower()
                    print("-"*30)
                    if credentials_short_code == 'cc':
                        print("New Credentials")
                        print("-"*30)
                        print("Account Name(eg Twitter)...")
                        account_name = input()
                        print(f"What is your username for {account_name}")
                        account_username = input()
                        print("\n")
                        print("Would you like a generated password? (y/n)?")
                        gen_pass = input().lower()
                        if gen_pass == 'y':
                            account_password = generate_pass()
                            print(f"Password generated is {account_password}")
                            save_credentials(create_credentials(account_name,account_username,account_password))
                        else:
                            print("Enter the account password, (should be longer than 7 characters long)")
                            account_password = input()
                            # NOTE(review): the prompt says "longer than 7 characters"
                            # but this check accepts exactly 7 -- confirm the intended
                            # minimum length.
                            if len(account_password) >= 7:
                                save_credentials(create_credentials(account_name,account_username,account_password))
                                print(f"Account details for {account_name} have been saved")
                                print("\n")
                            else:
                                # Too short: fall back to the menu, nothing is saved.
                                print("Password is too short. Try again")
                    elif credentials_short_code == "dc":
                        if display_credentials():
                            print("Here is a list of all your accounts and there credentials")
                            print('\n')
                            for credential in display_credentials():
                                print(f"{credential.account_name} ,username: {credential.account_username}, password: {credential.account_password}")
                                print("\n")
                        else:
                            print("You don't seem to have any credentials saved")
                            print("\n")
                    elif credentials_short_code == "fc":
                        print("Enter the name of account you are looking for e.g Twitter...")
                        searched_name = input()
                        if credential_exist(searched_name):
                            searched_credential = find_credential(searched_name)
                            print(f"{searched_credential.account_name} , username: {searched_credential.account_username}, password: {searched_credential.account_password} ")
                            print(f"Would you like to delete credentials for {searched_credential.account_name}? (y/n)")
                            delete_credential = input().lower()
                            if delete_credential == 'y':
                                del_credential(searched_credential)
                                print("Credentials have been deleted")
                            else:
                                print("Credentials have not been deleted")
                        else:
                            print("The credentials for that name do not exist")
                    elif credentials_short_code == 'lo':
                        print("You have successfully logged out..")
                        break
                    else:
                        print("I really didn't get that. Please use the short codes")
            else:
                print("Invalid username and password,try again")
        elif authentication_short_code == 'ex':
            print("Bye....")
            break
        else:
            print("Invalid option, please use the short code")
if __name__ == '__main__':
main() | 0.192236 | 0.069954 |
from __future__ import print_function
import os
import cv2
import json
import lmdb
import numpy as np
from matplotlib import pyplot
class USCISI_CMD_API( object ) :
    """Simple API for reading the USC-ISI CMD (copy-move detection) dataset.

    This API simply loads and parses CMD samples from an LMDB database.
    # Example:
    ```python
    # get the LMDB file path
    lmdb_dir = os.path.dirname( os.path.realpath(__file__) )
    # create dataset instance
    dataset = USCISI_CMD_API( lmdb_dir=lmdb_dir,
                              sample_file=os.path.join( lmdb_dir, 'samples.keys'),
                              differentiate_target=True )
    # retrieve the first 24 samples in the dataset
    samples = dataset( range(24) )
    # visualize these samples
    dataset.visualize_samples( samples )
    # retrieve 24 random samples in the dataset
    samples = dataset( [None]*24 )
    # visualize these samples
    dataset.visualize_samples( samples )
    # get the exact 50th sample in the dataset
    sample = dataset[50]
    # visualize these samples
    dataset.visualize_samples( [sample] )
    ```
    # Arguments:
        lmdb_dir = file path to the dataset LMDB
        sample_file = file path to the sample list, e.g. samples.keys
        differentiate_target = bool, whether or not to generate a 3-class target map
    # Note:
        1. samples, i.e. the output of "get_samples" or "__call__", is a list of
           samples; the dimensions of individual samples may differ
        2. CMD samples are generated upon
           - MIT SUN2012 dataset [https://groups.csail.mit.edu/vision/SUN/]
           - MS COCO dataset [http://cocodataset.org/#termsofuse]
        3. the detailed synthesis process can be found in the paper
    # Citation:
        <NAME> et.al. "BusterNet: Detecting Image Copy-Move Forgery With Source/Target Localization".
        In: European Conference on Computer Vision (ECCV). Springer. 2018.
    # Contact:
        Dr. <NAME>
        yue_wu<EMAIL>
    """
    def __init__( self, lmdb_dir, sample_file, differentiate_target = True ) :
        # Fail fast on missing dataset artifacts.
        assert os.path.isdir(lmdb_dir)
        self.lmdb_dir = lmdb_dir
        assert os.path.isfile(sample_file)
        self.sample_keys = self._load_sample_keys(sample_file)
        self.differentiate_target = differentiate_target
        print("INFO: successfully load USC-ISI CMD LMDB with {} keys".format( self.nb_samples ) )
    @property
    def nb_samples( self ) :
        # Total number of sample keys known to this dataset instance.
        return len( self.sample_keys )
    def _load_sample_keys( self, sample_file ) :
        '''Load sample keys from a given sample file
        INPUT:
            sample_file = str, path to sample key file (one key per line)
        OUTPUT:
            keys = list of str, each element is a valid key in LMDB
        '''
        with open( sample_file, 'r' ) as IN :
            keys = [ line.strip() for line in IN.readlines() ]
        return keys
    def _get_image_from_lut( self, lut ) :
        '''Decode image array from LMDB lut
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            image = np.ndarray, dtype='uint8', decoded from the stored JPEG buffer
        '''
        image_jpeg_buffer = lut['image_jpeg_buffer']
        # The JPEG bytes are stored as a flat int list; rebuild a column vector
        # for cv2.imdecode (flag 1 = decode as 3-channel color).
        image = cv2.imdecode( np.array(image_jpeg_buffer).astype('uint8').reshape([-1,1]), 1 )
        return image
    def _get_mask_from_lut( self, lut ) :
        '''Decode copy-move mask from LMDB lut
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            cmd_mask = np.ndarray, dtype='float32'
                       shape of HxW, if differentiate_target=False
                       shape of HxWx3, if differentiate_target=True
        NOTE:
            cmd_mask is encoded in the one-hot style, if differentiate_target=True.
            color channels R, G, and B stand for TARGET, SOURCE, and BACKGROUND classes
        '''
        def reconstruct( cnts, h, w, val=1 ) :
            # Rasterize polygon contours into a binary HxW mask filled with `val`.
            rst = np.zeros([h,w], dtype='uint8')
            cv2.fillPoly( rst, cnts, val )
            return rst
        h, w = lut['image_height'], lut['image_width']
        # Contours are stored flat; reshape to OpenCV's (N,1,2) point format.
        src_cnts = [ np.array(cnts).reshape([-1,1,2]) for cnts in lut['source_contour'] ]
        src_mask = reconstruct( src_cnts, h, w, val = 1 )
        tgt_cnts = [ np.array(cnts).reshape([-1,1,2]) for cnts in lut['target_contour'] ]
        tgt_mask = reconstruct( tgt_cnts, h, w, val = 1 )
        if ( self.differentiate_target ) :
            # 3-class target: one-hot stack [target, source, background]
            background = np.ones([h,w]).astype('uint8') - np.maximum(src_mask, tgt_mask)
            cmd_mask = np.dstack( [tgt_mask, src_mask, background ] ).astype(np.float32)
        else :
            # 2-class target: union of source and target regions
            cmd_mask = np.maximum(src_mask, tgt_mask).astype(np.float32)
        return cmd_mask
    def _get_transmat_from_lut( self, lut ) :
        '''Decode transform matrix between SOURCE and TARGET
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            trans_mat = np.ndarray, dtype='float32', size of 3x3
        '''
        trans_mat = lut['transform_matrix']
        return np.array(trans_mat).reshape([3,3])
    def _decode_lut_str( self, lut_str ) :
        '''Decode a raw LMDB lut
        INPUT:
            lut_str = str, raw JSON string retrieved from LMDB
        OUTPUT:
            image = np.ndarray, dtype='uint8', cmd image
            cmd_mask = np.ndarray, dtype='float32', cmd mask
            trans_mat = np.ndarray, dtype='float32', cmd transform matrix
        '''
        # 1. get raw lut
        lut = json.loads(lut_str)
        # 2. reconstruct image
        image = self._get_image_from_lut(lut)
        # 3. reconstruct copy-move masks
        cmd_mask = self._get_mask_from_lut(lut)
        # 4. get transform matrix if necessary
        trans_mat = self._get_transmat_from_lut(lut)
        return ( image, cmd_mask, trans_mat )
    def get_one_sample( self, key = None ) :
        '''Get a (random) sample from given key
        INPUT:
            key = str, a sample key, or None to select a random key
        OUTPUT:
            sample = tuple of (image, cmd_mask, trans_mat)
        '''
        return self.get_samples([key])[0]
    def get_samples( self, key_list ) :
        '''Get samples according to a given key list
        INPUT:
            key_list = list, each element is an LMDB key (str), an index (int),
                       or anything else (e.g. None) for a random sample
        OUTPUT:
            sample_list = list, each element is a tuple of (image, cmd_mask, trans_mat)
        '''
        # NOTE(review): a new LMDB environment is opened on every call; consider
        # opening it once in __init__ if this is a hot path -- confirm usage pattern.
        env = lmdb.open( self.lmdb_dir )
        sample_list = []
        with env.begin( write=False ) as txn :
            for key in key_list :
                if not isinstance( key, str ) and isinstance( key, int ):
                    # Integer index: wrap around so any int maps to a valid key.
                    idx = key % self.nb_samples
                    key = self.sample_keys[idx]
                elif isinstance( key, str ) :
                    # Already a literal LMDB key; use as-is.
                    pass
                else :
                    # Anything else (e.g. None) selects a random sample.
                    key = np.random.choice(self.sample_keys, 1)[0]
                    print("INFO: use random key", key)
                # NOTE(review): py-lmdb under Python 3 expects bytes keys; a str
                # key here may need key.encode() -- confirm target Python version.
                lut_str = txn.get( key )
                sample = self._decode_lut_str( lut_str )
                sample_list.append( sample )
        return sample_list
    def visualize_samples( self, sample_list ) :
        '''Visualize a list of samples: image on the left, mask on the right.
        '''
        for image, cmd_mask, trans_mat in sample_list :
            pyplot.figure(figsize=(10,10))
            pyplot.subplot(121)
            pyplot.imshow( image )
            pyplot.subplot(122)
            pyplot.imshow( cmd_mask )
        return
    def __call__( self, key_list ) :
        # Convenience alias: dataset(keys) == dataset.get_samples(keys)
        return self.get_samples( key_list )
def __getitem__( self, key_idx ) :
return self.get_one_sample( key=key_idx ) | Data/USCISI-CMFD-Small/api.py | from __future__ import print_function
import os
import cv2
import json
import lmdb
import numpy as np
from matplotlib import pyplot
class USCISI_CMD_API( object ) :
    """Simple API for reading the USC-ISI CMD (copy-move detection) dataset.

    This API simply loads and parses CMD samples from an LMDB database.
    # Example:
    ```python
    # get the LMDB file path
    lmdb_dir = os.path.dirname( os.path.realpath(__file__) )
    # create dataset instance
    dataset = USCISI_CMD_API( lmdb_dir=lmdb_dir,
                              sample_file=os.path.join( lmdb_dir, 'samples.keys'),
                              differentiate_target=True )
    # retrieve the first 24 samples in the dataset
    samples = dataset( range(24) )
    # visualize these samples
    dataset.visualize_samples( samples )
    # retrieve 24 random samples in the dataset
    samples = dataset( [None]*24 )
    # visualize these samples
    dataset.visualize_samples( samples )
    # get the exact 50th sample in the dataset
    sample = dataset[50]
    # visualize these samples
    dataset.visualize_samples( [sample] )
    ```
    # Arguments:
        lmdb_dir = file path to the dataset LMDB
        sample_file = file path to the sample list, e.g. samples.keys
        differentiate_target = bool, whether or not to generate a 3-class target map
    # Note:
        1. samples, i.e. the output of "get_samples" or "__call__", is a list of
           samples; the dimensions of individual samples may differ
        2. CMD samples are generated upon
           - MIT SUN2012 dataset [https://groups.csail.mit.edu/vision/SUN/]
           - MS COCO dataset [http://cocodataset.org/#termsofuse]
        3. the detailed synthesis process can be found in the paper
    # Citation:
        <NAME> et.al. "BusterNet: Detecting Image Copy-Move Forgery With Source/Target Localization".
        In: European Conference on Computer Vision (ECCV). Springer. 2018.
    # Contact:
        Dr. <NAME>
        yue_wu<EMAIL>
    """
    def __init__( self, lmdb_dir, sample_file, differentiate_target = True ) :
        # Fail fast on missing dataset artifacts.
        assert os.path.isdir(lmdb_dir)
        self.lmdb_dir = lmdb_dir
        assert os.path.isfile(sample_file)
        self.sample_keys = self._load_sample_keys(sample_file)
        self.differentiate_target = differentiate_target
        print("INFO: successfully load USC-ISI CMD LMDB with {} keys".format( self.nb_samples ) )
    @property
    def nb_samples( self ) :
        # Total number of sample keys known to this dataset instance.
        return len( self.sample_keys )
    def _load_sample_keys( self, sample_file ) :
        '''Load sample keys from a given sample file
        INPUT:
            sample_file = str, path to sample key file (one key per line)
        OUTPUT:
            keys = list of str, each element is a valid key in LMDB
        '''
        with open( sample_file, 'r' ) as IN :
            keys = [ line.strip() for line in IN.readlines() ]
        return keys
    def _get_image_from_lut( self, lut ) :
        '''Decode image array from LMDB lut
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            image = np.ndarray, dtype='uint8', decoded from the stored JPEG buffer
        '''
        image_jpeg_buffer = lut['image_jpeg_buffer']
        # The JPEG bytes are stored as a flat int list; rebuild a column vector
        # for cv2.imdecode (flag 1 = decode as 3-channel color).
        image = cv2.imdecode( np.array(image_jpeg_buffer).astype('uint8').reshape([-1,1]), 1 )
        return image
    def _get_mask_from_lut( self, lut ) :
        '''Decode copy-move mask from LMDB lut
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            cmd_mask = np.ndarray, dtype='float32'
                       shape of HxW, if differentiate_target=False
                       shape of HxWx3, if differentiate_target=True
        NOTE:
            cmd_mask is encoded in the one-hot style, if differentiate_target=True.
            color channels R, G, and B stand for TARGET, SOURCE, and BACKGROUND classes
        '''
        def reconstruct( cnts, h, w, val=1 ) :
            # Rasterize polygon contours into a binary HxW mask filled with `val`.
            rst = np.zeros([h,w], dtype='uint8')
            cv2.fillPoly( rst, cnts, val )
            return rst
        h, w = lut['image_height'], lut['image_width']
        # Contours are stored flat; reshape to OpenCV's (N,1,2) point format.
        src_cnts = [ np.array(cnts).reshape([-1,1,2]) for cnts in lut['source_contour'] ]
        src_mask = reconstruct( src_cnts, h, w, val = 1 )
        tgt_cnts = [ np.array(cnts).reshape([-1,1,2]) for cnts in lut['target_contour'] ]
        tgt_mask = reconstruct( tgt_cnts, h, w, val = 1 )
        if ( self.differentiate_target ) :
            # 3-class target: one-hot stack [target, source, background]
            background = np.ones([h,w]).astype('uint8') - np.maximum(src_mask, tgt_mask)
            cmd_mask = np.dstack( [tgt_mask, src_mask, background ] ).astype(np.float32)
        else :
            # 2-class target: union of source and target regions
            cmd_mask = np.maximum(src_mask, tgt_mask).astype(np.float32)
        return cmd_mask
    def _get_transmat_from_lut( self, lut ) :
        '''Decode transform matrix between SOURCE and TARGET
        INPUT:
            lut = dict, raw decoded lut retrieved from LMDB
        OUTPUT:
            trans_mat = np.ndarray, dtype='float32', size of 3x3
        '''
        trans_mat = lut['transform_matrix']
        return np.array(trans_mat).reshape([3,3])
    def _decode_lut_str( self, lut_str ) :
        '''Decode a raw LMDB lut
        INPUT:
            lut_str = str, raw JSON string retrieved from LMDB
        OUTPUT:
            image = np.ndarray, dtype='uint8', cmd image
            cmd_mask = np.ndarray, dtype='float32', cmd mask
            trans_mat = np.ndarray, dtype='float32', cmd transform matrix
        '''
        # 1. get raw lut
        lut = json.loads(lut_str)
        # 2. reconstruct image
        image = self._get_image_from_lut(lut)
        # 3. reconstruct copy-move masks
        cmd_mask = self._get_mask_from_lut(lut)
        # 4. get transform matrix if necessary
        trans_mat = self._get_transmat_from_lut(lut)
        return ( image, cmd_mask, trans_mat )
    def get_one_sample( self, key = None ) :
        '''Get a (random) sample from given key
        INPUT:
            key = str, a sample key, or None to select a random key
        OUTPUT:
            sample = tuple of (image, cmd_mask, trans_mat)
        '''
        return self.get_samples([key])[0]
    def get_samples( self, key_list ) :
        '''Get samples according to a given key list
        INPUT:
            key_list = list, each element is an LMDB key (str), an index (int),
                       or anything else (e.g. None) for a random sample
        OUTPUT:
            sample_list = list, each element is a tuple of (image, cmd_mask, trans_mat)
        '''
        # NOTE(review): a new LMDB environment is opened on every call; consider
        # opening it once in __init__ if this is a hot path -- confirm usage pattern.
        env = lmdb.open( self.lmdb_dir )
        sample_list = []
        with env.begin( write=False ) as txn :
            for key in key_list :
                if not isinstance( key, str ) and isinstance( key, int ):
                    # Integer index: wrap around so any int maps to a valid key.
                    idx = key % self.nb_samples
                    key = self.sample_keys[idx]
                elif isinstance( key, str ) :
                    # Already a literal LMDB key; use as-is.
                    pass
                else :
                    # Anything else (e.g. None) selects a random sample.
                    key = np.random.choice(self.sample_keys, 1)[0]
                    print("INFO: use random key", key)
                # NOTE(review): py-lmdb under Python 3 expects bytes keys; a str
                # key here may need key.encode() -- confirm target Python version.
                lut_str = txn.get( key )
                sample = self._decode_lut_str( lut_str )
                sample_list.append( sample )
        return sample_list
    def visualize_samples( self, sample_list ) :
        '''Visualize a list of samples: image on the left, mask on the right.
        '''
        for image, cmd_mask, trans_mat in sample_list :
            pyplot.figure(figsize=(10,10))
            pyplot.subplot(121)
            pyplot.imshow( image )
            pyplot.subplot(122)
            pyplot.imshow( cmd_mask )
        return
    def __call__( self, key_list ) :
        # Convenience alias: dataset(keys) == dataset.get_samples(keys)
        return self.get_samples( key_list )
def __getitem__( self, key_idx ) :
return self.get_one_sample( key=key_idx ) | 0.759047 | 0.678976 |
from datetime import datetime
from pathlib import PosixPath
from typing import List, Tuple, Any, Optional, Dict
from dateutil.tz import tzutc
from blurr.core.store_key import Key, KeyType
from blurr.runner.spark_runner import SparkRunner, get_spark_session
def execute_runner(stream_bts_file: str,
                   window_bts_file: Optional[str],
                   local_json_files: List[str],
                   old_state: Optional[Dict[str, Dict]] = None) -> Tuple[SparkRunner, Any]:
    """Build a SparkRunner for the given BTS files and run it on local JSON input.

    A non-empty *old_state* dict is first parallelized into an RDD so the
    runner can merge it with the freshly processed records.
    """
    runner = SparkRunner(stream_bts_file, window_bts_file)
    if old_state:
        old_state = get_spark_session().sparkContext.parallelize(old_state.items())
    record_rdd = runner.get_record_rdd_from_json_files(local_json_files)
    return runner, runner.execute(record_rdd, old_state)
def get_spark_output(out_dir: PosixPath) -> List:
    """Concatenate the lines of every Spark 'part-*' file under *out_dir*."""
    lines = []
    for part_file in out_dir.listdir(lambda x: x.basename.startswith('part')):
        lines.extend(part_file.readlines(cr=False))
    return lines
def test_only_stream_bts_provided():
    """With only a stream BTS, per-identity block data is produced and the
    window output stays empty for every user."""
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    block_data = {}
    window_data = {}
    for id, (per_id_block_data, per_id_window_data) in data.collect():
        block_data[id] = per_id_block_data
        window_data[id] = per_id_window_data
    assert len(block_data) == 3
    # Stream BTS output
    assert block_data['userA'][Key(KeyType.TIMESTAMP, 'userA', 'session', [],
                                   datetime(2018, 3, 7, 23, 35, 31, tzinfo=tzutc()))] == {
                                       '_identity': 'userA',
                                       '_start_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 23, 35, 32, tzinfo=tzutc()).isoformat(),
                                       'events': 2,
                                       'country': 'IN',
                                       'continent': 'World'
                                   }
    # NOTE(review): this Key uses a naive datetime while all others are
    # tz-aware -- confirm Key normalizes naive timestamps to UTC.
    assert block_data['userA'][Key(KeyType.TIMESTAMP, 'userA', 'session', [],
                                   datetime(2018, 3, 7, 22, 35, 31))] == {
                                       '_identity': 'userA',
                                       '_start_time': datetime(
                                           2018, 3, 7, 22, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 22, 35, 31, tzinfo=tzutc()).isoformat(),
                                       'events': 1,
                                       'country': 'US',
                                       'continent': 'North America'
                                   }
    assert block_data['userA'][Key(KeyType.DIMENSION, 'userA', 'state')] == {
        '_identity': 'userA',
        'country': 'IN',
        'continent': 'World'
    }
    assert block_data['userB'][Key(KeyType.TIMESTAMP, 'userB', 'session', [],
                                   datetime(2018, 3, 7, 23, 35, 31, tzinfo=tzutc()))] == {
                                       '_identity': 'userB',
                                       '_start_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       'events': 1,
                                       'country': '',
                                       'continent': ''
                                   }
    # No window BTS was supplied, so every user's window output is empty.
    assert window_data == {'userA': [], 'userB': [], 'userC': []}
def test_no_variable_aggreate_data_stored():
    """Variable aggregates ('vars') must not be persisted in block data.

    NOTE(review): 'aggreate' in the name is a typo for 'aggregate'; left
    unchanged because pytest collects tests by name.
    """
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    block_data = {}
    for id, (per_id_block_data, _) in data.collect():
        block_data[id] = per_id_block_data
    # Variables should not be stored
    assert Key(KeyType.DIMENSION, 'userA', 'vars') not in block_data['userA']
def test_stream_and_window_bts_provided():
    """When a window BTS is also supplied, per-user window rows are emitted."""
    runner, data = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                  ['tests/data/raw.json'])
    window_data = {}
    for id, (_, per_id_window_data) in data.collect():
        window_data[id] = per_id_window_data
    assert window_data['userA'] == [{
        'last_session.events': 1,
        'last_session._identity': 'userA',
        'last_day.total_events': 1,
        'last_day._identity': 'userA'
    }]
    # userB has no qualifying window rows.
    assert window_data['userB'] == []
def test_stream_bts_with_state():
    """Processing raw.json then raw2.json with carried-over state must equal
    processing both files in a single run (stream BTS only)."""
    _, data_combined = execute_runner('tests/data/stream.yml', None,
                                      ['tests/data/raw.json', 'tests/data/raw2.json'], None)
    _, data_separate = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'], None)
    old_state = {
        identity: block_data
        for identity, (block_data, window_data) in data_separate.collect()
    }
    _, data_separate = execute_runner('tests/data/stream.yml', None, ['tests/data/raw2.json'],
                                      old_state)
    # BUG FIX: dict.update() returns None, so the old assertion
    # `{}.update(a) == {}.update(b)` compared None == None and could never
    # fail.  Compare the collected results as dicts instead.
    assert dict(data_separate.collect()) == dict(data_combined.collect())
def test_stream_and_window_bts_with_state():
    """Processing raw.json then raw2.json with carried-over state must equal
    processing both files in a single run (stream + window BTS)."""
    _, data_combined = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw.json', 'tests/data/raw2.json'], None)
    _, data_separate = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw.json'], None)
    old_state = {
        identity: block_data
        for identity, (block_data, window_data) in data_separate.collect()
    }
    _, data_separate = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw2.json'], old_state)
    # BUG FIX: dict.update() returns None, so the old assertion
    # `{}.update(a) == {}.update(b)` compared None == None and could never
    # fail.  Compare the collected results as dicts instead.
    assert dict(data_separate.collect()) == dict(data_combined.collect())
def test_write_output_file_only_source_bts_provided(tmpdir):
    """Without a window BTS, output is written as JSON lines of (key, block)."""
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    out_dir = tmpdir.join('out')
    runner.write_output_file(str(out_dir), data)
    output_text = get_spark_output(out_dir)
    assert ('["userA/session//2018-03-07T22:35:31+00:00", {'
            '"_identity": "userA", '
            '"_start_time": "2018-03-07T22:35:31+00:00", '
            '"_end_time": "2018-03-07T22:35:31+00:00", '
            '"events": 1, '
            '"country": "US", '
            '"continent": "North America"'
            '}]') in output_text
def test_write_output_file_with_stream_and_window_bts_provided(tmpdir):
    """With a window BTS, output is written as CSV with a header row."""
    runner, data = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                  ['tests/data/raw.json'])
    out_dir = tmpdir.join('out')
    runner.write_output_file(str(out_dir), data)
    output_text = get_spark_output(out_dir)
    assert 'last_day._identity,last_day.total_events,last_session._identity,last_session.events' in output_text
assert 'userA,1,userA,1' in output_text | tests/runner/spark_runner_test.py | from datetime import datetime
from pathlib import PosixPath
from typing import List, Tuple, Any, Optional, Dict
from dateutil.tz import tzutc
from blurr.core.store_key import Key, KeyType
from blurr.runner.spark_runner import SparkRunner, get_spark_session
def execute_runner(stream_bts_file: str,
                   window_bts_file: Optional[str],
                   local_json_files: List[str],
                   old_state: Optional[Dict[str, Dict]] = None) -> Tuple[SparkRunner, Any]:
    """Build a SparkRunner for the given BTS files and run it on local JSON input.

    A non-empty *old_state* dict is first parallelized into an RDD so the
    runner can merge it with the freshly processed records.
    """
    runner = SparkRunner(stream_bts_file, window_bts_file)
    if old_state:
        old_state = get_spark_session().sparkContext.parallelize(old_state.items())
    record_rdd = runner.get_record_rdd_from_json_files(local_json_files)
    return runner, runner.execute(record_rdd, old_state)
def get_spark_output(out_dir: PosixPath) -> List:
    """Concatenate the lines of every Spark 'part-*' file under *out_dir*."""
    lines = []
    for part_file in out_dir.listdir(lambda x: x.basename.startswith('part')):
        lines.extend(part_file.readlines(cr=False))
    return lines
def test_only_stream_bts_provided():
    """With only a stream BTS, per-identity block data is produced and the
    window output stays empty for every user."""
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    block_data = {}
    window_data = {}
    for id, (per_id_block_data, per_id_window_data) in data.collect():
        block_data[id] = per_id_block_data
        window_data[id] = per_id_window_data
    assert len(block_data) == 3
    # Stream BTS output
    assert block_data['userA'][Key(KeyType.TIMESTAMP, 'userA', 'session', [],
                                   datetime(2018, 3, 7, 23, 35, 31, tzinfo=tzutc()))] == {
                                       '_identity': 'userA',
                                       '_start_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 23, 35, 32, tzinfo=tzutc()).isoformat(),
                                       'events': 2,
                                       'country': 'IN',
                                       'continent': 'World'
                                   }
    # NOTE(review): this Key uses a naive datetime while all others are
    # tz-aware -- confirm Key normalizes naive timestamps to UTC.
    assert block_data['userA'][Key(KeyType.TIMESTAMP, 'userA', 'session', [],
                                   datetime(2018, 3, 7, 22, 35, 31))] == {
                                       '_identity': 'userA',
                                       '_start_time': datetime(
                                           2018, 3, 7, 22, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 22, 35, 31, tzinfo=tzutc()).isoformat(),
                                       'events': 1,
                                       'country': 'US',
                                       'continent': 'North America'
                                   }
    assert block_data['userA'][Key(KeyType.DIMENSION, 'userA', 'state')] == {
        '_identity': 'userA',
        'country': 'IN',
        'continent': 'World'
    }
    assert block_data['userB'][Key(KeyType.TIMESTAMP, 'userB', 'session', [],
                                   datetime(2018, 3, 7, 23, 35, 31, tzinfo=tzutc()))] == {
                                       '_identity': 'userB',
                                       '_start_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       '_end_time': datetime(
                                           2018, 3, 7, 23, 35, 31, tzinfo=tzutc()).isoformat(),
                                       'events': 1,
                                       'country': '',
                                       'continent': ''
                                   }
    # No window BTS was supplied, so every user's window output is empty.
    assert window_data == {'userA': [], 'userB': [], 'userC': []}
def test_no_variable_aggreate_data_stored():
    """Variable aggregates ('vars') must not be persisted in block data.

    NOTE(review): 'aggreate' in the name is a typo for 'aggregate'; left
    unchanged because pytest collects tests by name.
    """
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    block_data = {}
    for id, (per_id_block_data, _) in data.collect():
        block_data[id] = per_id_block_data
    # Variables should not be stored
    assert Key(KeyType.DIMENSION, 'userA', 'vars') not in block_data['userA']
def test_stream_and_window_bts_provided():
    """When a window BTS is also supplied, per-user window rows are emitted."""
    runner, data = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                  ['tests/data/raw.json'])
    window_data = {}
    for id, (_, per_id_window_data) in data.collect():
        window_data[id] = per_id_window_data
    assert window_data['userA'] == [{
        'last_session.events': 1,
        'last_session._identity': 'userA',
        'last_day.total_events': 1,
        'last_day._identity': 'userA'
    }]
    # userB has no qualifying window rows.
    assert window_data['userB'] == []
def test_stream_bts_with_state():
    """Processing raw.json then raw2.json with carried-over state must equal
    processing both files in a single run (stream BTS only)."""
    _, data_combined = execute_runner('tests/data/stream.yml', None,
                                      ['tests/data/raw.json', 'tests/data/raw2.json'], None)
    _, data_separate = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'], None)
    old_state = {
        identity: block_data
        for identity, (block_data, window_data) in data_separate.collect()
    }
    _, data_separate = execute_runner('tests/data/stream.yml', None, ['tests/data/raw2.json'],
                                      old_state)
    # BUG FIX: dict.update() returns None, so the old assertion
    # `{}.update(a) == {}.update(b)` compared None == None and could never
    # fail.  Compare the collected results as dicts instead.
    assert dict(data_separate.collect()) == dict(data_combined.collect())
def test_stream_and_window_bts_with_state():
    """Processing raw.json then raw2.json with carried-over state must equal
    processing both files in a single run (stream + window BTS)."""
    _, data_combined = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw.json', 'tests/data/raw2.json'], None)
    _, data_separate = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw.json'], None)
    old_state = {
        identity: block_data
        for identity, (block_data, window_data) in data_separate.collect()
    }
    _, data_separate = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                      ['tests/data/raw2.json'], old_state)
    # BUG FIX: dict.update() returns None, so the old assertion
    # `{}.update(a) == {}.update(b)` compared None == None and could never
    # fail.  Compare the collected results as dicts instead.
    assert dict(data_separate.collect()) == dict(data_combined.collect())
def test_write_output_file_only_source_bts_provided(tmpdir):
    """Without a window BTS, output is written as JSON lines of (key, block)."""
    runner, data = execute_runner('tests/data/stream.yml', None, ['tests/data/raw.json'])
    out_dir = tmpdir.join('out')
    runner.write_output_file(str(out_dir), data)
    output_text = get_spark_output(out_dir)
    assert ('["userA/session//2018-03-07T22:35:31+00:00", {'
            '"_identity": "userA", '
            '"_start_time": "2018-03-07T22:35:31+00:00", '
            '"_end_time": "2018-03-07T22:35:31+00:00", '
            '"events": 1, '
            '"country": "US", '
            '"continent": "North America"'
            '}]') in output_text
def test_write_output_file_with_stream_and_window_bts_provided(tmpdir):
    """With a window BTS, output is written as CSV with a header row."""
    runner, data = execute_runner('tests/data/stream.yml', 'tests/data/window.yml',
                                  ['tests/data/raw.json'])
    out_dir = tmpdir.join('out')
    runner.write_output_file(str(out_dir), data)
    output_text = get_spark_output(out_dir)
    assert 'last_day._identity,last_day.total_events,last_session._identity,last_session.events' in output_text
assert 'userA,1,userA,1' in output_text | 0.743308 | 0.406921 |
import torch.nn.functional as F
from util.util import compute_tensor_iu
def get_new_iou_hook(values, size):
    """Return ('iou/new_iou_<size>', IoU) from the accumulated i/u counters."""
    intersection = values['iou/new_i_%s' % size]
    union = values['iou/new_u_%s' % size]
    return 'iou/new_iou_%s' % size, intersection / union
def get_orig_iou_hook(values):
    """Return ('iou/orig_iou', IoU) of the original segmentation."""
    iou = values['iou/orig_i'] / values['iou/orig_u']
    return 'iou/orig_iou', iou
def get_iou_gain(values, size):
    """Return ('iou/iou_gain_<size>', new IoU minus the original IoU)."""
    gain = values['iou/new_iou_%s' % size] - values['iou/orig_iou']
    return 'iou/iou_gain_%s' % size, gain
# Hook functions that turn the accumulated intersection/union counters in a
# values dict into named IoU metrics (see compute_loss_and_metrics below).
# Full set: original IoU plus new IoU and IoU gain at every output scale.
iou_hooks_to_be_used = [
    get_orig_iou_hook,
    lambda x: get_new_iou_hook(x, '224'), lambda x: get_iou_gain(x, '224'),
    lambda x: get_new_iou_hook(x, '56'), lambda x: get_iou_gain(x, '56'),
    lambda x: get_new_iou_hook(x, '28'), lambda x: get_iou_gain(x, '28'),
    lambda x: get_new_iou_hook(x, '28_2'), lambda x: get_iou_gain(x, '28_2'),
    lambda x: get_new_iou_hook(x, '28_3'), lambda x: get_iou_gain(x, '28_3'),
    lambda x: get_new_iou_hook(x, '56_2'), lambda x: get_iou_gain(x, '56_2'),
]
# Reduced set: only the final full-resolution (224) output is tracked.
iou_hooks_final_only = [
    get_orig_iou_hook,
    lambda x: get_new_iou_hook(x, '224'), lambda x: get_iou_gain(x, '224'),
]
# Compute common loss and metric for generator only
def compute_loss_and_metrics(images, para, detailed=True, need_loss=True, has_lower_res=True):
    """
    This part compute loss and metrics for the generator

    images: dict of tensors -- ground truth ('gt'), input segmentation ('seg'),
            predictions/logits at each scale, and Sobel maps for the gradient loss.
    para: dict of loss weights ('ce_weight', 'l1_weight', 'l2_weight',
          'grad_weight'), one weight per output scale.
    detailed: currently unused in this function.
    need_loss: when False, only the IoU counters are computed.
    has_lower_res: when False, only the full-resolution (224) output exists.
    Returns a dict of losses and intersection/union counters; the iou hook
    functions above turn those counters into IoU metrics.
    """
    loss_and_metrics = {}

    gt = images['gt']
    seg = images['seg']
    pred_224 = images['pred_224']
    if has_lower_res:
        pred_28 = images['pred_28']
        pred_56 = images['pred_56']
        pred_28_2 = images['pred_28_2']
        pred_28_3 = images['pred_28_3']
        pred_56_2 = images['pred_56_2']

    if need_loss:
        # Loss weights
        ce_weights = para['ce_weight']
        l1_weights = para['l1_weight']
        l2_weights = para['l2_weight']

        # temp holder for losses at different scale
        ce_loss = [0] * 6
        l1_loss = [0] * 6
        l2_loss = [0] * 6
        loss = [0] * 6

        # Cross-entropy on the raw logits; targets are binarized at 0.5.
        ce_loss[0] = F.binary_cross_entropy_with_logits(images['out_224'], (gt>0.5).float())
        if has_lower_res:
            ce_loss[1] = F.binary_cross_entropy_with_logits(images['out_28'], (gt>0.5).float())
            ce_loss[2] = F.binary_cross_entropy_with_logits(images['out_56'], (gt>0.5).float())
            ce_loss[3] = F.binary_cross_entropy_with_logits(images['out_28_2'], (gt>0.5).float())
            ce_loss[4] = F.binary_cross_entropy_with_logits(images['out_28_3'], (gt>0.5).float())
            ce_loss[5] = F.binary_cross_entropy_with_logits(images['out_56_2'], (gt>0.5).float())

        l1_loss[0] = F.l1_loss(pred_224, gt)
        if has_lower_res:
            # NOTE(review): l2_loss[0] for the full-resolution prediction is only
            # computed when lower-resolution outputs exist -- confirm this is
            # intended rather than an indentation slip.
            l2_loss[0] = F.mse_loss(pred_224, gt)
            l1_loss[1] = F.l1_loss(pred_28, gt)
            l2_loss[1] = F.mse_loss(pred_28, gt)
            l1_loss[2] = F.l1_loss(pred_56, gt)
            l2_loss[2] = F.mse_loss(pred_56, gt)
        if has_lower_res:
            l1_loss[3] = F.l1_loss(pred_28_2, gt)
            l2_loss[3] = F.mse_loss(pred_28_2, gt)
            l1_loss[4] = F.l1_loss(pred_28_3, gt)
            l2_loss[4] = F.mse_loss(pred_28_3, gt)
            l1_loss[5] = F.l1_loss(pred_56_2, gt)
            l2_loss[5] = F.mse_loss(pred_56_2, gt)

        # Edge-agreement loss between ground-truth and predicted Sobel maps.
        loss_and_metrics['grad_loss'] = F.l1_loss(images['gt_sobel'], images['pred_sobel'])

        # Weighted loss for different levels
        for i in range(6):
            loss[i] = ce_loss[i] * ce_weights[i] + \
                      l1_loss[i] * l1_weights[i] + \
                      l2_loss[i] * l2_weights[i]
        # Gradient loss only contributes to the full-resolution term.
        loss[0] += loss_and_metrics['grad_loss'] * para['grad_weight']

    """
    Compute IOU stats
    """
    orig_total_i, orig_total_u = compute_tensor_iu(seg>0.5, gt>0.5)
    loss_and_metrics['iou/orig_i'] = orig_total_i
    loss_and_metrics['iou/orig_u'] = orig_total_u

    new_total_i, new_total_u = compute_tensor_iu(pred_224>0.5, gt>0.5)
    loss_and_metrics['iou/new_i_224'] = new_total_i
    loss_and_metrics['iou/new_u_224'] = new_total_u

    if has_lower_res:
        new_total_i, new_total_u = compute_tensor_iu(pred_56>0.5, gt>0.5)
        loss_and_metrics['iou/new_i_56'] = new_total_i
        loss_and_metrics['iou/new_u_56'] = new_total_u

        new_total_i, new_total_u = compute_tensor_iu(pred_28>0.5, gt>0.5)
        loss_and_metrics['iou/new_i_28'] = new_total_i
        loss_and_metrics['iou/new_u_28'] = new_total_u

        new_total_i, new_total_u = compute_tensor_iu(pred_28_2>0.5, gt>0.5)
        loss_and_metrics['iou/new_i_28_2'] = new_total_i
        loss_and_metrics['iou/new_u_28_2'] = new_total_u

        new_total_i, new_total_u = compute_tensor_iu(pred_28_3>0.5, gt>0.5)
        loss_and_metrics['iou/new_i_28_3'] = new_total_i
        loss_and_metrics['iou/new_u_28_3'] = new_total_u

        new_total_i, new_total_u = compute_tensor_iu(pred_56_2>0.5, gt>0.5)
        loss_and_metrics['iou/new_i_56_2'] = new_total_i
        loss_and_metrics['iou/new_u_56_2'] = new_total_u

    """
    All done.
    Now gather everything in a dict for logging
    """
    if need_loss:
        loss_and_metrics['total_loss'] = 0
        for i in range(6):
            loss_and_metrics['ce_loss/s_%d'%i] = ce_loss[i]
            loss_and_metrics['l1_loss/s_%d'%i] = l1_loss[i]
            loss_and_metrics['l2_loss/s_%d'%i] = l2_loss[i]
            loss_and_metrics['loss/s_%d'%i] = loss[i]

            loss_and_metrics['total_loss'] += loss[i]
return loss_and_metrics | util/metrics_compute.py | import torch.nn.functional as F
from util.util import compute_tensor_iu
def get_new_iou_hook(values, size):
    """Derive ('iou/new_iou_<size>', IoU) from the logged i/u sums for that size."""
    key = 'iou/new_iou_%s' % size
    intersection = values['iou/new_i_%s' % size]
    union = values['iou/new_u_%s' % size]
    return key, intersection / union
def get_orig_iou_hook(values):
    """Derive ('iou/orig_iou', IoU) of the original segmentation from its i/u sums."""
    intersection = values['iou/orig_i']
    union = values['iou/orig_u']
    return 'iou/orig_iou', intersection / union
def get_iou_gain(values, size):
    """Derive ('iou/iou_gain_<size>', gain) = refined IoU minus original IoU."""
    gain = values['iou/new_iou_%s' % size] - values['iou/orig_iou']
    return 'iou/iou_gain_%s' % size, gain
# Hooks consumed by the logger: each maps raw i/u sums in the metrics dict
# to a derived IoU (and IoU-gain) entry, one pair per output scale.
_hook_sizes = ('224', '56', '28', '28_2', '28_3', '56_2')

iou_hooks_to_be_used = [get_orig_iou_hook] + [
    hook
    for size in _hook_sizes
    for hook in (
        # Bind `size` as a default argument so each lambda keeps its own value.
        lambda x, _s=size: get_new_iou_hook(x, _s),
        lambda x, _s=size: get_iou_gain(x, _s),
    )
]
# Reduced hook set: only the original segmentation and the final 224 output.
iou_hooks_final_only = [
    get_orig_iou_hook,
    lambda x: get_new_iou_hook(x, '224'),
    lambda x: get_iou_gain(x, '224'),
]
# Compute common loss and metric for generator only
def compute_loss_and_metrics(images, para, detailed=True, need_loss=True, has_lower_res=True):
    """Compute the generator's losses and IoU statistics.

    Args:
        images: dict of tensors. Reads 'gt', 'seg', 'pred_224'/'out_224',
            'gt_sobel'/'pred_sobel', and the 28/56-scale outputs when
            has_lower_res is True. (Shapes are assumed broadcast-compatible
            with 'gt' — TODO confirm against the model.)
        para: dict of hyper-parameters ('ce_weight', 'l1_weight', 'l2_weight'
            as 6-element sequences, plus scalar 'grad_weight').
        detailed: currently unused — kept for interface compatibility.
        need_loss: if False, only IoU statistics are computed.
        has_lower_res: whether the lower-resolution side outputs exist.

    Returns:
        dict mapping metric/loss names to tensors, including 'total_loss'
        when need_loss is True.
    """
    loss_and_metrics = {}

    gt = images['gt']
    seg = images['seg']
    pred_224 = images['pred_224']
    if has_lower_res:
        pred_28 = images['pred_28']
        pred_56 = images['pred_56']
        pred_28_2 = images['pred_28_2']
        pred_28_3 = images['pred_28_3']
        pred_56_2 = images['pred_56_2']

    if need_loss:
        # Per-scale loss weights.
        ce_weights = para['ce_weight']
        l1_weights = para['l1_weight']
        l2_weights = para['l2_weight']

        # Temporary holders for losses at the six scales.
        ce_loss = [0] * 6
        l1_loss = [0] * 6
        l2_loss = [0] * 6
        loss = [0] * 6

        gt_mask = (gt > 0.5).float()
        ce_loss[0] = F.binary_cross_entropy_with_logits(images['out_224'], gt_mask)
        if has_lower_res:
            ce_loss[1] = F.binary_cross_entropy_with_logits(images['out_28'], gt_mask)
            ce_loss[2] = F.binary_cross_entropy_with_logits(images['out_56'], gt_mask)
            ce_loss[3] = F.binary_cross_entropy_with_logits(images['out_28_2'], gt_mask)
            ce_loss[4] = F.binary_cross_entropy_with_logits(images['out_28_3'], gt_mask)
            ce_loss[5] = F.binary_cross_entropy_with_logits(images['out_56_2'], gt_mask)

        l1_loss[0] = F.l1_loss(pred_224, gt)
        if has_lower_res:
            # NOTE(review): in the recovered layout l2_loss[0] sits inside this
            # branch, so the MSE term of the 224 output is skipped when
            # has_lower_res is False — confirm this is intended.
            l2_loss[0] = F.mse_loss(pred_224, gt)
            l1_loss[1] = F.l1_loss(pred_28, gt)
            l2_loss[1] = F.mse_loss(pred_28, gt)
            l1_loss[2] = F.l1_loss(pred_56, gt)
            l2_loss[2] = F.mse_loss(pred_56, gt)
        if has_lower_res:
            l1_loss[3] = F.l1_loss(pred_28_2, gt)
            l2_loss[3] = F.mse_loss(pred_28_2, gt)
            l1_loss[4] = F.l1_loss(pred_28_3, gt)
            l2_loss[4] = F.mse_loss(pred_28_3, gt)
            l1_loss[5] = F.l1_loss(pred_56_2, gt)
            l2_loss[5] = F.mse_loss(pred_56_2, gt)

        loss_and_metrics['grad_loss'] = F.l1_loss(images['gt_sobel'], images['pred_sobel'])

        # Weighted loss per level; the gradient (sobel) loss only feeds level 0.
        for i in range(6):
            loss[i] = (ce_loss[i] * ce_weights[i]
                       + l1_loss[i] * l1_weights[i]
                       + l2_loss[i] * l2_weights[i])
        loss[0] += loss_and_metrics['grad_loss'] * para['grad_weight']

    # ---- IoU statistics (intersection/union sums; ratios derived by hooks) ----
    orig_total_i, orig_total_u = compute_tensor_iu(seg > 0.5, gt > 0.5)
    loss_and_metrics['iou/orig_i'] = orig_total_i
    loss_and_metrics['iou/orig_u'] = orig_total_u

    new_total_i, new_total_u = compute_tensor_iu(pred_224 > 0.5, gt > 0.5)
    loss_and_metrics['iou/new_i_224'] = new_total_i
    loss_and_metrics['iou/new_u_224'] = new_total_u

    if has_lower_res:
        # Same bookkeeping for every lower-resolution output.
        for name, pred in (('56', pred_56), ('28', pred_28), ('28_2', pred_28_2),
                           ('28_3', pred_28_3), ('56_2', pred_56_2)):
            total_i, total_u = compute_tensor_iu(pred > 0.5, gt > 0.5)
            loss_and_metrics['iou/new_i_%s' % name] = total_i
            loss_and_metrics['iou/new_u_%s' % name] = total_u

    # ---- Gather everything in a dict for logging ----
    if need_loss:
        loss_and_metrics['total_loss'] = 0
        for i in range(6):
            loss_and_metrics['ce_loss/s_%d' % i] = ce_loss[i]
            loss_and_metrics['l1_loss/s_%d' % i] = l1_loss[i]
            loss_and_metrics['l2_loss/s_%d' % i] = l2_loss[i]
            loss_and_metrics['loss/s_%d' % i] = loss[i]
            loss_and_metrics['total_loss'] += loss[i]

    return loss_and_metrics
import logging
class Tiling(object):
    """A tiling of unit squares grouped into tiles.

    Each tile is an iterable of (x, y) squares. The class renders the
    tiling as ASCII ('+', '-', '|') and exposes an abstract description
    (faces, edge bars, node bitmasks) via abstract().
    """

    def __init__(self, tiles):
        logging.debug("Initializing tiling - tiles: %s" % (tiles,))
        self.tiles = tiles
        self.squares = [square for tile in tiles for square in tile]
        logging.debug("Initializing tiling - squares: %s" % (self.squares,))
        # Bounding box of every square in the tiling.
        xs = [x for x, _ in self.squares]
        ys = [y for _, y in self.squares]
        self.min_x, self.max_x = min(xs), max(xs)
        self.min_y, self.max_y = min(ys), max(ys)

    def format_row_sides(self, row):
        # One text line of vertical bars for a square row.
        cells = ["|" if filled else " " for filled in row]
        return " ".join(cells)

    def format_row_upper(self, row):
        # One text line of horizontal bars between square rows.
        middle = "+".join("-" if filled else " " for filled in row)
        return "+" + middle + "+"

    def format_tiling_lines(self, h, v):
        height = self.max_y - self.min_y + 1
        for row in range(height):
            yield self.format_row_upper(h[row])
            yield self.format_row_sides(v[row])
        yield self.format_row_upper(h[height])

    def make_base_h_row(self, i):
        # A horizontal bar exists wherever a square touches it from above or below.
        occupied = set()
        for x, y in self.squares:
            if y == i - 1 or y == i:
                occupied.add(x)
        return [x in occupied for x in range(self.min_x, self.max_x + 1)]

    def make_base_v_row(self, i):
        # A vertical bar exists on both sides of every square in row i.
        edges = set()
        for x, y in self.squares:
            if y == i:
                edges.add(x)
                edges.add(x + 1)
        return [x in edges for x in range(self.min_x, max(edges) + 1)]

    def calculate_tiling(self):
        # Start from the full grid, then erase the bar between any two
        # adjacent squares that belong to the same tile.
        h = [self.make_base_h_row(i) for i in range(self.min_y, self.max_y + 2)]
        v = [self.make_base_v_row(i) for i in range(self.min_y, self.max_y + 1)]
        for tile in self.tiles:
            for first in tile:
                for second in tile:
                    low, high = sorted([first, second])
                    ax, ay = low[0] - self.min_x, low[1] - self.min_y
                    bx, by = high[0] - self.min_x, high[1] - self.min_y
                    if ay == by and bx == ax + 1:
                        v[ay][bx] = False
                    if ax == bx and by == ay + 1:
                        h[by][ax] = False
        return h, v

    def row_max(self, i):
        # Largest x occupied in square row i.
        return max(x for x, y in self.squares if y == i)

    def faces(self, ragged):
        # -1 fills ragged rows (later overwritten for tiles / open cells),
        # -2 marks cells open to the outside.
        if ragged:
            grid = [[-1] * (self.row_max(j) - self.min_x + 1)
                    for j in range(self.min_y, self.max_y + 1)]
        else:
            width = self.max_x - self.min_x + 1
            grid = [[-2] * width for _ in range(self.min_y, self.max_y + 1)]
        for index, tile in enumerate(self.tiles):
            for x, y in tile:
                grid[y - self.min_y][x - self.min_x] = index
        for x, y in self.open_blanks():
            grid[y - self.min_y][x - self.min_x] = -2
        return grid

    def open_blanks(self):
        # Breadth-first fill from the boundary: every blank reachable from
        # the outside of the ragged bounding region.
        blanks = set()
        occupied = set(self.squares)
        queue = []
        queue.extend((x, self.min_y)
                     for x in range(self.min_x, self.row_max(self.min_y) + 1))
        queue.extend((self.min_x, y)
                     for y in range(self.min_y + 1, self.max_y))
        queue.extend(
            (x, y)
            for y in range(self.min_y + 1, self.max_y)
            for x in range(min(self.row_max(y - 1), self.row_max(y + 1)),
                           self.row_max(y) + 1)
        )
        queue.extend((x, self.max_y)
                     for x in range(self.min_x, self.row_max(self.max_y) + 1))
        # The queue grows while it is being iterated.
        for square in queue:
            if square in occupied:
                continue
            blanks.add(square)
            for x, y in self.neighbours(square):
                if (self.min_y <= y <= self.max_y
                        and self.min_x <= x <= self.row_max(y)
                        and (x, y) not in blanks):
                    queue.append((x, y))
        return blanks

    @staticmethod
    def neighbours(sq):
        # 4-neighbourhood, in left/right/down/up order.
        x, y = sq
        for step_x, step_y in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            yield (x + step_x, y + step_y)

    def nodes(self):
        # Per-lattice-node sum of incident bars; each incident bar direction
        # contributes a distinct power of two (bitmask-like encoding).
        width = self.max_x - self.min_x + 2
        height = self.max_y - self.min_y + 2
        nodes = [[0] * width for _ in range(height)]
        for i, row in enumerate(self.v):
            for j, bar in enumerate(row):
                if bar:
                    nodes[i][j] += 4
                    nodes[i + 1][j] += 1
        for i, row in enumerate(self.h):
            for j, bar in enumerate(row):
                if bar:
                    nodes[i][j] += 2
                    nodes[i][j + 1] += 8
        return nodes

    def abstract(self, ragged=True):
        self.h, self.v = self.calculate_tiling()
        return self.faces(ragged), self.v, self.h, self.nodes()
class Tiling(object):
    """A tiling of unit squares grouped into tiles.

    Each tile is an iterable of (x, y) squares; abstract() yields the
    face/edge/node description of the tiling. (Recovered from a
    whitespace-stripped dump: indentation reconstructed and the trailing
    dataset artifacts removed from the final line.)
    """

    def __init__(self, tiles):
        logging.debug("Initializing tiling - tiles: %s" % (tiles,))
        self.tiles = tiles
        self.squares = [sq for tile in tiles for sq in tile]
        logging.debug("Initializing tiling - squares: %s" % (self.squares,))
        # Bounding box of all squares.
        self.min_x = min(x for x, y in self.squares)
        self.min_y = min(y for x, y in self.squares)
        self.max_x = max(x for x, y in self.squares)
        self.max_y = max(y for x, y in self.squares)

    def format_row_sides(self, row):
        # Vertical bars of one square row.
        return " ".join("|" if r else " " for r in row)

    def format_row_upper(self, row):
        # Horizontal bars above one square row.
        return "+" + "+".join("-" if r else " " for r in row) + "+"

    def format_tiling_lines(self, h, v):
        for i in range(0, self.max_y - self.min_y + 1):
            yield self.format_row_upper(h[i])
            yield self.format_row_sides(v[i])
        yield self.format_row_upper(h[self.max_y - self.min_y + 1])

    def make_base_h_row(self, i):
        # A bar is drawn wherever a square touches it from above or below.
        lines_above = set(x for x, y in self.squares if y == i - 1)
        lines_below = set(x for x, y in self.squares if y == i)
        lines = lines_above.union(lines_below)
        return [(x in lines) for x in range(self.min_x, self.max_x + 1)]

    def make_base_v_row(self, i):
        # Both sides of every square in row i carry a vertical bar.
        lines_left = set(x for x, y in self.squares if y == i)
        lines_right = set(x + 1 for x, y in self.squares if y == i)
        lines = lines_left.union(lines_right)
        return [(x in lines) for x in range(self.min_x, max(lines) + 1)]

    def calculate_tiling(self):
        # Start from the full grid, then erase bars between adjacent squares
        # of the same tile.
        h = [self.make_base_h_row(i) for i in range(self.min_y, self.max_y + 2)]
        v = [self.make_base_v_row(i) for i in range(self.min_y, self.max_y + 1)]
        for tile in self.tiles:
            for sq_a in tile:
                for sq_b in tile:
                    a, b = sorted([sq_a, sq_b])
                    ax, ay = a[0] - self.min_x, a[1] - self.min_y
                    bx, by = b[0] - self.min_x, b[1] - self.min_y
                    if (ay == by) and (ax + 1 == bx):
                        v[ay][bx] = False
                    if (ax == bx) and (ay + 1 == by):
                        h[by][ax] = False
        return h, v

    def row_max(self, i):
        return max(sq[0] for sq in self.squares if sq[1] == i)

    def faces(self, ragged):
        # -1 fills ragged rows; -2 marks cells open to the outside.
        if ragged:
            faces = [
                [-1 for i in range(self.min_x, self.row_max(j) + 1)]
                for j in range(self.min_y, self.max_y + 1)
            ]
        else:
            faces = [
                [-2 for i in range(self.min_x, self.max_x + 1)]
                for j in range(self.min_y, self.max_y + 1)
            ]
        for i, tile in enumerate(self.tiles):
            for sq in tile:
                faces[sq[1] - self.min_y][sq[0] - self.min_x] = i
        for x, y in self.open_blanks():
            faces[y - self.min_y][x - self.min_x] = -2
        return faces

    def open_blanks(self):
        # Flood fill from the boundary: blanks reachable from outside.
        s = set()
        sqs = set(self.squares)
        queue = (
            [(x, self.min_y) for x in range(self.min_x, self.row_max(self.min_y) + 1)]
            + [(self.min_x, y) for y in range(self.min_y + 1, self.max_y)]
            + [
                (x, y)
                for y in range(self.min_y + 1, self.max_y)
                for x in range(
                    min(self.row_max(y - 1), self.row_max(y + 1)), self.row_max(y) + 1
                )
            ]
            + [(x, self.max_y) for x in range(self.min_x, self.row_max(self.max_y) + 1)]
        )
        # The queue grows while being iterated (breadth-first fill).
        for sq in queue:
            if sq in sqs:
                continue
            s.add(sq)
            for x, y in self.neighbours(sq):
                if self.min_y <= y <= self.max_y:
                    if self.min_x <= x <= self.row_max(y):
                        if (x, y) not in s:
                            queue.append((x, y))
        return s

    @staticmethod
    def neighbours(sq):
        x, y = sq
        yield (x - 1, y)
        yield (x + 1, y)
        yield (x, y - 1)
        yield (x, y + 1)

    def nodes(self):
        # Per-node sum of incident bars, each direction a distinct power of two.
        nodes = [
            [0 for i in range(self.min_x - 1, self.max_x + 1)]
            for j in range(self.min_y - 1, self.max_y + 1)
        ]
        for i, row in enumerate(self.v):
            for j, bar in enumerate(row):
                if bar:
                    nodes[i][j] += 4
                    nodes[i + 1][j] += 1
        for i, row in enumerate(self.h):
            for j, bar in enumerate(row):
                if bar:
                    nodes[i][j] += 2
                    nodes[i][j + 1] += 8
        return nodes

    def abstract(self, ragged=True):
        self.h, self.v = self.calculate_tiling()
        return self.faces(ragged), self.v, self.h, self.nodes()
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import argparse
import atexit
from .serial_ifc import get_serial
# -----------------------------------------------------------------------------
# Module Variables
# -----------------------------------------------------------------------------
DESCRIPTION = """
PowerCounter 'capture' command
==============================
Capture from the serial port and save the output in a file without any further
processing.
Example:
powercounter -d /dev/ttyUSB1 capture test.dat
"""
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def capture(args):
    """Handle the capture command of powercounter.

    Reads 64-byte chunks from the serial device and appends them verbatim
    to args.output_file until the user presses Ctrl-C.

    Args:
        args (obj) - The command line arguments.

    Return:
        Returns True on success, otherwise False.
    """
    print("Saving data into file %s. Press Ctrl-C to stop." % args.output_file)

    # Open serial port
    serial_dev = get_serial(args)
    if serial_dev is None:
        return False
    atexit.register(serial_dev.close)

    # Open output file
    try:
        output_fh = open(args.output_file, 'wb')
    except OSError:
        print("ERROR: Can't open output file %s!" % args.output_file)
        return False
    atexit.register(output_fh.close)

    num_bytes = 0
    while True:
        try:
            byte_buffer = serial_dev.read(64)
            output_fh.write(byte_buffer)
            num_bytes += len(byte_buffer)
            # Bug fix: the original printed "...\r" plus print's own newline,
            # so the carriage return never rewrote the line. Keep the counter
            # on a single line and flush so it appears immediately.
            print("Read %d bytes..." % num_bytes, end='\r', flush=True)
        except KeyboardInterrupt:
            print("\n\nFinishing capture.")
            break
    return True
def add_capture_parser(subparsers):
    """Add the subparser for the capture command.

    Args:
        subparsers (obj): The subparsers object used to generate the subparsers.
    """
    parser = subparsers.add_parser(
        'capture',
        description=DESCRIPTION,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "output_file",
        metavar="OUTPUT_FILE",
        help="The output file to store the raw data.",
        action="store",
        default=None)
    # Dispatch to capture() when this sub-command is selected.
    parser.set_defaults(func=capture)
# -----------------------------------------------------------------------------
# EOF
# ----------------------------------------------------------------------------- | power_counter/capture_cmd.py | # -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import argparse
import atexit
from .serial_ifc import get_serial
# -----------------------------------------------------------------------------
# Module Variables
# -----------------------------------------------------------------------------
DESCRIPTION = """
PowerCounter 'capture' command
==============================
Capture from the serial port and save the output in a file without any further
processing.
Example:
powercounter -d /dev/ttyUSB1 capture test.dat
"""
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def capture(args):
    """Handle the capture command of powercounter.

    Streams raw serial data into args.output_file until Ctrl-C.

    Args:
        args (obj) - The command line arguments.

    Return:
        Returns True on success, otherwise False.
    """
    print("Saving data into file %s. Press Ctrl-C to stop." % args.output_file)

    # Open serial port
    serial_dev = get_serial(args)
    if serial_dev is None:
        return False
    atexit.register(serial_dev.close)

    # Open output file
    try:
        output_fh = open(args.output_file, 'wb')
    except OSError:
        print("ERROR: Can't open output file %s!" % args.output_file)
        return False
    atexit.register(output_fh.close)

    num_bytes = 0
    while True:
        try:
            byte_buffer = serial_dev.read(64)
            output_fh.write(byte_buffer)
            num_bytes += len(byte_buffer)
            # Bug fix: print() appended a newline after the "\r", so the
            # progress line never updated in place; use end='\r' and flush.
            print("Read %d bytes..." % num_bytes, end='\r', flush=True)
        except KeyboardInterrupt:
            print("\n\nFinishing capture.")
            break
    return True
def add_capture_parser(subparsers):
    """Add the subparser for the capture command.

    Args:
        subparsers (obj): The subparsers object used to generate the subparsers.
    """
    sub = subparsers.add_parser(
        'capture',
        description=DESCRIPTION,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    sub.add_argument(
        "output_file",
        metavar="OUTPUT_FILE",
        help="The output file to store the raw data.",
        action="store",
        default=None,
    )
    # Wire the sub-command to its handler.
    sub.set_defaults(func=capture)
# -----------------------------------------------------------------------------
# EOF
# ----------------------------------------------------------------------------- | 0.568416 | 0.192103 |
from __future__ import division
import numpy as np
import scipy.ndimage as ndi
from sklearn import mixture
def twoPointStencil2D(data, h=1):
    """Two-point central-difference gradient along each axis of a 2-D array.

    Interior: f'(x) = (f(x+h) - f(x-h)) / (2h), via a [1, 0, -1] convolution.
    Borders: one-sided differences (f(x+h)-f(x))/h and (f(x)-f(x-h))/h.

    Returns an array of shape (rows, cols, 2); [..., 0] is the row-axis
    derivative, [..., 1] the column-axis derivative.
    """
    rows, cols = data.shape[0], data.shape[1]
    der = np.zeros((rows, cols, 2))
    for axis in (0, 1):
        der[:, :, axis] = ndi.convolve1d(
            data, [1, 0, -1], axis=axis, mode='nearest') / (2 * h)
    # One-sided stencils on the row borders...
    der[0, :, 0] = (data[1, :] - data[0, :]) / h
    der[-1, :, 0] = (data[-1, :] - data[-2, :]) / h
    # ...and on the column borders.
    der[:, 0, 1] = (data[:, 1] - data[:, 0]) / h
    der[:, -1, 1] = (data[:, -1] - data[:, -2]) / h
    return der
def derGMMmodel(GMMmodel, UB):
    """Gradient of the GMM corner prior w.r.t. the two layout corners.

        f'(x) = sum_k( w_k N(mu_k) (mu_k - x)^T Sigma_k^-1 ) / p(x)

    Args:
        GMMmodel: dict with 'Upper' and 'Bottom' fitted sklearn GMMs.
        UB: array [u_row, u_col, b_row, b_col].

    Returns:
        A new array of the same shape/dtype as UB with the per-corner
        gradients (integer dtype in UB truncates, as before).

    Bug fixes vs. the recovered version:
      * '.mean_' is not a sklearn GMM attribute; use '.means_'.
      * the Bottom-corner numerator used the Upper corner U instead of B.
      * 'outUB = UB' aliased the caller's array, mutating it in place.
    NOTE(review): relies on the long-deprecated
    sklearn.mixture.log_multivariate_normal_density — confirm the sklearn
    version before use.
    """
    outUB = UB.copy()  # do not alias (and mutate) the caller's array
    U = UB[0:2]
    B = UB[2:4]

    def _corner_gradient(model, corner):
        # Denominator: log-likelihood of the corner under the mixture.
        den = np.exp(model.score(corner.reshape(1, -1)))
        # Numerator: responsibility-weighted Mahalanobis directions.
        num = np.sum(
            np.exp(
                mixture.log_multivariate_normal_density(
                    model.means_, model.covars_, model.covariance_type)
            )
            * model.weights_
            * (model.means_ - corner).T
            * np.linalg.inv(model.covars_),
            axis=0
        )
        return num / den

    outUB[0:2] = _corner_gradient(GMMmodel['Upper'], U)
    outUB[2:4] = _corner_gradient(GMMmodel['Bottom'], B)
    return outUB
def computeII(data):
    """Integral image (summed-area table) of a 2-D array.

    As defined in Lewis, J.P. (1995), "Fast template matching",
    Proc. Vision Interface.
    """
    return np.cumsum(np.cumsum(data, axis=0), axis=1)
def getIIsum(data, U, B):
    """Summed area of the rectangle with upper-left U and bottom-right B.

    Standard integral-image identity:
        sum = I(B) + I(U) - I(U_row, B_col) - I(B_row, U_col)
    Degenerate single-cell rectangles (U == B) return the cell itself.
    """
    if U == B:
        return data[U]
    corner_sum = data[B] + data[U]
    cross_sum = data[U[0], B[1]] + data[B[0], U[1]]
    return corner_sum - cross_sum
def computeLogProb(P1II, P0II, Qmodel, UB):
    """Log-probability of the layout whose corners are UB.

    Sums the foreground log-probabilities inside the [U, B] rectangle plus
    the background log-probabilities outside it, both via integral images.
    The GMM corner prior (Qmodel) term is currently disabled, so Qmodel
    is accepted but unused.
    """
    U, B = UB[0:2], UB[2:4]
    inside = getIIsum(P1II, (U[0], U[1]), (B[0], B[1]))
    # Total background mass minus the part covered by the rectangle.
    outside = P0II[-1, -1] - getIIsum(P0II, (U[0], U[1]), (B[0], B[1]))
    return inside + outside
def derP1(II, UB):
    """Central differences of the inside (foreground) mass w.r.t. each corner."""
    ur, uc, br, bc = UB[0], UB[1], UB[2], UB[3]
    d_ur = (getIIsum(II, (ur + 1, uc), (br, bc)) - getIIsum(II, (ur - 1, uc), (br, bc))) / 2
    d_uc = (getIIsum(II, (ur, uc + 1), (br, bc)) - getIIsum(II, (ur, uc - 1), (br, bc))) / 2
    d_br = (getIIsum(II, (ur, uc), (br + 1, bc)) - getIIsum(II, (ur, uc), (br - 1, bc))) / 2
    d_bc = (getIIsum(II, (ur, uc), (br, bc + 1)) - getIIsum(II, (ur, uc), (br, bc - 1))) / 2
    return np.array([d_ur, d_uc, d_br, d_bc])
def derP0(II, UB):
    """Central differences of the outside (background) mass w.r.t. each corner."""
    ur, uc, br, bc = UB[0], UB[1], UB[2], UB[3]
    all0 = 2 * II[-1, -1]  # total mass appears once per finite-difference term
    d_ur = (all0 - getIIsum(II, (ur + 1, uc), (br, bc)) + getIIsum(II, (ur - 1, uc), (br, bc))) / 2
    d_uc = (all0 - getIIsum(II, (ur, uc + 1), (br, bc)) + getIIsum(II, (ur, uc - 1), (br, bc))) / 2
    d_br = (all0 - getIIsum(II, (ur, uc), (br + 1, bc)) + getIIsum(II, (ur, uc), (br - 1, bc))) / 2
    d_bc = (all0 - getIIsum(II, (ur, uc), (br, bc + 1)) + getIIsum(II, (ur, uc), (br, bc - 1))) / 2
    return np.array([d_ur, d_uc, d_br, d_bc])
def predictLayout(P1II, P0II, Qmodel, init=np.zeros(4), thr=0.001, T=100, alpha=0.1):
    """Gradient search for the layout corners [u_row, u_col, b_row, b_col].

    Iterates up to T steps, keeping the best-scoring corner vector seen.

    NOTE(review): `deltaLogProb` is never updated (its update is commented
    out below), so the convergence test never fires and the loop always
    runs all T iterations; `alpha` and the Qmodel prior term are currently
    unused. The Python-2 debug prints of the raw corner vector (and the
    leftover 'hola' print in the unreachable branch) were removed.
    """
    deltaLogProb = np.inf
    prevLogProb = 99999999999  # kept for the (disabled) delta computation
    bestUB = init

    # --- Init step
    thisUB = init
    bestLogProb = computeLogProb(P1II, P0II, Qmodel, thisUB)

    # --- Iterate "T" times or until converged
    for i in range(T):
        # Fixed small step; the GMM prior gradient is disabled.
        step = derP1(P1II, thisUB) + derP0(P0II, thisUB)
        thisUB = thisUB - (0.00001 * step).astype(int)

        logProb = computeLogProb(P1II, P0II, Qmodel, thisUB)
        print("Iteration: {0:}, LogProb= {1:}".format(i, logProb))
        # deltaLogProb = np.abs(logProb - prevLogProb)
        prevLogProb = logProb

        if logProb > bestLogProb:
            bestLogProb = logProb
            bestUB = thisUB
        if deltaLogProb <= thr:
            # Converged (currently unreachable; see NOTE above).
            break
    return bestUB
def _testModule():
    """Manual smoke test for this module.

    NOTE(review): depends on hardcoded absolute paths and a pickled sklearn
    model, so it only runs on the original author's machine. Writes
    testP.png / testII.png / testIIder.png to the current directory.
    Python-2 print statements were converted to print() calls.
    """
    import matplotlib
    matplotlib.use('Agg')  # headless backend; must be set before pyplot import
    import matplotlib.pyplot as plt
    from matplotlib import cm
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle as pickle

    EPS = np.finfo(float).eps

    # Load the GMM corner prior and the per-pixel label/probability table.
    fh = open("/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/GMM_22_z0.3_w32_g3_u2_b3_model.pickle", 'r')
    Qmodel = pickle.load(fh)
    fh.close()

    P = np.loadtxt('/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/test_pos/bla.txt')
    P1 = P[:, 1].copy()
    P0 = P[:, 1].copy()
    P1[P[:, 0] == 0] = 1 - P1[P[:, 0] == 0]
    # NOTE(review): the next line reads P1 where P0 looks intended; at this
    # point the masked entries of P1 and P0 are still equal, so the result
    # is the same — confirm before changing.
    P0[P[:, 0] == 1] = 1 - P1[P[:, 0] == 1]
    P1 = np.log(P1 + EPS).reshape(365, 230)
    P0 = np.log(P0 + EPS).reshape(365, 230)

    T = 100
    thr = 0.1  # --- keep high for test only
    alpha = 0.1

    # --- Test computeII -> OK
    P1II = computeII(P1)
    P0II = computeII(P0)

    fig, ax = plt.subplots(nrows=1, ncols=2)
    ax[0].axis('off')
    ax[0].imshow(P1, cmap=cm.coolwarm)
    ax[1].axis('off')
    ax[1].imshow(P0, cmap=cm.coolwarm)
    fig.savefig('testP.png', bbox_inches='tight')
    plt.close(fig)

    fig1, ax1 = plt.subplots(nrows=1, ncols=2)
    ax1[0].axis('off')
    ax1[0].imshow(P1II, cmap=cm.coolwarm)
    ax1[1].axis('off')
    ax1[1].imshow(P0II, cmap=cm.coolwarm)
    fig1.savefig('testII.png', bbox_inches='tight')
    plt.close(fig1)

    uc = 0  # unused in the recovered version
    br = 364
    bc = 229
    all0 = getIIsum(P0II, (0, 0), (364, 229))
    der = np.zeros((365, 230))
    # Visualize the combined finite-difference field over the interior.
    for r in np.arange(5, 360, 1):
        for c in np.arange(5, 225, 1):
            der[r, c] = ((getIIsum(P1II, (r + 1, c + 1), (br, bc)) - getIIsum(P1II, (r - 1, c - 1), (br, bc))) / 2) + \
                        (((all0 - getIIsum(P0II, (r + 1, c - 1), (br, bc))) - (all0 - getIIsum(P0II, (r - 1, c + 1), (br, bc)))) / 2)

    fig2, ax2 = plt.subplots(nrows=1, ncols=1)
    ax2.axis('off')
    im = ax2.imshow(der, cmap=cm.coolwarm)
    fig2.colorbar(im)
    fig2.savefig('testIIder.png', bbox_inches='tight')

    print(computeLogProb(P1II, P0II, Qmodel, np.array([100, 80, 200, 180])))
    OUT = predictLayout(P1II, P0II, Qmodel, init=np.array([100, 80, 200, 180]), thr=thr, T=T, alpha=alpha)
    print(OUT)
    print("test")
if __name__ == '__main__':
_testModule() | ILA/code/predictLayout.py | from __future__ import division
import numpy as np
import scipy.ndimage as ndi
from sklearn import mixture
def twoPointStencil2D(data, h=1):
    """Two-point central-difference stencil on each axis of a 2-D array.

    Interior: f'(x) = (f(x+h) - f(x-h)) / (2h) via 1-D convolution with
    [1, 0, -1]; borders use one-sided differences. Returns an array of
    shape (rows, cols, 2): [..., 0] row-axis, [..., 1] column-axis.
    (Indentation reconstructed from the whitespace-stripped dump.)
    """
    der = np.zeros((data.shape[0], data.shape[1], 2))
    der[:, :, 0] = ndi.convolve1d(data, [1, 0, -1], axis=0, mode='nearest') / (2 * h)
    der[:, :, 1] = ndi.convolve1d(data, [1, 0, -1], axis=1, mode='nearest') / (2 * h)
    # --- Handle rows border
    der[0, :, 0] = (data[1, :] - data[0, :]) / h
    der[-1, :, 0] = (data[-1, :] - data[-2, :]) / h
    # --- Handle columns border
    der[:, 0, 1] = (data[:, 1] - data[:, 0]) / h
    der[:, -1, 1] = (data[:, -1] - data[:, -2]) / h
    return der
def derGMMmodel(GMMmodel, UB):
    """Gradient of the GMM corner prior w.r.t. the Upper and Bottom corners.

        f'(x) = sum_k( w_k N(mu_k) (mu_k - x)^T Sigma_k^-1 ) / p(x)

    Bug fixes vs. the recovered version: '.means_' instead of the
    nonexistent '.mean_'; the Bottom gradient now uses corner B (it used U);
    the result no longer aliases/mutates the caller's UB array.
    NOTE(review): depends on the long-deprecated
    sklearn.mixture.log_multivariate_normal_density — verify sklearn version.
    """
    outUB = UB.copy()
    U = UB[0:2]
    B = UB[2:4]

    def _grad(model, corner):
        den = np.exp(model.score(corner.reshape(1, -1)))
        num = np.sum(
            np.exp(
                mixture.log_multivariate_normal_density(
                    model.means_, model.covars_, model.covariance_type)
            )
            * model.weights_
            * (model.means_ - corner).T
            * np.linalg.inv(model.covars_),
            axis=0
        )
        return num / den

    # --- Deriv w.r.t. the Upper corner
    outUB[0:2] = _grad(GMMmodel['Upper'], U)
    # --- Deriv w.r.t. the Bottom corner
    outUB[2:4] = _grad(GMMmodel['Bottom'], B)
    return outUB
def computeII(data):
    """Integral image (summed-area table).

    As defined in Lewis, J.P. (1995), "Fast template matching",
    Proc. Vision Interface.
    """
    return data.cumsum(axis=0).cumsum(axis=1)
def getIIsum(data, U, B):
    """Summed area of the rectangle with upper-left corner U, bottom-right B.

        sum = I(B) + I(U) - I(U_row, B_col) - I(B_row, U_col)

    For a degenerate single-cell rectangle (U == B) return the cell value.
    """
    if (U == B):
        return data[U]
    else:
        return (data[B] + data[U]) - (data[U[0], B[1]] + data[B[0], U[1]])
def computeLogProb(P1II, P0II, Qmodel, UB):
    """Log-probability of the layout with corners UB.

    Adds the foreground log-prob mass inside the [U, B] rectangle to the
    background log-prob mass outside it, both read from integral images.
    The GMM corner prior (Qmodel) term is currently disabled — the argument
    is kept for interface compatibility.
    """
    U = UB[0:2]
    B = UB[2:4]
    pProb1 = getIIsum(P1II, (U[0], U[1]), (B[0], B[1]))
    # Total background mass minus what the rectangle covers.
    pProb0 = P0II[-1, -1] - getIIsum(P0II, (U[0], U[1]), (B[0], B[1]))
    return pProb1 + pProb0
def derP1(II, UB):
    """Central differences of the inside (foreground) mass per corner coordinate."""
    dUr = (getIIsum(II, (UB[0] + 1, UB[1]), (UB[2], UB[3])) - getIIsum(II, (UB[0] - 1, UB[1]), (UB[2], UB[3]))) / 2
    dUc = (getIIsum(II, (UB[0], UB[1] + 1), (UB[2], UB[3])) - getIIsum(II, (UB[0], UB[1] - 1), (UB[2], UB[3]))) / 2
    dBr = (getIIsum(II, (UB[0], UB[1]), (UB[2] + 1, UB[3])) - getIIsum(II, (UB[0], UB[1]), (UB[2] - 1, UB[3]))) / 2
    dBc = (getIIsum(II, (UB[0], UB[1]), (UB[2], UB[3] + 1)) - getIIsum(II, (UB[0], UB[1]), (UB[2], UB[3] - 1))) / 2
    return np.array([dUr, dUc, dBr, dBc])
def derP0(II, UB):
    """Central differences of the outside (background) mass per corner coordinate."""
    all0 = 2 * II[-1, -1]  # total mass enters both finite-difference terms
    dUr = (all0 - getIIsum(II, (UB[0] + 1, UB[1]), (UB[2], UB[3])) + getIIsum(II, (UB[0] - 1, UB[1]), (UB[2], UB[3]))) / 2
    dUc = (all0 - getIIsum(II, (UB[0], UB[1] + 1), (UB[2], UB[3])) + getIIsum(II, (UB[0], UB[1] - 1), (UB[2], UB[3]))) / 2
    dBr = (all0 - getIIsum(II, (UB[0], UB[1]), (UB[2] + 1, UB[3])) + getIIsum(II, (UB[0], UB[1]), (UB[2] - 1, UB[3]))) / 2
    dBc = (all0 - getIIsum(II, (UB[0], UB[1]), (UB[2], UB[3] + 1)) + getIIsum(II, (UB[0], UB[1]), (UB[2], UB[3] - 1))) / 2
    return np.array([dUr, dUc, dBr, dBc])
def predictLayout(P1II, P0II, Qmodel, init=np.zeros(4), thr=0.001, T=100, alpha=0.1):
    """Gradient search for the layout corners [u_row, u_col, b_row, b_col].

    Runs up to T fixed-step iterations and returns the best-scoring corner
    vector. NOTE(review): `deltaLogProb` is never recomputed (the update is
    commented out), so the convergence branch never triggers; `alpha` and
    the Qmodel prior gradient are unused. Python-2 debug prints removed.
    """
    deltaLogProb = np.inf
    prevLogProb = 99999999999  # retained for the (disabled) delta update
    bestUB = init

    # --- Init step
    thisUB = init
    bestLogProb = computeLogProb(P1II, P0II, Qmodel, thisUB)

    # --- Iterate "T" times or until converged
    for i in range(T):
        thisUB = thisUB - (
            0.00001 * (derP1(P1II, thisUB) + derP0(P0II, thisUB))
        ).astype(int)

        logProb = computeLogProb(P1II, P0II, Qmodel, thisUB)
        print("Iteration: {0:}, LogProb= {1:}".format(i, logProb))
        # deltaLogProb = np.abs(logProb - prevLogProb)
        prevLogProb = logProb

        if logProb > bestLogProb:
            bestLogProb = logProb
            bestUB = thisUB
        if deltaLogProb <= thr:
            # Converged (unreachable while deltaLogProb stays inf; see NOTE).
            break
    return bestUB
def _testModule():
    """Manual smoke test (hardcoded machine-specific paths; writes PNGs).

    Python-2 print statements converted to print(); bare except narrowed.
    """
    import matplotlib
    matplotlib.use('Agg')  # headless backend, set before pyplot import
    import matplotlib.pyplot as plt
    from matplotlib import cm
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle as pickle

    EPS = np.finfo(float).eps

    fh = open("/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/GMM_22_z0.3_w32_g3_u2_b3_model.pickle", 'r')
    Qmodel = pickle.load(fh)
    fh.close()

    P = np.loadtxt('/home/lorenzoqd/TFM/ILA/models/CRFs/_z0.3_w32_g3/test_pos/bla.txt')
    P1 = P[:, 1].copy()
    P0 = P[:, 1].copy()
    P1[P[:, 0] == 0] = 1 - P1[P[:, 0] == 0]
    # NOTE(review): reads P1 where P0 looks intended; values coincide here.
    P0[P[:, 0] == 1] = 1 - P1[P[:, 0] == 1]
    P1 = np.log(P1 + EPS).reshape(365, 230)
    P0 = np.log(P0 + EPS).reshape(365, 230)

    T = 100
    thr = 0.1  # --- keep high for test only
    alpha = 0.1

    # --- Test computeII -> OK
    P1II = computeII(P1)
    P0II = computeII(P0)

    fig, ax = plt.subplots(nrows=1, ncols=2)
    ax[0].axis('off')
    ax[0].imshow(P1, cmap=cm.coolwarm)
    ax[1].axis('off')
    ax[1].imshow(P0, cmap=cm.coolwarm)
    fig.savefig('testP.png', bbox_inches='tight')
    plt.close(fig)

    fig1, ax1 = plt.subplots(nrows=1, ncols=2)
    ax1[0].axis('off')
    ax1[0].imshow(P1II, cmap=cm.coolwarm)
    ax1[1].axis('off')
    ax1[1].imshow(P0II, cmap=cm.coolwarm)
    fig1.savefig('testII.png', bbox_inches='tight')
    plt.close(fig1)

    uc = 0  # unused in the recovered version
    br = 364
    bc = 229
    all0 = getIIsum(P0II, (0, 0), (364, 229))
    der = np.zeros((365, 230))
    for r in np.arange(5, 360, 1):
        for c in np.arange(5, 225, 1):
            der[r, c] = ((getIIsum(P1II, (r + 1, c + 1), (br, bc)) - getIIsum(P1II, (r - 1, c - 1), (br, bc))) / 2) + \
                        (((all0 - getIIsum(P0II, (r + 1, c - 1), (br, bc))) - (all0 - getIIsum(P0II, (r - 1, c + 1), (br, bc)))) / 2)

    fig2, ax2 = plt.subplots(nrows=1, ncols=1)
    ax2.axis('off')
    im = ax2.imshow(der, cmap=cm.coolwarm)
    fig2.colorbar(im)
    fig2.savefig('testIIder.png', bbox_inches='tight')

    print(computeLogProb(P1II, P0II, Qmodel, np.array([100, 80, 200, 180])))
    OUT = predictLayout(P1II, P0II, Qmodel, init=np.array([100, 80, 200, 180]), thr=thr, T=T, alpha=alpha)
    print(OUT)
    print("test")
if __name__ == '__main__':
_testModule() | 0.696062 | 0.628151 |
import mock
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler import weights
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
from nova import utils
class HostNameWeigher(weights.BaseHostWeigher):
    """Scheduler weigher with a fixed per-host weight table.

    TestMultiCellMigrate creates host1 in cell1 and host2 in cell2.
    Something about migrating from host1 to host2 teases out failures
    which probably has to do with cell1 being the default cell DB in
    our base test class setup, so prefer host1 to make the tests
    deterministic.
    """
    _weights = {'host1': 100, 'host2': 50}

    def _weigh_object(self, host_state, weight_properties):
        # Hosts absent from the table get no weight at all.
        try:
            return self._weights[host_state.host]
        except KeyError:
            return 0
class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
    """Tests for cross-cell cold migration (resize)"""

    NUMBER_OF_CELLS = 2
    compute_driver = 'fake.MediumFakeDriver'

    def setUp(self):
        """Starts one compute per cell, each in an AZ aggregate named after
        its cell, with cross-cell resize enabled.
        """
        # Use our custom weigher defined above to make sure that we have
        # a predictable scheduling sort order during server create.
        self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
                   group='filter_scheduler')
        super(TestMultiCellMigrate, self).setUp()
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
        self._enable_cross_cell_resize()
        self.created_images = []  # list of image IDs created during resize
        # Adjust the polling interval and timeout for long RPC calls.
        self.flags(rpc_response_timeout=1)
        self.flags(long_rpc_timeout=60)
        # Set up 2 compute services in different cells
        self.host_to_cell_mappings = {
            'host1': 'cell1', 'host2': 'cell2'}
        for host in sorted(self.host_to_cell_mappings):
            cell_name = self.host_to_cell_mappings[host]
            # Start the compute service on the given host in the given cell.
            self._start_compute(host, cell_name=cell_name)
            # Create an aggregate where the AZ name is the cell name.
            agg_id = self._create_aggregate(
                cell_name, availability_zone=cell_name)
            # Add the host to the aggregate.
            body = {'add_host': {'host': host}}
            self.admin_api.post_aggregate_action(agg_id, body)

    def _enable_cross_cell_resize(self):
        """Allows cross-cell resize for these tests (via a stub for now)."""
        # Enable cross-cell resize policy since it defaults to not allow
        # anyone to perform that type of operation. For these tests we'll
        # just allow admins to perform cross-cell resize.
        # TODO(mriedem): Uncomment this when the policy rule is added and
        # used in the compute API _allow_cross_cell_resize method. For now
        # we just stub that method to return True.
        # self.policy_fixture.set_rules({
        #     servers_policies.CROSS_CELL_RESIZE:
        #         base_policies.RULE_ADMIN_API},
        #     overwrite=False)
        self.stub_out('nova.compute.api.API._allow_cross_cell_resize',
                      lambda *a, **kw: True)

    def assertFlavorMatchesAllocation(self, flavor, allocation,
                                      volume_backed=False):
        """Asserts the placement allocation matches the flavor resources.

        :param flavor: dict form of the flavor used for the allocation
        :param allocation: dict of resource class -> allocated amount
        :param volume_backed: True if the server is volume-backed, in which
            case no DISK_GB allocation is expected
        """
        self.assertEqual(flavor['vcpus'], allocation['VCPU'])
        self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
        # Volume-backed instances won't have DISK_GB allocations.
        if volume_backed:
            self.assertNotIn('DISK_GB', allocation)
        else:
            self.assertEqual(flavor['disk'], allocation['DISK_GB'])

    def assert_instance_fields_match_flavor(self, instance, flavor):
        """Asserts the Instance object resource fields match the flavor."""
        self.assertEqual(instance.memory_mb, flavor['ram'])
        self.assertEqual(instance.vcpus, flavor['vcpus'])
        self.assertEqual(instance.root_gb, flavor['disk'])
        self.assertEqual(
            instance.ephemeral_gb, flavor['OS-FLV-EXT-DATA:ephemeral'])

    def _count_volume_attachments(self, server_id):
        """Returns the number of Cinder volume attachments for the server."""
        attachment_ids = self.cinder.attachment_ids_for_instance(server_id)
        return len(attachment_ids)

    def assert_quota_usage(self, expected_num_instances):
        """Asserts the total instances quota usage reported by the API."""
        limits = self.api.get_limits()['absolute']
        self.assertEqual(expected_num_instances, limits['totalInstancesUsed'])

    def _create_server(self, flavor, volume_backed=False):
        """Creates a server and waits for it to be ACTIVE

        :param flavor: dict form of the flavor to use
        :param volume_backed: True if the server should be volume-backed
        :returns: server dict response from the GET /servers/{server_id} API
        """
        # Provide a VIF tag for the pre-existing port. Since VIF tags are
        # stored in the virtual_interfaces table in the cell DB, we want to
        # make sure those survive the resize to another cell.
        networks = [{
            'port': self.neutron.port_1['id'],
            'tag': 'private'
        }]
        image_uuid = fake_image.get_valid_image_id()
        server = self._build_minimal_create_server_request(
            self.api, 'test_cross_cell_resize',
            image_uuid=image_uuid,
            flavor_id=flavor['id'],
            networks=networks)
        # Put a tag on the server to make sure that survives the resize.
        server['tags'] = ['test']
        if volume_backed:
            bdms = [{
                'boot_index': 0,
                'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
                'source_type': 'volume',
                'destination_type': 'volume',
                'tag': 'root'
            }]
            server['block_device_mapping_v2'] = bdms
            # We don't need the imageRef for volume-backed servers.
            server.pop('imageRef', None)
        server = self.api.post_server({'server': server})
        server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # For volume-backed make sure there is one attachment to start.
        if volume_backed:
            self.assertEqual(1, self._count_volume_attachments(server['id']),
                             self.cinder.volume_to_attachment)
        return server

    def stub_image_create(self):
        """Stubs the _FakeImageService.create method to track created images"""
        original_create = self.image_service.create

        def image_create_snooper(*args, **kwargs):
            # Record the image ID so tests can assert on snapshot activity.
            image = original_create(*args, **kwargs)
            self.created_images.append(image['id'])
            return image

        _p = mock.patch.object(
            self.image_service, 'create', side_effect=image_create_snooper)
        _p.start()
        self.addCleanup(_p.stop)

    def _resize_and_validate(self, volume_backed=False, stopped=False,
                             target_host=None):
        """Creates and resizes the server to another cell. Validates various
        aspects of the server and its related records (allocations, migrations,
        actions, VIF tags, etc).

        :param volume_backed: True if the server should be volume-backed, False
            if image-backed.
        :param stopped: True if the server should be stopped prior to resize,
            False if the server should be ACTIVE
        :param target_host: If not None, triggers a cold migration to the
            specified host.
        :returns: tuple of:
            - server response object
            - source compute node resource provider uuid
            - target compute node resource provider uuid
            - old flavor
            - new flavor
        """
        # Create the server.
        flavors = self.api.get_flavors()
        old_flavor = flavors[0]
        server = self._create_server(old_flavor, volume_backed=volume_backed)
        original_host = server['OS-EXT-SRV-ATTR:host']
        image_uuid = None if volume_backed else server['image']['id']
        # Our HostNameWeigher ensures the server starts in cell1, so we expect
        # the server AZ to be cell1 as well.
        self.assertEqual('cell1', server['OS-EXT-AZ:availability_zone'])
        if stopped:
            # Stop the server before resizing it.
            self.api.post_server_action(server['id'], {'os-stop': None})
            self._wait_for_state_change(self.api, server, 'SHUTOFF')
        # Before resizing make sure quota usage is only 1 for total instances.
        self.assert_quota_usage(expected_num_instances=1)
        if target_host:
            # Cold migrate the server to the target host.
            new_flavor = old_flavor  # flavor does not change for cold migrate
            body = {'migrate': {'host': target_host}}
            expected_host = target_host
        else:
            # Resize it which should migrate the server to the host in the
            # other cell.
            new_flavor = flavors[1]
            body = {'resize': {'flavorRef': new_flavor['id']}}
            expected_host = 'host1' if original_host == 'host2' else 'host2'
        self.stub_image_create()
        self.api.post_server_action(server['id'], body)
        # Wait for the server to be resized and then verify the host has
        # changed to be the host in the other cell.
        server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
        self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
        # Assert that the instance is only listed one time from the API (to
        # make sure it's not listed out of both cells).
        # Note that we only get one because the DB API excludes hidden
        # instances by default (see instance_get_all_by_filters_sort).
        servers = self.api.get_servers()
        self.assertEqual(1, len(servers),
                         'Unexpected number of servers: %s' % servers)
        self.assertEqual(expected_host, servers[0]['OS-EXT-SRV-ATTR:host'])
        # And that there is only one migration record.
        migrations = self.api.api_get(
            '/os-migrations?instance_uuid=%s' % server['id']
        ).body['migrations']
        self.assertEqual(1, len(migrations),
                         'Unexpected number of migrations records: %s' %
                         migrations)
        migration = migrations[0]
        self.assertEqual('finished', migration['status'])
        # There should be at least two actions, one for create and one for the
        # resize. There will be a third action if the server was stopped.
        actions = self.api.api_get(
            '/servers/%s/os-instance-actions' % server['id']
        ).body['instanceActions']
        expected_num_of_actions = 3 if stopped else 2
        self.assertEqual(expected_num_of_actions, len(actions), actions)
        # Each action should have events (make sure these were copied from
        # the source cell to the target cell).
        for action in actions:
            detail = self.api.api_get(
                '/servers/%s/os-instance-actions/%s' % (
                    server['id'], action['request_id'])).body['instanceAction']
            self.assertNotEqual(0, len(detail['events']), detail)
        # The tag should still be present on the server.
        self.assertEqual(1, len(server['tags']),
                         'Server tags not found in target cell.')
        self.assertEqual('test', server['tags'][0])
        # Confirm the source node has allocations for the old flavor and the
        # target node has allocations for the new flavor.
        source_rp_uuid = self._get_provider_uuid_by_host(original_host)
        # The source node allocations should be on the migration record.
        source_allocations = self._get_allocations_by_provider_uuid(
            source_rp_uuid)[migration['uuid']]['resources']
        self.assertFlavorMatchesAllocation(
            old_flavor, source_allocations, volume_backed=volume_backed)
        target_rp_uuid = self._get_provider_uuid_by_host(expected_host)
        # The target node allocations should be on the instance record.
        target_allocations = self._get_allocations_by_provider_uuid(
            target_rp_uuid)[server['id']]['resources']
        self.assertFlavorMatchesAllocation(
            new_flavor, target_allocations, volume_backed=volume_backed)
        # The instance, in the target cell DB, should have the old and new
        # flavor stored with it with the values we expect at this point.
        target_cell_name = self.host_to_cell_mappings[expected_host]
        self.assertEqual(
            target_cell_name, server['OS-EXT-AZ:availability_zone'])
        target_cell = self.cell_mappings[target_cell_name]
        admin_context = nova_context.get_admin_context()
        with nova_context.target_cell(admin_context, target_cell) as cctxt:
            inst = objects.Instance.get_by_uuid(
                cctxt, server['id'], expected_attrs=['flavor'])
            self.assertIsNotNone(
                inst.old_flavor,
                'instance.old_flavor not saved in target cell')
            self.assertIsNotNone(
                inst.new_flavor,
                'instance.new_flavor not saved in target cell')
            self.assertEqual(inst.flavor.flavorid, inst.new_flavor.flavorid)
            if target_host:  # cold migrate so flavor does not change
                self.assertEqual(
                    inst.flavor.flavorid, inst.old_flavor.flavorid)
            else:
                self.assertNotEqual(
                    inst.flavor.flavorid, inst.old_flavor.flavorid)
            self.assertEqual(old_flavor['id'], inst.old_flavor.flavorid)
            self.assertEqual(new_flavor['id'], inst.new_flavor.flavorid)
            # Assert the ComputeManager._set_instance_info fields
            # are correct after the resize.
            self.assert_instance_fields_match_flavor(inst, new_flavor)
            # The availability_zone field in the DB should also be updated.
            self.assertEqual(target_cell_name, inst.availability_zone)
        # Assert the VIF tag was carried through to the target cell DB.
        interface_attachments = self.api.get_port_interfaces(server['id'])
        self.assertEqual(1, len(interface_attachments))
        self.assertEqual('private', interface_attachments[0]['tag'])
        if volume_backed:
            # Assert the BDM tag was carried through to the target cell DB.
            volume_attachments = self.api.get_server_volumes(server['id'])
            self.assertEqual(1, len(volume_attachments))
            self.assertEqual('root', volume_attachments[0]['tag'])
        # Make sure the guest is no longer tracked on the source node.
        source_guest_uuids = (
            self.computes[original_host].manager.driver.list_instance_uuids())
        self.assertNotIn(server['id'], source_guest_uuids)
        # And the guest is on the target node hypervisor.
        target_guest_uuids = (
            self.computes[expected_host].manager.driver.list_instance_uuids())
        self.assertIn(server['id'], target_guest_uuids)
        # The source hypervisor continues to report usage in the hypervisors
        # API because even though the guest was destroyed there, the instance
        # resources are still claimed on that node in case the user reverts.
        self.assert_hypervisor_usage(source_rp_uuid, old_flavor, volume_backed)
        # The new flavor should show up with resource usage on the target host.
        self.assert_hypervisor_usage(target_rp_uuid, new_flavor, volume_backed)
        # While we have a copy of the instance in each cell database make sure
        # that quota usage is only reporting 1 (because one is hidden).
        self.assert_quota_usage(expected_num_instances=1)
        # For a volume-backed server, at this point there should be two volume
        # attachments for the instance: one tracked in the source cell and
        # one in the target cell.
        if volume_backed:
            self.assertEqual(2, self._count_volume_attachments(server['id']),
                             self.cinder.volume_to_attachment)
        # Assert the expected power state.
        expected_power_state = 4 if stopped else 1
        self.assertEqual(
            expected_power_state, server['OS-EXT-STS:power_state'],
            "Unexpected power state after resize.")
        # For an image-backed server, a snapshot image should have been created
        # and then deleted during the resize.
        if volume_backed:
            self.assertEqual('', server['image'])
            self.assertEqual(
                0, len(self.created_images),
                "Unexpected image create during volume-backed resize")
        else:
            # The original image for the server shown in the API should not
            # have changed even if a snapshot was used to create the guest
            # on the dest host.
            self.assertEqual(image_uuid, server['image']['id'])
            self.assertEqual(
                1, len(self.created_images),
                "Unexpected number of images created for image-backed resize")
            # Make sure the temporary snapshot image was deleted; we use the
            # compute images proxy API here which is deprecated so we force the
            # microversion to 2.1.
            with utils.temporary_mutation(self.api, microversion='2.1'):
                self.api.api_get('/images/%s' % self.created_images[0],
                                 check_response_status=[404])
        return server, source_rp_uuid, target_rp_uuid, old_flavor, new_flavor

    def test_resize_confirm_image_backed(self):
        """Creates an image-backed server in one cell and resizes it to the
        host in the other cell. The resize is confirmed.
        """
        self._resize_and_validate()
        # TODO(mriedem): Confirm the resize and make assertions.

    def test_resize_revert_volume_backed(self):
        """Tests a volume-backed resize to another cell where the resize
        is reverted back to the original source cell.
        """
        self._resize_and_validate(volume_backed=True)
        # TODO(mriedem): Revert the resize and make assertions.

    def test_delete_while_in_verify_resize_status(self):
        """Tests that when deleting a server in VERIFY_RESIZE status, the
        data is cleaned from both the source and target cell.
        """
        server = self._resize_and_validate()[0]
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # Now list servers to make sure it doesn't show up from the source cell
        servers = self.api.get_servers()
        self.assertEqual(0, len(servers), servers)
        # FIXME(mriedem): Need to cleanup from source cell in API method
        # _confirm_resize_on_deleting(). The above check passes because the
        # instance is still hidden in the source cell so the API filters it
        # out.
        target_host = server['OS-EXT-SRV-ATTR:host']
        source_host = 'host1' if target_host == 'host2' else 'host2'
        source_cell = self.cell_mappings[
            self.host_to_cell_mappings[source_host]]
        ctxt = nova_context.get_admin_context()
        with nova_context.target_cell(ctxt, source_cell) as cctxt:
            # Once the API is fixed this should raise InstanceNotFound.
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertTrue(instance.hidden)

    def test_cold_migrate_target_host_in_other_cell(self):
        """Tests cold migrating to a target host in another cell. This is
        mostly just to ensure the API does not restrict the target host to
        the source cell when cross-cell resize is allowed by policy.
        """
        # _resize_and_validate creates the server on host1 which is in cell1.
        # To make things interesting, start a third host but in cell1 so we can
        # be sure the requested host from cell2 is honored.
        self._start_compute(
            'host3', cell_name=self.host_to_cell_mappings['host1'])
        self._resize_and_validate(target_host='host2')

    # TODO(mriedem): Test cross-cell list where the source cell has two
    # hosts so the CrossCellWeigher picks the other host in the source cell
    # and we do a traditional resize. Add a variant on this where the flavor
    # being resized to is only available, via aggregate, on the host in the
    # other cell so the CrossCellWeigher is overruled by the filters.

    # TODO(mriedem): Test a bunch of rollback scenarios.

    # TODO(mriedem): Test re-scheduling when the first host fails the
    # resize_claim and a subsequent alternative host works, and also the
    # case that all hosts fail the resize_claim.

    # TODO(mriedem): Test cross-cell anti-affinity group assumptions from
    # scheduler utils setup_instance_group where it assumes moves are within
    # the same cell, so:
    # 0. create 2 hosts in cell1 and 1 host in cell2
    # 1. create two servers in an anti-affinity group in cell1
    # 2. migrate one server to cell2
    # 3. migrate the other server to cell2 - this should fail during scheduling
    # because there is already a server from the anti-affinity group on the
    # host in cell2 but setup_instance_group code may not catch it.

    # TODO(mriedem): Perform a resize with at-capacity computes, meaning that
    # when we revert we can only fit the instance with the old flavor back
    # onto the source host in the source cell.

    def test_resize_confirm_from_stopped(self):
        """Tests resizing and confirming a server that was initially stopped
        so it should remain stopped through the resize.
        """
        self._resize_and_validate(volume_backed=True, stopped=True)
        # TODO(mriedem): Confirm the resize and assert the guest remains off

    def test_finish_snapshot_based_resize_at_dest_spawn_fails(self):
        """Negative test where the driver spawn fails on the dest host during
        finish_snapshot_based_resize_at_dest which triggers a rollback of the
        instance data in the target cell. Furthermore, the test will hard
        reboot the server in the source cell to recover it from ERROR status.
        """
        # Create a volume-backed server. This is more interesting for rollback
        # testing to make sure the volume attachments in the target cell were
        # cleaned up on failure.
        flavors = self.api.get_flavors()
        server = self._create_server(flavors[0], volume_backed=True)
        # Now mock out the spawn method on the destination host to fail
        # during _finish_snapshot_based_resize_at_dest_spawn and then resize
        # the server.
        error = exception.HypervisorUnavailable(host='host2')
        with mock.patch.object(self.computes['host2'].driver, 'spawn',
                               side_effect=error):
            flavor2 = flavors[1]['id']
            body = {'resize': {'flavorRef': flavor2}}
            self.api.post_server_action(server['id'], body)
            # The server should go to ERROR state with a fault record and
            # the API should still be showing the server from the source cell
            # because the instance mapping was not updated.
            server = self._wait_for_server_parameter(
                self.admin_api, server,
                {'status': 'ERROR', 'OS-EXT-STS:task_state': None})
        # The migration should be in 'error' status.
        self._wait_for_migration_status(server, ['error'])
        # Assert a fault was recorded.
        self.assertIn('fault', server)
        self.assertIn('Connection to the hypervisor is broken',
                      server['fault']['message'])
        # The instance in the target cell DB should have been hard-deleted.
        self._assert_instance_not_in_cell('cell2', server['id'])
        # Assert that there is only one volume attachment for the server, i.e.
        # the one in the target cell was deleted.
        self.assertEqual(1, self._count_volume_attachments(server['id']),
                         self.cinder.volume_to_attachment)
        # Assert that migration-based allocations were properly reverted.
        self._assert_allocation_revert_on_fail(server)
        # Now hard reboot the server in the source cell and it should go back
        # to ACTIVE.
        self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now retry the resize without the fault in the target host to make
        # sure things are OK (no duplicate entry errors in the target DB).
        self.api.post_server_action(server['id'], body)
        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')

    def _assert_instance_not_in_cell(self, cell_name, server_id):
        """Asserts the instance is hard-deleted from the given cell DB."""
        cell = self.cell_mappings[cell_name]
        # read_deleted='yes' so even a soft-deleted record would be found;
        # we expect the record to be gone entirely.
        ctxt = nova_context.get_admin_context(read_deleted='yes')
        with nova_context.target_cell(ctxt, cell) as cctxt:
            self.assertRaises(
                exception.InstanceNotFound,
                objects.Instance.get_by_uuid, cctxt, server_id)

    def _assert_allocation_revert_on_fail(self, server):
        """Asserts allocations moved back to the instance after a failed
        cross-cell resize (migration-based allocations were reverted).
        """
        # Since this happens in MigrationTask.rollback in conductor, we need
        # to wait for something which happens after that, which is the
        # ComputeTaskManager._cold_migrate method sending the
        # compute_task.migrate_server.error event.
        fake_notifier.wait_for_versioned_notifications(
            'compute_task.migrate_server.error')
        mig_uuid = self.get_migration_uuid_for_instance(server['id'])
        mig_allocs = self._get_allocations_by_server_uuid(mig_uuid)
        self.assertEqual({}, mig_allocs)
        source_rp_uuid = self._get_provider_uuid_by_host(
            server['OS-EXT-SRV-ATTR:host'])
        server_allocs = self._get_allocations_by_server_uuid(server['id'])
        volume_backed = False if server['image'] else True
        self.assertFlavorMatchesAllocation(
            server['flavor'], server_allocs[source_rp_uuid]['resources'],
            volume_backed=volume_backed)

    def test_prep_snapshot_based_resize_at_source_destroy_fails(self):
        """Negative test where prep_snapshot_based_resize_at_source fails
        destroying the guest for the non-volume backed server and asserts
        resources are rolled back.
        """
        # Create a non-volume backed server for the snapshot flow.
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]
        server = self._create_server(flavor1)
        # Now mock out the snapshot method on the source host to fail
        # during _prep_snapshot_based_resize_at_source and then resize
        # the server.
        source_host = server['OS-EXT-SRV-ATTR:host']
        error = exception.HypervisorUnavailable(host=source_host)
        with mock.patch.object(self.computes[source_host].driver, 'destroy',
                               side_effect=error):
            flavor2 = flavors[1]['id']
            body = {'resize': {'flavorRef': flavor2}}
            self.api.post_server_action(server['id'], body)
            # The server should go to ERROR state with a fault record and
            # the API should still be showing the server from the source cell
            # because the instance mapping was not updated.
            server = self._wait_for_server_parameter(
                self.admin_api, server,
                {'status': 'ERROR', 'OS-EXT-STS:task_state': None})
        # The migration should be in 'error' status.
        self._wait_for_migration_status(server, ['error'])
        # Assert a fault was recorded.
        self.assertIn('fault', server)
        self.assertIn('Connection to the hypervisor is broken',
                      server['fault']['message'])
        # The instance in the target cell DB should have been hard-deleted.
        self._assert_instance_not_in_cell('cell2', server['id'])
        # Assert that migration-based allocations were properly reverted.
        self._assert_allocation_revert_on_fail(server)
        # Now hard reboot the server in the source cell and it should go back
        # to ACTIVE.
        self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now retry the resize without the fault in the target host to make
        # sure things are OK (no duplicate entry errors in the target DB).
        self.api.post_server_action(server['id'], body)
        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
import mock
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler import weights
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
from nova import utils
class HostNameWeigher(weights.BaseHostWeigher):
    """Static weigher that prefers host1 so scheduling is deterministic."""
    # TestMultiCellMigrate creates host1 in cell1 and host2 in cell2.
    # Something about migrating from host1 to host2 teases out failures
    # which probably has to do with cell1 being the default cell DB in
    # our base test class setup, so prefer host1 to make the tests
    # deterministic.
    _weights = {'host1': 100, 'host2': 50}

    def _weigh_object(self, host_state, weight_properties):
        """Returns the static weight for the host.

        :param host_state: HostState whose ``host`` name is looked up
        :param weight_properties: unused request properties
        :returns: the configured weight, or 0 for unknown hosts
        """
        # Any undefined host gets no weight.
        return self._weights.get(host_state.host, 0)
class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests for cross-cell cold migration (resize)"""
NUMBER_OF_CELLS = 2
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order during server create.
self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler')
super(TestMultiCellMigrate, self).setUp()
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
self._enable_cross_cell_resize()
self.created_images = [] # list of image IDs created during resize
# Adjust the polling interval and timeout for long RPC calls.
self.flags(rpc_response_timeout=1)
self.flags(long_rpc_timeout=60)
# Set up 2 compute services in different cells
self.host_to_cell_mappings = {
'host1': 'cell1', 'host2': 'cell2'}
for host in sorted(self.host_to_cell_mappings):
cell_name = self.host_to_cell_mappings[host]
# Start the compute service on the given host in the given cell.
self._start_compute(host, cell_name=cell_name)
# Create an aggregate where the AZ name is the cell name.
agg_id = self._create_aggregate(
cell_name, availability_zone=cell_name)
# Add the host to the aggregate.
body = {'add_host': {'host': host}}
self.admin_api.post_aggregate_action(agg_id, body)
def _enable_cross_cell_resize(self):
# Enable cross-cell resize policy since it defaults to not allow
# anyone to perform that type of operation. For these tests we'll
# just allow admins to perform cross-cell resize.
# TODO(mriedem): Uncomment this when the policy rule is added and
# used in the compute API _allow_cross_cell_resize method. For now
# we just stub that method to return True.
# self.policy_fixture.set_rules({
# servers_policies.CROSS_CELL_RESIZE:
# base_policies.RULE_ADMIN_API},
# overwrite=False)
self.stub_out('nova.compute.api.API._allow_cross_cell_resize',
lambda *a, **kw: True)
def assertFlavorMatchesAllocation(self, flavor, allocation,
volume_backed=False):
self.assertEqual(flavor['vcpus'], allocation['VCPU'])
self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
# Volume-backed instances won't have DISK_GB allocations.
if volume_backed:
self.assertNotIn('DISK_GB', allocation)
else:
self.assertEqual(flavor['disk'], allocation['DISK_GB'])
def assert_instance_fields_match_flavor(self, instance, flavor):
self.assertEqual(instance.memory_mb, flavor['ram'])
self.assertEqual(instance.vcpus, flavor['vcpus'])
self.assertEqual(instance.root_gb, flavor['disk'])
self.assertEqual(
instance.ephemeral_gb, flavor['OS-FLV-EXT-DATA:ephemeral'])
def _count_volume_attachments(self, server_id):
attachment_ids = self.cinder.attachment_ids_for_instance(server_id)
return len(attachment_ids)
def assert_quota_usage(self, expected_num_instances):
limits = self.api.get_limits()['absolute']
self.assertEqual(expected_num_instances, limits['totalInstancesUsed'])
def _create_server(self, flavor, volume_backed=False):
"""Creates a server and waits for it to be ACTIVE
:param flavor: dict form of the flavor to use
:param volume_backed: True if the server should be volume-backed
:returns: server dict response from the GET /servers/{server_id} API
"""
# Provide a VIF tag for the pre-existing port. Since VIF tags are
# stored in the virtual_interfaces table in the cell DB, we want to
# make sure those survive the resize to another cell.
networks = [{
'port': self.neutron.port_1['id'],
'tag': 'private'
}]
image_uuid = fake_image.get_valid_image_id()
server = self._build_minimal_create_server_request(
self.api, 'test_cross_cell_resize',
image_uuid=image_uuid,
flavor_id=flavor['id'],
networks=networks)
# Put a tag on the server to make sure that survives the resize.
server['tags'] = ['test']
if volume_backed:
bdms = [{
'boot_index': 0,
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
'source_type': 'volume',
'destination_type': 'volume',
'tag': 'root'
}]
server['block_device_mapping_v2'] = bdms
# We don't need the imageRef for volume-backed servers.
server.pop('imageRef', None)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
# For volume-backed make sure there is one attachment to start.
if volume_backed:
self.assertEqual(1, self._count_volume_attachments(server['id']),
self.cinder.volume_to_attachment)
return server
def stub_image_create(self):
"""Stubs the _FakeImageService.create method to track created images"""
original_create = self.image_service.create
def image_create_snooper(*args, **kwargs):
image = original_create(*args, **kwargs)
self.created_images.append(image['id'])
return image
_p = mock.patch.object(
self.image_service, 'create', side_effect=image_create_snooper)
_p.start()
self.addCleanup(_p.stop)
def _resize_and_validate(self, volume_backed=False, stopped=False,
target_host=None):
"""Creates and resizes the server to another cell. Validates various
aspects of the server and its related records (allocations, migrations,
actions, VIF tags, etc).
:param volume_backed: True if the server should be volume-backed, False
if image-backed.
:param stopped: True if the server should be stopped prior to resize,
False if the server should be ACTIVE
:param target_host: If not None, triggers a cold migration to the
specified host.
:returns: tuple of:
- server response object
- source compute node resource provider uuid
- target compute node resource provider uuid
- old flavor
- new flavor
"""
# Create the server.
flavors = self.api.get_flavors()
old_flavor = flavors[0]
server = self._create_server(old_flavor, volume_backed=volume_backed)
original_host = server['OS-EXT-SRV-ATTR:host']
image_uuid = None if volume_backed else server['image']['id']
# Our HostNameWeigher ensures the server starts in cell1, so we expect
# the server AZ to be cell1 as well.
self.assertEqual('cell1', server['OS-EXT-AZ:availability_zone'])
if stopped:
# Stop the server before resizing it.
self.api.post_server_action(server['id'], {'os-stop': None})
self._wait_for_state_change(self.api, server, 'SHUTOFF')
# Before resizing make sure quota usage is only 1 for total instances.
self.assert_quota_usage(expected_num_instances=1)
if target_host:
# Cold migrate the server to the target host.
new_flavor = old_flavor # flavor does not change for cold migrate
body = {'migrate': {'host': target_host}}
expected_host = target_host
else:
# Resize it which should migrate the server to the host in the
# other cell.
new_flavor = flavors[1]
body = {'resize': {'flavorRef': new_flavor['id']}}
expected_host = 'host1' if original_host == 'host2' else 'host2'
self.stub_image_create()
self.api.post_server_action(server['id'], body)
# Wait for the server to be resized and then verify the host has
# changed to be the host in the other cell.
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
# Assert that the instance is only listed one time from the API (to
# make sure it's not listed out of both cells).
# Note that we only get one because the DB API excludes hidden
# instances by default (see instance_get_all_by_filters_sort).
servers = self.api.get_servers()
self.assertEqual(1, len(servers),
'Unexpected number of servers: %s' % servers)
self.assertEqual(expected_host, servers[0]['OS-EXT-SRV-ATTR:host'])
# And that there is only one migration record.
migrations = self.api.api_get(
'/os-migrations?instance_uuid=%s' % server['id']
).body['migrations']
self.assertEqual(1, len(migrations),
'Unexpected number of migrations records: %s' %
migrations)
migration = migrations[0]
self.assertEqual('finished', migration['status'])
# There should be at least two actions, one for create and one for the
# resize. There will be a third action if the server was stopped.
actions = self.api.api_get(
'/servers/%s/os-instance-actions' % server['id']
).body['instanceActions']
expected_num_of_actions = 3 if stopped else 2
self.assertEqual(expected_num_of_actions, len(actions), actions)
# Each action should have events (make sure these were copied from
# the source cell to the target cell).
for action in actions:
detail = self.api.api_get(
'/servers/%s/os-instance-actions/%s' % (
server['id'], action['request_id'])).body['instanceAction']
self.assertNotEqual(0, len(detail['events']), detail)
# The tag should still be present on the server.
self.assertEqual(1, len(server['tags']),
'Server tags not found in target cell.')
self.assertEqual('test', server['tags'][0])
# Confirm the source node has allocations for the old flavor and the
# target node has allocations for the new flavor.
source_rp_uuid = self._get_provider_uuid_by_host(original_host)
# The source node allocations should be on the migration record.
source_allocations = self._get_allocations_by_provider_uuid(
source_rp_uuid)[migration['uuid']]['resources']
self.assertFlavorMatchesAllocation(
old_flavor, source_allocations, volume_backed=volume_backed)
target_rp_uuid = self._get_provider_uuid_by_host(expected_host)
# The target node allocations should be on the instance record.
target_allocations = self._get_allocations_by_provider_uuid(
target_rp_uuid)[server['id']]['resources']
self.assertFlavorMatchesAllocation(
new_flavor, target_allocations, volume_backed=volume_backed)
# The instance, in the target cell DB, should have the old and new
# flavor stored with it with the values we expect at this point.
target_cell_name = self.host_to_cell_mappings[expected_host]
self.assertEqual(
target_cell_name, server['OS-EXT-AZ:availability_zone'])
target_cell = self.cell_mappings[target_cell_name]
admin_context = nova_context.get_admin_context()
with nova_context.target_cell(admin_context, target_cell) as cctxt:
inst = objects.Instance.get_by_uuid(
cctxt, server['id'], expected_attrs=['flavor'])
self.assertIsNotNone(
inst.old_flavor,
'instance.old_flavor not saved in target cell')
self.assertIsNotNone(
inst.new_flavor,
'instance.new_flavor not saved in target cell')
self.assertEqual(inst.flavor.flavorid, inst.new_flavor.flavorid)
if target_host: # cold migrate so flavor does not change
self.assertEqual(
inst.flavor.flavorid, inst.old_flavor.flavorid)
else:
self.assertNotEqual(
inst.flavor.flavorid, inst.old_flavor.flavorid)
self.assertEqual(old_flavor['id'], inst.old_flavor.flavorid)
self.assertEqual(new_flavor['id'], inst.new_flavor.flavorid)
# Assert the ComputeManager._set_instance_info fields
# are correct after the resize.
self.assert_instance_fields_match_flavor(inst, new_flavor)
# The availability_zone field in the DB should also be updated.
self.assertEqual(target_cell_name, inst.availability_zone)
# Assert the VIF tag was carried through to the target cell DB.
interface_attachments = self.api.get_port_interfaces(server['id'])
self.assertEqual(1, len(interface_attachments))
self.assertEqual('private', interface_attachments[0]['tag'])
if volume_backed:
# Assert the BDM tag was carried through to the target cell DB.
volume_attachments = self.api.get_server_volumes(server['id'])
self.assertEqual(1, len(volume_attachments))
self.assertEqual('root', volume_attachments[0]['tag'])
# Make sure the guest is no longer tracked on the source node.
source_guest_uuids = (
self.computes[original_host].manager.driver.list_instance_uuids())
self.assertNotIn(server['id'], source_guest_uuids)
# And the guest is on the target node hypervisor.
target_guest_uuids = (
self.computes[expected_host].manager.driver.list_instance_uuids())
self.assertIn(server['id'], target_guest_uuids)
# The source hypervisor continues to report usage in the hypervisors
# API because even though the guest was destroyed there, the instance
# resources are still claimed on that node in case the user reverts.
self.assert_hypervisor_usage(source_rp_uuid, old_flavor, volume_backed)
# The new flavor should show up with resource usage on the target host.
self.assert_hypervisor_usage(target_rp_uuid, new_flavor, volume_backed)
# While we have a copy of the instance in each cell database make sure
# that quota usage is only reporting 1 (because one is hidden).
self.assert_quota_usage(expected_num_instances=1)
# For a volume-backed server, at this point there should be two volume
# attachments for the instance: one tracked in the source cell and
# one in the target cell.
if volume_backed:
self.assertEqual(2, self._count_volume_attachments(server['id']),
self.cinder.volume_to_attachment)
# Assert the expected power state.
expected_power_state = 4 if stopped else 1
self.assertEqual(
expected_power_state, server['OS-EXT-STS:power_state'],
"Unexpected power state after resize.")
# For an image-backed server, a snapshot image should have been created
# and then deleted during the resize.
if volume_backed:
self.assertEqual('', server['image'])
self.assertEqual(
0, len(self.created_images),
"Unexpected image create during volume-backed resize")
else:
# The original image for the server shown in the API should not
# have changed even if a snapshot was used to create the guest
# on the dest host.
self.assertEqual(image_uuid, server['image']['id'])
self.assertEqual(
1, len(self.created_images),
"Unexpected number of images created for image-backed resize")
# Make sure the temporary snapshot image was deleted; we use the
# compute images proxy API here which is deprecated so we force the
# microversion to 2.1.
with utils.temporary_mutation(self.api, microversion='2.1'):
self.api.api_get('/images/%s' % self.created_images[0],
check_response_status=[404])
return server, source_rp_uuid, target_rp_uuid, old_flavor, new_flavor
    def test_resize_confirm_image_backed(self):
        """Creates an image-backed server in one cell and resizes it to the
        host in the other cell. The resize is confirmed.

        Delegates all of the heavy lifting (and assertions) to
        _resize_and_validate with its default arguments.
        """
        self._resize_and_validate()
        # TODO(mriedem): Confirm the resize and make assertions.
    def test_resize_revert_volume_backed(self):
        """Tests a volume-backed resize to another cell where the resize
        is reverted back to the original source cell.

        Delegates setup and validation to _resize_and_validate.
        """
        self._resize_and_validate(volume_backed=True)
        # TODO(mriedem): Revert the resize and make assertions.
    def test_delete_while_in_verify_resize_status(self):
        """Tests that when deleting a server in VERIFY_RESIZE status, the
        data is cleaned from both the source and target cell.
        """
        server = self._resize_and_validate()[0]
        self.api.delete_server(server['id'])
        self._wait_until_deleted(server)
        # Now list servers to make sure it doesn't show up from the source cell
        servers = self.api.get_servers()
        self.assertEqual(0, len(servers), servers)
        # FIXME(mriedem): Need to cleanup from source cell in API method
        # _confirm_resize_on_deleting(). The above check passes because the
        # instance is still hidden in the source cell so the API filters it
        # out.
        # Work out which host/cell was the source of the resize so we can
        # look at its database directly.
        target_host = server['OS-EXT-SRV-ATTR:host']
        source_host = 'host1' if target_host == 'host2' else 'host2'
        source_cell = self.cell_mappings[
            self.host_to_cell_mappings[source_host]]
        ctxt = nova_context.get_admin_context()
        with nova_context.target_cell(ctxt, source_cell) as cctxt:
            # Once the API is fixed this should raise InstanceNotFound.
            instance = objects.Instance.get_by_uuid(cctxt, server['id'])
            self.assertTrue(instance.hidden)
def test_cold_migrate_target_host_in_other_cell(self):
"""Tests cold migrating to a target host in another cell. This is
mostly just to ensure the API does not restrict the target host to
the source cell when cross-cell resize is allowed by policy.
"""
# _resize_and_validate creates the server on host1 which is in cell1.
# To make things interesting, start a third host but in cell1 so we can
# be sure the requested host from cell2 is honored.
self._start_compute(
'host3', cell_name=self.host_to_cell_mappings['host1'])
self._resize_and_validate(target_host='host2')
# TODO(mriedem): Test cross-cell list where the source cell has two
# hosts so the CrossCellWeigher picks the other host in the source cell
# and we do a traditional resize. Add a variant on this where the flavor
# being resized to is only available, via aggregate, on the host in the
# other cell so the CrossCellWeigher is overruled by the filters.
# TODO(mriedem): Test a bunch of rollback scenarios.
# TODO(mriedem): Test re-scheduling when the first host fails the
# resize_claim and a subsequent alternative host works, and also the
# case that all hosts fail the resize_claim.
# TODO(mriedem): Test cross-cell anti-affinity group assumptions from
# scheduler utils setup_instance_group where it assumes moves are within
# the same cell, so:
# 0. create 2 hosts in cell1 and 1 host in cell2
# 1. create two servers in an anti-affinity group in cell1
# 2. migrate one server to cell2
# 3. migrate the other server to cell2 - this should fail during scheduling
# because there is already a server from the anti-affinity group on the
# host in cell2 but setup_instance_group code may not catch it.
# TODO(mriedem): Perform a resize with at-capacity computes, meaning that
# when we revert we can only fit the instance with the old flavor back
# onto the source host in the source cell.
    def test_resize_confirm_from_stopped(self):
        """Tests resizing and confirming a server that was initially stopped
        so it should remain stopped through the resize.

        The stopped=True flag makes _resize_and_validate stop the server
        before resizing and assert the SHUTDOWN power state afterward.
        """
        self._resize_and_validate(volume_backed=True, stopped=True)
        # TODO(mriedem): Confirm the resize and assert the guest remains off
    def test_finish_snapshot_based_resize_at_dest_spawn_fails(self):
        """Negative test where the driver spawn fails on the dest host during
        finish_snapshot_based_resize_at_dest which triggers a rollback of the
        instance data in the target cell. Furthermore, the test will hard
        reboot the server in the source cell to recover it from ERROR status.
        """
        # Create a volume-backed server. This is more interesting for rollback
        # testing to make sure the volume attachments in the target cell were
        # cleaned up on failure.
        flavors = self.api.get_flavors()
        server = self._create_server(flavors[0], volume_backed=True)
        # Now mock out the spawn method on the destination host to fail
        # during _finish_snapshot_based_resize_at_dest_spawn and then resize
        # the server.
        error = exception.HypervisorUnavailable(host='host2')
        with mock.patch.object(self.computes['host2'].driver, 'spawn',
                               side_effect=error):
            flavor2 = flavors[1]['id']
            body = {'resize': {'flavorRef': flavor2}}
            self.api.post_server_action(server['id'], body)
            # The server should go to ERROR state with a fault record and
            # the API should still be showing the server from the source cell
            # because the instance mapping was not updated.
            server = self._wait_for_server_parameter(
                self.admin_api, server,
                {'status': 'ERROR', 'OS-EXT-STS:task_state': None})
            # The migration should be in 'error' status.
            self._wait_for_migration_status(server, ['error'])
            # Assert a fault was recorded.
            self.assertIn('fault', server)
            # The fault message comes from the HypervisorUnavailable error
            # injected above.
            self.assertIn('Connection to the hypervisor is broken',
                          server['fault']['message'])
            # The instance in the target cell DB should have been hard-deleted.
            self._assert_instance_not_in_cell('cell2', server['id'])
            # Assert that there is only one volume attachment for the server,
            # i.e. the one in the target cell was deleted.
            self.assertEqual(1, self._count_volume_attachments(server['id']),
                             self.cinder.volume_to_attachment)
            # Assert that migration-based allocations were properly reverted.
            self._assert_allocation_revert_on_fail(server)
        # Now hard reboot the server in the source cell and it should go back
        # to ACTIVE.
        self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now retry the resize without the fault in the target host to make
        # sure things are OK (no duplicate entry errors in the target DB).
        self.api.post_server_action(server['id'], body)
        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
def _assert_instance_not_in_cell(self, cell_name, server_id):
cell = self.cell_mappings[cell_name]
ctxt = nova_context.get_admin_context(read_deleted='yes')
with nova_context.target_cell(ctxt, cell) as cctxt:
self.assertRaises(
exception.InstanceNotFound,
objects.Instance.get_by_uuid, cctxt, server_id)
def _assert_allocation_revert_on_fail(self, server):
# Since this happens in MigrationTask.rollback in conductor, we need
# to wait for something which happens after that, which is the
# ComputeTaskManager._cold_migrate method sending the
# compute_task.migrate_server.error event.
fake_notifier.wait_for_versioned_notifications(
'compute_task.migrate_server.error')
mig_uuid = self.get_migration_uuid_for_instance(server['id'])
mig_allocs = self._get_allocations_by_server_uuid(mig_uuid)
self.assertEqual({}, mig_allocs)
source_rp_uuid = self._get_provider_uuid_by_host(
server['OS-EXT-SRV-ATTR:host'])
server_allocs = self._get_allocations_by_server_uuid(server['id'])
volume_backed = False if server['image'] else True
self.assertFlavorMatchesAllocation(
server['flavor'], server_allocs[source_rp_uuid]['resources'],
volume_backed=volume_backed)
    def test_prep_snapshot_based_resize_at_source_destroy_fails(self):
        """Negative test where prep_snapshot_based_resize_at_source fails
        destroying the guest for the non-volume backed server and asserts
        resources are rolled back.
        """
        # Create a non-volume backed server for the snapshot flow.
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]
        server = self._create_server(flavor1)
        # Now mock out the snapshot method on the source host to fail
        # during _prep_snapshot_based_resize_at_source and then resize
        # the server.
        source_host = server['OS-EXT-SRV-ATTR:host']
        error = exception.HypervisorUnavailable(host=source_host)
        with mock.patch.object(self.computes[source_host].driver, 'destroy',
                               side_effect=error):
            flavor2 = flavors[1]['id']
            body = {'resize': {'flavorRef': flavor2}}
            self.api.post_server_action(server['id'], body)
            # The server should go to ERROR state with a fault record and
            # the API should still be showing the server from the source cell
            # because the instance mapping was not updated.
            server = self._wait_for_server_parameter(
                self.admin_api, server,
                {'status': 'ERROR', 'OS-EXT-STS:task_state': None})
            # The migration should be in 'error' status.
            self._wait_for_migration_status(server, ['error'])
            # Assert a fault was recorded.
            self.assertIn('fault', server)
            # The fault message comes from the injected HypervisorUnavailable.
            self.assertIn('Connection to the hypervisor is broken',
                          server['fault']['message'])
            # The instance in the target cell DB should have been hard-deleted.
            self._assert_instance_not_in_cell('cell2', server['id'])
            # Assert that migration-based allocations were properly reverted.
            self._assert_allocation_revert_on_fail(server)
        # Now hard reboot the server in the source cell and it should go back
        # to ACTIVE.
        self.api.post_server_action(server['id'], {'reboot': {'type': 'HARD'}})
        self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
        # Now retry the resize without the fault in the target host to make
        # sure things are OK (no duplicate entry errors in the target DB).
        self.api.post_server_action(server['id'], body)
        self._wait_for_state_change(self.admin_api, server, 'VERIFY_RESIZE')
class SamplePlayer:
    """
    Sample Player class: a movable entity with lives and a simple jump
    state machine, rendered as a single character on a clge screen.
    """
    def __init__(self, xpos, ypos, step, lives, symbol="#"):
        """
        Function which inits a Sample Player
        :param xpos: X position of the Player
        :param ypos: Y position of the Player
        :param step: How many steps should the Player do in move_* functions
        :param lives: How many lives has the player
        :param symbol: Symbol used to render the player
        """
        self.xpos = xpos
        self.ypos = ypos
        self.step = step
        self.lives = lives
        # Buffered copy of the spawn position; bufferx is refreshed by the
        # horizontal moves, buffery keeps the original row.
        self.bufferx = self.xpos
        self.buffery = self.ypos
        # Jump state flag consumed by jump_algo().
        self.jump = True
        self.symbol = symbol
    def move_up(self):
        """
        Move the Player up (decreases ypos by ``step``)
        :return: None
        """
        self.ypos -= self.step
    def move_down(self):
        """
        Move the Player down (increases ypos by ``step``)
        :return: None
        """
        self.ypos += self.step
    def move_right(self):
        """
        Move the Player right and refresh the x buffer
        :return: None
        """
        self.xpos += self.step
        self.bufferx = self.xpos
    def move_left(self):
        """
        Move the Player left and refresh the x buffer
        :return: None
        """
        self.xpos -= self.step
        self.bufferx = self.xpos
    def set_step(self, step):
        """
        Set the step variable for Player object
        :param step: int, step
        :return: None
        """
        self.step = step
    def get_lives(self) -> int:
        """
        Return how many lives the player has
        :return: Player lives
        """
        return self.lives
    def is_alive(self) -> bool:
        """
        Return the Player's live status
        :return: True (if ``lives`` is greater than 0), otherwise False
        """
        if self.lives <= 0:
            return False
        else:
            return True
    def jump_algo(self, level):
        """
        Function which should be called every frame to let the Player jump
        :param level: How high the player has to jump
        :return: None
        """
        # NOTE(review): assumes screen coordinates where y grows downward,
        # so ypos > level means "below the apex" -- TODO confirm.
        if self.ypos > level and self.jump:
            self.move_up()
        elif self.ypos == level:
            # Apex reached: stop ascending and start falling.
            self.jump = False
            self.move_down()
        else:
            # Past the apex / falling: re-arm the jump flag.
            self.jump = True
    def check_if_on_tile(self, tile) -> bool:
        """
        Function which checks whether the player stands on a tile(coming soon)
        :param tile: Tile object -> Polygon child
        :return: True/False
        """
        # "On" a tile means the same column, one row directly above it.
        if self.xpos == tile.xpos and self.ypos == tile.ypos - 1:
            return True
        else:
            return False
    def add_to_screen(self, screen_object):
        """
        Add the Player in the rendering queue
        :param screen_object: clge.Screen object
        :return: None
        """
        # NOTE(review): the trailing " | ..." text below is dataset residue,
        # not Python source.
        screen_object.add_object(self.xpos, self.ypos, self.symbol) | clge/plugs/DefaultAssets/sample_player.py | class SamplePlayer:
"""
Sample Player class
"""
def __init__(self, xpos, ypos, step, lives, symbol="#"):
"""
Function which inits a Sample Player
:param xpos: X position of the Player
:param ypos: Y position of the Player
:param step: How many steps should the Player do in move_* functions
:param lives: How many lives has the player
:param symbol: Symbol used to render the player
"""
self.xpos = xpos
self.ypos = ypos
self.step = step
self.lives = lives
self.bufferx = self.xpos
self.buffery = self.ypos
self.jump = True
self.symbol = symbol
def move_up(self):
"""
Move the Player up
:return: None
"""
self.ypos -= self.step
def move_down(self):
"""
Move the Player down
:return: None
"""
self.ypos += self.step
def move_right(self):
"""
Move the Player right
:return: None
"""
self.xpos += self.step
self.bufferx = self.xpos
def move_left(self):
"""
Move the Player left
:return: None
"""
self.xpos -= self.step
self.bufferx = self.xpos
def set_step(self, step):
"""
Set the step variable for Player object
:param step: int, step
:return: None
"""
self.step = step
def get_lives(self):
"""
Function which return how many lives the player has
:return: Player lives
"""
return self.lives
def is_alive(self):
"""
Returns is Player live status
:return: True(If the lives-var contains a higher int than 0)/False
"""
if self.lives <= 0:
return False
else:
return True
    def jump_algo(self, level):
        """
        Function which should be called every frame to let the Player jump
        :param level: How high the player has to jump
        :return: None
        """
        # NOTE(review): assumes screen coordinates with y growing downward,
        # so ypos > level means "below the apex" -- TODO confirm.
        if self.ypos > level and self.jump:
            self.move_up()
        elif self.ypos == level:
            # Apex reached: stop ascending and start falling.
            self.jump = False
            self.move_down()
        else:
            # Past the apex / falling: re-arm the jump flag.
            self.jump = True
def check_if_on_tile(self, tile):
"""
Function which checks is the player is on a tile(coming soon)
:param tile: Tile object -> Polygon child
:return: True/False
"""
if self.xpos == tile.xpos and self.ypos == tile.ypos - 1:
return True
else:
return False
    def add_to_screen(self, screen_object):
        """
        Add the Player in the rendering queue
        :param screen_object: clge.Screen object
        :return: None
        """
        # NOTE(review): trailing " | ..." text below is dataset residue.
        screen_object.add_object(self.xpos, self.ypos, self.symbol) | 0.894424 | 0.673128
import battle
import jsonobject
import logging
import model
logger = logging.getLogger('kcaa.kcsapi.expedition')
class Expedition(model.KCAAObject):
    """Information about the current expedition (sortie) of a fleet.

    Tracks where the fleet is on the map, what the next cell holds, and
    whether the player has to interact (compass spin or route selection).
    """
    fleet_id = jsonobject.JSONProperty('fleet_id', value_type=int)
    """ID of the fleet which is going on expedition."""
    maparea_id = jsonobject.JSONProperty('maparea_id', value_type=int)
    """ID of the maparea."""
    map_id = jsonobject.JSONProperty('map_id', value_type=int)
    """ID of the map."""
    cell_boss = jsonobject.JSONProperty('cell_boss', value_type=int)
    """ID of the cell where a boss lives."""
    cell_id = jsonobject.JSONProperty('cell_id', value_type=int)
    """ID of the cell on the next move.

    Cell ID is assigned from 0 (the start cell).
    Note that this is deterministically available when a compass is presented.
    """
    is_terminal = jsonobject.JSONProperty('is_terminal', value_type=bool)
    """Whether the next cell is the terminal of the path."""
    needs_compass = jsonobject.JSONProperty('needs_compass', value_type=bool)
    """Whether needs a compass on the next move."""
    needs_active_selection = jsonobject.JSONProperty('needs_active_selection',
                                                     value_type=bool)
    """Whether needs an active selection from the player on the next move."""
    next_cell_selections = jsonobject.JSONProperty(
        'next_cell_selections', value_type=list, element_type=int)
    """Next cells selectable for the active selection."""
    event = jsonobject.JSONProperty('event', value_type=int)
    """Event that will happen in the cell."""
    EVENT_ITEM = 2
    EVENT_BATTLE = 4
    EVENT_BATTLE_BOSS = 5
    EVENT_ACTIVE_SELECTION = 6
    produced_item = jsonobject.JSONProperty('produced_item', value_type=int)
    """Item produced in the next cell."""
    PRODUCTION_NONE = 0

    @property
    def location_id(self):
        # (maparea, map, cell) triple uniquely locating the fleet on the map.
        return (self.maparea_id, self.map_id, self.cell_id)

    def update(self, api_name, request, response, objects, debug):
        """Refresh expedition state from a map start/next KCSAPI response."""
        super(Expedition, self).update(api_name, request, response, objects,
                                       debug)
        if api_name in ('/api_req_map/start',
                        '/api_req_map/next'):
            data = response.api_data
            if api_name == '/api_req_map/start':
                # Fresh sortie: remember which fleet went out and to where.
                self.fleet_id = int(request.api_deck_id)
                self.maparea_id = int(request.api_maparea_id)
                self.map_id = int(request.api_mapinfo_no)
                self.cell_boss = data.api_bosscell_no
                self.produced_item = Expedition.PRODUCTION_NONE
            else:
                self.produced_item = data.api_production_kind
            # api_rashin_id might represent the animation pattern of the
            # compass. Not useful here anyways.
            self.cell_id = data.api_no
            self.is_terminal = data.api_next == 0
            self.needs_compass = data.api_rashin_flg == 1
            self.event = data.api_event_id
            if (self.event == Expedition.EVENT_ACTIVE_SELECTION and
                    hasattr(data, 'api_select_route')):
                self.needs_active_selection = True
                self.next_cell_selections = (
                    data.api_select_route.api_select_cells)
            else:
                self.needs_active_selection = False
                self.next_cell_selections = None
            logger.debug('Next: {}-{}-{}'.format(
                self.maparea_id, self.map_id, self.cell_id))
            logger.debug('Boss: {}-{}-{}'.format(
                self.maparea_id, self.map_id, self.cell_boss))
            logger.debug('Event: {} (kind: {}, color: {})'.format(
                self.event, data.api_event_kind, data.api_color_no))
            logger.debug('Item produced: {}'.format(self.produced_item))
            logger.debug('Needs compass: {}'.format(self.needs_compass))
            logger.debug('Needs active selection: {}'.format(
                self.needs_active_selection))
            if self.needs_active_selection:
                logger.debug('  Selections: {}'.format(
                    self.next_cell_selections))
            # Other potentially interesting data:
            # - api_color_no: probably the color of the next cell after the
            #   exact event is revealed
            # - api_event_kind: additional info on the event?
            # - api_production_kind: probably the category of the found item
            # - api_enemy: enemy info (useful if submarines)
            # logger.debug('next: {}'.format(data.api_next))
            # logger.debug('rashin_flg (id): {} ({})'.format(
            #     data.api_rashin_flg, data.api_rashin_id))
            # if hasattr(data, 'api_enemy'):
            #     logger.debug('enemy : {}'.format(str(data.api_enemy)))
class ExpeditionResult(model.KCAAObject):
    """Result of the latest expedition battle."""
    result = jsonobject.JSONProperty('result', value_type=int)
    """Result of the battle."""
    got_ship = jsonobject.JSONProperty('got_ship', value_type=bool)
    """Whether got a ship as a reward."""
    new_ship_id = jsonobject.JSONProperty('new_ship_id', value_type=int)
    """Ship definition ID of the new ship."""
    num_obtained_items = jsonobject.JSONProperty('num_obtained_items',
                                                 value_type=int)
    """Number of items obtained as a reward."""
    first_cleared = jsonobject.JSONProperty('first_cleared', value_type=bool)
    """Whether first cleared."""

    def update(self, api_name, request, response, objects, debug):
        """Refresh battle-result state from a battleresult KCSAPI response."""
        super(ExpeditionResult, self).update(api_name, request, response,
                                             objects, debug)
        if api_name in ('/api_req_sortie/battleresult',
                        '/api_req_combined_battle/battleresult'):
            # Translate the win rank letter into the battle result enum.
            self.result = battle.Battle.get_result_for_win_rank(
                response.api_data.api_win_rank)
            self.got_ship = response.api_data.api_get_flag[1] == 1
            if self.got_ship:
                self.new_ship_id = response.api_data.api_get_ship.api_ship_id
            else:
                self.new_ship_id = None
            if hasattr(response.api_data, 'api_get_eventitem'):
                self.num_obtained_items = len(
                    response.api_data.api_get_eventitem)
                # api_get_eventitem
                # - api_id: item ID (maybe use item, ship, or equipment)
                # - api_type: 1 (use item), 2 (ship), 3 (equipment)
                # - api_value: amount
            else:
                self.num_obtained_items = 0
            # NOTE(review): trailing " | ..." text below is dataset residue.
            self.first_cleared = response.api_data.api_first_clear == 1 | server/kcaa/kcsapi/expedition.py |
import battle
import jsonobject
import logging
import model
logger = logging.getLogger('kcaa.kcsapi.expedition')
class Expedition(model.KCAAObject):
    """Information about the current expedition (sortie) of a fleet.

    Tracks where the fleet is on the map, what the next cell holds, and
    whether the player has to interact (compass spin or route selection).
    """
    fleet_id = jsonobject.JSONProperty('fleet_id', value_type=int)
    """ID of the fleet which is going on expedition."""
    maparea_id = jsonobject.JSONProperty('maparea_id', value_type=int)
    """ID of the maparea."""
    map_id = jsonobject.JSONProperty('map_id', value_type=int)
    """ID of the map."""
    cell_boss = jsonobject.JSONProperty('cell_boss', value_type=int)
    """ID of the cell where a boss lives."""
    cell_id = jsonobject.JSONProperty('cell_id', value_type=int)
    """ID of the cell on the next move.

    Cell ID is assigned from 0 (the start cell).
    Note that this is deterministically available when a compass is presented.
    """
    is_terminal = jsonobject.JSONProperty('is_terminal', value_type=bool)
    """Whether the next cell is the terminal of the path."""
    needs_compass = jsonobject.JSONProperty('needs_compass', value_type=bool)
    """Whether needs a compass on the next move."""
    needs_active_selection = jsonobject.JSONProperty('needs_active_selection',
                                                     value_type=bool)
    """Whether needs an active selection from the player on the next move."""
    next_cell_selections = jsonobject.JSONProperty(
        'next_cell_selections', value_type=list, element_type=int)
    """Next cells selectable for the active selection."""
    event = jsonobject.JSONProperty('event', value_type=int)
    """Event that will happen in the cell."""
    EVENT_ITEM = 2
    EVENT_BATTLE = 4
    EVENT_BATTLE_BOSS = 5
    EVENT_ACTIVE_SELECTION = 6
    produced_item = jsonobject.JSONProperty('produced_item', value_type=int)
    """Item produced in the next cell."""
    PRODUCTION_NONE = 0

    @property
    def location_id(self):
        # (maparea, map, cell) triple uniquely locating the fleet on the map.
        return (self.maparea_id, self.map_id, self.cell_id)

    def update(self, api_name, request, response, objects, debug):
        """Refresh expedition state from a map start/next KCSAPI response."""
        super(Expedition, self).update(api_name, request, response, objects,
                                       debug)
        if api_name in ('/api_req_map/start',
                        '/api_req_map/next'):
            data = response.api_data
            if api_name == '/api_req_map/start':
                # Fresh sortie: remember which fleet went out and to where.
                self.fleet_id = int(request.api_deck_id)
                self.maparea_id = int(request.api_maparea_id)
                self.map_id = int(request.api_mapinfo_no)
                self.cell_boss = data.api_bosscell_no
                self.produced_item = Expedition.PRODUCTION_NONE
            else:
                self.produced_item = data.api_production_kind
            # api_rashin_id might represent the animation pattern of the
            # compass. Not useful here anyways.
            self.cell_id = data.api_no
            self.is_terminal = data.api_next == 0
            self.needs_compass = data.api_rashin_flg == 1
            self.event = data.api_event_id
            if (self.event == Expedition.EVENT_ACTIVE_SELECTION and
                    hasattr(data, 'api_select_route')):
                self.needs_active_selection = True
                self.next_cell_selections = (
                    data.api_select_route.api_select_cells)
            else:
                self.needs_active_selection = False
                self.next_cell_selections = None
            logger.debug('Next: {}-{}-{}'.format(
                self.maparea_id, self.map_id, self.cell_id))
            logger.debug('Boss: {}-{}-{}'.format(
                self.maparea_id, self.map_id, self.cell_boss))
            logger.debug('Event: {} (kind: {}, color: {})'.format(
                self.event, data.api_event_kind, data.api_color_no))
            logger.debug('Item produced: {}'.format(self.produced_item))
            logger.debug('Needs compass: {}'.format(self.needs_compass))
            logger.debug('Needs active selection: {}'.format(
                self.needs_active_selection))
            if self.needs_active_selection:
                logger.debug('  Selections: {}'.format(
                    self.next_cell_selections))
            # Other potentially interesting data:
            # - api_color_no: probably the color of the next cell after the
            #   exact event is revealed
            # - api_event_kind: additional info on the event?
            # - api_production_kind: probably the category of the found item
            # - api_enemy: enemy info (useful if submarines)
            # logger.debug('next: {}'.format(data.api_next))
            # logger.debug('rashin_flg (id): {} ({})'.format(
            #     data.api_rashin_flg, data.api_rashin_id))
            # if hasattr(data, 'api_enemy'):
            #     logger.debug('enemy : {}'.format(str(data.api_enemy)))
class ExpeditionResult(model.KCAAObject):
    """Result of the latest expedition battle."""
    result = jsonobject.JSONProperty('result', value_type=int)
    """Result of the battle."""
    got_ship = jsonobject.JSONProperty('got_ship', value_type=bool)
    """Whether got a ship as a reward."""
    new_ship_id = jsonobject.JSONProperty('new_ship_id', value_type=int)
    """Ship definition ID of the new ship."""
    num_obtained_items = jsonobject.JSONProperty('num_obtained_items',
                                                 value_type=int)
    """Number of items obtained as a reward."""
    first_cleared = jsonobject.JSONProperty('first_cleared', value_type=bool)
    """Whether first cleared."""

    def update(self, api_name, request, response, objects, debug):
        """Refresh battle-result state from a battleresult KCSAPI response."""
        super(ExpeditionResult, self).update(api_name, request, response,
                                             objects, debug)
        if api_name in ('/api_req_sortie/battleresult',
                        '/api_req_combined_battle/battleresult'):
            # Translate the win rank letter into the battle result enum.
            self.result = battle.Battle.get_result_for_win_rank(
                response.api_data.api_win_rank)
            self.got_ship = response.api_data.api_get_flag[1] == 1
            if self.got_ship:
                self.new_ship_id = response.api_data.api_get_ship.api_ship_id
            else:
                self.new_ship_id = None
            if hasattr(response.api_data, 'api_get_eventitem'):
                self.num_obtained_items = len(
                    response.api_data.api_get_eventitem)
                # api_get_eventitem
                # - api_id: item ID (maybe use item, ship, or equipment)
                # - api_type: 1 (use item), 2 (ship), 3 (equipment)
                # - api_value: amount
            else:
                self.num_obtained_items = 0
            # NOTE(review): trailing " | ..." text below is dataset residue.
            self.first_cleared = response.api_data.api_first_clear == 1 | 0.591133 | 0.169991
import outputters
import inputters
import psucontrol
import time
import yaml
import os, random, shlex, sys
def boot():
    """Show the bilingual (EN/CZ) boot banner on the attached display."""
    for first_row, second_row in (
            ("Time machine is", "starting ..."),
            ("Casostroj startuje", None),
            ("Time machine", "is ready ..."),
            ("Casostroj", "je pripraven ..."),
    ):
        display.cls()
        display.line(0, first_row)
        if second_row is not None:
            display.line(1, second_row)
        time.sleep(1)
    display.cls()
def ask_for_key(prompt='Casovy kod?'):
    """Show *prompt* on the display and read a key code from the user.

    :param prompt: text shown on the first display line
    :return: the string typed via the inputter
    """
    display.cls()
    display.line(0, prompt)
    display.goto(1, 0)
    return inputter.input_by_char()
def pairs(lst):
    """Split *lst* into consecutive chunks of at most two elements.

    The last chunk holds a single element when the length is odd; an empty
    input yields an empty list.

    :param lst: sliceable sequence to split
    :return: list of two-element (or final one-element) slices
    """
    # Iterative slicing replaces the original recursion, which consumed one
    # stack frame per two elements and could hit the recursion limit.
    return [lst[i:i + 2] for i in range(0, len(lst), 2)]
def display_message(message=[]):
    """Show a message two lines at a time, four seconds per screen.

    :param message: list of display lines (the mutable default is kept for
        call compatibility; it is never mutated here)
    """
    for screen in pairs(message):
        display.cls()
        display.line(0, screen[0])
        if len(screen) > 1:
            display.line(1, screen[1])
        time.sleep(4)
def get_destination():
    """
    Ask the user for a time code, allowing up to five wrong attempts.

    :return: the destination dict from ``time_destinations``, or None after
        five unknown codes in a row
    """
    dst = None
    retries = 5
    while dst is None:
        key = ask_for_key()
        try:
            dst = time_destinations[key]
        except KeyError:
            retries -= 1
            # Tell the user the code was not recognized before retrying.
            display.line(0, 'Spatny kod!')
            time.sleep(2)
            if retries <= 0:
                return None
    return(dst)
def random_file(root=''):
    """Return the path of a randomly chosen entry inside *root*."""
    chosen = random.choice(os.listdir(root))
    return os.path.join(root, chosen)
# --- Script entry: bring up the hardware, ask for a destination, play it. ---
# 'console' mode skips the real display/video output for local testing.
console = False
if len(sys.argv) == 2 and sys.argv[1] == 'console':
    console = True
# Block until the physical start button is pushed, then release the pin.
button = psucontrol.PushButton(27)
button.wait_for_push()
del button
psu = psucontrol.PSU(17)  # pin 11 on the connector
psu.turn_on()
time.sleep(0.5)
# Pick a random keyboard lighting mode (modes 6-9 are deliberately skipped).
kb_mode = random.choice(list(range(1, 6)) + list(range(10, 18)))
print("Keyboard mode {mode}".format(mode=kb_mode))
os.system('sudo python NotLinuxAjazzAK33RGB/ajazz.py --accept -d /dev/hidraw1 -v -l 5 -m {mode}'.format(mode=kb_mode))
display = outputters.get_outputter(console)
inputter = inputters.Inputter(display)
# safe_load avoids arbitrary-object construction from the YAML file and the
# context manager closes the handle; behaviour is unchanged for a config of
# plain mappings (custom YAML tags, if any, would now be rejected).
with open('destinations.yml', 'r') as config_file:
    time_destinations = yaml.safe_load(config_file)
if not console:
    boot()
if console:
    print(repr(time_destinations))
destination = get_destination()
if destination is not None:
    display.cls()
    display.line(0, destination['name'])
    display.line(1, ' ... jedeme ...')
    if not console:
        # os.system('tvservice --preferred')
        os.system('tvservice --explicit="CEA 4 HDMI"')
        # Play a random splash video, then the destination's own video.
        os.system('omxplayer -o hdmi {video}'.format(video=shlex.quote(random_file('video/splash'))))
        os.system('omxplayer -o hdmi {video}'.format(video=shlex.quote(destination['video'])))
        os.system('tvservice --off')
    if 'message' in destination:
        display_message(destination['message'])
display.cls(); display.line(0, "Time machine is"); display.line(1, "shutting down ..."); time.sleep(1)
time.sleep(2)
display.visibility(False)
del psu | timemachine.py | import outputters
import inputters
import psucontrol
import time
import yaml
import os, random, shlex, sys
def boot():
    """Show the bilingual (EN/CZ) boot banner on the attached display."""
    display.cls(); display.line(0, "Time machine is"); display.line(1, "starting ..."); time.sleep(1)
    display.cls(); display.line(0, "Casostroj startuje"); time.sleep(1)
    display.cls(); display.line(0, "Time machine"); display.line(1, "is ready ..."); time.sleep(1)
    display.cls(); display.line(0, "Casostroj"); display.line(1, "je pripraven ..."); time.sleep(1)
    display.cls()
def ask_for_key(prompt='Casovy kod?'):
    """
    Show *prompt* on the display and read a key code from the user.

    :param prompt: text shown on the first display line
    :return: the string typed via the inputter
    """
    display.cls()
    display.line(0, prompt)
    display.goto(1,0)
    return(inputter.input_by_char())
def pairs(lst):
    """Split *lst* into consecutive chunks of at most two elements.

    The last chunk holds a single element when the length is odd; an empty
    input yields an empty list.

    :param lst: sliceable sequence to split
    :return: list of two-element (or final one-element) slices
    """
    # Iterative slicing replaces the original recursion, which consumed one
    # stack frame per two elements and could hit the recursion limit.
    return [lst[i:i + 2] for i in range(0, len(lst), 2)]
def display_message(message=[]):
for p in pairs(message):
display.cls();
display.line(0, p[0]);
if len(p) > 1:
display.line(1, p[1])
time.sleep(4)
def get_destination():
dst = None
retries = 5
while dst is None:
key = ask_for_key()
try:
dst = time_destinations[key]
except KeyError:
retries -= 1
display.line(0, 'Spatny kod!')
time.sleep(2)
if retries <= 0:
return None
return(dst)
def random_file(root=''):
file = random.choice(os.listdir(root))
return os.path.join(root, file)
console = False
if len(sys.argv) == 2 and sys.argv[1] == 'console':
console = True
button = psucontrol.PushButton(27)
button.wait_for_push()
del button
psu = psucontrol.PSU(17) # pin 11 on the connector
psu.turn_on(); time.sleep(0.5)
kb_mode = random.choice(list(range(1,6)) + list(range(10,18)))
print("Keyboard mode {mode}".format(mode=kb_mode))
os.system('sudo python NotLinuxAjazzAK33RGB/ajazz.py --accept -d /dev/hidraw1 -v -l 5 -m {mode}'.format(mode=kb_mode))
display = outputters.get_outputter(console)
inputter = inputters.Inputter(display)
time_destinations = yaml.load(open('destinations.yml', 'r'))
if not console:
boot()
if console:
print(repr(time_destinations))
destination = get_destination()
if destination is not None:
display.cls()
display.line(0, destination['name'])
display.line(1, ' ... jedeme ...')
if not console:
#os.system('tvservice --preferred')
os.system('tvservice --explicit="CEA 4 HDMI"')
os.system('omxplayer -o hdmi {video}'.format(video=shlex.quote(random_file('video/splash'))))
os.system('omxplayer -o hdmi {video}'.format(video=shlex.quote(destination['video'])))
os.system('tvservice --off')
if 'message' in destination:
display_message(destination['message'])
display.cls(); display.line(0, "Time machine is"); display.line(1, "shutting down ..."); time.sleep(1)
time.sleep(2)
display.visibility(False)
del psu | 0.190197 | 0.119923 |
import json
from typing import ClassVar, Dict, Optional
from pydantic import BaseModel, Field
from emulation_system.compose_file_creator.input.hardware_models.hardware_model import (
EmulationLevelNotSupportedError,
HardwareModel,
)
from emulation_system.compose_file_creator.settings.config_file_settings import (
EmulationLevels,
Hardware,
)
class FirmwareSerialNumberModel(BaseModel):
"""Model for information needed to set a firmware emulator's serial number."""
env_var_name: str
model: str
version: str
class ProxyInfoModel(BaseModel):
"""Model to provide information needed to connect module to proxy."""
env_var_name: str
emulator_port: int
driver_port: int
class ModuleInputModel(HardwareModel):
"""Parent class of all Modules, Subclass of HardwareModel.
Used to group all modules together and distinguish them from robots.
"""
firmware_serial_number_info: ClassVar[Optional[FirmwareSerialNumberModel]] = Field(
alias="firmware-serial-number-info", allow_mutation=False
)
proxy_info: ClassVar[ProxyInfoModel] = Field(
alias="proxy-info", allow_mutation=False
)
def _get_firmware_serial_number_env_var(self) -> Dict[str, str]:
"""Builds firmware level serial number environment variable."""
if self.firmware_serial_number_info is None:
raise EmulationLevelNotSupportedError(self.emulation_level, self.hardware)
value = {
"serial_number": self.id,
"model": self.firmware_serial_number_info.model,
"version": self.firmware_serial_number_info.version,
}
if self.hardware in [Hardware.THERMOCYCLER_MODULE, Hardware.TEMPERATURE_MODULE]:
value.update(self.hardware_specific_attributes.dict())
return {self.firmware_serial_number_info.env_var_name: json.dumps(value)}
def _get_hardware_serial_number_env_var(self) -> Dict[str, str]:
"""Builds hardware level serial number environment variable."""
return {"SERIAL_NUMBER": self.id}
def get_serial_number_env_var(self) -> Dict[str, str]:
"""Builds serial number env var based off of emulation level."""
return (
self._get_firmware_serial_number_env_var()
if self.emulation_level == EmulationLevels.FIRMWARE
else self._get_hardware_serial_number_env_var()
)
@classmethod
def get_proxy_info_env_var(cls) -> Dict[str, str]:
"""Builds proxy info env var."""
value = {
"emulator_port": cls.proxy_info.emulator_port,
"driver_port": cls.proxy_info.driver_port,
}
return {cls.proxy_info.env_var_name: json.dumps(value)} | emulation_system/emulation_system/compose_file_creator/input/hardware_models/modules/module_model.py | import json
from typing import ClassVar, Dict, Optional
from pydantic import BaseModel, Field
from emulation_system.compose_file_creator.input.hardware_models.hardware_model import (
EmulationLevelNotSupportedError,
HardwareModel,
)
from emulation_system.compose_file_creator.settings.config_file_settings import (
EmulationLevels,
Hardware,
)
class FirmwareSerialNumberModel(BaseModel):
"""Model for information needed to set a firmware emulator's serial number."""
env_var_name: str
model: str
version: str
class ProxyInfoModel(BaseModel):
"""Model to provide information needed to connect module to proxy."""
env_var_name: str
emulator_port: int
driver_port: int
class ModuleInputModel(HardwareModel):
"""Parent class of all Modules, Subclass of HardwareModel.
Used to group all modules together and distinguish them from robots.
"""
firmware_serial_number_info: ClassVar[Optional[FirmwareSerialNumberModel]] = Field(
alias="firmware-serial-number-info", allow_mutation=False
)
proxy_info: ClassVar[ProxyInfoModel] = Field(
alias="proxy-info", allow_mutation=False
)
def _get_firmware_serial_number_env_var(self) -> Dict[str, str]:
"""Builds firmware level serial number environment variable."""
if self.firmware_serial_number_info is None:
raise EmulationLevelNotSupportedError(self.emulation_level, self.hardware)
value = {
"serial_number": self.id,
"model": self.firmware_serial_number_info.model,
"version": self.firmware_serial_number_info.version,
}
if self.hardware in [Hardware.THERMOCYCLER_MODULE, Hardware.TEMPERATURE_MODULE]:
value.update(self.hardware_specific_attributes.dict())
return {self.firmware_serial_number_info.env_var_name: json.dumps(value)}
def _get_hardware_serial_number_env_var(self) -> Dict[str, str]:
"""Builds hardware level serial number environment variable."""
return {"SERIAL_NUMBER": self.id}
def get_serial_number_env_var(self) -> Dict[str, str]:
"""Builds serial number env var based off of emulation level."""
return (
self._get_firmware_serial_number_env_var()
if self.emulation_level == EmulationLevels.FIRMWARE
else self._get_hardware_serial_number_env_var()
)
@classmethod
def get_proxy_info_env_var(cls) -> Dict[str, str]:
"""Builds proxy info env var."""
value = {
"emulator_port": cls.proxy_info.emulator_port,
"driver_port": cls.proxy_info.driver_port,
}
return {cls.proxy_info.env_var_name: json.dumps(value)} | 0.848533 | 0.181444 |
import pytest
from opencadd.structure.pocket import PocketBase
class TestPocketBase:
"""
Test PocketBase class methods.
"""
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted",
[
([1, 2, 3], None, [1, 2, 3], [None, None, None]),
(["1", "2", "_", "_"], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
(["1", "2", None, None], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
],
)
def test_format_residue_ids_and_ixs(
self, residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted
):
"""
Test formatting of user-input residue PDB IDs and residue indices.
"""
base_pocket = PocketBase()
residue_ids2, residue_ixs2 = base_pocket._format_residue_ids_and_ixs(
residue_ids, residue_ixs, ""
)
assert residue_ids2 == residue_ids_formatted
assert residue_ixs2 == residue_ixs_formatted
@pytest.mark.parametrize(
"residue_ids, residue_ixs",
[
([1, 2, 3], [None, 2, 3]), # Non-int-castable index (None)
([1, 2, 3], ["a", 2, 3]), # Non-int-castable index
([1, 1, 2], None), # Duplicated PDB IDs
([1, 2, 3], [1, 1, 2]), # Duplicated indices
],
)
def test_format_residue_ids_and_ixs_raises(self, residue_ids, residue_ixs):
"""
Test error handling when formatting user-input residue PDB IDs and
residue indices.
"""
with pytest.raises((ValueError, TypeError)):
base_pocket = PocketBase()
base_pocket._format_residue_ids_and_ixs(residue_ids, residue_ixs, "")
@pytest.mark.parametrize(
"residue_ids, residue_ixs, n_residues",
[
([101, None], [1, 2], 2),
([101, None], [1, 2], 2),
([101, None], [None, None], 2),
],
)
def test_residues(self, residue_ids, residue_ixs, n_residues):
"""
Test property residues.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket.residues.columns.to_list() == ["residue.id", "residue.ix"]
assert (
base_pocket.residues.index.to_list()
== base_pocket.residues.reset_index().index.to_list()
)
assert base_pocket.residues.dtypes.to_list() == ["Int32", "Int32"]
assert len(base_pocket.residues) == n_residues
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_id, residue_ix",
[
([101, None], [1, 2], 101, 1), # Residue ID+index exist
([101, None], [1, 2], 102, None), # Residue ID does not exist
([101, None], [None, None], 101, None), # Residue ID maps to None
],
)
def test_residue_id2ix(self, residue_ids, residue_ixs, residue_id, residue_ix):
"""
Test residue PDB ID to index mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_id2ix(residue_id) == residue_ix
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ix, residue_id",
[
([101, None], [1, 2], 1, 101), # Residue index+ID exist
([101, None], [1, 2], 2, None), # Residue index maps to None
([101, 102], [1, 2], 10, None), # Residue index does not exist
],
)
def test_residue_ix2id(self, residue_ids, residue_ixs, residue_ix, residue_id):
"""
Test residue index to PDB ID mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_ix2id(residue_ix) == residue_id | opencadd/tests/structure/test_pocket_base.py | import pytest
from opencadd.structure.pocket import PocketBase
class TestPocketBase:
"""
Test PocketBase class methods.
"""
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted",
[
([1, 2, 3], None, [1, 2, 3], [None, None, None]),
(["1", "2", "_", "_"], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
(["1", "2", None, None], ["1", "2", "3", "4"], [1, 2, None, None], [1, 2, 3, 4]),
],
)
def test_format_residue_ids_and_ixs(
self, residue_ids, residue_ixs, residue_ids_formatted, residue_ixs_formatted
):
"""
Test formatting of user-input residue PDB IDs and residue indices.
"""
base_pocket = PocketBase()
residue_ids2, residue_ixs2 = base_pocket._format_residue_ids_and_ixs(
residue_ids, residue_ixs, ""
)
assert residue_ids2 == residue_ids_formatted
assert residue_ixs2 == residue_ixs_formatted
@pytest.mark.parametrize(
"residue_ids, residue_ixs",
[
([1, 2, 3], [None, 2, 3]), # Non-int-castable index (None)
([1, 2, 3], ["a", 2, 3]), # Non-int-castable index
([1, 1, 2], None), # Duplicated PDB IDs
([1, 2, 3], [1, 1, 2]), # Duplicated indices
],
)
def test_format_residue_ids_and_ixs_raises(self, residue_ids, residue_ixs):
"""
Test error handling when formatting user-input residue PDB IDs and
residue indices.
"""
with pytest.raises((ValueError, TypeError)):
base_pocket = PocketBase()
base_pocket._format_residue_ids_and_ixs(residue_ids, residue_ixs, "")
@pytest.mark.parametrize(
"residue_ids, residue_ixs, n_residues",
[
([101, None], [1, 2], 2),
([101, None], [1, 2], 2),
([101, None], [None, None], 2),
],
)
def test_residues(self, residue_ids, residue_ixs, n_residues):
"""
Test property residues.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket.residues.columns.to_list() == ["residue.id", "residue.ix"]
assert (
base_pocket.residues.index.to_list()
== base_pocket.residues.reset_index().index.to_list()
)
assert base_pocket.residues.dtypes.to_list() == ["Int32", "Int32"]
assert len(base_pocket.residues) == n_residues
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_id, residue_ix",
[
([101, None], [1, 2], 101, 1), # Residue ID+index exist
([101, None], [1, 2], 102, None), # Residue ID does not exist
([101, None], [None, None], 101, None), # Residue ID maps to None
],
)
def test_residue_id2ix(self, residue_ids, residue_ixs, residue_id, residue_ix):
"""
Test residue PDB ID to index mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_id2ix(residue_id) == residue_ix
@pytest.mark.parametrize(
"residue_ids, residue_ixs, residue_ix, residue_id",
[
([101, None], [1, 2], 1, 101), # Residue index+ID exist
([101, None], [1, 2], 2, None), # Residue index maps to None
([101, 102], [1, 2], 10, None), # Residue index does not exist
],
)
def test_residue_ix2id(self, residue_ids, residue_ixs, residue_ix, residue_id):
"""
Test residue index to PDB ID mapping.
"""
base_pocket = PocketBase()
base_pocket._residue_ids = residue_ids
base_pocket._residue_ixs = residue_ixs
assert base_pocket._residue_ix2id(residue_ix) == residue_id | 0.61659 | 0.634741 |
import tensorflow as tf
pi = 3.141592653589793
U = 32768.0
tfand = tf.logical_and
class TutorialBotOutput:
def __init__(self, batch_size):
self.batch_size = batch_size
global zero,zeros3
zero = tf.zeros(self.batch_size, tf.float32)
zeros3 = [zero,zero,zero]
def get_output_vector_model(self, state_object):
steer = pitch = yaw = roll = throttle = boost = jump = powerslide = zero
player, ball = state_object.gamecars[0], state_object.gameball
pL,pV,pR = a3(player.Location), a3(player.Velocity), a3(player.Rotation)
paV,pB = a3(player.AngularVelocity), tf.cast(player.Boost,tf.float32)
bL,bR,bV = a3(ball.Location), a3(ball.Rotation), a3(ball.Velocity)
pxv,pyv,pzv = local(pV,zeros3,pR)
pvd,pva,pvi = spherical(pxv,pyv,pzv)
iv,rv,av = local(paV,zeros3,pR)
tx,ty,tz = local(bL,pL,pR)
txv,tyv,tzv = local(bV,zeros3,pR)
xv,yv,zv = pxv-txv, pyv-tyv, pzv-tzv
dT = (.5*tf.abs(ty) + .9*tf.abs(tx) + .34*tf.abs(tz))/1500.0
tL = predict_ball(bL,bV,dT)
x,y,z = local(tL,pL,pR)
d,a,i = spherical(x,y,z)
r = pR[2]/U
# controlls
throttle = regress((y-yv*.23)/900.0)
steer = regress(a-av/45.0)
yaw = regress(a-av/13.0)
pitch = regress(-i-iv/15.0)
roll = regress(-r+rv/22.0)
jump = tf.cast( tfand(120<tz, tfand(tz<400 , tfand( tz%250>140, tfand(d<1800,
tf.abs(a-pva)<.15) ) ) ), tf.float32)
boost = tf.cast( tfand( tf.abs(a)<.15, tfand( throttle>.5, tf.abs(i)<.25 )), tf.float32)
powerslide = tf.cast( tfand( throttle*pyv>0.0, tfand( .2<tf.abs(a-av/35.0),
tfand( tf.abs(a-av/35.0)<.8, xv>500.0 ) ) ), tf.float32)
output = [throttle, steer, pitch, yaw, roll, jump, boost, powerslide]
return output
def a3(V):
try : a = tf.stack([V.X,V.Y,V.Z])
except :
try :a = tf.stack([V.Pitch,V.Yaw,V.Roll])
except : a = tf.stack([V[0],V[1],V[2]])
return tf.cast(a,tf.float32)
def Range180(value,pi):
value = value - tf.abs(value)//(2.0*pi) * (2.0*pi) * tf.sign(value)
value = value - tf.cast(tf.greater( tf.abs(value), pi),tf.float32) * (2.0*pi) * tf.sign(value)
return value
def rotate2D(x,y,ang):
x2 = x*tf.cos(ang) - y*tf.sin(ang)
y2 = y*tf.cos(ang) + x*tf.sin(ang)
return x2,y2
def local(tL,oL,oR,Urot=True):
L = tL-oL
if Urot :
pitch = oR[0]*pi/U
yaw = Range180(oR[1]-U/2,U)*pi/U
roll = oR[2]*pi/U
R = -tf.stack([pitch,yaw,roll])
else :
R = -oR
x,y = rotate2D(L[0],L[1],R[1])
y,z = rotate2D(y,L[2],R[0])
x,z = rotate2D(x,z,R[2])
return x,y,z
def spherical(x,y,z):
d = tf.sqrt(x*x+y*y+z*z)
try : i = tf.acos(z/d)
except: i=0
a = tf.atan2(y,x)
return d, Range180(a-pi/2,pi)/pi, Range180(i-pi/2,pi)/pi
def d3(A,B=[0,0,0]):
A,B = a3(A),a3(B)
return tf.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2+(A[2]-B[2])**2)
def regress(a):
cond1 = tf.cast(abs(a)> .1, tf.float32)
result = cond1*tf.sign(a) + (1-cond1)*10*a
return result
def predict_ball(L0,V0,dt):
r = 0.03
g = a3([zero,zero,zero-650.0])
A = g -r*V0
nL = L0 + V0*dt + .5*A*dt**2
return nL | bot_code/models/fake_models/TutorialBot/atba2_demo_output_reg.py | import tensorflow as tf
pi = 3.141592653589793
U = 32768.0
tfand = tf.logical_and
class TutorialBotOutput:
def __init__(self, batch_size):
self.batch_size = batch_size
global zero,zeros3
zero = tf.zeros(self.batch_size, tf.float32)
zeros3 = [zero,zero,zero]
def get_output_vector_model(self, state_object):
steer = pitch = yaw = roll = throttle = boost = jump = powerslide = zero
player, ball = state_object.gamecars[0], state_object.gameball
pL,pV,pR = a3(player.Location), a3(player.Velocity), a3(player.Rotation)
paV,pB = a3(player.AngularVelocity), tf.cast(player.Boost,tf.float32)
bL,bR,bV = a3(ball.Location), a3(ball.Rotation), a3(ball.Velocity)
pxv,pyv,pzv = local(pV,zeros3,pR)
pvd,pva,pvi = spherical(pxv,pyv,pzv)
iv,rv,av = local(paV,zeros3,pR)
tx,ty,tz = local(bL,pL,pR)
txv,tyv,tzv = local(bV,zeros3,pR)
xv,yv,zv = pxv-txv, pyv-tyv, pzv-tzv
dT = (.5*tf.abs(ty) + .9*tf.abs(tx) + .34*tf.abs(tz))/1500.0
tL = predict_ball(bL,bV,dT)
x,y,z = local(tL,pL,pR)
d,a,i = spherical(x,y,z)
r = pR[2]/U
# controlls
throttle = regress((y-yv*.23)/900.0)
steer = regress(a-av/45.0)
yaw = regress(a-av/13.0)
pitch = regress(-i-iv/15.0)
roll = regress(-r+rv/22.0)
jump = tf.cast( tfand(120<tz, tfand(tz<400 , tfand( tz%250>140, tfand(d<1800,
tf.abs(a-pva)<.15) ) ) ), tf.float32)
boost = tf.cast( tfand( tf.abs(a)<.15, tfand( throttle>.5, tf.abs(i)<.25 )), tf.float32)
powerslide = tf.cast( tfand( throttle*pyv>0.0, tfand( .2<tf.abs(a-av/35.0),
tfand( tf.abs(a-av/35.0)<.8, xv>500.0 ) ) ), tf.float32)
output = [throttle, steer, pitch, yaw, roll, jump, boost, powerslide]
return output
def a3(V):
try : a = tf.stack([V.X,V.Y,V.Z])
except :
try :a = tf.stack([V.Pitch,V.Yaw,V.Roll])
except : a = tf.stack([V[0],V[1],V[2]])
return tf.cast(a,tf.float32)
def Range180(value,pi):
value = value - tf.abs(value)//(2.0*pi) * (2.0*pi) * tf.sign(value)
value = value - tf.cast(tf.greater( tf.abs(value), pi),tf.float32) * (2.0*pi) * tf.sign(value)
return value
def rotate2D(x,y,ang):
x2 = x*tf.cos(ang) - y*tf.sin(ang)
y2 = y*tf.cos(ang) + x*tf.sin(ang)
return x2,y2
def local(tL,oL,oR,Urot=True):
L = tL-oL
if Urot :
pitch = oR[0]*pi/U
yaw = Range180(oR[1]-U/2,U)*pi/U
roll = oR[2]*pi/U
R = -tf.stack([pitch,yaw,roll])
else :
R = -oR
x,y = rotate2D(L[0],L[1],R[1])
y,z = rotate2D(y,L[2],R[0])
x,z = rotate2D(x,z,R[2])
return x,y,z
def spherical(x,y,z):
d = tf.sqrt(x*x+y*y+z*z)
try : i = tf.acos(z/d)
except: i=0
a = tf.atan2(y,x)
return d, Range180(a-pi/2,pi)/pi, Range180(i-pi/2,pi)/pi
def d3(A,B=[0,0,0]):
A,B = a3(A),a3(B)
return tf.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2+(A[2]-B[2])**2)
def regress(a):
cond1 = tf.cast(abs(a)> .1, tf.float32)
result = cond1*tf.sign(a) + (1-cond1)*10*a
return result
def predict_ball(L0,V0,dt):
r = 0.03
g = a3([zero,zero,zero-650.0])
A = g -r*V0
nL = L0 + V0*dt + .5*A*dt**2
return nL | 0.69285 | 0.577674 |
import pygame
pygame.display.init()
pygame.mixer.init()
pygame.font.init()
pygame.mixer.music.load('pong_sound.mp3')
font = pygame.font.SysFont('optimattc', 25, 0, 0)
class Game:
width = 640
height = 400
running = True
movementY = 6
movementX = 6
white = (255, 255, 255, 255)
playerWidth = 10
playerHeight = 60
y = 0
ballX, ballY = 0, 0
movementY = 6
movementX = 6
scoreP1, scoreP2 = 0, 0
ballRadius = 4
playerMove = 10
def __init__(self, pygame, font):
self.pygame = pygame
self.font = font
def init(self):
screen = pygame.display.set_mode((self.width, self.height))
self.run(self.pygame, screen)
def reset(self):
self.startPositions()
self.movementY = 6
self.movementX = 6
def startPositions(self):
self.y = int((self.height - self.playerHeight) / 2)
self.ballX, self.ballY = int(self.width / 2), int(self.height / 2)
def changeMovement(self, playerY, ballY):
hit = playerY - ballY
if hit < 0 and hit > -25:
self.movementY = int(self.movementY * 1.5)
if self.movementY > 0:
self.movementY *= -1
elif hit < -35 and hit > -60:
self.movementY = int(self.movementY * 1.5)
if self.movementY < 0:
self.movementY *= -1
else:
if self.movementY > 0:
self.movementY = 6
else:
self.movementY = -6
def checkCollision(self, ballX, ballY):
if ballY + self.ballRadius >= self.height:
self.movementY *= -1
pygame.mixer.music.play()
return True
elif ballY - self.ballRadius <= 0:
self.movementY *= -1
pygame.mixer.music.play()
return True
elif ballX < 20 and (ballY <= self.y + self.playerHeight and ballY >= self.y):
self.changeMovement(self.y, ballY)
self.movementX *= -1
pygame.mixer.music.play()
return True
elif ballX > self.width - 20 and (ballY <= ballY + self.playerHeight and ballY >= ballY):
self.changeMovement(ballY, ballY)
self.movementX *= -1
pygame.mixer.music.play()
return True
elif ballX < 10:
self.scoreP2 += 1
self.reset() # player2 won
return False
elif ballX > self.width - 10:
self.scoreP1 += 1
self.reset() # player1 won
return False
else:
return True
def run(self, pygame, screen):
self.startPositions()
while self.running:
screen.fill((0, 0, 0, 255))
textP1 = self.font.render(f'{self.scoreP1}', True, self.white)
textP2 = self.font.render(f'{self.scoreP2}', True, self.white)
screen.blit(textP1, (self.width / 2 - 40, 10))
screen.blit(textP2, (self.width / 2 + 30, 10))
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# movement
if pygame.key.get_pressed()[pygame.K_DOWN]:
self.y += self.playerMove
if pygame.key.get_pressed()[pygame.K_UP]:
self.y -= self.playerMove
if self.movementX > 0 and self.movementY > 0:
if self.checkCollision(self.ballX + self.movementX + self.ballRadius, self.ballY + self.movementY + self.ballRadius):
pass
elif self.movementX > 0 and self.movementY < 0:
if self.checkCollision(self.ballX + self.movementX + self.ballRadius, self.ballY + self.movementY - self.ballRadius):
pass
elif self.movementX < 0 and self.movementY > 0:
if self.checkCollision(self.ballX + self.movementX - self.ballRadius, self.ballY + self.movementY + self.ballRadius):
pass
else:
if self.checkCollision(self.ballX + self.movementX - self.ballRadius, self.ballY + self.movementY - self.ballRadius):
pass
self.ballX += self.movementX
self.ballY += self.movementY
#player1
pygame.draw.rect(screen, self.white, (10, self.y, self.playerWidth, self.playerHeight))
#player2
pygame.draw.rect(screen, self.white, (self.width - self.playerWidth - 10, self.ballY - 30, self.playerWidth, self.playerHeight))
#ball
pygame.draw.circle(screen, self.white, (self.ballX, self.ballY), 8)
#line
pygame.draw.line(screen, self.white, (self.width / 2, 0), (self.width / 2, self.height), 1)
pygame.display.flip()
pygame.display.quit()
game = Game(pygame, font)
game.init() | pong.py | import pygame
pygame.display.init()
pygame.mixer.init()
pygame.font.init()
pygame.mixer.music.load('pong_sound.mp3')
font = pygame.font.SysFont('optimattc', 25, 0, 0)
class Game:
width = 640
height = 400
running = True
movementY = 6
movementX = 6
white = (255, 255, 255, 255)
playerWidth = 10
playerHeight = 60
y = 0
ballX, ballY = 0, 0
movementY = 6
movementX = 6
scoreP1, scoreP2 = 0, 0
ballRadius = 4
playerMove = 10
def __init__(self, pygame, font):
self.pygame = pygame
self.font = font
def init(self):
screen = pygame.display.set_mode((self.width, self.height))
self.run(self.pygame, screen)
def reset(self):
self.startPositions()
self.movementY = 6
self.movementX = 6
def startPositions(self):
self.y = int((self.height - self.playerHeight) / 2)
self.ballX, self.ballY = int(self.width / 2), int(self.height / 2)
def changeMovement(self, playerY, ballY):
hit = playerY - ballY
if hit < 0 and hit > -25:
self.movementY = int(self.movementY * 1.5)
if self.movementY > 0:
self.movementY *= -1
elif hit < -35 and hit > -60:
self.movementY = int(self.movementY * 1.5)
if self.movementY < 0:
self.movementY *= -1
else:
if self.movementY > 0:
self.movementY = 6
else:
self.movementY = -6
def checkCollision(self, ballX, ballY):
if ballY + self.ballRadius >= self.height:
self.movementY *= -1
pygame.mixer.music.play()
return True
elif ballY - self.ballRadius <= 0:
self.movementY *= -1
pygame.mixer.music.play()
return True
elif ballX < 20 and (ballY <= self.y + self.playerHeight and ballY >= self.y):
self.changeMovement(self.y, ballY)
self.movementX *= -1
pygame.mixer.music.play()
return True
elif ballX > self.width - 20 and (ballY <= ballY + self.playerHeight and ballY >= ballY):
self.changeMovement(ballY, ballY)
self.movementX *= -1
pygame.mixer.music.play()
return True
elif ballX < 10:
self.scoreP2 += 1
self.reset() # player2 won
return False
elif ballX > self.width - 10:
self.scoreP1 += 1
self.reset() # player1 won
return False
else:
return True
def run(self, pygame, screen):
self.startPositions()
while self.running:
screen.fill((0, 0, 0, 255))
textP1 = self.font.render(f'{self.scoreP1}', True, self.white)
textP2 = self.font.render(f'{self.scoreP2}', True, self.white)
screen.blit(textP1, (self.width / 2 - 40, 10))
screen.blit(textP2, (self.width / 2 + 30, 10))
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
# movement
if pygame.key.get_pressed()[pygame.K_DOWN]:
self.y += self.playerMove
if pygame.key.get_pressed()[pygame.K_UP]:
self.y -= self.playerMove
if self.movementX > 0 and self.movementY > 0:
if self.checkCollision(self.ballX + self.movementX + self.ballRadius, self.ballY + self.movementY + self.ballRadius):
pass
elif self.movementX > 0 and self.movementY < 0:
if self.checkCollision(self.ballX + self.movementX + self.ballRadius, self.ballY + self.movementY - self.ballRadius):
pass
elif self.movementX < 0 and self.movementY > 0:
if self.checkCollision(self.ballX + self.movementX - self.ballRadius, self.ballY + self.movementY + self.ballRadius):
pass
else:
if self.checkCollision(self.ballX + self.movementX - self.ballRadius, self.ballY + self.movementY - self.ballRadius):
pass
self.ballX += self.movementX
self.ballY += self.movementY
#player1
pygame.draw.rect(screen, self.white, (10, self.y, self.playerWidth, self.playerHeight))
#player2
pygame.draw.rect(screen, self.white, (self.width - self.playerWidth - 10, self.ballY - 30, self.playerWidth, self.playerHeight))
#ball
pygame.draw.circle(screen, self.white, (self.ballX, self.ballY), 8)
#line
pygame.draw.line(screen, self.white, (self.width / 2, 0), (self.width / 2, self.height), 1)
pygame.display.flip()
pygame.display.quit()
game = Game(pygame, font)
game.init() | 0.213295 | 0.192255 |
import testtools
from os_apply_config import config_exception
from os_apply_config import value_types
class ValueTypeTestCase(testtools.TestCase):
def test_unknown_type(self):
self.assertRaises(
ValueError, value_types.ensure_type, "foo", "badtype")
def test_int(self):
self.assertEqual("123", value_types.ensure_type("123", "int"))
def test_default(self):
self.assertEqual("foobar",
value_types.ensure_type("foobar", "default"))
self.assertEqual("x86_64",
value_types.ensure_type("x86_64", "default"))
def test_default_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "foo\nbar", "default")
def test_default_empty(self):
self.assertEqual('',
value_types.ensure_type('', 'default'))
def test_raw_empty(self):
self.assertEqual('',
value_types.ensure_type('', 'raw'))
def test_net_address_ipv4(self):
self.assertEqual('192.0.2.1', value_types.ensure_type('192.0.2.1',
'netaddress'))
def test_net_address_cidr(self):
self.assertEqual('192.0.2.0/24',
value_types.ensure_type('192.0.2.0/24', 'netaddress'))
def test_ent_address_ipv6(self):
self.assertEqual('::', value_types.ensure_type('::', 'netaddress'))
self.assertEqual('2001:db8::2:1', value_types.ensure_type(
'2001:db8::2:1', 'netaddress'))
def test_net_address_dns(self):
self.assertEqual('host.0domain-name.test',
value_types.ensure_type('host.0domain-name.test',
'netaddress'))
def test_net_address_empty(self):
self.assertEqual('', value_types.ensure_type('', 'netaddress'))
def test_net_address_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "192.0.2.1;DROP TABLE foo",
'netaddress')
def test_netdevice(self):
self.assertEqual('eth0',
value_types.ensure_type('eth0', 'netdevice'))
def test_netdevice_dash(self):
self.assertEqual('br-ctlplane',
value_types.ensure_type('br-ctlplane', 'netdevice'))
def test_netdevice_alias(self):
self.assertEqual('eth0:1',
value_types.ensure_type('eth0:1', 'netdevice'))
def test_netdevice_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "br-tun; DROP TABLE bar",
'netdevice')
def test_dsn_nopass(self):
test_dsn = 'mysql://user@host/db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn(self):
test_dsn = 'mysql://user:pass@host/db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_set_variables(self):
test_dsn = 'mysql://user:pass@host/db?charset=utf8'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_sqlite_memory(self):
test_dsn = 'sqlite://'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_sqlite_file(self):
test_dsn = 'sqlite:///tmp/foo.db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
"mysql:/user:pass@host/db?charset=utf8", 'dsn')
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
"mysql://user:pass@host/db?charset=utf8;DROP TABLE "
"foo", 'dsn')
def test_swiftdevices_single(self):
test_swiftdevices = 'r1z1-127.0.0.1:%PORT%/d1'
self.assertEqual(test_swiftdevices, value_types.ensure_type(
test_swiftdevices,
'swiftdevices'))
def test_swiftdevices_multi(self):
test_swiftdevices = 'r1z1-127.0.0.1:%PORT%/d1,r1z1-127.0.0.1:%PORT%/d2'
self.assertEqual(test_swiftdevices, value_types.ensure_type(
test_swiftdevices,
'swiftdevices'))
def test_swiftdevices_blank(self):
test_swiftdevices = ''
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_swiftdevices,
'swiftdevices')
def test_swiftdevices_bad(self):
test_swiftdevices = 'rz1-127.0.0.1:%PORT%/d1'
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_swiftdevices,
'swiftdevices')
def test_username(self):
for test_username in ['guest', 'guest_13-42']:
self.assertEqual(test_username, value_types.ensure_type(
test_username,
'username'))
def test_username_bad(self):
for test_username in ['guest`ls`', 'guest$PASSWD', 'guest 2']:
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_username,
'username') | os_apply_config/tests/test_value_type.py |
import testtools
from os_apply_config import config_exception
from os_apply_config import value_types
class ValueTypeTestCase(testtools.TestCase):
def test_unknown_type(self):
self.assertRaises(
ValueError, value_types.ensure_type, "foo", "badtype")
def test_int(self):
self.assertEqual("123", value_types.ensure_type("123", "int"))
def test_default(self):
self.assertEqual("foobar",
value_types.ensure_type("foobar", "default"))
self.assertEqual("x86_64",
value_types.ensure_type("x86_64", "default"))
def test_default_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "foo\nbar", "default")
def test_default_empty(self):
self.assertEqual('',
value_types.ensure_type('', 'default'))
def test_raw_empty(self):
self.assertEqual('',
value_types.ensure_type('', 'raw'))
def test_net_address_ipv4(self):
self.assertEqual('192.0.2.1', value_types.ensure_type('192.0.2.1',
'netaddress'))
def test_net_address_cidr(self):
self.assertEqual('192.0.2.0/24',
value_types.ensure_type('192.0.2.0/24', 'netaddress'))
def test_ent_address_ipv6(self):
self.assertEqual('::', value_types.ensure_type('::', 'netaddress'))
self.assertEqual('2001:db8::2:1', value_types.ensure_type(
'2001:db8::2:1', 'netaddress'))
def test_net_address_dns(self):
self.assertEqual('host.0domain-name.test',
value_types.ensure_type('host.0domain-name.test',
'netaddress'))
def test_net_address_empty(self):
self.assertEqual('', value_types.ensure_type('', 'netaddress'))
def test_net_address_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "192.0.2.1;DROP TABLE foo",
'netaddress')
def test_netdevice(self):
self.assertEqual('eth0',
value_types.ensure_type('eth0', 'netdevice'))
def test_netdevice_dash(self):
self.assertEqual('br-ctlplane',
value_types.ensure_type('br-ctlplane', 'netdevice'))
def test_netdevice_alias(self):
self.assertEqual('eth0:1',
value_types.ensure_type('eth0:1', 'netdevice'))
def test_netdevice_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type, "br-tun; DROP TABLE bar",
'netdevice')
def test_dsn_nopass(self):
test_dsn = 'mysql://user@host/db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn(self):
test_dsn = 'mysql://user:pass@host/db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_set_variables(self):
test_dsn = 'mysql://user:pass@host/db?charset=utf8'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_sqlite_memory(self):
test_dsn = 'sqlite://'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_sqlite_file(self):
test_dsn = 'sqlite:///tmp/foo.db'
self.assertEqual(test_dsn, value_types.ensure_type(test_dsn, 'dsn'))
def test_dsn_bad(self):
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
"mysql:/user:pass@host/db?charset=utf8", 'dsn')
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
"mysql://user:pass@host/db?charset=utf8;DROP TABLE "
"foo", 'dsn')
def test_swiftdevices_single(self):
test_swiftdevices = 'r1z1-127.0.0.1:%PORT%/d1'
self.assertEqual(test_swiftdevices, value_types.ensure_type(
test_swiftdevices,
'swiftdevices'))
def test_swiftdevices_multi(self):
test_swiftdevices = 'r1z1-127.0.0.1:%PORT%/d1,r1z1-127.0.0.1:%PORT%/d2'
self.assertEqual(test_swiftdevices, value_types.ensure_type(
test_swiftdevices,
'swiftdevices'))
def test_swiftdevices_blank(self):
test_swiftdevices = ''
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_swiftdevices,
'swiftdevices')
def test_swiftdevices_bad(self):
test_swiftdevices = 'rz1-127.0.0.1:%PORT%/d1'
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_swiftdevices,
'swiftdevices')
def test_username(self):
for test_username in ['guest', 'guest_13-42']:
self.assertEqual(test_username, value_types.ensure_type(
test_username,
'username'))
def test_username_bad(self):
for test_username in ['guest`ls`', 'guest$PASSWD', 'guest 2']:
self.assertRaises(config_exception.ConfigException,
value_types.ensure_type,
test_username,
'username') | 0.548432 | 0.440289 |
import math
import threading
import time
from bmconfigparser import BMConfigParser
from singleton import Singleton
import state
class Throttle(object):
minChunkSize = 4096
maxChunkSize = 131072
def __init__(self, limit=0):
self.limit = limit
self.speed = 0
self.chunkSize = Throttle.maxChunkSize
self.txTime = int(time.time())
self.txLen = 0
self.total = 0
self.timer = threading.Event()
self.lock = threading.RLock()
self.resetChunkSize()
def recalculate(self):
with self.lock:
now = int(time.time())
if now > self.txTime:
self.speed = self.txLen / (now - self.txTime)
self.txLen -= self.limit * (now - self.txTime)
self.txTime = now
if self.txLen < 0 or self.limit == 0:
self.txLen = 0
def wait(self, dataLen):
with self.lock:
self.txLen += dataLen
self.total += dataLen
while state.shutdown == 0:
self.recalculate()
if self.limit == 0:
break
if self.txLen < self.limit:
break
self.timer.wait(0.2)
def getSpeed(self):
self.recalculate()
return self.speed
def resetChunkSize(self):
with self.lock:
# power of two smaller or equal to speed limit
try:
self.chunkSize = int(math.pow(2, int(math.log(self.limit,2))))
except ValueError:
self.chunkSize = Throttle.maxChunkSize
# range check
if self.chunkSize < Throttle.minChunkSize:
self.chunkSize = Throttle.minChunkSize
elif self.chunkSize > Throttle.maxChunkSize:
self.chunkSize = Throttle.maxChunkSize
@Singleton
class SendThrottle(Throttle):
def __init__(self):
Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024)
def resetLimit(self):
with self.lock:
self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024
Throttle.resetChunkSize(self)
@Singleton
class ReceiveThrottle(Throttle):
def __init__(self):
Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024)
def resetLimit(self):
with self.lock:
self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024
Throttle.resetChunkSize(self) | src/throttle.py | import math
import threading
import time
from bmconfigparser import BMConfigParser
from singleton import Singleton
import state
class Throttle(object):
minChunkSize = 4096
maxChunkSize = 131072
def __init__(self, limit=0):
self.limit = limit
self.speed = 0
self.chunkSize = Throttle.maxChunkSize
self.txTime = int(time.time())
self.txLen = 0
self.total = 0
self.timer = threading.Event()
self.lock = threading.RLock()
self.resetChunkSize()
def recalculate(self):
with self.lock:
now = int(time.time())
if now > self.txTime:
self.speed = self.txLen / (now - self.txTime)
self.txLen -= self.limit * (now - self.txTime)
self.txTime = now
if self.txLen < 0 or self.limit == 0:
self.txLen = 0
def wait(self, dataLen):
with self.lock:
self.txLen += dataLen
self.total += dataLen
while state.shutdown == 0:
self.recalculate()
if self.limit == 0:
break
if self.txLen < self.limit:
break
self.timer.wait(0.2)
def getSpeed(self):
self.recalculate()
return self.speed
def resetChunkSize(self):
with self.lock:
# power of two smaller or equal to speed limit
try:
self.chunkSize = int(math.pow(2, int(math.log(self.limit,2))))
except ValueError:
self.chunkSize = Throttle.maxChunkSize
# range check
if self.chunkSize < Throttle.minChunkSize:
self.chunkSize = Throttle.minChunkSize
elif self.chunkSize > Throttle.maxChunkSize:
self.chunkSize = Throttle.maxChunkSize
@Singleton
class SendThrottle(Throttle):
def __init__(self):
Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024)
def resetLimit(self):
with self.lock:
self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxuploadrate')*1024
Throttle.resetChunkSize(self)
@Singleton
class ReceiveThrottle(Throttle):
def __init__(self):
Throttle.__init__(self, BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024)
def resetLimit(self):
with self.lock:
self.limit = BMConfigParser().safeGetInt('bitmessagesettings', 'maxdownloadrate')*1024
Throttle.resetChunkSize(self) | 0.332527 | 0.096919 |
from abc import ABC, abstractmethod
import copy
from ..PlayerStructs import *
import json
class PlayerDataTrimAlg(ABC):
def __init__(self):
pass
@abstractmethod
def trimmedList(self, pastModelIncs):
pass
# ---------------------- KNNRegression stuff ---------------------------
class AgeSortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements):
super().__init__()
self.maxNumModelElements = maxNumModelElements
def creationTimeSort(self, elem):
return elem.creationTime
def trimmedList(self, pastModelIncs):
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSorted = sorted(pastModelIncs, key=self.creationTimeSort)
removedI = pastModelIncs.index(pastModelIncsSorted[0])
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]]
class QualitySortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements, qualityWeights = None, accStateResidue = None):
super().__init__()
self.maxNumModelElements = maxNumModelElements
self.qualityWeights = PlayerCharacteristics(ability = 0.5, engagement = 0.5) if qualityWeights==None else qualityWeights
self.accStateResidue = False if accStateResidue == None else accStateResidue
def considerStateResidue(self, accStateResidue):
self.accStateResidue = accStateResidue
def stateTypeFilter(self, element):
return element.stateType == 0
def qSort(self, elem):
return elem.quality
def calcQuality(self, state):
total = self.qualityWeights.ability*state.characteristics.ability + self.qualityWeights.engagement*state.characteristics.engagement
return total
def trimmedList(self, pastModelIncs):
for modelInc in pastModelIncs:
if(modelInc.quality == -1):
modelInc.quality = self.calcQuality(modelInc)
if(self.accStateResidue):
modelInc.quality += modelInc.stateType
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSorted = sorted(pastModelIncs, key=self.qSort)
removedI = pastModelIncs.index(pastModelIncsSorted[0])
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]]
class ProximitySortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements, epsilon = None, accStateResidue = None):
super().__init__()
self.maxNumModelElements = maxNumModelElements
self.epsilon = 0.01 if epsilon == None else epsilon
self.accStateResidue = False if accStateResidue == None else accStateResidue
def considerStateResidue(self, accStateResidue):
self.accStateResidue = accStateResidue
def proximitySort(self, elem):
return elem.quality
def creationTimeSort(self, elem):
return elem.creationTime
def trimmedList(self, pastModelIncs):
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSortedAge = sorted(pastModelIncs, key=self.creationTimeSort)
lastDataPoint = pastModelIncsSortedAge[-1]
for modelInc in pastModelIncs:
modelInc.quality = lastDataPoint.profile.sqrDistanceBetween(modelInc.profile)
if(self.accStateResidue):
modelInc.quality += modelInc.stateType
# check if there is already a close point
pastModelIncsSorted = sorted(pastModelIncs, key=self.proximitySort)
pastModelIncsSorted.remove(lastDataPoint) #remove the point to be tested
removedI = None
closestPoint = pastModelIncsSorted[0]
# print(json.dumps(closestPoint, default=lambda o: [o.__dict__["quality"],o.__dict__["stateType"],o.__dict__["creationTime"]], sort_keys=True))
if (self.accStateResidue and closestPoint.stateType == 0) or closestPoint.quality > (self.epsilon + closestPoint.stateType):
removedI = pastModelIncs.index(closestPoint)
pastModelIncs.pop(removedI)
else:
removedI = pastModelIncs.index(lastDataPoint)
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]] | GIMMECore/AlgDefStructs/PlayerDataTrimAlg.py | from abc import ABC, abstractmethod
import copy
from ..PlayerStructs import *
import json
class PlayerDataTrimAlg(ABC):
def __init__(self):
pass
@abstractmethod
def trimmedList(self, pastModelIncs):
pass
# ---------------------- KNNRegression stuff ---------------------------
class AgeSortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements):
super().__init__()
self.maxNumModelElements = maxNumModelElements
def creationTimeSort(self, elem):
return elem.creationTime
def trimmedList(self, pastModelIncs):
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSorted = sorted(pastModelIncs, key=self.creationTimeSort)
removedI = pastModelIncs.index(pastModelIncsSorted[0])
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]]
class QualitySortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements, qualityWeights = None, accStateResidue = None):
super().__init__()
self.maxNumModelElements = maxNumModelElements
self.qualityWeights = PlayerCharacteristics(ability = 0.5, engagement = 0.5) if qualityWeights==None else qualityWeights
self.accStateResidue = False if accStateResidue == None else accStateResidue
def considerStateResidue(self, accStateResidue):
self.accStateResidue = accStateResidue
def stateTypeFilter(self, element):
return element.stateType == 0
def qSort(self, elem):
return elem.quality
def calcQuality(self, state):
total = self.qualityWeights.ability*state.characteristics.ability + self.qualityWeights.engagement*state.characteristics.engagement
return total
def trimmedList(self, pastModelIncs):
for modelInc in pastModelIncs:
if(modelInc.quality == -1):
modelInc.quality = self.calcQuality(modelInc)
if(self.accStateResidue):
modelInc.quality += modelInc.stateType
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSorted = sorted(pastModelIncs, key=self.qSort)
removedI = pastModelIncs.index(pastModelIncsSorted[0])
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]]
class ProximitySortPlayerDataTrimAlg(PlayerDataTrimAlg):
def __init__(self, maxNumModelElements, epsilon = None, accStateResidue = None):
super().__init__()
self.maxNumModelElements = maxNumModelElements
self.epsilon = 0.01 if epsilon == None else epsilon
self.accStateResidue = False if accStateResidue == None else accStateResidue
def considerStateResidue(self, accStateResidue):
self.accStateResidue = accStateResidue
def proximitySort(self, elem):
return elem.quality
def creationTimeSort(self, elem):
return elem.creationTime
def trimmedList(self, pastModelIncs):
if(len(pastModelIncs) <= self.maxNumModelElements):
return [pastModelIncs, []]
pastModelIncsSortedAge = sorted(pastModelIncs, key=self.creationTimeSort)
lastDataPoint = pastModelIncsSortedAge[-1]
for modelInc in pastModelIncs:
modelInc.quality = lastDataPoint.profile.sqrDistanceBetween(modelInc.profile)
if(self.accStateResidue):
modelInc.quality += modelInc.stateType
# check if there is already a close point
pastModelIncsSorted = sorted(pastModelIncs, key=self.proximitySort)
pastModelIncsSorted.remove(lastDataPoint) #remove the point to be tested
removedI = None
closestPoint = pastModelIncsSorted[0]
# print(json.dumps(closestPoint, default=lambda o: [o.__dict__["quality"],o.__dict__["stateType"],o.__dict__["creationTime"]], sort_keys=True))
if (self.accStateResidue and closestPoint.stateType == 0) or closestPoint.quality > (self.epsilon + closestPoint.stateType):
removedI = pastModelIncs.index(closestPoint)
pastModelIncs.pop(removedI)
else:
removedI = pastModelIncs.index(lastDataPoint)
pastModelIncs.pop(removedI)
return [pastModelIncs, [removedI]] | 0.390127 | 0.273765 |
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique_for_date='posted', verbose_name='Заголовок')),
('description', models.TextField(verbose_name='Краткое содержание')),
('content', models.TextField(verbose_name='Полное содержание')),
('posted', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 339751), verbose_name='Опубликована')),
('image', models.FileField(default='temp.jpg', upload_to='', verbose_name='Путь к картинке')),
],
options={
'verbose_name': 'статья блога',
'verbose_name_plural': 'статьи блога',
'db_table': 'Posts',
'ordering': ['-posted'],
},
),
migrations.CreateModel(
name='Catalog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Заказ')),
('date', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 340751), verbose_name='Дата')),
('status', models.TextField(default='В очереди', verbose_name='Статус')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Комментарий')),
('date', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 340751), verbose_name='Дата')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Blog', verbose_name='Статья')),
],
options={
'verbose_name': 'Комментарий',
'verbose_name_plural': 'Комментарий к статьям блога',
'db_table': 'Comments',
'ordering': ['-date'],
},
),
] | app/migrations/0001_initial.py | from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique_for_date='posted', verbose_name='Заголовок')),
('description', models.TextField(verbose_name='Краткое содержание')),
('content', models.TextField(verbose_name='Полное содержание')),
('posted', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 339751), verbose_name='Опубликована')),
('image', models.FileField(default='temp.jpg', upload_to='', verbose_name='Путь к картинке')),
],
options={
'verbose_name': 'статья блога',
'verbose_name_plural': 'статьи блога',
'db_table': 'Posts',
'ordering': ['-posted'],
},
),
migrations.CreateModel(
name='Catalog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Заказ')),
('date', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 340751), verbose_name='Дата')),
('status', models.TextField(default='В очереди', verbose_name='Статус')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(verbose_name='Комментарий')),
('date', models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 20, 1, 36, 51, 340751), verbose_name='Дата')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Blog', verbose_name='Статья')),
],
options={
'verbose_name': 'Комментарий',
'verbose_name_plural': 'Комментарий к статьям блога',
'db_table': 'Comments',
'ordering': ['-date'],
},
),
] | 0.413359 | 0.130258 |
import pygame
class Settings:
""" Una clase para almacenar la configuración de celdaTP """
def __init__(self):
""" Inicializa la configuración del juego """
# Configuración de pantalla
self.screen_width = 960
self.screen_height = 540
self.screen = pygame.display.set_mode((self.screen_width,
self.screen_height))
self.icono = pygame.image.load("images/buttons/icono.png")
#Color de los textos
self.texto_naranja = (200, 70, 10)
self.texto_blanco = (255, 255, 255)
self.texto_amarillo = (160, 190, 0)
# Márgenes
self.margen_x = 80
self.margen_y = 80
self.margen_x2 = 880
self.margen_y2 = 460
"""Posiciones"""
#Botón empezar
self.empezar_x = 350
self.empezar_y = 440
self.empezar_x2 = 609
self.empezar_y2 = 483
self.empezar_xy = (self.empezar_x, self.empezar_y)
#Posicion texto flotante
self.flotante_x = 960
self.flotante_y = 30
#Posiciones textos presentación
self.posicion_y_2 = 130
self.posicion_y_3 = 155
self.posicion_y_4 = 205
self.posicion_y_5 = 230
self.posicion_y_6 = 255
self.posicion_y_7 = 305
self.posicion_y_8 = 355
self.posicion_y_9 = 380
self.posicion_y_10 = 455
self.posicion_x_flecha = 20
self.posicion_y_flecha = 412
self.posicion_y_nombre = 405
self.posicion_x_tecla_incorrecta = 80
self.posicion_y_tecla_incorrecta = 450
#Posicion texto barra info
self.barra_info_x = 960
self.barra_info_y = 390
#Posiciones ventana_carcel
self.prota_x = 705
self.prota_x2 = 820
self.prota_y = 234
self.prota_y2 = 342
self.papel_x = 105
self.papel_x2 = 170
self.papel_y = 210
self.papel_y2 = 240
self.cajon_x = 190
self.cajon_x2 = 230
self.cajon_y = 245
self.cajon_y2 = 282
self.chicle_x = 240
self.chicle_x2 = 250
self.chicle_y = 205
self.chicle_y2 = 215
self.puerta_x = 535
self.puerta_x2 = 660
self.puerta_y = 0
self.puerta_y2 = 280
self.poster_x = 842
self.poster_x2 = 913
self.poster_y = 70
self.poster_y2 = 240
#Posiciones ventana_poster
self.poster_poster_x = 370
self.poster_poster_x2 = 590
self.poster_poster_y = 85
self.poster_poster_y2 = 350
self.poster_poster2_x = 370
self.poster_poster2_x2 = 570
self.poster_poster2_y = 350
self.poster_poster2_y2 = 370
self.poster_chincheta_x = 571
self.poster_chincheta_x2 = 590
self.poster_chincheta_y = 351
self.poster_chincheta_y2 = 370
self.poster_volver_x = 23
self.poster_volver_x2 = 95
self.poster_volver_y = 355
self.poster_volver_y2 = 370 | settings.py | import pygame
class Settings:
""" Una clase para almacenar la configuración de celdaTP """
def __init__(self):
""" Inicializa la configuración del juego """
# Configuración de pantalla
self.screen_width = 960
self.screen_height = 540
self.screen = pygame.display.set_mode((self.screen_width,
self.screen_height))
self.icono = pygame.image.load("images/buttons/icono.png")
#Color de los textos
self.texto_naranja = (200, 70, 10)
self.texto_blanco = (255, 255, 255)
self.texto_amarillo = (160, 190, 0)
# Márgenes
self.margen_x = 80
self.margen_y = 80
self.margen_x2 = 880
self.margen_y2 = 460
"""Posiciones"""
#Botón empezar
self.empezar_x = 350
self.empezar_y = 440
self.empezar_x2 = 609
self.empezar_y2 = 483
self.empezar_xy = (self.empezar_x, self.empezar_y)
#Posicion texto flotante
self.flotante_x = 960
self.flotante_y = 30
#Posiciones textos presentación
self.posicion_y_2 = 130
self.posicion_y_3 = 155
self.posicion_y_4 = 205
self.posicion_y_5 = 230
self.posicion_y_6 = 255
self.posicion_y_7 = 305
self.posicion_y_8 = 355
self.posicion_y_9 = 380
self.posicion_y_10 = 455
self.posicion_x_flecha = 20
self.posicion_y_flecha = 412
self.posicion_y_nombre = 405
self.posicion_x_tecla_incorrecta = 80
self.posicion_y_tecla_incorrecta = 450
#Posicion texto barra info
self.barra_info_x = 960
self.barra_info_y = 390
#Posiciones ventana_carcel
self.prota_x = 705
self.prota_x2 = 820
self.prota_y = 234
self.prota_y2 = 342
self.papel_x = 105
self.papel_x2 = 170
self.papel_y = 210
self.papel_y2 = 240
self.cajon_x = 190
self.cajon_x2 = 230
self.cajon_y = 245
self.cajon_y2 = 282
self.chicle_x = 240
self.chicle_x2 = 250
self.chicle_y = 205
self.chicle_y2 = 215
self.puerta_x = 535
self.puerta_x2 = 660
self.puerta_y = 0
self.puerta_y2 = 280
self.poster_x = 842
self.poster_x2 = 913
self.poster_y = 70
self.poster_y2 = 240
#Posiciones ventana_poster
self.poster_poster_x = 370
self.poster_poster_x2 = 590
self.poster_poster_y = 85
self.poster_poster_y2 = 350
self.poster_poster2_x = 370
self.poster_poster2_x2 = 570
self.poster_poster2_y = 350
self.poster_poster2_y2 = 370
self.poster_chincheta_x = 571
self.poster_chincheta_x2 = 590
self.poster_chincheta_y = 351
self.poster_chincheta_y2 = 370
self.poster_volver_x = 23
self.poster_volver_x2 = 95
self.poster_volver_y = 355
self.poster_volver_y2 = 370 | 0.334155 | 0.178562 |
import json
import numpy as np
from lib.skeleton.skeleton import Skeleton
from lib.dataset.mocap_dataset import MocapDataset
from lib.camera.camera import CameraInfoPacket
h36m_skeleton = Skeleton(parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],
joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31])
class Human36mDataset(MocapDataset):
def __init__(self, path, camera_param, remove_static_joints=True, camera_wise_performance=False, universal=False):
super().__init__(fps=50, skeleton=h36m_skeleton)
self.universal = universal
camera_meta = json.load(open(camera_param, 'r'))
# subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
subjects = [
'S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11',
'S1_0.6', 'S5_0.6', 'S6_0.6', 'S7_0.6', 'S8_0.6', 'S9_0.6', 'S11_0.6',
'S1_0.7', 'S5_0.7', 'S6_0.7', 'S7_0.7', 'S8_0.7', 'S9_0.7', 'S11_0.7',
'S1_0.8', 'S5_0.8', 'S6_0.8', 'S7_0.8', 'S8_0.8', 'S9_0.8', 'S11_0.8',
'S1_0.9', 'S5_0.9', 'S6_0.9', 'S7_0.9', 'S8_0.9', 'S9_0.9', 'S11_0.9',
'S1_1.1', 'S5_1.1', 'S6_1.1', 'S7_1.1', 'S8_1.1', 'S9_1.1', 'S11_1.1'
]
if camera_wise_performance:
camera_dist = list()
for cam in camera_meta:
# camera_dist.append((cam['id'], cam['pitch'], cam['translation_scale'], cam['degree']))
camera_dist.append(cam['id'])
self.camera_dist = camera_dist
camera_info = dict()
for subject in subjects:
camera_info.setdefault(subject, list())
for cam in camera_meta:
K = np.eye(3, dtype=np.float64)
K[0, 0] = cam['focal_length'][0]
K[1, 1] = cam['focal_length'][1]
K[0, 2] = cam['center'][0]
K[1, 2] = cam['center'][1]
R = np.array(cam['R']).reshape(3, 3)
dist_coeff = np.array(
cam['radial_distortion'][:2] + cam['tangential_distortion'] + cam['radial_distortion'][2:]
).reshape((5,))
t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
res_w=cam['res_w'], res_h=cam['res_h'],
azimuth=cam['azimuth'], dist_coeff=dist_coeff,
undistort=False))
self.camera_info = camera_info
# Load serialized dataset
data = np.load(path, allow_pickle=True)['positions_3d'].item()
self._data = {}
for subject, actions in data.items():
self._data[subject] = {}
for action_name, positions in actions.items():
self._data[subject][action_name] = {
'positions': positions,
}
if remove_static_joints:
if self.universal:
self.remove_joints([4, 5, 9, 10, 11, 12, 13, 14, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
else:
# Bring the skeleton to 17 joints instead of the original 32
self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
# Rewire shoulders to the correct parents
self._skeleton._parents[11] = 8
self._skeleton._parents[14] = 8
def supports_semi_supervised(self):
return True
@staticmethod
def remove_irrelevant_kpts(keypoints, universal=False):
"""
:param keypoints:
:return:
"""
if universal:
origin_keypoints, origin_keypoints_metadata = keypoints['positions_2d'].item(), keypoints['metadata'].item()
updated_keypoints, updated_keypoints_metadata = dict(), dict()
human36m_kpt_index = [0, 1, 2, 3, 4, 5, 6, 10, 11, 12, 13, 14, 15, 16]
updated_keypoints_metadata['layout_name'] = 'h36m'
updated_keypoints_metadata['num_joints'] = len(human36m_kpt_index)
updated_keypoints_metadata['keypoints_symmetry'] = [[4, 5, 6, 8, 9, 10], [1, 2, 3, 11, 12, 13]]
for subject in origin_keypoints.keys():
updated_keypoints.setdefault(subject, dict())
for action in origin_keypoints[subject]:
updated_keypoints[subject].setdefault(action, list())
for cam_idx, kps in enumerate(origin_keypoints[subject][action]):
updated_keypoints[subject][action].append(kps[:, human36m_kpt_index, :])
return updated_keypoints, updated_keypoints_metadata
else:
raise NotImplementedError | lib/dataset/h36m_aug_dataset.py |
import json
import numpy as np
from lib.skeleton.skeleton import Skeleton
from lib.dataset.mocap_dataset import MocapDataset
from lib.camera.camera import CameraInfoPacket
h36m_skeleton = Skeleton(parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],
joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31])
class Human36mDataset(MocapDataset):
def __init__(self, path, camera_param, remove_static_joints=True, camera_wise_performance=False, universal=False):
super().__init__(fps=50, skeleton=h36m_skeleton)
self.universal = universal
camera_meta = json.load(open(camera_param, 'r'))
# subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
subjects = [
'S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11',
'S1_0.6', 'S5_0.6', 'S6_0.6', 'S7_0.6', 'S8_0.6', 'S9_0.6', 'S11_0.6',
'S1_0.7', 'S5_0.7', 'S6_0.7', 'S7_0.7', 'S8_0.7', 'S9_0.7', 'S11_0.7',
'S1_0.8', 'S5_0.8', 'S6_0.8', 'S7_0.8', 'S8_0.8', 'S9_0.8', 'S11_0.8',
'S1_0.9', 'S5_0.9', 'S6_0.9', 'S7_0.9', 'S8_0.9', 'S9_0.9', 'S11_0.9',
'S1_1.1', 'S5_1.1', 'S6_1.1', 'S7_1.1', 'S8_1.1', 'S9_1.1', 'S11_1.1'
]
if camera_wise_performance:
camera_dist = list()
for cam in camera_meta:
# camera_dist.append((cam['id'], cam['pitch'], cam['translation_scale'], cam['degree']))
camera_dist.append(cam['id'])
self.camera_dist = camera_dist
camera_info = dict()
for subject in subjects:
camera_info.setdefault(subject, list())
for cam in camera_meta:
K = np.eye(3, dtype=np.float64)
K[0, 0] = cam['focal_length'][0]
K[1, 1] = cam['focal_length'][1]
K[0, 2] = cam['center'][0]
K[1, 2] = cam['center'][1]
R = np.array(cam['R']).reshape(3, 3)
dist_coeff = np.array(
cam['radial_distortion'][:2] + cam['tangential_distortion'] + cam['radial_distortion'][2:]
).reshape((5,))
t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
res_w=cam['res_w'], res_h=cam['res_h'],
azimuth=cam['azimuth'], dist_coeff=dist_coeff,
undistort=False))
self.camera_info = camera_info
# Load serialized dataset
data = np.load(path, allow_pickle=True)['positions_3d'].item()
self._data = {}
for subject, actions in data.items():
self._data[subject] = {}
for action_name, positions in actions.items():
self._data[subject][action_name] = {
'positions': positions,
}
if remove_static_joints:
if self.universal:
self.remove_joints([4, 5, 9, 10, 11, 12, 13, 14, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
else:
# Bring the skeleton to 17 joints instead of the original 32
self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
# Rewire shoulders to the correct parents
self._skeleton._parents[11] = 8
self._skeleton._parents[14] = 8
def supports_semi_supervised(self):
return True
@staticmethod
def remove_irrelevant_kpts(keypoints, universal=False):
"""
:param keypoints:
:return:
"""
if universal:
origin_keypoints, origin_keypoints_metadata = keypoints['positions_2d'].item(), keypoints['metadata'].item()
updated_keypoints, updated_keypoints_metadata = dict(), dict()
human36m_kpt_index = [0, 1, 2, 3, 4, 5, 6, 10, 11, 12, 13, 14, 15, 16]
updated_keypoints_metadata['layout_name'] = 'h36m'
updated_keypoints_metadata['num_joints'] = len(human36m_kpt_index)
updated_keypoints_metadata['keypoints_symmetry'] = [[4, 5, 6, 8, 9, 10], [1, 2, 3, 11, 12, 13]]
for subject in origin_keypoints.keys():
updated_keypoints.setdefault(subject, dict())
for action in origin_keypoints[subject]:
updated_keypoints[subject].setdefault(action, list())
for cam_idx, kps in enumerate(origin_keypoints[subject][action]):
updated_keypoints[subject][action].append(kps[:, human36m_kpt_index, :])
return updated_keypoints, updated_keypoints_metadata
else:
raise NotImplementedError | 0.592667 | 0.204839 |
"""Test cases for style/* checks."""
from test.warnings_test_common import DEFINITION_TYPES
from test.warnings_test_common import FUNCTIONS_SETTING_VARS
from test.warnings_test_common import LinterFailure
from test.warnings_test_common import format_with_args
from test.warnings_test_common import format_with_command
from test.warnings_test_common import gen_source_line
from test.warnings_test_common import replacement
from test.warnings_test_common import run_linter_throw
from nose_parameterized import param, parameterized
from testtools import ExpectedException
from testtools import TestCase
class TestSpaceBeforeFunctionCallWarnings(TestCase):
    """Test case for a single space between a function call and name."""

    def test_lint_pass(self):
        """Check that style/space_before_func passes.

        Test passes where there is a single space before a function name
        and a call, like so:

            function_name ()
        """
        result = run_linter_throw("function_call ()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_comment(self):
        """Check that style/space_before_func passes for commented calls.

        Test passes where there is no space before a function name
        and a call, where that line is commented like so:

            # function_name()
        """
        result = run_linter_throw("# function_call()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_inside_quotes(self):
        """Check that style/space_before_func passes for quoted calls.

        Test passes where there is no space before a function name
        and a call, where that line is inside quotes:

            "function_name()"
        """
        result = run_linter_throw("call (\"function_call()\")\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_fail_nospace(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is no space between a function name and a
        call, like so:

            function_name()
        """
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])

    def test_lint_fail_excessive_space(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is more than one space between a function name
        and a call, like so:

            function_name  ()
        """
        # Two spaces before "()" are intentional: this fixture must differ
        # from the single-space case that passes in test_lint_pass.
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call  ()\n",
                             whitelist=["style/space_before_func"])

    def test_replace_excess_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with excessive whitespace."""
            # Two spaces: the lint error being exercised.
            run_linter_throw("function_call  ()\n",
                             whitelist=["style/space_before_func"])

        # replacement() extracts the (line, suggested_text) pair from the
        # LinterFailure raised by the failing check.
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))

    def test_replace_nospace_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with no whitespace."""
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))
class TestFunctionsMustbeLowercaseOnly(TestCase):
    """Test case for functions and macros being lowercase."""

    # NOTE(review): run_linter_throw raises LinterFailure when the
    # whitelisted check fails, and replacement() extracts the failure's
    # (line, suggested_text) pair -- presumed from warnings_test_common;
    # confirm against that helper module.

    def test_pass_lowercase_call(self):
        """style/lowercase passes when calling lowercase func."""
        result = run_linter_throw("lowercase_func (ARGUMENT)\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_call(self):  # suppress(no-self-use)
        """style/lowercase fails when calling uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("UPPERCASE_FUNC (ARGUMENT)\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_call(self):
        """style/lowercase replaces uppercase call with lowercase call."""
        func_name = "UPPERCASE_FUNC"
        error_line = "{0} (ARGUMENT)\n".format(func_name)
        replacement_line = "{0} (ARGUMENT)\n".format(func_name.lower())

        def get_replacement():
            """Replacement for all uppercase function call."""
            run_linter_throw(error_line,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, replacement_line))

    def test_pass_lowercase_func_def(self):
        """style/lowercase passes when defining lowercase func."""
        result = run_linter_throw("function (lowercase_func) endfunction ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_func_def(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("function (UPPERCASE_FUNC) endfunction ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_func_def(self):
        """style/lowercase replaces uppercase definition with lowercase."""
        func_name = "UPPERCASE_FUNC"
        lower_name = func_name.lower()
        error = "function ({0}) endfunction ()\n".format(func_name)
        expected_repl = "function ({0}) endfunction ()\n".format(lower_name)

        def get_replacement():
            """Replace uppercase function call."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_repl))

    def test_pass_lowercase_macro_def(self):
        """style/lowercase passes when defining lowercase macro."""
        result = run_linter_throw("macro (lowercase_macro) endmacro ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_macro(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase macro."""
        with ExpectedException(LinterFailure):
            run_linter_throw("macro (UPPERCASE_MACRO) endmacro ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_macro(self):
        """style/lowercase replaces uppercase definition with lowercase def."""
        macro_name = "UPPERCASE_MACRO"
        lower_name = macro_name.lower()
        error = "macro ({0}) endmacro ()\n".format(macro_name)
        expected_replacement = "macro ({0}) endmacro ()\n".format(lower_name)

        def get_replacement():
            """Replacement for uppercase macro."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_replacement))
class TestUppercaseDefinitionArguments(TestCase):
    """Check that all arguments to a definition are uppercase."""

    # Each test is expanded once per entry in DEFINITION_TYPES
    # (function/macro-style definitions), substituted as {0}.

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_no_args(self, defin):
        """Check style/uppercase_args passes where function has no args."""
        script = "{0} (definition_name)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_uppercase_args(self, defin):
        """Check style/uppercase_args passes where args are uppercase."""
        script = "{0} (definition_name UPPERCASE)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_fail_lowercase_args(self, defin):  # suppress(no-self-use)
        """Check style/uppercase_args fails where args are lowercase."""
        script = "{0} (definition_name lowercase)\nend{0} ()\n".format(defin)
        with ExpectedException(LinterFailure):
            run_linter_throw(script, whitelist=["style/uppercase_args"])

    @parameterized.expand(DEFINITION_TYPES)
    def test_replace_with_upper(self, defin):
        """Check style/uppercase_args suggests uppercased arguments."""
        script = "{0} (name lowercase)\nend{0} ()\n".format(defin)

        def get_replacement():
            """Replacement for lowercase argument."""
            run_linter_throw(script, whitelist=["style/uppercase_args"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "{0} (name LOWERCASE)\n".format(defin)))
# Docstring formatters passed to @parameterized.expand as testcase_func_doc;
# each renders the matcher's command into the test docstring's {} placeholder
# (dereferenced "${VAR}", lowercased, and/or double-quoted variants).
_FORMAT_WITH_DEREFFED_VAR = format_with_command(lambda x: "${" + x + "}")
_FORMAT_WITH_LOWERCASE_VAR = format_with_command(lambda x: x.lower())
_FORMAT_WITH_OTHER_QUOTES = format_with_command(other_xform=lambda x: ("\"" +
                                                                       x +
                                                                       "\""))
_FORMAT_QUOTES_AND_LOWER = format_with_command(var_xform=lambda x: x.lower(),
                                               other_xform=lambda x: ("\"" +
                                                                      x +
                                                                      "\""))
class TestUppercaseVariableNamesOnly(TestCase):
    """Test case for uppercase variable names only."""

    # The docstrings below double as templates: the testcase_func_doc
    # formatters substitute the matcher/command into their {} placeholders,
    # so their exact wording is load-bearing for generated test names.

    parameters = [param(m) for m in FUNCTIONS_SETTING_VARS]

    @parameterized.expand(parameters, testcase_func_doc=format_with_args(0))
    def test_pass_no_var_set(self, matcher):
        """Check that style/set_var_case passes with {0.cmd}.

        Where no variable is actually set, then there is no linter failure
        """
        # This will trip up matchers that match other arguments
        result = run_linter_throw("{0} ()\n".format(matcher.cmd),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=format_with_command())
    def test_pass_no_quotes(self, matcher):
        """Check that style/set_var_case passes with {}.

        Variables set by another CMake command should only be uppercase
        """
        result = run_linter_throw(gen_source_line(matcher),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_DEREFFED_VAR)
    def test_pass_inside_deref(self, matcher):
        """Check that style/set_var_case passes when var in deref, like {}.

        Pass if variable is uppercase and inside of a deref, because variable
        dereferences are not sink variables.
        """
        xform = lambda x: "${" + x + "}"  # suppress(E731)
        result = run_linter_throw(gen_source_line(matcher,
                                                  match_transform=xform),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_OTHER_QUOTES)
    def test_pass_other_quotes(self, matcher):
        """Check that style/set_var_case pass with other args quoted in {}."""
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               other_transform=xform)
        result = run_linter_throw(line,
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_fail_no_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with {}, because lowercase."""
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower())
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_QUOTES_AND_LOWER)
    def test_fail_other_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with other args quoted in {}."""
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower(),
                               other_transform=xform)
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_replace_no_quotes(self, matcher):
        """Check that style/set_var_case replaces {} with uppercase var.

        Replacement should have uppercase matched argument
        """
        correct = gen_source_line(matcher)
        incorrect = gen_source_line(matcher,
                                    match_transform=lambda x: x.lower())

        def get_replacement():
            """Replacement for lowercase variable."""
            run_linter_throw(incorrect,
                             whitelist=["style/set_var_case"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, correct))
class TestFunctionArgumentsFallOnLine(TestCase):
    """Test alignment of function arguments."""

    # NOTE(review): this copy of the file appears to have had runs of
    # whitespace inside string literals collapsed by whatever produced it:
    # e.g. the "uneven spacing" fixture below is byte-identical to its own
    # suggested replacement, the two parameterized over/under-indent cases
    # are identical, and "$[ONE}" looks like a mangled "${ONE}".  The
    # alignment fixtures must be restored from upstream before the literal
    # spacing here can be trusted.

    def test_pass_args_on_same_line(self):
        """style/argument_align passes when args on same line."""
        # NOTE(review): "$[ONE}" is presumably a corrupted "${ONE}".
        self.assertTrue(run_linter_throw("call ($[ONE} TWO THREE \"FOUR\")\n",
                                         whitelist=["style/argument_align"]))

    def test_fail_args_unevenly_spaced(self):  # suppress(no-self-use)
        """style/argument_align fails if args on same line spaced unevenly."""
        # NOTE(review): original fixture presumably had extra spaces between
        # ONE and TWO (collapsed here).
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE TWO)\n",
                             whitelist=["style/argument_align"])

    def test_suggest_even_spacing(self):
        """style/argument_align suggests even spacing on the same line."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            # NOTE(review): input presumably had doubled spacing originally.
            run_linter_throw("call (ONE TWO)\n",
                             whitelist=["style/argument_align"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "call (ONE TWO)\n"))

    def test_fail_args_not_aligned(self):  # suppress(no-self-use)
        """style/argument_align fails when args do not fall on baseline col."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\nTWO)\n",
                             whitelist=["style/argument_align"])

    def test_fail_args_dispersed(self):  # suppress(no-self-use)
        """style/argument_align fails if args are dispersed across lines."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " ${TWO} \"THREE\"\n"
                             " FOUR)\n",
                             whitelist=["style/argument_align"])

    def test_fail_bad_kw_align(self):  # suppress(no-self-use)
        """style/argument_align fails on bad keyword alignment."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " TWO THREE\n"
                             " FOUR)\n",
                             whitelist=["style/argument_align"])

    def test_fail_inconsistent_align(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after first."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (${ONE} TWO\n"
                             " THREE)\n",
                             whitelist=["style/argument_align"])

    # Over and under-indent
    # NOTE(review): both entries are identical in this copy; originally they
    # would have been an over-indented and an under-indented variant.
    @parameterized.expand([
        " THREE)\n",
        " THREE)\n"
    ])
    def test_suggest_baseline_align(self, third_line):
        """style/argument_align suggests alignment to the baseline."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            run_linter_throw("call (ONE\n"
                             " TWO\n" +
                             third_line,
                             whitelist=["style/argument_align"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         # eg call (ONE
                         (3, (" THREE)\n")))

    def test_fail_align_func_name(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after second."""
        with ExpectedException(LinterFailure):
            run_linter_throw("function (ONE TWO\n"
                             " THREE)\n"
                             "endfunction ()\n",
                             whitelist=["style/argument_align"])

    def test_fail_align_macro_name(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after second."""
        with ExpectedException(LinterFailure):
            run_linter_throw("macro (name TWO\n"
                             " THREE)\n"
                             "endmacro ()\n",
                             whitelist=["style/argument_align"])

    def test_suggest_align_first_arg(self):
        """style/argument_align suggests alignment to function's first arg."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            run_linter_throw("function (name ONE\n"
                             " TWO)\n"
                             "endfunction ()\n",
                             whitelist=["style/argument_align"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         # eg, function (name ONE
                         (2, (" TWO)\n")))

    def test_pass_args_aligend(self):
        """style/argument_align passes when args aligned."""
        # (sic: "aligend" -- renaming the test would change the suite's IDs)
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " TWO)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_align_after(self):
        """style/argument_align passes when args aligned after first."""
        self.assertTrue(run_linter_throw("call (ONE TWO\n"
                                         " THREE)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_args_after_keyword(self):
        """style/argument_align passes with args after keyword arg."""
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " KEYWORD TWO\n"
                                         " KEYWORD THREE)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_align_after_keyword(self):
        """style/argument_align passes with args aligned after keyword."""
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " KEYWORD TWO\n"
                                         " THREE)\n",
                                         whitelist=["style/argument_align"]))

    # Tokens that look like keywords but are not plain variable names.
    # NOTE(review): "KEYWORD/ARGUMENT\"" has an unbalanced trailing quote --
    # presumably a corrupted fixture; confirm against upstream.
    nonvariable_keywords = [
        "${KEYWORD}",
        "\"KEYWORD\"",
        "KEYWORD/ARGUMENT\"",
        "1234"
    ]

    @parameterized.expand(nonvariable_keywords)
    def test_fail_if_kw_not_var_align(self, keyword):  # suppress(no-self-use)
        """style/argument_align fails aligning to a non-variable keyword."""
        kw_len = len(keyword)
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " {0} ONE".format(keyword) +
                             " " + " " * kw_len + " TWO)",
                             whitelist=["style/argument_align"])

    @parameterized.expand(nonvariable_keywords)
    def test_fail_if_kw_not_var_after(self, keyword):  # suppress(no-self-use)
        """style/argument_align fails with args after a non-variable token."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " {0} ONE)\n".format(keyword),
                             whitelist=["style/argument_align"])

    def test_pass_align_after_func(self):
        """style/argument_align passes when args aligned after second."""
        self.assertTrue(run_linter_throw("function (name TWO\n"
                                         " THREE)\n"
                                         "endfunction ()\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_align_after_macro(self):
        """style/argument_align passes when args aligned after second."""
        self.assertTrue(run_linter_throw("macro (name TWO\n"
                                         " THREE)\n"
                                         "endmacro ()\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_dispersed_if_cond(self):
        """style/argument_align passes when arguments to if are dispersed."""
        # NOTE(review): whitelist here is "unused/private_var", not
        # "style/argument_align" -- looks like a copy-paste slip; as written
        # the alignment check never runs for this fixture.
        self.assertTrue(run_linter_throw("if (CONDITION AND OTHER_COND OR\n"
                                         " FINAL_CONDITION AND NOT COND)\n"
                                         "endif ()",
                                         whitelist=["unused/private_var"]))
class TestSingleQuoteUsage(TestCase):
    """Test that we are only allowed to use double quotes for strings."""

    def test_pass_use_double_quotes(self):
        """Check style/doublequotes passes when strings use double quotes."""
        self.assertTrue(run_linter_throw("call (\"ARGUMENT\")\n",
                                         whitelist=["style/doublequotes"]))

    def test_pass_sigle_in_double(self):
        """Check style/doublequotes passes if strings use internal single."""
        # (sic: "sigle" -- renaming the test would change the suite's IDs)
        self.assertTrue(run_linter_throw("call (\"\'ARGUMENT\'\")\n",
                                         whitelist=["style/doublequotes"]))

    def test_fail_use_single_quotes(self):  # suppress(no-self-use)
        """Check style/doublequotes fails when strings use single quotes."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (\'ARGUMENT\')\n",
                             whitelist=["style/doublequotes"])

    def test_replace_single_with_double(self):
        """Check style/doublequotes replaces single quote use with double."""
        def get_replacement():
            """Replacement for single outer quotes."""
            run_linter_throw("call (\'ARGUMENT\')\n",
                             whitelist=["style/doublequotes"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "call (\"ARGUMENT\")\n"))

    def test_replace_only_outerquotes(self):
        """Check style/doublequotes only replaces outer quotes."""
        def get_replacement():
            """Replacement for single outer quote."""
            # Inner \\' quotes are escaped in the CMake source and must
            # survive the replacement untouched.
            run_linter_throw("call (\'ARG \\'U\\' MENT\')\n",
                             whitelist=["style/doublequotes"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "call (\"ARG \\'U\\' MENT\")\n"))
# CMake constructs that open a body and are closed by a matching
# "end<name> ()" terminator; used to parameterize the indentation tests.
HEADER_BODY_STRUCTURES = [
    "function",
    "macro",
    "while",
    "foreach",
    "if"
]
class TestIndentation(TestCase):
    """Test indentation checks."""

    # NOTE(review): indent=1 is passed straight through to run_linter_throw;
    # judging by the fixtures (one leading space per nesting level) it
    # appears to mean a one-character indent unit -- confirm against the
    # warnings_test_common helper.

    def test_pass_no_indent_spec(self):
        """style/indent passes when no indentation is specified."""
        self.assertTrue(run_linter_throw("function_call ()\n",
                                         whitelist=["style/indent"]))

    def test_pass_top_call_noindent(self):
        """style/indent passes with zero indents for toplevel calls."""
        self.assertTrue(run_linter_throw("function_call ()\n",
                                         whitelist=["style/indent"],
                                         indent=1))

    def test_pass_top_def_noindent(self):
        """style/indent passes with zero indents for toplevel definitions."""
        self.assertTrue(run_linter_throw("function (f ARG)\nendfunction()\n",
                                         whitelist=["style/indent"],
                                         indent=1))

    def test_pass_call_one_indent(self):
        """style/indent passes with one indent for nested calls."""
        script = "function (f ARG)\n call (ARG)\nendfunction ()"
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/indent"],
                                         indent=1))

    def test_pass_if_body_one_indent(self):
        """style/indent passes with one indent for if body."""
        script = "if (COND)\n call (ARG)\nendif ()"
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/indent"],
                                         indent=1))

    def test_pass_nest_if_indent(self):
        """style/indent passes with nested if bodies indented per level."""
        # NOTE(review): the innermost "call (ARG)" shows only one leading
        # space here; at indent=1 two levels deep it should presumably be
        # two -- this copy's whitespace looks collapsed; verify upstream.
        script = "if (COND)\n if (OTHER)\n call (ARG)\n endif ()\nendif ()"
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/indent"],
                                         indent=1))

    def test_fail_one_indent_top_call(self):  # suppress(no-self-use)
        """style/indent fails with one indent for toplevel calls."""
        with ExpectedException(LinterFailure):
            run_linter_throw(" function_call ()\n",
                             whitelist=["style/indent"],
                             indent=1)

    def test_fail_one_indent_toplevel(self):  # suppress(no-self-use)
        """style/indent fails with one indent for toplevel defs."""
        with ExpectedException(LinterFailure):
            run_linter_throw(" function (definition ARG)\n endfunction ()",
                             whitelist=["style/indent"],
                             indent=1)

    @parameterized.expand(HEADER_BODY_STRUCTURES)
    def test_fail_bad_term_indent(self, structure):  # suppress(no-self-use)
        """style/indent fails with one indent terminator."""
        with ExpectedException(LinterFailure):
            run_linter_throw("{0} ()\n end{0} ()".format(structure),
                             whitelist=["style/indent"],
                             indent=1)

    @parameterized.expand([
        "else",
        "elseif"
    ])  # suppress(no-self-use)
    def test_fail_mismatch_if_alt(self, alt):
        """style/indent fails when else, elseif has mismatched indent."""
        with ExpectedException(LinterFailure):
            script = "if (COND)\n {0} (COND)\nendif ()"
            run_linter_throw(script.format(alt),
                             whitelist=["style/indent"],
                             indent=1)

    def test_fail_noindent_nested_call(self):  # suppress(no-self-use)
        """style/indent fails with zero indents for a nested call."""
        with ExpectedException(LinterFailure):
            script = "function (f ARG)\ncall (ARG)\nendfunction ()"
            run_linter_throw(script, whitelist=["style/indent"], indent=1)

    def test_suggest_more_indent(self):
        """style/indent suggests more indentation where required."""
        script = "function (f ARG)\ncall (ARG)\nendfunction ()"

        def get_replacement():
            """Replacement for lack of indent."""
            run_linter_throw(script, whitelist=["style/indent"], indent=1)

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (2, " call (ARG)\n"))

    def test_suggest_less_indent(self):
        """style/indent suggests less indentation where required."""
        script = "function (f ARG)\n call (ARG)\n endfunction ()\n"

        def get_replacement():
            """Replacement for too much indent."""
            run_linter_throw(script, whitelist=["style/indent"], indent=1)

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (3, "endfunction ()\n"))
from test.warnings_test_common import DEFINITION_TYPES
from test.warnings_test_common import FUNCTIONS_SETTING_VARS
from test.warnings_test_common import LinterFailure
from test.warnings_test_common import format_with_args
from test.warnings_test_common import format_with_command
from test.warnings_test_common import gen_source_line
from test.warnings_test_common import replacement
from test.warnings_test_common import run_linter_throw
from nose_parameterized import param, parameterized
from testtools import ExpectedException
from testtools import TestCase
# NOTE(review): verbatim duplicate of the class defined earlier in this
# file (concatenation artifact?); the later definition silently rebinds
# the class name, so only this copy's tests would be collected.
class TestSpaceBeforeFunctionCallWarnings(TestCase):
    """Test case for a single space between a function call and name."""

    def test_lint_pass(self):
        """Check that style/space_before_func passes.

        Test passes where there is a single space before a function name
        and a call, like so:

            function_name ()
        """
        result = run_linter_throw("function_call ()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_comment(self):
        """Check that style/space_before_func passes for commented calls.

        Test passes where there is no space before a function name
        and a call, where that line is commented like so:

            # function_name()
        """
        result = run_linter_throw("# function_call()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_inside_quotes(self):
        """Check that style/space_before_func passes for quoted calls.

        Test passes where there is no space before a function name
        and a call, where that line is inside quotes:

            "function_name()"
        """
        result = run_linter_throw("call (\"function_call()\")\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_fail_nospace(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is no space between a function name and a
        call, like so:

            function_name()
        """
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])

    def test_lint_fail_excessive_space(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is more than one space between a function name
        and a call, like so:

            function_name  ()
        """
        # Two spaces before "()" are intentional: this fixture must differ
        # from the single-space case that passes in test_lint_pass.
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call  ()\n",
                             whitelist=["style/space_before_func"])

    def test_replace_excess_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with excessive whitespace."""
            # Two spaces: the lint error being exercised.
            run_linter_throw("function_call  ()\n",
                             whitelist=["style/space_before_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))

    def test_replace_nospace_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with no whitespace."""
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))
# NOTE(review): verbatim duplicate of the class defined earlier in this
# file; the later definition silently rebinds the class name.
class TestFunctionsMustbeLowercaseOnly(TestCase):
    """Test case for functions and macros being lowercase."""

    def test_pass_lowercase_call(self):
        """style/lowercase passes when calling lowercase func."""
        result = run_linter_throw("lowercase_func (ARGUMENT)\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_call(self):  # suppress(no-self-use)
        """style/lowercase fails when calling uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("UPPERCASE_FUNC (ARGUMENT)\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_call(self):
        """style/lowercase replaces uppercase call with lowercase call."""
        func_name = "UPPERCASE_FUNC"
        error_line = "{0} (ARGUMENT)\n".format(func_name)
        replacement_line = "{0} (ARGUMENT)\n".format(func_name.lower())

        def get_replacement():
            """Replacement for all uppercase function call."""
            run_linter_throw(error_line,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, replacement_line))

    def test_pass_lowercase_func_def(self):
        """style/lowercase passes when defining lowercase func."""
        result = run_linter_throw("function (lowercase_func) endfunction ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_func_def(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("function (UPPERCASE_FUNC) endfunction ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_func_def(self):
        """style/lowercase replaces uppercase definition with lowercase."""
        func_name = "UPPERCASE_FUNC"
        lower_name = func_name.lower()
        error = "function ({0}) endfunction ()\n".format(func_name)
        expected_repl = "function ({0}) endfunction ()\n".format(lower_name)

        def get_replacement():
            """Replace uppercase function call."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_repl))

    def test_pass_lowercase_macro_def(self):
        """style/lowercase passes when defining lowercase macro."""
        result = run_linter_throw("macro (lowercase_macro) endmacro ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_macro(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase macro."""
        with ExpectedException(LinterFailure):
            run_linter_throw("macro (UPPERCASE_MACRO) endmacro ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_macro(self):
        """style/lowercase replaces uppercase definition with lowercase def."""
        macro_name = "UPPERCASE_MACRO"
        lower_name = macro_name.lower()
        error = "macro ({0}) endmacro ()\n".format(macro_name)
        expected_replacement = "macro ({0}) endmacro ()\n".format(lower_name)

        def get_replacement():
            """Replacement for uppercase macro."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_replacement))
# NOTE(review): verbatim duplicate of the class defined earlier in this
# file; the later definition silently rebinds the class name.
class TestUppercaseDefinitionArguments(TestCase):
    """Check that all arguments to a definition are uppercase."""

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_no_args(self, defin):
        """Check style/uppercase_args passes where function has no args."""
        script = "{0} (definition_name)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_uppercase_args(self, defin):
        """Check style/uppercase_args passes where args are uppercase."""
        script = "{0} (definition_name UPPERCASE)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_fail_lowercase_args(self, defin):  # suppress(no-self-use)
        """Check style/uppercase_args fails where args are lowercase."""
        script = "{0} (definition_name lowercase)\nend{0} ()\n".format(defin)
        with ExpectedException(LinterFailure):
            run_linter_throw(script, whitelist=["style/uppercase_args"])

    @parameterized.expand(DEFINITION_TYPES)
    def test_replace_with_upper(self, defin):
        """Check style/uppercase_args suggests uppercased arguments."""
        script = "{0} (name lowercase)\nend{0} ()\n".format(defin)

        def get_replacement():
            """Replacement for lowercase argument."""
            run_linter_throw(script, whitelist=["style/uppercase_args"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "{0} (name LOWERCASE)\n".format(defin)))
# Docstring formatters for @parameterized.expand's testcase_func_doc.
# NOTE(review): duplicate assignments of the constants defined earlier in
# this file; the later bindings silently shadow the earlier ones.
_FORMAT_WITH_DEREFFED_VAR = format_with_command(lambda x: "${" + x + "}")
_FORMAT_WITH_LOWERCASE_VAR = format_with_command(lambda x: x.lower())
_FORMAT_WITH_OTHER_QUOTES = format_with_command(other_xform=lambda x: ("\"" +
                                                                       x +
                                                                       "\""))
_FORMAT_QUOTES_AND_LOWER = format_with_command(var_xform=lambda x: x.lower(),
                                               other_xform=lambda x: ("\"" +
                                                                      x +
                                                                      "\""))
# NOTE(review): verbatim duplicate of the class defined earlier in this
# file; the later definition silently rebinds the class name.
class TestUppercaseVariableNamesOnly(TestCase):
    """Test case for uppercase variable names only."""

    # The docstrings below double as templates: the testcase_func_doc
    # formatters substitute the matcher/command into their {} placeholders,
    # so their exact wording is load-bearing for generated test names.

    parameters = [param(m) for m in FUNCTIONS_SETTING_VARS]

    @parameterized.expand(parameters, testcase_func_doc=format_with_args(0))
    def test_pass_no_var_set(self, matcher):
        """Check that style/set_var_case passes with {0.cmd}.

        Where no variable is actually set, then there is no linter failure
        """
        # This will trip up matchers that match other arguments
        result = run_linter_throw("{0} ()\n".format(matcher.cmd),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=format_with_command())
    def test_pass_no_quotes(self, matcher):
        """Check that style/set_var_case passes with {}.

        Variables set by another CMake command should only be uppercase
        """
        result = run_linter_throw(gen_source_line(matcher),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_DEREFFED_VAR)
    def test_pass_inside_deref(self, matcher):
        """Check that style/set_var_case passes when var in deref, like {}.

        Pass if variable is uppercase and inside of a deref, because variable
        dereferences are not sink variables.
        """
        xform = lambda x: "${" + x + "}"  # suppress(E731)
        result = run_linter_throw(gen_source_line(matcher,
                                                  match_transform=xform),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_OTHER_QUOTES)
    def test_pass_other_quotes(self, matcher):
        """Check that style/set_var_case pass with other args quoted in {}."""
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               other_transform=xform)
        result = run_linter_throw(line,
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_fail_no_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with {}, because lowercase."""
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower())
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_QUOTES_AND_LOWER)
    def test_fail_other_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with other args quoted in {}."""
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower(),
                               other_transform=xform)
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_replace_no_quotes(self, matcher):
        """Check that style/set_var_case replaces {} with uppercase var.

        Replacement should have uppercase matched argument
        """
        correct = gen_source_line(matcher)
        incorrect = gen_source_line(matcher,
                                    match_transform=lambda x: x.lower())

        def get_replacement():
            """Replacement for lowercase variable."""
            run_linter_throw(incorrect,
                             whitelist=["style/set_var_case"])

        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, correct))
class TestFunctionArgumentsFallOnLine(TestCase):
"""Test alignment of function arguments."""
def test_pass_args_on_same_line(self):
"""style/argument_align passes when args on same line."""
self.assertTrue(run_linter_throw("call ($[ONE} TWO THREE \"FOUR\")\n",
whitelist=["style/argument_align"]))
def test_fail_args_unevenly_spaced(self): # suppress(no-self-use)
"""style/argument_align fails if args on same line spaced unevenly."""
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE TWO)\n",
whitelist=["style/argument_align"])
def test_suggest_even_spacing(self):
"""style/argument_align suggests even spacing on the same line."""
def get_replacement():
"""Get replacement for unevenly spaced lines."""
run_linter_throw("call (ONE TWO)\n",
whitelist=["style/argument_align"])
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
(1, "call (ONE TWO)\n"))
def test_fail_args_not_aligned(self): # suppress(no-self-use)
"""style/argument_align fails when args do not fall on baseline col."""
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE\nTWO)\n",
whitelist=["style/argument_align"])
def test_fail_args_dispersed(self): # suppress(no-self-use)
"""style/argument_align fails if args on same line spaced unevenly."""
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE\n"
" ${TWO} \"THREE\"\n"
" FOUR)\n",
whitelist=["style/argument_align"])
def test_fail_bad_kw_align(self): # suppress(no-self-use)
"""style/argument_align fails if args on same line spaced unevenly."""
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE\n"
" TWO THREE\n"
" FOUR)\n",
whitelist=["style/argument_align"])
def test_fail_inconsistent_align(self): # suppress(no-self-use)
"""style/argument_align fails when args not aligned after first."""
with ExpectedException(LinterFailure):
run_linter_throw("call (${ONE} TWO\n"
" THREE)\n",
whitelist=["style/argument_align"])
# Over and under-indent
@parameterized.expand([
" THREE)\n",
" THREE)\n"
])
def test_suggest_baseline_align(self, third_line):
"""style/argument_align suggests alignment to the baseline."""
def get_replacement():
"""Get replacement for unevenly spaced lines."""
run_linter_throw("call (ONE\n"
" TWO\n" +
third_line,
whitelist=["style/argument_align"])
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
# eg call (ONE
(3, (" THREE)\n")))
def test_fail_align_func_name(self): # suppress(no-self-use)
"""style/argument_align fails when args not aligned after second."""
with ExpectedException(LinterFailure):
run_linter_throw("function (ONE TWO\n"
" THREE)\n"
"endfunction ()\n",
whitelist=["style/argument_align"])
def test_fail_align_macro_name(self): # suppress(no-self-use)
"""style/argument_align fails when args not aligned after second."""
with ExpectedException(LinterFailure):
run_linter_throw("macro (name TWO\n"
" THREE)\n"
"endmacro ()\n",
whitelist=["style/argument_align"])
def test_suggest_align_first_arg(self):
"""style/argument_align suggests alignment to function's first arg."""
def get_replacement():
"""Get replacement for unevenly spaced lines."""
run_linter_throw("function (name ONE\n"
" TWO)\n"
"endfunction ()\n",
whitelist=["style/argument_align"])
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
# eg, function (name ONE
(2, (" TWO)\n")))
def test_pass_args_aligend(self):
"""style/argument_align passes when args aligned."""
self.assertTrue(run_linter_throw("call (ONE\n"
" TWO)\n",
whitelist=["style/argument_align"]))
def test_pass_align_after(self):
"""style/argument_align passes when args aligned after first."""
self.assertTrue(run_linter_throw("call (ONE TWO\n"
" THREE)\n",
whitelist=["style/argument_align"]))
def test_pass_args_after_keyword(self):
"""style/argument_align passes with args after keyword arg."""
self.assertTrue(run_linter_throw("call (ONE\n"
" KEYWORD TWO\n"
" KEYWORD THREE)\n",
whitelist=["style/argument_align"]))
def test_pass_align_after_keyword(self):
"""style/argument_align passes with args after keyword arg."""
self.assertTrue(run_linter_throw("call (ONE\n"
" KEYWORD TWO\n"
" THREE)\n",
whitelist=["style/argument_align"]))
nonvariable_keywords = [
"${KEYWORD}",
"\"KEYWORD\"",
"KEYWORD/ARGUMENT\"",
"1234"
]
@parameterized.expand(nonvariable_keywords)
def test_fail_if_kw_not_var_align(self, keyword): # suppress(no-self-use)
"""style/argument_align fails when args not aligned after second."""
kw_len = len(keyword)
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE\n"
" {0} ONE".format(keyword) +
" " + " " * kw_len + " TWO)",
whitelist=["style/argument_align"])
@parameterized.expand(nonvariable_keywords)
def test_fail_if_kw_not_var_after(self, keyword): # suppress(no-self-use)
"""style/argument_align fails when args not aligned after second."""
with ExpectedException(LinterFailure):
run_linter_throw("call (ONE\n"
" {0} ONE)\n".format(keyword),
whitelist=["style/argument_align"])
def test_pass_align_after_func(self):
"""style/argument_align passes when args aligned after second."""
self.assertTrue(run_linter_throw("function (name TWO\n"
" THREE)\n"
"endfunction ()\n",
whitelist=["style/argument_align"]))
def test_pass_align_after_macro(self):
"""style/argument_align passes when args aligned after second."""
self.assertTrue(run_linter_throw("macro (name TWO\n"
" THREE)\n"
"endmacro ()\n",
whitelist=["style/argument_align"]))
def test_pass_dispersed_if_cond(self):
"""style/argument_align passes when arguments to if are dispersed."""
self.assertTrue(run_linter_throw("if (CONDITION AND OTHER_COND OR\n"
" FINAL_CONDITION AND NOT COND)\n"
"endif ()",
whitelist=["unused/private_var"]))
class TestSingleQuoteUsage(TestCase):
"""Test that we are only allowed to use double quotes for strings."""
def test_pass_use_double_quotes(self):
"""Check style/doublequotes passes when strings use double quotes."""
self.assertTrue(run_linter_throw("call (\"ARGUMENT\")\n",
whitelist=["style/doublequotes"]))
def test_pass_sigle_in_double(self):
"""Check style/doublequotes passes if strings use internal single."""
self.assertTrue(run_linter_throw("call (\"\'ARGUMENT\'\")\n",
whitelist=["style/doublequotes"]))
def test_fail_use_single_quotes(self): # suppress(no-self-use)
"""Check style/doublequotes fails when strings use single quotes."""
with ExpectedException(LinterFailure):
run_linter_throw("call (\'ARGUMENT\')\n",
whitelist=["style/doublequotes"])
def test_replace_single_with_double(self):
"""Check style/doublequotes replaces single quote use with double."""
def get_replacement():
"""Replacement for single outer quotes."""
run_linter_throw("call (\'ARGUMENT\')\n",
whitelist=["style/doublequotes"])
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
(1, "call (\"ARGUMENT\")\n"))
def test_replace_only_outerquotes(self):
"""Check style/doublequotes only replaces outer quotes."""
def get_replacement():
"""Replacement for single outer quote."""
run_linter_throw("call (\'ARG \\'U\\' MENT\')\n",
whitelist=["style/doublequotes"])
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
(1, "call (\"ARG \\'U\\' MENT\")\n"))
HEADER_BODY_STRUCTURES = [
"function",
"macro",
"while",
"foreach",
"if"
]
class TestIndentation(TestCase):
"""Test indentation checks."""
def test_pass_no_indent_spec(self):
"""style/indent passes when no indentation is specified."""
self.assertTrue(run_linter_throw("function_call ()\n",
whitelist=["style/indent"]))
def test_pass_top_call_noindent(self):
"""style/indent passes with zero indents for toplevel calls."""
self.assertTrue(run_linter_throw("function_call ()\n",
whitelist=["style/indent"],
indent=1))
def test_pass_top_def_noindent(self):
"""style/indent passes with zero indents for toplevel definitions."""
self.assertTrue(run_linter_throw("function (f ARG)\nendfunction()\n",
whitelist=["style/indent"],
indent=1))
def test_pass_call_one_indent(self):
"""style/indent passes with one indent for nested calls."""
script = "function (f ARG)\n call (ARG)\nendfunction ()"
self.assertTrue(run_linter_throw(script,
whitelist=["style/indent"],
indent=1))
def test_pass_if_body_one_indent(self):
"""style/indent passes with one indent for if body."""
script = "if (COND)\n call (ARG)\nendif ()"
self.assertTrue(run_linter_throw(script,
whitelist=["style/indent"],
indent=1))
def test_pass_nest_if_indent(self):
"""style/indent passes with one indent for if body."""
script = "if (COND)\n if (OTHER)\n call (ARG)\n endif ()\nendif ()"
self.assertTrue(run_linter_throw(script,
whitelist=["style/indent"],
indent=1))
def test_fail_one_indent_top_call(self): # suppress(no-self-use)
"""style/indent fails with one indent for toplevel calls."""
with ExpectedException(LinterFailure):
run_linter_throw(" function_call ()\n",
whitelist=["style/indent"],
indent=1)
def test_fail_one_indent_toplevel(self): # suppress(no-self-use)
"""style/indent fails with one indent for toplevel defs."""
with ExpectedException(LinterFailure):
run_linter_throw(" function (definition ARG)\n endfunction ()",
whitelist=["style/indent"],
indent=1)
@parameterized.expand(HEADER_BODY_STRUCTURES)
def test_fail_bad_term_indent(self, structure): # suppress(no-self-use)
"""style/indent fails with one indent terminator."""
with ExpectedException(LinterFailure):
run_linter_throw("{0} ()\n end{0} ()".format(structure),
whitelist=["style/indent"],
indent=1)
@parameterized.expand([
"else",
"elseif"
]) # suppress(no-self-use)
def test_fail_mismatch_if_alt(self, alt):
"""style/indent fails when else, elseif has mismatched indent."""
with ExpectedException(LinterFailure):
script = "if (COND)\n {0} (COND)\nendif ()"
run_linter_throw(script.format(alt),
whitelist=["style/indent"],
indent=1)
def test_fail_noindent_nested_call(self): # suppress(no-self-use)
"""style/indent fails with zero indents for a nested call."""
with ExpectedException(LinterFailure):
script = "function (f ARG)\ncall (ARG)\nendfunction ()"
run_linter_throw(script, whitelist=["style/indent"], indent=1)
def test_suggest_more_indent(self):
"""style/indent suggests more indentation where required."""
script = "function (f ARG)\ncall (ARG)\nendfunction ()"
def get_replacement():
"""Replacement for lack of indent."""
run_linter_throw(script, whitelist=["style/indent"], indent=1)
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
(2, " call (ARG)\n"))
def test_suggest_less_indent(self):
"""style/indent suggests less indentation where required."""
script = "function (f ARG)\n call (ARG)\n endfunction ()\n"
def get_replacement():
"""Replacement for too much indent."""
run_linter_throw(script, whitelist=["style/indent"], indent=1)
exception = self.assertRaises(LinterFailure, get_replacement)
self.assertEqual(replacement(exception),
(3, "endfunction ()\n")) | 0.741393 | 0.479808 |
import traceback
import json
import botutils
from discord.ext import commands
from ._gameplay import Gameplay
from botutils import start_votes_timer
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
error_str = language["system"]["error"]
fstart_min = language["errors"]["fstart_min"]
fstart_max = language["errors"]["fstart_max"]
start_str = language["cmd"]["start"]
class Start(Gameplay, name = language["system"]["gameplay_cog"]):
"""Start command cog"""
@commands.command(
pass_context = True,
name = "start",
brief = language["doc"]["start"]["brief"],
help = language["doc"]["start"]["help"],
description = language["doc"]["start"]["description"]
)
@commands.check(botutils.check_if_lobby)
@commands.check(botutils.check_if_is_pregame_player)
async def start(self, ctx):
"""Start command"""
import globvars
# The player has already voted to start
if ctx.author.id in globvars.start_votes:
return
game = botutils.GameChooser().get_selected_game()
if len(globvars.master_state.pregame) < game.MIN_PLAYERS:
msg = fstart_min.format(
ctx.author.mention,
botutils.BotEmoji.cross,
str(game),
game.MIN_PLAYERS
)
await ctx.send(msg)
return
if len(globvars.master_state.pregame) > game.MAX_PLAYERS:
msg = fstart_max.format(
ctx.author.mention,
botutils.BotEmoji.cross,
str(game),
game.MAX_PLAYERS
)
await ctx.send(msg)
return
# The player has not voted to start yet
else:
globvars.start_votes.append(ctx.author.id)
# First person to vote. Start the clear start votes timer
if len(globvars.start_votes) == 1:
if start_votes_timer.is_running():
start_votes_timer.cancel()
start_votes_timer.start()
# Calculate the number of votes needed
votes_needed = max(len(globvars.master_state.pregame) - 3, 3)
# Reached the number of votes needed. Start the game.
if len(globvars.start_votes) == votes_needed:
game = botutils.GameChooser().get_selected_game()
globvars.master_state.game = game
await globvars.master_state.game.start_game()
botutils.update_state_machine()
# Clear the start votes
globvars.start_votes.clear()
return
votes_left = votes_needed - len(globvars.start_votes)
# Do not have a negative number of votes required to start
if votes_left < 0:
return
msg = start_str.format(
ctx.author.name,
votes_left,
"vote" if votes_left == 1 else "votes"
)
await ctx.send(msg)
@start.error
async def start_error(self, ctx, error):
"""Error handling of the start command"""
# Case: check failure
if isinstance(error, commands.CheckFailure):
return
# For other cases we will want to see the error logged
else:
try:
raise error
except Exception:
await ctx.send(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc()) | cmd/gameplay/start.py |
import traceback
import json
import botutils
from discord.ext import commands
from ._gameplay import Gameplay
from botutils import start_votes_timer
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
error_str = language["system"]["error"]
fstart_min = language["errors"]["fstart_min"]
fstart_max = language["errors"]["fstart_max"]
start_str = language["cmd"]["start"]
class Start(Gameplay, name = language["system"]["gameplay_cog"]):
"""Start command cog"""
@commands.command(
pass_context = True,
name = "start",
brief = language["doc"]["start"]["brief"],
help = language["doc"]["start"]["help"],
description = language["doc"]["start"]["description"]
)
@commands.check(botutils.check_if_lobby)
@commands.check(botutils.check_if_is_pregame_player)
async def start(self, ctx):
"""Start command"""
import globvars
# The player has already voted to start
if ctx.author.id in globvars.start_votes:
return
game = botutils.GameChooser().get_selected_game()
if len(globvars.master_state.pregame) < game.MIN_PLAYERS:
msg = fstart_min.format(
ctx.author.mention,
botutils.BotEmoji.cross,
str(game),
game.MIN_PLAYERS
)
await ctx.send(msg)
return
if len(globvars.master_state.pregame) > game.MAX_PLAYERS:
msg = fstart_max.format(
ctx.author.mention,
botutils.BotEmoji.cross,
str(game),
game.MAX_PLAYERS
)
await ctx.send(msg)
return
# The player has not voted to start yet
else:
globvars.start_votes.append(ctx.author.id)
# First person to vote. Start the clear start votes timer
if len(globvars.start_votes) == 1:
if start_votes_timer.is_running():
start_votes_timer.cancel()
start_votes_timer.start()
# Calculate the number of votes needed
votes_needed = max(len(globvars.master_state.pregame) - 3, 3)
# Reached the number of votes needed. Start the game.
if len(globvars.start_votes) == votes_needed:
game = botutils.GameChooser().get_selected_game()
globvars.master_state.game = game
await globvars.master_state.game.start_game()
botutils.update_state_machine()
# Clear the start votes
globvars.start_votes.clear()
return
votes_left = votes_needed - len(globvars.start_votes)
# Do not have a negative number of votes required to start
if votes_left < 0:
return
msg = start_str.format(
ctx.author.name,
votes_left,
"vote" if votes_left == 1 else "votes"
)
await ctx.send(msg)
@start.error
async def start_error(self, ctx, error):
"""Error handling of the start command"""
# Case: check failure
if isinstance(error, commands.CheckFailure):
return
# For other cases we will want to see the error logged
else:
try:
raise error
except Exception:
await ctx.send(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc()) | 0.363082 | 0.075075 |
from __future__ import absolute_import
__version__ = "0.1.1"
from id_card_detector.cv_utils import (read_image,
visualize_prediction,
export_predicted_bboxes,
fit_quads_over_masks,
visualize_quads,
unwarp_quads,
export_unwarped_quads)
from id_card_detector.predict import (get_prediction)
def detect_card(image_path: str,
output_dir: str = "output/",
unwarp: bool = True,
model_name: str = "maskrcnn_resnet50",
color: tuple = (0, 0, 0)):
"""
Arguments:
image_path: path to the image to be processed
output_dir: path to the results to be exported
unwarp: unwarp detected id card to rectangle
model_name: model to be used in the inference
color: color to be used in the mask/bbox/quad visualizations
"""
# read image from given path
image = read_image(image_path)
# get prediction
masks, boxes, classes, scores = get_prediction(image=image,
model_name="maskrcnn_resnet50",
threshold=0.75)
# visualize detected bboxes and masks
prediction_visual = visualize_prediction(image, masks, boxes, classes,
rect_th=2,
text_size=0.85,
text_th=2,
color=color,
output_dir=output_dir)
if not unwarp:
# export detected bounding boxes
export_predicted_bboxes(image=image,
boxes=boxes,
output_dir=output_dir)
# arange other values as empty
quads = []
unwarped_quads = []
else:
# fit quads to predicted masks
quads = fit_quads_over_masks(image, masks)
# visualize/export quads
quad_visual = visualize_quads(image=image,
quads=quads,
output_dir=output_dir,
color=color)
# unwarp quads to rects
unwarped_quads = unwarp_quads(image, quads)
# export unwarped quads
export_unwarped_quads(unwarped_quads,
output_dir=output_dir)
return masks, boxes, classes, scores, quads | id_card_detector/__init__.py | from __future__ import absolute_import
__version__ = "0.1.1"
from id_card_detector.cv_utils import (read_image,
visualize_prediction,
export_predicted_bboxes,
fit_quads_over_masks,
visualize_quads,
unwarp_quads,
export_unwarped_quads)
from id_card_detector.predict import (get_prediction)
def detect_card(image_path: str,
output_dir: str = "output/",
unwarp: bool = True,
model_name: str = "maskrcnn_resnet50",
color: tuple = (0, 0, 0)):
"""
Arguments:
image_path: path to the image to be processed
output_dir: path to the results to be exported
unwarp: unwarp detected id card to rectangle
model_name: model to be used in the inference
color: color to be used in the mask/bbox/quad visualizations
"""
# read image from given path
image = read_image(image_path)
# get prediction
masks, boxes, classes, scores = get_prediction(image=image,
model_name="maskrcnn_resnet50",
threshold=0.75)
# visualize detected bboxes and masks
prediction_visual = visualize_prediction(image, masks, boxes, classes,
rect_th=2,
text_size=0.85,
text_th=2,
color=color,
output_dir=output_dir)
if not unwarp:
# export detected bounding boxes
export_predicted_bboxes(image=image,
boxes=boxes,
output_dir=output_dir)
# arange other values as empty
quads = []
unwarped_quads = []
else:
# fit quads to predicted masks
quads = fit_quads_over_masks(image, masks)
# visualize/export quads
quad_visual = visualize_quads(image=image,
quads=quads,
output_dir=output_dir,
color=color)
# unwarp quads to rects
unwarped_quads = unwarp_quads(image, quads)
# export unwarped quads
export_unwarped_quads(unwarped_quads,
output_dir=output_dir)
return masks, boxes, classes, scores, quads | 0.834069 | 0.278576 |
from collections import deque
import copy
drow = [0, -1, -1, 0, 1, 1, 1, 0, -1]
dcol = [0, 0, -1, -1, -1, 0, 1, 1, 1]
def updateTable(table, sharkRow, sharkCol):
smallestFish = float('inf')
fishRow, fishCol = 0, 0
biggestFish = 0
for i in range(4):
for j in range(4):
curFish = table[i][j][0]
biggestFish = max(biggestFish, curFish)
if curFish > 0 and smallestFish > curFish:
smallestFish = curFish
fishRow, fishCol = i, j
while True:
while True:
curDir = table[fishRow][fishCol][1]
nrow, ncol = fishRow + drow[curDir], fishCol + dcol[curDir]
if nrow >= 0 and nrow < 4 and ncol >= 0 and ncol < 4:
if not (nrow == sharkRow and ncol == sharkCol):
table[nrow][ncol][0], table[fishRow][fishCol][0] = table[fishRow][fishCol][0], table[nrow][ncol][0]
table[nrow][ncol][1], table[fishRow][fishCol][1] = table[fishRow][fishCol][1], table[nrow][ncol][1]
break
curDir = 1 if curDir == 8 else curDir + 1
table[fishRow][fishCol][1] = curDir
if smallestFish == biggestFish:
return
nextSmallestFish = float('inf')
nextSmallRow, nextSmallCol = 0, 0
for i in range(4):
for j in range(4):
curFish = table[i][j][0]
if curFish > smallestFish and nextSmallestFish > curFish:
nextSmallestFish = curFish
nextSmallRow, nextSmallCol = i, j
smallestFish = nextSmallestFish
fishRow, fishCol = nextSmallRow, nextSmallCol
if __name__ == "__main__":
table = []
for _ in range(4):
data = list(map(int, input().split()))
row = []
for i in range(0, len(data), 2):
row.append([data[i], data[i + 1]])
table.append(row)
result = table[0][0][0]
table[0][0][0] = 0
updateTable(table, 0, 0)
q = deque([(0, 0, result, table)])
while q:
row, col, curResult, curTable = q.popleft()
result = max(result, curResult)
dir = curTable[row][col][1]
nrows, ncols = [], []
for i in range(1, 4):
nrows.append(row + i * drow[dir])
ncols.append(col + i * dcol[dir])
for i in range(3):
nrow, ncol = nrows[i], ncols[i]
if nrow >= 0 and nrow < 4 and ncol >= 0 and ncol < 4:
if curTable[nrow][ncol][0] > 0:
copyTable = copy.deepcopy(curTable)
q.append((nrow, ncol, curResult +
copyTable[nrow][ncol][0], copyTable))
copyTable[nrow][ncol][0] = 0
updateTable(copyTable, nrow, ncol)
print(result)
# 7 6 2 3 15 6 9 8
# 3 1 1 8 14 7 10 1
# 6 1 13 6 4 3 11 4
# 16 1 8 7 5 2 12 2
# 33
# 16 7 1 4 4 3 12 8
# 14 7 7 6 3 4 10 2
# 5 2 15 2 8 3 6 4
# 11 8 2 4 13 5 9 4
# 43
# 12 6 14 5 4 5 6 7
# 15 1 11 7 3 7 7 5
# 10 3 8 3 16 6 1 1
# 5 8 2 7 13 6 9 2
# 76 | practices/teen_shark.py |
from collections import deque
import copy
drow = [0, -1, -1, 0, 1, 1, 1, 0, -1]
dcol = [0, 0, -1, -1, -1, 0, 1, 1, 1]
def updateTable(table, sharkRow, sharkCol):
smallestFish = float('inf')
fishRow, fishCol = 0, 0
biggestFish = 0
for i in range(4):
for j in range(4):
curFish = table[i][j][0]
biggestFish = max(biggestFish, curFish)
if curFish > 0 and smallestFish > curFish:
smallestFish = curFish
fishRow, fishCol = i, j
while True:
while True:
curDir = table[fishRow][fishCol][1]
nrow, ncol = fishRow + drow[curDir], fishCol + dcol[curDir]
if nrow >= 0 and nrow < 4 and ncol >= 0 and ncol < 4:
if not (nrow == sharkRow and ncol == sharkCol):
table[nrow][ncol][0], table[fishRow][fishCol][0] = table[fishRow][fishCol][0], table[nrow][ncol][0]
table[nrow][ncol][1], table[fishRow][fishCol][1] = table[fishRow][fishCol][1], table[nrow][ncol][1]
break
curDir = 1 if curDir == 8 else curDir + 1
table[fishRow][fishCol][1] = curDir
if smallestFish == biggestFish:
return
nextSmallestFish = float('inf')
nextSmallRow, nextSmallCol = 0, 0
for i in range(4):
for j in range(4):
curFish = table[i][j][0]
if curFish > smallestFish and nextSmallestFish > curFish:
nextSmallestFish = curFish
nextSmallRow, nextSmallCol = i, j
smallestFish = nextSmallestFish
fishRow, fishCol = nextSmallRow, nextSmallCol
if __name__ == "__main__":
table = []
for _ in range(4):
data = list(map(int, input().split()))
row = []
for i in range(0, len(data), 2):
row.append([data[i], data[i + 1]])
table.append(row)
result = table[0][0][0]
table[0][0][0] = 0
updateTable(table, 0, 0)
q = deque([(0, 0, result, table)])
while q:
row, col, curResult, curTable = q.popleft()
result = max(result, curResult)
dir = curTable[row][col][1]
nrows, ncols = [], []
for i in range(1, 4):
nrows.append(row + i * drow[dir])
ncols.append(col + i * dcol[dir])
for i in range(3):
nrow, ncol = nrows[i], ncols[i]
if nrow >= 0 and nrow < 4 and ncol >= 0 and ncol < 4:
if curTable[nrow][ncol][0] > 0:
copyTable = copy.deepcopy(curTable)
q.append((nrow, ncol, curResult +
copyTable[nrow][ncol][0], copyTable))
copyTable[nrow][ncol][0] = 0
updateTable(copyTable, nrow, ncol)
print(result)
# 7 6 2 3 15 6 9 8
# 3 1 1 8 14 7 10 1
# 6 1 13 6 4 3 11 4
# 16 1 8 7 5 2 12 2
# 33
# 16 7 1 4 4 3 12 8
# 14 7 7 6 3 4 10 2
# 5 2 15 2 8 3 6 4
# 11 8 2 4 13 5 9 4
# 43
# 12 6 14 5 4 5 6 7
# 15 1 11 7 3 7 7 5
# 10 3 8 3 16 6 1 1
# 5 8 2 7 13 6 9 2
# 76 | 0.368178 | 0.304843 |
from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
use_step_matcher("re")
@given("that I am a unregistered participant of a event")
def step_impl(context):
context.username = "12thMan"
context.password = "<PASSWORD>"
context.first_name = "12th"
context.last_name = "Man"
context.email = "<EMAIL>"
usr = User.objects.create_user(
context.username,
context.email,
context.password
)
usr.first_name = context.first_name
usr.last_name = context.last_name
usr.save()
registered_user = User.objects.filter(username="12thMan")
assert len(registered_user) == 1
user_auth_token, _ = Token.objects.get_or_create(user=usr)
context.key = user_auth_token.key
data = {
"name": "New year event",
"x_label_min": "Some text to be displayed on the graph",
"x_label_max": "Something else you want to be displayed on the graph",
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
context.event_api_response_data = resp.json()
context.eventId = context.event_api_response_data["id"]
@when("I make an API call to the participant registration API with event id")
def step_impl(context):
data = {
"event_id": context.eventId
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/walk/register_participant/", data, headers=headers)
assert resp.status_code >= 200 and resp.status_code < 300
context.api_response_data = resp.json()
@then("I expect the response to tell me the re is successful and give a participant code")
def step_impl(context):
assert context.api_response_data["status"] == "registered"
@given("that I am a participant and wants to join an event and forgets to give event id")
def step_impl(context):
context.username = "12thMan"
context.password = "<PASSWORD>"
context.first_name = "12th"
context.last_name = "Man"
context.email = "<EMAIL>"
usr = User.objects.create_user(
context.username,
context.email,
context.password
)
usr.first_name = context.first_name
usr.last_name = context.last_name
usr.save()
registered_user = User.objects.filter(username="12thMan")
assert len(registered_user) == 1
user_auth_token, _ = Token.objects.get_or_create(user=usr)
context.key = user_auth_token.key
@when("I make an API call to the participant registration API without giving event id")
def step_impl(context):
data = {
"event_id": ''
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/walk/register_participant/", data, headers=headers)
assert resp.status_code >= 400 and resp.status_code < 500
context.api_response_data = resp.json()
@then("I expect the response to tell me that the registration is not successful")
def step_impl(context):
assert context.api_response_data["message"] == "Event not found, try a different event ID"
@given("that I am a participant and want to join an event by giving wrong event id")
def step_impl(context):
context.username = "12thMan"
context.password = "<PASSWORD>"
context.first_name = "12th"
context.last_name = "Man"
context.email = "<EMAIL>"
usr = User.objects.create_user(
context.username,
context.email,
context.password
)
usr.first_name = context.first_name
usr.last_name = context.last_name
usr.save()
registered_user = User.objects.filter(username="12thMan")
assert len(registered_user) == 1
user_auth_token, _ = Token.objects.get_or_create(user=usr)
context.key = user_auth_token.key
@when("I make an API call to the participant registration API with wrong event id")
def step_impl(context):
data = {
"event_id": '12345'
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/walk/register_participant/", data, headers=headers)
assert resp.status_code >= 400 and resp.status_code < 500
context.api_response_data = resp.json()
@then("I expect the response to tell me that the registration is not successful and event id is wrong")
def step_impl(context):
assert context.api_response_data["message"] == "Event not found, try a different event ID" | behave_tests/steps/participant_register.py | from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
use_step_matcher("re")
@given("that I am a unregistered participant of a event")
def step_impl(context):
context.username = "12thMan"
context.password = "<PASSWORD>"
context.first_name = "12th"
context.last_name = "Man"
context.email = "<EMAIL>"
usr = User.objects.create_user(
context.username,
context.email,
context.password
)
usr.first_name = context.first_name
usr.last_name = context.last_name
usr.save()
registered_user = User.objects.filter(username="12thMan")
assert len(registered_user) == 1
user_auth_token, _ = Token.objects.get_or_create(user=usr)
context.key = user_auth_token.key
data = {
"name": "New year event",
"x_label_min": "Some text to be displayed on the graph",
"x_label_max": "Something else you want to be displayed on the graph",
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
context.event_api_response_data = resp.json()
context.eventId = context.event_api_response_data["id"]
@when("I make an API call to the participant registration API with event id")
def step_impl(context):
data = {
"event_id": context.eventId
}
headers = {
'Authorization':'Token '+ context.key
}
resp = requests.post(context.test.live_server_url + "/walk/register_participant/", data, headers=headers)
assert resp.status_code >= 200 and resp.status_code < 300
context.api_response_data = resp.json()
@then("I expect the response to tell me the re is successful and give a participant code")
def step_impl(context):
assert context.api_response_data["status"] == "registered"
@given("that I am a participant and wants to join an event and forgets to give event id")
def step_impl(context):
context.username = "12thMan"
context.password = "<PASSWORD>"
context.first_name = "12th"
context.last_name = "Man"
context.email = "<EMAIL>"
usr = User.objects.create_user(
context.username,
context.email,
context.password
)
usr.first_name = context.first_name
usr.last_name = context.last_name
usr.save()
registered_user = User.objects.filter(username="12thMan")
assert len(registered_user) == 1
user_auth_token, _ = Token.objects.get_or_create(user=usr)
context.key = user_auth_token.key
@when("I make an API call to the participant registration API without giving event id")
def step_impl(context):
    """POST an empty event id and expect a client-error response."""
    url = context.test.live_server_url + "/walk/register_participant/"
    auth = {"Authorization": "Token " + context.key}
    response = requests.post(url, {"event_id": ""}, headers=auth)
    # A missing event id must be rejected with a 4xx status.
    assert 400 <= response.status_code < 500
    context.api_response_data = response.json()
@then("I expect the response to tell me that the registration is not successful")
def step_impl(context):
    """Check the API explains that the event could not be found."""
    expected = "Event not found, try a different event ID"
    assert context.api_response_data["message"] == expected
@given("that I am a participant and want to join an event by giving wrong event id")
def step_impl(context):
    """Create a test user and stash credentials plus an auth token on the context."""
    context.username = "12thMan"
    context.password = "<PASSWORD>"
    context.first_name = "12th"
    context.last_name = "Man"
    context.email = "<EMAIL>"
    account = User.objects.create_user(context.username, context.email, context.password)
    account.first_name = context.first_name
    account.last_name = context.last_name
    account.save()
    # Exactly one matching user row should exist after creation.
    assert len(User.objects.filter(username="12thMan")) == 1
    token, _ = Token.objects.get_or_create(user=account)
    context.key = token.key
@when("I make an API call to the participant registration API with wrong event id")
def step_impl(context):
    """POST a non-existent event id and expect a client-error response."""
    url = context.test.live_server_url + "/walk/register_participant/"
    auth = {"Authorization": "Token " + context.key}
    response = requests.post(url, {"event_id": "12345"}, headers=auth)
    # An unknown event id must be rejected with a 4xx status.
    assert 400 <= response.status_code < 500
    context.api_response_data = response.json()
@then("I expect the response to tell me that the registration is not successful and event id is wrong")
def step_impl(context):
    """Check the API explains that the event could not be found."""
    expected = "Event not found, try a different event ID"
    assert context.api_response_data["message"] == expected
from dataclasses import dataclass
from typing import Any, Dict, Final, List, Optional
from boto3.dynamodb.conditions import Attr
import boto3
@dataclass(frozen=True)
class User:
    """Immutable record describing a registered user."""

    user_id: str  # primary key in the Users table
    name: str  # display name
    face_ids: List[str]  # face IDs associated with this user

    @classmethod
    def parse(cls, data: Dict) -> 'User':
        """Build a User from a raw item dict.

        Uses ``cls`` (not a hard-coded class name) so subclasses parse
        into themselves. Raises KeyError if an expected key is missing.
        """
        return cls(user_id=data['user_id'],
                   name=data['name'],
                   face_ids=data['face_ids'])

    def copy_with(
        self,
        name: Optional[str] = None,
        face_ids: Optional[List[str]] = None,
    ) -> 'User':
        """Return a copy with the given fields replaced.

        Uses explicit ``is None`` tests instead of ``or`` so that falsy
        but valid replacements such as ``""`` or ``[]`` are honored
        rather than silently ignored.
        """
        return User(user_id=self.user_id,
                    name=self.name if name is None else name,
                    face_ids=self.face_ids if face_ids is None else face_ids)
class UserDatabaseException(Exception):
    """Base class for all errors raised by the user database layer."""

    def __init__(self, message: str, data: Optional[Any] = None) -> None:
        self.message = message
        self.data = data  # optional structured context attached to the error

    def __str__(self) -> str:
        text = f'[UserDatabaseException] {self.message}'
        # Append the context payload only when it is truthy.
        return f'{text}\n{self.data}' if self.data else text
class UserDatabaseUserNotExistException(UserDatabaseException):
    """Raised when a lookup references a user_id that is not in the table."""

    def __init__(self, user_id) -> None:
        super().__init__('user_id is not exist', {'user_id': user_id})
class UserDatabaseUserAlreadExistException(UserDatabaseException):
    """Raised when creating a user whose user_id already exists.

    NOTE(review): class name is missing a "y" ("Alread") — kept as-is
    because renaming would break callers.
    """

    def __init__(self, user_id) -> None:
        super().__init__('user_id is already exist', {'user_id': user_id})
class UserDatabase:
    """CRUD wrapper around the DynamoDB ``Users`` table."""

    __service_name: Final[str] = 'dynamodb'
    __region_name: Final[str] = 'ap-northeast-2'
    __table_name: Final[str] = 'Users'
    # Shared resource handle, created once at class-definition time.
    __db: Final = boto3.resource(
        __service_name,
        region_name=__region_name,
    )

    def __init__(self) -> None:
        self.table = UserDatabase.__db.Table(UserDatabase.__table_name)

    @classmethod
    def create_table(cls) -> None:
        """Create the Users table with a string ``user_id`` partition key."""
        UserDatabase.__db.create_table(
            TableName='Users',
            KeySchema=[
                {'AttributeName': 'user_id', 'KeyType': 'HASH'},  # partition key
            ],
            AttributeDefinitions=[
                {'AttributeName': 'user_id', 'AttributeType': 'S'},  # string
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            },
        )

    @classmethod
    def delete_table(cls) -> None:
        """Drop the Users table."""
        UserDatabase.__db.Table(UserDatabase.__table_name).delete()

    def _exists(self, user_id: str) -> bool:
        """Return True iff ``user_id`` is present in the table."""
        try:
            self.read(user_id)
        except UserDatabaseUserNotExistException:
            return False
        return True

    def create(self, user: User) -> None:
        """Insert a new user; raise if the user_id is already taken."""
        if self._exists(user.user_id):
            raise UserDatabaseUserAlreadExistException(user.user_id)
        self.table.put_item(Item={
            'user_id': user.user_id,
            'name': user.name,
            'face_ids': user.face_ids
        })

    def read(self, user_id: str) -> User:
        """Fetch a user by id; raise UserDatabaseUserNotExistException if absent."""
        res = self.table.get_item(Key={'user_id': user_id})
        if 'Item' not in res:
            raise UserDatabaseUserNotExistException(user_id)
        return User.parse(res['Item'])

    def update(self, user: User, new_name: str) -> None:
        """Rename an existing user.

        ``read`` already raises UserDatabaseUserNotExistException for a
        missing id, so the previous try/except/re-raise bookkeeping was
        redundant and has been removed (behavior unchanged).
        """
        self.read(user.user_id)
        self.table.put_item(Item={
            'user_id': user.user_id,
            'name': new_name,
            'face_ids': user.face_ids
        })

    def delete(self, user_id: str) -> None:
        """Delete an existing user; raise if the user does not exist."""
        self.read(user_id)
        self.table.delete_item(Key={'user_id': user_id})

    def search_by_name(self, name: str) -> List[User]:
        """Scan for users whose name equals ``name``."""
        res = self.table.scan(FilterExpression=Attr('name').eq(name))
        # TODO: use query instead of scan
        return [User.parse(item) for item in res['Items']]

    def search_by_face_id(self, face_id: str) -> List[User]:
        """Scan for users whose face_ids list contains ``face_id``."""
        res = self.table.scan(
            FilterExpression=Attr('face_ids').contains(face_id))
        # TODO: use query instead of scan
        return [User.parse(item) for item in res['Items']]
if __name__ == '__main__':
    from pprint import pprint

    user_db = UserDatabase()
    user = User('uuid', '유재석', ['123123', '111111'])
    # Bug fix: search_by_face_id expects a single face-id string; the
    # original passed the entire list, which never matches a contains()
    # filter on individual ids.
    res = user_db.search_by_face_id(user.face_ids[0])
    pprint(res)
from typing import Any, Dict, Final, List, Optional
from boto3.dynamodb.conditions import Attr
import boto3
@dataclass(frozen=True)
class User:
    """Immutable record describing a registered user."""

    user_id: str  # primary key in the Users table
    name: str  # display name
    face_ids: List[str]  # face IDs associated with this user

    @classmethod
    def parse(cls, data: Dict) -> 'User':
        """Build a User from a raw item dict.

        Uses ``cls`` (not a hard-coded class name) so subclasses parse
        into themselves. Raises KeyError if an expected key is missing.
        """
        return cls(user_id=data['user_id'],
                   name=data['name'],
                   face_ids=data['face_ids'])

    def copy_with(
        self,
        name: Optional[str] = None,
        face_ids: Optional[List[str]] = None,
    ) -> 'User':
        """Return a copy with the given fields replaced.

        Uses explicit ``is None`` tests instead of ``or`` so that falsy
        but valid replacements such as ``""`` or ``[]`` are honored
        rather than silently ignored.
        """
        return User(user_id=self.user_id,
                    name=self.name if name is None else name,
                    face_ids=self.face_ids if face_ids is None else face_ids)
class UserDatabaseException(Exception):
    """Base class for all errors raised by the user database layer."""

    def __init__(self, message: str, data: Optional[Any] = None) -> None:
        self.message = message
        self.data = data  # optional structured context attached to the error

    def __str__(self) -> str:
        text = f'[UserDatabaseException] {self.message}'
        # Append the context payload only when it is truthy.
        return f'{text}\n{self.data}' if self.data else text
class UserDatabaseUserNotExistException(UserDatabaseException):
    """Raised when a lookup references a user_id that is not in the table."""

    def __init__(self, user_id) -> None:
        super().__init__('user_id is not exist', {'user_id': user_id})
class UserDatabaseUserAlreadExistException(UserDatabaseException):
    """Raised when creating a user whose user_id already exists.

    NOTE(review): class name is missing a "y" ("Alread") — kept as-is
    because renaming would break callers.
    """

    def __init__(self, user_id) -> None:
        super().__init__('user_id is already exist', {'user_id': user_id})
class UserDatabase:
    """CRUD wrapper around the DynamoDB ``Users`` table."""

    __service_name: Final[str] = 'dynamodb'
    __region_name: Final[str] = 'ap-northeast-2'
    __table_name: Final[str] = 'Users'
    # Shared resource handle, created once at class-definition time.
    __db: Final = boto3.resource(
        __service_name,
        region_name=__region_name,
    )

    def __init__(self) -> None:
        self.table = UserDatabase.__db.Table(UserDatabase.__table_name)

    @classmethod
    def create_table(cls) -> None:
        """Create the Users table with a string ``user_id`` partition key."""
        UserDatabase.__db.create_table(
            TableName='Users',
            KeySchema=[
                {'AttributeName': 'user_id', 'KeyType': 'HASH'},  # partition key
            ],
            AttributeDefinitions=[
                {'AttributeName': 'user_id', 'AttributeType': 'S'},  # string
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            },
        )

    @classmethod
    def delete_table(cls) -> None:
        """Drop the Users table."""
        UserDatabase.__db.Table(UserDatabase.__table_name).delete()

    def _exists(self, user_id: str) -> bool:
        """Return True iff ``user_id`` is present in the table."""
        try:
            self.read(user_id)
        except UserDatabaseUserNotExistException:
            return False
        return True

    def create(self, user: User) -> None:
        """Insert a new user; raise if the user_id is already taken."""
        if self._exists(user.user_id):
            raise UserDatabaseUserAlreadExistException(user.user_id)
        self.table.put_item(Item={
            'user_id': user.user_id,
            'name': user.name,
            'face_ids': user.face_ids
        })

    def read(self, user_id: str) -> User:
        """Fetch a user by id; raise UserDatabaseUserNotExistException if absent."""
        res = self.table.get_item(Key={'user_id': user_id})
        if 'Item' not in res:
            raise UserDatabaseUserNotExistException(user_id)
        return User.parse(res['Item'])

    def update(self, user: User, new_name: str) -> None:
        """Rename an existing user.

        ``read`` already raises UserDatabaseUserNotExistException for a
        missing id, so the previous try/except/re-raise bookkeeping was
        redundant and has been removed (behavior unchanged).
        """
        self.read(user.user_id)
        self.table.put_item(Item={
            'user_id': user.user_id,
            'name': new_name,
            'face_ids': user.face_ids
        })

    def delete(self, user_id: str) -> None:
        """Delete an existing user; raise if the user does not exist."""
        self.read(user_id)
        self.table.delete_item(Key={'user_id': user_id})

    def search_by_name(self, name: str) -> List[User]:
        """Scan for users whose name equals ``name``."""
        res = self.table.scan(FilterExpression=Attr('name').eq(name))
        # TODO: use query instead of scan
        return [User.parse(item) for item in res['Items']]

    def search_by_face_id(self, face_id: str) -> List[User]:
        """Scan for users whose face_ids list contains ``face_id``."""
        res = self.table.scan(
            FilterExpression=Attr('face_ids').contains(face_id))
        # TODO: use query instead of scan
        return [User.parse(item) for item in res['Items']]
if __name__ == '__main__':
    from pprint import pprint

    user_db = UserDatabase()
    user = User('uuid', '유재석', ['123123', '111111'])
    # Bug fix: search_by_face_id expects a single face-id string; the
    # original passed the entire list, which never matches a contains()
    # filter on individual ids.
    res = user_db.search_by_face_id(user.face_ids[0])
    pprint(res)
import numpy as np
from math import factorial, sqrt, cos, sin
def fact(x):
    """Factorial that tolerates float input by truncating to int.

    Replaces the original ``fact = lambda ...`` assignment (PEP 8 E731:
    prefer ``def`` so the function has a proper name in tracebacks).
    """
    return factorial(int(x))
def choose(n, k):
    """Binomial coefficient C(n, k), computed from factorials (float result)."""
    numerator = fact(n)
    return numerator / fact(k) / fact(n - k)
def dmat_entry(j, m_, m, beta):
    """Real-valued Wigner small-d matrix element d^j_{m_, m}(beta).

    Implemented from the explicit factorial-sum formula (per the
    original author's note, following the Wikipedia presentation).
    """
    prefactor = sqrt(fact(j + m_) * fact(j - m_) * fact(j + m) * fact(j - m))
    s_lo = max(int(m - m_), 0)
    s_hi = min(int(j + m), int(j - m_))
    total = 0.
    for s in range(s_lo, s_hi + 1):
        coeff = (-1.)**s / (fact(j + m - s) * fact(s) * fact(m_ - m + s) * fact(j - m_ - s))
        total += coeff * cos(beta / 2)**(2 * j + m - m_ - 2 * s) * (sin(beta / 2))**(m_ - m + 2 * s)
    return prefactor * total
def dm(theta, l):
    """Return the (2l+1) x (2l+1) real Wigner small-d matrix d^l(theta)."""
    size = 2 * l + 1
    out = np.zeros((size, size))
    # Rows/columns are indexed by m, n in [-l, l], shifted to [0, 2l].
    for m in range(-l, l + 1):
        for n in range(-l, l + 1):
            out[m + l, n + l] = dmat_entry(l, m, n, theta)
    return out
def Dmat_entry(l, m, n, alpha, beta, gamma):
    """Complex Wigner D-matrix element D^l_{m,n} for z-y-z Euler angles."""
    left_phase = np.exp(-1j * m * alpha)
    right_phase = np.exp(-1j * n * gamma)
    return left_phase * dmat_entry(l, m, n, beta) * right_phase
def Dm(angles, l=1):
    """Return the full (2l+1) x (2l+1) complex Wigner D-matrix.

    Parameters
    ----------
    angles : sequence of three floats
        (alpha, beta, gamma) Euler angles.
    l : int
        Angular momentum quantum number (matrix order 2l+1).
    """
    # np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``complex`` is the documented replacement.
    ret = np.zeros((2 * l + 1, 2 * l + 1), dtype=complex)
    for m in range(-l, l + 1):
        for n in range(-l, l + 1):
            ret[m + l, n + l] = Dmat_entry(l, m, n, angles[0], angles[1], angles[2])
    return ret
def _Dm_hardcode(angles, l=1):
alpha, beta, gamma = angles
sin = np.sin
cos = np.cos
sqrt = np.sqrt
exp = np.exp
i = 1j
if l == 0:
return np.ones((1,1),dtype=np.complex)
if l == 1:
D = np.zeros((3,3),dtype=np.complex)
D[2,2] = (1+cos(beta))/2*exp(-i*(alpha+gamma))
D[2,1] = -1/sqrt(2)*sin(beta)*exp(-i*alpha)
D[2,0] = (1-cos(beta))/2*exp(-i*(alpha-gamma))
D[1,2] = 1/sqrt(2)*sin(beta)*exp(-i*gamma)
D[1,1] = cos(beta)
D[1,0] = -1/sqrt(2)*sin(beta)*exp(i*gamma)
D[0,2] = (1-cos(beta))/2*exp(i*(alpha-gamma))
D[0,1] = 1/sqrt(2)*sin(beta)*exp(i*alpha)
D[0,0] = (1+cos(beta))/2*exp(i*(alpha+gamma))
return D
if l == 2:
ei = lambda x: exp(1j * x)
D = np.zeros((5,5),dtype=np.complex)
D[4,4] = ((1+cos(beta))/2)**2*exp(-2*i*(alpha+gamma))
D[4,3] = -(1+cos(beta))/2*sin(beta)*exp(-i*(2*alpha+gamma))
D[4,2] = sqrt(3./8)*sin(beta)**2*exp(-i*2*alpha)
D[4,1] = -(1-cos(beta))/2*sin(beta)*exp(i*(-2*alpha+gamma))
D[4,0] = ((1-cos(beta))/2)**2*exp(2*i*(-alpha+gamma))
D[3,4] = (1+cos(beta))/2*sin(beta)*exp(-i*(alpha+2*gamma))
D[3,3] = (cos(beta)**2-(1-cos(beta))/2)*exp(-i*(alpha+gamma))
D[3,2] = -sqrt(3./8)*sin(2*beta)*exp(-i*alpha)
D[3,1] = ((1+cos(beta))/2-cos(beta)**2)*exp(i*(-alpha+gamma))
D[3,0] = -((1-cos(beta))/2)*sin(beta)*exp(i*(-alpha+2*gamma))
D[2,4] = sqrt(3./8)*sin(beta)**2*exp(-i*2*gamma)
D[2,3] = sqrt(3./8)*sin(2*beta)*exp(-i*gamma)
D[2,2] = (3*cos(beta)**2-1.)/2
D[2,1] = -sqrt(3./8)*sin(2*beta)*exp(i*gamma)
D[2,0] = sqrt(3./8)*sin(beta)**2*exp(i*2*gamma)
D[1,4] = (1-cos(beta))/2*sin(beta)*exp(i*(alpha-2*gamma))
D[1,3] = ((1+cos(beta))/2-cos(beta)**2)*exp(i*(alpha-gamma))
D[1,2] = sqrt(3./8)*sin(beta)**2*exp(i*alpha)
D[1,1] = (cos(beta)**2-(1-cos(beta))/2)*exp(i*(alpha+gamma))
D[1,0] = -(1+cos(beta))/2*sin(beta)*exp(i*(alpha+2*gamma))
D[0,4] = ((1-cos(beta))/2)**2*exp(2*i*(alpha-gamma))
D[0,3] = ((1-cos(beta))/2)*sin(beta)*exp(i*(2*alpha-gamma))
D[0,2] = sqrt(3./8)*sin(beta)**2*exp(i*2*alpha)
D[0,1] = (1+cos(beta))/2*sin(beta)*exp(i*(2*alpha+gamma))
D[0,0] = ((1+cos(beta))/2)**2*exp(2*i*(alpha+gamma))
return D | utils/WignerD.py | import numpy as np
from math import factorial, sqrt, cos, sin
def fact(x):
    """Factorial that tolerates float input by truncating to int.

    Replaces the original ``fact = lambda ...`` assignment (PEP 8 E731:
    prefer ``def`` so the function has a proper name in tracebacks).
    """
    return factorial(int(x))
def choose(n, k):
    """Binomial coefficient C(n, k), computed from factorials (float result)."""
    numerator = fact(n)
    return numerator / fact(k) / fact(n - k)
def dmat_entry(j, m_, m, beta):
    """Real-valued Wigner small-d matrix element d^j_{m_, m}(beta).

    Implemented from the explicit factorial-sum formula (per the
    original author's note, following the Wikipedia presentation).
    """
    prefactor = sqrt(fact(j + m_) * fact(j - m_) * fact(j + m) * fact(j - m))
    s_lo = max(int(m - m_), 0)
    s_hi = min(int(j + m), int(j - m_))
    total = 0.
    for s in range(s_lo, s_hi + 1):
        coeff = (-1.)**s / (fact(j + m - s) * fact(s) * fact(m_ - m + s) * fact(j - m_ - s))
        total += coeff * cos(beta / 2)**(2 * j + m - m_ - 2 * s) * (sin(beta / 2))**(m_ - m + 2 * s)
    return prefactor * total
def dm(theta, l):
    """Return the (2l+1) x (2l+1) real Wigner small-d matrix d^l(theta)."""
    size = 2 * l + 1
    out = np.zeros((size, size))
    # Rows/columns are indexed by m, n in [-l, l], shifted to [0, 2l].
    for m in range(-l, l + 1):
        for n in range(-l, l + 1):
            out[m + l, n + l] = dmat_entry(l, m, n, theta)
    return out
def Dmat_entry(l, m, n, alpha, beta, gamma):
    """Complex Wigner D-matrix element D^l_{m,n} for z-y-z Euler angles."""
    left_phase = np.exp(-1j * m * alpha)
    right_phase = np.exp(-1j * n * gamma)
    return left_phase * dmat_entry(l, m, n, beta) * right_phase
def Dm(angles, l=1):
    """Return the full (2l+1) x (2l+1) complex Wigner D-matrix.

    Parameters
    ----------
    angles : sequence of three floats
        (alpha, beta, gamma) Euler angles.
    l : int
        Angular momentum quantum number (matrix order 2l+1).
    """
    # np.complex was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``complex`` is the documented replacement.
    ret = np.zeros((2 * l + 1, 2 * l + 1), dtype=complex)
    for m in range(-l, l + 1):
        for n in range(-l, l + 1):
            ret[m + l, n + l] = Dmat_entry(l, m, n, angles[0], angles[1], angles[2])
    return ret
def _Dm_hardcode(angles, l=1):
alpha, beta, gamma = angles
sin = np.sin
cos = np.cos
sqrt = np.sqrt
exp = np.exp
i = 1j
if l == 0:
return np.ones((1,1),dtype=np.complex)
if l == 1:
D = np.zeros((3,3),dtype=np.complex)
D[2,2] = (1+cos(beta))/2*exp(-i*(alpha+gamma))
D[2,1] = -1/sqrt(2)*sin(beta)*exp(-i*alpha)
D[2,0] = (1-cos(beta))/2*exp(-i*(alpha-gamma))
D[1,2] = 1/sqrt(2)*sin(beta)*exp(-i*gamma)
D[1,1] = cos(beta)
D[1,0] = -1/sqrt(2)*sin(beta)*exp(i*gamma)
D[0,2] = (1-cos(beta))/2*exp(i*(alpha-gamma))
D[0,1] = 1/sqrt(2)*sin(beta)*exp(i*alpha)
D[0,0] = (1+cos(beta))/2*exp(i*(alpha+gamma))
return D
if l == 2:
ei = lambda x: exp(1j * x)
D = np.zeros((5,5),dtype=np.complex)
D[4,4] = ((1+cos(beta))/2)**2*exp(-2*i*(alpha+gamma))
D[4,3] = -(1+cos(beta))/2*sin(beta)*exp(-i*(2*alpha+gamma))
D[4,2] = sqrt(3./8)*sin(beta)**2*exp(-i*2*alpha)
D[4,1] = -(1-cos(beta))/2*sin(beta)*exp(i*(-2*alpha+gamma))
D[4,0] = ((1-cos(beta))/2)**2*exp(2*i*(-alpha+gamma))
D[3,4] = (1+cos(beta))/2*sin(beta)*exp(-i*(alpha+2*gamma))
D[3,3] = (cos(beta)**2-(1-cos(beta))/2)*exp(-i*(alpha+gamma))
D[3,2] = -sqrt(3./8)*sin(2*beta)*exp(-i*alpha)
D[3,1] = ((1+cos(beta))/2-cos(beta)**2)*exp(i*(-alpha+gamma))
D[3,0] = -((1-cos(beta))/2)*sin(beta)*exp(i*(-alpha+2*gamma))
D[2,4] = sqrt(3./8)*sin(beta)**2*exp(-i*2*gamma)
D[2,3] = sqrt(3./8)*sin(2*beta)*exp(-i*gamma)
D[2,2] = (3*cos(beta)**2-1.)/2
D[2,1] = -sqrt(3./8)*sin(2*beta)*exp(i*gamma)
D[2,0] = sqrt(3./8)*sin(beta)**2*exp(i*2*gamma)
D[1,4] = (1-cos(beta))/2*sin(beta)*exp(i*(alpha-2*gamma))
D[1,3] = ((1+cos(beta))/2-cos(beta)**2)*exp(i*(alpha-gamma))
D[1,2] = sqrt(3./8)*sin(beta)**2*exp(i*alpha)
D[1,1] = (cos(beta)**2-(1-cos(beta))/2)*exp(i*(alpha+gamma))
D[1,0] = -(1+cos(beta))/2*sin(beta)*exp(i*(alpha+2*gamma))
D[0,4] = ((1-cos(beta))/2)**2*exp(2*i*(alpha-gamma))
D[0,3] = ((1-cos(beta))/2)*sin(beta)*exp(i*(2*alpha-gamma))
D[0,2] = sqrt(3./8)*sin(beta)**2*exp(i*2*alpha)
D[0,1] = (1+cos(beta))/2*sin(beta)*exp(i*(2*alpha+gamma))
D[0,0] = ((1+cos(beta))/2)**2*exp(2*i*(alpha+gamma))
return D | 0.211417 | 0.55923 |
from django import forms
from .models import Category
from .models import Location
from .models import Organization
'''
Forms for the landing page dropdowns.
The category dropdown is a choice field
which displays all categories in the queryset.
It also excludes the empty string and . characters
from categories since both represent nonexistent fields.
Initial is 0 because we want the dropdown to default to the
first possible category, which is "all locations / organizations"
'''
class LandingPageForm(forms.Form):
    """Category and location filter dropdowns shown on the landing page."""

    category = forms.ModelChoiceField(
        queryset=Category.objects.order_by('category')
                                 .exclude(category='.')
                                 .exclude(category=''),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    location = forms.ModelChoiceField(
        queryset=Location.objects.exclude(location='').exclude(location='.'),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
'''
The form for the refine search bar on the results
page is the same type of form as on the landing page.
'''
class ResultsPageForm(forms.Form):
    """Refine-search dropdowns on the results page (narrower col-sm-3 layout)."""

    category = forms.ModelChoiceField(
        queryset=Category.objects.order_by('category')
                                 .exclude(category='.')
                                 .exclude(category=''),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'col-sm-3 form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    location = forms.ModelChoiceField(
        queryset=Location.objects.exclude(location='').exclude(location='.'),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'col-sm-3 form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    # Uses Google places API and radius field
    # myLocation = forms.CharField(max_length=100, label='Start Location', widget=forms.TextInput(attrs={'id': 'searchLocation', 'class':'col-sm-3'}), required=False)
    # radius = forms.IntegerField(label='Radius', widget=forms.NumberInput(attrs={'class': 'col-sm-1'}), required=False, min_value=0)
from .models import Category
from .models import Location
from .models import Organization
'''
Forms for the landing page dropdowns.
The category dropdown is a choice field
which displays all categories in the queryset.
It also excludes the empty string and . characters
from categories since both represent nonexistent fields.
Initial is 0 because we want the dropdown to default to the
first possible category, which is "all locations / organizations"
'''
class LandingPageForm(forms.Form):
    """Category and location filter dropdowns shown on the landing page."""

    category = forms.ModelChoiceField(
        queryset=Category.objects.order_by('category')
                                 .exclude(category='.')
                                 .exclude(category=''),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    location = forms.ModelChoiceField(
        queryset=Location.objects.exclude(location='').exclude(location='.'),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
'''
The form for the refine search bar on the results
page is the same type of form as on the landing page.
'''
class ResultsPageForm(forms.Form):
    """Refine-search dropdowns on the results page (narrower col-sm-3 layout)."""

    category = forms.ModelChoiceField(
        queryset=Category.objects.order_by('category')
                                 .exclude(category='.')
                                 .exclude(category=''),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'col-sm-3 form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    location = forms.ModelChoiceField(
        queryset=Location.objects.exclude(location='').exclude(location='.'),
        required=False,
        initial=0,
        widget=forms.Select(attrs={
            'class': 'col-sm-3 form-control btn btn-light dropdown-toggle',
            'type': 'button',
            'data-toggle': 'dropdown',
        }),
    )
    # Uses Google places API and radius field
    # myLocation = forms.CharField(max_length=100, label='Start Location', widget=forms.TextInput(attrs={'id': 'searchLocation', 'class':'col-sm-3'}), required=False)
    # radius = forms.IntegerField(label='Radius', widget=forms.NumberInput(attrs={'class': 'col-sm-1'}), required=False, min_value=0)
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Two-letter entity names kept/handled specially.
# NOTE(review): these look like chemical/ion symbols (pH, Ca, Hg, O2, Na, Mg);
# confirm against the code that consumes this list.
TWO_LETTER_ENTITIES = [
    "ph", "ca", "hg", "o2", "na", "mg"
]
# Bounds used when scaling node sizes, label font sizes and edge widths
# in the graph visualization.
MIN_NODE_SIZE = 10
MAX_NODE_SIZE = 40
MIN_FONT_SIZE = 6
MAX_FONT_SIZE = 24
MIN_EDGE_WIDTH = 3
MAX_EDGE_WIDTH = 10
# Entity types enabled by default.
DEFAULT_TYPES = [
    "CHEMICAL",
    "PROTEIN",
    "DISEASE",
    "CELL_TYPE",
    "PATHWAY",
    "CELL_COMPARTMENT",
    "DRUG",
    "Biomarkers",
    "Condition",
    "ORGAN",
    "ORGANISM",
    "GENE"
]
# CSS for the full-viewport visualization container.
VISUALIZATION_CONTENT_STYLE = {
    "width": "100%",
    "top": "0px",
    "left":"0px",
    "bottom": "0px",
    "position": "fixed",
}
# Color per entity type; the numeric string keys ("0".."11") color
# additional groupings — NOTE(review): presumably communities/clusters,
# confirm against the rendering code.
COLORS = {
    "DISEASE": "#c3c94d",
    "ORGANISM": "#9c83e8",
    "ORGAN": "#6dc960",
    "PROTEIN": "#de6dcb",
    "CHEMICAL": "#64cca3",
    "PATHWAY": "#e77158",
    "CELL_TYPE": "#4cc9d8",
    "DRUG": "#cf9749",
    "GENE": "#7fa0de",
    "CELL_COMPARTMENT": "#df7fa5",
    "Biomarkers": "#7cccee",
    "Condition": "#91c79f",
    "0": "#8cb900",
    "1": "#d97dd8",
    "2": "#00c7ff",
    "3": "#ff7045",
    "4": "#23966F",
    "5": "#deb1e0",
    "6": "#dbcd9d",
    "7": "#7cccee",
    "8": "#91c79f",
    "9": "#adbce9",
    "10": "#b3edd5",
    "11": "#8dc3b8",
}
# Base Cytoscape stylesheet. Later selectors of the same specificity
# override earlier ones, so the second "node"/"edge" entries refine the first.
CYTOSCAPE_STYLE_STYLESHEET = [
    {
        "selector": 'node',
        'style': {
            "opacity": 1,
            "text-valign": "center",
            "text-halign": "center",
            "label": "data(name)",
            "overlay-padding": "6px",
            "z-index": "10",
        }
    }, {
        "selector": "edge",
        "style": {
            'curve-style': 'bezier',
            'line-color': '#D5DAE6',
        }
    }, {
        "selector": "node",
        "style": {
            "width": 10,
            "height": 10,
        }
    }, {
        "selector": "edge",
        "style": {
            "width": 2,
        }
    }
]
# Layout configs
# COSE_BILKENT_CONFIG = {
#     "quality": 'default',
#     "refresh": 30,
#     "fit": True,
#     "padding": 20,
#     "randomize": True,
#     "nodeSeparation": 75,
#     "nodeRepulsion": 40500,
#     "idealEdgeLength": 70,
#     "edgeElasticity": 0.45,
#     "nestingFactor": 0.1,
#     "gravity": 50.25,
#     "numIter": 2500,
#     "tile": True,
#     "tilingPaddingVertical": 50,
#     "tilingPaddingHorizontal": 50,
#     "gravityRangeCompound": 1.5,
#     "gravityCompound": 2.0,
#     "gravityRange": 23.8,
#     "initialEnergyOnIncremental": 50.5
# }
# Parameters for the cose-bilkent force-directed layout.
COSE_BILKENT_CONFIG = {
    "name": "cose-bilkent",
    "quality": 'default',
    # Whether to include labels in node dimensions. Useful for avoiding label overlap
    "nodeDimensionsIncludeLabels": False,
    # number of ticks per frame; higher is faster but more jerky
    "refresh": 30,
    # Whether to fit the network view after when done
    "fit": True,
    # Padding on fit
    "padding": 10,
    # Whether to enable incremental mode
    "randomize": True,
    # Node repulsion (non overlapping) multiplier
    "nodeRepulsion": 4500,
    # Ideal (intra-graph) edge length
    "idealEdgeLength": 70,
    # Divisor to compute edge forces
    "edgeElasticity": 0.45,
    # Nesting factor (multiplier) to compute ideal edge length for inter-graph edges
    "nestingFactor": 0.1,
    # Gravity force (constant)
    "gravity": 50.25,
    # Maximum number of iterations to perform
    "numIter": 2500,
    # Whether to tile disconnected nodes
    "tile": True,
    # Type of layout animation. The option set is {'during', 'end', false}
    "animate": False,
    # Duration for animate:end
    "animationDuration": 500,
    # Amount of vertical space to put between degree zero nodes during tiling (can also be a function)
    "tilingPaddingVertical": 10,
    # Amount of horizontal space to put between degree zero nodes during tiling (can also be a function)
    "tilingPaddingHorizontal": 10,
    # Gravity range (constant) for compounds
    "gravityRangeCompound": 1.5,
    # Gravity force (constant) for compounds
    "gravityCompound": 2.0,
    # Gravity range (constant)
    "gravityRange": 30,
    # Initial cooling factor for incremental layout
    "initialEnergyOnIncremental": 0.5
}
# Parameters for the cola constraint-based layout.
COLA_CONFIG = {
    'name': 'cola',
    'animate': True,
    'refresh': 1,
    'maxSimulationTime': 8000,
    'ungrabifyWhileSimulating': False,
    'fit': True,
    'padding': 30,
    'randomize': True,
    'avoidOverlap': True,
    'handleDisconnected': True,
    'convergenceThreshold': 0.001,
    'nodeSpacing': 10,
    'edgeLength': 100
}
# Parameters for the plain cose force-directed layout.
COSE_CONFIG = {
    'name': "cose",
    'showlegend': True,
    'idealEdgeLength': 100,
    'nodeOverlap': 0,
    'refresh': 20,
    'fit': True,
    'padding': 30,
    'randomize': False,
    'componentSpacing': 100,
    'nodeRepulsion': 400000,
    'edgeElasticity': 100,
    'nestingFactor': 5,
    'gravity': 80,
    'numIter': 1000,
    'initialTemp': 200,
    'coolingFactor': 0.95,
    'minTemp': 1.0
}
# Lookup of all available layout configurations by name.
LAYOUT_CONFIGS = {
    "preset": {
        "name": "preset"
    },
    "cose": COSE_CONFIG,
    "cose-bilkent": COSE_BILKENT_CONFIG,
    "cola": COLA_CONFIG
}
# Property-name -> kind ("category" or "numeric") for node and edge
# attributes of the CORD-19 graph.
CORD19_PROP_TYPES = {
    "nodes": {
        '@type': 'category',
        'paper': 'category',
        'paper_frequency': 'numeric',
        'entity_type': 'category',
        'degree_frequency': 'numeric',
        'pagerank_frequency': 'numeric',
        'paragraph_frequency': 'numeric',
        'community_frequency': 'numeric',
        'community_npmi': 'numeric'
    },
    "edges": {
        'frequency': 'numeric',
        'ppmi': 'numeric',
        'npmi': 'numeric',
        'distance_npmi': 'numeric'
    }
}
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Two-letter entity names kept/handled specially.
# NOTE(review): these look like chemical/ion symbols (pH, Ca, Hg, O2, Na, Mg);
# confirm against the code that consumes this list.
TWO_LETTER_ENTITIES = [
    "ph", "ca", "hg", "o2", "na", "mg"
]
# Bounds used when scaling node sizes, label font sizes and edge widths
# in the graph visualization.
MIN_NODE_SIZE = 10
MAX_NODE_SIZE = 40
MIN_FONT_SIZE = 6
MAX_FONT_SIZE = 24
MIN_EDGE_WIDTH = 3
MAX_EDGE_WIDTH = 10
# Entity types enabled by default.
DEFAULT_TYPES = [
    "CHEMICAL",
    "PROTEIN",
    "DISEASE",
    "CELL_TYPE",
    "PATHWAY",
    "CELL_COMPARTMENT",
    "DRUG",
    "Biomarkers",
    "Condition",
    "ORGAN",
    "ORGANISM",
    "GENE"
]
# CSS for the full-viewport visualization container.
VISUALIZATION_CONTENT_STYLE = {
    "width": "100%",
    "top": "0px",
    "left":"0px",
    "bottom": "0px",
    "position": "fixed",
}
# Color per entity type; the numeric string keys ("0".."11") color
# additional groupings — NOTE(review): presumably communities/clusters,
# confirm against the rendering code.
COLORS = {
    "DISEASE": "#c3c94d",
    "ORGANISM": "#9c83e8",
    "ORGAN": "#6dc960",
    "PROTEIN": "#de6dcb",
    "CHEMICAL": "#64cca3",
    "PATHWAY": "#e77158",
    "CELL_TYPE": "#4cc9d8",
    "DRUG": "#cf9749",
    "GENE": "#7fa0de",
    "CELL_COMPARTMENT": "#df7fa5",
    "Biomarkers": "#7cccee",
    "Condition": "#91c79f",
    "0": "#8cb900",
    "1": "#d97dd8",
    "2": "#00c7ff",
    "3": "#ff7045",
    "4": "#23966F",
    "5": "#deb1e0",
    "6": "#dbcd9d",
    "7": "#7cccee",
    "8": "#91c79f",
    "9": "#adbce9",
    "10": "#b3edd5",
    "11": "#8dc3b8",
}
# Base Cytoscape stylesheet. Later selectors of the same specificity
# override earlier ones, so the second "node"/"edge" entries refine the first.
CYTOSCAPE_STYLE_STYLESHEET = [
    {
        "selector": 'node',
        'style': {
            "opacity": 1,
            "text-valign": "center",
            "text-halign": "center",
            "label": "data(name)",
            "overlay-padding": "6px",
            "z-index": "10",
        }
    }, {
        "selector": "edge",
        "style": {
            'curve-style': 'bezier',
            'line-color': '#D5DAE6',
        }
    }, {
        "selector": "node",
        "style": {
            "width": 10,
            "height": 10,
        }
    }, {
        "selector": "edge",
        "style": {
            "width": 2,
        }
    }
]
# Layout configs
# COSE_BILKENT_CONFIG = {
#     "quality": 'default',
#     "refresh": 30,
#     "fit": True,
#     "padding": 20,
#     "randomize": True,
#     "nodeSeparation": 75,
#     "nodeRepulsion": 40500,
#     "idealEdgeLength": 70,
#     "edgeElasticity": 0.45,
#     "nestingFactor": 0.1,
#     "gravity": 50.25,
#     "numIter": 2500,
#     "tile": True,
#     "tilingPaddingVertical": 50,
#     "tilingPaddingHorizontal": 50,
#     "gravityRangeCompound": 1.5,
#     "gravityCompound": 2.0,
#     "gravityRange": 23.8,
#     "initialEnergyOnIncremental": 50.5
# }
# Parameters for the cose-bilkent force-directed layout.
COSE_BILKENT_CONFIG = {
    "name": "cose-bilkent",
    "quality": 'default',
    # Whether to include labels in node dimensions. Useful for avoiding label overlap
    "nodeDimensionsIncludeLabels": False,
    # number of ticks per frame; higher is faster but more jerky
    "refresh": 30,
    # Whether to fit the network view after when done
    "fit": True,
    # Padding on fit
    "padding": 10,
    # Whether to enable incremental mode
    "randomize": True,
    # Node repulsion (non overlapping) multiplier
    "nodeRepulsion": 4500,
    # Ideal (intra-graph) edge length
    "idealEdgeLength": 70,
    # Divisor to compute edge forces
    "edgeElasticity": 0.45,
    # Nesting factor (multiplier) to compute ideal edge length for inter-graph edges
    "nestingFactor": 0.1,
    # Gravity force (constant)
    "gravity": 50.25,
    # Maximum number of iterations to perform
    "numIter": 2500,
    # Whether to tile disconnected nodes
    "tile": True,
    # Type of layout animation. The option set is {'during', 'end', false}
    "animate": False,
    # Duration for animate:end
    "animationDuration": 500,
    # Amount of vertical space to put between degree zero nodes during tiling (can also be a function)
    "tilingPaddingVertical": 10,
    # Amount of horizontal space to put between degree zero nodes during tiling (can also be a function)
    "tilingPaddingHorizontal": 10,
    # Gravity range (constant) for compounds
    "gravityRangeCompound": 1.5,
    # Gravity force (constant) for compounds
    "gravityCompound": 2.0,
    # Gravity range (constant)
    "gravityRange": 30,
    # Initial cooling factor for incremental layout
    "initialEnergyOnIncremental": 0.5
}
# Parameters for the cola constraint-based layout.
COLA_CONFIG = {
    'name': 'cola',
    'animate': True,
    'refresh': 1,
    'maxSimulationTime': 8000,
    'ungrabifyWhileSimulating': False,
    'fit': True,
    'padding': 30,
    'randomize': True,
    'avoidOverlap': True,
    'handleDisconnected': True,
    'convergenceThreshold': 0.001,
    'nodeSpacing': 10,
    'edgeLength': 100
}
# Parameters for the plain cose force-directed layout.
COSE_CONFIG = {
    'name': "cose",
    'showlegend': True,
    'idealEdgeLength': 100,
    'nodeOverlap': 0,
    'refresh': 20,
    'fit': True,
    'padding': 30,
    'randomize': False,
    'componentSpacing': 100,
    'nodeRepulsion': 400000,
    'edgeElasticity': 100,
    'nestingFactor': 5,
    'gravity': 80,
    'numIter': 1000,
    'initialTemp': 200,
    'coolingFactor': 0.95,
    'minTemp': 1.0
}
# Lookup of all available layout configurations by name.
LAYOUT_CONFIGS = {
    "preset": {
        "name": "preset"
    },
    "cose": COSE_CONFIG,
    "cose-bilkent": COSE_BILKENT_CONFIG,
    "cola": COLA_CONFIG
}
# Property-name -> kind ("category" or "numeric") for node and edge
# attributes of the CORD-19 graph.
CORD19_PROP_TYPES = {
    "nodes": {
        '@type': 'category',
        'paper': 'category',
        'paper_frequency': 'numeric',
        'entity_type': 'category',
        'degree_frequency': 'numeric',
        'pagerank_frequency': 'numeric',
        'paragraph_frequency': 'numeric',
        'community_frequency': 'numeric',
        'community_npmi': 'numeric'
    },
    "edges": {
        'frequency': 'numeric',
        'ppmi': 'numeric',
        'npmi': 'numeric',
        'distance_npmi': 'numeric'
    }
}
from sqlalchemy import Column, Float, String, Integer, ForeignKey, create_engine, inspect
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from utils.constants import WEBTEXT_DB
Base = declarative_base()  # declarative base shared by all ORM models in this module
Session = sessionmaker()  # session factory; bound to an engine per call in corpus_db_session
class Doc(Base):
    """A stored document: its raw text plus links to its score rows."""
    __tablename__ = 'docs'
    # One-to-many links to the per-document and per-span score tables below.
    doc_scores = relationship('DocScore')
    span_scores = relationship('SpanScore')
    # Metadata
    id = Column(Integer, primary_key=True)
    # NOTE(review): presumably where the document came from (path/URL) — confirm.
    location = Column(String)
    # Text
    text = Column(String)
class DocScore(Base):
    """Document-level attribute scores, one row per document (keyed by doc id)."""
    __tablename__ = 'doc_scores'
    # Metadata
    id = Column(Integer, ForeignKey('docs.id'), primary_key=True)
    # Attributes
    toxicity = Column(Float)
    severe_toxicity = Column(Float)
    identity_attack = Column(Float)
    insult = Column(Float)
    threat = Column(Float)
    profanity = Column(Float)
    sexually_explicit = Column(Float)
    flirtation = Column(Float)

    def __repr__(self):
        # Bug fix: the original emitted "<DocScore<id=...>" — a doubled "<"
        # and no matching close. Use a conventional balanced repr.
        return f"<DocScore(id={self.id})>"
class SpanScore(Base):
    """Span-level attribute scores, keyed by doc id plus the span's begin/end offsets."""
    __tablename__ = 'span_scores'
    # Metadata
    id = Column(Integer, ForeignKey('docs.id'), primary_key=True)
    begin = Column(Integer, primary_key=True)
    end = Column(Integer, primary_key=True)
    # Attributes
    toxicity = Column(Float)
    severe_toxicity = Column(Float)
    identity_attack = Column(Float)
    insult = Column(Float)
    threat = Column(Float)
    profanity = Column(Float)
    sexually_explicit = Column(Float)
    flirtation = Column(Float)

    def __repr__(self):
        # Bug fix: the original emitted "<SpanScore<id=..." with unbalanced
        # angle brackets and no closing ">". Use a balanced repr.
        return f"<SpanScore(id={self.id}, begin={self.begin}, end={self.end})>"
def corpus_db_engine(**kwargs) -> Engine:
    """Create a SQLAlchemy engine for the webtext SQLite database."""
    db_url = f'sqlite:///{WEBTEXT_DB}'
    return create_engine(db_url, **kwargs)
def corpus_db_session(**kwargs) -> Session:
    """Open a new ORM session bound to the webtext database engine."""
    return Session(bind=corpus_db_engine(**kwargs))
def primary_key(table: Base):
    """Return the primary-key column names of ``table`` as a tuple.

    Bug fix: the original built the tuple but never returned it, so the
    function always returned None.
    """
    return tuple(pk.name for pk in inspect(table).primary_key)
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from utils.constants import WEBTEXT_DB
Base = declarative_base()
Session = sessionmaker()
class Doc(Base):
__tablename__ = 'docs'
doc_scores = relationship('DocScore')
span_scores = relationship('SpanScore')
# Metadata
id = Column(Integer, primary_key=True)
location = Column(String)
# Text
text = Column(String)
class DocScore(Base):
__tablename__ = 'doc_scores'
# Metadata
id = Column(Integer, ForeignKey('docs.id'), primary_key=True)
# Attributes
toxicity = Column(Float)
severe_toxicity = Column(Float)
identity_attack = Column(Float)
insult = Column(Float)
threat = Column(Float)
profanity = Column(Float)
sexually_explicit = Column(Float)
flirtation = Column(Float)
def __repr__(self):
return f"<DocScore<id={self.id}>"
class SpanScore(Base):
__tablename__ = 'span_scores'
# Metadata
id = Column(Integer, ForeignKey('docs.id'), primary_key=True)
begin = Column(Integer, primary_key=True)
end = Column(Integer, primary_key=True)
# Attributes
toxicity = Column(Float)
severe_toxicity = Column(Float)
identity_attack = Column(Float)
insult = Column(Float)
threat = Column(Float)
profanity = Column(Float)
sexually_explicit = Column(Float)
flirtation = Column(Float)
def __repr__(self):
return f"<SpanScore<id={self.id}, begin={self.begin}, end={self.end}>"
def corpus_db_engine(**kwargs) -> Engine:
return create_engine(f'sqlite:///{WEBTEXT_DB}', **kwargs)
def corpus_db_session(**kwargs) -> Session:
engine = corpus_db_engine(**kwargs)
session = Session(bind=engine)
return session
def primary_key(table: Base):
tuple(pk.name for pk in inspect(table).primary_key) | 0.663887 | 0.217732 |
from __future__ import print_function
from __future__ import unicode_literals
import time
import sys
import os
import shutil
import csv
import boto3
import pyspark
import zipfile
import tarfile
from time import gmtime, strftime
from awsglue.utils import getResolvedOptions
import mleap.pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.sql.types import StructField, StructType, StringType, DoubleType, FloatType
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
from awsglue.transforms import *
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
def csv_line(data):
r = ','.join(str(d) for d in data[1])
return str(data[0]) + "," + r
glueContext = GlueContext(SparkContext.getOrCreate())
logger = glueContext.get_logger()
spark = glueContext.spark_session
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_BUCKET'])
# This is needed to save RDDs which is the only way to write nested Dataframes into CSV format
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
# Read source data into a Glue dynamic frame
windturbine_rawdata = glueContext.create_dynamic_frame.from_catalog(
database="endtoendml-db", table_name="raw")
df = windturbine_rawdata.toDF()
df = df.na.replace('', "HAWT", subset=["turbine_type"])
df = df.na.fill(37.0, subset=["oil_temperature"])
# Defining indexers and one-hot encoders
col0_indexer = StringIndexer(inputCol="turbine_id", outputCol="indexed_turbine_id")
col1_indexer = StringIndexer(inputCol="turbine_type", outputCol="indexed_turbine_type")
col10_indexer = StringIndexer(inputCol="wind_direction", outputCol="indexed_wind_direction")
turbine_id_encoder = OneHotEncoder(inputCol="indexed_turbine_id", outputCol="turb_id").setDropLast(False)
turbine_type_encoder = OneHotEncoder(inputCol="indexed_turbine_type", outputCol="turb_type").setDropLast(False)
wind_direction_encoder = OneHotEncoder(inputCol="indexed_wind_direction", outputCol="wind_dir").setDropLast(False)
assembler = VectorAssembler(inputCols=['turb_id', 'turb_type', 'wind_speed', 'rpm_blade', 'oil_temperature', 'oil_level','temperature','humidity', 'vibrations_frequency', 'pressure', 'wind_dir'], outputCol="features")
# Defining pipeline
pipeline = Pipeline(stages=[col0_indexer, col1_indexer, col10_indexer, turbine_id_encoder, turbine_type_encoder, wind_direction_encoder, assembler])
logger.info('Fitting pipeline...')
model = pipeline.fit(df)
df = model.transform(df)
logger.info('Completed pipeline fit-transform.')
logger.info('Fitting target variable indexer...')
label_indexer = StringIndexer(inputCol="breakdown", outputCol="indexed_breakdown")
indexed_label_df = label_indexer.fit(df).transform(df)
logger.info('Completed indexer fit-transform.')
logger.info('Random split started...')
# Split the overall dataset into 80-20 training and validation
(train_df, validation_df) = indexed_label_df.randomSplit([0.8, 0.2])
logger.info('Random split completed.')
logger.info('Save train file started...')
# Convert the train dataframe to RDD to save in CSV format and upload to S3
train_rdd = train_df.rdd.map(lambda x: (x.indexed_breakdown, x.features))
train_lines = train_rdd.map(csv_line)
train_lines.saveAsTextFile('s3://{0}/data/preprocessed/train'.format(args['S3_BUCKET']))
logger.info('Save train file completed.')
logger.info('Save validation file started...')
# Convert the validation dataframe to RDD to save in CSV format and upload to S3
validation_rdd = validation_df.rdd.map(lambda x: (x.indexed_breakdown, x.features))
validation_lines = validation_rdd.map(csv_line)
validation_lines.saveAsTextFile('s3://{0}/data/preprocessed/val'.format(args['S3_BUCKET']))
logger.info('Save validation file completed.')
# Serialize and store the model via MLeap
timestamp = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
model_filename = '/tmp/model-' + timestamp + '.zip'
SimpleSparkSerializer().serializeToBundle(model, 'jar:file:' + model_filename, df)
# Unzip the model as SageMaker expects a .tar.gz file but MLeap produces a .zip file
with zipfile.ZipFile(model_filename) as zf:
zf.extractall("/tmp/model-" + timestamp)
# Write back the content as a .tar.gz file
with tarfile.open("/tmp/model-" + timestamp + ".tar.gz", "w:gz") as tar:
tar.add("/tmp/model-" + timestamp + "/bundle.json", arcname='bundle.json')
tar.add("/tmp/model-" + timestamp + "/root", arcname='root')
# Upload the model in tar.gz format to S3 so that it can be used with SageMaker for inference later
s3 = boto3.resource('s3')
s3.Bucket(args['S3_BUCKET']).upload_file('/tmp/model-' + timestamp + '.tar.gz', 'output/sparkml/model.tar.gz') | 02_data_exploration_and_feature_eng/endtoendml_etl.py | from __future__ import print_function
from __future__ import unicode_literals
import time
import sys
import os
import shutil
import csv
import boto3
import pyspark
import zipfile
import tarfile
from time import gmtime, strftime
from awsglue.utils import getResolvedOptions
import mleap.pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.sql.types import StructField, StructType, StringType, DoubleType, FloatType
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
from awsglue.transforms import *
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
def csv_line(data):
r = ','.join(str(d) for d in data[1])
return str(data[0]) + "," + r
glueContext = GlueContext(SparkContext.getOrCreate())
logger = glueContext.get_logger()
spark = glueContext.spark_session
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_BUCKET'])
# This is needed to save RDDs which is the only way to write nested Dataframes into CSV format
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
# Read source data into a Glue dynamic frame
windturbine_rawdata = glueContext.create_dynamic_frame.from_catalog(
database="endtoendml-db", table_name="raw")
df = windturbine_rawdata.toDF()
df = df.na.replace('', "HAWT", subset=["turbine_type"])
df = df.na.fill(37.0, subset=["oil_temperature"])
# Defining indexers and one-hot encoders
col0_indexer = StringIndexer(inputCol="turbine_id", outputCol="indexed_turbine_id")
col1_indexer = StringIndexer(inputCol="turbine_type", outputCol="indexed_turbine_type")
col10_indexer = StringIndexer(inputCol="wind_direction", outputCol="indexed_wind_direction")
turbine_id_encoder = OneHotEncoder(inputCol="indexed_turbine_id", outputCol="turb_id").setDropLast(False)
turbine_type_encoder = OneHotEncoder(inputCol="indexed_turbine_type", outputCol="turb_type").setDropLast(False)
wind_direction_encoder = OneHotEncoder(inputCol="indexed_wind_direction", outputCol="wind_dir").setDropLast(False)
assembler = VectorAssembler(inputCols=['turb_id', 'turb_type', 'wind_speed', 'rpm_blade', 'oil_temperature', 'oil_level','temperature','humidity', 'vibrations_frequency', 'pressure', 'wind_dir'], outputCol="features")
# Defining pipeline
pipeline = Pipeline(stages=[col0_indexer, col1_indexer, col10_indexer, turbine_id_encoder, turbine_type_encoder, wind_direction_encoder, assembler])
logger.info('Fitting pipeline...')
model = pipeline.fit(df)
df = model.transform(df)
logger.info('Completed pipeline fit-transform.')
logger.info('Fitting target variable indexer...')
label_indexer = StringIndexer(inputCol="breakdown", outputCol="indexed_breakdown")
indexed_label_df = label_indexer.fit(df).transform(df)
logger.info('Completed indexer fit-transform.')
logger.info('Random split started...')
# Split the overall dataset into 80-20 training and validation
(train_df, validation_df) = indexed_label_df.randomSplit([0.8, 0.2])
logger.info('Random split completed.')
logger.info('Save train file started...')
# Convert the train dataframe to RDD to save in CSV format and upload to S3
train_rdd = train_df.rdd.map(lambda x: (x.indexed_breakdown, x.features))
train_lines = train_rdd.map(csv_line)
train_lines.saveAsTextFile('s3://{0}/data/preprocessed/train'.format(args['S3_BUCKET']))
logger.info('Save train file completed.')
logger.info('Save validation file started...')
# Convert the validation dataframe to RDD to save in CSV format and upload to S3
validation_rdd = validation_df.rdd.map(lambda x: (x.indexed_breakdown, x.features))
validation_lines = validation_rdd.map(csv_line)
validation_lines.saveAsTextFile('s3://{0}/data/preprocessed/val'.format(args['S3_BUCKET']))
logger.info('Save validation file completed.')
# Serialize and store the model via MLeap
timestamp = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
model_filename = '/tmp/model-' + timestamp + '.zip'
SimpleSparkSerializer().serializeToBundle(model, 'jar:file:' + model_filename, df)
# Unzip the model as SageMaker expects a .tar.gz file but MLeap produces a .zip file
with zipfile.ZipFile(model_filename) as zf:
zf.extractall("/tmp/model-" + timestamp)
# Write back the content as a .tar.gz file
with tarfile.open("/tmp/model-" + timestamp + ".tar.gz", "w:gz") as tar:
tar.add("/tmp/model-" + timestamp + "/bundle.json", arcname='bundle.json')
tar.add("/tmp/model-" + timestamp + "/root", arcname='root')
# Upload the model in tar.gz format to S3 so that it can be used with SageMaker for inference later
s3 = boto3.resource('s3')
s3.Bucket(args['S3_BUCKET']).upload_file('/tmp/model-' + timestamp + '.tar.gz', 'output/sparkml/model.tar.gz') | 0.451327 | 0.225811 |
import asyncio
import copy
import contextlib
import time
import os
import random
import re
import json
import logging
import tempfile
import uuid
from xml.etree import ElementTree as et
import aiohttp
from aiohttp import web
from rallyci import utils
from rallyci.common import asyncssh
# Module-level logger for this provider.
LOG = logging.getLogger(__name__)
# First load-average figure from `uptime` output.
RE_LA = re.compile(r".*load average: (\d+\.\d+),.*")
# "free" and "cached" columns (MiB) of the Mem: row from `free -m` output.
RE_MEM = re.compile(r".*Mem: +\d+ +\d+ +(\d+) +\d+ +\d+ +(\d+).*")
# `ip link` interface name split into (alpha prefix, numeric index).
IFACE_RE = re.compile(r"\d+: ([a-z]+)(\d+): .*")
# IPv4 address at the start of a /proc/net/arp line.
IP_RE = re.compile(r"(\d+\.\d+\.\d+\.\d+)\s")
# NOTE(review): appears unused in this module — confirm before removing.
DYNAMIC_BRIDGES = {}
# Serializes creation/deletion of dynamically numbered bridges across hosts.
DYNAMIC_BRIDGE_LOCK = asyncio.Lock()
class ZFS:
    """ZFS-backed image storage driven over ssh.

    Each image is a dataset ``<dataset>/<name>`` whose files are visible
    under ``/<path>/<name>``.
    """

    def __init__(self, ssh, path, dataset, **kwargs):
        self.ssh = ssh
        self.path = path
        self.dataset = dataset

    @asyncio.coroutine
    def create(self, name):
        """Create an empty dataset for *name*."""
        yield from self.ssh.run(
            "zfs create {dataset}/{name}".format(dataset=self.dataset,
                                                 name=name))

    @asyncio.coroutine
    def list_files(self, name):
        """Return absolute paths of the files stored in image *name*."""
        output = yield from self.ssh.run(
            "ls /{path}/{name}".format(path=self.path, name=name),
            return_output=True)
        base = os.path.join("/", self.dataset, name)
        return [os.path.join(base, entry) for entry in output.splitlines()]

    @asyncio.coroutine
    def clone(self, src, dst):
        """Clone snapshot ``<src>@1`` into a new dataset *dst*."""
        yield from self.ssh.run(
            "zfs clone {dataset}/{src}@1 {dataset}/{dst}".format(
                dataset=self.dataset, src=src, dst=dst))

    @asyncio.coroutine
    def exist(self, name):
        """Return True when a dataset called *name* already exists."""
        LOG.debug("Checking if image %s exist" % name)
        listing = yield from self.ssh.run("zfs list", return_output=True)
        found = re.search("^%s/%s " % (self.dataset, name), listing,
                          re.MULTILINE)
        return bool(found)

    @asyncio.coroutine
    def snapshot(self, name, snapshot="1"):
        """Snapshot image *name* (``@1`` by default; clone() relies on it)."""
        yield from self.ssh.run(
            "zfs snapshot {dataset}/{name}@{snapshot}".format(
                dataset=self.dataset, name=name, snapshot=snapshot))

    @asyncio.coroutine
    def destroy(self, name):
        """Delete dataset *name* and its data."""
        yield from self.ssh.run(
            "zfs destroy {dataset}/{name}".format(name=name,
                                                  dataset=self.dataset))

    @asyncio.coroutine
    def download(self, name, url):
        """Create image *name* and populate it with a qcow2 fetched from *url*."""
        # TODO: cache
        yield from self.create(name)
        yield from self.ssh.run(
            "wget -nv {url} -O {path}/{name}/vda.qcow2".format(
                name=name, path=self.path, url=url))
        yield from self.ssh.run(
            "qemu-img resize {path}/{name}/vda.qcow2 32G".format(
                name=name, path=self.path))
class BTRFS:
    """btrfs-backed image storage driven over ssh.

    Each image is a subvolume under ``<path>/<name>``.
    """

    def __init__(self, ssh, path, **kwargs):
        self.ssh = ssh
        self.path = path

    @asyncio.coroutine
    def create(self, name):
        """Create an empty subvolume for *name*."""
        yield from self.ssh.run(
            "btrfs subvolume create {path}/{name}".format(path=self.path,
                                                          name=name))

    @asyncio.coroutine
    def list_files(self, name):
        """Return absolute paths of the files stored in image *name*."""
        output = yield from self.ssh.run(
            "ls {path}/{name}".format(path=self.path, name=name),
            return_output=True)
        base = os.path.join("/", self.path, name)
        return [os.path.join(base, entry) for entry in output.splitlines()]

    @asyncio.coroutine
    def clone(self, src, dst):
        """Replace subvolume *dst* with a writable snapshot of *src*."""
        # Drop any stale destination first; ignore the error when absent.
        yield from self.ssh.run(
            "btrfs subvolume delete {path}/{dst}".format(path=self.path,
                                                         src=src, dst=dst),
            raise_on_error=False)
        yield from self.ssh.run(
            "btrfs subvolume snapshot {path}/{src} {path}/{dst}".format(
                path=self.path, src=src, dst=dst))

    @asyncio.coroutine
    def exist(self, name):
        """Return True when a subvolume called *name* already exists."""
        LOG.debug("Checking if image %s exist" % name)
        listing = yield from self.ssh.run(
            "btrfs subvolume list %s" % self.path, return_output=True)
        return bool(re.search(" %s$" % name, listing, re.MULTILINE))

    @asyncio.coroutine
    def snapshot(self, *args, **kwargs):
        """No-op: clone() snapshots the live subvolume directly."""
        yield from asyncio.sleep(0)

    @asyncio.coroutine
    def destroy(self, name):
        """Delete subvolume *name* and its data."""
        yield from self.ssh.run(
            "btrfs subvolume delete {path}/{name}".format(path=self.path,
                                                          name=name))

    @asyncio.coroutine
    def download(self, name, url):
        """Create image *name* and populate it with a qcow2 fetched from *url*."""
        # TODO: cache
        yield from self.create(name)
        yield from self.ssh.run(
            "wget -nv {url} -O /{path}/{name}/vda.qcow2".format(
                name=name, path=self.path, url=url))
        # TODO: size should be set in config
        yield from self.ssh.run(
            "qemu-img resize /{path}/{name}/vda.qcow2 32G".format(
                name=name, path=self.path))
# Storage backend registry, keyed by config["storage"]["backend"].
BACKENDS = {"btrfs": BTRFS, "zfs": ZFS}
class Host:
    """A single hypervisor host reachable over ssh."""

    def __init__(self, ssh_conf, config, root, vm_key):
        """
        :param ssh_conf: item from "hosts" of the provider config
        :param config: full "provider" config item
        :param root: Root instance
        :param vm_key: ssh key used to log into spawned VMs
        """
        self.image_locks = {}  # image name -> Lock serializing its build
        self.config = config
        self.root = root
        self.vms = []          # VMs spawned on this host
        self.br_vm = {}        # dynamic bridge name -> VMs attached to it
        self.ssh = asyncssh.AsyncSSH(**ssh_conf)
        self.vm_key = vm_key
        self.la = 0.0          # last sampled load average
        self.free = 0          # last sampled free+cached memory (MiB)
        storage_cf = config["storage"]
        self.storage = BACKENDS[storage_cf["backend"]](self.ssh, **storage_cf)

    def __str__(self):
        return "<Host %s (la: %s, free: %s)>" % (self.ssh.hostname,
                                                 self.la, self.free)

    @asyncio.coroutine
    def update_stats(self):
        """Refresh self.la and self.free from `uptime` and `free -m`."""
        cmd = "uptime && free -m"
        data = yield from self.ssh.run(cmd, return_output=True)
        # BUG FIX: a compiled pattern's search() signature is
        # (string, pos, endpos); passing re.MULTILINE (== 8) as the second
        # argument silently started the search at offset 8 instead of
        # applying a flag.  Neither pattern uses ^/$, so no flag is needed.
        self.la = float(RE_LA.search(data).group(1))
        free = RE_MEM.search(data).groups()
        self.free = sum(map(int, free))

    @asyncio.coroutine
    def boot_image(self, name):
        """Boot a VM directly from image *name* (used while building it)."""
        conf = self.config["images"][name]
        vm = VM(self, conf, {"name": name})
        vm.disks.append(name)
        for f in (yield from self.storage.list_files(name)):
            vm.add_disk(f)
        vm.add_net(conf.get("build-net", "virbr0"))
        yield from vm.boot()
        return vm

    @asyncio.coroutine
    def build_image(self, name):
        """Build image *name* (recursively building its parent) unless it
        already exists in storage."""
        LOG.info("Building image %s" % name)
        self.image_locks.setdefault(name, asyncio.Lock())
        with (yield from self.image_locks[name]):
            if (yield from self.storage.exist(name)):
                LOG.debug("Image %s exist" % name)
                return
            image_conf = self.config["images"][name]
            parent = image_conf.get("parent")
            if parent:
                yield from self.build_image(parent)
                yield from self.storage.clone(parent, name)
            else:
                url = image_conf.get("url")
                if url:
                    yield from self.storage.download(name, url)
                    yield from self.storage.snapshot(name)
                    return  # TODO: support build_script for downloaded images
            build_scripts = image_conf.get("build-scripts")
            if build_scripts:
                vm = yield from self.boot_image(name)
                try:
                    for script in build_scripts:
                        script = self.root.config.data["script"][script]
                        LOG.debug("Running build script %s" % script)
                        yield from vm.run_script(script)
                    yield from vm.shutdown(storage=False)
                except:
                    # deliberately bare: also catches cancellation so the
                    # half-built VM is destroyed, then re-raises
                    LOG.exception("Error building image")
                    yield from vm.destroy()
                    raise
            else:
                LOG.debug("No build script for image %s" % name)
                # give the freshly cloned volume a moment before snapshotting
                yield from asyncio.sleep(4)
            yield from self.storage.snapshot(name)

    @asyncio.coroutine
    def _get_vm(self, local_cfg, conf):
        """Clone the image and define+boot a single VM.

        :param local_cfg: config.job.runner.vms item
        :param conf: config.provider.vms item
        """
        LOG.debug("Creating VM with conf %s" % conf)
        name = local_cfg["name"]
        image = conf.get("image")
        if image:
            yield from self.build_image(image)
        else:
            image = name
        rnd_name = utils.get_rnd_name(name)
        yield from self.storage.clone(image, rnd_name)
        vm = VM(self, conf, local_cfg)
        files = yield from self.storage.list_files(rnd_name)
        vm.disks.append(rnd_name)
        for f in files:
            vm.add_disk(f)
        for net in conf["net"]:
            # each entry is "<bridge>" or "<bridge> <mac>"
            net = net.split(" ")
            if len(net) == 1:
                vm.add_net(net[0])
            else:
                vm.add_net(net[0], mac=net[1])
        yield from vm.boot()
        self.vms.append(vm)
        return vm

    @asyncio.coroutine
    def get_vms(self, vm_confs):
        """Return VMs for runner.

        :param vm_confs: config.job.runner.vms items
        """
        vms = []
        ifs = {}  # "prefix%" placeholder -> allocated dynamic bridge name
        LOG.debug("Getting VMS %s" % vm_confs)
        for vm_conf in vm_confs:
            conf = copy.deepcopy(self.config["vms"][vm_conf["name"]])
            br = None
            net_conf = []
            for net in conf["net"]:
                ifname = net.split(" ")
                if ifname[0].endswith("%"):
                    # "%"-suffixed names request a dynamically numbered
                    # bridge shared by all VMs of this job
                    if ifname[0] in ifs:
                        br = ifname[0] = ifs[ifname[0]]
                    else:
                        br = yield from self._get_bridge(ifname[0][:-1])
                        ifs[ifname[0]] = br
                        ifname[0] = br
                net_conf.append(" ".join(ifname))
            conf["net"] = net_conf
            vm = yield from self._get_vm(vm_conf, conf)
            # BUG FIX: VMs with no dynamic bridge used to be accumulated
            # under a None key that cleanup_net() could never delete,
            # leaking VM references.
            if br is not None:
                self.br_vm.setdefault(br, [])
                self.br_vm[br].append(vm)
            vms.append(vm)
        return vms

    @asyncio.coroutine
    def cleanup_net(self):
        """Delete dynamic bridges that no longer have VMs attached."""
        clean = []
        with (yield from DYNAMIC_BRIDGE_LOCK):
            for br, vms in self.br_vm.items():
                if not vms:
                    yield from self.ssh.run("ip link del %s" % br)
                    clean.append(br)
            for br in clean:
                del self.br_vm[br]

    @asyncio.coroutine
    def _get_bridge(self, prefix):
        """Create and bring up a bridge named ``<prefix><N>`` with the
        smallest free N; return its name."""
        with (yield from DYNAMIC_BRIDGE_LOCK):
            data = yield from self.ssh.run("ip link list", return_output=True)
            nums = set()
            for line in data.splitlines():
                m = IFACE_RE.match(line)
                if m:
                    if m.group(1) == prefix:
                        nums.add(int(m.group(2)))
            # len(nums) + 1 candidates guarantee at least one free index
            for i in range(len(nums) + 1):
                if i not in nums:
                    br = "%s%d" % (prefix, i)
                    break
            yield from self.ssh.run("ip link add %s type bridge" % br)
            yield from self.ssh.run("ip link set %s up" % br)
            return br
class Provider:
    """VM provider: owns the hosts, picks the least loaded one per job."""

    def __init__(self, root, config):
        """
        :param root: Root instance
        :param config: full provider config item
        """
        self.root = root
        self.config = config
        self.name = config["name"]
        self.key = config.get("key")
        self.ifs = {}
        self.last = time.time()  # timestamp of the last host selection
        self.get_vms_lock = asyncio.Lock()

    def get_stats(self):
        # TODO: expose provider statistics
        pass

    def start(self):
        """Instantiate hosts and launch the cloud-init metadata server."""
        self.hosts = [Host(c, self.config, self.root, self.key)
                      for c in self.config["hosts"]]
        self.mds = MetadataServer(self.root.loop,
                                  self.config.get("metadata_server", {}))
        # BUG FIX: asyncio.async(...) is a SyntaxError on Python >= 3.7
        # ("async" became a keyword); ensure_future() (3.4.4+) is the
        # compatible spelling.
        self.mds_future = asyncio.ensure_future(self.mds.start())

    @asyncio.coroutine
    def cleanup(self, vms):
        """Destroy every VM in *vms*."""
        LOG.debug("Starting cleanup %s" % vms)
        for vm in vms:
            LOG.debug("Cleaning %s" % vm)
            yield from vm.destroy()
        LOG.debug("Cleanup completed")

    @asyncio.coroutine
    def stop(self):
        """Stop the metadata server task."""
        # BUG FIX: Future.cancel() returns a bool and is not awaitable;
        # "yield from self.mds_future.cancel()" raised TypeError.
        self.mds_future.cancel()

    @asyncio.coroutine
    def get_vms(self, vm_confs):
        """Pick a host with enough free memory and spawn the VMs on it.

        :param vm_confs: job.runner.vms items
        """
        memory_required = self.config.get("freemb", 1024)
        for cfg in vm_confs:
            memory_required += self.config["vms"][cfg["name"]]["memory"]
        best = None
        with (yield from self.get_vms_lock):
            # throttle host probing: at most one selection per ~5 seconds
            sleep = self.last + 5 - time.time()
            if sleep > 1:
                yield from asyncio.sleep(sleep)
            while True:
                random.shuffle(self.hosts)
                LOG.debug("Chosing from %s" % self.hosts)
                for host in self.hosts:
                    yield from host.update_stats()
                    if host.free >= memory_required and host.la < self.config.get("maxla", 4):
                        LOG.debug("Chosen host: %s" % host)
                        best = host
                        break
                if best:
                    break
                LOG.info("All systems are overloaded. Waiting 30 seconds.")
                yield from asyncio.sleep(30)
            self.last = time.time()
        # use the chosen host explicitly rather than the loop variable
        return (yield from best.get_vms(vm_confs))
class VM:
    """Represent a single libvirt VM built from an in-memory XML domain."""

    def __init__(self, host, cfg=None, local_cfg=None):
        """Represent a VM.

        :param host: Host instance
        :param cfg: config.provider.vms item
        :param local_cfg: job.runner.vms item
        """
        self.host = host
        self.cfg = cfg or {}
        self.local_cfg = local_cfg
        self._ssh = host.ssh   # ssh to the hypervisor, not to the VM itself
        self.macs = []         # MACs of our NICs, used to discover our IP
        self.disks = []        # storage volumes destroyed together with the VM
        self.bridges = []      # bridges our NICs are attached to
        self.name = utils.get_rnd_name(local_cfg["name"])
        x = XMLElement(None, "domain", type="kvm")
        self.x = x
        x.se("name").x.text = self.name
        for mem in ("memory", "currentMemory"):
            x.se(mem, unit="MiB").x.text = str(self.cfg.get("memory", 1024))
        x.se("vcpu", placement="static").x.text = "1"
        cpu = x.se("cpu", mode="host-model")
        cpu.se("model", fallback="forbid")
        # renamed from "os" to avoid shadowing the os module
        os_el = x.se("os")
        os_el.se("type", arch="x86_64", machine="pc-1.0").x.text = "hvm"
        features = x.se("features")
        features.se("acpi")
        features.se("apic")
        features.se("pae")
        self.devices = x.se("devices")
        self.devices.se("emulator").x.text = "/usr/bin/kvm"
        self.devices.se("controller", type="pci", index="0", model="pci-root")
        self.devices.se("graphics", type="spice", autoport="yes")
        mb = self.devices.se("memballoon", model="virtio")
        mb.se("address", type="pci", domain="0x0000", bus="0x00",
              slot="0x09", function="0x0")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<VM %s %s>" % (self.name, self.local_cfg)

    @asyncio.coroutine
    def run_script(self, script, env=None, raise_on_error=True, cb=None):
        """Run a configured script inside the VM over ssh.

        :param script: dict with "interpreter", "data" and optional "user"
        :param env: mapping of environment variables prepended to the command
        :param cb: output callback passed to AsyncSSH
        """
        LOG.debug("Running script: %s on vm %s with env %s" % (script, self, env))
        yield from self.get_ip()
        cmd = "".join(["%s='%s' " % tuple(e) for e in env.items()]) if env else ""
        cmd += script["interpreter"]
        ssh = asyncssh.AsyncSSH(script.get("user", "root"), self.ip,
                                key=self.host.vm_key, cb=cb)
        status = yield from ssh.run(cmd, stdin=script["data"],
                                    raise_on_error=raise_on_error,
                                    user=script.get("user", "root"))
        return status

    @asyncio.coroutine
    def shutdown(self, timeout=30, storage=False):
        """Gracefully shut the VM down; force-destroy after *timeout* seconds."""
        if not hasattr(self, "ip"):
            # never got an address: nothing to shut down gracefully
            yield from self.destroy(storage=storage)
            return
        ssh = yield from self.get_ssh()
        yield from ssh.run("shutdown -h now")
        deadline = time.time() + timeout
        cmd = "virsh list | grep -q {}".format(self.name)
        while True:
            yield from asyncio.sleep(4)
            error = yield from self._ssh.run(cmd, raise_on_error=False)
            if error:
                # grep found nothing: the domain is gone
                return
            elif time.time() > deadline:
                # BUG FIX: was compared against "timeout" (e.g. 30), which
                # time.time() always exceeds, so the VM was force-destroyed
                # on the very first poll instead of after the deadline.
                yield from self.destroy(storage=storage)
                return

    @asyncio.coroutine
    def destroy(self, storage=True):
        """Force-destroy the domain, optionally its volumes, and detach it
        from the host's bridge bookkeeping."""
        cmd = "virsh destroy {}".format(self.name)
        yield from self._ssh.run(cmd, raise_on_error=False)
        if storage:
            for disk in self.disks:
                yield from self.host.storage.destroy(disk)
        for br in self.bridges:
            lst = self.host.br_vm.get(br)
            if lst and self in lst:
                lst.remove(self)
        yield from self.host.cleanup_net()

    @asyncio.coroutine
    def get_ssh(self):
        """Return an AsyncSSH connected to the VM as root."""
        yield from self.get_ip()
        return asyncssh.AsyncSSH("root", self.ip, key=self.host.vm_key)

    @asyncio.coroutine
    def get_ip(self, timeout=300):
        """Wait until the VM's IP shows up in the hypervisor's ARP table.

        Sets ``self.ip`` and returns it; raises after *timeout* seconds.
        """
        if hasattr(self, "ip"):
            yield from asyncio.sleep(0)
            return self.ip
        deadline = time.time() + timeout
        cmd = "egrep -i '%s' /proc/net/arp" % "|".join(self.macs)
        while True:
            if time.time() > deadline:
                raise Exception("Unable to find ip of VM %s" % self.cfg)
            yield from asyncio.sleep(4)
            LOG.debug("Checking for ip for vm %s (%s)" % (self.name, repr(self.macs)))
            data = yield from self._ssh.run(cmd, return_output=True, raise_on_error=False)
            for line in data.splitlines():
                m = IP_RE.match(line)
                if m:
                    self.ip = m.group(1)
                    # TODO: wait_for_ssh
                    yield from asyncio.sleep(8)
                    # BUG FIX: return the address like the fast path above,
                    # instead of None.
                    return self.ip

    @asyncio.coroutine
    def boot(self):
        """Upload the domain XML to the hypervisor and `virsh create` it."""
        conf = "/tmp/.conf.%s.xml" % self.name
        with self.fd() as fd:
            yield from self._ssh.run("cat > %s" % conf, stdin=fd)
        yield from self._ssh.run("virsh create {c}".format(c=conf))

    @contextlib.contextmanager
    def fd(self):
        """Yield a readable binary file object containing the domain XML."""
        xmlfile = tempfile.NamedTemporaryFile()
        # BUG FIX: open() outside the try block — if it failed, the finally
        # clause raised NameError on the unbound "fd".
        fd = open(xmlfile.name, "w+b")
        try:
            et.ElementTree(self.x.x).write(fd)
            fd.seek(0)
            yield fd
        finally:
            fd.close()

    def add_disk(self, path):
        """Attach qcow2 file *path* as a virtio disk; the device name is the
        file's basename without extension (e.g. "vda")."""
        dev = os.path.split(path)[1].split(".")[0]
        LOG.debug("Adding disk %s with path %s" % (dev, path))
        disk = self.devices.se("disk", device="disk", type="file")
        disk.se("driver", name="qemu", type="qcow2", cache="unsafe")
        disk.se("source", file=path)
        disk.se("target", dev=dev, bus="virtio")

    def add_net(self, bridge, mac=None):
        """Attach a virtio NIC to *bridge*, generating a random MAC unless
        one is given."""
        if not mac:
            mac = utils.get_rnd_mac()
        net = self.devices.se("interface", type="bridge")
        net.se("source", bridge=bridge)
        net.se("model", type="virtio")
        net.se("mac", address=mac)
        self.macs.append(mac)
        self.bridges.append(bridge)
class XMLElement:
    """Thin convenience wrapper around xml.etree Element/SubElement."""

    def __init__(self, parent, *args, **kwargs):
        if parent is None:
            self.x = et.Element(*args, **kwargs)
        else:
            self.x = et.SubElement(parent, *args, **kwargs)

    def se(self, *args, **kwargs):
        """Create a child element and return it wrapped."""
        return XMLElement(self.x, *args, **kwargs)

    def write(self, fd):
        """Serialize the subtree rooted here into file object *fd*."""
        et.ElementTree(self.x).write(fd)

    def tostring(self):
        """Return the subtree serialized as bytes."""
        return et.tostring(self.x)
class MetadataServer:
    """Metadata server for cloud-init.

    Supported versions:

     * 2012-08-10

    """

    def __init__(self, loop, config):
        """
        :param loop: asyncio event loop to serve on
        :param config: "metadata_server" section of the provider config
        """
        self.loop = loop
        self.config = config

    def get_metadata(self):
        """Return the meta_data.json payload as utf8-encoded bytes."""
        keys = {}
        with open(self.config["authorized_keys"]) as kf:
            for i, line in enumerate(kf.readlines()):
                # BUG FIX: blank lines are "\n" (truthy) and used to be
                # included as keys; also strip the trailing newline from
                # each stored key.
                line = line.strip()
                if line:
                    keys["key-" + str(i)] = line
        return json.dumps({
            "uuid": str(uuid.uuid4()),
            "availability_zone": "nova",
            "hostname": "rally-ci-vm",
            "launch_index": 0,
            "meta": {
                "priority": "low",
                "role": "rally-ci-test-vm",
            },
            "public_keys": keys,
            "name": "test"
        }).encode("utf8")

    @asyncio.coroutine
    def user_data(self, request):
        """Serve the configured cloud-init user_data blob."""
        version = request.match_info["version"]
        if version in ("2012-08-10", "latest"):
            return web.Response(body=self.config["user_data"].encode("utf-8"))
        return web.Response(status=404)

    @asyncio.coroutine
    def meta_data(self, request):
        """Serve meta_data.json for supported metadata versions."""
        LOG.debug("Metadata request: %s" % request)
        version = request.match_info["version"]
        if version in ("2012-08-10", "latest"):
            md = self.get_metadata()
            LOG.debug(md)
            return web.Response(body=md, content_type="application/json")
        return web.Response(status=404)

    @asyncio.coroutine
    def start(self):
        """Register routes and start listening (default 169.254.169.254:8080)."""
        self.app = web.Application(loop=self.loop)
        for route in (
                ("/openstack/{version:.*}/meta_data.json", self.meta_data),
                ("/openstack/{version:.*}/user_data", self.user_data),
        ):
            self.app.router.add_route("GET", *route)
        self.handler = self.app.make_handler()
        addr = self.config.get("listen_addr", "169.254.169.254")
        port = self.config.get("listen_port", 8080)
        self.srv = yield from self.loop.create_server(self.handler, addr, port)
        LOG.debug("Metadata server started at %s:%s" % (addr, port))

    @asyncio.coroutine
    def stop(self, timeout=1.0):
        """Drain connections, close the listening socket and finish the app."""
        yield from self.handler.finish_connections(timeout)
        self.srv.close()
        yield from self.srv.wait_closed()
        yield from self.app.finish()
import asyncio
import copy
import contextlib
import time
import os
import random
import re
import json
import logging
import tempfile
import uuid
from xml.etree import ElementTree as et
import aiohttp
from aiohttp import web
from rallyci import utils
from rallyci.common import asyncssh
# Module-level logger for this provider.
LOG = logging.getLogger(__name__)
# First load-average figure from `uptime` output.
RE_LA = re.compile(r".*load average: (\d+\.\d+),.*")
# "free" and "cached" columns (MiB) of the Mem: row from `free -m` output.
RE_MEM = re.compile(r".*Mem: +\d+ +\d+ +(\d+) +\d+ +\d+ +(\d+).*")
# `ip link` interface name split into (alpha prefix, numeric index).
IFACE_RE = re.compile(r"\d+: ([a-z]+)(\d+): .*")
# IPv4 address at the start of a /proc/net/arp line.
IP_RE = re.compile(r"(\d+\.\d+\.\d+\.\d+)\s")
# NOTE(review): appears unused in this module — confirm before removing.
DYNAMIC_BRIDGES = {}
# Serializes creation/deletion of dynamically numbered bridges across hosts.
DYNAMIC_BRIDGE_LOCK = asyncio.Lock()
class ZFS:
    """ZFS-backed image storage driven over ssh.

    Each image is a dataset ``<dataset>/<name>`` whose files are visible
    under ``/<path>/<name>``.
    """

    def __init__(self, ssh, path, dataset, **kwargs):
        self.ssh = ssh
        self.path = path
        self.dataset = dataset

    @asyncio.coroutine
    def create(self, name):
        """Create an empty dataset for *name*."""
        yield from self.ssh.run(
            "zfs create {dataset}/{name}".format(dataset=self.dataset,
                                                 name=name))

    @asyncio.coroutine
    def list_files(self, name):
        """Return absolute paths of the files stored in image *name*."""
        output = yield from self.ssh.run(
            "ls /{path}/{name}".format(path=self.path, name=name),
            return_output=True)
        base = os.path.join("/", self.dataset, name)
        return [os.path.join(base, entry) for entry in output.splitlines()]

    @asyncio.coroutine
    def clone(self, src, dst):
        """Clone snapshot ``<src>@1`` into a new dataset *dst*."""
        yield from self.ssh.run(
            "zfs clone {dataset}/{src}@1 {dataset}/{dst}".format(
                dataset=self.dataset, src=src, dst=dst))

    @asyncio.coroutine
    def exist(self, name):
        """Return True when a dataset called *name* already exists."""
        LOG.debug("Checking if image %s exist" % name)
        listing = yield from self.ssh.run("zfs list", return_output=True)
        found = re.search("^%s/%s " % (self.dataset, name), listing,
                          re.MULTILINE)
        return bool(found)

    @asyncio.coroutine
    def snapshot(self, name, snapshot="1"):
        """Snapshot image *name* (``@1`` by default; clone() relies on it)."""
        yield from self.ssh.run(
            "zfs snapshot {dataset}/{name}@{snapshot}".format(
                dataset=self.dataset, name=name, snapshot=snapshot))

    @asyncio.coroutine
    def destroy(self, name):
        """Delete dataset *name* and its data."""
        yield from self.ssh.run(
            "zfs destroy {dataset}/{name}".format(name=name,
                                                  dataset=self.dataset))

    @asyncio.coroutine
    def download(self, name, url):
        """Create image *name* and populate it with a qcow2 fetched from *url*."""
        # TODO: cache
        yield from self.create(name)
        yield from self.ssh.run(
            "wget -nv {url} -O {path}/{name}/vda.qcow2".format(
                name=name, path=self.path, url=url))
        yield from self.ssh.run(
            "qemu-img resize {path}/{name}/vda.qcow2 32G".format(
                name=name, path=self.path))
class BTRFS:
    """btrfs-backed image storage driven over ssh.

    Each image is a subvolume under ``<path>/<name>``.
    """

    def __init__(self, ssh, path, **kwargs):
        self.ssh = ssh
        self.path = path

    @asyncio.coroutine
    def create(self, name):
        """Create an empty subvolume for *name*."""
        yield from self.ssh.run(
            "btrfs subvolume create {path}/{name}".format(path=self.path,
                                                          name=name))

    @asyncio.coroutine
    def list_files(self, name):
        """Return absolute paths of the files stored in image *name*."""
        output = yield from self.ssh.run(
            "ls {path}/{name}".format(path=self.path, name=name),
            return_output=True)
        base = os.path.join("/", self.path, name)
        return [os.path.join(base, entry) for entry in output.splitlines()]

    @asyncio.coroutine
    def clone(self, src, dst):
        """Replace subvolume *dst* with a writable snapshot of *src*."""
        # Drop any stale destination first; ignore the error when absent.
        yield from self.ssh.run(
            "btrfs subvolume delete {path}/{dst}".format(path=self.path,
                                                         src=src, dst=dst),
            raise_on_error=False)
        yield from self.ssh.run(
            "btrfs subvolume snapshot {path}/{src} {path}/{dst}".format(
                path=self.path, src=src, dst=dst))

    @asyncio.coroutine
    def exist(self, name):
        """Return True when a subvolume called *name* already exists."""
        LOG.debug("Checking if image %s exist" % name)
        listing = yield from self.ssh.run(
            "btrfs subvolume list %s" % self.path, return_output=True)
        return bool(re.search(" %s$" % name, listing, re.MULTILINE))

    @asyncio.coroutine
    def snapshot(self, *args, **kwargs):
        """No-op: clone() snapshots the live subvolume directly."""
        yield from asyncio.sleep(0)

    @asyncio.coroutine
    def destroy(self, name):
        """Delete subvolume *name* and its data."""
        yield from self.ssh.run(
            "btrfs subvolume delete {path}/{name}".format(path=self.path,
                                                          name=name))

    @asyncio.coroutine
    def download(self, name, url):
        """Create image *name* and populate it with a qcow2 fetched from *url*."""
        # TODO: cache
        yield from self.create(name)
        yield from self.ssh.run(
            "wget -nv {url} -O /{path}/{name}/vda.qcow2".format(
                name=name, path=self.path, url=url))
        # TODO: size should be set in config
        yield from self.ssh.run(
            "qemu-img resize /{path}/{name}/vda.qcow2 32G".format(
                name=name, path=self.path))
# Storage backend registry, keyed by config["storage"]["backend"].
BACKENDS = {"btrfs": BTRFS, "zfs": ZFS}
class Host:
    """One hypervisor reachable over SSH.

    Owns its storage backend, builds images, boots VMs and manages the
    dynamically created bridges those VMs are attached to.
    """

    def __init__(self, ssh_conf, config, root, vm_key):
        """
        ssh_config: item from hosts from provider
        config: full "provider" item
        """
        self.image_locks = {}  # image name -> asyncio.Lock (build serialization)
        self.config = config
        self.root = root
        self.vms = []
        self.br_vm = {}  # bridge name -> list of VMs attached to it
        self.ssh = asyncssh.AsyncSSH(**ssh_conf)
        self.vm_key = vm_key
        self.la = 0.0  # last sampled load average
        self.free = 0  # last sampled free memory (MB)
        storage_cf = config["storage"]
        self.storage = BACKENDS[storage_cf["backend"]](self.ssh, **storage_cf)

    def __str__(self):
        return "<Host %s (la: %s, free: %s)>" % (self.ssh.hostname,
                                                 self.la, self.free)

    @asyncio.coroutine
    def update_stats(self):
        """Refresh self.la and self.free from `uptime` and `free -m`."""
        cmd = "uptime && free -m"
        data = yield from self.ssh.run(cmd, return_output=True)
        # BUGFIX: a compiled pattern's search() signature is
        # (string, pos, endpos) -- passing re.MULTILINE (== 8) as the
        # second argument silently skipped the first 8 characters of
        # the output instead of setting a flag.  MULTILINE must be
        # baked into the compiled RE_LA / RE_MEM patterns instead.
        self.la = float(RE_LA.search(data).group(1))
        free = RE_MEM.search(data).groups()
        self.free = sum(map(int, free))

    @asyncio.coroutine
    def boot_image(self, name):
        """Boot a VM backed directly by image *name* (used while building)."""
        conf = self.config["images"][name]
        vm = VM(self, conf, {"name": name})
        vm.disks.append(name)
        for f in (yield from self.storage.list_files(name)):
            vm.add_disk(f)
        vm.add_net(conf.get("build-net", "virbr0"))
        yield from vm.boot()
        return vm

    @asyncio.coroutine
    def build_image(self, name):
        """Ensure image *name* exists, building it (and parents) if needed.

        Concurrent builds of the same image are serialized by a per-image
        lock.
        """
        LOG.info("Building image %s" % name)
        self.image_locks.setdefault(name, asyncio.Lock())
        with (yield from self.image_locks[name]):
            if (yield from self.storage.exist(name)):
                LOG.debug("Image %s exist" % name)
                return
            image_conf = self.config["images"][name]
            parent = image_conf.get("parent")
            if parent:
                # Derived image: clone from (recursively built) parent.
                yield from self.build_image(parent)
                yield from self.storage.clone(parent, name)
            else:
                # Root image: fetched from an url.
                url = image_conf.get("url")
                if url:
                    yield from self.storage.download(name, url)
                    yield from self.storage.snapshot(name)
                    return  # TODO: support build_script for downloaded images
            build_scripts = image_conf.get("build-scripts")
            if build_scripts:
                vm = yield from self.boot_image(name)
                try:
                    for script in build_scripts:
                        script = self.root.config.data["script"][script]
                        LOG.debug("Running build script %s" % script)
                        yield from vm.run_script(script)
                    yield from vm.shutdown(storage=False)
                except:
                    # Bare except on purpose: clean up the half-built VM
                    # even on cancellation, then re-raise.
                    LOG.exception("Error building image")
                    yield from vm.destroy()
                    raise
            else:
                LOG.debug("No build script for image %s" % name)
                yield from asyncio.sleep(4)
            yield from self.storage.snapshot(name)

    @asyncio.coroutine
    def _get_vm(self, local_cfg, conf):
        """
        :param local_cfg: config.job.runner.vms item
        :param conf: config.provider.vms item
        """
        LOG.debug("Creating VM with conf %s" % conf)
        name = local_cfg["name"]
        image = conf.get("image")
        if image:
            yield from self.build_image(image)
        else:
            image = name
        # Every VM gets its own clone of the image.
        rnd_name = utils.get_rnd_name(name)
        yield from self.storage.clone(image, rnd_name)
        vm = VM(self, conf, local_cfg)
        files = yield from self.storage.list_files(rnd_name)
        vm.disks.append(rnd_name)
        for f in files:
            vm.add_disk(f)
        for net in conf["net"]:
            # Each entry is "bridge" or "bridge mac".
            net = net.split(" ")
            if len(net) == 1:
                vm.add_net(net[0])
            else:
                vm.add_net(net[0], mac=net[1])
        yield from vm.boot()
        self.vms.append(vm)
        return vm

    @asyncio.coroutine
    def get_vms(self, vm_confs):
        """Return VMs for runner.

        :param vm_confs: config.job.runner.vms items
        """
        vms = []
        ifs = {}  # bridge placeholder ("name%") -> allocated bridge name
        LOG.debug("Getting VMS %s" % vm_confs)
        for vm_conf in vm_confs:
            conf = copy.deepcopy(self.config["vms"][vm_conf["name"]])
            br = None
            net_conf = []
            for net in conf["net"]:
                ifname = net.split(" ")
                # A trailing "%" asks for a dynamically allocated bridge
                # that is shared between the VMs of this job.
                if ifname[0].endswith("%"):
                    if ifname[0] in ifs:
                        br = ifname[0] = ifs[ifname[0]]
                    else:
                        br = yield from self._get_bridge(ifname[0][:-1])
                        ifs[ifname[0]] = br
                        ifname[0] = br
                net_conf.append(" ".join(ifname))
            conf["net"] = net_conf
            vm = yield from self._get_vm(vm_conf, conf)
            if br is not None:
                # BUGFIX: only dynamic bridges are tracked; the old code
                # also accumulated VMs under a None key, which could
                # eventually make cleanup_net() run "ip link del None".
                self.br_vm.setdefault(br, [])
                self.br_vm[br].append(vm)
            vms.append(vm)
        return vms

    @asyncio.coroutine
    def cleanup_net(self):
        """Delete dynamically created bridges no VM uses anymore."""
        clean = []
        with (yield from DYNAMIC_BRIDGE_LOCK):
            for br, vms in self.br_vm.items():
                if not vms:
                    yield from self.ssh.run("ip link del %s" % br)
                    clean.append(br)
            for br in clean:
                del self.br_vm[br]

    @asyncio.coroutine
    def _get_bridge(self, prefix):
        """Create and return the first free bridge named `<prefix><n>`."""
        with (yield from DYNAMIC_BRIDGE_LOCK):
            data = yield from self.ssh.run("ip link list", return_output=True)
            nums = set()
            for line in data.splitlines():
                m = IFACE_RE.match(line)
                if m:
                    if m.group(1) == prefix:
                        nums.add(int(m.group(2)))
            # len(nums) + 1 candidates guarantee at least one free index.
            for i in range(len(nums) + 1):
                if i not in nums:
                    br = "%s%d" % (prefix, i)
                    break
            yield from self.ssh.run("ip link add %s type bridge" % br)
            yield from self.ssh.run("ip link set %s up" % br)
        return br
class Provider:
    """A pool of hypervisor Hosts plus the cloud-init metadata server."""

    def __init__(self, root, config):
        """
        :param config: full provider config
        """
        self.root = root
        self.config = config
        self.name = config["name"]
        self.key = config.get("key")
        self.ifs = {}
        self.last = time.time()  # timestamp of the last placement decision
        self.get_vms_lock = asyncio.Lock()

    def get_stats(self):
        pass

    def start(self):
        """Instantiate the hosts and launch the metadata server."""
        self.hosts = [Host(c, self.config, self.root, self.key)
                      for c in self.config["hosts"]]
        self.mds = MetadataServer(self.root.loop,
                                  self.config.get("metadata_server", {}))
        # BUGFIX: asyncio.async() is a SyntaxError on Python 3.7+
        # ("async" became a keyword); ensure_future() is the
        # long-standing equivalent (available since 3.4.4).
        self.mds_future = asyncio.ensure_future(self.mds.start())

    @asyncio.coroutine
    def cleanup(self, vms):
        """Destroy every VM in *vms*."""
        LOG.debug("Starting cleanup %s" % vms)
        for vm in vms:
            LOG.debug("Cleaning %s" % vm)
            yield from vm.destroy()
        LOG.debug("Cleanup completed")

    @asyncio.coroutine
    def stop(self):
        # BUGFIX: Future.cancel() is a plain method returning bool --
        # "yield from" on its result raised TypeError at runtime.
        self.mds_future.cancel()

    @asyncio.coroutine
    def get_vms(self, vm_confs):
        """
        :param vm_confs: job.runner.vms
        """
        # Memory needed: all requested VMs plus a configurable reserve.
        memory_required = self.config.get("freemb", 1024)
        for cfg in vm_confs:
            memory_required += self.config["vms"][cfg["name"]]["memory"]
        best = None
        with (yield from self.get_vms_lock):
            # Rate-limit placement to ~one decision per 5 seconds so the
            # load numbers can reflect the previous boot.
            sleep = self.last + 5 - time.time()
            if sleep > 1:
                yield from asyncio.sleep(sleep)
            while True:
                random.shuffle(self.hosts)
                LOG.debug("Chosing from %s" % self.hosts)
                for host in self.hosts:
                    yield from host.update_stats()
                    if host.free >= memory_required and host.la < self.config.get("maxla", 4):
                        LOG.debug("Chosen host: %s" % host)
                        best = host
                        break
                if best:
                    break
                LOG.info("All systems are overloaded. Waiting 30 seconds.")
                yield from asyncio.sleep(30)
            self.last = time.time()
        # Use the explicitly chosen host rather than the leaked loop
        # variable "host" the old code relied on.
        return (yield from best.get_vms(vm_confs))
class VM:
    """A libvirt/KVM guest described by a generated domain XML."""

    def __init__(self, host, cfg=None, local_cfg=None):
        """Represent a VM.

        :param host: Host instance
        :param cfg: config.provider.vms item
        :param local_cfg: job.runner.vms item
        """
        self.host = host
        self.cfg = cfg or {}
        self.local_cfg = local_cfg
        self._ssh = host.ssh  # ssh to the hypervisor, not the guest
        self.macs = []
        self.disks = []
        self.bridges = []
        self.name = utils.get_rnd_name(local_cfg["name"])
        # Build the libvirt domain XML.
        x = XMLElement(None, "domain", type="kvm")
        self.x = x
        x.se("name").x.text = self.name
        for mem in ("memory", "currentMemory"):
            x.se(mem, unit="MiB").x.text = str(self.cfg.get("memory", 1024))
        x.se("vcpu", placement="static").x.text = "1"
        cpu = x.se("cpu", mode="host-model")
        cpu.se("model", fallback="forbid")
        # Renamed from "os" to avoid shadowing the os module.
        os_el = x.se("os")
        os_el.se("type", arch="x86_64", machine="pc-1.0").x.text = "hvm"
        features = x.se("features")
        features.se("acpi")
        features.se("apic")
        features.se("pae")
        self.devices = x.se("devices")
        self.devices.se("emulator").x.text = "/usr/bin/kvm"
        self.devices.se("controller", type="pci", index="0", model="pci-root")
        self.devices.se("graphics", type="spice", autoport="yes")
        mb = self.devices.se("memballoon", model="virtio")
        mb.se("address", type="pci", domain="0x0000", bus="0x00",
              slot="0x09", function="0x0")

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<VM %s %s>" % (self.name, self.local_cfg)

    @asyncio.coroutine
    def run_script(self, script, env=None, raise_on_error=True, cb=None):
        """Run *script* inside the guest via ssh; return its exit status."""
        LOG.debug("Running script: %s on vm %s with env %s" % (script, self, env))
        yield from self.get_ip()
        # Prefix the interpreter with VAR='value' pairs from env.
        cmd = "".join(["%s='%s' " % tuple(e) for e in env.items()]) if env else ""
        cmd += script["interpreter"]
        ssh = asyncssh.AsyncSSH(script.get("user", "root"), self.ip,
                                key=self.host.vm_key, cb=cb)
        status = yield from ssh.run(cmd, stdin=script["data"],
                                    raise_on_error=raise_on_error,
                                    user=script.get("user", "root"))
        return status

    @asyncio.coroutine
    def shutdown(self, timeout=30, storage=False):
        """Gracefully stop the guest; force-destroy after *timeout* seconds."""
        if not hasattr(self, "ip"):
            # Never got an address: nothing to shut down gracefully.
            yield from self.destroy(storage=storage)
            return
        ssh = yield from self.get_ssh()
        yield from ssh.run("shutdown -h now")
        deadline = time.time() + timeout
        cmd = "virsh list | grep -q {}".format(self.name)
        while True:
            yield from asyncio.sleep(4)
            error = yield from self._ssh.run(cmd, raise_on_error=False)
            if error:
                # grep failed -> domain no longer listed -> it is down.
                return
            elif time.time() > deadline:
                # BUGFIX: this used to compare against `timeout` (e.g. 30),
                # which any absolute time.time() always exceeds, so the VM
                # was force-destroyed on the first poll instead of after
                # the grace period.
                yield from self.destroy(storage=storage)
                return

    @asyncio.coroutine
    def destroy(self, storage=True):
        """Force-stop the guest; optionally delete its disks and bridges."""
        cmd = "virsh destroy {}".format(self.name)
        yield from self._ssh.run(cmd, raise_on_error=False)
        if storage:
            for disk in self.disks:
                yield from self.host.storage.destroy(disk)
        # Deregister from bridge bookkeeping so cleanup_net() can delete
        # bridges that became unused.
        for br in self.bridges:
            lst = self.host.br_vm.get(br)
            if lst and self in lst:
                lst.remove(self)
        yield from self.host.cleanup_net()

    @asyncio.coroutine
    def get_ssh(self):
        """Return an ssh session to the guest (waits for its ip first)."""
        yield from self.get_ip()
        return asyncssh.AsyncSSH("root", self.ip, key=self.host.vm_key)

    @asyncio.coroutine
    def get_ip(self, timeout=300):
        """Resolve the guest ip from the hypervisor's arp table."""
        if hasattr(self, "ip"):
            yield from asyncio.sleep(0)
            return self.ip
        deadline = time.time() + timeout
        cmd = "egrep -i '%s' /proc/net/arp" % "|".join(self.macs)
        while True:
            if time.time() > deadline:
                raise Exception("Unable to find ip of VM %s" % self.cfg)
            yield from asyncio.sleep(4)
            LOG.debug("Checking for ip for vm %s (%s)" % (self.name, repr(self.macs)))
            data = yield from self._ssh.run(cmd, return_output=True, raise_on_error=False)
            for line in data.splitlines():
                m = IP_RE.match(line)
                if m:
                    self.ip = m.group(1)
                    # TODO: wait_for_ssh
                    yield from asyncio.sleep(8)
                    # BUGFIX: return the address like the cached branch
                    # above does (the old code returned None here).
                    return self.ip

    @asyncio.coroutine
    def boot(self):
        """Upload the domain XML and `virsh create` the guest."""
        conf = "/tmp/.conf.%s.xml" % self.name
        with self.fd() as fd:
            yield from self._ssh.run("cat > %s" % conf, stdin=fd)
        yield from self._ssh.run("virsh create {c}".format(c=conf))

    @contextlib.contextmanager
    def fd(self):
        """Yield a readable file object containing the domain XML."""
        xmlfile = tempfile.NamedTemporaryFile()
        # BUGFIX: the old open()/try/finally pair raised NameError in the
        # finally clause when open() itself failed; `with` only closes a
        # successfully opened file.
        with open(xmlfile.name, "w+b") as fd:
            et.ElementTree(self.x.x).write(fd)
            fd.seek(0)
            yield fd

    def add_disk(self, path):
        """Attach a qcow2 disk; device name derives from the file name."""
        dev = os.path.split(path)[1].split(".")[0]
        LOG.debug("Adding disk %s with path %s" % (dev, path))
        disk = self.devices.se("disk", device="disk", type="file")
        disk.se("driver", name="qemu", type="qcow2", cache="unsafe")
        disk.se("source", file=path)
        disk.se("target", dev=dev, bus="virtio")

    def add_net(self, bridge, mac=None):
        """Attach a virtio NIC on *bridge*, generating a mac if needed."""
        if not mac:
            mac = utils.get_rnd_mac()
        net = self.devices.se("interface", type="bridge")
        net.se("source", bridge=bridge)
        net.se("model", type="virtio")
        net.se("mac", address=mac)
        self.macs.append(mac)
        self.bridges.append(bridge)
class XMLElement:
    """Thin convenience wrapper around xml.etree Element/SubElement."""

    def __init__(self, parent, *args, **kwargs):
        # A None parent creates a root element; anything else attaches
        # a child to the given raw Element.
        if parent is None:
            self.x = et.Element(*args, **kwargs)
        else:
            self.x = et.SubElement(parent, *args, **kwargs)

    def se(self, *args, **kwargs):
        """Create and return a wrapped child element."""
        return XMLElement(self.x, *args, **kwargs)

    def write(self, fd):
        """Serialize this element (as a tree) into *fd*."""
        et.ElementTree(self.x).write(fd)

    def tostring(self):
        """Return this element serialized as bytes."""
        return et.tostring(self.x)
class MetadataServer:
    """Metadata server for cloud-init.

    Serves the subset of the OpenStack metadata API cloud-init needs.

    Supported versions:

    * 2012-08-10
    """

    def __init__(self, loop, config):
        self.loop = loop
        self.config = config

    def get_metadata(self):
        """Build the meta_data.json payload as utf-8 bytes."""
        keys = {}
        # Expose every non-empty line of authorized_keys as "key-<n>".
        with open(self.config["authorized_keys"]) as kf:
            for i, line in enumerate(kf.readlines()):
                if line:
                    keys["key-" + str(i)] = line
        return json.dumps({
            "uuid": str(uuid.uuid4()),
            "availability_zone": "nova",
            "hostname": "rally-ci-vm",
            "launch_index": 0,
            "meta": {
                "priority": "low",
                "role": "rally-ci-test-vm",
            },
            "public_keys": keys,
            "name": "test"
        }).encode("utf8")

    @asyncio.coroutine
    def user_data(self, request):
        """GET handler for /openstack/<version>/user_data."""
        version = request.match_info["version"]
        if version in ("2012-08-10", "latest"):
            return web.Response(body=self.config["user_data"].encode("utf-8"))
        return web.Response(status=404)

    @asyncio.coroutine
    def meta_data(self, request):
        """GET handler for /openstack/<version>/meta_data.json."""
        LOG.debug("Metadata request: %s" % request)
        version = request.match_info["version"]
        if version in ("2012-08-10", "latest"):
            md = self.get_metadata()
            LOG.debug(md)
            return web.Response(body=md, content_type="application/json")
        return web.Response(status=404)

    @asyncio.coroutine
    def start(self):
        """Bind the aiohttp server (default 169.254.169.254:8080)."""
        self.app = web.Application(loop=self.loop)
        for route in (
                ("/openstack/{version:.*}/meta_data.json", self.meta_data),
                ("/openstack/{version:.*}/user_data", self.user_data),
        ):
            self.app.router.add_route("GET", *route)
        self.handler = self.app.make_handler()
        addr = self.config.get("listen_addr", "169.254.169.254")
        port = self.config.get("listen_port", 8080)
        self.srv = yield from self.loop.create_server(self.handler, addr, port)
        LOG.debug("Metadata server started at %s:%s" % (addr, port))

    @asyncio.coroutine
    def stop(self, timeout=1.0):
        """Drain connections and shut the server down."""
        yield from self.handler.finish_connections(timeout)
        self.srv.close()
        yield from self.srv.wait_closed()
        yield from self.app.finish()
import json
from hashlib import sha1
from hmac import HMAC
import redis
from redis_benchmarks_specification.__api__.app import (
create_app,
SIG_HEADER,
should_action,
)
from redis_benchmarks_specification.__common__.env import (
STREAM_KEYNAME_GH_EVENTS_COMMIT,
)
def test_create_app():
    """End-to-end test of the GitHub webhook endpoint.

    Requires a local Redis on port 16379; when it is not running the
    ConnectionError handler at the bottom turns the test into a no-op.
    """
    try:
        conn = redis.StrictRedis(port=16379, decode_responses=True)
        conn.ping()
        conn.flushall()
        # The app reads the webhook HMAC secret from this key.
        auth_token = conn.acl_genpass()
        conn.set("default:auth_token", auth_token)
        flask_app = create_app(conn, "default")
        req_data = "{}".encode()
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        # Unauthorized due to missing header
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                json={},
                headers={},
                content_type="application/json",
            )
            assert response.status_code == 403
        # Unauthorized due to wrong header value
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                json={},
                headers={SIG_HEADER: "sha1=abc"},
                content_type="application/json",
            )
            assert response.status_code == 403
        # Authorized but ignored event (empty payload)
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                data=json.dumps(dict({})),
                headers={SIG_HEADER: "sha1={}".format(expected_sign)},
                content_type="application/json",
            )
            assert response.status_code == 200
            assert response.json == {"message": "Ignored event from webhook"}
        # Authorized and PR event
        with open(
            "./utils/tests/test_data/event_webhook_labelled_pr.json"
        ) as json_file:
            label_pr_json = json.load(json_file)
        json_str = json.dumps(label_pr_json)
        req_data = json_str.encode()
        # The signature must cover the exact bytes that get posted.
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                content_type="application/json",
                data=req_data,
                headers={
                    "Content-type": "application/json",
                    SIG_HEADER: "sha1={}".format(expected_sign),
                },
            )
            assert response.status_code == 200
            assert (
                response.json["git_hash"]
                == "a3448f39efb8900f6f66778783461cf49de94b4f"
            )
            assert response.json["ref_label"] == "filipecosta90:unstable.55555"
            assert response.json["ref"] == "unstable.55555"
            # The event must have been queued for the build agents.
            assert conn.exists(STREAM_KEYNAME_GH_EVENTS_COMMIT)
        # Authorized and git pushes to repo
        with open(
            "./utils/tests/test_data/event_webhook_pushed_repo.json"
        ) as json_file:
            label_pr_json = json.load(json_file)
        json_str = json.dumps(label_pr_json)
        req_data = json_str.encode()
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                content_type="application/json",
                data=req_data,
                headers={
                    "Content-type": "application/json",
                    SIG_HEADER: "sha1={}".format(expected_sign),
                },
            )
            assert response.status_code == 200
            assert (
                response.json["git_hash"]
                == "921489d5392a13e10493c6578a27b4bd5324a929"
            )
            assert response.json["ref_label"] == "refs/heads/unstable.55555"
            assert response.json["ref"] == "unstable.55555"
            assert conn.exists(STREAM_KEYNAME_GH_EVENTS_COMMIT)
    except redis.exceptions.ConnectionError:
        # No local Redis available -- treat the test as a no-op.
        pass
def test_should_action():
    """Only PR actions that may change code should trigger a benchmark run."""
    # E712: compare booleans with `is`, not `==` (also drops the corrupted
    # trailing text the last line carried).
    assert should_action("labeled") is True
    assert should_action("opened") is True
    assert should_action("closed") is False
    assert should_action("na") is False
    assert should_action("reopened") is True
    assert should_action("synchronize") is True
from hashlib import sha1
from hmac import HMAC
import redis
from redis_benchmarks_specification.__api__.app import (
create_app,
SIG_HEADER,
should_action,
)
from redis_benchmarks_specification.__common__.env import (
STREAM_KEYNAME_GH_EVENTS_COMMIT,
)
def test_create_app():
    """End-to-end test of the GitHub webhook endpoint.

    Requires a local Redis on port 16379; when it is not running the
    ConnectionError handler at the bottom turns the test into a no-op.
    """
    try:
        conn = redis.StrictRedis(port=16379, decode_responses=True)
        conn.ping()
        conn.flushall()
        # The app reads the webhook HMAC secret from this key.
        auth_token = conn.acl_genpass()
        conn.set("default:auth_token", auth_token)
        flask_app = create_app(conn, "default")
        req_data = "{}".encode()
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        # Unauthorized due to missing header
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                json={},
                headers={},
                content_type="application/json",
            )
            assert response.status_code == 403
        # Unauthorized due to wrong header value
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                json={},
                headers={SIG_HEADER: "sha1=abc"},
                content_type="application/json",
            )
            assert response.status_code == 403
        # Authorized but ignored event (empty payload)
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                data=json.dumps(dict({})),
                headers={SIG_HEADER: "sha1={}".format(expected_sign)},
                content_type="application/json",
            )
            assert response.status_code == 200
            assert response.json == {"message": "Ignored event from webhook"}
        # Authorized and PR event
        with open(
            "./utils/tests/test_data/event_webhook_labelled_pr.json"
        ) as json_file:
            label_pr_json = json.load(json_file)
        json_str = json.dumps(label_pr_json)
        req_data = json_str.encode()
        # The signature must cover the exact bytes that get posted.
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                content_type="application/json",
                data=req_data,
                headers={
                    "Content-type": "application/json",
                    SIG_HEADER: "sha1={}".format(expected_sign),
                },
            )
            assert response.status_code == 200
            assert (
                response.json["git_hash"]
                == "a3448f39efb8900f6f66778783461cf49de94b4f"
            )
            assert response.json["ref_label"] == "filipecosta90:unstable.55555"
            assert response.json["ref"] == "unstable.55555"
            # The event must have been queued for the build agents.
            assert conn.exists(STREAM_KEYNAME_GH_EVENTS_COMMIT)
        # Authorized and git pushes to repo
        with open(
            "./utils/tests/test_data/event_webhook_pushed_repo.json"
        ) as json_file:
            label_pr_json = json.load(json_file)
        json_str = json.dumps(label_pr_json)
        req_data = json_str.encode()
        expected_sign = HMAC(
            key=auth_token.encode(), msg=req_data, digestmod=sha1
        ).hexdigest()
        with flask_app.test_client() as test_client:
            response = test_client.post(
                "/api/gh/redis/redis/commits",
                content_type="application/json",
                data=req_data,
                headers={
                    "Content-type": "application/json",
                    SIG_HEADER: "sha1={}".format(expected_sign),
                },
            )
            assert response.status_code == 200
            assert (
                response.json["git_hash"]
                == "921489d5392a13e10493c6578a27b4bd5324a929"
            )
            assert response.json["ref_label"] == "refs/heads/unstable.55555"
            assert response.json["ref"] == "unstable.55555"
            assert conn.exists(STREAM_KEYNAME_GH_EVENTS_COMMIT)
    except redis.exceptions.ConnectionError:
        # No local Redis available -- treat the test as a no-op.
        pass
def test_should_action():
    """Only PR actions that may change code should trigger a benchmark run."""
    # E712: compare booleans with `is`, not `==` (also drops the corrupted
    # trailing text the last line carried).
    assert should_action("labeled") is True
    assert should_action("opened") is True
    assert should_action("closed") is False
    assert should_action("na") is False
    assert should_action("reopened") is True
    assert should_action("synchronize") is True
from __future__ import annotations
import argparse
import json
import os
import re
from typing import List, Tuple, Sequence, Dict, Iterator, Optional
import boto3
import requests # type: ignore
import yaml # type: ignore
# AWS region and CloudFormation stack/output names used to locate the
# Nextflow Tower deployment.
REGION = "us-east-1"
# NOTE(review): "<NAME>" looks like a redacted placeholder -- confirm
# the real Tower organization name before running this script.
ORG_NAME = "<NAME>"
# Route53 stack whose record set output is the Tower hostname.
R53_STACK_NAME = "nextflow-r53-alias-record"
R53_STACK_OUTPUT = "Route53RecordSet"
# VPC stack providing the network Batch Forge launches into.
VPC_STACK_NAME = "nextflow-vpc"
VPC_STACK_OUTPUT_VID = "VPCId"
VPC_STACK_OUTPUT_SIDS = [
    "PrivateSubnet",
    "PrivateSubnet1",
    "PrivateSubnet2",
    "PrivateSubnet3",
]
def main() -> None:
    """Entry point: validate project configs, then provision Tower."""
    args = parse_args()
    projects = Projects(args.projects_dir)
    if not args.dry_run:
        tower = TowerClient()
        org = TowerOrganization(tower, projects)
        org.create_workspaces()
    else:
        # Dry run: only report which configs were found and validated.
        paths = "\n - ".join(projects.config_paths)
        print(
            "The following Tower project configurations were "
            "discovered and confirmed to be valid:\n -",
            paths,
        )
class InvalidTowerProject(Exception):
    """Raised when a Tower project config file fails validation."""

    pass
class Users:
    """Lists of workspace users (stored as emails) grouped by Tower role."""

    def __init__(
        self,
        owners: Sequence[str] = (),
        admins: Sequence[str] = (),
        maintainers: Sequence[str] = (),
        launchers: Sequence[str] = (),
        viewers: Sequence[str] = (),
    ):
        """Utility class for storing lists of users and their roles

        All users are stored as emails.

        Args:
            owners (Sequence[str]):
                The users have full permissions on any resources within
                the organization associated with the workspace
            admins (Sequence[str]):
                The users have full permission on the resources associated
                with the workspace. Therefore they can create/modify/delete
                Pipelines, Compute environments, Actions, Credentials. They
                can add/remove users to the workspace, but cannot create a
                new workspace or modify another workspace
            maintainers (Sequence[str]):
                The users can launch pipeline and modify pipeline executions
                (e.g. can change the pipeline launch compute env, parameters,
                pre/post-run scripts, nextflow config) and create new pipeline
                configuration in the Launchpad. The users cannot modify Compute
                env settings and Credentials
            launchers (Sequence[str]):
                The users can launch pipeline executions and modify the
                pipeline input/output parameters. They cannot modify the
                launch configuration and other resources
            viewers (Sequence[str]):
                The users can access to the team resources in read-only mode

        Note:
            Defaults are immutable tuples on purpose -- mutable default
            arguments (the old `= []`) are shared between all calls.
        """
        self.owners = owners
        self.admins = admins
        self.maintainers = maintainers
        self.launchers = launchers
        self.viewers = viewers

    def list_users(self) -> Iterator[Tuple[str, str]]:
        """List all users and their Tower roles

        Yields:
            Iterator[Tuple[str, str]]:
                Each element is the user email (str) and Tower role (str)
        """
        role_mapping = {
            "owners": "owner",
            "admins": "admin",
            "maintainers": "maintain",
            "launchers": "launch",
            "viewers": "view",
        }
        for user_group, role in role_mapping.items():
            users = getattr(self, user_group)
            for user in users:
                yield user, role
class Projects:
    """Discovery and parsing of tower-project Sceptre config files."""

    def __init__(self, config_directory: str) -> None:
        """Create Projects instance

        Args:
            config_directory (str): Directory containing project config files
        """
        self.config_directory = config_directory
        # Walking the configs also populates self.config_paths.
        self.users_per_project = self.extract_users()

    def list_projects(self) -> Iterator[str]:
        """List all project YAML configuration files

        Yields:
            Iterator[str]:
                Each element is a YAML filepath as a str
        """
        # Obtain a list of config files from the given directory
        self.config_paths = list()
        for dirpath, _, filenames in os.walk(self.config_directory):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                if filename.endswith("-project.yaml"):
                    self.config_paths.append(filepath)
                    yield filepath

    def validate_config(self, config: Dict) -> None:
        """Validate Tower project configuration

        Args:
            config (Dict): Tower project configuration

        Raises:
            InvalidTowerProject: When the config is invalid
        """
        has_stack_name = "stack_name" in config
        is_valid = (
            has_stack_name
            and "template_path" in config
            and config["template_path"] == "tower-project.yaml"
            and "parameters" in config
            and (
                "S3ReadWriteAccessArns" in config["parameters"]
                or "S3ReadOnlyAccessArns" in config["parameters"]
            )
        )
        if not is_valid:
            if has_stack_name:
                stack_name = config["stack_name"]
                raise InvalidTowerProject(f"{stack_name}.yaml is invalid")
            else:
                raise InvalidTowerProject(f"This config is invalid:\n{config}")

    def load_projects(self) -> Iterator[dict]:
        """Load all project configuration files from given directory

        Yields:
            Iterator[dict]:
                Each element is a parsed YAML file as a dict
        """
        # Ignore all Sceptre resolvers (registered globally on the
        # default Loader; repeating the call is harmless).
        yaml.add_multi_constructor("!", lambda loader, suffix, node: None)
        # Load the tower-project.yaml config files into a list
        for config_path in self.list_projects():
            with open(config_path) as config_file:
                config = yaml.load(config_file, Loader=yaml.Loader)
            self.validate_config(config)
            yield config

    def extract_emails(self, arns: Sequence[str]) -> List[str]:
        """Extract role session names (emails) from assumed-role ARNs

        Args:
            arns (Sequence[str]): List of assumed-role ARNs

        Returns:
            List[str]: List of email from the role session names

        Raises:
            ValueError: When an ARN does not match the expected format
        """
        # BUGFIX: the TLD class used to be [A-Z|a-z], which also accepted
        # a literal "|" character; [A-Za-z] is what was intended.
        role_arn_regex = re.compile(
            r"arn:aws:sts::(?P<account_id>[0-9]+):assumed-role/(?P<role_name>[^/]+)"
            r"/(?P<session_name>[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,})"
        )
        emails = list()
        for arn in arns:
            match = role_arn_regex.fullmatch(arn)
            if match:
                email = match.group("session_name")
                emails.append(email)
            else:
                raise ValueError(
                    f"Listed ARN ({arn}) doesn't follow expected format: "
                    "'arn:aws:sts::<account_id>:<role_name>:<email>'"
                )
        return emails

    def extract_users(self) -> Dict[str, Users]:
        """Extract users from a series of config files

        Returns:
            Dict[str, Users]:
                Mapping between projects/stacks and users
        """
        users_per_project = dict()
        for config in self.load_projects():
            stack_name = config["stack_name"]
            maintainer_arns = config["parameters"].get("S3ReadWriteAccessArns", [])
            viewer_arns = config["parameters"].get("S3ReadOnlyAccessArns", [])
            maintainers = self.extract_emails(maintainer_arns)
            viewers = self.extract_emails(viewer_arns)
            users_per_project[stack_name] = Users(
                maintainers=maintainers, viewers=viewers
            )
        return users_per_project
class AwsClient:
    """Small facade over the boto3 calls this script needs."""

    def __init__(self) -> None:
        self.region = REGION
        self.session = boto3.session.Session(region_name=REGION)

    def get_cfn_stack_outputs(self, stack_name: str) -> dict:
        """Retrieve output values for a CloudFormation stack

        Args:
            stack_name (str): CloudFormation stack name

        Returns:
            dict: A mapping between output names and their values,
                plus the stack name under the "stack_name" key
        """
        client = self.session.client("cloudformation")
        stacks = client.describe_stacks(StackName=stack_name)["Stacks"]
        outputs = {
            entry["OutputKey"]: entry["OutputValue"]
            for entry in stacks[0]["Outputs"]
        }
        outputs["stack_name"] = stack_name
        return outputs

    def get_secret_value(self, secret_arn: str) -> dict:
        """Retrieve value for a secret stored in Secrets Manager

        Args:
            secret_arn (str): ARN for Secrets Manager secret

        Returns:
            dict: Decrypted secret value
        """
        client = self.session.client("secretsmanager")
        raw = client.get_secret_value(SecretId=secret_arn)["SecretString"]
        return json.loads(raw)
class TowerClient:
    """Authenticated HTTP client for the Nextflow Tower API."""

    def __init__(self, tower_token=None) -> None:
        """Resolve the API endpoint from CloudFormation and pick up the
        API token.

        Raises:
            KeyError: No token was given and the 'NXF_TOWER_TOKEN'
                environment variable isn't defined
        """
        self.aws = AwsClient()
        self.vpc = self.aws.get_cfn_stack_outputs(VPC_STACK_NAME)
        self.tower_api_base_url = self.get_tower_api_base_url()
        # Retrieve Nextflow token from environment
        try:
            self.tower_token = tower_token or os.environ["NXF_TOWER_TOKEN"]
        except KeyError as e:
            raise KeyError(
                "The 'NXF_TOWER_TOKEN' environment variable must "
                "be defined with a Nextflow Tower API token."
            ) from e

    def get_valid_name(self, full_name: str) -> str:
        """Replace every character Tower rejects with a dash.

        Args:
            full_name (str): Full name (with spaces/punctuation)

        Returns:
            str: Name with only alphanumeric, dash and underscore characters
        """
        return re.sub(r"[^A-Za-z0-9_-]", "-", full_name)

    def get_tower_api_base_url(self) -> str:
        """Infer Nextflow Tower API endpoint from CloudFormation

        Returns:
            str: A full URL for the Tower API endpoint
        """
        r53_stack = self.aws.get_cfn_stack_outputs(R53_STACK_NAME)
        return f"https://{r53_stack[R53_STACK_OUTPUT]}/api"

    def request(self, method: str, endpoint: str, **kwargs) -> dict:
        """Make an authenticated HTTP request to the Nextflow Tower API

        Args:
            method (str): An HTTP method (GET, PUT, POST, or DELETE)
            endpoint (str): The API endpoint with path parameters filled in

        Returns:
            dict: Parsed response body, or {} when the body isn't JSON
        """
        assert method in {"GET", "PUT", "POST", "DELETE"}
        kwargs["headers"] = {"Authorization": f"Bearer {self.tower_token}"}
        response = requests.request(
            method, self.tower_api_base_url + endpoint, **kwargs
        )
        try:
            return response.json()
        except json.decoder.JSONDecodeError:
            return dict()
class TowerWorkspace:
def __init__(
    self,
    org: TowerOrganization,
    stack_name: str,
    users: Users,
) -> None:
    # Parent organization and its shared Tower API client.
    self.org = org
    self.tower = org.tower
    # The workspace mirrors a tower-project CloudFormation stack; its
    # outputs provide bucket/role ARNs for the compute environment.
    self.stack_name = stack_name
    self.stack = self.tower.aws.get_cfn_stack_outputs(stack_name)
    self.full_name = stack_name
    self.name = self.tower.get_valid_name(stack_name)
    # Create (or look up) the workspace, then add users and compute env.
    self.json = self.create()
    self.id = self.json["id"]
    self.users = users
    self.participants: Dict[str, dict] = dict()
    self.populate()
    self.create_compute_environment()
def create(self) -> dict:
    """Create a Tower workspace under an organization

    Returns:
        dict: Workspace JSON from API
    """
    endpoint = f"/orgs/{self.org.id}/workspaces"
    # Reuse an existing workspace with the same name when possible.
    existing = self.tower.request("GET", endpoint)
    for workspace in existing["workspaces"]:
        if workspace["name"] == self.name:
            return workspace
    # None found: create a new private workspace under the organization.
    payload = {
        "workspace": {
            "name": self.name,
            "fullName": self.full_name,
            "description": None,
            "visibility": "PRIVATE",
        }
    }
    created = self.tower.request("POST", endpoint, json=payload)
    return created["workspace"]
def add_participant(self, user: str, role: str) -> dict:
    """Add user to the workspace (if need be) and set their role

    Args:
        user (str): Email address for the user
        role (str): 'owner', 'admin', 'maintain', 'launch', or 'view'

    Returns:
        dict: Participant record for the user in the given workspace
    """
    # Attempt to add the user as a participant of the given workspace
    endpoint = f"/orgs/{self.org.id}/workspaces/{self.id}/participants"
    member_id = self.org.members[user]["memberId"]
    data = {
        "memberId": member_id,
        "teamId": None,
        "userNameOrEmail": None,
    }
    response = self.tower.request("PUT", f"{endpoint}/add", json=data)
    # If the user is already a member, you get the following message:
    #   "Already a participant"
    # In this case, look up the participant record by member ID; the
    # break leaves `participant` bound to the matching record.
    if "message" in response and response["message"] == "Already a participant":
        response = self.tower.request("GET", endpoint)
        for participant in response["participants"]:
            if participant["memberId"] == member_id:
                break
    # Otherwise, just use their new participant record for the workspace
    else:
        participant = response["participant"]
    self.participants[user] = participant
    # Update participant role
    participant_id = participant["participantId"]
    self.set_participant_role(participant_id, role)
    return participant
def set_participant_role(self, part_id: int, role: str) -> None:
    """Update the participant role in the given workspace

    Args:
        part_id (int): Participant ID for the user
        role (str): 'owner', 'admin', 'maintain', 'launch', or 'view'
    """
    endpoint = "/orgs/{}/workspaces/{}/participants/{}/role".format(
        self.org.id, self.id, part_id
    )
    self.tower.request("PUT", endpoint, json={"role": role})
def populate(self) -> None:
    """Register every configured user as a workspace participant."""
    for email, role in self.users.list_users():
        self.add_participant(email, role)
def create_credentials(self) -> int:
    """Create entry for Forge credentials under the given workspace

    Returns:
        int: Identifier for the Forge credentials entry
    """
    # Check if Forge credentials have already been created for this project
    endpoint = "/credentials"
    params = {"workspaceId": self.id}
    response = self.tower.request("GET", endpoint, params=params)
    for cred in response["credentials"]:
        if cred["name"] == self.stack_name:
            # Sanity-check that the entry we reuse is a live AWS one.
            assert cred["provider"] == "aws"
            assert cred["deleted"] is None
            return cred["id"]
    # Otherwise, create a new credentials entry for the project from the
    # Forge service user's access key stored in Secrets Manager.
    secret_arn = self.stack["TowerForgeServiceUserAccessKeySecretArn"]
    credentials = self.tower.aws.get_secret_value(secret_arn)
    data = {
        "credentials": {
            "name": self.stack_name,
            "provider": "aws",
            "keys": {
                "accessKey": credentials["aws_access_key_id"],
                "secretKey": credentials["aws_secret_access_key"],
                "assumeRoleArn": self.stack["TowerForgeServiceRoleArn"],
            },
            "description": f"Credentials for {self.stack_name}",
        }
    }
    response = self.tower.request("POST", endpoint, params=params, json=data)
    return response["credentialsId"]
def generate_compute_environment(self, name: str, model: str) -> dict:
    """Generate request object for creating a compute environment.

    Args:
        name (str): Name of the compute environment
        model (str): Pricing model, either "EC2" (on-demand) or "SPOT"

    Returns:
        dict: Request payload for the POST /compute-envs endpoint
    """
    assert model in {"SPOT", "EC2"}, "Wrong provisioning model"
    # Creating credentials is idempotent; an existing entry is reused.
    credentials_id = self.create_credentials()
    data = {
        "computeEnv": {
            "name": name,
            "platform": "aws-batch",
            "credentialsId": credentials_id,
            "config": {
                # "Batch Forge" asks Tower to provision the Batch queues
                "configMode": "Batch Forge",
                "region": self.tower.aws.region,
                # Pipeline work directory in the project's dedicated bucket
                "workDir": f"s3://{self.stack['TowerBucket']}/work",
                "credentials": None,
                # Job roles provisioned by the tower-project stack
                "computeJobRole": self.stack["TowerForgeBatchWorkJobRoleArn"],
                "headJobRole": self.stack["TowerForgeBatchHeadJobRoleArn"],
                "headJobCpus": None,
                "headJobMemoryMb": None,
                "preRunScript": None,
                "postRunScript": None,
                "cliPath": None,
                "forge": {
                    # Networking comes from the shared nextflow-vpc stack
                    "vpcId": self.tower.vpc[VPC_STACK_OUTPUT_VID],
                    "subnets": [self.tower.vpc[o] for o in VPC_STACK_OUTPUT_SIDS],
                    "fsxMode": "None",
                    "efsMode": "None",
                    "type": model,
                    "minCpus": 0,
                    "maxCpus": 500,
                    "gpuEnabled": False,
                    "ebsAutoScale": True,
                    "allowBuckets": [],
                    "disposeOnDeletion": True,
                    "instanceTypes": [],
                    "allocStrategy": None,
                    "ec2KeyPair": None,
                    "imageId": None,
                    "securityGroups": [],
                    "ebsBlockSize": 250,
                    "fusionEnabled": False,
                    "efsCreate": False,
                    "bidPercentage": None,
                },
            },
        }
    }
    return data
def create_compute_environment(self) -> Dict[str, Optional[str]]:
    """Create default compute environments under the given workspace.

    Ensures one spot and one on-demand AWS Batch environment exist,
    reusing any that are already AVAILABLE or CREATING.

    Returns:
        Dict[str, Optional[str]]:
            Mapping from pricing model ("SPOT"/"EC2") to compute
            environment identifier
    """
    compute_env_ids: dict[str, Optional[str]] = {"SPOT": None, "EC2": None}
    # Create compute environment names
    comp_env_prefix = f"{self.stack_name} (v2)"
    comp_env_spot = f"{comp_env_prefix} (spot)"
    comp_env_ec2 = f"{comp_env_prefix} (on-demand)"
    # Check if compute environment has already been created for this project
    endpoint = "/compute-envs"
    params = {"workspaceId": self.id}
    response = self.tower.request("GET", endpoint, params=params)
    for comp_env in response["computeEnvs"]:
        if comp_env["platform"] == "aws-batch" and (
            comp_env["status"] == "AVAILABLE" or comp_env["status"] == "CREATING"
        ):
            if comp_env["name"] == comp_env_spot:
                compute_env_ids["SPOT"] = comp_env["id"]
            elif comp_env["name"] == comp_env_ec2:
                compute_env_ids["EC2"] = comp_env["id"]
    # Create any missing compute environments for the project
    if compute_env_ids["SPOT"] is None:
        data = self.generate_compute_environment(comp_env_spot, "SPOT")
        response = self.tower.request("POST", endpoint, params=params, json=data)
        compute_env_ids["SPOT"] = response["computeEnvId"]
        # Only a freshly created spot environment is promoted to primary;
        # an existing spot environment's primary flag is left untouched.
        self.set_primary_compute_environment(response["computeEnvId"])
    if compute_env_ids["EC2"] is None:
        data = self.generate_compute_environment(comp_env_ec2, "EC2")
        response = self.tower.request("POST", endpoint, params=params, json=data)
        compute_env_ids["EC2"] = response["computeEnvId"]
    return compute_env_ids
def set_primary_compute_environment(self, compute_env_id: str) -> None:
    """Promote a compute environment to be the workspace default.

    Args:
        compute_env_id (str): Compute environment ID
    """
    # NOTE(review): the request body is the literal string "{}" (not an
    # empty dict); preserved as-is.
    self.tower.request(
        "POST",
        f"/compute-envs/{compute_env_id}/primary",
        params={"workspaceId": self.id},
        json="{}",
    )
class TowerOrganization:
    """Get-or-create the Tower organization and manage its members and workspaces."""

    def __init__(
        self,
        tower: TowerClient,
        projects: Projects,
        full_name: str = ORG_NAME,
    ) -> None:
        """Create Tower organization helper instance.

        Args:
            tower (TowerClient): Nextflow Tower client
            projects (Projects): List of projects and their users
            full_name (str): (Optional) Full name of organization
        """
        self.tower = tower
        self.full_name = full_name
        self.name = self.tower.get_valid_name(full_name)
        self.json = self.create()
        self.id = self.json["orgId"]
        self.projects = projects
        self.users_per_project = projects.users_per_project
        # email -> member JSON, filled in by add_member()
        self.members: Dict[str, dict] = dict()
        # Eagerly add every user from every project as an org member
        self.populate()
        self.workspaces: Dict[str, TowerWorkspace] = dict()

    def create(self) -> dict:
        """Get or create Tower organization with the given name.

        Returns:
            dict: Organization JSON from API
        """
        # Check if given org name is already among the existing orgs
        endpoint = "/orgs"
        response = self.tower.request("GET", endpoint)
        for org in response["organizations"]:
            if org["fullName"] == self.full_name:
                return org
        # Otherwise, create a new organization
        data = {
            "organization": {
                "name": self.name,
                "fullName": self.full_name,
                "description": None,
                "location": None,
                "website": None,
                "logo": None,
            },
            "logoId": None,
        }
        response = self.tower.request("POST", endpoint, json=data)
        return response["organization"]

    def add_member(self, user: str) -> dict:
        """Add user to the organization (if need be) and return member ID.

        Args:
            user (str): Email address for the user

        Returns:
            dict: Tower definition of an organization member
        """
        # Attempt to add the user as a member of the given organization
        endpoint = f"/orgs/{self.id}/members"
        data = {"user": user}
        response = self.tower.request(
            "PUT",
            f"{endpoint}/add",
            json=data,
        )
        # If the user is already a member, you get the following message:
        # "User '<username>' is already a member"
        # This hacky approach is necessary because you need to retrieve the
        # member ID using the username (you can't with the email alone)
        if "message" in response and "already a member" in response["message"]:
            username = response["message"].split("'")[1]
            response = self.tower.request("GET", endpoint)
            members = response["members"]
            for member in members:
                if member["userName"] == username:
                    break
            # NOTE(review): if no member matches `username`, `member` is left
            # bound to the last list element (or undefined for an empty list)
            # — presumably the API always lists the user here; confirm.
        # Otherwise, just return their new member ID for the organization
        else:
            member = response["member"]
        self.members[user] = member
        return member

    def populate(self) -> None:
        """Add all emails from across all projects to the organization.

        Populates ``self.members`` as a side effect (via add_member).
        """
        for project_users in self.users_per_project.values():
            for user, _ in project_users.list_users():
                self.add_member(user)

    def list_projects(self) -> Iterator[Tuple[str, Users]]:
        """Iterate over all projects and their users.

        Yields:
            Iterator[Tuple[str, Users]]:
                Each element is the project name and its users
        """
        for project, project_users in self.users_per_project.items():
            yield project, project_users

    def create_workspaces(self) -> Dict[str, TowerWorkspace]:
        """Create a workspace for each project.

        Returns:
            Dict[str, TowerWorkspace]:
                Mapping of project names and their corresponding workspaces
        """
        for name, users in self.list_projects():
            # TowerWorkspace's constructor does all the provisioning work
            ws = TowerWorkspace(self, name, users)
            self.workspaces[name] = ws
        return self.workspaces
def parse_args() -> argparse.Namespace:
    """Read the script's command-line arguments.

    Returns:
        argparse.Namespace: Parsed command-line arguments
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("projects_dir")
    arg_parser.add_argument("--dry_run", "-n", action="store_true")
    return arg_parser.parse_args()
# Script entry point (fixed: dataset-separator garbage was fused onto the
# `main()` line, which would be a syntax error).
if __name__ == "__main__":
    main()
from __future__ import annotations
import argparse
import json
import os
import re
from typing import List, Tuple, Sequence, Dict, Iterator, Optional
import boto3
import requests # type: ignore
import yaml # type: ignore
# AWS region every boto3 client operates in
REGION = "us-east-1"
# Full display name of the Tower organization (templating placeholder)
ORG_NAME = "<NAME>"
# CloudFormation stack/output used to discover the Tower hostname
R53_STACK_NAME = "nextflow-r53-alias-record"
R53_STACK_OUTPUT = "Route53RecordSet"
# CloudFormation stack and outputs describing the VPC used by Batch Forge
VPC_STACK_NAME = "nextflow-vpc"
VPC_STACK_OUTPUT_VID = "VPCId"
VPC_STACK_OUTPUT_SIDS = [
    "PrivateSubnet",
    "PrivateSubnet1",
    "PrivateSubnet2",
    "PrivateSubnet3",
]
def main() -> None:
    """Entry point: load project configs, then either report them (dry run)
    or provision the Tower organization and workspaces."""
    cli_args = parse_args()
    projects = Projects(cli_args.projects_dir)
    if not cli_args.dry_run:
        client = TowerClient()
        TowerOrganization(client, projects).create_workspaces()
        return
    print(
        "The following Tower project configurations were "
        "discovered and confirmed to be valid:\n -",
        "\n - ".join(projects.config_paths),
    )
class InvalidTowerProject(Exception):
    """Raised when a Tower project configuration file fails validation."""
class Users:
    def __init__(
        self,
        owners: Sequence[str] = (),
        admins: Sequence[str] = (),
        maintainers: Sequence[str] = (),
        launchers: Sequence[str] = (),
        viewers: Sequence[str] = (),
    ):
        """Utility class for storing lists of users and their roles.

        All users are stored as emails. Defaults are empty tuples rather
        than mutable lists to avoid the shared-mutable-default pitfall.

        Args:
            owners (Sequence[str]):
                The users have full permissions on any resources within
                the organization associated with the workspace
            admins (Sequence[str]):
                The users have full permission on the resources associated
                with the workspace. Therefore they can create/modify/delete
                Pipelines, Compute environments, Actions, Credentials. They
                can add/remove users to the workspace, but cannot create a
                new workspace or modify another workspace
            maintainers (Sequence[str]):
                The users can launch pipeline and modify pipeline executions
                (e.g. can change the pipeline launch compute env, parameters,
                pre/post-run scripts, nextflow config) and create new pipeline
                configuration in the Launchpad. The users cannot modify Compute
                env settings and Credentials
            launchers (Sequence[str]):
                The users can launch pipeline executions and modify the
                pipeline input/output parameters. They cannot modify the
                launch configuration and other resources
            viewers (Sequence[str]):
                The users can access to the team resources in read-only mode
        """
        self.owners = owners
        self.admins = admins
        self.maintainers = maintainers
        self.launchers = launchers
        self.viewers = viewers

    def list_users(self) -> Iterator[Tuple[str, str]]:
        """List all users and their Tower roles.

        Yields:
            Iterator[Tuple[str, str]]:
                Each element is the user email (str) and Tower role (str)
        """
        # Maps each attribute name to the Tower role string; iteration
        # order (owners first, viewers last) follows insertion order.
        role_mapping = {
            "owners": "owner",
            "admins": "admin",
            "maintainers": "maintain",
            "launchers": "launch",
            "viewers": "view",
        }
        for user_group, role in role_mapping.items():
            users = getattr(self, user_group)
            for user in users:
                yield user, role
class Projects:
    def __init__(self, config_directory: str) -> None:
        """Create Projects instance.

        Args:
            config_directory (str): Directory containing project config files
        """
        self.config_directory = config_directory
        # Walks the directory and parses every *-project.yaml file up front
        self.users_per_project = self.extract_users()

    def list_projects(self) -> Iterator[str]:
        """List all project YAML configuration files.

        Side effect: (re)initializes ``self.config_paths`` with the matching
        file paths as the generator is consumed.

        Yields:
            Iterator[str]:
                Each element is a YAML filepath as a str
        """
        # Obtain a list of config files from the given directory
        self.config_paths = list()
        for dirpath, _, filenames in os.walk(self.config_directory):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                if filename.endswith("-project.yaml"):
                    self.config_paths.append(filepath)
                    yield filepath

    def validate_config(self, config: Dict) -> None:
        """Validate Tower project configuration.

        Args:
            config (Dict): Tower project configuration

        Raises:
            InvalidTowerProject: When the config is invalid
        """
        has_stack_name = "stack_name" in config
        # A valid config names a stack, uses the tower-project template, and
        # grants at least one kind of S3 access
        is_valid = (
            has_stack_name
            and "template_path" in config
            and config["template_path"] == "tower-project.yaml"
            and "parameters" in config
            and (
                "S3ReadWriteAccessArns" in config["parameters"]
                or "S3ReadOnlyAccessArns" in config["parameters"]
            )
        )
        if not is_valid:
            if has_stack_name:
                stack_name = config["stack_name"]
                raise InvalidTowerProject(f"{stack_name}.yaml is invalid")
            else:
                raise InvalidTowerProject(f"This config is invalid:\n{config}")

    def load_projects(self) -> Iterator[dict]:
        """Load all project configuration files from given directory.

        Yields:
            Iterator[dict]:
                Each element is a parsed YAML file as a dict
        """
        # Ignore all Sceptre resolvers
        # NOTE(review): this registers a constructor globally on the yaml
        # module and is re-registered on every call
        yaml.add_multi_constructor("!", lambda loader, suffix, node: None)
        # Load the tower-project.yaml config files into a list
        for config_path in self.list_projects():
            with open(config_path) as config_file:
                config = yaml.load(config_file, Loader=yaml.Loader)
            self.validate_config(config)
            yield config

    def extract_emails(self, arns: Sequence[str]) -> List[str]:
        """Extract role session names (emails) from assumed-role ARNs.

        Args:
            arns (Sequence[str]): List of assumed-role ARNs

        Returns:
            List[str]: List of email from the role session names

        Raises:
            ValueError: When an ARN does not match the expected format
        """
        # The session name is expected to be an email address
        role_arn_regex = re.compile(
            r"arn:aws:sts::(?P<account_id>[0-9]+):assumed-role/(?P<role_name>[^/]+)"
            r"/(?P<session_name>[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,})"
        )
        emails = list()
        for arn in arns:
            match = role_arn_regex.fullmatch(arn)
            if match:
                email = match.group("session_name")
                emails.append(email)
            else:
                raise ValueError(
                    f"Listed ARN ({arn}) doesn't follow expected format: "
                    "'arn:aws:sts::<account_id>:<role_name>:<email>'"
                )
        return emails

    def extract_users(self) -> Dict[str, Users]:
        """Extract users from a series of config files.

        Returns:
            Dict[str, Users]:
                Mapping between projects/stacks and users
        """
        users_per_project = dict()
        for config in self.load_projects():
            stack_name = config["stack_name"]
            # Read-write ARNs become maintainers; read-only ARNs viewers
            maintainer_arns = config["parameters"].get("S3ReadWriteAccessArns", [])
            viewer_arns = config["parameters"].get("S3ReadOnlyAccessArns", [])
            maintainers = self.extract_emails(maintainer_arns)
            viewers = self.extract_emails(viewer_arns)
            users_per_project[stack_name] = Users(
                maintainers=maintainers, viewers=viewers
            )
        return users_per_project
class AwsClient:
    """Thin wrapper around boto3 for CloudFormation and Secrets Manager reads."""

    def __init__(self) -> None:
        self.region = REGION
        self.session = boto3.session.Session(region_name=REGION)

    def get_cfn_stack_outputs(self, stack_name: str) -> dict:
        """Retrieve output values for a CloudFormation stack.

        Args:
            stack_name (str): CloudFormation stack name

        Returns:
            dict: A mapping between output names and their values
        """
        cfn_client = self.session.client("cloudformation")
        described = cfn_client.describe_stacks(StackName=stack_name)
        raw_outputs = described["Stacks"][0]["Outputs"]
        outputs = {item["OutputKey"]: item["OutputValue"] for item in raw_outputs}
        # Record which stack these outputs came from
        outputs["stack_name"] = stack_name
        return outputs

    def get_secret_value(self, secret_arn: str) -> dict:
        """Retrieve value for a secret stored in Secrets Manager.

        Args:
            secret_arn (str): ARN for Secrets Manager secret

        Returns:
            dict: Decrypted secret value
        """
        sm_client = self.session.client("secretsmanager")
        raw_secret = sm_client.get_secret_value(SecretId=secret_arn)
        return json.loads(raw_secret["SecretString"])
class TowerClient:
    def __init__(self, tower_token=None) -> None:
        """Generate NextflowTower instance.

        Discovers the API hostname from CloudFormation and takes the API
        token either from the argument or the environment.

        Raises:
            KeyError: The 'NXF_TOWER_TOKEN' environment variable isn't defined
        """
        self.aws = AwsClient()
        self.vpc = self.aws.get_cfn_stack_outputs(VPC_STACK_NAME)
        self.tower_api_base_url = self.get_tower_api_base_url()
        # Prefer an explicitly supplied token; otherwise fall back to the
        # environment variable (missing -> re-raised KeyError with context)
        if tower_token:
            self.tower_token = tower_token
        else:
            try:
                self.tower_token = os.environ["NXF_TOWER_TOKEN"]
            except KeyError as e:
                raise KeyError(
                    "The 'NXF_TOWER_TOKEN' environment variable must "
                    "be defined with a Nextflow Tower API token."
                ) from e

    def get_valid_name(self, full_name: str) -> str:
        """Generate Tower-friendly name from full name.

        Args:
            full_name (str): Full name (with spaces/punctuation)

        Returns:
            str: Name with only alphanumeric, dash and underscore characters
        """
        return re.sub(r"[^A-Za-z0-9_-]", "-", full_name)

    def get_tower_api_base_url(self) -> str:
        """Infer Nextflow Tower API endpoint from CloudFormation.

        Returns:
            str: A full URL for the Tower API endpoint
        """
        r53_outputs = self.aws.get_cfn_stack_outputs(R53_STACK_NAME)
        hostname = r53_outputs[R53_STACK_OUTPUT]
        return f"https://{hostname}/api"

    def request(self, method: str, endpoint: str, **kwargs) -> dict:
        """Make an authenticated HTTP request to the Nextflow Tower API.

        Args:
            method (str): An HTTP method (GET, PUT, POST, or DELETE)
            endpoint (str): The API endpoint with the path parameters filled in

        Returns:
            dict: Parsed JSON response (empty dict when no JSON body)
        """
        assert method in {"GET", "PUT", "POST", "DELETE"}
        # The bearer header always overrides any caller-supplied headers
        kwargs["headers"] = {"Authorization": f"Bearer {self.tower_token}"}
        response = requests.request(method, self.tower_api_base_url + endpoint, **kwargs)
        try:
            return response.json()
        except json.decoder.JSONDecodeError:
            return dict()
class TowerWorkspace:
    """A Tower workspace (one per project/stack) under a TowerOrganization.

    Constructing an instance is effectful: it gets-or-creates the workspace,
    adds all configured participants, and provisions compute environments.
    """

    def __init__(
        self,
        org: TowerOrganization,
        stack_name: str,
        users: Users,
    ) -> None:
        """Create (or look up) the workspace and fully provision it.

        Args:
            org (TowerOrganization): Parent organization helper
            stack_name (str): CloudFormation stack name; doubles as the
                workspace full name and the credentials entry name
            users (Users): Users (and roles) to add as participants
        """
        self.org = org
        self.tower = org.tower
        self.stack_name = stack_name
        # CloudFormation outputs of the project's tower-project stack
        self.stack = self.tower.aws.get_cfn_stack_outputs(stack_name)
        self.full_name = stack_name
        self.name = self.tower.get_valid_name(stack_name)
        self.json = self.create()
        self.id = self.json["id"]
        self.users = users
        # email -> participant JSON, filled in by add_participant()
        self.participants: Dict[str, dict] = dict()
        self.populate()
        self.create_compute_environment()

    def create(self) -> dict:
        """Create a Tower workspace under an organization.

        Returns:
            dict: Workspace JSON from API
        """
        # Check if the project workspace already exists
        endpoint = f"/orgs/{self.org.id}/workspaces"
        response = self.tower.request("GET", endpoint)
        for workspace in response["workspaces"]:
            if workspace["name"] == self.name:
                return workspace
        # Otherwise, create a new project workspace under the organization
        data = {
            "workspace": {
                "name": self.name,
                "fullName": self.full_name,
                "description": None,
                "visibility": "PRIVATE",
            }
        }
        response = self.tower.request("POST", endpoint, json=data)
        return response["workspace"]

    # NOTE(review): return annotation corrected from ``int`` — the method
    # returns the participant JSON dict, not an ID.
    def add_participant(self, user: str, role: str) -> dict:
        """Add user to the workspace (if need be) and set their role.

        Args:
            user (str): Email address for the user
            role (str): 'owner', 'admin', 'maintain', 'launch', or 'view'

        Returns:
            dict: Participant JSON for the user in the given workspace
        """
        # Attempt to add the user as a participant of the given workspace
        endpoint = f"/orgs/{self.org.id}/workspaces/{self.id}/participants"
        member_id = self.org.members[user]["memberId"]
        data = {
            "memberId": member_id,
            "teamId": None,
            "userNameOrEmail": None,
        }
        response = self.tower.request("PUT", f"{endpoint}/add", json=data)
        # If the user is already a member, you get the following message:
        # "Already a participant"
        # In this case, look up the participant ID using the member ID
        if "message" in response and response["message"] == "Already a participant":
            response = self.tower.request("GET", endpoint)
            for participant in response["participants"]:
                if participant["memberId"] == member_id:
                    break
            # NOTE(review): if no participant matches member_id, `participant`
            # is left as the last list element (or undefined for an empty
            # list) — presumably the API always includes the member; confirm.
        # Otherwise, just return their new participant ID for the workspace
        else:
            participant = response["participant"]
        self.participants[user] = participant
        # Update participant role
        participant_id = participant["participantId"]
        self.set_participant_role(participant_id, role)
        return participant

    def set_participant_role(self, part_id: int, role: str) -> None:
        """Update the participant role in the given workspace.

        Args:
            part_id (int): Participant ID for the user
            role (str): 'owner', 'admin', 'maintain', 'launch', or 'view'
        """
        endpoint = (
            f"/orgs/{self.org.id}/workspaces/{self.id}/participants/{part_id}/role"
        )
        data = {"role": role}
        self.tower.request("PUT", endpoint, json=data)

    def populate(self) -> None:
        """Add maintainers and viewers to the organization and workspace."""
        for user, role in self.users.list_users():
            self.add_participant(user, role)

    def create_credentials(self) -> int:
        """Create entry for Forge credentials under the given workspace.

        Reuses an existing entry named after the stack when one exists.

        Returns:
            int: Identifier for the Forge credentials entry
        """
        # Check if Forge credentials have already been created for this project
        endpoint = "/credentials"
        params = {"workspaceId": self.id}
        response = self.tower.request("GET", endpoint, params=params)
        for cred in response["credentials"]:
            if cred["name"] == self.stack_name:
                # Sanity-check the match is a live AWS entry before reuse
                assert cred["provider"] == "aws"
                assert cred["deleted"] is None
                return cred["id"]
        # Otherwise, create a new credentials entry for the project.
        # The service user's access-key pair lives in Secrets Manager; its
        # ARN is exposed as a stack output.
        secret_arn = self.stack["TowerForgeServiceUserAccessKeySecretArn"]
        credentials = self.tower.aws.get_secret_value(secret_arn)
        data = {
            "credentials": {
                "name": self.stack_name,
                "provider": "aws",
                "keys": {
                    "accessKey": credentials["aws_access_key_id"],
                    "secretKey": credentials["aws_secret_access_key"],
                    "assumeRoleArn": self.stack["TowerForgeServiceRoleArn"],
                },
                "description": f"Credentials for {self.stack_name}",
            }
        }
        response = self.tower.request("POST", endpoint, params=params, json=data)
        return response["credentialsId"]

    def generate_compute_environment(self, name: str, model: str) -> dict:
        """Generate request object for creating a compute environment.

        Args:
            name (str): Name of the compute environment
            model (str): Pricing model, either "EC2" (on-demand) or "SPOT"

        Returns:
            dict: Request payload for the POST /compute-envs endpoint
        """
        assert model in {"SPOT", "EC2"}, "Wrong provisioning model"
        # Idempotent: an existing credentials entry is reused
        credentials_id = self.create_credentials()
        data = {
            "computeEnv": {
                "name": name,
                "platform": "aws-batch",
                "credentialsId": credentials_id,
                "config": {
                    # "Batch Forge" asks Tower to provision the Batch queues
                    "configMode": "Batch Forge",
                    "region": self.tower.aws.region,
                    "workDir": f"s3://{self.stack['TowerBucket']}/work",
                    "credentials": None,
                    "computeJobRole": self.stack["TowerForgeBatchWorkJobRoleArn"],
                    "headJobRole": self.stack["TowerForgeBatchHeadJobRoleArn"],
                    "headJobCpus": None,
                    "headJobMemoryMb": None,
                    "preRunScript": None,
                    "postRunScript": None,
                    "cliPath": None,
                    "forge": {
                        # Networking comes from the shared nextflow-vpc stack
                        "vpcId": self.tower.vpc[VPC_STACK_OUTPUT_VID],
                        "subnets": [self.tower.vpc[o] for o in VPC_STACK_OUTPUT_SIDS],
                        "fsxMode": "None",
                        "efsMode": "None",
                        "type": model,
                        "minCpus": 0,
                        "maxCpus": 500,
                        "gpuEnabled": False,
                        "ebsAutoScale": True,
                        "allowBuckets": [],
                        "disposeOnDeletion": True,
                        "instanceTypes": [],
                        "allocStrategy": None,
                        "ec2KeyPair": None,
                        "imageId": None,
                        "securityGroups": [],
                        "ebsBlockSize": 250,
                        "fusionEnabled": False,
                        "efsCreate": False,
                        "bidPercentage": None,
                    },
                },
            }
        }
        return data

    def create_compute_environment(self) -> Dict[str, Optional[str]]:
        """Create default compute environments under the given workspace.

        Ensures one spot and one on-demand AWS Batch environment exist,
        reusing any that are already AVAILABLE or CREATING.

        Returns:
            Dict[str, Optional[str]]:
                Mapping from pricing model ("SPOT"/"EC2") to the
                compute environment identifier
        """
        compute_env_ids: dict[str, Optional[str]] = {"SPOT": None, "EC2": None}
        # Create compute environment names
        comp_env_prefix = f"{self.stack_name} (v2)"
        comp_env_spot = f"{comp_env_prefix} (spot)"
        comp_env_ec2 = f"{comp_env_prefix} (on-demand)"
        # Check if compute environment has already been created for this project
        endpoint = "/compute-envs"
        params = {"workspaceId": self.id}
        response = self.tower.request("GET", endpoint, params=params)
        for comp_env in response["computeEnvs"]:
            if comp_env["platform"] == "aws-batch" and (
                comp_env["status"] == "AVAILABLE" or comp_env["status"] == "CREATING"
            ):
                if comp_env["name"] == comp_env_spot:
                    compute_env_ids["SPOT"] = comp_env["id"]
                elif comp_env["name"] == comp_env_ec2:
                    compute_env_ids["EC2"] = comp_env["id"]
        # Create any missing compute environments for the project
        if compute_env_ids["SPOT"] is None:
            data = self.generate_compute_environment(comp_env_spot, "SPOT")
            response = self.tower.request("POST", endpoint, params=params, json=data)
            compute_env_ids["SPOT"] = response["computeEnvId"]
            # Only a freshly created spot environment is promoted to primary
            self.set_primary_compute_environment(response["computeEnvId"])
        if compute_env_ids["EC2"] is None:
            data = self.generate_compute_environment(comp_env_ec2, "EC2")
            response = self.tower.request("POST", endpoint, params=params, json=data)
            compute_env_ids["EC2"] = response["computeEnvId"]
        return compute_env_ids

    def set_primary_compute_environment(self, compute_env_id: str) -> None:
        """Mark the given compute environment as the primary one (default).

        Args:
            compute_env_id (str): Compute environment ID
        """
        endpoint = f"/compute-envs/{compute_env_id}/primary"
        params = {"workspaceId": self.id}
        # NOTE(review): the body is the literal string "{}" (not an empty dict)
        self.tower.request("POST", endpoint, params=params, json="{}")
class TowerOrganization:
    """Get-or-create the Tower organization and manage its members and workspaces."""

    def __init__(
        self,
        tower: TowerClient,
        projects: Projects,
        full_name: str = ORG_NAME,
    ) -> None:
        """Create Tower organization helper instance.

        Args:
            tower (TowerClient): Nextflow Tower client
            projects (Projects): List of projects and their users
            full_name (str): (Optional) Full name of organization
        """
        self.tower = tower
        self.full_name = full_name
        self.name = self.tower.get_valid_name(full_name)
        self.json = self.create()
        self.id = self.json["orgId"]
        self.projects = projects
        self.users_per_project = projects.users_per_project
        # email -> member JSON, filled in by add_member()
        self.members: Dict[str, dict] = dict()
        # Eagerly add every user from every project as an org member
        self.populate()
        self.workspaces: Dict[str, TowerWorkspace] = dict()

    def create(self) -> dict:
        """Get or create Tower organization with the given name.

        Returns:
            dict: Organization JSON from API
        """
        # Check if given org name is already among the existing orgs
        endpoint = "/orgs"
        response = self.tower.request("GET", endpoint)
        for org in response["organizations"]:
            if org["fullName"] == self.full_name:
                return org
        # Otherwise, create a new organization
        data = {
            "organization": {
                "name": self.name,
                "fullName": self.full_name,
                "description": None,
                "location": None,
                "website": None,
                "logo": None,
            },
            "logoId": None,
        }
        response = self.tower.request("POST", endpoint, json=data)
        return response["organization"]

    def add_member(self, user: str) -> dict:
        """Add user to the organization (if need be) and return member ID.

        Args:
            user (str): Email address for the user

        Returns:
            dict: Tower definition of an organization member
        """
        # Attempt to add the user as a member of the given organization
        endpoint = f"/orgs/{self.id}/members"
        data = {"user": user}
        response = self.tower.request(
            "PUT",
            f"{endpoint}/add",
            json=data,
        )
        # If the user is already a member, you get the following message:
        # "User '<username>' is already a member"
        # This hacky approach is necessary because you need to retrieve the
        # member ID using the username (you can't with the email alone)
        if "message" in response and "already a member" in response["message"]:
            username = response["message"].split("'")[1]
            response = self.tower.request("GET", endpoint)
            members = response["members"]
            for member in members:
                if member["userName"] == username:
                    break
            # NOTE(review): if no member matches `username`, `member` is left
            # bound to the last list element (or undefined for an empty list)
            # — presumably the API always lists the user here; confirm.
        # Otherwise, just return their new member ID for the organization
        else:
            member = response["member"]
        self.members[user] = member
        return member

    def populate(self) -> None:
        """Add all emails from across all projects to the organization.

        Populates ``self.members`` as a side effect (via add_member).
        """
        for project_users in self.users_per_project.values():
            for user, _ in project_users.list_users():
                self.add_member(user)

    def list_projects(self) -> Iterator[Tuple[str, Users]]:
        """Iterate over all projects and their users.

        Yields:
            Iterator[Tuple[str, Users]]:
                Each element is the project name and its users
        """
        for project, project_users in self.users_per_project.items():
            yield project, project_users

    def create_workspaces(self) -> Dict[str, TowerWorkspace]:
        """Create a workspace for each project.

        Returns:
            Dict[str, TowerWorkspace]:
                Mapping of project names and their corresponding workspaces
        """
        for name, users in self.list_projects():
            # TowerWorkspace's constructor does all the provisioning work
            ws = TowerWorkspace(self, name, users)
            self.workspaces[name] = ws
        return self.workspaces
def parse_args() -> argparse.Namespace:
    """Read the script's command-line arguments.

    Returns:
        argparse.Namespace: Parsed command-line arguments
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("projects_dir")
    arg_parser.add_argument("--dry_run", "-n", action="store_true")
    return arg_parser.parse_args()
# Script entry point (fixed: dataset-column garbage was fused onto the
# `main()` line, which would be a syntax error).
if __name__ == "__main__":
    main()
# Fake-news classification UI (Streamlit).
# Fixed: the first line was garbled as "llimport streamlit as st" (a
# SyntaxError), and dataset-separator junk was fused onto the last line.
import streamlit as st
import pickle

# Sidebar branding and page header
st.sidebar.image("img.jpg", use_column_width=True)
st.sidebar.title("FAKE NEWS AI")
st.header("Fake news Classification".upper())
st.empty()

# Optional informational panels, toggled from the sidebar
option2 = st.sidebar.checkbox("creator info")
if option2:
    st.info("### project by <NAME>")
    st.info("KIET, MCA")

option4 = st.sidebar.checkbox("Project Info")
if option4:
    st.header("Project details")
    st.success('''
In this project, we have used various natural language processing techniques and machine learning algorithms to classifty fake news articles using sci-kit libraries from python. the project accuracy is 65 %, as we need 10 times the data ''')

option3 = st.sidebar.checkbox("project process")
if option3:
    st.info("### project details")
    st.image('images/ProcessFlow.PNG', use_column_width=True)

option6 = st.sidebar.checkbox("Random Classification Result")
if option6:
    st.sidebar.image('images/RF_LCurve.png', use_column_width=True)

option7 = st.sidebar.checkbox("Logistic Classification Result")
if option7:
    st.sidebar.image('images/LR_LCurve.PNG', use_column_width=True)


# function to run for prediction
def detecting_fake_news(var):
    """Classify one headline; returns (predicted label, P(not fake)).

    NOTE(review): the pickled model is re-loaded from disk on every call;
    consider caching if this becomes slow.
    """
    # retrieving the best model for prediction call
    load_model = pickle.load(open('final_model.sav', 'rb'))
    prediction = load_model.predict([var])
    prob = load_model.predict_proba([var])
    return prediction[0], prob[0][1]


data = st.text_area("Copy a News headline to test")
if st.button("analyse"):
    if data:
        with st.spinner('please wait, while we analyse this message'):
            prediction, confidence = detecting_fake_news(data)
            # Threshold of 0.6 on P(not fake) decides the verdict
            if confidence > .6:
                st.success(f"## The news is NOT FAKE")
                st.image('images/fact.png', width=150)
                st.write(f"probability:{confidence} | {prediction}")
            else:
                st.warning(f"## Its Fake news")
                st.image('images/fake.jpg', width=150)
                st.write(f"probability:{confidence} | {prediction}")
    else:
        st.error("enter some message to analyse")
else:
    st.info('click button to analyse')

# Extra diagnostics for the entered text
if data and st.checkbox("other info"):
    st.write(f"length of message {len(data)}")
    st.write(f"length of words {len(data.split())}")
    st.write(data.split())
# Fake-news classification UI (Streamlit).
# Fixed: the `import streamlit as st` line was lost to row-separator
# garbling (every `st.` call would raise NameError), and dataset-column
# junk was fused onto the last line.
import streamlit as st
import pickle

# Sidebar branding and page header
st.sidebar.image("img.jpg", use_column_width=True)
st.sidebar.title("FAKE NEWS AI")
st.header("Fake news Classification".upper())
st.empty()

# Optional informational panels, toggled from the sidebar
option2 = st.sidebar.checkbox("creator info")
if option2:
    st.info("### project by <NAME>")
    st.info("KIET, MCA")

option4 = st.sidebar.checkbox("Project Info")
if option4:
    st.header("Project details")
    st.success('''
In this project, we have used various natural language processing techniques and machine learning algorithms to classifty fake news articles using sci-kit libraries from python. the project accuracy is 65 %, as we need 10 times the data ''')

option3 = st.sidebar.checkbox("project process")
if option3:
    st.info("### project details")
    st.image('images/ProcessFlow.PNG', use_column_width=True)

option6 = st.sidebar.checkbox("Random Classification Result")
if option6:
    st.sidebar.image('images/RF_LCurve.png', use_column_width=True)

option7 = st.sidebar.checkbox("Logistic Classification Result")
if option7:
    st.sidebar.image('images/LR_LCurve.PNG', use_column_width=True)


# function to run for prediction
def detecting_fake_news(var):
    """Classify one headline; returns (predicted label, P(not fake)).

    NOTE(review): the pickled model is re-loaded from disk on every call;
    consider caching if this becomes slow.
    """
    # retrieving the best model for prediction call
    load_model = pickle.load(open('final_model.sav', 'rb'))
    prediction = load_model.predict([var])
    prob = load_model.predict_proba([var])
    return prediction[0], prob[0][1]


data = st.text_area("Copy a News headline to test")
if st.button("analyse"):
    if data:
        with st.spinner('please wait, while we analyse this message'):
            prediction, confidence = detecting_fake_news(data)
            # Threshold of 0.6 on P(not fake) decides the verdict
            if confidence > .6:
                st.success(f"## The news is NOT FAKE")
                st.image('images/fact.png', width=150)
                st.write(f"probability:{confidence} | {prediction}")
            else:
                st.warning(f"## Its Fake news")
                st.image('images/fake.jpg', width=150)
                st.write(f"probability:{confidence} | {prediction}")
    else:
        st.error("enter some message to analyse")
else:
    st.info('click button to analyse')

# Extra diagnostics for the entered text
if data and st.checkbox("other info"):
    st.write(f"length of message {len(data)}")
    st.write(f"length of words {len(data.split())}")
    st.write(data.split())
# Plot per-epoch latency snapshots for one experiment directory.
# Fixes: removed a duplicated `get_Xcent_node` import line, renamed the
# misspelled local `lastest_path` -> `latest_path`, and stripped
# dataset-separator junk fused onto the final line.
import matplotlib.pyplot as plt
import sys
import os
import json
import numpy as np
import matplotlib.patches as mpatches
import math
import matplotlib.ticker as ticker
from plot_utility import plot_figure
from plot_utility import plot_stars_figure
from plot_utility import get_Xcent_node
from plot_utility import parse_topo
from plot_utility import parse_adapt
from plot_utility import parse_file

if len(sys.argv) < 7:
    print('Require epoch_dir<str> topo_path<str> x_percent<int(0-100)/avg> unit<node/pub/hash> snapshots_dir<snapshots/snapshots-exploit> epochs<list of int>')
    sys.exit(0)

# Positional CLI arguments (see usage string above)
out_dir = sys.argv[1]
topo = sys.argv[2]
x_percent = sys.argv[3]
percent_unit = sys.argv[4]
snapshots_dir = sys.argv[5]
epochs = [int(i) for i in sys.argv[6:]]

epoch_dir = os.path.join(out_dir, snapshots_dir)
adapts = parse_adapt(os.path.join(out_dir, 'adapts'))

# Parse each requested epoch snapshot, tracking the global latency range
epoch_lats = {}
max_y = 0
min_y = 1e8
num_node = 0
for e in epochs:
    epoch_file = os.path.join(epoch_dir, 'epoch'+str(e)+'.txt')
    lats = parse_file(epoch_file, x_percent, topo, percent_unit)
    epoch_lats[e] = lats
    max_y = max(max_y, max([lat for i, lat in lats]))
    min_y = min(min_y, min([lat for i, lat in lats]))
    num_node = len(lats)

# Two side-by-side panels: per-node latency and per-adapt ("stars") view
fig, axs = plt.subplots(ncols=2, nrows=1, constrained_layout=False, figsize=(20,10))
exp_name = str(os.path.basename(out_dir))
context_name = str(os.path.dirname(out_dir))
title = snapshots_dir + ', ' + str(x_percent)+', '+percent_unit+', '+context_name+', '+str(exp_name)

patches, _ = plot_figure(epoch_lats, axs[0], epochs, min_y, max_y, num_node-1, title, adapts)
# Spread the legend over at most `num_patch_per_row` entries per row
num_patch_per_row = 10
interval = int(math.ceil( len(epochs) / num_patch_per_row))
axs[0].legend(loc='lower center', handles=patches, fontsize='small', ncol= math.ceil(len(patches)/interval))

patches, _ = plot_stars_figure(epoch_lats, axs[1], epochs, min_y, max_y, len(adapts)-1, title, adapts)
axs[1].legend(loc='lower center', handles=patches, fontsize='small', ncol= math.ceil(len(patches)/interval))

# Save one named figure plus a rolling "latest" copy
figname = exp_name+"-lat"+str(x_percent)+"-"+percent_unit
figpath = os.path.join(out_dir, figname)
latest_path = os.path.join(out_dir, "latest")
fig.savefig(figpath)
fig.savefig(latest_path)
import sys
import os
import json
import numpy as np
import matplotlib.patches as mpatches
import math
import matplotlib.ticker as ticker
from plot_utility import plot_figure
from plot_utility import plot_stars_figure
from plot_utility import get_Xcent_node
from plot_utility import get_Xcent_node
from plot_utility import parse_topo
from plot_utility import parse_adapt
from plot_utility import parse_file
if len(sys.argv) < 7:
print('Require epoch_dir<str> topo_path<str> x_percent<int(0-100)/avg> unit<node/pub/hash> snapshots_dir<snapshots/snapshots-exploit> epochs<list of int>')
sys.exit(0)
out_dir = sys.argv[1]
topo = sys.argv[2]
x_percent = sys.argv[3]
percent_unit = sys.argv[4]
snapshots_dir = sys.argv[5]
epochs = [int(i) for i in sys.argv[6:]]
epoch_dir = os.path.join(out_dir, snapshots_dir)
adapts = parse_adapt(os.path.join(out_dir, 'adapts'))
epoch_lats = {}
max_y = 0
min_y = 1e8
num_node = 0
for e in epochs:
epoch_file = os.path.join(epoch_dir, 'epoch'+str(e)+'.txt')
lats = parse_file(epoch_file, x_percent, topo, percent_unit)
epoch_lats[e] = lats
max_y = max(max_y, max([lat for i, lat in lats]))
min_y = min(min_y, min([lat for i, lat in lats]))
num_node = len(lats)
fig, axs = plt.subplots(ncols=2, nrows=1, constrained_layout=False, figsize=(20,10))
exp_name = str(os.path.basename(out_dir))
context_name = str(os.path.dirname(out_dir))
title = snapshots_dir + ', ' + str(x_percent)+', '+percent_unit+', '+context_name+', '+str(exp_name)
patches, _ = plot_figure(epoch_lats, axs[0], epochs, min_y, max_y, num_node-1, title, adapts)
num_patch_per_row = 10
interval = int(math.ceil( len(epochs) / num_patch_per_row))
axs[0].legend(loc='lower center', handles=patches, fontsize='small', ncol= math.ceil(len(patches)/interval))
patches, _ = plot_stars_figure(epoch_lats, axs[1], epochs, min_y, max_y, len(adapts)-1, title, adapts)
axs[1].legend(loc='lower center', handles=patches, fontsize='small', ncol= math.ceil(len(patches)/interval))
figname = exp_name+"-lat"+str(x_percent)+"-"+percent_unit
figpath = os.path.join(out_dir, figname)
lastest_path = os.path.join(out_dir, "latest")
fig.savefig(figpath)
fig.savefig(lastest_path) | 0.177454 | 0.269214 |
import json
import argparse
from pathlib import Path
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default="/deep/group/xray4all")
parser.add_argument('--experiment_dir', default=None)
parser.add_argument('--old_final', action="store_true")
parser.add_argument('--new_final', action="store_true")
return parser
def get_config_list(data_dir, experiment_dir):
ckpts_dir = Path(data_dir) / "final_ckpts"
config_list = []
for run in ["", "_2", "_3"]:
full_experiment_dir = ckpts_dir / (experiment_dir + run)
for ckpt_path in full_experiment_dir.glob("*.tar"):
if "best.pth.tar" in str(ckpt_path):
continue
config_dict = {}
config_dict["ckpt_path"] = str(ckpt_path)
with open(ckpt_path.parent / "args.json", 'r') as f:
run_args = json.load(f)
config_dict["is_3class"] = run_args["model_args"]["model_uncertainty"]
config_list.append(config_dict)
return config_list
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
assert args.experiment_dir is not None or args.new_final or args.old_final
pathologies = ["No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Lesion",
"Airspace Opacity",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices"]
configs_dir = Path("dataset/predict_configs")
configs_dir.mkdir(exist_ok=True)
if args.old_final:
path2experiment_dir = {"Atelectasis": "DenseNet121_320_1e-04_uncertainty_ones_top10",
"Cardiomegaly": "DenseNet121_320_1e-04_uncertainty_3-class_top10",
"Consolidation": "DenseNet121_320_1e-04_uncertainty_self-train_top10",
"Edema": "DenseNet121_320_1e-04_uncertainty_ones_top10",
"Pleural Effusion": "DenseNet121_320_1e-04_uncertainty_3-class_top10"}
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology, experiment_dir in path2experiment_dir.items():
config_list = get_config_list(args.data_dir, experiment_dir)
config["task2models"][pathology] = config_list
with open(configs_dir / "final.json", 'w') as f:
json.dump(config, f, indent=4)
elif args.new_final:
path2experiment_dir = {"Atelectasis": "CheXpert-Ones",
"Cardiomegaly": "CheXpert-3-class",
"Consolidation": "CheXpert-Self-Train",
"Edema": "CheXpert-Ones",
"Pleural Effusion": "CheXpert-3-class"}
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology, experiment_dir in path2experiment_dir.items():
config_list = get_config_list(args.data_dir, experiment_dir)
config["task2models"][pathology] = config_list
with open(configs_dir / "CheXpert-final.json", 'w') as f:
json.dump(config, f, indent=4)
else:
config_list = get_config_list(args.data_dir, args.experiment_dir)
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology in pathologies:
config["task2models"][pathology] = config_list
with open(configs_dir / (args.experiment_dir + ".json"), 'w') as f:
json.dump(config, f, indent=4) | scripts/write_configs.py | import json
import argparse
from pathlib import Path
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default="/deep/group/xray4all")
parser.add_argument('--experiment_dir', default=None)
parser.add_argument('--old_final', action="store_true")
parser.add_argument('--new_final', action="store_true")
return parser
def get_config_list(data_dir, experiment_dir):
ckpts_dir = Path(data_dir) / "final_ckpts"
config_list = []
for run in ["", "_2", "_3"]:
full_experiment_dir = ckpts_dir / (experiment_dir + run)
for ckpt_path in full_experiment_dir.glob("*.tar"):
if "best.pth.tar" in str(ckpt_path):
continue
config_dict = {}
config_dict["ckpt_path"] = str(ckpt_path)
with open(ckpt_path.parent / "args.json", 'r') as f:
run_args = json.load(f)
config_dict["is_3class"] = run_args["model_args"]["model_uncertainty"]
config_list.append(config_dict)
return config_list
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
assert args.experiment_dir is not None or args.new_final or args.old_final
pathologies = ["No Finding",
"Enlarged Cardiomediastinum",
"Cardiomegaly",
"Lung Lesion",
"Airspace Opacity",
"Edema",
"Consolidation",
"Pneumonia",
"Atelectasis",
"Pneumothorax",
"Pleural Effusion",
"Pleural Other",
"Fracture",
"Support Devices"]
configs_dir = Path("dataset/predict_configs")
configs_dir.mkdir(exist_ok=True)
if args.old_final:
path2experiment_dir = {"Atelectasis": "DenseNet121_320_1e-04_uncertainty_ones_top10",
"Cardiomegaly": "DenseNet121_320_1e-04_uncertainty_3-class_top10",
"Consolidation": "DenseNet121_320_1e-04_uncertainty_self-train_top10",
"Edema": "DenseNet121_320_1e-04_uncertainty_ones_top10",
"Pleural Effusion": "DenseNet121_320_1e-04_uncertainty_3-class_top10"}
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology, experiment_dir in path2experiment_dir.items():
config_list = get_config_list(args.data_dir, experiment_dir)
config["task2models"][pathology] = config_list
with open(configs_dir / "final.json", 'w') as f:
json.dump(config, f, indent=4)
elif args.new_final:
path2experiment_dir = {"Atelectasis": "CheXpert-Ones",
"Cardiomegaly": "CheXpert-3-class",
"Consolidation": "CheXpert-Self-Train",
"Edema": "CheXpert-Ones",
"Pleural Effusion": "CheXpert-3-class"}
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology, experiment_dir in path2experiment_dir.items():
config_list = get_config_list(args.data_dir, experiment_dir)
config["task2models"][pathology] = config_list
with open(configs_dir / "CheXpert-final.json", 'w') as f:
json.dump(config, f, indent=4)
else:
config_list = get_config_list(args.data_dir, args.experiment_dir)
config = {}
config["aggregation_method"] = "mean"
config["task2models"] = {}
for pathology in pathologies:
config["task2models"][pathology] = config_list
with open(configs_dir / (args.experiment_dir + ".json"), 'w') as f:
json.dump(config, f, indent=4) | 0.271638 | 0.159446 |
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyBoxedBool, CPyBoxedF32, CPyBoxedF64, CPyBoxedI16, CPyBoxedI32, CPyBoxedI64, CPyBoxedI8, CPyPrimitive, CPyRecord, CPyString
from constant_record import ConstantRecord
from constant_record_helper import ConstantRecordHelper
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class Constants:
"""
Record containing constants
Constants
BOOL_CONSTANT: bool_constant has documentation.
I8_CONSTANT
I16_CONSTANT
I32_CONSTANT
I64_CONSTANT
F32_CONSTANT
F64_CONSTANT: f64_constant has long documentation.
(Second line of multi-line documentation.
Indented third line of multi-line documentation.)
OPT_BOOL_CONSTANT
OPT_I8_CONSTANT
OPT_I16_CONSTANT: opt_i16_constant has documentation.
OPT_I32_CONSTANT
OPT_I64_CONSTANT
OPT_F32_CONSTANT: opt_f32_constant has long documentation.
(Second line of multi-line documentation.
Indented third line of multi-line documentation.)
OPT_F64_CONSTANT
STRING_CONSTANT
OPT_STRING_CONSTANT
OBJECT_CONSTANT
DUMMY: No support for null optional constants
No support for optional constant records
No support for constant binary, list, set, map
"""
c_data_set = MultiSet()
@staticmethod
def check_c_data_set_empty():
assert len(Constants.c_data_set) == 0
BOOL_CONSTANT = True
I8_CONSTANT = 1
I16_CONSTANT = 2
I32_CONSTANT = 3
I64_CONSTANT = 4
F32_CONSTANT = 5.0
F64_CONSTANT = 5.0
OPT_BOOL_CONSTANT = True
OPT_I8_CONSTANT = 1
OPT_I16_CONSTANT = 2
OPT_I32_CONSTANT = 3
OPT_I64_CONSTANT = 4
OPT_F32_CONSTANT = 5.0
OPT_F64_CONSTANT = 5.0
STRING_CONSTANT = "string-constant"
OPT_STRING_CONSTANT = "string-constant"
DUMMY = False
def __init__(self):
pass
Constants.OBJECT_CONSTANT = ConstantRecord(
Constants.I32_CONSTANT,
Constants.STRING_CONSTANT) | test-suite/generated-src/python/constants.py |
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyBoxedBool, CPyBoxedF32, CPyBoxedF64, CPyBoxedI16, CPyBoxedI32, CPyBoxedI64, CPyBoxedI8, CPyPrimitive, CPyRecord, CPyString
from constant_record import ConstantRecord
from constant_record_helper import ConstantRecordHelper
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
class Constants:
"""
Record containing constants
Constants
BOOL_CONSTANT: bool_constant has documentation.
I8_CONSTANT
I16_CONSTANT
I32_CONSTANT
I64_CONSTANT
F32_CONSTANT
F64_CONSTANT: f64_constant has long documentation.
(Second line of multi-line documentation.
Indented third line of multi-line documentation.)
OPT_BOOL_CONSTANT
OPT_I8_CONSTANT
OPT_I16_CONSTANT: opt_i16_constant has documentation.
OPT_I32_CONSTANT
OPT_I64_CONSTANT
OPT_F32_CONSTANT: opt_f32_constant has long documentation.
(Second line of multi-line documentation.
Indented third line of multi-line documentation.)
OPT_F64_CONSTANT
STRING_CONSTANT
OPT_STRING_CONSTANT
OBJECT_CONSTANT
DUMMY: No support for null optional constants
No support for optional constant records
No support for constant binary, list, set, map
"""
c_data_set = MultiSet()
@staticmethod
def check_c_data_set_empty():
assert len(Constants.c_data_set) == 0
BOOL_CONSTANT = True
I8_CONSTANT = 1
I16_CONSTANT = 2
I32_CONSTANT = 3
I64_CONSTANT = 4
F32_CONSTANT = 5.0
F64_CONSTANT = 5.0
OPT_BOOL_CONSTANT = True
OPT_I8_CONSTANT = 1
OPT_I16_CONSTANT = 2
OPT_I32_CONSTANT = 3
OPT_I64_CONSTANT = 4
OPT_F32_CONSTANT = 5.0
OPT_F64_CONSTANT = 5.0
STRING_CONSTANT = "string-constant"
OPT_STRING_CONSTANT = "string-constant"
DUMMY = False
def __init__(self):
pass
Constants.OBJECT_CONSTANT = ConstantRecord(
Constants.I32_CONSTANT,
Constants.STRING_CONSTANT) | 0.547464 | 0.085251 |
import matplotlib.pyplot as plt
# Measurements from happy-path.data
expt = [
('HotStuff',[
(14.917,11.54),
(41.649,12.6),
(62.075,14.15),
(94.362,18.69),
(112.436,23.72),
(124.599,28.59),
(129.521,33.79),
(135.073,39.175),
(140.052,48.7),
(142.850,59.3)
], '-o'),
('2C-HS',[
(17.462,9.6),
(46.540,10.8),
(69.698,12.2),
(101.286,17),
(113.162,22.8),
(127.463,27.4),
(132.674,31.5),
(136.262,37),
(139.196,46.3),
(142.981,57.5)
], '--+'),
('Streamlet',[
(16.159,10.15),
(46.59,10.76),
(67.20,12.25),
(101.170,16.63),
(117.174,21.69),
(128.625,26.85),
(132.803,30.55),
(136.484,36.5),
(138.231,45.44),
(144.888,51.7)
], '-*'),
('Origin-HS',[
(17.966,12.14),
(58.966,12.52),
(131.544,13.07),
(141.544,14.07),
(151.544,15.07),
(169.542,18.3),
(172.564,22.4),
(176.649,37.4),
(176.851,48.4)
], '-s')]
def do_plot():
f = plt.figure(1, figsize=(7,5));
plt.clf()
ax = f.add_subplot(1, 1, 1)
for name, entries, style in expt:
throughput = []
latency = []
for t, l in entries:
# batch.append(N*ToverN)
# throughput.append(ToverN*(N-t) / latency)
throughput.append(t)
latency.append(l)
ax.plot(throughput, latency, style, label='%s' % name)
#ax.set_xscale("log")
# ax.set_yscale("log")
# plt.ylim([0, 50])
#plt.xlim([10**3.8, 10**6.4])
plt.legend(loc='upper left')
# plt.ylabel('Throughput (Tx per second) in log scale')
plt.ylabel('Latency (ms)')
plt.xlabel('Throughput (KTx/s)')
# plt.xlabel('Requests (Tx) in log scale')
plt.tight_layout()
# plt.show()
plt.savefig('happy-path.pdf', format='pdf', dpi=400)
if __name__ == '__main__':
do_plot() | plot/happy-path/happy-path.py | import matplotlib.pyplot as plt
# Measurements from happy-path.data
expt = [
('HotStuff',[
(14.917,11.54),
(41.649,12.6),
(62.075,14.15),
(94.362,18.69),
(112.436,23.72),
(124.599,28.59),
(129.521,33.79),
(135.073,39.175),
(140.052,48.7),
(142.850,59.3)
], '-o'),
('2C-HS',[
(17.462,9.6),
(46.540,10.8),
(69.698,12.2),
(101.286,17),
(113.162,22.8),
(127.463,27.4),
(132.674,31.5),
(136.262,37),
(139.196,46.3),
(142.981,57.5)
], '--+'),
('Streamlet',[
(16.159,10.15),
(46.59,10.76),
(67.20,12.25),
(101.170,16.63),
(117.174,21.69),
(128.625,26.85),
(132.803,30.55),
(136.484,36.5),
(138.231,45.44),
(144.888,51.7)
], '-*'),
('Origin-HS',[
(17.966,12.14),
(58.966,12.52),
(131.544,13.07),
(141.544,14.07),
(151.544,15.07),
(169.542,18.3),
(172.564,22.4),
(176.649,37.4),
(176.851,48.4)
], '-s')]
def do_plot():
f = plt.figure(1, figsize=(7,5));
plt.clf()
ax = f.add_subplot(1, 1, 1)
for name, entries, style in expt:
throughput = []
latency = []
for t, l in entries:
# batch.append(N*ToverN)
# throughput.append(ToverN*(N-t) / latency)
throughput.append(t)
latency.append(l)
ax.plot(throughput, latency, style, label='%s' % name)
#ax.set_xscale("log")
# ax.set_yscale("log")
# plt.ylim([0, 50])
#plt.xlim([10**3.8, 10**6.4])
plt.legend(loc='upper left')
# plt.ylabel('Throughput (Tx per second) in log scale')
plt.ylabel('Latency (ms)')
plt.xlabel('Throughput (KTx/s)')
# plt.xlabel('Requests (Tx) in log scale')
plt.tight_layout()
# plt.show()
plt.savefig('happy-path.pdf', format='pdf', dpi=400)
if __name__ == '__main__':
do_plot() | 0.280616 | 0.445228 |