input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
of the disk in the Linode API.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def image(self) -> Optional[str]:
    """The Image ID the Disk was deployed from.

    Official Linode Images start with ``linode/``; user Images start with
    ``private/`` (e.g. `linode/debian9`, `private/12345`). See all images
    [here](https://api.linode.com/v4/linode/kernels). *Changing `image`
    forces the creation of a new Linode Instance.*
    """
    image_id = pulumi.get(self, "image")
    return image_id
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
    """Return the ``read_only`` value stored for this Disk."""
    value = pulumi.get(self, "read_only")
    return value
@property
@pulumi.getter(name="rootPass")
def root_pass(self) -> Optional[str]:
    """The initial password for the `root` user account.

    *This value can not be imported.* *Changing `root_pass` forces the
    creation of a new Linode Instance.* *If omitted, a random password will
    be generated but will not be stored in state.*
    """
    value = pulumi.get(self, "root_pass")
    return value
@property
@pulumi.getter(name="stackscriptData")
def stackscript_data(self) -> Optional[Mapping[str, Any]]:
    """Responses to any User Defined Fields present in the StackScript
    deployed to this Linode.

    Only accepted if `stackscript_id` is given; the required values depend
    on the StackScript being deployed. *This value can not be imported.*
    *Changing `stackscript_data` forces the creation of a new Linode
    Instance.*
    """
    value = pulumi.get(self, "stackscript_data")
    return value
@property
@pulumi.getter(name="stackscriptId")
def stackscript_id(self) -> Optional[int]:
    """The StackScript to deploy to the newly created Linode.

    If provided, `image` must also be provided and must be compatible with
    this StackScript. *This value can not be imported.* *Changing
    `stackscript_id` forces the creation of a new Linode Instance.*
    """
    value = pulumi.get(self, "stackscript_id")
    return value
@pulumi.output_type
class InstanceInterface(dict):
    """Output type describing a network interface of a Linode Instance."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used instead of the snake_case
        # property getter.
        suggest = {"ipamAddress": "ipam_address"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceInterface. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceInterface.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceInterface.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ipam_address: Optional[str] = None,
                 label: Optional[str] = None,
                 purpose: Optional[str] = None):
        """
        :param str ipam_address: This Network Interface’s private IP address in Classless Inter-Domain Routing (CIDR) notation.
        :param str label: The name of this interface. If the interface is a VLAN, a label is required.
        :param str purpose: The type of interface. (`public`, `vlan`)
        """
        # Store only the values that were actually supplied.
        for name, value in (("ipam_address", ipam_address),
                            ("label", label),
                            ("purpose", purpose)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="ipamAddress")
    def ipam_address(self) -> Optional[str]:
        """This Network Interface’s private IP address in Classless
        Inter-Domain Routing (CIDR) notation.
        """
        return pulumi.get(self, "ipam_address")

    @property
    @pulumi.getter
    def label(self) -> Optional[str]:
        """The name of this interface. If the interface is a VLAN, a label
        is required.
        """
        return pulumi.get(self, "label")

    @property
    @pulumi.getter
    def purpose(self) -> Optional[str]:
        """The type of interface. (`public`, `vlan`)"""
        return pulumi.get(self, "purpose")
@pulumi.output_type
class InstanceSpecs(dict):
    """Output type holding the hardware specs reported for a Linode Instance."""

    def __init__(__self__, *,
                 disk: Optional[int] = None,
                 memory: Optional[int] = None,
                 transfer: Optional[int] = None,
                 vcpus: Optional[int] = None):
        # All fields are optional; store only the ones supplied.
        for name, value in (("disk", disk),
                            ("memory", memory),
                            ("transfer", transfer),
                            ("vcpus", vcpus)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def disk(self) -> Optional[int]:
        """Return the ``disk`` value of these specs."""
        return pulumi.get(self, "disk")

    @property
    @pulumi.getter
    def memory(self) -> Optional[int]:
        """Return the ``memory`` value of these specs."""
        return pulumi.get(self, "memory")

    @property
    @pulumi.getter
    def transfer(self) -> Optional[int]:
        """Return the ``transfer`` value of these specs."""
        return pulumi.get(self, "transfer")

    @property
    @pulumi.getter
    def vcpus(self) -> Optional[int]:
        """Return the ``vcpus`` value of these specs."""
        return pulumi.get(self, "vcpus")
@pulumi.output_type
class LkeClusterPool(dict):
    """Output type describing a node pool of an LKE cluster."""

    def __init__(__self__, *,
                 count: int,
                 type: str,
                 id: Optional[int] = None,
                 nodes: Optional[Sequence['outputs.LkeClusterPoolNode']] = None):
        """
        :param int count: The number of nodes in the Node Pool.
        :param str type: A Linode Type for all of the nodes in the Node Pool. See all node types [here](https://api.linode.com/v4/linode/types).
        :param int id: The ID of the node.
        """
        # Required fields are always stored.
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "type", type)
        # Optional fields are stored only when supplied.
        for name, value in (("id", id), ("nodes", nodes)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def count(self) -> int:
        """The number of nodes in the Node Pool."""
        return pulumi.get(self, "count")

    @property
    @pulumi.getter
    def type(self) -> str:
        """A Linode Type for all of the nodes in the Node Pool. See all node
        types [here](https://api.linode.com/v4/linode/types).
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def id(self) -> Optional[int]:
        """The ID of the node."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def nodes(self) -> Optional[Sequence['outputs.LkeClusterPoolNode']]:
        """Return the ``nodes`` value of this pool, if any."""
        return pulumi.get(self, "nodes")
@pulumi.output_type
class LkeClusterPoolNode(dict):
    """Output type describing a single node in an LKE cluster pool."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used instead of the snake_case
        # property getter.
        suggest = {"instanceId": "instance_id"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in LkeClusterPoolNode. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        LkeClusterPoolNode.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        LkeClusterPoolNode.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 instance_id: Optional[int] = None,
                 status: Optional[str] = None):
        """
        :param str id: The ID of the node.
        :param int instance_id: The ID of the underlying Linode instance.
        :param str status: The status of the node. (`ready`, `not_ready`)
        """
        # Store only the values that were actually supplied.
        for name, value in (("id", id),
                            ("instance_id", instance_id),
                            ("status", status)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The ID of the node."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> Optional[int]:
        """The ID of the underlying Linode instance."""
        return pulumi.get(self, "instance_id")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """The status of the node. (`ready`, `not_ready`)"""
        return pulumi.get(self, "status")
@pulumi.output_type
class NodeBalancerConfigNodeStatus(dict):
    """Output type holding the up/down node counts of a NodeBalancer config."""

    def __init__(__self__, *,
                 down: Optional[int] = None,
                 up: Optional[int] = None):
        # Both fields are optional; store only the ones supplied.
        for name, value in (("down", down), ("up", up)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter
    def down(self) -> Optional[int]:
        """Return the ``down`` value of this status."""
        return pulumi.get(self, "down")

    @property
    @pulumi.getter
    def up(self) -> Optional[int]:
        """Return the ``up`` value of this status."""
        return pulumi.get(self, "up")
@pulumi.output_type
class NodeBalancerTransfer(dict):
    """Output type holding the transfer figures of a NodeBalancer."""

    @staticmethod
    def __key_warning(key: str):
        # "in" is a Python keyword, so the property is exposed as "in_".
        suggest = {"in": "in_"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NodeBalancerTransfer. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NodeBalancerTransfer.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NodeBalancerTransfer.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 in_: Optional[float] = None,
                 out: Optional[float] = None,
                 total: Optional[float] = None):
        # All fields are optional; store only the ones supplied.
        for name, value in (("in_", in_), ("out", out), ("total", total)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="in")
    def in_(self) -> Optional[float]:
        """Return the ``in`` transfer value (exposed as ``in_``)."""
        return pulumi.get(self, "in_")

    @property
    @pulumi.getter
    def out(self) -> Optional[float]:
        """Return the ``out`` transfer value."""
        return pulumi.get(self, "out")

    @property
    @pulumi.getter
    def total(self) -> Optional[float]:
        """Return the ``total`` transfer value."""
        return pulumi.get(self, "total")
@pulumi.output_type
class ObjectStorageBucketCert(dict):
    """Output type holding the TLS/SSL certificate of an Object Storage bucket."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase key is used instead of the snake_case
        # property getter.
        suggest = {"privateKey": "private_key"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ObjectStorageBucketCert. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ObjectStorageBucketCert.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ObjectStorageBucketCert.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 certificate: str,
                 private_key: str):
        """
        :param str certificate: The Base64 encoded and PEM formatted SSL certificate.
        :param str private_key: The private key associated with the TLS/SSL certificate.
        """
        # Both fields are required, so they are always stored.
        pulumi.set(__self__, "certificate", certificate)
        pulumi.set(__self__, "private_key", private_key)

    @property
    @pulumi.getter
    def certificate(self) -> str:
        """The Base64 encoded and PEM formatted SSL certificate."""
        return pulumi.get(self, "certificate")

    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> str:
        """The private key associated with the TLS/SSL certificate."""
        return pulumi.get(self, "private_key")
@pulumi.output_type
class ObjectStorageBucketLifecycleRule(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "abortIncompleteMultipartUploadDays":
suggest = "abort_incomplete_multipart_upload_days"
elif key == "noncurrentVersionExpiration":
suggest = "noncurrent_version_expiration"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ObjectStorageBucketLifecycleRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ObjectStorageBucketLifecycleRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ObjectStorageBucketLifecycleRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
abort_incomplete_multipart_upload_days: Optional[int] = None,
expiration: Optional['outputs.ObjectStorageBucketLifecycleRuleExpiration'] = None,
id: Optional[str] = None,
noncurrent_version_expiration: Optional['outputs.ObjectStorageBucketLifecycleRuleNoncurrentVersionExpiration'] = None,
prefix: Optional[str] = None):
"""
:param bool enabled: Specifies whether the lifecycle rule is active.
:param int abort_incomplete_multipart_upload_days: Specifies the number of days after initiating a multipart upload when the multipart upload must be completed.
| |
#!/usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Acceptance tests for various compression techniques """
import math
import os
import unittest
import unittest.mock
import logging
import shutil
import pickle
from decimal import Decimal
from glob import glob
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.examples.tutorials.mnist import input_data
import aimet_common.defs
import aimet_tensorflow.utils.graph_saver
from aimet_common.utils import AimetLogger
import aimet_tensorflow.defs
from aimet_tensorflow.defs import ModuleCompRatioPair
from aimet_tensorflow.common import graph_eval
from aimet_tensorflow.compress import ModelCompressor
from aimet_tensorflow.common import tfrecord_generator
from aimet_tensorflow.common.tfrecord_generator import MnistParser
from aimet_tensorflow.examples.test_models import model_with_three_convs
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Test)
def tiny_imagenet_parse(serialized_example):
    """
    Parser for TINY IMAGENET models, reads the tfRecords file
    :param serialized_example: serialized tf.Example proto
    :return: Input image tensor reshaped to [64, 64, 3]
    """
    # This works for tf_slim model: resnet_50_v2 but NOT for Keras VGG16
    # Dense features in Example proto.
    scalar_int = tf.FixedLenFeature((), tf.int64)
    feature_map = {
        'height': scalar_int,
        'width': scalar_int,
        'channel': scalar_int,
        'label': scalar_int,
        'image_raw': tf.FixedLenFeature((), tf.string),
        'location_raw': tf.FixedLenFeature((), tf.string),
    }
    parsed = tf.parse_single_example(serialized_example, feature_map)
    raw_bytes = tf.decode_raw(parsed["image_raw"], tf.uint8)
    return tf.reshape(raw_bytes, [64, 64, 3])
def imagenet_parse(serialized_example):
    """
    Parser for IMAGENET models, reads the tfRecords file
    :param serialized_example: serialized tf.Example proto
    :return: Input image tensor resized to 224x224x3
    """
    dim = 224
    feature_map = {
        'image/class/label': tf.FixedLenFeature([], tf.int64),
        'image/encoded': tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(serialized_example, features=feature_map)
    image_data = parsed['image/encoded']
    # Decode the jpeg
    with tf.name_scope('prep_image', [image_data], None):
        # decode and reshape to default 224x224
        # pylint: disable=no-member
        image = tf.image.decode_jpeg(image_data, channels=3)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.resize_images(image, [dim, dim])
    return image
def evaluate(model: tf.Session, iterations: int, use_cuda: bool):
    """
    eval function for MNIST LeNet model
    :param model: tf.Session holding the model graph
    :param iterations: number of batches to evaluate; None means the whole
        test set (10000 images at batch size 64)
    :param use_cuda: accepted for the eval-callback signature; not used here
    :return: average accuracy over the evaluated batches
    """
    total_test_images = 10000
    batch_size = 64
    # iterate over entire test data set, when iterations is None
    # TODO : figure out way to end iterator when the data set is exhausted
    if iterations is None:
        iterations = int(total_test_images / batch_size)
    parser = MnistParser(data_inputs=['reshape_input'], validation_inputs=['labels'],
                         batch_size=batch_size)
    # Allocate the generator you wish to use to provide the network with data
    generator = tfrecord_generator.TfRecordGenerator(
        tfrecords=[os.path.join('data', 'mnist', 'validation.tfrecords')],
        parser=parser, num_gpus=1)
    # Create the tensor map for input and ground truth ops
    input_tensor_map = {
        name: model.graph.get_tensor_by_name(name + ':0')
        for name in ('reshape_input', 'labels')
    }
    # get the evaluation tensor
    eval_tensor = model.graph.get_tensor_by_name('accuracy:0')
    accuracy_sum = 0
    batches_done = 0
    for batch in generator:
        batches_done += 1
        # Setup the feed dictionary
        feed_dict = {input_tensor_map[name]: data for name, data in batch.items()}
        with model.as_default():
            accuracy_sum += model.run(eval_tensor, feed_dict=feed_dict)
        if batches_done >= iterations:
            break
    return accuracy_sum / batches_done
class SvdAcceptanceTests(unittest.TestCase):
def test_spatial_svd_compress_auto_with_finetuning(self):
    """
    End to end test with MNIST model following fine tuning
    :return:
    """
    tf.set_random_seed(10)
    AimetLogger.set_level_for_all_areas(logging.DEBUG)
    # load the meta file
    meta_path = os.path.join('models', 'mnist_save.meta')
    sess = aimet_tensorflow.utils.graph_saver.load_model_from_meta(meta_path)
    # ignore first Conv2D op
    conv2d = sess.graph.get_operation_by_name('conv1/Conv2D')
    modules_to_ignore = [conv2d]
    # Greedy ratio selection: target 50% compression, 10 candidate ratios.
    greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.5),
                                                                num_comp_ratio_candidates=10,
                                                                use_monotonic_fit=True,
                                                                saved_eval_scores_dict=None)
    auto_params = aimet_tensorflow.defs.SpatialSvdParameters.AutoModeParams(greedy_select_params=greedy_params,
                                                                            modules_to_ignore=modules_to_ignore)
    params = aimet_tensorflow.defs.SpatialSvdParameters(input_op_names=['reshape_input'],
                                                        output_op_names=['dense_1/BiasAdd'],
                                                        mode=aimet_tensorflow.defs.SpatialSvdParameters.Mode.auto,
                                                        params=auto_params, multiplicity=8)
    input_shape = (1, 1, 28, 28)
    # Compress the model with spatial SVD, optimizing for MAC count.
    compr_model_sess, stats = ModelCompressor.compress_model(sess=sess,
                                                             working_dir=None,
                                                             eval_callback=evaluate, eval_iterations=5,
                                                             input_shape=input_shape,
                                                             compress_scheme=aimet_common.defs.CompressionScheme.
                                                             spatial_svd,
                                                             cost_metric=aimet_common.defs.CostMetric.mac,
                                                             parameters=params)
    print(stats)
    # The reported compressed accuracy must match a fresh evaluation.
    self.assertEqual(evaluate(compr_model_sess, 1, True), float(stats.compressed_model_accuracy))
    all_ops = compr_model_sess.graph.get_operations()
    conv_ops = [op for op in all_ops if op.type == 'Conv2D']
    # SVD splits compressed convs in two, so the Conv2D count should be 4.
    self.assertEqual(len(conv_ops), 4)
    self.assertTrue(math.isclose(float(stats.mac_compression_ratio), 0.5, abs_tol=0.1))
    # get the weights before fine tuning
    conv2d_1_a_op = compr_model_sess.graph.get_operation_by_name('conv2/Conv2D_a')
    conv2d_1_a_op_weights_before = conv2d_1_a_op.inputs[1].eval(session=compr_model_sess)
    # fine tune the model
    # get the input and validation place holders
    x = compr_model_sess.graph.get_tensor_by_name('reshape_input:0')
    y = compr_model_sess.graph.get_tensor_by_name('labels:0')
    cross_entropy = compr_model_sess.graph.get_tensor_by_name('xent:0')
    with compr_model_sess.graph.as_default():
        # new optimizer and back propagation Op
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3, name='Adam_new')
        train_step = optimizer.minimize(loss=cross_entropy, name='train_step_new')
    # initialize only uninitialized variables
    # only needed when fine tuning, because we are adding new optimizer
    graph_eval.initialize_uninitialized_vars(compr_model_sess)
    mnist = input_data.read_data_sets(os.path.join(str('./'), 'data'), one_hot=True)
    # One training step is enough to perturb the weights.
    for i in range(1):
        batch = mnist.train.next_batch(batch_size=32, shuffle=True)
        _, loss_val = compr_model_sess.run([train_step, cross_entropy], feed_dict={x: batch[0], y: batch[1]})
    # get the weights after fine tuning
    conv2d_1_a_op = compr_model_sess.graph.get_operation_by_name('conv2/Conv2D_a')
    conv2d_1_a_op_weights_after = conv2d_1_a_op.inputs[1].eval(session=compr_model_sess)
    # weight should be different after one iteration
    self.assertFalse(np.allclose(conv2d_1_a_op_weights_before, conv2d_1_a_op_weights_after))
    # close original session
    sess.close()
    # close compressed model session
    compr_model_sess.close()
    # delete temp directory
    shutil.rmtree(str('./temp_meta/'))
def test_spatial_svd_compress_manual(self):
    """
    End to end manual mode spatial SVD using Resnet50 Keras model
    :return:
    """
    np.random.seed(1)
    AimetLogger.set_level_for_all_areas(logging.INFO)
    graph = tf.Graph()
    with graph.as_default():
        _ = ResNet50(weights=None, input_shape=(224, 224, 3))
        init = tf.global_variables_initializer()
    # Grow GPU memory as needed at the cost of fragmentation.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # pylint: disable=no-member
    sess = tf.Session(graph=graph, config=config)
    with sess.graph.as_default():
        # predicted value of the model
        y_hat = sess.graph.get_tensor_by_name('probs/Softmax:0')
        # place holder for the labels
        y = tf.placeholder(tf.int64, shape=[None, 1000], name='labels')
        # prediction Op
        correct_prediction = tf.equal(tf.argmax(y_hat, axis=1), tf.argmax(y, axis=1))
        # accuracy Op
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
    sess.run(init)
    writer = tf.summary.FileWriter('./', sess.graph)
    # make sure the learning_phase flag is False (inference mode)
    learning_phase = sess.graph.get_tensor_by_name('keras_learning_phase/input:0')
    self.assertFalse(sess.run(learning_phase))
    input_shape = (1, 224, 224, 3)
    # Manually pick two convs and their target compression ratios.
    conv2_block1_2_conv = sess.graph.get_operation_by_name('conv2_block1_2_conv/Conv2D')
    conv3_block1_2_conv = sess.graph.get_operation_by_name('conv3_block1_2_conv/Conv2D')
    list_of_module_comp_ratio_pairs = [ModuleCompRatioPair(conv2_block1_2_conv, 0.5),
                                       ModuleCompRatioPair(conv3_block1_2_conv, 0.4)
                                       ]
    manual_params = aimet_tensorflow.defs.SpatialSvdParameters.ManualModeParams(list_of_module_comp_ratio_pairs=
                                                                                list_of_module_comp_ratio_pairs)
    params = aimet_tensorflow.defs.SpatialSvdParameters(input_op_names=['input_1'],
                                                        output_op_names=['probs/Softmax'],
                                                        mode=aimet_tensorflow.defs.SpatialSvdParameters.Mode.manual,
                                                        params=manual_params, multiplicity=8)
    # Mock eval callback: side_effect supplies the two scores it will return.
    mocked_eval = unittest.mock.MagicMock()
    mocked_eval.side_effect = [87, 64]
    results = ModelCompressor.compress_model(sess=sess, working_dir=None, eval_callback=mocked_eval,
                                             eval_iterations=5, input_shape=input_shape,
                                             compress_scheme=aimet_common.defs.CompressionScheme.spatial_svd,
                                             cost_metric=aimet_common.defs.CostMetric.mac, parameters=params,
                                             trainer=None)
    compr_model_sess, stats = results
    print(stats)
    # split ops for res2a_branch2b_Conv2D op
    conv2_block1_2_conv_a = compr_model_sess.graph.get_operation_by_name('conv2_block1_2_conv/Conv2D_a')
    conv2_block1_2_conv_b = compr_model_sess.graph.get_operation_by_name('conv2_block1_2_conv/Conv2D_b')
    # split ops for res3a_branch2b_Conv2D op
    conv3_block1_2_conv_a = compr_model_sess.graph.get_operation_by_name('conv3_block1_2_conv/Conv2D_a')
    conv3_block1_2_conv_b = compr_model_sess.graph.get_operation_by_name('conv3_block1_2_conv/Conv2D_b')
    # Check the split weight shapes reflect the requested rank reduction.
    # res2a_branch2b_Conv2D
    self.assertEqual(compr_model_sess.run(conv2_block1_2_conv_a.inputs[1]).shape, (3, 1, 64, 48))
    self.assertEqual(compr_model_sess.run(conv2_block1_2_conv_b.inputs[1]).shape, (1, 3, 48, 64))
    self.assertEqual(compr_model_sess.run(conv3_block1_2_conv_a.inputs[1]).shape, (3, 1, 128, 80))
    self.assertEqual(compr_model_sess.run(conv3_block1_2_conv_b.inputs[1]).shape, (1, 3, 80, 128))
    # forward pass to the model with random data and labels
    input_data = np.random.rand(32, 224, 224, 3)
    labels = np.random.randint(low=2, size=(32, 1000))
    accuracy_tensor = compr_model_sess.graph.get_tensor_by_name('accuracy:0')
    input_tensor = compr_model_sess.graph.get_tensor_by_name('input_1:0')
    label_tensor = compr_model_sess.graph.get_tensor_by_name('labels:0')
    # make sure the learning_phase flag is False (inference mode)
    learning_phase = compr_model_sess.graph.get_tensor_by_name('keras_learning_phase/input:0')
    self.assertFalse(compr_model_sess.run(learning_phase))
    compr_model_sess.run(accuracy_tensor, feed_dict={input_tensor: input_data, label_tensor: labels})
    # close original session
    sess.close()
    # close compressed model session
    compr_model_sess.close()
    # delete temp directory
    shutil.rmtree(str('./temp_meta/'))
def test_spatial_svd_compress_auto(self):
"""
End to end auto mode spatial SVD using Resnet50 Keras model
:return:
"""
np.random.seed(1)
AimetLogger.set_level_for_all_areas(logging.INFO)
graph = tf.Graph()
with graph.as_default():
_ = ResNet50(weights=None, input_shape=(224, 224, 3))
init = tf.global_variables_initializer()
# Grow GPU memory as needed at the cost of fragmentation.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=no-member
sess = tf.Session(graph=graph, config=config)
all_ops = sess.graph.get_operations()
for op in all_ops:
print(op.name)
with sess.graph.as_default():
# predicted value of the model
y_hat = sess.graph.get_tensor_by_name('probs/Softmax:0')
# place holder for the labels
y = tf.placeholder(tf.int64, shape=[None, 1000], name='labels')
# prediction Op
correct_prediction = tf.equal(tf.argmax(y_hat, axis=1), tf.argmax(y, axis=1))
# accuracy Op
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
sess.run(init)
# make sure the learning_phase flag is False (inference mode)
learning_phase = sess.graph.get_tensor_by_name('keras_learning_phase/input:0')
self.assertFalse(sess.run(learning_phase))
input_shape = (1, 224, 224, 3)
# compressing only two layers
modules_to_ignore = list()
all_ops = sess.graph.get_operations()
for op in all_ops:
if op.type == 'Conv2D':
modules_to_ignore.append(op)
del modules_to_ignore[5:7]
greedy_params = aimet_common.defs.GreedySelectionParameters(target_comp_ratio=Decimal(0.7),
num_comp_ratio_candidates=3,
use_monotonic_fit=True,
saved_eval_scores_dict=None)
auto_params | |
# Geographical helper functions for nmea_info.py and friends
#
# Helps with geographic functions, including:
# Lat+Long+Height -> XYZ
# XYZ -> Lat+Long+Height
# Lat+Long -> other Lat+Long (Helmert Transform)
# Lat+Long -> easting/northing (OS GB+IE Only)
# easting/northing -> Lat+Long (OS GB+IE Only)
# OS easting/northing -> OS 6 figure ref
#
# See http://gagravarr.org/code/ for updates and information
#
# GPL
#
# <NAME> - v0.06 (30/05/2007)
import math
# Ellipsoid parameters for each co-ordinate system we handle.
# Each entry is [A (semi-major axis), B (semi-minor axis), E^2];
# E^2 is seeded with -1 and computed just below.
abe_values = {
    'wgs84': [6378137.0, 6356752.3141, -1],
    'osgb': [6377563.396, 6356256.91, -1],
    'osie': [6377340.189, 6356034.447, -1],
}

# The earth's radius, in meters, as taken from an average of the WGS84
# a and b parameters (should be close enough)
earths_radius = (abe_values['wgs84'][0] + abe_values['wgs84'][1]) / 2.0

# Fill in E^2 = (a^2 - b^2) / a^2 for every system.
for _params in abe_values.values():
    _a, _b = _params[0], _params[1]
    _params[2] = (_a * _a - _b * _b) / (_a * _a)
# Helmert transform parameters between the co-ordinate systems we can
# translate between. Each list is [tx, ty, tz, s, rx, ry, rz]; the scale s
# is given as ppm and the rotations as seconds of arc in the source
# material, converted here.
transform_values = {
    'wgs84_to_osgb': [
        -446.448, 125.157, -542.060,
        20.4894 / 1000.0 / 1000.0,  # s, given as ppm
        -0.1502 / 206265.0,         # rx, given as seconds of arc
        -0.2470 / 206265.0,         # ry, given as seconds of arc
        -0.8421 / 206265.0,         # rz, given as seconds of arc
    ],
    'wgs84_to_osie': [
        -482.530, 130.596, -564.557,
        -8.1500 / 1000.0 / 1000.0,  # s, given as ppm
        -1.0420 / 206265.0,         # rx, given as seconds of arc
        -0.2140 / 206265.0,         # ry, given as seconds of arc
        -0.6310 / 206265.0,         # rz, given as seconds of arc
    ],
    'itrs2000_to_etrs89': [
        0.054, 0.051, -0.048, 0,
        0.000081 / 206265.0,        # rx, given as seconds of arc
        0.00049 / 206265.0,         # ry, given as seconds of arc
        0.000792 / 206265.0,        # rz, given as seconds of arc
    ],
}

# The reverse transform is every forward parameter negated.
for _from, _to in (('wgs84', 'osgb'), ('wgs84', 'osie'), ('itrs2000', 'etrs89')):
    _forward = _from + "_to_" + _to
    _reverse = _to + "_to_" + _from
    transform_values[_reverse] = [-1.0 * _v for _v in transform_values[_forward]]
# Easting and Northing system values, for the systems we work with.
# Each list is [n0, e0, F0, theta0, landa0]; angles are in radians.
en_values = {
    'osgb': [
        -100000.0, 400000.0, 0.9996012717,
        49.0 / 360.0 * 2.0 * math.pi,
        -2.0 / 360.0 * 2.0 * math.pi,
    ],
    'osie': [
        250000.0, 200000.0, 1.000035,
        53.5 / 360.0 * 2.0 * math.pi,
        -8.0 / 360.0 * 2.0 * math.pi,
    ],
}

# Cassini Projection Origins.
# Each list is [lat (rad), long (rad), false easting, false northing].
cassini_values = {
    'osgb': [
        (53.0 + (13.0 / 60.0) + (17.274 / 3600.0)) / 360.0 * 2.0 * math.pi,
        -(2.0 + (41.0 / 60.0) + (3.562 / 3600.0)) / 360.0 * 2.0 * math.pi,
        0, 0,
    ],
}

# How many feet to the meter (approximately 3.28083)
feet_per_meter = 1.0 / 0.3048007491
##############################################################
# OS GB Specific Helpers for Generic Methods #
##############################################################
def turn_wgs84_into_osgb36(lat_dec, long_dec, height):
    """Convert WGS84 decimal lat/long/height into OSGB36 lat/long/height.

    See http://www.gps.gov.uk/guide6.asp#6.2 and
    http://www.gps.gov.uk/guide6.asp#6.6 for the calculations, and
    http://www.posc.org/Epicentre.2_2/DataModel/ExamplesofUsage/eu_cs34h.html
    for some background."""
    # lat/long/height -> cartesian, Helmert transform, then back again.
    xyz = turn_llh_into_xyz(lat_dec, long_dec, height, 'wgs84')
    xyz = turn_xyz_into_other_xyz(xyz[0], xyz[1], xyz[2], 'wgs84', 'osgb')
    return turn_xyz_into_llh(xyz[0], xyz[1], xyz[2], 'osgb')
def turn_osgb36_into_wgs84(lat_dec, long_dec, height):
    """Convert OSGB36 decimal lat/long/height into WGS84 lat/long/height.

    See http://www.gps.gov.uk/guide6.asp#6.2 and
    http://www.gps.gov.uk/guide6.asp#6.6 for the calculations, and
    http://www.posc.org/Epicentre.2_2/DataModel/ExamplesofUsage/eu_cs34h.html
    for some background."""
    # lat/long/height -> cartesian, Helmert transform, then back again.
    xyz = turn_llh_into_xyz(lat_dec, long_dec, height, 'osgb')
    xyz = turn_xyz_into_other_xyz(xyz[0], xyz[1], xyz[2], 'osgb', 'wgs84')
    return turn_xyz_into_llh(xyz[0], xyz[1], xyz[2], 'wgs84')
def turn_osgb36_into_eastingnorthing(lat_dec, long_dec):
    """Turn OSGB36 (decimal) lat/long values into OS easting and northing values."""
    grid = 'osgb'
    return turn_latlong_into_eastingnorthing(lat_dec, long_dec, grid)
def turn_eastingnorthing_into_osgb36(easting, northing):
    """Turn OSGB36 easting and northing values into (decimal) lat/long values in OSGB36."""
    grid = 'osgb'
    return turn_eastingnorthing_into_latlong(easting, northing, grid)
##############################################################
# OS IE Specific Helpers for Generic Methods #
##############################################################
def turn_wgs84_into_osie36(lat_dec, long_dec, height):
    """As per turn_wgs84_into_osgb36, but for Irish grid"""
    # lat/long/height -> cartesian, Helmert transform, then back again.
    xyz = turn_llh_into_xyz(lat_dec, long_dec, height, 'wgs84')
    xyz = turn_xyz_into_other_xyz(xyz[0], xyz[1], xyz[2], 'wgs84', 'osie')
    return turn_xyz_into_llh(xyz[0], xyz[1], xyz[2], 'osie')
def turn_osie36_into_wgs84(lat_dec, long_dec, height):
    """As per turn_osgb36_into_wgs84, but for Irish grid"""
    # lat/long/height -> cartesian, Helmert transform, then back again.
    xyz = turn_llh_into_xyz(lat_dec, long_dec, height, 'osie')
    xyz = turn_xyz_into_other_xyz(xyz[0], xyz[1], xyz[2], 'osie', 'wgs84')
    return turn_xyz_into_llh(xyz[0], xyz[1], xyz[2], 'wgs84')
def turn_osie36_into_eastingnorthing(lat_dec, long_dec):
    """Turn OSIE36 (decimal) lat/long values into OS IE easting and northing values."""
    grid = 'osie'
    return turn_latlong_into_eastingnorthing(lat_dec, long_dec, grid)
def turn_eastingnorthing_into_osie36(easting, northing):
    """Turn OSIE36 easting and northing values into (decimal) lat/long values in OSIE36."""
    grid = 'osie'
    return turn_eastingnorthing_into_latlong(easting, northing, grid)
##############################################################
# Generic Transform Functions #
##############################################################
def turn_llh_into_xyz(lat_dec, long_dec, height, system):
    """Convert lat/long (decimal degrees) and height into 3D Cartesian x, y, z.

    See http://www.ordnancesurvey.co.uk/gps/docs/convertingcoordinates3D.pdf

    :param lat_dec: latitude in decimal degrees
    :param long_dec: longitude in decimal degrees
    :param height: ellipsoidal height
    :param system: key into the module-level abe_values table (e.g. 'wgs84')
    :return: [x, y, z] Cartesian coordinates
    """
    # Removed unused local `b` (semi-minor axis); only a and e2 are needed.
    a = abe_values[system][0]
    e2 = abe_values[system][2]
    theta = math.radians(float(lat_dec))
    landa = math.radians(float(long_dec))
    height = float(height)
    # v: prime-vertical radius of curvature at this latitude.
    v = a / math.sqrt(1.0 - e2 * (math.sin(theta) * math.sin(theta)))
    x = (v + height) * math.cos(theta) * math.cos(landa)
    y = (v + height) * math.cos(theta) * math.sin(landa)
    z = ((1.0 - e2) * v + height) * math.sin(theta)
    return [x, y, z]
def turn_xyz_into_llh(x, y, z, system):
    """Convert 3D Cartesian x, y, z into lat/long (decimal degrees) and height.

    See http://www.ordnancesurvey.co.uk/gps/docs/convertingcoordinates3D.pdf

    :param system: key into the module-level abe_values table (e.g. 'wgs84')
    :return: [lat, long, height]
    """
    # Removed unused local `b`; renamed `long` (shadowed the builtin) to lon.
    a = abe_values[system][0]
    e2 = abe_values[system][2]
    p = math.sqrt(x * x + y * y)
    # Bug fix: atan2 handles x == 0 (ZeroDivisionError before) and returns
    # the correct quadrant for western-hemisphere / negative-x points.
    lon = math.atan2(y, x)
    lat_init = math.atan(z / (p * (1.0 - e2)))
    v = a / math.sqrt(1.0 - e2 * (math.sin(lat_init) * math.sin(lat_init)))
    lat = math.atan((z + e2 * v * math.sin(lat_init)) / p)
    height = (p / math.cos(lat)) - v  # Ignore if a bit out
    # Turn from radians back into degrees
    lon = math.degrees(lon)
    lat = math.degrees(lat)
    return [lat, lon, height]
def turn_xyz_into_other_xyz(old_x, old_y, old_z, from_scheme, to_scheme):
    """Helmert transformation between one lat/long system's Cartesian frame
    and another.

    See http://www.ordnancesurvey.co.uk/oswebsite/gps/information/coordinatesystemsinfo/guidecontents/guide6.html
    for the calculations, and http://www.movable-type.co.uk/scripts/LatLongConvertCoords.html
    for a friendlier version with examples. Parameters come from the
    module-level transform_values table, keyed '<from>_to_<to>'.
    """
    params = transform_values[from_scheme + "_to_" + to_scheme]
    tx, ty, tz = params[0], params[1], params[2]
    s = params[3]
    rx, ry, rz = params[4], params[5], params[6]
    # Apply translation, scale and (small-angle) rotation in one step.
    new_x = tx + ((1.0 + s) * old_x) + (-rz * old_y) + (ry * old_z)
    new_y = ty + (rz * old_x) + ((1.0 + s) * old_y) + (-rx * old_z)
    new_z = tz + (-ry * old_x) + (rx * old_y) + ((1.0 + s) * old_z)
    return [new_x, new_y, new_z]
def calculate_distance_and_bearing(from_lat_dec, from_long_dec, to_lat_dec, to_long_dec):
    """Use the spherical law of cosines to find the distance and bearing
    between two lat/long positions (decimal degrees).

    :return: [distance (units of the module-level earths_radius), bearing in degrees]
    """
    # Turn them all into radians
    from_theta = math.radians(float(from_lat_dec))
    from_landa = math.radians(float(from_long_dec))
    to_theta = math.radians(float(to_lat_dec))
    to_landa = math.radians(float(to_long_dec))
    cos_angle = (
        math.sin(from_theta) * math.sin(to_theta) +
        math.cos(from_theta) * math.cos(to_theta) * math.cos(to_landa - from_landa)
    )
    # Bug fix: float rounding can push cos_angle fractionally outside
    # [-1, 1] for identical (or antipodal) points, making acos raise
    # ValueError. Clamp before taking the arc cosine.
    cos_angle = max(-1.0, min(1.0, cos_angle))
    d = math.acos(cos_angle) * earths_radius
    bearing = math.atan2(
        math.sin(to_landa - from_landa) * math.cos(to_theta),
        math.cos(from_theta) * math.sin(to_theta) -
        math.sin(from_theta) * math.cos(to_theta) * math.cos(to_landa - from_landa)
    )
    bearing = math.degrees(bearing)
    return [d, bearing]
##############################################################
# Easting/Northing Transform Methods #
##############################################################
def turn_latlong_into_eastingnorthing(lat_dec, long_dec, scheme):
"""Turn OSGB36 or OSIE36 (decimal) lat/long values into OS easting and northing values. See http://www.ordnancesurvey.co.uk/oswebsite/gps/information/coordinatesystemsinfo/guidecontents/guide7.html for the calculations, and http://www.posc.org/Epicentre.2_2/DataModel/ExamplesofUsage/eu_cs34h.html for some background."""
n0 = en_values[scheme][0]
e0 = en_values[scheme][1]
f0 = en_values[scheme][2]
theta0 = en_values[scheme][3]
landa0 = en_values[scheme][4]
a = abe_values[scheme][0]
b = abe_values[scheme][1]
e2 = abe_values[scheme][2]
theta = float(lat_dec) / 360.0 * 2.0 * math.pi
landa = float(long_dec) / 360.0 * 2.0 * math.pi
n = (a - b) / (a + b)
v = a * f0 * math.pow((1 - e2 * math.sin(theta) * math.sin(theta)), -0.5)
ro = a * f0 * (1 - e2) * math.pow((1 - e2 * math.sin(theta) * math.sin(theta)), -1.5)
nu2 = v / ro - 1
M = b * f0 * (\
(1.0 + n + 5.0 / 4.0 * n * n + 5.0 / 4.0 * n * n * n) * (theta - theta0) - \
(3.0 * n + 3.0 * n * n + 21.0 / 8.0 * n * n * n) * math.sin(theta - theta0) * math.cos(theta + theta0) + \
(15.0 / 8.0 * n * n + 15.0 / 8.0 * n * n * n) * math.sin(2.0 * (theta - theta0)) * math.cos(2.0 * (theta + theta0)) - \
35.0 / 24.0 * n * n * n * math.sin(3.0 * (theta - theta0)) * math.cos(3.0 * (theta + theta0)) \
)
I = M + n0
II = v / 2.0 * math.sin(theta) * math.cos(theta)
III = v / 24.0 * math.sin(theta) * math.pow(math.cos(theta), 3) * \
(5.0 - math.pow(math.tan(theta), 2) + 9.0 * nu2)
IIIa = v / 720.0 * math.sin(theta) * math.pow(math.cos(theta), 5) * \
(61.0 - 58.0 * math.pow(math.tan(theta), 2) + math.pow(math.tan(theta), 4))
IV = v * math.cos(theta)
V = | |
# run_semgrep.py
#!/usr/bin/env python3
# -*- coding: future_fstrings -*-
import pprint
import subprocess
import configparser
import os
import shutil
import json
import hashlib
import time
import argparse
import sys
import requests
import re
import glob
import datetime
from pathlib import Path
import webhooks
import comparison
import aws.upload_to_s3 as s3
# Runtime configuration: use the test config in the "snow-test"
# environment, otherwise the production config.
env = os.getenv("env")
CONFIG = configparser.ConfigParser()
if env != "snow-test":
    CONFIG.read('config.cfg')
else:
    CONFIG.read('config-test.cfg')
# Global Variables
global_exit_code = 0  # process-wide exit code, set via set_exit_code()
SNOW_ROOT = os.getenv('PWD')
# Allow the scan root to be overridden for local runs.
if CONFIG['general']['run_local_semgrep'] != "False":
    SNOW_ROOT = CONFIG['general']['run_local_semgrep']
# Derived directory layout (config paths are appended to the root).
LANGUAGES_DIR = SNOW_ROOT + CONFIG['general']['languages_dir']
RESULTS_DIR = SNOW_ROOT + CONFIG['general']['results']
REPOSITORIES_DIR = SNOW_ROOT + CONFIG['general']['repositories']
commit_head_env = CONFIG['general']['commit_head']
artifact_dir_env = CONFIG['general']['artifact_dir']
github_enterprise_url = CONFIG['general']['github_enterprise_url']
github_com_url = CONFIG['general']['github_com_url']
org_name = CONFIG['general']['org_name']
ghe_org_name = CONFIG['general']['ghe_org_name']
# Repos that are forks of upstream projects; scans on these are reduced
# to the diff against the upstream base (see git_forked_repos).
with open(f"{SNOW_ROOT}/{CONFIG['general']['forked_repos']}") as file:
    FORKED_REPOS = json.load(file)
file.close()  # NOTE(review): redundant — the with-block already closed it
print_text = CONFIG['general']['print_text']
# Alert message templates used when reporting findings.
high_alert_text = CONFIG['alerts']['high_alert_text']
banner = CONFIG['alerts']['banner']
normal_alert_text = CONFIG['alerts']['normal_alert_text']
no_vulns_text = CONFIG['alerts']['no_vulns_text']
errors_text = CONFIG['alerts']['errors_text']
def clean_workspace():
    """
    If results are persisted between runs, this method
    cleans up the results dir and ensures both working dirs exist.
    """
    print('[+] Begin workspace cleanup')
    # Idiom fix: octal literal instead of int('775', base=8); same value.
    mode = 0o775
    os.makedirs(RESULTS_DIR, mode=mode, exist_ok=True)
    clean_results_dir()
    os.makedirs(REPOSITORIES_DIR, mode=mode, exist_ok=True)
    print('[+] End workspace cleanup')
def set_exit_code(code):
    """Record *code* as the process-wide exit code for this run."""
    global global_exit_code
    global_exit_code = code
def clean_results_dir():
    """
    Removes all result files but the most recent 3 per enabled repo.
    """
    # Collect every file currently in the results directory.
    paths = []
    for path in Path(RESULTS_DIR).iterdir():
        paths.append(RESULTS_DIR + path.name)
    # Oldest first, so the slice below keeps the newest three.
    paths = sorted(paths, key=os.path.getmtime)
    repos = get_repo_list()
    for repo in repos:
        # NOTE(review): substring match — a repo name that is a prefix of
        # another (e.g. "api" vs "api-gateway") selects both; confirm intended.
        selected_paths = [x for x in paths if f"{repo}" in str(x)]
        if len(selected_paths) > 3:
            # Delete everything except the three most recent results.
            for file in selected_paths[:-3]:
                try:
                    os.remove(file)
                except FileNotFoundError:
                    print(f"[!!] Cannot clean result file. File not found! {file}")
                    continue
def get_repo_list():
    """Return the enabled repository names across every configured language.

    Each 'language-*' section of the config points at a directory whose
    enabled file lists one repository per line.
    """
    enabled_filename = set_enabled_filename()
    repos = []
    for section in CONFIG.sections():
        if 'language-' not in section:
            continue
        listing = f"{LANGUAGES_DIR}{CONFIG[section]['language']}/{enabled_filename}"
        with open(listing) as handle:
            repos.extend(handle.read().splitlines())
    return repos
def get_docker_image(mode=None):
    """
    Downloads docker images and compares the digests
    If mode = version, checks if semgrep has an update available
    and returns 1 if so
    """
    version = CONFIG['general']['version']
    digest = CONFIG['general']['digest']
    download_semgrep(version)
    print("[+] Verifying Semgrep")
    # check_digest returns str.find() semantics: -1 when the inspected
    # image digest is NOT found inside the configured digest string.
    digest_check_scan = check_digest(digest, version)
    if mode == "version":
        download_semgrep("latest")
        digest_check_update = check_digest(digest, "latest")
        if digest_check_update == -1:
            # 'latest' digest differs from the pinned one -> update exists.
            print("[!!] A new version of semgrep is available.")
            return 1
        else:
            print("[+] Semgrep is up to date.")
            return 0
    else:
        # NOTE(review): this condition looks inverted relative to the update
        # path above — here a *match* (find() != -1) raises "Digest mismatch"
        # while a non-match passes verification. Confirm the intended pinning
        # behavior before relying on this check.
        if digest_check_scan != -1:
            raise Exception("[!!] Digest mismatch!")
        print("[+] Semgrep downloaded and verified")
def download_semgrep(version):
    """Pull the given Semgrep image tag from Docker Hub."""
    print(f"[+] Downloading Semgrep {version}")
    run_command(f"docker pull returntocorp/semgrep:{version}")
def check_digest(digest, version):
command = (
f"docker inspect --format='{{.RepoDigests}}' returntocorp/semgrep:{version}"
)
process = run_command(command)
return digest.find((process.stdout).decode("utf-8"))
def run_command(command):
    """Run *command* through the shell and return the CompletedProcess.

    check=True raises CalledProcessError on a non-zero exit status;
    stderr is merged into stdout so callers read a single stream.
    """
    options = dict(
        shell=True,
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return subprocess.run(command, **options)
def git_pull_repo(repo_path):
    """
    Update a local clone, recovering from force pushes and headless state.

    When "git pull" fails it's sometimes because there was a force push
    done at some point to the repo: the pull fails because we have local
    commits that don't exist in the remote. We fix that by hard-resetting
    to the remote's default branch. A pull can also fail if we're in a
    headless state; the checkout below fixes this.
    """
    symref_process = run_command(
        f"git -C {repo_path} remote show origin | sed -n '/HEAD branch/s/.*: //p'"
    )
    # Strip the trailing newline captured from the shell pipeline.
    default_branch = symref_process.stdout.decode("utf-8").strip()
    try:
        run_command(f"git -C {repo_path} checkout {default_branch}")
        run_command(f"git -C {repo_path} pull")
    except subprocess.CalledProcessError:
        # Bug fix: the original referenced an undefined name `main_branch`
        # here, so this recovery path always raised NameError. Also narrowed
        # the bare except to the error run_command actually raises.
        run_command(f"git -C {repo_path} reset --hard origin/{default_branch}")
        run_command(f"git -C {repo_path} pull")
def git_ops(repo):
    """Clone *repo* if absent, otherwise update the existing checkout."""
    repo_path = f"{REPOSITORIES_DIR}{repo}"
    # Strip the https scheme so the clone URL can be rebuilt in ssh form.
    git_url = set_github_url().split('https://')[1]
    # NOTE(review): `git` is not defined in this function or visibly at
    # module level — presumably a global set from CLI args elsewhere in
    # the file ('ghe' selecting GitHub Enterprise). Confirm.
    org = ghe_org_name if git == 'ghe' else org_name
    git_repo = f"git@{git_url}:{org}/{repo}.git"
    if os.path.isdir(f"{repo_path}"):
        print(f"[+] Updating repo: {repo}")
        git_pull_repo(repo_path)
    else:
        clone_command = f"git -C {REPOSITORIES_DIR} clone {git_repo}"
        clone = run_command(clone_command)
        print(clone.stdout.decode("utf-8"))
def git_forked_repos(repo, language, git_sha, git_repo_url):
    """
    Reduce the scan of a forked repo to its custom commits only.

    Finds the upstream commit the fork diverged from, scans at that commit,
    and overwrites the current results with the diff between the two scans.

    :param repo: repository name
    :param language: language name (a 'language-' prefix is tolerated)
    :param git_sha: HEAD commit of our fork
    :param git_repo_url: git server base URL, passed through to scan_repo
    """
    repo_path = f"{REPOSITORIES_DIR}{repo}"
    repo_language = language.replace("language-", "")
    # Set up the upstream repo as a remote named 'forked' (idempotent).
    forked_repo = FORKED_REPOS[repo]
    print(f"[+] Repository is forked from {forked_repo}.")
    command = (
        f"git -C {repo_path} remote | grep -q '^forked$' || "
        f"git -C {repo_path} remote add forked {forked_repo}"
    )
    run_command(command)
    run_command(f"git -C {repo_path} fetch forked")
    # Get the remote "master" branch name (not always "master")
    cmd = f"git -C {repo_path} remote show forked | sed -n '/HEAD branch/s/.*: //p'"
    symref_process = run_command(cmd)
    remote_master_name = symref_process.stdout.decode("utf-8")
    # Identify the commit id it was forked from
    cmd = f"git -C {repo_path} merge-base {git_sha} forked/{remote_master_name}"
    merge_base_process = run_command(cmd)
    forked_commit_id = merge_base_process.stdout.decode("utf-8").strip()
    print(
        f"[+] Using the commit id {forked_commit_id} as the "
        "commit the repo is forked from."
    )
    # In this special case, we haven't pushed any custom code into the forked
    # repo as the HEAD of the repo exists in the repo we forked it from.
    # Note: startswith is used in case the git_sha is a shortened commit hash.
    if forked_commit_id.startswith(git_sha):
        print(
            f"[+] We have detected that this repository doesn't contain any custom"
            f" commits. Returning no findings because of this."
        )
        for suffix in ["", "-fprm"]:
            output = f"{RESULTS_DIR}{repo_language}-{repo}-{forked_commit_id[:7]}{suffix}.json"
            # This will remove all the entries in the results but keeps the metadata about the scan.
            # While this is odd code, it will ensure the output is consistent with other scan results.
            if os.path.exists(output):
                comparison.compare_to_last_run(output, output, output)
        return
    scan_repo(repo, language, git_repo_url, forked_commit_id)
    # Compare the results and overwrite the original result with the comparison result
    for suffix in ["", "-fprm"]:
        file_prefix = f"{RESULTS_DIR}{repo_language}-{repo}-"
        # Bug fix: the original omitted file_prefix here, so the forked-base
        # result was looked up relative to the CWD and never found, and the
        # diff against the upstream base was silently skipped.
        forked_output = f"{file_prefix}{forked_commit_id[:7]}{suffix}.json"
        new_output = f"{file_prefix}{git_sha[:7]}{suffix}.json"
        if os.path.exists(forked_output):
            comparison.compare_to_last_run(forked_output, new_output, new_output)
            os.remove(forked_output)
def download_repos():
    """
    Download or update every repo listed in the enabled files.
    """
    # Removed unused local: the original bound set_github_url() to a
    # variable that was never read (git_ops derives the URL itself).
    for repo in get_repo_list():
        git_ops(repo)
def scan_repos():
    """
    Iterates over all repos in the enabled files: scans each with Semgrep,
    removes known false positives, and compares against the previous run.
    """
    repos = get_repo_list()
    for repo in repos:
        language = find_repo_language(repo)
        # Get the default branch name of this repo (informational).
        # Bug fix: the original ran `git remote show origin` without -C, so
        # it reported the default branch of the CWD, not of the target repo.
        cmd = (
            f"git -C {REPOSITORIES_DIR}{repo} remote show origin "
            "| grep 'HEAD branch' | sed 's/.*: //'"
        )
        default_branch_name = run_command(cmd).stdout.decode('utf-8')
        print(f"[+] Default branch name: {default_branch_name.strip()}")
        get_sha_process = run_command(f"git -C {REPOSITORIES_DIR}{repo} rev-parse HEAD")
        git_sha = get_sha_process.stdout.decode("utf-8").rstrip()
        git_repo_url = set_github_url()
        # Scan the repo and perform the comparison.
        results, output_file = scan_repo(repo, language, git_repo_url, git_sha)
        process_results(output_file)
        # Special repos are repos that are forked from open-source libraries
        # or projects. For those repos, the results that we must consider are
        # the diff between our current version and the original version.
        if repo in FORKED_REPOS:
            git_forked_repos(repo, language, git_sha, git_repo_url)
def add_metadata(repo, language, git_repo_url, git_sha, output_file):
    """
    Adds scan metadata (repo, language, commit, timestamp) and a per-finding
    hash_id to a scan result JSON file, rewriting it in place.
    """
    output_file_path = f"{RESULTS_DIR}{output_file}"
    print(f"[+] Opening {output_file_path}")
    # Read, amend, then rewrite the result file. (Removed a redundant
    # file.close() inside the with-block and an unused local.)
    with open(output_file_path, 'r') as file:
        data = json.load(file)
    data.update({
        "metadata": {
            "GitHubRepo": git_repo_url,
            "branch": git_sha,
            "repoName": repo,
            "language": language,
            "timestamp": datetime.datetime.now(
                tz=datetime.timezone.utc
            ).isoformat(),
        }
    })
    with open(output_file_path, 'w') as file:
        json.dump(data, file, sort_keys=True, indent=4)
    if os.path.exists(output_file_path):
        add_hash_id(output_file_path, 4, 1, "hash_id")
def process_results(output_file):
    """
    Strip known false positives from a scan result and compare it against
    the previous run for the same repo.

    :param output_file: result file name, '<language>-<repo>-<sha7>.json'
    """
    output_file_path = f"{RESULTS_DIR}{output_file}"
    # Bug fix: the original used output_file.split('-') with fixed indices,
    # which (a) broke for repo names containing '-', and (b) left '.json'
    # glued onto the sha, producing '...-<sha7>.json-fprm.json' filenames
    # inconsistent with the '<prefix><sha7>-fprm.json' names expected by
    # git_forked_repos.
    stem = output_file[:-len('.json')] if output_file.endswith('.json') else output_file
    language, rest = stem.split('-', 1)
    repo, git_sha_short = rest.rsplit('-', 1)
    # Note: "fprm" stands for false positives removed
    fp_diff_outfile = f"{language}-{repo}-{git_sha_short}-fprm.json"
    fp_diff_file_path = RESULTS_DIR + fp_diff_outfile
    fp_file = (
        f"{SNOW_ROOT}/languages/{language}/false_positives/{repo}_false_positives.json"
    )
    # Remove false positives from the results.
    if os.path.exists(output_file_path):
        comparison.remove_false_positives(output_file_path, fp_file, fp_diff_file_path)
    # Sort result files by most recent and diff against the second most
    # recent '-fprm' result, if one exists.
    selected_paths = list(glob.glob(f"{RESULTS_DIR}{language}-{repo}-*-fprm.json"))
    selected_paths = sorted(selected_paths, key=os.path.getmtime)
    comparison_result = f"{fp_diff_file_path.split('-fprm')[0]}-comparison.json"
    print(f"[+] Comparison result is stored at: {comparison_result}")
    if len(selected_paths) >= 2:
        old = selected_paths[-2]
        print(f"[+] Old file is: {old}")
        print(f"[+] Comparing {old} and {fp_diff_outfile}")
        comparison.compare_to_last_run(old, fp_diff_file_path, comparison_result)
    else:
        print("[!!] Not enough runs for comparison")
def scan_repo(repo, language, git_repo_url, git_sha):
    """
    Scans the repo with semgrep and adds metadata
    Returns the results and output file path
    """
    print(f'[+] Scanning repo: {repo}')
    configlanguage = f"language-{language}"
    output_file = f"{language}-{repo}-{git_sha[:7]}.json"
    # Run semgrep inside the pinned docker image, mounting SNOW_ROOT as /src
    # so both the repo checkout and the results dir are visible in-container.
    semgrep_command = (
        "docker run --user \"$(id -u):$(id -g)\" --rm "
        f"-v {SNOW_ROOT}:/src returntocorp/semgrep:{CONFIG['general']['version']} "
        f"{CONFIG[configlanguage]['config']} "
        f"{CONFIG[configlanguage]['exclude']} "
        "--json --dangerously-allow-arbitrary-code-execution-from-rules "
        f"-o /src{CONFIG['general']['results']}{output_file} "
        f"{CONFIG['general']['repositories'][1:]}{repo}"
    )
    print(f"[+] Docker scan command:\n {semgrep_command}")
    print(f"[+] Running Semgrep")
    # Not using run_command here because we want to ignore the exit code of semgrep.
    process = subprocess.run(semgrep_command, shell=True, stdout=subprocess.PIPE)
    results = process.stdout.decode("utf-8")
    # NOTE(review): `git` is not defined in this function or visibly at
    # module level — presumably a global set from CLI args elsewhere in the
    # file ('ghc' likely meaning github.com). Confirm.
    if git != 'ghc':
        print("[+] Semgrep scan results:")
        if print_text == "true":
            print(results)
    add_metadata(repo, language, git_repo_url, git_sha, output_file)
    return results, output_file
def read_line(issue_file, line, start_line, end_line):
"""
Grab source code. Include x lines above and x lines below
the issue location
"""
with open(issue_file) as f:
content = | |
some effort.
# Ascertain fitness
# First iteration just write the values
if not self.fitness:
self.fitness = newFitness
self.essPPaBest = self.essPPa
self.essEPaBest = self.essEPa
# Random next guess: The power level can be chose freely between the previously determined bounds.
self.essPPa = int(self.minESSPPa + (self.maxESSPPa - self.minESSPPa) * np.random.random_sample())
# The energy capacity must meet at least the minimum duration requirement, and cannot exceed the maximum.
self.essEPa = float(self.essPPa * (self.minESSEPa / self.minESSPPa) +
(self.maxESSEPa - (self.essPPa * (
self.minESSEPa / self.minESSPPa))) * np.random.random_sample())
print(['Iteration: '+ str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Set the improvement tracker
lastImprovement = 1
# Other iterations check if fitness has improved (that is, has gotten smaller!!!)
elif newFitness < self.fitness:
self.fitness = newFitness
self.essPPaBest = self.essPPa
self.essEPaBest = self.essEPa
self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx)
print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Reset the improvement tracker
lastImprovement = 1
# Lastly if nothing has improved search again in the previously defined range.
else:
# Widen the random number deviation
self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx/lastImprovement) #np.sqrt(lastImprovement + 1))
# Increment the improvement tracker
lastImprovement = lastImprovement + 1
# If there's no improvement after X iterations in a row, terminate the algorithm.
# NOTE this can mean two things, either that we have achieved convergence, or that we're stuck somewhere
if lastImprovement > convergenceRepeatNum:
convergenceFlag = True
print('*********************************')
print('Terminated at Iteration: ' + str(iterIdx) + ' with fitness: ' + str(self.fitness))
print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
# Additional logging
fitnessLog['bestFitness'].loc[iterIdx] = self.fitness
fitnessLog['bestP'].loc[iterIdx] = self.essPPaBest
fitnessLog['bestE'].loc[iterIdx] = self.essEPaBest
self.fl = fitnessLog
def getNextGuess(self, fl, pBest, eBest, iterNumParam):
    '''
    This method determines the next values for `essPPa` and `essEPa` that are to be tested in an iteration of the
    hill climber. It uses the historical fitness values from previous iterations and determines the direction of the
    steepest gradient away from the best fitness value. It then biases the random selection for new power and energy
    capacity values in the _opposite_ direction of the steepest gradient with the hope that this is the most likely
    direction to find a better value pair at. If new selections are outside of the constraints put on the search
    space, i.e., maximum and minimum power and energy capacities, and/or minimum duration (at the essPPa selected),
    it corrects selections back to the edges of the search envelope as set by the constraints.
    If the more iterations in the past the best found fitness lies, the stronger the random element in picking new
    values. The idea being that the algorithm might be stuck and larger jumps might get it unstuck.
    **Note:** this approach to a hill climber was tested with several test functions
    (found in getFitness.py->getTestFitness). With these test functions the algorithm generally converges well.
    The caveat is, that recent results seem to suggest that the actual search space for the optimal GBS may not be
    smooth, while the test cases used smooth test functions. This should be investigated further.
    :param fl: fitnessLog
    :param pBest: essPPaBest: current best power guess for GBS
    :param eBest: essEPaBest: current best energy guess for GBS
    :param iterNumParam: [float] parameter describing the randomness of the next value pair selection, fraction of
        iteration number and count since the last improved fitness value was found.
    :return: newESSPPa, newESSEPa: [float] new pair of energy and power capacities to run the next iteration with
    '''
    # Reduce the data in fl to the necessary columns and usable values
    fl = fl[['fitness', 'essPPa', 'essEPa']]
    fl = fl.dropna()
    # Parameter used to adjust variability/randomization of next guess
    # TODO make adjustable parameter
    exponent = 0.5
    # Calculate distance from best point
    fl['Dist'] = pd.Series(np.sqrt(list(np.asarray(fl['essPPa'] - pBest)**2 + np.asarray(fl['essEPa'] - eBest)**2)))
    fl = fl.sort_values('Dist')
    # After sorting, the first row (distance 0) is the best point itself.
    originFitness = fl['fitness'].iloc[0]
    originP = fl['essPPa'].iloc[0]
    originE = fl['essEPa'].iloc[0]
    print('Origin P: ' + str(originP) + ', Origin E: ' + str(originE))
    fl = fl[fl.Dist != 0]
    # Fitness slope from the best point toward every other sampled point.
    fl['Slope'] = (fl['fitness'] - originFitness)/fl['Dist']
    # Get the difference in power-coordinate DOWN the steepest gradient of the four nearest neighbors
    # NOTE(review): the [0:2] slice below inspects the two nearest
    # neighbors, not four as the comment says — confirm which is intended.
    if fl.shape[0] == 1:
        maxSlopeIdx = fl['Slope'].astype(float).index[0]
    elif fl.shape[0] < 3:
        maxSlopeIdx = fl['Slope'].astype(float).idxmax()
    else:
        maxSlopeIdx = fl['Slope'][0:2].astype(float).idxmax()
    dx = fl['essPPa'][maxSlopeIdx] - originP
    # Step in the opposite direction of the steepest ascent.
    newCoord = originP - dx
    # Get random down and up variations from the power-coordinate; the
    # randomness shrinks as iterNumParam grows (via the exponent).
    rndDown = (newCoord - self.minESSPPa) * np.random.random_sample()/iterNumParam**exponent
    rndUp = (self.maxESSPPa - newCoord)*np.random.random_sample()/iterNumParam**exponent
    newESSPPa = float(newCoord - rndDown + rndUp)
    # Check constraints
    if newESSPPa < self.minESSPPa:
        newESSPPa = self.minESSPPa
    elif newESSPPa > self.maxESSPPa:
        newESSPPa = self.maxESSPPa
    # Get a random new value of energy storage capacity
    # Get the difference in power-coordinate DOWN the steepest gradient
    #maxSlopeIdx = fl.index[1]
    dy = fl['essEPa'][maxSlopeIdx] - originE
    newCoordY = originE - dy
    # Get random down and up variations from the power-coordinate
    # Note that ess needs to meet minimum duration requirement, so the minimum size is constraint by the currently
    # selected power level.
    currentESSEMin = newESSPPa * (self.minESSEPa/self.minESSPPa)
    rndDown = (newCoordY - currentESSEMin) * np.random.random_sample() / iterNumParam**exponent
    rndUp = (self.maxESSEPa - newCoordY) * np.random.random_sample() / iterNumParam**exponent
    newESSEPa = float(newCoordY - rndDown + rndUp)
    # Check constraints
    if newESSEPa < currentESSEMin:
        newESSEPa = currentESSEMin
    elif newESSEPa > self.maxESSEPa:
        newESSEPa = self.maxESSEPa
    return newESSPPa, newESSEPa
def setupSet(self, iterIdx, setIdx, identifier, eesIdx, eesPPa, eesEPa, startTimeIdx, endTimeIdx):
'''
Generates the specific projectSetAttributes.xml file, and the necessary folder in the project's output folder.
Returns the name of the specific set and it's absolute path. Set naming follows the convention of
'Set[iterationNumber].[snippetNumber].[currentUNIXEpoch]', where iterationNumber is the current iteration of the
of the optimizer, snippetNumber is the numerical identifier of the abbreviated data snippet, and the
currentUNIXEpoch is the current local machine unix time to the second in int format.
:param iterIdx: [int] current iteration of optimization algorithm
:param setIdx: [int] numerical identifier of the snippet of time-series to be run here.
:param identifier: [int] current local machine UNIX time to the second, could be any other integer
:param eesIdx: [int] index of the ees to be added to the system, e.g., ees0. This is necessary should the system
already have an ees that is not part of the optimization.
:param eesPPa: [float] nameplate power capacity of the ees, assumed to be symmetrical in and out.
:param eesEPa: [float] nameplate energy capacity of the ees, necessary to calculate ratedDuration, which is the
actual parameter used in the setup.
:param startTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data starts
that is to be run here.
:param endTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data ends that
is to be run here.
:return setPath: [os.path] path to the set folder
:return setName: [String] name of the set
'''
# Get the current path to avoid issues with mkdir
here = os.path.dirname(os.path.realpath(__file__))
# * Create the 'SetAttributes' file from the template and the specific information given
# Load the template
setAttributeTemplatePath = os.path.join(here, '../GBSModel/Resources/Setup/projectSetAttributes.xml')
setAttributeTemplateFile = open(setAttributeTemplatePath, 'r')
setAttributeTemplateFileXML = setAttributeTemplateFile.read()
setAttributeTemplateFile.close()
setAttributeSoup = bs(setAttributeTemplateFileXML, 'xml')
# Write the project name
setAttributeSoup.project['name'] = self.projectName
# Write the power levels and duration
compNameVal = 'ees' + str(eesIdx) + ' ees' + str(eesIdx) + ' ees' + str(eesIdx)
compTagVal = 'PInMaxPa POutMaxPa ratedDuration'
compAttrVal = 'value value value'
rtdDuration = int(3600*(eesEPa/eesPPa))
compValueVal = str(eesPPa) + ' PInMaxPa.value ' + str(rtdDuration)
setAttributeSoup.compAttributeValues.compName['value'] = compNameVal
setAttributeSoup.compAttributeValues.find('compTag')['value'] = compTagVal # See issue 99 for explanation
setAttributeSoup.compAttributeValues.compAttr['value'] = compAttrVal
setAttributeSoup.compAttributeValues.compValue['value'] = | |
if position == 'bottom':
x = window[2] + loc[1][0] + int(w/2) - 250
y = window[0] + loc[0][0] - 250
return x,y
def template_match(grayscale_unit8_image_array, template_file):
    """Locate a fiducial template within a grayscale image.

    Loads the template, converts it to grayscale, linearly stretches it,
    adds noise via hsfm.core.noisify_template, then runs normalized
    cross-correlation template matching.

    Returns (loc, w, h, res): peak location(s), template width and height,
    and the raw correlation map.
    """
    image = grayscale_unit8_image_array
    tmpl = cv2.cvtColor(cv2.imread(template_file), cv2.COLOR_BGR2GRAY)
    tmpl = hsfm.image.img_linear_stretch(tmpl)
    tmpl = hsfm.core.noisify_template(tmpl)
    w, h = tmpl.shape[::-1]
    res = cv2.matchTemplate(image, tmpl, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res == res.max())
    return loc, w, h, res
def noisify_template(template, threshold=50):
    """Replace bright template pixels with uniform random noise, in place.

    Pixels strictly brighter than *threshold* are overwritten with random
    values in [0, 256); darker pixels are left untouched. The input array
    is modified in place and also returned.

    :param template: 2D grayscale image array
    :param threshold: brightness cutoff above which pixels are randomized
        (default 50, the previously hard-coded value)
    """
    mask = template > threshold
    rand = np.random.randint(0, 256, size=template.shape)
    template[mask] = rand[mask]
    return template
def determine_principal_point(left_fiducial, top_fiducial, right_fiducial, bottom_fiducial):
    """Estimate the principal point from the four fiducial coordinates.

    Each fiducial is an (x, y) pair; the result is the (x, y) mean of the
    four points, computed as a mean of pairwise means exactly as before.
    """
    fiducials = np.array(
        [left_fiducial, top_fiducial, right_fiducial, bottom_fiducial],
        dtype=float,
    )
    xs = fiducials[:, 0]
    ys = fiducials[:, 1]
    px = xs.reshape(-1, 2).mean(axis=0).mean()
    py = ys.reshape(-1, 2).mean(axis=0).mean()
    return (px, py)
def crop_about_principal_point(grayscale_unit8_image_array,
                               principal_point,
                               crop_from_pp_dist = 11250):
    """Crop a square window of side crop_from_pp_dist centered on the
    principal point, then CLAHE-equalize and linearly stretch it."""
    half = crop_from_pp_dist / 2
    left = int(principal_point[0] - half)
    right = int(principal_point[0] + half)
    top = int(principal_point[1] - half)
    bottom = int(principal_point[1] + half)
    window = grayscale_unit8_image_array[top:bottom, left:right]
    window = hsfm.image.clahe_equalize_image(window)
    return hsfm.image.img_linear_stretch(window)
def move_match_files_in_sequence(bundle_adjust_directory,
                                 image_prefix,
                                 stereo_directory,
                                 sequence):
    """Copy cleaned .match files for consecutive image pairs into the
    stereo directory.

    Walks the sorted '*-clean.match' files and copies the one matching the
    current (prefix+i, prefix+j) image pair, advancing the pair indices.
    NOTE(review): indentation of the i/j increments was ambiguous in the
    source paste — reconstructed as advancing per copied pair; confirm.
    """
    i = sequence[0]
    j = i+1
    hsfm.io.create_dir(stereo_directory)
    match_files = sorted(glob.glob(os.path.join(bundle_adjust_directory,'*-clean.match')))
    for match_file in match_files:
        # Copy only the file that names both images of the current pair.
        if image_prefix + str(i) in match_file and image_prefix + str(j) in match_file:
            path, name, extension = hsfm.io.split_file(match_file)
            out = os.path.join(stereo_directory, name+ extension)
            shutil.copyfile(match_file, out)
            i = i+1
            j = i+1
    print('Match files copied to',stereo_directory)
    # NOTE(review): result list is computed but never returned (return is
    # commented out) — dead code kept as-is.
    new_match_files = sorted(glob.glob(os.path.join(stereo_directory,'*.match')))
    # return new_match_files
def move_camera_files_in_sequence(bundle_adjust_directory,
                                  image_prefix,
                                  stereo_directory,
                                  sequence,
                                  extension='.tsai'):
    """Copy camera model files (default .tsai) into the stereo directory.

    NOTE(review): `image_prefix` is unused and `sequence` only seeds i/j
    which are never read — every camera file is copied. Confirm whether
    pair filtering (as in move_match_files_in_sequence) was intended.
    """
    i = sequence[0]
    j = i+1
    hsfm.io.create_dir(stereo_directory)
    camera_files = sorted(glob.glob(os.path.join(bundle_adjust_directory,'*'+ extension)))
    for camera_file in camera_files:
        # NOTE(review): this rebinds the `extension` parameter to the current
        # file's own extension — harmless after the glob above has run, but
        # confusing; flagged for cleanup.
        path, name, extension = hsfm.io.split_file(camera_file)
        out = os.path.join(stereo_directory, name + extension)
        shutil.copyfile(camera_file, out)
    print('Camera files copied to', stereo_directory)
    # NOTE(review): result list is computed but never returned (return is
    # commented out) — dead code kept as-is.
    new_camera_files = sorted(glob.glob(os.path.join(stereo_directory,'*'+ extension)))
    # return new_camera_files
def metashape_cameras_to_tsai(project_file_path,
                              original_metashape_metadata,
                              image_extension = '.tif'):
    """Export estimated camera positions from a Metashape project as ASP
    .tsai pinhole camera files.

    Cameras Metashape failed to align (NaN rows) fall back to the original
    input metadata. Positions are converted from lon/lat/alt to ECEF
    (EPSG:4978); rotation is written as identity.

    :return: directory the .tsai files were written to
    """
    output_directory = os.path.join(os.path.dirname(project_file_path),'metashape_cameras')
    hsfm.io.create_dir(output_directory)
    metashape_export = hsfm.metashape.get_estimated_camera_centers(project_file_path)
    images, lons, lats, alts, yaws, pitches, rolls, omegas, phis, kappas = metashape_export
    images = [s + image_extension for s in images]
    # Renamed from `dict` — the original shadowed the builtin.
    camera_data = {'image_file_name': images,
                   'lon': lons,
                   'lat': lats,
                   'alt': alts,
                   'lon_acc': 100,
                   'lat_acc': 100,
                   'alt_acc': 100,
                   'yaw': yaws,
                   'pitch': pitches,
                   'roll': rolls,
                   'yaw_acc': 30,
                   'pitch_acc': 30,
                   'roll_acc': 30,
                   'omega': omegas,
                   'phi': phis,
                   'kappa': kappas}
    df = pd.DataFrame(camera_data)
    metashape_metadata_df = pd.read_csv(original_metashape_metadata)
    # Cameras Metashape could not align come back with NaNs in some column.
    unaligned_cameras = df[df.isnull().any(axis=1)]['image_file_name'].values
    # replace unaligned cameras with values from original input metadata
    for i in unaligned_cameras:
        df[df['image_file_name'].str.contains(i)] = metashape_metadata_df[metashape_metadata_df['image_file_name'].str.contains(i)]
    gdf = hsfm.geospatial.df_xyz_coords_to_gdf(df,
                                               lon='lon',
                                               lat='lat',
                                               z='alt',
                                               epsg_code='4326')
    # Convert geographic coordinates to Earth-centered (ECEF) for ASP.
    gdf = gdf.to_crs('epsg:4978')
    gdf = hsfm.geospatial.extract_gpd_geometry(gdf)
    gdf = gdf[['image_file_name','x','y','z']]
    for index, row in gdf.iterrows():
        image_base_name = row['image_file_name']
        out = os.path.join(output_directory,image_base_name+'.tsai')
        with open(out, 'w') as f:
            C0 = str(row['x'])
            C1 = str(row['y'])
            C2 = str(row['z'])
            line0 = 'VERSION_4\n'
            line1 = 'PINHOLE\n'
            # NOTE(review): focal length and principal point are hard-coded
            # for one specific scanner/camera setup — confirm before reuse.
            line2 = 'fu = ' + str(7564.1499999999996) +'\n'
            line3 = 'fv = ' + str(7564.1499999999996) +'\n'
            line4 = 'cu = ' + str(5625) +'\n'
            line5 = 'cv = ' + str(5625) +'\n'
            line6 = 'u_direction = 1 0 0\n'
            line7 = 'v_direction = 0 1 0\n'
            line8 = 'w_direction = 0 0 1\n'
            line9 = ' '.join(['C =',C0,C1,C2,'\n'])
            line10 = 'R = 1 0 0 0 1 0 0 0 1\n'
            line11 = 'pitch = 1\n'
            line12 = 'NULL\n'
            f.writelines([line0,line1,line2,line3,line4,line5,line6,line7,line8,line9,line10,line11,line12])
    return output_directory
def prepare_metashape_metadata(camera_positions_file_name,
                               output_directory='input_data',
                               flight_altitude_above_ground_m=1500,
                               flight_altitude_m=None,
                               focal_length=None):
    """
    Convert a camera-position CSV (or DataFrame) into the metadata CSV layout
    used for Metashape processing and write it to
    <output_directory>/metashape_metadata.csv.

    Parameters
    ----------
    camera_positions_file_name : str or pandas.DataFrame
        Input with at least 'fileName', 'Longitude' and 'Latitude' columns.
        'Altitude' and 'focal_length' columns are honored when present.
    output_directory : str
        Directory the CSV is written to (created if missing).
    flight_altitude_above_ground_m : int
        Height added to the USGS terrain elevation when flight_altitude_m
        is not supplied.
    flight_altitude_m : float, optional
        Fixed flight altitude; skips the USGS elevation query when given.
    focal_length : float, optional
        When given, written out as a 'focal_length' column.

    Returns
    -------
    pandas.DataFrame
        The metadata frame that was written to disk.
    """
    if isinstance(camera_positions_file_name, pd.DataFrame):
        df = camera_positions_file_name
    else:
        df = pd.read_csv(camera_positions_file_name)

    hsfm.io.create_dir(output_directory)

    # Orientation is unknown at this stage; start level and let the *_acc
    # values below express the (large) uncertainty.
    df['yaw'] = 0.0
    df['pitch'] = 0.0
    df['roll'] = 0.0
    df['image_file_name'] = df['fileName'] + '.tif'

    lons = df['Longitude'].values
    lats = df['Latitude'].values

    if flight_altitude_m is None:
        # Sample ground elevation under each camera, add the nominal height
        # above ground, then assign the single max altitude to every image.
        df['alt'] = hsfm.geospatial.USGS_elevation_function(lats, lons)
        df['alt'] = df['alt'] + flight_altitude_above_ground_m
        df['alt'] = round(df['alt'].max())
    else:
        df['alt'] = flight_altitude_m

    df['lon'] = df['Longitude'].astype(float).round(6)
    df['lat'] = df['Latitude'].astype(float).round(6)
    df['lon_acc'] = 1000
    df['lat_acc'] = 1000
    df['alt_acc'] = 1000
    df['yaw_acc'] = 180
    df['pitch_acc'] = 20
    df['roll_acc'] = 20

    # Use known altitudes when the input is a nagap_image_metadata_updated.csv
    # style file. NOTE(review): assumes 'Altitude' is a string column with
    # 'unknown' placeholders — confirm against the input schema.
    if 'Altitude' in df.columns:
        known = ~df['Altitude'].str.contains('unknown')
        df.loc[known, 'alt'] = df.loc[known]['Altitude'].values

    if focal_length is not None:
        df['focal_length'] = focal_length

    columns = ['image_file_name',
               'lon',
               'lat',
               'alt',
               'lon_acc',
               'lat_acc',
               'alt_acc',
               'yaw',
               'pitch',
               'roll',
               'yaw_acc',
               'pitch_acc',
               'roll_acc']
    # Keep focal_length only when available (passed in or already present);
    # previously an unconditional selection raised KeyError when absent.
    if 'focal_length' in df.columns:
        columns.append('focal_length')
    df = df[columns]

    out = os.path.join(output_directory, 'metashape_metadata.csv')
    df.to_csv(out, index=False)
    print(out)
    return df
def metadata_transform(metadata_file,
                       pc_align_transform_file,
                       output_file_name=None):
    '''
    Applies pc_align transform to lat, lon, alt positions.

    Parameters
    ----------
    metadata_file : str
        CSV with image_file_name, lon/lat/alt and accuracy/orientation columns.
    pc_align_transform_file : str
        ASP pc_align 4x4 transform text file.
    output_file_name : str, optional
        When given, the transformed metadata is also written to this CSV.

    Returns
    -------
    pandas.DataFrame
        Transformed metadata sorted by image_file_name.
    '''
    metadata_df = pd.read_csv(metadata_file)
    df = hsfm.geospatial.df_xyz_coords_to_gdf(metadata_df, z='alt')
    # ECEF (epsg:4978) so the rigid-body transform applies directly to x/y/z.
    df = df.to_crs('epsg:4978')
    hsfm.geospatial.extract_gpd_geometry(df)

    # Parse the transform once — it is loop-invariant (previously re-read
    # from disk on every row).
    C_translation, R_transform = hsfm.core.extract_transform(pc_align_transform_file)

    # Collect rows and build the frame once; DataFrame.append was deprecated
    # and removed in pandas >= 2.0.
    transformed_rows = []
    for _, row in df.iterrows():
        C = [row.x, row.y, row.z]
        row['x'], row['y'], row['z'] = hsfm.core.apply_position_transform(C,
                                                                          C_translation,
                                                                          R_transform)
        transformed_rows.append(row.drop(['geometry']))
    df_tmp = pd.DataFrame(transformed_rows)

    transformed_metadata = hsfm.geospatial.df_xyz_coords_to_gdf(df_tmp,
                                                                lon='x',
                                                                lat='y',
                                                                z='z',
                                                                epsg_code='4978')
    transformed_metadata = transformed_metadata.to_crs('epsg:4326')
    hsfm.geospatial.extract_gpd_geometry(transformed_metadata)
    transformed_metadata[['lon', 'lat', 'alt']] = transformed_metadata[['x', 'y', 'z']]
    transformed_metadata = transformed_metadata.drop(['x', 'y', 'z', 'geometry'], axis=1)
    transformed_metadata = transformed_metadata[['image_file_name',
                                                 'lon',
                                                 'lat',
                                                 'alt',
                                                 'lon_acc',
                                                 'lat_acc',
                                                 'alt_acc',
                                                 'yaw',
                                                 'pitch',
                                                 'roll',
                                                 'yaw_acc',
                                                 'pitch_acc',
                                                 'roll_acc']]
    transformed_metadata = transformed_metadata.sort_values(by=['image_file_name'],
                                                            ascending=True)
    if output_file_name is not None:
        transformed_metadata.to_csv(output_file_name, index=False)
    return transformed_metadata
def extract_transform(pc_align_transform_file):
    """
    Parse a pc_align 4x4 homogeneous transform file.

    Returns a (C_translation, R_transform) pair where C_translation is the
    3-element translation (4th column) and R_transform the 3x3 rotation
    (upper-left block), both as plain Python lists.
    """
    matrix = pd.read_csv(pc_align_transform_file, header=None, delimiter=r"\s+")
    upper = matrix.iloc[:3]                      # discard homogeneous bottom row
    C_translation = list(upper[3].values)        # 4th column holds translation
    R_transform = [list(upper.iloc[r, :3].values) for r in range(3)]
    return C_translation, R_transform
def apply_position_transform(C, C_translation, R_transform):
    """
    Apply a rigid-body transform to position C.

    Computes R_transform @ C + C_translation and returns the result as an
    (x, y, z) tuple.
    """
    rotated = (sum(r * c for r, c in zip(row, C)) for row in R_transform)
    return tuple(p + t for p, t in zip(rotated, C_translation))
def compute_point_offsets(metadata_file_1,
                          metadata_file_2,
                          lon='lon',
                          lat='lat',
                          alt='alt'):
    """
    Compute per-image position offsets (meters, UTM) between two metadata CSVs.

    Parameters
    ----------
    metadata_file_1, metadata_file_2 : str
        CSVs containing 'image_file_name' plus the lon/lat/alt columns.
    lon, lat, alt : str
        Column names for longitude, latitude and altitude.

    Returns
    -------
    (x_offset, y_offset, z_offset) : pandas.Series triple, file1 minus file2.
    """
    df1 = pd.read_csv(metadata_file_1)
    df2 = pd.read_csv(metadata_file_2)
    # Restrict both frames to images present in each (previously only the
    # longer frame was filtered, so equal-length frames with different image
    # sets were compared row-by-row against the wrong images).
    common_images = set(df1['image_file_name']) & set(df2['image_file_name'])
    df1 = df1[df1['image_file_name'].isin(common_images)].reset_index(drop=True)
    df2 = df2[df2['image_file_name'].isin(common_images)].reset_index(drop=True)

    epsg_code = hsfm.geospatial.lon_lat_to_utm_epsg_code(df2[lon].values[0],
                                                         df2[lat].values[0])
    gdf1 = hsfm.geospatial.df_xy_coords_to_gdf(df1, lon=lon, lat=lat)
    gdf1 = gdf1.to_crs('epsg:' + epsg_code)
    hsfm.geospatial.extract_gpd_geometry(gdf1)

    gdf2 = hsfm.geospatial.df_xy_coords_to_gdf(df2, lon=lon, lat=lat)
    gdf2 = gdf2.to_crs('epsg:' + epsg_code)
    hsfm.geospatial.extract_gpd_geometry(gdf2)

    x_offset = gdf1.x - gdf2.x
    y_offset = gdf1.y - gdf2.y
    # BUG FIX: honor the `alt` parameter (was hard-coded attribute access .alt).
    z_offset = gdf1[alt] - gdf2[alt]
    return x_offset, y_offset, z_offset
def find_sets(lsts):
    """
    Merge overlapping lists into disjoint groups.

    Any two input lists sharing at least one element end up in the same
    group (transitively). Empty lists are ignored. Returns each group as a
    sorted list.
    """
    pools = [set(lst) for lst in lsts if lst]
    changed = True
    while changed:
        changed = False
        merged_pools = []
        while pools:
            current = pools.pop(0)
            remaining = []
            for candidate in pools:
                if candidate.isdisjoint(current):
                    remaining.append(candidate)
                else:
                    changed = True
                    current |= candidate
            pools = remaining
            merged_pools.append(current)
        pools = merged_pools
    return [sorted(group) for group in pools]
def determine_image_clusters(image_metadata,
image_square_dim = None,
pixel_pitch = None,
focal_length = None,
image_directory = None,
buffer_m = 1200,
# flight_altitude_above_ground_m = 1500,
output_directory = None,
image_extension = '.tif',
image_file_name_column = 'fileName',
move_images = False,
qc = True):
"""
buffer_m = Approximate image footprint diameter in meters.
move_images = True # images are moved instead of copied.
"""
if not isinstance(image_metadata, type(pd.DataFrame())):
df = pd.read_csv(image_metadata)
else:
df = image_metadata
if isinstance(focal_length, type(None)):
focal_length = df['focal_length'].values[0]
# convert to geopandas.GeoDataFrame() and UTM
gdf = hsfm.geospatial.df_xy_coords_to_gdf(df, lon='Longitude', lat='Latitude')
lon = df['Longitude'].iloc[0]
lat = df['Latitude'].iloc[0]
epsg_code = hsfm.geospatial.lon_lat_to_utm_epsg_code(lon, lat)
gdf = gdf.to_crs('epsg:' +epsg_code)
# approximate square altitude dependant footprint
# this does not work very well for clustering due to variable
# flight altitudes and distance above ground.
# if isinstance(image_square_dim, type(None)) and isinstance(image_directory, type(str())):
# img = glob.glob(os.path.join(image_directory,'*.tif'))[0]
# img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
# image_square_dim = img.shape[0]
# gdf['polygon'] = hsfm.core.compute_square_footprint(gdf,
# image_square_dim,
# pixel_pitch,
# focal_length,
# flight_altitude_above_ground_m)
# return gdf
# approximate circular image foot print
print('Estimated footprint diameter:', buffer_m)
radius_m = buffer_m/2
gdf['polygon'] = gdf.geometry.buffer(radius_m)
footprints = []
for i in gdf.polygon.values:
d = gpd.GeoDataFrame(gpd.GeoSeries(i),
columns=['geometry'],
crs='epsg:' +epsg_code)
footprints.append(d)
file_names = list(df[image_file_name_column].values)
footprints = np.array(list(zip(file_names, footprints)),dtype=object)
# find intersecting image pairs
threshold = 200000
matches = []
areas = []
for a, b in itertools.combinations(footprints, 2):
intersection = gpd.overlay(a[1], b[1], how='intersection')
if len(intersection) > 0 and intersection.area[0] > threshold:
areas.append(intersection.area[0])
matches.append((a[0], b[0]))
matched_files = list(set(np.array(matches).flatten()))
unmatched_files = hsfm.core.diff_lists(matched_files, file_names)
# expand footprint radius for single images until the belong to a set
while len(unmatched_files) > 0 and radius_m < 10000:
print('Images not part of a cluster:', *unmatched_files, sep = "\n")
radius_m = radius_m + 500
print('Increasing estimated footprint diameter to:', int(2*radius_m))
gdf.loc[gdf['fileName'].isin(unmatched_files),'polygon'] = \
gdf.loc[gdf['fileName'].isin(unmatched_files),'geometry'].buffer(radius_m)
footprints = []
for i in gdf.polygon.values:
d = gpd.GeoDataFrame(gpd.GeoSeries(i),
columns=['geometry'],
crs='epsg:' +epsg_code)
footprints.append(d)
file_names = list(df[image_file_name_column].values)
footprints = np.array(list(zip(file_names, footprints)),dtype=object)
matches = []
areas = []
for a, b in itertools.combinations(footprints, 2):
intersection = | |
YANG variable /relay_agent/dhcp/agent_information_option/state/enable (boolean)
YANG Description: Enable sending the DHCP option for Relay Agent information
-- option 82.
"""
return self.__enable
def _set_enable(self, v, load=False):
    """
    Setter method for enable, mapped from YANG variable /relay_agent/dhcp/agent_information_option/state/enable (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enable is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enable() directly.

    YANG Description: Enable sending the DHCP option for Relay Agent information
    -- option 82.
    """
    # Unwrap an already-typed pyangbind value back to its base type so it is
    # revalidated against this leaf's schema below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
        # Surface the full schema description of the failed validation.
        raise ValueError({
            'error-string': """enable must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""",
        })
    self.__enable = t
    # Notify the container (if it tracks changes) that a child was set.
    if hasattr(self, '_set'):
        self._set()

def _unset_enable(self):
    # Reset the leaf to its schema default (false).
    self.__enable = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)

# 'enable' is operational state (is_config=False), so only a getter is exposed.
enable = __builtin__.property(_get_enable)

_pyangbind_elements = OrderedDict([('enable', enable), ])
class yc_agent_information_option_openconfig_relay_agent__relay_agent_dhcp_agent_information_option(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcp/agent-information-option. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Top-level container for relay agent information option
    data
    """
    # Slots avoid per-instance __dict__; child containers live in mangled names.
    __slots__ = ('_path_helper', '_extmethods', '__config','__state',)

    _yang_name = 'agent-information-option'
    _yang_namespace = 'http://openconfig.net/yang/relay-agent'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are pre-instantiated with their YANG metadata.
        self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
        self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        # Optional copy-construction from an object exposing the same elements.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements the source object actually changed.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Build the instance path from the parent chain when registered.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['relay-agent', 'dhcp', 'agent-information-option']

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /relay_agent/dhcp/agent_information_option/config (container)

        YANG Description: Configuration data for the relay agent information
        option
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /relay_agent/dhcp/agent_information_option/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration data for the relay agent information
        option
        """
        # Unwrap a typed value back to its base type before revalidation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_config_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """config must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
            })
        self.__config = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_config(self):
        # Reset to a fresh, default child container.
        self.__config = YANGDynClass(base=yc_config_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /relay_agent/dhcp/agent_information_option/state (container)

        YANG Description: Operational state data for agent information at global
        level
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /relay_agent/dhcp/agent_information_option/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Operational state data for agent information at global
        level
        """
        # Unwrap a typed value back to its base type before revalidation.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_state_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """state must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)""",
            })
        self.__state = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_state(self):
        # Reset to a fresh, default child container.
        self.__state = YANGDynClass(base=yc_state_openconfig_relay_agent__relay_agent_dhcp_agent_information_option_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=True)

    # Both children are read-write containers (is_config=True).
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict([('config', config), ('state', state), ])
class yc_config_openconfig_relay_agent__relay_agent_dhcp_interfaces_interface_config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcp/interfaces/interface/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration data for relay agent interfaces.
"""
__slots__ = ('_path_helper', '_extmethods', '__id','__enable','__helper_address',)
_yang_name = 'config'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Leaves are stored in name-mangled slots with full YANG type metadata.
    self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=True)
    self.__enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True)
    # helper-address is a unique leaf-list accepting IPv4 or IPv6 textual
    # addresses (the two restriction patterns below).
    self.__helper_address = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),]), is_leaf=False, yang_name="helper-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='inet:ip-address', is_config=True)

    load = kwargs.pop("load", None)
    # Optional copy-construction from an object exposing the same elements.
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            # Only copy elements the source object actually changed.
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    # Build the instance path from the parent chain when registered.
    if hasattr(self, "_parent"):
        return self._parent._path()+[self._yang_name]
    else:
        return ['relay-agent', 'dhcp', 'interfaces', 'interface', 'config']

def _get_id(self):
    """
    Getter method for id, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/config/id (oc-if:interface-id)

    YANG Description: Name of the interface on which relay agent is active
    """
    return self.__id

def _set_id(self, v, load=False):
    """
    Setter method for id, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/config/id (oc-if:interface-id)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_id() directly.

    YANG Description: Name of the interface on which relay agent is active
    """
    # Unwrap a typed value back to its base type before revalidation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """id must be of a type compatible with oc-if:interface-id""",
            'defined-type': "oc-if:interface-id",
            'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=True)""",
        })
    self.__id = t
    if hasattr(self, '_set'):
        self._set()

def _unset_id(self):
    # Reset the leaf to an empty default.
    self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=True)
def _get_enable(self):
    """
    Getter method for enable, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/config/enable (boolean)

    YANG Description: Enables the relay agent on the referenced interface.
    At least one helper address should also be configured
    for forwarding requested.
    """
    return self.__enable

def _set_enable(self, v, load=False):
    """
    Setter method for enable, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/config/enable (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enable is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enable() directly.

    YANG Description: Enables the relay agent on the referenced interface.
    At least one helper address should also be configured
    for forwarding requested.
    """
    # Unwrap a typed value back to its base type before revalidation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """enable must be of a type compatible with boolean""",
            'defined-type': "boolean",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True)""",
        })
    self.__enable = t
    if hasattr(self, '_set'):
        self._set()

def _unset_enable(self):
    # Reset the leaf to an unset default.
    self.__enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=True)
def _get_helper_address(self):
"""
Getter method for helper_address, mapped from YANG variable /relay_agent/dhcp/interfaces/interface/config/helper_address (inet:ip-address)
YANG Description: List of IPv4 or IPv6 addresses of DHCP servers to which the
relay agent should forward DHCPv4 requests. The relay agent is
expected to forward DHCPv4/BOOTP requests to all listed
server addresses | |
'Gyy', 'Gyy', 'Gyi', 'Gxi', 'Gxi', 'Gxi']
# Lookup table mapping a two-qubit Clifford label 'Gc<i>c<j>' to a fixed-depth
# (7-layer) sequence of parallel primitive-gate labels. NOTE(review): labels
# 'Gab' presumably encode gate 'a' on qubit 0 and gate 'b' on qubit 1, with
# 'i' meaning idle — confirm against the pyGSTi two-qubit model pack this
# table was generated for. Data below is auto-generated; do not hand-edit.
clifford_compilation['Gc14c20'] = ['Gxx', 'Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gxi', 'Gxi']
clifford_compilation['Gc14c21'] = ['Gxy', 'Gyi', 'Gyi', 'Gyi', 'Gxi', 'Gxi', 'Gxi']
clifford_compilation['Gc14c22'] = ['Gxx', 'Gyx', 'Gyx', 'Gyy', 'Gxy', 'Gxi', 'Gxi']
clifford_compilation['Gc14c23'] = ['Gxx', 'Gyy', 'Gyx', 'Gyx', 'Gxx', 'Gxi', 'Gxi']
clifford_compilation['Gc15c0'] = ['Gyi', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c1'] = ['Gyy', 'Gyx', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c2'] = ['Gyx', 'Gyx', 'Gyx', 'Giy', 'Giy', 'Giy', 'Gii']
clifford_compilation['Gc15c3'] = ['Gyx', 'Gyx', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c4'] = ['Gyy', 'Gyy', 'Gyy', 'Gix', 'Gix', 'Gix', 'Gii']
clifford_compilation['Gc15c5'] = ['Gyx', 'Gyy', 'Gyy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c6'] = ['Gyy', 'Gyy', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c7'] = ['Gyy', 'Gyy', 'Gyy', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c8'] = ['Gyx', 'Gyy', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c9'] = ['Gyx', 'Gyx', 'Gyy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c10'] = ['Gyy', 'Gyx', 'Gyx', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c11'] = ['Gyx', 'Gyx', 'Gyx', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c12'] = ['Gyy', 'Gyx', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c13'] = ['Gyx', 'Gyx', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c14'] = ['Gyx', 'Gyy', 'Gyy', 'Giy', 'Gix', 'Gix', 'Gix']
clifford_compilation['Gc15c15'] = ['Gyy', 'Gyy', 'Gyy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c16'] = ['Gyx', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c17'] = ['Gyx', 'Gyy', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c18'] = ['Gyy', 'Gyy', 'Gyy', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc15c19'] = ['Gyx', 'Gyy', 'Gyy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c20'] = ['Gyx', 'Gyy', 'Gyy', 'Giy', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc15c21'] = ['Gyy', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc15c22'] = ['Gyx', 'Gyx', 'Gyx', 'Giy', 'Giy', 'Gii', 'Gii']
clifford_compilation['Gc15c23'] = ['Gyx', 'Gyy', 'Gyx', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc16c0'] = ['Gxi', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c1'] = ['Gxy', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c2'] = ['Gxx', 'Gix', 'Gix', 'Giy', 'Giy', 'Giy', 'Gii']
clifford_compilation['Gc16c3'] = ['Gxx', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c4'] = ['Gxy', 'Giy', 'Giy', 'Gix', 'Gix', 'Gix', 'Gii']
clifford_compilation['Gc16c5'] = ['Gxx', 'Giy', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c6'] = ['Gxy', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c7'] = ['Gxy', 'Giy', 'Giy', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c8'] = ['Gxx', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c9'] = ['Gxx', 'Gix', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c10'] = ['Gxy', 'Gix', 'Gix', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c11'] = ['Gxx', 'Gix', 'Gix', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c12'] = ['Gxy', 'Gix', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c13'] = ['Gxx', 'Gix', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c14'] = ['Gxx', 'Giy', 'Giy', 'Giy', 'Gix', 'Gix', 'Gix']
clifford_compilation['Gc16c15'] = ['Gxy', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c16'] = ['Gxx', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c17'] = ['Gxx', 'Giy', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c18'] = ['Gxy', 'Giy', 'Giy', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc16c19'] = ['Gxx', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c20'] = ['Gxx', 'Giy', 'Giy', 'Giy', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc16c21'] = ['Gxy', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc16c22'] = ['Gxx', 'Gix', 'Gix', 'Giy', 'Giy', 'Gii', 'Gii']
clifford_compilation['Gc16c23'] = ['Gxx', 'Giy', 'Gix', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc17c0'] = ['Gxi', 'Gyi', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c1'] = ['Gxy', 'Gyx', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c2'] = ['Gxx', 'Gyx', 'Gxx', 'Giy', 'Giy', 'Giy', 'Gii']
clifford_compilation['Gc17c3'] = ['Gxx', 'Gyx', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c4'] = ['Gxy', 'Gyy', 'Gxy', 'Gix', 'Gix', 'Gix', 'Gii']
clifford_compilation['Gc17c5'] = ['Gxx', 'Gyy', 'Gxy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c6'] = ['Gxy', 'Gyy', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c7'] = ['Gxy', 'Gyy', 'Gxy', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c8'] = ['Gxx', 'Gyy', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c9'] = ['Gxx', 'Gyx', 'Gxy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c10'] = ['Gxy', 'Gyx', 'Gxx', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c11'] = ['Gxx', 'Gyx', 'Gxx', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c12'] = ['Gxy', 'Gyx', 'Gxx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c13'] = ['Gxx', 'Gyx', 'Gxx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c14'] = ['Gxx', 'Gyy', 'Gxy', 'Giy', 'Gix', 'Gix', 'Gix']
clifford_compilation['Gc17c15'] = ['Gxy', 'Gyy', 'Gxy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c16'] = ['Gxx', 'Gyi', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c17'] = ['Gxx', 'Gyy', 'Gxx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c18'] = ['Gxy', 'Gyy', 'Gxy', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc17c19'] = ['Gxx', 'Gyy', 'Gxy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c20'] = ['Gxx', 'Gyy', 'Gxy', 'Giy', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc17c21'] = ['Gxy', 'Gyi', 'Gxi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc17c22'] = ['Gxx', 'Gyx', 'Gxx', 'Giy', 'Giy', 'Gii', 'Gii']
clifford_compilation['Gc17c23'] = ['Gxx', 'Gyy', 'Gxx', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc18c0'] = ['Gyi', 'Gyi', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c1'] = ['Gyy', 'Gyx', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c2'] = ['Gyx', 'Gyx', 'Gyx', 'Gxy', 'Gxy', 'Giy', 'Gii']
clifford_compilation['Gc18c3'] = ['Gyx', 'Gyx', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c4'] = ['Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gxx', 'Gix', 'Gii']
clifford_compilation['Gc18c5'] = ['Gyx', 'Gyy', 'Gyy', 'Gxy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c6'] = ['Gyy', 'Gyy', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c7'] = ['Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c8'] = ['Gyx', 'Gyy', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c9'] = ['Gyx', 'Gyx', 'Gyy', 'Gxy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c10'] = ['Gyy', 'Gyx', 'Gyx', 'Gxx', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c11'] = ['Gyx', 'Gyx', 'Gyx', 'Gxy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c12'] = ['Gyy', 'Gyx', 'Gyx', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c13'] = ['Gyx', 'Gyx', 'Gyx', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c14'] = ['Gyx', 'Gyy', 'Gyy', 'Gxy', 'Gxx', 'Gix', 'Gix']
clifford_compilation['Gc18c15'] = ['Gyy', 'Gyy', 'Gyy', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c16'] = ['Gyx', 'Gyi', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c17'] = ['Gyx', 'Gyy', 'Gyx', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c18'] = ['Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc18c19'] = ['Gyx', 'Gyy', 'Gyy', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c20'] = ['Gyx', 'Gyy', 'Gyy', 'Gxy', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc18c21'] = ['Gyy', 'Gyi', 'Gyi', 'Gxi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc18c22'] = ['Gyx', 'Gyx', 'Gyx', 'Gxy', 'Gxy', 'Gii', 'Gii']
clifford_compilation['Gc18c23'] = ['Gyx', 'Gyy', 'Gyx', 'Gxx', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc19c0'] = ['Gxi', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c1'] = ['Gxy', 'Gyx', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c2'] = ['Gxx', 'Gyx', 'Gyx', 'Giy', 'Giy', 'Giy', 'Gii']
clifford_compilation['Gc19c3'] = ['Gxx', 'Gyx', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c4'] = ['Gxy', 'Gyy', 'Gyy', 'Gix', 'Gix', 'Gix', 'Gii']
clifford_compilation['Gc19c5'] = ['Gxx', 'Gyy', 'Gyy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c6'] = ['Gxy', 'Gyy', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c7'] = ['Gxy', 'Gyy', 'Gyy', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c8'] = ['Gxx', 'Gyy', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c9'] = ['Gxx', 'Gyx', 'Gyy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c10'] = ['Gxy', 'Gyx', 'Gyx', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c11'] = ['Gxx', 'Gyx', 'Gyx', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c12'] = ['Gxy', 'Gyx', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c13'] = ['Gxx', 'Gyx', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c14'] = ['Gxx', 'Gyy', 'Gyy', 'Giy', 'Gix', 'Gix', 'Gix']
clifford_compilation['Gc19c15'] = ['Gxy', 'Gyy', 'Gyy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c16'] = ['Gxx', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c17'] = ['Gxx', 'Gyy', 'Gyx', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c18'] = ['Gxy', 'Gyy', 'Gyy', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc19c19'] = ['Gxx', 'Gyy', 'Gyy', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c20'] = ['Gxx', 'Gyy', 'Gyy', 'Giy', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc19c21'] = ['Gxy', 'Gyi', 'Gyi', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc19c22'] = ['Gxx', 'Gyx', 'Gyx', 'Giy', 'Giy', 'Gii', 'Gii']
clifford_compilation['Gc19c23'] = ['Gxx', 'Gyy', 'Gyx', 'Gix', 'Gix', 'Gii', 'Gii']
clifford_compilation['Gc20c0'] = ['Gxi', 'Gyi', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c1'] = ['Gxy', 'Gyx', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c2'] = ['Gxx', 'Gyx', 'Gyx', 'Gyy', 'Gxy', 'Giy', 'Gii']
clifford_compilation['Gc20c3'] = ['Gxx', 'Gyx', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c4'] = ['Gxy', 'Gyy', 'Gyy', 'Gyx', 'Gxx', 'Gix', 'Gii']
clifford_compilation['Gc20c5'] = ['Gxx', 'Gyy', 'Gyy', 'Gyy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c6'] = ['Gxy', 'Gyy', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c7'] = ['Gxy', 'Gyy', 'Gyy', 'Gyx', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c8'] = ['Gxx', 'Gyy', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c9'] = ['Gxx', 'Gyx', 'Gyy', 'Gyy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c10'] = ['Gxy', 'Gyx', 'Gyx', 'Gyx', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c11'] = ['Gxx', 'Gyx', 'Gyx', 'Gyy', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c12'] = ['Gxy', 'Gyx', 'Gyx', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c13'] = ['Gxx', 'Gyx', 'Gyx', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c14'] = ['Gxx', 'Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gix', 'Gix']
clifford_compilation['Gc20c15'] = ['Gxy', 'Gyy', 'Gyy', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c16'] = ['Gxx', 'Gyi', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c17'] = ['Gxx', 'Gyy', 'Gyx', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c18'] = ['Gxy', 'Gyy', 'Gyy', 'Gyx', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc20c19'] = ['Gxx', 'Gyy', 'Gyy', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c20'] = ['Gxx', 'Gyy', 'Gyy', 'Gyy', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc20c21'] = ['Gxy', 'Gyi', 'Gyi', 'Gyi', 'Gxi', 'Gii', 'Gii']
clifford_compilation['Gc20c22'] = ['Gxx', 'Gyx', 'Gyx', 'Gyy', 'Gxy', 'Gii', 'Gii']
clifford_compilation['Gc20c23'] = ['Gxx', 'Gyy', 'Gyx', 'Gyx', 'Gxx', 'Gii', 'Gii']
clifford_compilation['Gc21c0'] = ['Gyi', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c1'] = ['Gyy', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c2'] = ['Gyx', 'Gix', 'Gix', 'Giy', 'Giy', 'Giy', 'Gii']
clifford_compilation['Gc21c3'] = ['Gyx', 'Gix', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c4'] = ['Gyy', 'Giy', 'Giy', 'Gix', 'Gix', 'Gix', 'Gii']
clifford_compilation['Gc21c5'] = ['Gyx', 'Giy', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c6'] = ['Gyy', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c7'] = ['Gyy', 'Giy', 'Giy', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c8'] = ['Gyx', 'Giy', 'Gii', 'Gii', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c9'] = ['Gyx', 'Gix', 'Giy', 'Giy', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c10'] = ['Gyy', 'Gix', 'Gix', 'Gix', 'Gii', 'Gii', 'Gii']
clifford_compilation['Gc21c11'] = ['Gyx', | |
import gc
import sys
from typing import List
import pytest
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
def test_simple_unittest(pytester: Pytester) -> None:
    """A plain TestCase is collected; pass/fail outcomes are reported per method."""
    testpath = pytester.makepyfile(
        """
        import unittest
        class MyTestCase(unittest.TestCase):
            def testpassing(self):
                self.assertEqual('foo', 'foo')
            def test_failing(self):
                self.assertEqual('foo', 'bar')
        """
    )
    reprec = pytester.inline_run(testpath)
    assert reprec.matchreport("testpassing").passed
    assert reprec.matchreport("test_failing").failed
def test_runTest_method(pytester: Pytester) -> None:
    """runTest is collected only when the TestCase defines no other test_* methods."""
    pytester.makepyfile(
        """
        import unittest
        class MyTestCaseWithRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEqual('foo', 'foo')
        class MyTestCaseWithoutRunTest(unittest.TestCase):
            def runTest(self):
                self.assertEqual('foo', 'foo')
            def test_something(self):
                pass
        """
    )
    result = pytester.runpytest("-v")
    # Only runTest from the first class and test_something from the second
    # are collected: runTest is ignored when test_* methods exist.
    result.stdout.fnmatch_lines(
        """
        *MyTestCaseWithRunTest::runTest*
        *MyTestCaseWithoutRunTest::test_something*
        *2 passed*
    """
    )
def test_isclasscheck_issue53(pytester: Pytester) -> None:
    """An object with a permissive __getattr__ must not be mis-collected (#53)."""
    testpath = pytester.makepyfile(
        """
        import unittest
        class _E(object):
            def __getattr__(self, tag):
                pass
        E = _E()
        """
    )
    result = pytester.runpytest(testpath)
    assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_setup(pytester: Pytester) -> None:
    """Both unittest setUp and pytest-style setup_method run; teardown failure is reported."""
    testpath = pytester.makepyfile(
        """
        import unittest
        class MyTestCase(unittest.TestCase):
            def setUp(self):
                self.foo = 1
            def setup_method(self, method):
                self.foo2 = 1
            def test_both(self):
                self.assertEqual(1, self.foo)
                assert self.foo2 == 1
            def teardown_method(self, method):
                assert 0, "42"
        """
    )
    reprec = pytester.inline_run("-s", testpath)
    assert reprec.matchreport("test_both", when="call").passed
    rep = reprec.matchreport("test_both", when="teardown")
    assert rep.failed and "42" in str(rep.longrepr)
def test_setUpModule(pytester: Pytester) -> None:
    """setUpModule/tearDownModule hooks run around plain test functions."""
    testpath = pytester.makepyfile(
        """
        values = []
        def setUpModule():
            values.append(1)
        def tearDownModule():
            del values[0]
        def test_hello():
            assert values == [1]
        def test_world():
            assert values == [1]
        """
    )
    result = pytester.runpytest(testpath)
    result.stdout.fnmatch_lines(["*2 passed*"])
def test_setUpModule_failing_no_teardown(pytester: Pytester) -> None:
    """If setUpModule raises, tearDownModule must not run."""
    testpath = pytester.makepyfile(
        """
        values = []
        def setUpModule():
            0/0
        def tearDownModule():
            values.append(1)
        def test_hello():
            pass
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(passed=0, failed=1)
    call = reprec.getcalls("pytest_runtest_setup")[0]
    # tearDownModule appending would have left values non-empty.
    assert not call.item.module.values
def test_new_instances(pytester: Pytester) -> None:
    """Each test method runs on a fresh TestCase instance (no state leakage)."""
    testpath = pytester.makepyfile(
        """
        import unittest
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                self.x = 2
            def test_func2(self):
                assert not hasattr(self, 'x')
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(passed=2)
def test_function_item_obj_is_instance(pytester: Pytester) -> None:
    """item.obj should be a bound method on unittest.TestCase function items (#5390)."""
    pytester.makeconftest(
        """
        def pytest_runtest_makereport(item, call):
            if call.when == 'call':
                class_ = item.parent.obj
                assert isinstance(item.obj.__self__, class_)
        """
    )
    pytester.makepyfile(
        """
        import unittest
        class Test(unittest.TestCase):
            def test_foo(self):
                pass
        """
    )
    result = pytester.runpytest_inprocess()
    result.stdout.fnmatch_lines(["* 1 passed in*"])
def test_teardown(pytester: Pytester) -> None:
    """tearDown runs after each test; a later TestCase can observe its effect."""
    path = pytester.makepyfile(
        """
        import unittest
        class MyTestCase(unittest.TestCase):
            values = []
            def test_one(self):
                pass
            def tearDown(self):
                self.values.append(None)
        class Second(unittest.TestCase):
            def test_check(self):
                self.assertEqual(MyTestCase.values, [None])
        """
    )
    rec = pytester.inline_run(path)
    passed, skipped, failed = rec.countoutcomes()
    assert failed == 0, failed
    assert passed == 2
    assert passed + skipped + failed == 2
def test_teardown_issue1649(pytester: Pytester) -> None:
    """
    Are TestCase objects cleaned up? Often unittest TestCase objects set
    attributes that are large and expensive during setUp.
    The TestCase will not be cleaned up if the test fails, because it
    would then exist in the stackframe.
    """
    path = pytester.makepyfile(
        """
        import unittest
        class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase):
            def setUp(self):
                self.an_expensive_object = 1
            def test_demo(self):
                pass
        """
    )
    pytester.inline_run("-s", path)
    gc.collect()
    # No live object of the TestCase class may remain after the run.
    leaked = [obj for obj in gc.get_objects()
              if type(obj).__name__ == "TestCaseObjectsShouldBeCleanedUp"]
    assert not leaked
def test_unittest_skip_issue148(pytester: Pytester) -> None:
    """A class-level @unittest.skip must prevent setUpClass/tearDownClass from running (#148)."""
    testpath = pytester.makepyfile(
        """
        import unittest
        @unittest.skip("hello")
        class MyTestCase(unittest.TestCase):
            @classmethod
            def setUpClass(self):
                xxx
            def test_one(self):
                pass
            @classmethod
            def tearDownClass(self):
                xxx
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import unittest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
def test_method(self):
assert False, "down2"
"""
)
result = pytester.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines(
[
"*tearDown*",
"*assert 0*",
"*test_method*",
"*assert False*",
"*1 failed*1 error*",
]
)
def test_setup_failure_is_shown(pytester: Pytester) -> None:
    """A setUp failure is reported and the test body never executes."""
    pytester.makepyfile(
        """
        import unittest
        import pytest
        class TC(unittest.TestCase):
            def setUp(self):
                assert 0, "down1"
            def test_method(self):
                print("never42")
                xyz
        """
    )
    result = pytester.runpytest("-s")
    assert result.ret == 1
    result.stdout.fnmatch_lines(["*setUp*", "*assert 0*down1*", "*1 failed*"])
    # The body's print must not have run.
    result.stdout.no_fnmatch_line("*never42*")
def test_setup_setUpClass(pytester: Pytester) -> None:
    """setUpClass/tearDownClass run exactly once around all class tests."""
    testpath = pytester.makepyfile(
        """
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            @classmethod
            def setUpClass(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            @classmethod
            def tearDownClass(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(passed=3)
def test_fixtures_setup_setUpClass_issue8394(pytester: Pytester) -> None:
    """--fixtures must not show 'no docstring' noise for unittest class fixtures (#8394)."""
    pytester.makepyfile(
        """
        import unittest
        class MyTestCase(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                pass
            def test_func1(self):
                pass
            @classmethod
            def tearDownClass(cls):
                pass
        """
    )
    result = pytester.runpytest("--fixtures")
    assert result.ret == 0
    result.stdout.no_fnmatch_line("*no docstring available*")
    # With -v, undocumented fixtures are listed including the placeholder.
    result = pytester.runpytest("--fixtures", "-v")
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*no docstring available*"])
def test_setup_class(pytester: Pytester) -> None:
    """pytest-style setup_class/teardown_class also work on TestCase subclasses."""
    testpath = pytester.makepyfile(
        """
        import unittest
        import pytest
        class MyTestCase(unittest.TestCase):
            x = 0
            def setup_class(cls):
                cls.x += 1
            def test_func1(self):
                assert self.x == 1
            def test_func2(self):
                assert self.x == 1
            def teardown_class(cls):
                cls.x -= 1
        def test_teareddown():
            assert MyTestCase.x == 0
        """
    )
    reprec = pytester.inline_run(testpath)
    reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ["Error", "Failure"])
def test_testcase_adderrorandfailure_defers(pytester: Pytester, type: str) -> None:
    """result.addError/addFailure called from a custom run() must not raise."""
    pytester.makepyfile(
        """
        from unittest import TestCase
        import pytest
        class MyTestCase(TestCase):
            def run(self, result):
                excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
                try:
                    result.add%s(self, excinfo._excinfo)
                except KeyboardInterrupt:
                    raise
                except:
                    pytest.fail("add%s should not raise")
            def test_hello(self):
                pass
        """
        % (type, type)
    )
    result = pytester.runpytest()
    result.stdout.no_fnmatch_line("*should not raise*")
@pytest.mark.parametrize("type", ["Error", "Failure"])
def test_testcase_custom_exception_info(pytester: Pytester, type: str) -> None:
    """An ExceptionInfo replacement that cannot be constructed degrades gracefully."""
    pytester.makepyfile(
        """
        from typing import Generic, TypeVar
        from unittest import TestCase
        import pytest, _pytest._code
        class MyTestCase(TestCase):
            def run(self, result):
                excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
                # We fake an incompatible exception info.
                class FakeExceptionInfo(Generic[TypeVar("E")]):
                    def __init__(self, *args, **kwargs):
                        mp.undo()
                        raise TypeError()
                    @classmethod
                    def from_current(cls):
                        return cls()
                    @classmethod
                    def from_exc_info(cls, *args, **kwargs):
                        return cls()
                mp = pytest.MonkeyPatch()
                mp.setattr(_pytest._code, 'ExceptionInfo', FakeExceptionInfo)
                try:
                    excinfo = excinfo._excinfo
                    result.add%(type)s(self, excinfo)
                finally:
                    mp.undo()
            def test_hello(self):
                pass
        """
        % locals()
    )
    result = pytester.runpytest()
    result.stdout.fnmatch_lines(
        [
            "NOTE: Incompatible Exception Representation*",
            "*ZeroDivisionError*",
            "*1 failed*",
        ]
    )
def test_testcase_totally_incompatible_exception_info(pytester: Pytester) -> None:
    """addError with junk instead of an excinfo tuple reports 'Unknown Incompatible'."""
    import _pytest.unittest

    (item,) = pytester.getitems(
        """
        from unittest import TestCase
        class MyTestCase(TestCase):
            def test_hello(self):
                pass
        """
    )
    assert isinstance(item, _pytest.unittest.TestCaseFunction)
    item.addError(None, 42)  # type: ignore[arg-type]
    excinfo = item._excinfo
    assert excinfo is not None
    assert "ERROR: Unknown Incompatible" in str(excinfo.pop(0).getrepr())
def test_module_level_pytestmark(pytester: Pytester) -> None:
    """A module-level pytestmark (xfail) applies to TestCase methods too."""
    testpath = pytester.makepyfile(
        """
        import unittest
        import pytest
        pytestmark = pytest.mark.xfail
        class MyTestCase(unittest.TestCase):
            def test_func1(self):
                assert 0
        """
    )
    reprec = pytester.inline_run(testpath, "-s")
    reprec.assertoutcome(skipped=1)
class TestTrialUnittest:
    def setup_class(cls):
        # Skip this whole class when twisted.trial is not installed.
        cls.ut = pytest.importorskip("twisted.trial.unittest")
        # on windows trial uses a socket for a reactor and apparently doesn't close it properly
        # https://twistedmatrix.com/trac/ticket/9227
        cls.ignore_unclosed_socket_warning = ("-W", "always")
    def test_trial_testcase_runtest_not_collected(self, pytester: Pytester) -> None:
        """Trial TestCases collect normally whether tests are test_* or runTest."""
        pytester.makepyfile(
            """
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def test_hello(self):
                    pass
            """
        )
        reprec = pytester.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)
        pytester.makepyfile(
            """
            from twisted.trial.unittest import TestCase
            class TC(TestCase):
                def runTest(self):
                    pass
            """
        )
        reprec = pytester.inline_run(*self.ignore_unclosed_socket_warning)
        reprec.assertoutcome(passed=1)
    def test_trial_exceptions_with_skips(self, pytester: Pytester) -> None:
        """pytest and trial skip/xfail/todo mechanisms all report correctly together."""
        pytester.makepyfile(
            """
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    pytest.skip("skip_in_method")
                @pytest.mark.skipif("sys.version_info != 1")
                def test_hello2(self):
                    pass
                @pytest.mark.xfail(reason="iwanto")
                def test_hello3(self):
                    assert 0
                def test_hello4(self):
                    pytest.xfail("i2wanto")
                def test_trial_skip(self):
                    pass
                test_trial_skip.skip = "trialselfskip"
                def test_trial_todo(self):
                    assert 0
                test_trial_todo.todo = "mytodo"
                def test_trial_todo_success(self):
                    pass
                test_trial_todo_success.todo = "mytodo"
            class TC2(unittest.TestCase):
                def setup_class(cls):
                    pytest.skip("skip_in_setup_class")
                def test_method(self):
                    pass
            """
        )
        result = pytester.runpytest("-rxs", *self.ignore_unclosed_socket_warning)
        result.stdout.fnmatch_lines_random(
            [
                "*XFAIL*test_trial_todo*",
                "*trialselfskip*",
                "*skip_in_setup_class*",
                "*iwanto*",
                "*i2wanto*",
                "*sys.version_info*",
                "*skip_in_method*",
                "*1 failed*4 skipped*3 xfailed*",
            ]
        )
        assert result.ret == 1
    def test_trial_error(self, pytester: Pytester) -> None:
        """Trial deferred/reactor failures surface as failures and teardown errors."""
        pytester.makepyfile(
            """
            from twisted.trial.unittest import TestCase
            from twisted.internet.defer import Deferred
            from twisted.internet import reactor
            class TC(TestCase):
                def test_one(self):
                    crash
                def test_two(self):
                    def f(_):
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                def test_three(self):
                    def f():
                        pass # will never get called
                    reactor.callLater(0.3, f)
                # will crash at teardown
                def test_four(self):
                    def f(_):
                        reactor.callLater(0.3, f)
                        crash
                    d = Deferred()
                    d.addCallback(f)
                    reactor.callLater(0.3, d.callback, None)
                    return d
                # will crash both at test time and at teardown
            """
        )
        result = pytester.runpytest(
            "-vv", "-oconsole_output_style=classic", "-W", "ignore::DeprecationWarning"
        )
        result.stdout.fnmatch_lines(
            [
                "test_trial_error.py::TC::test_four FAILED",
                "test_trial_error.py::TC::test_four ERROR",
                "test_trial_error.py::TC::test_one FAILED",
                "test_trial_error.py::TC::test_three FAILED",
                "test_trial_error.py::TC::test_two FAILED",
                "*ERRORS*",
                "*_ ERROR at teardown of TC.test_four _*",
                "*DelayedCalls*",
                "*= FAILURES =*",
                "*_ TC.test_four _*",
                "*NameError*crash*",
                "*_ TC.test_one _*",
                "*NameError*crash*",
                "*_ TC.test_three _*",
                "*DelayedCalls*",
                "*_ TC.test_two _*",
                "*NameError*crash*",
                "*= 4 failed, 1 error in *",
            ]
        )
    def test_trial_pdb(self, pytester: Pytester) -> None:
        """--pdb interaction works for trial TestCases (spawned subprocess)."""
        p = pytester.makepyfile(
            """
            from twisted.trial import unittest
            import pytest
            class TC(unittest.TestCase):
                def test_hello(self):
                    assert 0, "hellopdb"
            """
        )
        child = pytester.spawn_pytest(str(p))
        child.expect("hellopdb")
        child.sendeof()
    def test_trial_testcase_skip_property(self, pytester: Pytester) -> None:
        """A trial class-level `skip` attribute skips all its tests."""
        testpath = pytester.makepyfile(
            """
            from twisted.trial import unittest
            class MyTestCase(unittest.TestCase):
                skip = 'dont run'
                def test_func(self):
                    pass
            """
        )
        reprec = pytester.inline_run(testpath, "-s")
        reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(self, pytester: Pytester) -> None:
testpath = pytester.makepyfile(
"""
from twisted.trial import | |
#!/usr/bin/env python3
#
# Copyright (c) 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script information:
# -------------------
# upcall_cost.py uses various user space and kernel space probes to determine
# the costs (in time) for handling the first packet in user space. It
# calculates the following costs:
#
# - Time it takes from the kernel sending the upcall till it's received by the
# ovs-vswitchd process.
# - Time it takes from ovs-vswitchd sending the execute actions command till
# the kernel receives it.
# - The total time it takes from the kernel to send the upcall until it
# receives the packet execute command.
# - The total time of the above, minus the time it takes for the actual lookup.
#
# In addition, it will also report the number of packets batched, as OVS will
# first try to read UPCALL_MAX_BATCH(64) packets from kernel space and then
# does the flow lookups and execution. So the smaller the batch size, the more
# realistic are the cost estimates.
#
# The script does not need any options to attach to a running instance of
# ovs-vswitchd. However, it is recommended to always run the script with the
# --write-events option. This way, if something does go wrong, the collected
# data is saved. Use the --help option to see all the available options.
#
# Note: In addition to the bcc tools for your specific setup, you need the
# following Python packages:
# pip install alive-progress halo psutil scapy strenum text_histogram3
#
#
# Import the BCC bindings up front.  Offline analysis of previously recorded
# event files does not need them, so a missing module is only a warning here;
# any later attempt to trace live will fail on the undefined names instead.
#
try:
    from bcc import BPF, USDT, USDTException
except ModuleNotFoundError:
    print("WARNING: Can't find the BPF Compiler Collection (BCC) tools!")
    print("         This is NOT a problem if you are analyzing previously"
          " collected data.\n")
from alive_progress import alive_bar
from collections import namedtuple
from halo import Halo
from scapy.all import TCP, UDP
from scapy.layers.l2 import Ether
from strenum import StrEnum
from text_histogram3 import histogram
from time import process_time
import argparse
import ast
import psutil
import re
import struct
import subprocess
import sys
import time
#
# Global definitions
#
DP_TUNNEL_PORT = -1
#
# Actual eBPF source code
#
ebpf_source = """
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <uapi/linux/bpf.h>
#define MAX_PACKET <MAX_PACKET_VAL>
#define MAX_KEY <MAX_KEY_VAL>
enum {
EVENT_RECV_UPCALL = 0,
EVENT_DP_UPCALL,
EVENT_OP_FLOW_PUT,
EVENT_OP_FLOW_EXECUTE,
EVENT_OVS_PKT_EXEC,
_EVENT_MAX_EVENT
};
#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
struct event_t {
u32 event;
u32 cpu;
u32 pid;
u32 upcall_type;
u64 ts;
u32 pkt_frag_size;
u32 pkt_size;
u64 key_size;
char comm[TASK_COMM_LEN];
char dpif_name[32];
char dev_name[16];
unsigned char pkt[MAX_PACKET];
unsigned char key[MAX_KEY];
};
BPF_RINGBUF_OUTPUT(events, <BUFFER_PAGE_CNT>);
BPF_TABLE("percpu_array", uint32_t, uint64_t, dropcnt, _EVENT_MAX_EVENT);
static struct event_t *init_event(u32 type)
{
struct event_t *event = events.ringbuf_reserve(sizeof(struct event_t));
if (!event) {
uint64_t *value = dropcnt.lookup(&type);
if (value)
__sync_fetch_and_add(value, 1);
return NULL;
}
event->event = type;
event->ts = bpf_ktime_get_ns();
event->cpu = bpf_get_smp_processor_id();
event->pid = bpf_get_current_pid_tgid();
bpf_get_current_comm(&event->comm, sizeof(event->comm));
return event;
}
int trace__recv_upcall(struct pt_regs *ctx) {
uint32_t upcall_type;
uint64_t addr;
uint64_t size;
bpf_usdt_readarg(2, ctx, &upcall_type);
if (upcall_type != 0)
return 0;
struct event_t *event = init_event(EVENT_RECV_UPCALL);
if (!event)
return 1;
bpf_usdt_readarg(1, ctx, &addr);
bpf_probe_read_str(&event->dpif_name, sizeof(event->dpif_name),
(void *)addr);
event->upcall_type = upcall_type;
bpf_usdt_readarg(4, ctx, &event->pkt_size);
bpf_usdt_readarg(6, ctx, &event->key_size);
if (event->pkt_size > MAX_PACKET)
size = MAX_PACKET;
else
size = event->pkt_size;
bpf_usdt_readarg(3, ctx, &addr);
bpf_probe_read(&event->pkt, size, (void *)addr);
if (event->key_size > MAX_KEY)
size = MAX_KEY;
else
size = event->key_size;
bpf_usdt_readarg(5, ctx, &addr);
bpf_probe_read(&event->key, size, (void *)addr);
events.ringbuf_submit(event, 0);
return 0;
};
int trace__op_flow_put(struct pt_regs *ctx) {
uint64_t addr;
uint64_t size;
struct event_t *event = init_event(EVENT_OP_FLOW_PUT);
if (!event) {
return 1;
}
events.ringbuf_submit(event, 0);
return 0;
};
int trace__op_flow_execute(struct pt_regs *ctx) {
uint64_t addr;
uint64_t size;
struct event_t *event = init_event(EVENT_OP_FLOW_EXECUTE);
if (!event) {
return 1;
}
bpf_usdt_readarg(4, ctx, &event->pkt_size);
if (event->pkt_size > MAX_PACKET)
size = MAX_PACKET;
else
size = event->pkt_size;
bpf_usdt_readarg(3, ctx, &addr);
bpf_probe_read(&event->pkt, size, (void *)addr);
events.ringbuf_submit(event, 0);
return 0;
};
TRACEPOINT_PROBE(openvswitch, ovs_dp_upcall) {
uint64_t size;
struct sk_buff *skb = args->skbaddr;
if (args->upcall_cmd != 1 || skb == NULL || skb->data == NULL)
return 0;
struct event_t *event = init_event(EVENT_DP_UPCALL);
if (!event) {
return 1;
}
event->upcall_type = args->upcall_cmd;
event->pkt_size = args->len;
TP_DATA_LOC_READ_CONST(&event->dpif_name, dp_name,
sizeof(event->dpif_name));
TP_DATA_LOC_READ_CONST(&event->dev_name, dev_name,
sizeof(event->dev_name));
if (skb->data_len != 0) {
event->pkt_frag_size = (skb->len - skb->data_len) & 0xfffffff;
size = event->pkt_frag_size;
} else {
event->pkt_frag_size = 0;
size = event->pkt_size;
}
/* Prevent clang from using register mirroring (or any optimization) on
* the 'size' variable. */
barrier_var(size);
if (size > MAX_PACKET)
size = MAX_PACKET;
bpf_probe_read_kernel(event->pkt, size, skb->data);
events.ringbuf_submit(event, 0);
return 0;
}
int kprobe__ovs_packet_cmd_execute(struct pt_regs *ctx, struct sk_buff *skb)
{
uint64_t size;
if (skb == NULL || skb->data == NULL)
return 0;
struct event_t *event = init_event(EVENT_OVS_PKT_EXEC);
if (!event) {
return 1;
}
events.ringbuf_submit(event, 0);
return 0;
}
"""
#
# Event types
#
class EventType(StrEnum):
RECV_UPCALL = 'dpif_recv__recv_upcall'
DP_UPCALL = 'openvswitch__dp_upcall'
OP_FLOW_PUT = 'dpif_netlink_operate__op_flow_put'
OP_FLOW_EXECUTE = 'dpif_netlink_operate__op_flow_execute'
OVS_PKT_EXEC = 'ktrace__ovs_packet_cmd_execute'
def short_name(name, length=22):
if len(name) < length:
return name
return '..' + name[-(length - 2):]
def from_trace(trace_event):
if trace_event == 0:
return EventType.RECV_UPCALL
elif trace_event == 1:
return EventType.DP_UPCALL
elif trace_event == 2:
return EventType.OP_FLOW_PUT
elif trace_event == 3:
return EventType.OP_FLOW_EXECUTE
elif trace_event == 4:
return EventType.OVS_PKT_EXEC
raise ValueError
#
# Simple event class
#
class Event(object):
    """Base class for traced events; carries the fields shared by all event types."""

    def __init__(self, ts, pid, comm, cpu, event_type):
        self.ts = ts                    # kernel timestamp, nanoseconds
        self.pid = pid
        self.comm = comm
        self.cpu = cpu
        self.event_type = event_type

    def __str__(self):
        label = EventType.short_name(self.event_type)
        return (f"[{label:<22}] {self.comm:<16} {self.pid:8} "
                f"[{self.cpu:03}] {self.ts / 1000000000:18.9f}")

    def __repr__(self):
        suffix = "" if self.__class__.__name__ == "Event" else ", ..."
        return (f"{self.__class__.__name__}({self.ts}, {self.pid}, "
                f"{self.comm}, {self.cpu}, {self.event_type}{suffix})")

    def handle_event(event):
        """Wrap a raw ring-buffer record in an Event; print it unless --quiet."""
        parsed = Event(event.ts, event.pid, event.comm.decode("utf-8"),
                       event.cpu, EventType.from_trace(event.event))
        if not options.quiet:
            print(parsed)
        return parsed

    def get_event_header_str():
        """Return the column-header line matching __str__'s layout."""
        return (f"{'EVENT':<24} {'COMM':<16} {'PID':>8} {'CPU':<3} {'TIME':<18} "
                "EVENT DATA[dpif_name/dp_port/pkt_len/pkt_frag_len]")
#
# dp_upcall event class
#
class DpUpcall(Event):
    """Kernel-side datapath upcall event (openvswitch:ovs_dp_upcall tracepoint)."""

    def __init__(self, ts, pid, comm, cpu, dpif_name, port, pkt, pkt_len,
                 pkt_frag_len):
        super(DpUpcall, self).__init__(ts, pid, comm, cpu, EventType.DP_UPCALL)
        self.dpif_name = dpif_name
        # Resolve the device name to its datapath port number.
        self.dp_port = get_dp_mapping(dpif_name, port)
        if self.dp_port is None:
            #
            # As we only identify interfaces at startup, new interfaces could
            # have been added, causing the lookup to fail. Just something to
            # keep in mind when running this in a dynamic environment.
            #
            raise LookupError("Can't find datapath port mapping!")
        self.pkt = pkt                        # raw (possibly truncated) packet bytes
        self.pkt_len = pkt_len                # full packet length as reported by the kernel
        self.pkt_frag_len = pkt_frag_len      # linear-part length when the skb is fragmented

    def __str__(self):
        return "[{:<22}] {:<16} {:8} [{:03}] {:18.9f}: " \
               "{:<17} {:4} {:4} {:4}".format(self.event_type,
                                              self.comm,
                                              self.pid,
                                              self.cpu,
                                              self.ts / 1000000000,
                                              self.dpif_name,
                                              self.dp_port,
                                              self.pkt_len,
                                              self.pkt_frag_len)

    def handle_event(event):
        """Build a DpUpcall from a raw record; return None if it must be ignored."""
        # Clamp the captured bytes to the user-requested capture size.
        if event.pkt_size < options.packet_size:
            pkt_len = event.pkt_size
        else:
            pkt_len = options.packet_size
        pkt_data = bytes(event.pkt)[:pkt_len]
        # Ignore records with no captured payload.
        if len(pkt_data) <= 0 or event.pkt_size == 0:
            return
        try:
            event = DpUpcall(event.ts, event.pid, event.comm.decode("utf-8"),
                             event.cpu, event.dpif_name.decode("utf-8"),
                             event.dev_name.decode("utf-8"),
                             pkt_data,
                             event.pkt_size,
                             event.pkt_frag_size)
        except LookupError:
            #
            # If we can't do the port lookup, ignore this event.
            #
            return None
        if not options.quiet:
            print(event)
        return event
#
# recv_upcall event class
#
class RecvUpcall(Event):
    def __init__(self, ts, pid, comm, cpu, dpif_name, key, pkt, pkt_len):
        """Userspace receipt of an upcall; resolves the input port from the netlink key."""
        super(RecvUpcall, self).__init__(ts, pid, comm, cpu,
                                         EventType.RECV_UPCALL)
        # The USDT probe reports names like "system@ovs-system"; strip the prefix.
        if dpif_name.startswith("system@"):
            dpif_name = dpif_name[len("system@"):]
        self.dpif_name = dpif_name
        # Extract the input port from the netlink-encoded flow key.
        nla = RecvUpcall.decode_nlm(key, dump=False)
        if "OVS_KEY_ATTR_IN_PORT" in nla:
            self.dp_port = struct.unpack('=L', nla["OVS_KEY_ATTR_IN_PORT"])[0]
        elif "OVS_KEY_ATTR_TUNNEL" in nla:
            # Tunnel traffic carries no plain in-port; use the sentinel.
            self.dp_port = DP_TUNNEL_PORT
        else:
            self.dp_port = RecvUpcall.get_system_dp_port(self.dpif_name)
        if self.dp_port is None:
            raise LookupError("Can't find RecvUpcall dp port mapping!")
        self.pkt = pkt
        self.pkt_len = pkt_len
    def __str__(self):
        """One-line console rendering: common fields plus dpif name, port and length."""
        return "[{:<22}] {:<16} {:8} [{:03}] {:18.9f}: {:<17} {:4} {:4}". \
            format(
                self.event_type,
                self.comm,
                self.pid,
                self.cpu,
                self.ts / 1000000000,
                self.dpif_name,
                self.dp_port,
                self.pkt_len)
def get_system_dp_port(dpif_name):
dp_map = get_dp_mapping(dpif_name, "ovs-system", return_map=True)
if dpif_name not in dp_map:
return None
try:
return dp_map[dpif_name]["ovs-system"]
except KeyError:
return None
def decode_nlm(msg, indent=4, dump=True):
bytes_left = len(msg)
result = {}
while bytes_left:
if bytes_left < 4:
if dump:
print("{}WARN: decode truncated; can't read header".format(
' ' * indent))
break
nla_len, nla_type = struct.unpack("=HH", msg[:4])
if nla_len < 4:
if dump:
print("{}WARN: decode truncated; nla_len < 4".format(
' ' * indent))
break
nla_data = msg[4:nla_len]
trunc = ""
if nla_len > bytes_left:
trunc = "..."
nla_data = nla_data[:(bytes_left - 4)]
if RecvUpcall.get_ovs_key_attr_str(nla_type) == \
"OVS_KEY_ATTR_TUNNEL":
#
# If we have | |
"""
TITLE: asset.py
AUTHOR: <NAME> & <NAME>
VERSION: 2020-08-26
DESCRIPTION: Defines the Asset and AssetList classes.
Asset:
Attributes:
url: str, the URL to download an asset. Ex: https://ca-menlopark.civicplus.com/AgendaCenter/ViewFile/Agenda/_12082020-3549
asset_name: str, the title of an asset. Ex: City Council Regular Meeting
committee_name: str, the name of the committee that generated the asset. Ex: City Council
place: str, the name of the place associated with the asset in lowercase with spaces and punctuation removed. Ex: menlopark
state_or_province: str, the two-letter abbreviation for the state or province associated with an asset. Ex: ca
asset_type: str, one of the following strings: 'agenda', 'minutes', 'audio', 'video', 'agenda_packet', 'captions'
meeting_date: datetime.date corresponding to the time the meeting was held or today if no date given
meeting_time: datetime.time corresponding to the time the meetings was held or midnight if no time given
meeting_id: str, the name of the platform being scraped, state_or_province and place
followed by the unique meeting ID the platform assigned to the meeting
Ex: civicplus_ca_menlopark_12082020-3549
scraped_by: str, describes the module and version that produced the asset. Ex: 'civicplus.py_1.0.0'
content_type: str, the file type of the asset as given by HTTP headers. Ex: 'application/pdf'
content_length: str, the size of the asset in bytes
Public methods:
download: downloads an asset to a given target_path
append_to_csv: writes a new line to a csv containing metadata about a given asset
AssetList:
Attributes:
asset_args: dict, a dictionary containing metadata corresponding to the attributes of an Asset object.
Public methods:
download: a wrapper around Asset.download, it downloads each asset instance in an AssetList
to_csv: a wrapper around append_to_csv, it writes out a csv containing metadata about each asset instance
in an AssetList
From Python (example):
from civic_scraper.scrapers import SUPPORTED_SCRAPERS
    cp = SUPPORTED_SCRAPERS['civicplus'] # Or choose another supported scraper
site = cp(base_url="https://ca-eastpaloalto.civicplus.com/AgendaCenter") # Or choose another url
metadata = site.scrape("2020-01-01", "2020-05-01") # Optional: Choose start_date, end_date and other parameters
metadata.download(target_dir="test") # Downloads assets to the directory "test". Can also choose optional file_size and file_type
metadata.to_csv(target_path="test.csv") # Downloads csv titled "test.csv"
"""
# Libraries
import csv
import os
import requests
import re
from collections import OrderedDict
import datetime
import logging
import sys
import mimetypes
# Parameters
SUPPORTED_ASSET_TYPES = ['agenda', 'minutes', 'audio', 'video', 'agenda_packet', 'captions']
# Logging
module_logger = logging.getLogger('civic_scraper.asset')
# Code
class Asset(object):
    """A single scraped asset (agenda, minutes, audio, ...) and its metadata.

    Validates its inputs on construction, knows how to download itself and
    how to append its metadata as a row to a CSV file.
    """

    def __init__(
            self,
            url: str,
            asset_name: str = None,
            committee_name: str = None,
            place: str = None,
            state_or_province: str = None,
            asset_type: str = None,
            meeting_date: datetime.date = None,
            meeting_time: datetime.time = None,
            meeting_id: str = None,
            scraped_by: str = None,
            content_type: str = None,
            content_length: str = None
    ):
        """
        Create an instance of the Asset class.

        Exits the process (sys.exit) if url, state_or_province, asset_type
        or scraped_by fails validation, matching the original fail-fast
        behavior.
        """
        self.logger = logging.getLogger('civic_scraper.asset.Asset')
        self.logger.info('creating an instance of Asset')

        # The original check chained str.find with 'and', which also accepted
        # URLs merely *containing* 'https://'. The printed message states the
        # real contract: the URL must start with the scheme.
        url_valid = url.startswith(('http://', 'https://'))
        if not url_valid:
            print("URL must start with 'http://' or 'https://'")

        state_or_province_valid = (
            len(state_or_province) == 2 and state_or_province.isalpha())
        if not state_or_province_valid:
            print("State or province abbreviation must be exactly two letters.")

        asset_type_valid = asset_type in SUPPORTED_ASSET_TYPES
        if not asset_type_valid:
            print("The asset_type is: ", asset_type)
            print("The value of asset_type must be one of the following: ", SUPPORTED_ASSET_TYPES)

        # Expected shape: 'module.py_1.0.0'
        scraped_by_valid = re.match(
            r".+\.py_\d{1}\.\d{1}\.\d{1}", scraped_by) is not None
        if not scraped_by_valid:
            print("The format of scraped_by should be 'module.py_1.0.0'.")

        if not (url_valid and state_or_province_valid
                and asset_type_valid and scraped_by_valid):
            # NOTE(review): sys.exit() in a constructor is drastic; kept for
            # backward compatibility, but raising ValueError would be kinder.
            print("Cannot initialize Asset object. Invalid input.")
            sys.exit()

        self.url = url
        self.asset_name = asset_name
        self.committee_name = committee_name
        self.place = place
        self.state_or_province = state_or_province
        self.asset_type = asset_type
        self.meeting_date = meeting_date
        self.meeting_time = meeting_time
        self.meeting_id = meeting_id
        self.scraped_by = scraped_by
        self.content_type = content_type
        self.content_length = content_length

    def download(self, target_dir=None, file_size=None, asset_list=None,
                 file_type='both'):
        """
        Downloads this asset into a target directory.

        Input:
            target_dir: str, target directory name. Default is the current
                working directory, resolved at call time (the old default of
                os.getcwd() in the signature was frozen at import time).
            file_size: float, maximum file size in megabytes. Default is None
                (no limit).
            asset_list: list of strings, asset types eligible for download.
                Default is all supported types (a None sentinel avoids using
                the mutable module-level list as a shared default).
            file_type: 'html', 'pdf' or 'both'. Default is 'both'.
        Output: asset in target directory
        Returns: Full path to the downloaded file, or None when the asset
            was filtered out. (The original never returned the path despite
            its docstring promising one.)
        """
        self.logger.info('downloading an instance of Asset')
        if target_dir is None:
            target_dir = os.getcwd()
        if asset_list is None:
            asset_list = SUPPORTED_ASSET_TYPES

        # NOTE(review): guess_extension raises TypeError if content_type is
        # None -- same as the original; confirm callers always populate it.
        file_extension = mimetypes.guess_extension(self.content_type)
        file_name = "{}_{}_{}_{}{}".format(
            self.place, self.state_or_province, self.asset_type,
            self.meeting_date, file_extension)
        file_size = self._mb_to_bytes(file_size)

        # Audio/video assets are downloaded regardless of file_type; other
        # assets must match the content type implied by file_type.
        if file_type == 'both':
            return self._handle_asset(self.url, target_dir, asset_list,
                                      file_name, file_size)
        wanted = 'application/pdf' if file_type == 'pdf' else 'text/html'
        if self.asset_type in ('audio', 'video') or self.content_type == wanted:
            return self._handle_asset(self.url, target_dir, asset_list,
                                      file_name, file_size)
        return None

    def append_to_csv(self, target_path, write_header=False):
        """
        Append the asset metadata in csv format to target_path.
        If write_header is True, first write a line containing the header
        names. If false, only write one line containing the values.

        Input:
            target_path: A required path for the csv
            write_header: A flag indicating whether to write the csv header or not. Default is False.
        Output: csv written to the target_path
        Returns: Nothing.
        """
        self.logger.info('appending row to csv')
        metadata_dict = OrderedDict([
            ('place', self.place),
            ('state_or_province', self.state_or_province),
            ('meeting_date', self.meeting_date),
            ('meeting_time', self.meeting_time),
            ('committee_name', self.committee_name),
            ('meeting_id', self.meeting_id),
            ('asset_type', self.asset_type),
            ('url', self.url),
            ('scraped_by', self.scraped_by),
            ('content_type', self.content_type),
            ('content_length', self.content_length),
        ])
        # newline='' is required by the csv module so the writer controls
        # line endings (avoids blank rows on platforms with \r\n translation).
        with open(target_path, 'a', newline='') as file:
            dict_writer = csv.DictWriter(file, metadata_dict.keys())
            if write_header:
                dict_writer.writeheader()
            if self.url is not None:
                dict_writer.writerow(metadata_dict)
        self.logger.info('done appending row to csv')

    def __repr__(self):
        return f'Asset(url: {self.url}, asset_name: {self.asset_name}, committee_name: {self.committee_name}, place: {self.place}, state_or_province: {self.state_or_province}, asset_type: {self.asset_type}, meeting_date: {self.meeting_date}, meeting_time: {self.meeting_time}, meeting_id: {self.meeting_id}, scraped_by: {self.scraped_by}, content_type: {self.content_type}, content_length: {self.content_length})'

    def _handle_asset(self, asset, target_dir, asset_list, file_name, file_size):
        """
        Helper for download(): apply type and size filters, then fetch.
        Returns the downloaded file path, or None when filtered out.
        """
        if self.asset_type not in asset_list:
            return None
        # NOTE(review): float(self.content_length) raises TypeError when
        # content_length is None and a size limit is set -- same as the
        # original; confirm content_length is always populated by scrapers.
        if file_size is not None and float(self.content_length) > float(file_size):
            return None
        return self._download_file(asset, target_dir, file_name)

    def _download_file(self, url, target_dir, file_name):
        """Fetch url into target_dir/file_name, creating the directory if needed."""
        response = requests.get(url, allow_redirects=True)
        if not os.path.isdir(target_dir):
            self.logger.info('Making directory for asset')
            # makedirs(exist_ok=True) is race-free, unlike the old os.mkdir
            os.makedirs(target_dir, exist_ok=True)
        full_path = os.path.join(target_dir, file_name)
        with open(full_path, 'wb') as file:
            file.write(response.content)
        self.logger.info('Asset downloaded')
        return full_path

    def _mb_to_bytes(self, file_size):
        """
        Converts file_size from megabytes to bytes.

        Input: File size in megabytes, or None.
        Returns: File size in bytes, or None if file_size is None.
        """
        if file_size is None:
            return None
        return float(file_size) * 1e6
class AssetCollection(object):
def __init__(self, assets):
"""
Initialize AssetCollection
Input:
assets: a list of Asset instances.
"""
self.logger = logging.getLogger('civic_scraper.asset.AssetCollection')
self.logger.info('creating an instance of AssetCollection')
for asset in assets:
assert isinstance(asset, Asset)
self.assets = assets
def __iter__(self):
return iter(self.assets)
def __next__(self):
return next(self)
    def __len__(self):
        """Return the number of assets in the collection."""
        return len(self.assets)
def __repr__(self):
return f'AssetCollection({self.assets})'
def download(self, target_dir=os.getcwd(), file_size=None,
asset_list=SUPPORTED_ASSET_TYPES, file_type='both'):
"""
Write assets to target_dir.
Input:
target_dir: str, the directory to which a user downloads assets.
Default is the current working directory.
file_size: float, an optional parameter to limit the size of files
to be downloaded given in megabytes. Default is None.
asset_list: list of strings, an optional parameter to limit the type
of files to be downloaded. Default is all file types.
Valid file types are: 'agenda', 'minutes', 'audio', 'video',
'agenda_packet', and 'captions'.
file_type: string taking three possible values: 'html' (to download only html documents),
'pdf' (to download only pdf documents or 'both' (to download both documents).
Default is 'both'.
Output: Downloaded assets.
Returns: List of file paths to downloaded assets.
"""
self.logger.info("running AssetCollection.download")
downloaded_file_paths = []
for item in self.assets:
downloaded_file_path = item.download(target_dir, file_size, asset_list, file_type)
downloaded_file_paths.append(downloaded_file_path)
self.logger.info("done running AssetCollection.download")
return downloaded_file_paths
def to_csv(
self,
target_path=None,
appending=False,
):
"""
Write metadata about the asset list to a csv.
Input:
target_path: str, optional parameter giving the path to the csv to be created.
If target_path is given, write a file to that path.
If not given, create a file in the current working directory.
appending: bool, optional flag for whether or not to overwrite an | |
"""
@package mi.instrument.sunburst.driver
@file marine-integrations/mi/instrument/sunburst/driver.py
@author <NAME>, <NAME> & <NAME>
@brief Base Driver for the SAMI instruments
Release notes:
Sunburst Instruments SAMI2-PCO2 partial CO2 & SAMI2-PH pH underwater
sensors.
This is the base driver that contains common code for the SAMI2
instruments SAMI2-PCO2 & SAMI2-PH since they have the same basic
SAMI2 operating structure.
Some of this code also derives from initial code developed by Chris
Center
"""
__author__ = '<NAME>, <NAME> & <NAME>'
__license__ = 'Apache 2.0'
import re
import time
import datetime
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import InstrumentTimeoutException
from mi.core.driver_scheduler import \
DriverSchedulerConfigKey, \
TriggerType
from mi.core.util import dict_equal
from mi.core.common import BaseEnum, Units
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.instrument_protocol import DEFAULT_CMD_TIMEOUT
from mi.core.instrument.instrument_protocol import DEFAULT_WRITE_DELAY
from mi.core.instrument.instrument_protocol import RE_PATTERN
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import NotImplementedException
from mi.core.exceptions import SampleException
###
# Driver Constant Definitions
###

# newline.
SAMI_NEWLINE = '\r'
# default command timeout.
SAMI_DEFAULT_TIMEOUT = 10

UNIX_EPOCH = datetime.datetime(1970, 1, 1)
SAMI_EPOCH = datetime.datetime(1904, 1, 1)
SAMI_UNIX_OFFSET = UNIX_EPOCH - SAMI_EPOCH
FIVE_YEARS_IN_SECONDS = 0x0968A480
ONE_YEAR_IN_SECONDS = 0x01E13380

# Time delay between retrieving system time and setting SAMI time. Multiple commands are sent before the time.
# Each command has a wakeup which takes 1 second.
SAMI_TIME_WAKEUP_DELAY = 8
# Time between sending newlines to wakeup SAMI
SAMI_WAKEUP_DELAY = 0.5

# Length of configuration string with '0' padding
# used to calculate number of '0' padding
SAMI_CONFIG_WITH_0_PADDING = 232
# Length of configuration string with 'f' padding
# used to calculate number of 'f' padding
SAMI_CONFIG_WITH_0_AND_F_PADDING = 512
# Terminator at the end of a configuration string
SAMI_CONFIG_TERMINATOR = '00'

# Value added to pump duration for timeout
SAMI_PUMP_TIMEOUT_OFFSET = 5.0
# Number of times to retry discovery in WAITING state
SAMI_DISCOVERY_RETRY_COUNT = 6
# Number of seconds to delay before retrying discovery in waiting state
SAMI_DISCOVERY_RETRY_DELAY = 20.0
# 1/8 second
SAMI_PUMP_DURATION_UNITS = 0.125

###
# Driver RegEx Definitions
###

# Regular Status Strings (produced every 1 Hz or in response to S0 command)
SAMI_REGULAR_STATUS_REGEX = (
    r'[:]' +  # status message identifier
    '([0-9A-Fa-f]{8})' +  # timestamp (seconds since 1904)
    '([0-9A-Fa-f]{4})' +  # status bit field
    '([0-9A-Fa-f]{6})' +  # number of data records recorded
    '([0-9A-Fa-f]{6})' +  # number of errors
    '([0-9A-Fa-f]{6})' +  # number of bytes stored
    '([0-9A-Fa-f]{2})' +  # unique id
    SAMI_NEWLINE)
SAMI_REGULAR_STATUS_REGEX_MATCHER = re.compile(SAMI_REGULAR_STATUS_REGEX)

SAMI_BATTERY_VOLTAGE_REGEX = (
    r'([0-9A-Fa-f]{4})' +
    SAMI_NEWLINE)
BATTERY_VOLTAGE_REGEX_MATCHER = re.compile(SAMI_BATTERY_VOLTAGE_REGEX)

SAMI_THERMISTOR_VOLTAGE_REGEX = (
    r'([0-9A-Fa-f]{4})' +
    SAMI_NEWLINE)
# BUG FIX: this matcher was previously compiled from
# SAMI_BATTERY_VOLTAGE_REGEX (copy-paste slip). The two patterns are
# textually identical today, so behavior is unchanged, but compiling the
# correct name lets them diverge safely in the future.
SAMI_THERMISTOR_VOLTAGE_REGEX_MATCHER = re.compile(SAMI_THERMISTOR_VOLTAGE_REGEX)

# Error records
SAMI_ERROR_REGEX = r'\?([0-9A-Fa-f]{2})' + SAMI_NEWLINE
SAMI_ERROR_REGEX_MATCHER = re.compile(SAMI_ERROR_REGEX)

# Newline returned from SAMI
SAMI_NEW_LINE_REGEX = r'(.*)' + SAMI_NEWLINE
SAMI_NEW_LINE_REGEX_MATCHER = re.compile(SAMI_NEW_LINE_REGEX)
###
# Begin Classes
###
class SamiScheduledJob(BaseEnum):
    """
    Schedulable jobs: names under which recurring driver jobs are
    registered with the driver scheduler.
    """
    AUTO_SAMPLE = 'auto_sample'
    ACQUIRE_STATUS = 'acquire_status'
class SamiDataParticleType(BaseEnum):
    """
    Base class Data particle types produced by a SAMI instrument. Should be
    sub-classed in the specific instrument driver
    """
    RAW = CommonDataParticleType.RAW
    # NOTE(review): stream names carry a 'pco2w_' prefix even though this is
    # the shared base class -- presumably the PH driver overrides these;
    # confirm in the subclasses.
    REGULAR_STATUS = 'pco2w_regular_status'
    BATTERY_VOLTAGE = 'pco2w_battery_voltage'
    THERMISTOR_VOLTAGE = 'pco2w_thermistor_voltage'
class SamiProtocolState(BaseEnum):
    """
    Instrument protocol states. Generic DriverProtocolState values plus
    SAMI-specific states for discovery (WAITING -- see the
    SAMI_DISCOVERY_RETRY_* constants), sampling and reagent flushing.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    WAITING = 'PROTOCOL_STATE_WAITING'
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
    POLLED_SAMPLE = 'PROTOCOL_STATE_POLLED_SAMPLE'
    SCHEDULED_SAMPLE = 'PROTOCOL_STATE_SCHEDULED_SAMPLE'
    REAGENT_FLUSH = 'PROTOCOL_STATE_REAGENT_FLUSH'
class SamiProtocolEvent(BaseEnum):
    """
    Protocol events: generic DriverEvent values plus SAMI-specific events
    (EXECUTE/SUCCESS/TIMEOUT for command completion, REAGENT_FLUSH).
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    DISCOVER = DriverEvent.DISCOVER
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    EXECUTE = 'PROTOCOL_EVENT_EXECUTE'  # command execute
    SUCCESS = 'PROTOCOL_EVENT_SUCCESS'  # command success
    TIMEOUT = 'PROTOCOL_EVENT_TIMEOUT'  # command timeout
    REAGENT_FLUSH = 'DRIVER_EVENT_REAGENT_FLUSH'
class SamiCapability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    ACQUIRE_SAMPLE = SamiProtocolEvent.ACQUIRE_SAMPLE
    ACQUIRE_STATUS = SamiProtocolEvent.ACQUIRE_STATUS
    START_AUTOSAMPLE = SamiProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = SamiProtocolEvent.STOP_AUTOSAMPLE
    REAGENT_FLUSH = SamiProtocolEvent.REAGENT_FLUSH
    # NOTE(review): the four below reference DriverEvent directly while the
    # five above go through SamiProtocolEvent; the values are identical
    # (SamiProtocolEvent aliases DriverEvent), but referencing one enum
    # consistently would be clearer.
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
class SamiParameter(DriverParameter):
    """
    Base SAMI instrument parameters. Subclass and extend this Enum with device
    specific parameters in subclass 'Parameter'.
    """
    # NOTE(review): these appear to correspond to fields of the SAMI
    # configuration string (see the SAMI_CONFIG_* constants) -- confirm
    # against the instrument configuration reference.
    LAUNCH_TIME = 'launch_time'
    START_TIME_FROM_LAUNCH = 'start_time_from_launch'
    STOP_TIME_FROM_START = 'stop_time_from_start'
    MODE_BITS = 'mode_bits'
    SAMI_SAMPLE_INTERVAL = 'sami_sample_interval'
    SAMI_DRIVER_VERSION = 'sami_driver_version'
    SAMI_PARAMS_POINTER = 'sami_params_pointer'
    DEVICE1_SAMPLE_INTERVAL = 'device1_sample_interval'
    DEVICE1_DRIVER_VERSION = 'device1_driver_version'
    DEVICE1_PARAMS_POINTER = 'device1_params_pointer'
    DEVICE2_SAMPLE_INTERVAL = 'device2_sample_interval'
    DEVICE2_DRIVER_VERSION = 'device2_driver_version'
    DEVICE2_PARAMS_POINTER = 'device2_params_pointer'
    DEVICE3_SAMPLE_INTERVAL = 'device3_sample_interval'
    DEVICE3_DRIVER_VERSION = 'device3_driver_version'
    DEVICE3_PARAMS_POINTER = 'device3_params_pointer'
    PRESTART_SAMPLE_INTERVAL = 'prestart_sample_interval'
    PRESTART_DRIVER_VERSION = 'prestart_driver_version'
    PRESTART_PARAMS_POINTER = 'prestart_params_pointer'
    GLOBAL_CONFIGURATION = 'global_configuration'
    AUTO_SAMPLE_INTERVAL = 'auto_sample_interval'
    REAGENT_FLUSH_DURATION = 'reagent_flush_duration'
# make sure to extend these in the individual drivers with the
# portions of the configuration that are unique to each.
class Prompt(BaseEnum):
    """
    Device I/O prompts.
    """
    # The boot prompt is the prompt of the SAMI2's Lower Level operating
    # system. If this prompt is reached, it means the SAMI2 instrument
    # software has crashed and needs to be restarted with the command
    # 'u'. If this has occurred, the instrument has been reset and will
    # be in an unconfigured state.
    BOOT_PROMPT = '7.7Boot>'
class SamiInstrumentCommand(BaseEnum):
    """
    Base SAMI instrument command strings. Subclass and extend these with device
    specific commands in subclass 'InstrumentCommand'.

    This applies to the PCO2 where an additional ACQUIRE_SAMPLE
    command is required for device 1, the external pump.
    """
    # NOTE(review): several commands carry a '5A' suffix -- presumably a
    # confirmation byte required for destructive/state-changing operations;
    # confirm against the SAMI2 command reference.
    SAMI_GET_STATUS = 'S0'
    SAMI_START_STATUS = 'F0'
    SAMI_STOP_STATUS = 'F5A'
    SAMI_GET_CONFIG = 'L'
    SAMI_SET_CONFIG = 'L5A'
    SAMI_ERASE_ALL = 'E5A'
    SAMI_GET_BATTERY_VOLTAGE = 'B'
    SAMI_GET_THERMISTOR_VOLTAGE = 'T'
    SAMI_START = 'G5A'
    SAMI_STOP = 'Q5A'
    SAMI_ACQUIRE_SAMPLE = 'R'
    SAMI_ESCAPE_BOOT = 'u'  # restarts instrument software from BOOT_PROMPT
    SAMI_PUMP_OFF = 'P'
###############################################################################
# Data Particles
###############################################################################
class SamiBatteryVoltageDataParticleKey(BaseEnum):
    """
    Battery voltage data particle key
    """
    # Single value: battery voltage parsed from the 4-hex-digit reading.
    BATTERY_VOLTAGE = 'pco2w_battery_voltage'
class SamiBatteryVoltageDataParticle(DataParticle):
    """
    Routines for parsing a raw battery voltage reading into a data
    particle structure.
    @throw SampleException If there is a problem with sample creation
    """
    _data_particle_type = SamiDataParticleType.BATTERY_VOLTAGE

    def _build_parsed_values(self):
        """Extract the 4-hex-digit battery voltage from raw_data."""
        match = BATTERY_VOLTAGE_REGEX_MATCHER.match(self.raw_data)
        if match is None:
            # NOTE(review): self.decoded_raw is not assigned anywhere
            # visible here -- confirm DataParticle provides it.
            raise SampleException("No regex match of parsed sample data: [%s]" %
                                  self.decoded_raw)
        self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.DRIVER_TIMESTAMP
        return [{DataParticleKey.VALUE_ID: SamiBatteryVoltageDataParticleKey.BATTERY_VOLTAGE,
                 DataParticleKey.VALUE: int(match.group(1), 16)}]
class SamiThermistorVoltageDataParticleKey(BaseEnum):
    """
    Thermistor voltage data particle key
    """
    # Single value: thermistor voltage parsed from the 4-hex-digit reading.
    THERMISTOR_VOLTAGE = 'pco2w_thermistor_voltage'
class SamiThermistorVoltageDataParticle(DataParticle):
    """
    Routines for parsing a raw thermistor voltage reading into a data
    particle structure.
    @throw SampleException If there is a problem with sample creation
    """
    _data_particle_type = SamiDataParticleType.THERMISTOR_VOLTAGE

    def _build_parsed_values(self):
        """Extract the 4-hex-digit thermistor voltage from raw_data."""
        match = SAMI_THERMISTOR_VOLTAGE_REGEX_MATCHER.match(self.raw_data)
        if match is None:
            # NOTE(review): self.decoded_raw is not assigned anywhere
            # visible here -- confirm DataParticle provides it.
            raise SampleException("No regex match of parsed sample data: [%s]" %
                                  self.decoded_raw)
        self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.DRIVER_TIMESTAMP
        return [{DataParticleKey.VALUE_ID: SamiThermistorVoltageDataParticleKey.THERMISTOR_VOLTAGE,
                 DataParticleKey.VALUE: int(match.group(1), 16)}]
class SamiRegularStatusDataParticleKey(BaseEnum):
    """
    Data particle key for the regular (1 Hz or regular) status messages.
    """
    # Seconds since the last configuration (first 8-hex-digit field).
    ELAPSED_TIME_CONFIG = "elapsed_time_config"
    # The keys from CLOCK_ACTIVE through POWER_ON_INVALID are individual
    # flags unpacked from the 4-hex-digit status bit field.
    CLOCK_ACTIVE = 'clock_active'
    RECORDING_ACTIVE = 'recording_active'
    RECORD_END_ON_TIME = 'record_end_on_time'
    RECORD_MEMORY_FULL = 'record_memory_full'
    RECORD_END_ON_ERROR = 'record_end_on_error'
    DATA_DOWNLOAD_OK = 'data_download_ok'
    FLASH_MEMORY_OPEN = 'flash_memory_open'
    BATTERY_LOW_PRESTART = 'battery_low_prestart'
    BATTERY_LOW_MEASUREMENT = 'battery_low_measurement'
    BATTERY_LOW_BANK = 'battery_low_bank'
    BATTERY_LOW_EXTERNAL = 'battery_low_external'
    EXTERNAL_DEVICE1_FAULT = 'external_device1_fault'
    EXTERNAL_DEVICE2_FAULT = 'external_device2_fault'
    EXTERNAL_DEVICE3_FAULT = 'external_device3_fault'
    FLASH_ERASED = 'flash_erased'
    POWER_ON_INVALID = 'power_on_invalid'
    # Counter and identity fields from the remaining regex groups.
    NUM_DATA_RECORDS = 'num_data_records'
    NUM_ERROR_RECORDS = 'num_error_records'
    NUM_BYTES_STORED = 'num_bytes_stored'
    UNIQUE_ID = 'unique_id'
class SamiRegularStatusDataParticle(DataParticle):
"""
Routines for parsing raw data into an regular status data particle
structure.
@throw SampleException If there is a problem with sample creation
"""
_data_particle_type = SamiDataParticleType.REGULAR_STATUS
def _build_parsed_values(self):
"""
Parse regular status values from raw data into a dictionary
"""
### Regular Status Messages
# Produced in response to S0 command, or automatically at 1 Hz. All
# regular status messages are preceded by the ':' character and
# terminate with a '/r'. Sample string:
#
# :CEE90B1B004100000100000000021254
#
# These messages consist of the time since the last configuration,
# status flags, the number of data records, the number of error
# records, the number of bytes stored (including configuration bytes),
# and the instrument's unique id.
###
matched = SAMI_REGULAR_STATUS_REGEX_MATCHER.match(self.raw_data)
if not matched:
raise SampleException("No regex match of parsed sample data: [%s]" %
self.decoded_raw)
particle_keys = [SamiRegularStatusDataParticleKey.ELAPSED_TIME_CONFIG,
SamiRegularStatusDataParticleKey.CLOCK_ACTIVE,
SamiRegularStatusDataParticleKey.RECORDING_ACTIVE,
SamiRegularStatusDataParticleKey.RECORD_END_ON_TIME,
SamiRegularStatusDataParticleKey.RECORD_MEMORY_FULL,
SamiRegularStatusDataParticleKey.RECORD_END_ON_ERROR,
SamiRegularStatusDataParticleKey.DATA_DOWNLOAD_OK,
SamiRegularStatusDataParticleKey.FLASH_MEMORY_OPEN,
SamiRegularStatusDataParticleKey.BATTERY_LOW_PRESTART,
SamiRegularStatusDataParticleKey.BATTERY_LOW_MEASUREMENT,
SamiRegularStatusDataParticleKey.BATTERY_LOW_BANK,
SamiRegularStatusDataParticleKey.BATTERY_LOW_EXTERNAL,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE1_FAULT,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE2_FAULT,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE3_FAULT,
SamiRegularStatusDataParticleKey.FLASH_ERASED,
SamiRegularStatusDataParticleKey.POWER_ON_INVALID,
SamiRegularStatusDataParticleKey.NUM_DATA_RECORDS,
SamiRegularStatusDataParticleKey.NUM_ERROR_RECORDS,
SamiRegularStatusDataParticleKey.NUM_BYTES_STORED,
SamiRegularStatusDataParticleKey.UNIQUE_ID]
result = []
grp_index = 1 # used to index through match groups, starting at 1
bit_index = 0 # used to index through the bit fields represented by
# the two bytes after CLOCK_ACTIVE.
for key in particle_keys:
if key in [SamiRegularStatusDataParticleKey.CLOCK_ACTIVE,
SamiRegularStatusDataParticleKey.RECORDING_ACTIVE,
SamiRegularStatusDataParticleKey.RECORD_END_ON_TIME,
SamiRegularStatusDataParticleKey.RECORD_MEMORY_FULL,
SamiRegularStatusDataParticleKey.RECORD_END_ON_ERROR,
SamiRegularStatusDataParticleKey.DATA_DOWNLOAD_OK,
SamiRegularStatusDataParticleKey.FLASH_MEMORY_OPEN,
SamiRegularStatusDataParticleKey.BATTERY_LOW_PRESTART,
SamiRegularStatusDataParticleKey.BATTERY_LOW_MEASUREMENT,
SamiRegularStatusDataParticleKey.BATTERY_LOW_BANK,
SamiRegularStatusDataParticleKey.BATTERY_LOW_EXTERNAL,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE1_FAULT,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE2_FAULT,
SamiRegularStatusDataParticleKey.EXTERNAL_DEVICE3_FAULT,
SamiRegularStatusDataParticleKey.FLASH_ERASED,
SamiRegularStatusDataParticleKey.POWER_ON_INVALID]:
# if the keys match values represented by the bits in the two
# byte | |
"""Common get info functions for Interface"""
# Python
import re
import logging
import copy
import ipaddress
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.utils.timeout import Timeout
from genie.libs.sdk.libs.utils.normalize import GroupKeys
from genie.utils import Dq
# Pyats
from pyats.utils.objects import find, R
# unicon
from unicon.core.errors import SubCommandFailure
log = logging.getLogger(__name__)
def get_interface_address_mask_running_config(device, interface,
                                              address_family):
    """ Get interface address and mask from show running-config interface {interface}

    Args:
        device ('obj'): Device object
        interface ('str'): Interface name
        address_family ('str'): Address family

    Returns:
        (Interface IP address, Interface Mask)

    Raise:
        None
    """
    try:
        output = device.execute('show configuration interfaces {interface}'
                                .format(interface=interface))
    except SubCommandFailure:
        return None, None

    if not output:
        return None, None

    if address_family in ['ipv4', 'inet']:
        # address 192.168.0.1/32
        pattern = re.compile(r'address +(?P<ip>[\d\.]+)/(?P<mask>\d+);')
    elif address_family in ['ipv6', 'inet6']:
        # address 2001:db8:1005:4401::b/128
        pattern = re.compile(r'address +(?P<ip>[\w\:]+)/(?P<mask>\d+);')
    else:
        log.info(
            'Must provide one of the following address families: "ipv4", "ipv6", "inet", "inet6"'
        )
        return None, None

    # First occurrence wins, same as indexing the findall result.
    match = pattern.search(output)
    if not match:
        return None, None
    return (match.group('ip'),
            device.api.int_to_mask(int(match.group('mask'))))
def get_interface_ip_address(device, interface, address_family, link_local=False,
                             return_all=False):
    """ Get interface ip address from device

    Args:
        interface('str'): Interface to get address
        device ('obj'): Device object
        address_family ('str'): Address family
        link_local ('bool'): Return the IPv6 link-local address
        return_all ('bool'): return List of values

    Returns:
        None
        ip_address ('str'): If has multiple addresses
                            will return the first one.

    Raises:
        None
    """
    if address_family not in ["ipv4", "ipv6", "inet", "inet6"]:
        log.info('Must provide one of the following address families: '
                 '"ipv4", "ipv6", "inet", "inet6"')
        return

    # Normalize to the Junos family names used by the parser output.
    if address_family == "ipv4":
        address_family = "inet"
    elif address_family == "ipv6":
        address_family = "inet6"

    try:
        out = device.parse(
            'show interfaces terse {interface}'.format(interface=interface))
    except SchemaEmptyParserError:
        return

    # Example dictionary structure:
    #         {
    #             "ge-0/0/0.0": {
    #                 "protocol": {
    #                     "inet": {
    #                         "10.189.5.93/30": {
    #                             "local": "10.189.5.93/30"
    #                         }
    #                     },
    #                     "inet6": {
    #                         "2001:db8:223c:2c16::1/64": {
    #                             "local": "2001:db8:223c:2c16::1/64"
    #                         },
    #                         "fe80::250:56ff:fe8d:c829/64": {
    #                             "local": "fe80::250:56ff:fe8d:c829/64"
    #                         }
    #                     },
    #                 }
    #             }
    #         }
    found = Dq(out).contains(interface).contains(address_family). \
        get_values("local")
    if not found:
        return None
    if return_all:
        return found
    if link_local:
        # BUG FIX: the original returned found[1], which raised IndexError
        # when only one address was present and silently returned a
        # non-link-local address if ordering differed. Select the fe80::/10
        # entry explicitly instead.
        for address in found:
            if address.lower().startswith('fe80'):
                return address
        return None
    return found[0]
def get_address_without_netmask(device, interface, address_family,
                                return_all=False, link_local=False):
    """ Get interface ip address without mask

    Args:
        interface('str'): Interface to get address
        device ('obj'): Device object
        address_family ('str'): Address family
        return_all ('bool'): return List of values. Defaults to False
        link_local (`bool`): flag to get link-local address for IPv6

    Returns:
        None
        ip_address ('str'): If has multiple addresses
                            will return the first one
                            (or all of them, without masks, when
                            return_all=True).

    Raises:
        None
    """
    # BUG FIX: the original accepted and documented return_all but never
    # forwarded it, so callers always got a single address.
    result = get_interface_ip_address(
        device=device,
        interface=interface,
        address_family=address_family,
        link_local=link_local,
        return_all=return_all)
    if not result:
        return None
    if return_all:
        return [address.split('/')[0] for address in result]
    return result.split('/')[0]
def get_interface_speed(device, interface, bit_size='gbps'):
    """Get speed of an interface

    Args:
        device (obj): device object
        interface (str): interface name
        bit_size (str): desired return size (gbps/mbps/kbps)

    Returns:
        Device speed or None

    Raises:
        None
    """
    try:
        out = device.parse('show interfaces extensive {interface}'.format(
            interface=interface.split('.')[0]))
    except SchemaEmptyParserError:
        return None

    # Example Dictionary
    # "physical-interface": [
    #     {
    #         "name": "ge-0/0/0",
    #         "speed": "1000mbps",
    #     }

    # speed_matrix[unit][target]: divide a value given in `unit` by this
    # factor to express it in `target`.
    speed_matrix = {
        'kbps': {
            'kbps': 1,
            'mbps': 1000,
            'gbps': 1000000,
        },
        'mbps': {
            'kbps': 0.001,
            'mbps': 1,
            'gbps': 1000,
        },
        'gbps': {
            # BUG FIX: this factor was 0.0000001 (1e-7), overstating
            # gbps-to-kbps conversions by a factor of 10. 1 gbps is 1e6
            # kbps, so the divisor must be 1e-6.
            'kbps': 0.000001,
            'mbps': 0.001,
            'gbps': 1,
        },
    }

    interfaces_list = Dq(out).get_values('physical-interface')
    for interfaces_dict in interfaces_list:
        speed_ = Dq(interfaces_dict).get_values('speed', 0)
        if not speed_:
            continue
        # speed_ is a string such as "1000mbps"; strip the unit (and any
        # thousands separators) and scale by the matching conversion factor.
        if 'kbps' in speed_:
            speed_ = int(re.sub(r'[a-zA-Z,]', '',
                                speed_)) / speed_matrix['kbps'][bit_size]
        elif 'mbps' in speed_:
            speed_ = int(re.sub(r'[a-zA-Z,]', '',
                                speed_)) / speed_matrix['mbps'][bit_size]
        else:
            speed_ = int(re.sub(r'[a-zA-Z,]', '',
                                speed_)) / speed_matrix['gbps'][bit_size]
        return speed_
def get_interface_output_error_drops(device, interface):
    """ Get output error drops based on interface name

    Args:
        device ('obj'): Device object
        interface('str'): Interface name

    Returns:
        output_drops: Output error drops

    Raises:
        None
    """
    # Query the physical interface only: strip any logical unit suffix.
    physical = interface.split('.')[0]
    try:
        parsed = device.parse(
            'show interfaces extensive {interface}'.format(interface=physical))
    except SchemaEmptyParserError:
        return None

    drops = parsed.q.get_values('output-drops', 0)
    return drops if drops else None
def get_interface_statistics_output_error_drops(device, interface):
    """ Get output error drops based on interface statistics

    Args:
        device ('obj'): Device object
        interface('str'): Interface name

    Returns:
        output_drops: Output error drops

    Raises:
        None
    """
    # Query the physical interface only: strip any logical unit suffix.
    physical = interface.split('.')[0]
    try:
        parsed = device.parse(
            'show interfaces statistics {interface}'.format(interface=physical))
    except SchemaEmptyParserError:
        return None

    drops = parsed.q.get_values('output-error-count', 0)
    return drops if drops else None
def get_interface_queue_tail_dropped_packets(device, interface):
    """ Get tail-dropped packets based on interfaces queue

    Args:
        device ('obj'): Device object
        interface('str'): Interface name

    Returns:
        tail_drop_packets: Output error drops

    Raises:
        None
    """
    # Query the physical interface only: strip any logical unit suffix.
    physical = interface.split('.')[0]
    try:
        parsed = device.parse(
            'show interfaces queue {interface}'.format(interface=physical))
    except SchemaEmptyParserError:
        return None

    tail_drops = parsed.q.get_values('queue-counters-tail-drop-packets')
    return tail_drops if tail_drops else None
def get_interface_queue_rl_dropped_packets(device, interface):
    """ Get rl-dropped packets based on interfaces queue

    Args:
        device ('obj'): Device object
        interface('str'): Interface name

    Returns:
        rl_drop_packets: Output error drops

    Raises:
        None
    """
    # Query the physical interface only: strip any logical unit suffix.
    physical = interface.split('.')[0]
    try:
        parsed = device.parse(
            'show interfaces queue {interface}'.format(interface=physical))
    except SchemaEmptyParserError:
        return None

    rl_drops = parsed.q.get_values('queue-counters-rl-drop-packets')
    return rl_drops if rl_drops else None
def get_interface_queue_red_dropped_packets(device, interface):
    """ Get red-dropped packets based on interfaces queue

    Args:
        device ('obj'): Device object
        interface('str'): Interface name

    Returns:
        red_drop_packets: Output error drops

    Raises:
        None
    """
    # Query the physical interface only: strip any logical unit suffix.
    physical = interface.split('.')[0]
    try:
        parsed = device.parse(
            'show interfaces queue {interface}'.format(interface=physical))
    except SchemaEmptyParserError:
        return None

    red_drops = parsed.q.get_values('queue-counters-red-packets')
    return red_drops if red_drops else None
def get_interface_queue_counters_dropped(device,
                                         interface,
                                         expected_queue_number,
                                         extensive=False):
    """ Get the total-drop packet counter for one queue of an interface

        Args:
            device ('obj'): Device object
            interface ('str'): Interface name (logical unit suffix is stripped)
            expected_queue_number ('str'): Queue number to check
            extensive ('str'): Use "show interfaces extensive" when truthy
        Returns:
            queue-counters-total-drop-packets value for that queue, or None
        Raises:
            None
    """
    physical_interface = interface.split('.')[0]
    # NOTE(review): the non-extensive command is "show interfaces" (without
    # "queue"), exactly as in the original -- confirm this is intended.
    template = ('show interfaces extensive {}' if extensive
                else 'show interfaces {}')
    try:
        parsed = device.parse(template.format(physical_interface))
    except SchemaEmptyParserError:
        return None
    drops = parsed.q.get_values('queue-counters-total-drop-packets',
                                int(expected_queue_number))
    return drops if drops else None
def get_interface_logical_output_bps(device,
                                     logical_interface,
                                     interface=None,
                                     extensive=False,
                                     output_dict=None):
    """Get logical output bps of a logical interface

        Args:
            device ('obj'): device object
            logical_interface ('str'): Logical interface to check output bps
            interface ('str'): interface name to pass in show command
            extensive ('bool'): Use extensive in show command
            output_dict ('dict'): Pass if parsed output already exists
                (skips running the show command again)

        Returns:
            output-bps value of the matching logical interface, or None

        Raises:
            None
    """
    out = None
    try:
        if not output_dict:
            try:
                # Build "show interfaces [<interface>] [extensive]"
                if interface:
                    cmd = 'show interfaces {interface}'.format(
                        interface=interface)
                else:
                    cmd = 'show interfaces'
                if extensive:
                    cmd = '{cmd} extensive'.format(cmd=cmd)
                out = device.parse(cmd)
            except SchemaEmptyParserError:
                return None
        else:
            # Caller supplied an already-parsed output
            out = output_dict
    except SchemaEmptyParserError:
        return None
    result = True  # NOTE(review): unused local, kept byte-identical here
    # Get first interface inorder to compare output-bps with other interfaces
    physical_intf_check = out.q.contains(
        '{interface}|.*output-bps.*'.format(interface=logical_interface),
        regex=True)
    # To handle list within list
    logical_interface_check = Dq(physical_intf_check.reconstruct())
    logical_intf_list = logical_interface_check.contains(
        'name|output-bps', regex=True).get_values('logical-interface')
    for l_i_dict in logical_intf_list:
        # Only the exact logical interface name is accepted
        name = l_i_dict.get('name', None)
        if not name or name != logical_interface:
            continue
        transit_traffic_statistic = l_i_dict.get('transit-traffic-statistics',
                                                 0)
        if not transit_traffic_statistic:
            return None
        output_bps = transit_traffic_statistic.get('output-bps', 0)
        if not output_bps:
            return None
        return output_bps
    return None
def get_interface_queue_counters_trans_packets(device,
                                               interface,
                                               expected_queue_number,
                                               extensive=False):
    """ Get the transmitted-packet counter for one queue of an interface

        Args:
            device ('obj'): Device object
            interface ('str'): Interface name (logical unit suffix is stripped)
            expected_queue_number ('str'): Queue number to check
            extensive ('str'): Use "show interfaces extensive" when truthy
        Returns:
            queue-counters-trans-packets value for that queue, or None
        Raises:
            None
    """
    physical = interface.split('.')[0]
    try:
        if extensive:
            out = device.parse('show interfaces extensive {interface}'.format(
                interface=physical))
        else:
            out = device.parse('show interfaces queue {interface}'.format(
                interface=physical))
    except SchemaEmptyParserError:
        # Device produced no parsable output for this interface
        return None
    # Dictionary:
    # 'queue': [{'queue-counters-queued-packets': '0',
    #            'queue-counters-total-drop-packets': '0',
    #            'queue-counters-trans-packets': '0',
    #            'queue-number': '0'}]
    transmitted_drop_packets = out.q.get_values('queue-counters-trans-packets',
                                                int(expected_queue_number))
    if not transmitted_drop_packets:
        return None
    return transmitted_drop_packets
def get_interface_queue_counters_queued_packets(device,
                                                interface,
                                                expected_queue_number,
                                                extensive=False):
    """ Get the queued-packet counter for one queue of an interface

        Args:
            device ('obj'): Device object
            interface ('str'): Interface name (logical unit suffix is stripped)
            expected_queue_number ('str'): Queue number to check
            extensive ('str'): Use "show interfaces extensive" when truthy
        Returns:
            queue-counters-queued-packets value for that queue, or None
        Raises:
            None
    """
    physical_interface = interface.split('.')[0]
    template = ('show interfaces extensive {}' if extensive
                else 'show interfaces {}')
    try:
        parsed = device.parse(template.format(physical_interface))
    except SchemaEmptyParserError as e:
        return None
    # Each queue entry in the parsed output looks like:
    # {'queue-counters-queued-packets': '0',
    #  'queue-counters-total-drop-packets': '0',
    #  'queue-counters-trans-packets': '0',
    #  'queue-number': '0'}
    queued = parsed.q.get_values('queue-counters-queued-packets',
                                 int(expected_queue_number))
    return queued if queued else None
def get_interface_traffic_output_pps(device: object, interface: str) -> str:
"""Get interface output pps
Args:
device (object): Device object
interface (str): Interface to check
Returns:
str: Interface pps
"""
try:
out = device.parse('show interfaces {interface} extensive'.format(
interface=interface))
except SchemaEmptyParserError as e:
return None
# Example dict
# "interface-information": {
# "physical-interface": [
# {
# "traffic-statistics": {
# "output-pps": str
| |
== 1)
assert(v.minor == 0)
assert(v.patch is None)
assert(str(v) == s)
s = 'mod_auth_passthrough/2.1'
v = Version.parse(s)
assert(v.name_clean == 'mod_auth_passthrough')
assert(v.name == 'mod_auth_passthrough')
assert(v.major == 2)
assert(v.minor == 1)
assert(v.patch is None)
assert(str(v) == s)
s = 'FrontPage/5.0.2.2635'
v = Version.parse(s)
assert(v.name_clean == 'frontpage')
assert(v.major == 5)
assert(v.minor == 0)
assert(v.patch == '2.2635')
assert(v.patch1 == 2)
assert(v.patch2 == 2635)
assert(v.patch_str == '.')
assert(str(v) == s)
s = 'OpenSSL/0.9.8r'
v = Version.parse(s)
assert(v.name_clean == 'openssl')
assert(v.major == 0)
assert(v.minor == 9)
assert(v.patch == '8r')
assert(v.patch1 == 8)
assert(v.patch_str == 'r')
assert(v.patch2 is None)
assert(str(v) == s)
s = 'sendmail.8.14.7'
v = Version.parse(s)
assert(v.name_clean == 'sendmail')
assert(v.name == 'sendmail')
assert(v.major == 8)
assert(v.minor == 14)
assert(type(v.patch) is int)
assert(v.patch == 7)
assert(type(v.patch1) is int)
assert(v.patch1 == 7)
assert(v.patch2 is None)
assert(v.patch_str is None)
assert(str(v) == s)
s = 'Mercury POP3 server 1.48'
v = Version.parse(s)
assert(v.name_clean == 'mercurypop3server')
assert(v.name == 'Mercury POP3 server')
assert(v.major == 1)
assert(v.minor == 48)
assert(v.patch is None)
assert(str(v) == s)
s = 'Squid http proxy 3.0.STABLE20'
v = Version.parse(s)
assert(v.name_clean == 'squidhttpproxy')
assert(v.name == 'Squid http proxy')
assert(v.major == 3)
assert(v.minor == 0)
assert(v.patch == 'STABLE20')
assert(v.patch1 is None)
assert(v.patch_str == 'STABLE')
assert(v.patch2 == 20)
assert(str(v) == s)
s = 'mod_apreq2-20090110/2.7.1'
v = Version.parse(s)
assert(v.name_clean == 'mod_apreq2')
assert(v.name == 'mod_apreq2')
assert(v.major == 2)
assert(v.minor == 7)
assert(v.patch == 1)
assert(v.patch1 == 1)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(v.release_date == date(2009, 1, 10))
assert(str(v) == s)
s = 'mini_httpd/1.19 19dec2003'
v = Version.parse(s)
assert(v.name_clean == 'mini_httpd')
assert(v.major == 1)
assert(v.minor == 19)
assert(v.patch is None)
assert(v.release_date == date(2003, 12, 19))
assert(str(v) == s)
s = 'Allegro-Software-RomPager/4.34'
v = Version.parse(s)
assert(v.name_clean == 'allegro-software-rompager')
assert(v.major == 4)
assert(v.minor == 34)
assert(v.patch is None)
assert(str(v) == s)
s = 'Foobar 8.00.162'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.name == 'Foobar')
assert(v.major == 8)
assert(v.minor == 0)
assert(v.zero_prefixes['minor'] == 1)
assert(v.patch == 162)
assert(v.patch1 == 162)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(str(v) == s)
s = 'Foobar 8.00.0162'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.name == 'Foobar')
assert(v.major == 8)
assert(v.minor == 0)
assert(v.zero_prefixes['minor'] == 1)
assert(v.patch == 162)
assert(v.zero_prefixes['patch'] == 1)
assert(v.patch1 == 162)
assert(v.zero_prefixes['patch1'] == 1)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(str(v) == s)
s = 'LANCOM 1611+ 8.0.162'
v = Version.parse(s)
assert(v.name_clean == 'lancom1611+')
assert(v.name == 'LANCOM 1611+')
assert(v.major == 8)
assert(v.minor == 0)
assert(v.patch == 162)
assert(v.patch1 == 162)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(str(v) == s)
s = 'LANCOM 1611+ 8.00.162'
v = Version.parse(s)
assert(v.name_clean == 'lancom1611+')
assert(v.name == 'LANCOM 1611+')
assert(v.major == 8)
assert(v.minor == 0)
assert(v.zero_prefixes['minor'] == 1)
assert(v.patch == 162)
assert(v.patch1 == 162)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(str(v) == s)
s = 'LANCOM 1611+ 8.00.0162 / 16.06.2010'
v = Version.parse(s)
assert(v.name_clean == 'lancom1611+')
assert(v.name == 'LANCOM 1611+')
assert(v.major == 8)
assert(v.minor == 0)
assert(v.zero_prefixes['minor'] == 1)
assert(v.patch == 162)
assert(v.zero_prefixes['patch'] == 1)
assert(v.patch1 == 162)
assert(v.zero_prefixes['patch1'] == 1)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(v.release_date == date(2010, 6, 16))
assert(str(v) == s)
s = 'OpenSSL/0.9.8e-fips-rhel5'
v = Version.parse(s)
assert(v.name_clean == 'openssl')
assert(v.major == 0)
assert(v.minor == 9)
assert(v.patch == '8e-fips-rhel5')
assert(v.patch1 == 8)
assert(v.patch_str == 'e-fips-rhel')
assert(v.patch2 == 5) # Even though that's not ideal
assert(str(v) == s)
s = 'Sun-ONE-ASP/4.0.3'
v = Version.parse(s)
assert(v.name_clean == 'sun-one-asp')
assert(v.major == 4)
assert(v.minor == 0)
assert(v.patch == 3)
assert(v.patch1 == 3)
assert(v.patch_str is None)
assert(v.patch2 is None)
assert(str(v) == s)
s = 'thttpd/2.23beta1 26may2002'
v = Version.parse(s)
assert(v.name_clean == 'thttpd')
assert(v.major == 2)
assert(v.minor == 23)
assert(v.patch == 'beta1')
assert(v.patch1 is None)
assert(v.patch2 == 1)
assert(v.patch_str == 'beta')
assert(v.release_date == date(2002, 5, 26))
assert(str(v) == s)
# Test arbitrary number of zero prefixes
# TODO and interaction with strings in major/minor
s = 'Foobar 0'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 0)
assert(v.zero_prefixes['major'] == 0)
assert(v.minor is None)
assert(str(v) == s)
s = 'Foobar 03'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 1)
assert(v.minor is None)
assert(str(v) == s)
s = 'Foobar 003'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 2)
assert(v.minor is None)
assert(str(v) == s)
s = 'Foobar 00000000003'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 10)
assert(v.minor is None)
assert(str(v) == s)
s = 'Foobar 3.01'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 1)
assert(str(v) == s)
s = 'Foobar 3.001'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 2)
assert(str(v) == s)
s = 'Foobar 3.00000000001'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 10)
assert(str(v) == s)
s = 'Foobar 03.01'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 1)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 1)
assert(str(v) == s)
s = 'Foobar 000003.01'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 5)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 1)
assert(str(v) == s)
s = 'Foobar 000003.00000000001'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.zero_prefixes['major'] == 5)
assert(v.minor == 1)
assert(v.zero_prefixes['minor'] == 10)
assert(str(v) == s)
s = 'Foobar 3.1.01'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.minor == 1)
assert(v.patch == 1)
assert(v.zero_prefixes['patch'] == 1)
assert(v.patch1 == 1)
assert(v.zero_prefixes['patch1'] == 1)
assert(v.patch2 is None)
assert(v.patch_str is None)
assert(str(v) == s)
s = 'Foobar 3.1.1-01'
v = Version.parse(s)
assert(v.name_clean == 'foobar')
assert(v.major == 3)
assert(v.minor == 1)
assert(v.patch == '1-01')
assert(v.patch1 == 1)
assert(v.patch2 == 1)
assert(v.patch_str == '-')
assert(str(v) == s)
s = 'Foobar 3 (FB3-DX) 1.90.26'
#v = Version.parse(s)
#assert(v.name_clean == 'php')
#assert(v.name == 'PHP')
#assert(v.major == 5)
#assert(v.minor == 2)
#assert(v.patch == '6-1+lenny15')
#assert(v.patch1 == 6)
#assert(v.patch2 == 1)
#assert(v.patch_str == '-')
#assert(str(v) == s)
s = 'ProTools Basic Edition 5.0 Build 11'
v = Version.parse(s)
assert(v.name_clean == 'protoolsbasicedition')
assert(v.name == 'ProTools Basic Edition')
assert(v.major == 5)
assert(v.minor == 0)
assert(v.patch is None)
assert(v.extra_str == 'Build 11')
assert(str(v) == s)
s = 'Fiddlesticks 2.0 Beta 2'
v = Version.parse(s)
assert(v.name_clean == 'fiddlesticks')
assert(v.name == 'Fiddlesticks')
assert(v.major == 2)
assert(v.minor == 0)
assert(v.patch is None)
assert(v.extra_str == 'Beta 2')
assert(str(v) == s)
s = 'IDA 5.19.1.1387.2314'
#v = Version.parse(s)
#assert(v.name_clean == 'ida')
#assert(v.name == 'IDA')
#assert(v.major == 5)
#assert(v.minor == 19)
#assert(v.patch == '')
#assert(v.patch1 == 6)
#assert(v.patch2 == 1)
#assert(v.patch_str == '-')
#assert(str(v) == s)
# s = 'IDA 5.19.1.1387.2314.0'
# v = Version.parse(s)
# assert(v.name_clean == 'php')
# assert(v.name == 'PHP')
# assert(v.major == 5)
# assert(v.minor == 2)
# assert(v.patch == '6-1+lenny15')
# assert(v.patch1 == 6)
# assert(v.patch2 == 1)
# assert(v.patch_str == '-')
# assert(str(v) == s)
#
# s = 'IDA 5.19.1.1387.2314.0.1352135'
# v = Version.parse(s)
# assert(v.name_clean == 'php')
# assert(v.name == 'PHP')
# assert(v.major == 5)
# assert(v.minor == 2)
# assert(v.patch == '6-1+lenny15')
# assert(v.patch1 == 6)
# assert(v.patch2 == 1)
# assert(v.patch_str == '-')
# assert(str(v) == s)
s = 'Cyrus POP3 v2.2.13-Debian-2.2.13-14+lenny3 server'
v = Version.parse(s)
assert(v.name_clean == 'cyruspop3')
assert(v.name == 'Cyrus POP3')
assert(v.major == 2)
assert(v.minor == 2)
assert(v.patch == '13-Debian-2.2.13-14')
assert(v.patch1 == 13)
assert(v.patch2 == 14)
assert(v.patch_str == '-Debian-2.2.13-')
assert(v.build_meta == 'lenny3')
assert(v.extra_str == ' server')
assert(str(v) == s)
s = 'POP MDaemon 9.0.4'
v = Version.parse(s)
assert(v.name_clean == 'popmdaemon')
assert(v.name == 'POP MDaemon')
assert(v.major == 9)
assert(v.minor == 0)
assert(v.patch == 4)
assert(v.patch1 == 4)
assert(v.patch2 is None)
assert(v.patch_str is None)
assert(str(v) == s)
s = 'POP3 Bigfoot v1.0 server'
v = Version.parse(s)
assert(v.name_clean == 'pop3bigfoot')
assert(v.name == 'POP3 Bigfoot')
assert(v.major == 1)
assert(v.minor == 0)
assert(v.patch is None)
assert(v.extra_str == ' server')
assert(str(v) == s)
s = 'IMail 8.05 4000-1'
v = Version.parse(s)
assert(v.name_clean == 'imail')
assert(v.name == 'IMail')
assert(v.major == 8)
assert(v.minor == 5)
assert(v.zero_prefixes['minor'] == 1)
assert(v.patch is None)
assert(v.extra_str == '4000-1') # | |
<gh_stars>10-100
import re
import ast
import _ast
import operator as op
import json
import base64
import inspect
import types
import sys
try: # Python2
from itertools import izip as zip
except ImportError: # python3 = zip is a buildin
pass
# True on Python 3 interpreters; used below for version-specific AST handling
# (e.g. ast.NameConstant exists only on Python 3)
PY3 = sys.version_info >= (3,)
from .collectormanager import collectormgr
from .log import LoggerFactory
from .util import string_decode
# Global logger for this part
logger = LoggerFactory.create_logger('evaluater')
# supported operators
# Whitelist mapping AST operator node classes to the callables that implement
# them; only node types listed here may appear in evaluated expressions.
# Some entries are registered under both their ast and _ast aliases.
operators = {
    ast.Add      : op.add,       # A + B
    ast.Sub      : op.sub,       # A - B
    ast.Mult     : op.mul,       # A * B
    ast.Div      : op.truediv,   # A / B
    ast.Pow      : op.pow,       # A ** B
    ast.BitXor   : op.xor,       # A ^ B
    ast.USub     : op.neg,       # -A
    ast.Eq       : op.eq,        # A == B
    ast.NotEq    : op.ne,        # A != B
    ast.Gt       : op.gt,        # A > B
    ast.Lt       : op.lt,        # A < B
    ast.GtE      : op.ge,        # A >= B
    ast.LtE      : op.le,        # A <= B
    ast.Mod      : op.mod,       # A % B
    ast.Or       : op.or_, _ast.Or: op.or_,    # A or B
    ast.And      : op.and_, _ast.And: op.and_, # A and B
    ast.BitOr    : op.or_,       # A | B
    ast.BitAnd   : op.and_,      # A & B
    ast.Not      : op.not_, _ast.Not: op.not_, # not A
    ast.In       : op.contains,  # A in L
    ast.Subscript: op.getitem, _ast.Subscript: op.getitem,  # d[k]
    ast.Attribute: op.attrgetter, _ast.Attribute: op.attrgetter,  # d.XXXX()
}
# Functions callable from evaluated expressions (name -> callable)
functions = {
    'list': list,
}
# Function name -> group label, used to classify exported functions
functions_to_groups = {
    'list': 'basic',
}
# This allow to have parameter for export_evaluater_function
# NOTE: a decorator that itself takes arguments needs an extra wrapping layer;
# the helper below hides that boilerplate.
def parametrized(dec):
    """Helper for writing decorators that take arguments.

    Wraps *dec* so that ``@dec(*args, **kwargs)`` can be used on a function
    ``f``; the wrapped decorator is then invoked as ``dec(f, *args, **kwargs)``.
    """
    def accept_arguments(*args, **kwargs):
        def apply_to(func):
            return dec(func, *args, **kwargs)
        return apply_to
    return accept_arguments
def _export_evaluater_function(f, function_group):
    """Register *f* in the evaluater's whitelist of callable functions."""
    name = f.__name__
    # Record both the callable itself and the group it belongs to
    functions[name] = f
    functions_to_groups[name] = function_group
    logger.debug('Evaluater: exporting function %s' % name)
    return f
@parametrized
def export_evaluater_function(f, function_group):
    """Decorator: make *f* callable from evaluated expressions."""
    return _export_evaluater_function(f, function_group)


# Pre-register a few safe builtins under the 'basic' group.
for f in (abs, min, max, sum, sorted, len, set):
    # NOTE: find why, but we need to call the not decorated function... cool...
    _export_evaluater_function(f, function_group='basic')

# Literal names allowed in expressions (looked up when evaluating ast.Name /
# ast.NameConstant nodes)
names = {'True': True, 'False': False, 'None': None}
class Evaluater(object):
def __init__(self):
    # Raw configuration data made available to expression resolution
    self.cfg_data = {}
    # Matches {{...}} placeholders; non-greedy so several can coexist per string
    self.pat = re.compile('{{.*?}}')
def load(self, cfg_data):
    """Store the configuration data used when resolving expressions."""
    self.cfg_data = cfg_data
# We want a simple string at the end, but try to be a bit smart when doing it
def __change_to_string(self, o):
if isinstance(o, list) or isinstance(o, set):
return ','.join([str(e) for e in o])
return str(o)
def compile(self, expr, check=None, to_string=False, variables={}):
# first manage {} thing and look at them
all_parts = self.pat.findall(expr)
changes = []
for p in all_parts:
orig_p = p # save the original expression, with {{}} and default parts
default_s = ''
p = p[2:-2] # remove {{ and }}
# If there is a EXPR||DEFAULT we split in the part we need to grok, and the default
if '||' in p:
part1, part2 = p.split('||', 1)
# get EXPR to get
p = part1
# and the default value to evaluate if need
default_s = part2
if p.startswith('collector.'):
s = p[len('collector.'):]
try:
v = collectormgr.get_data(s)
except KeyError: # ok cannot find it, try to switch to default if there is one
if default_s == '':
v = ''
else: # ok try to compile it to get a real python object
v = self.compile(default_s, check=check, to_string=to_string)
logger.debug('Ask', s, 'got', v)
changes.append((orig_p, v))
elif p.startswith('parameters.'):
s = p[len('parameters.'):]
v = self._found_params(s, check)
changes.append((orig_p, v))
elif p.startswith('variables.'):
s = p[len('variables.'):]
v = variables[s]
changes.append((orig_p, v))
else:
raise Exception('The {{ }} expression: %s is not a known type' % p)
if not len(changes) == len(all_parts):
raise ValueError('Some parts between {} cannot be changed')
for (p, v) in changes:
f = repr
if to_string:
f = self.__change_to_string
expr = expr.replace('%s' % p, f(v))
return expr
def eval_expr(self, expr, check=None, variables={}):
    """Expand placeholders in *expr*, parse it, and evaluate it safely.

    Only the operators/functions whitelisted by this module are honoured;
    anything else makes eval_() raise.
    """
    logger.debug('EVAL: expression: %s' % expr)
    compiled = self.compile(expr, check=check, variables=variables)
    logger.debug('EVAL: exp changed: %s' % compiled)
    # final tree
    parsed = ast.parse(compiled, mode='eval').body
    try:
        result = self.eval_(parsed)
    except Exception as exp:
        logger.debug('EVAL: fail to eval expr: %s : %s' % (compiled, exp))
        raise
    try:
        logger.debug('EVAL: result: %s' % result)
    except TypeError:  # tuples can break %-formatting, log their str() instead
        logger.debug('EVAL: result: %s' % str(result))
    return result
def eval_(self, node):
    """Recursively evaluate one AST node.

    Security model: only the node types handled below, the operators in the
    module-level ``operators`` dict, and the functions registered in
    ``functions`` may be evaluated; anything else raises TypeError.
    """
    logger.debug('eval_ node: %s => type=%s' % (node, type(node)))
    if isinstance(node, ast.Num):  # <number>
        return node.n
    elif isinstance(node, ast.Str):  # <string>
        return node.s
    elif isinstance(node, ast.List):  # <list>
        return [self.eval_(e) for e in node.elts]
    elif isinstance(node, ast.Tuple):  # <tuple>
        return tuple([self.eval_(e) for e in node.elts])
    elif isinstance(node, ast.Dict):  # <dict>
        _keys = [self.eval_(e) for e in node.keys]
        _values = [self.eval_(e) for e in node.values]
        _dict = dict(zip(_keys, _values))  # zip it into a new dict
        return _dict
    elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
        return operators[type(node.op)](self.eval_(node.left), self.eval_(node.right))
    elif isinstance(node, _ast.BoolOp):  # <elt1> OP <elt2> TOD: manage more than 2 params
        if len(node.values) != 2:
            raise Exception('Cannot manage and/or operators woth more than 2 parts currently.')
        # Special case: _ast.And if the first element is False, then we should NOT eval the right part
        # and directly returns False (short-circuit, like Python's own "and")
        left_part_eval = self.eval_(node.values[0])
        if not left_part_eval and isinstance(node.op, _ast.And):
            return False
        # Special case: _ast.Or if the first element is True, then we should NOT eval the right part
        # and directly returns True
        left_part_eval = self.eval_(node.values[0])
        if left_part_eval and isinstance(node.op, _ast.Or):
            return True
        # else, give the whole result
        return operators[type(node.op)](left_part_eval, self.eval_(node.values[1]))
    elif isinstance(node, ast.Compare):  # <left> <operator> <right>
        left = self.eval_(node.left)
        right = self.eval_(node.comparators[0])
        _op = operators[type(node.ops[0])]
        reversed_operator = [op.contains]  # some operators are in the right,left order!!
        if _op not in reversed_operator:
            return _op(left, right)
        else:  # reverse order (e.g. "A in L" maps to contains(L, A))
            return _op(right, left)
    elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
        return operators[type(node.op)](self.eval_(node.operand))
    elif isinstance(node, ast.Name):  # name? try to look at it
        # Unknown names deliberately resolve to None instead of raising
        key = node.id
        v = names.get(key, None)
        return v
    # None, True, False are nameconstants in python3, but names in 2
    elif PY3 and isinstance(node, ast.NameConstant):
        key = node.value
        v = names.get(str(key), None)  # note: valus is alrady the final value, must lookup it to assert only what we want
        return v
    elif isinstance(node, ast.Subscript):  # {}['key'] access
        # NOTE: the 'key' is node.slice.value.s
        # and the node.value is a ast.Dict, so must be eval_
        _d = self.eval_(node.value)
        v = _d[node.slice.value.s]
        return v
    # elif isinstance(node, _ast.Attribute):  # o.f() call
    #    # NOTE: for security reason, only accept functons on basic types
    #    print "Attribute:", node, node.__dict__
    #    return None
    #    _d = self.eval_(node.value)
    #    v = _d[node.slice.value.s]
    #    return v
    elif isinstance(node, ast.Call):  # call? dangerous, must be registered :)
        args = [self.eval_(arg) for arg in node.args]
        f = None
        # print 'attr?', isinstance(node.func, ast.Attribute)
        # print 'name?', isinstance(node.func, ast.Name)
        if isinstance(node.func, ast.Name):
            # Plain call f(...): only whitelisted function names are allowed
            fname = node.func.id
            f = functions.get(fname, None)
            if f is None:
                logger.error('Eval unknown function %s' % (fname))
                raise TypeError(node)
        elif isinstance(node.func, ast.Attribute):
            # Attribute is managed only if the base type is a standard one
            _ref_object_node = node.func.value
            if isinstance(_ref_object_node, ast.Dict) or isinstance(_ref_object_node, ast.List) or isinstance(_ref_object_node, ast.Str) or isinstance(_ref_object_node, ast.Set) or isinstance(_ref_object_node, ast.Subscript):
                _ref_object = self.eval_(_ref_object_node)
                f = getattr(_ref_object, node.func.attr)
            else:
                logger.error('Eval UNMANAGED (ast.attribute) CALL: %s %s %s is refused' % (node.func, node.func.__dict__, node.func.value.__dict__))
                raise TypeError(node)
        else:
            logger.error('Eval UNMANAGED (othercall) CALL: %s %s %s is refused' % (node.func, node.func.__dict__, node.func.value.__dict__))
            raise TypeError(node)
        if f:
            v = f(*args)
            return v
    else:
        logger.error('Eval UNMANAGED node: %s %s and so is refused' % (node, type(node)))
        raise TypeError(node)
# Try to find the params for a macro pack parameters
def _found_params(self, m, check):
# only import it now because if not will do an import loop
from .configurationmanager import configmgr
parts = [m]
# if we got a |, we got a default value somewhere
if '|' in m:
parts = m.split('|', 1)
change_to = ''
if not check:
logger.error('Cannot find parameters: %s as we dont have a check' % m)
return change_to
pack_name = check['pack_name']
pack_parameters = configmgr.get_parameters_from_pack(pack_name)
| |
<reponame>waffle620/fagyhal
import asyncio
import time
import steam
import steamapi
import discord
import datetime
import texttable
import wolframalpha
import copy
from discord.ext import commands
from utils import checks
from steam.steamid import SteamId
from steam.steamprofile import SteamProfile
from steam.steamaccountuniverse import SteamAccountUniverse
from steam.steamaccounttype import SteamAccountType
from mods.cog import Cog
# Discord code-block wrappers used when formatting replies
cool = "```xl\n{0}\n```"
code = "```py\n{0}\n```"
diff = "```diff\n{0}\n```"
# Process start time; the uptime command reports time elapsed since this
start_time = time.time()
# NOTE(review): empty app id -- WolframAlpha queries will fail until one is set
wa = wolframalpha.Client('')
class Info(Cog):
def __init__(self, bot):
    super().__init__(bot)
    # Shortcuts to services exposed by the bot object
    self.cursor = bot.mysql.cursor  # MySQL cursor used by the stats command
    self.discord_path = bot.path.discord
    self.files_path = bot.path.files
    self.bytes_download = bot.bytes_download
    self.truncate = bot.truncate  # used to send long replies (e.g. server/info)
    self.get_json = bot.get_json
@commands.command(pass_context=True)
async def help(self, ctx, *, cmd:str=None):
    """help"""
    if cmd and cmd in self.bot.commands:
        # Point a shallow copy of the context at the requested command and
        # delegate to the bot's built-in per-command help formatter.
        nctx = copy.copy(ctx)
        nctx.command = self.bot.commands[cmd]
        await self.bot.command_help(nctx)
    else:
        # No (known) command given: link the online help instead
        await self.bot.say("{0}: https://facepunch.org/help".format(ctx.message.author.mention))
@commands.command(pass_context=True, no_pm=True)
async def server(self, ctx):
    """server info"""
    try:
        server = ctx.message.server
        # "online" includes idle members
        online = str(sum(1 for member in server.members if member.status == discord.Status.online or member.status == discord.Status.idle))
        msg = ':desktop: **{0}** Information:\n'.format(server)
        msg += ":id:: `{0}`\n".format(server.id)
        msg += ":map: Region: __{0}__\n".format(str(server.region))
        msg += ":busts_in_silhouette: Users: **{0}**/{1}\n".format(online, len(server.members))
        msg += ":calendar_spiral: Created: `{0}`\n".format(str(server.created_at.strftime('%m/%d/%Y %H:%M:%S')))
        msg += ":bust_in_silhouette: Owner: `{0}`\n".format(server.owner)
        if server.verification_level:
            msg += ":exclamation: Verification Level: **{0}**\n".format(str(server.verification_level).upper())
        msg += ":speech_balloon: Default Channel: {0}\n".format(server.default_channel.mention)
        if ctx.message.server.afk_channel:
            msg += ":telephone_receiver: AFK Channel: {0}\n".format(ctx.message.server.afk_channel.mention)
            msg += ":keyboard: AFK Timeout: {0} minutes\n".format(str(int(int(ctx.message.server.afk_timeout)/60)))
        # Tally channels by type
        voice = 0
        text = 0
        for channel in server.channels:
            if channel.type == discord.ChannelType.text:
                text += 1
            elif channel.type == discord.ChannelType.voice:
                voice += 1
        msg += ":arrow_forward: Channels: `{0}` Text | `{1}` Voice | **{2}** Total\n".format(text, voice, str(len(server.channels)))
        msg += ":arrow_forward: Roles: `{0}`\n".format(str(len(server.roles)))
        if len(server.emojis) != 0:
            emotes = ""
            for emote in server.emojis:
                emotes += "<:{0}:{1}>".format(emote.name, emote.id)
            msg += ":arrow_forward: Emotes: {0}\n".format(emotes)
        msg += ':art: Server Icon: {0}'.format(server.icon_url)
        # truncate splits the reply if it exceeds Discord's message limit
        await self.truncate(ctx.message.channel, msg)
    except Exception as e:
        # NOTE(review): broad catch reports the error back to the channel
        await self.bot.say(e)
@commands.command()
async def invite(self):
    """returns invite link for bot"""
    # Assemble the full reply from its pieces, then send it in one message.
    parts = [
        diff.format('+ Invite me to your server with this url'),
        '<https://discordapp.com/oauth2/authorize?client_id=170903265565736960&scope=bot&permissions=8>',
        diff.format("- Uncheck Administrator permission if you do not need Admin/Moderation commands.\n+ + +\n! Join NotSoSuper\'s Dev for any questions or help with the bot and free emotes!"),
        'https://discord.gg/QQENx4f',
    ]
    await self.bot.say(''.join(parts))
@commands.command(pass_context=True, aliases=['userinfo', 'user'])
async def info(self, ctx, *users:discord.User):
    """Returns inputed users info."""
    try:
        if len(users) == 0:
            users = [ctx.message.author]
        server = ctx.message.server
        for user in users:
            # The bot itself gets a joke "seen on ALL OF THEM" answer
            seen_on_self = True if user == self.bot.user else False
            seen_on = set([member.server.name for member in self.bot.get_all_members() if member == user and member.server != server]) if user != self.bot.user else 'ALL OF THEM'
            msg = """:bust_in_silhouette: **{0}** Information:
:id:: `{1}`
:robot: Bot: {2}
:inbox_tray: Server Join Date: __{3}__
:globe_with_meridians: Discord Join Date: __{4}__
:information_source: Status: **{5}**{6}{7}
:eyes: Seen On (*Shard #{8}*): **{9}** servers **=>** `{10}`
:shield: Roles: `{11}`
:art: Avatar: {12}
""".format(user, user.id, 'Yes' if user.bot else 'No', user.joined_at.strftime('%m/%d/%Y %H:%M:%S'), user.created_at.strftime('%m/%d/%Y %H:%M:%S'), str(user.status).upper(), '\n:joystick: Playing: \"{0}\"'.format(user.game) if user.game else '', '\n:microphone2: Voice Channel: {0}'.format(user.voice_channel.name) if user.voice_channel else '', self.bot.shard_id, len(seen_on) if not seen_on_self else '999999999...', ", ".join(seen_on) if len(seen_on) >= 1 and not seen_on_self else 'none other than here ;-;' if not seen_on_self else 'discord mainframe', ", ".join([role.name for role in user.roles]), user.avatar_url)
            await self.truncate(ctx.message.channel, msg)
    except Exception as e:
        await self.bot.say(str(e))
@commands.command(pass_context=True)
async def avatar(self, ctx, *users:discord.User):
    """Returns the input users avatar."""
    # Default to the command author when no users were mentioned.
    targets = users if users else (ctx.message.author,)
    for user in targets:
        await self.bot.say("`{0}`'s avatar is: {1}".format(user, user.avatar_url))
@commands.command(name='time', pass_context=True)
async def _time(self, ctx):
    """Returns bots date and time."""
    # Local date/time of the host machine the bot runs on
    await self.bot.say('Date is: **{0}**\nTime is: **{1}**'.format(time.strftime("%A, %B %d, %Y"), time.strftime("%I:%M:%S %p")))
@commands.command(pass_context=True)
async def uptime(self, ctx):
    """How long have I been up/online?"""
    # Split elapsed seconds into weeks/days/hours/minutes/seconds, then build
    # the message progressively: each larger unit prefixes the message and
    # swaps the trailing emoji for a "happier" one.
    seconds = time.time() - start_time
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    w, d = divmod(d, 7)
    if s != 0:
        msg = '**{0}** seconds{1}.'.format(int(s), ' :frowning:' if m == 0 else '')
    if m != 0:
        e = ' :slight_frown:.' if h == 0 else '.'
        msg = ' : **{0}** minutes : '.format(int(m)) + msg.replace('.', '') + e
    if h != 0:
        e = ' :slight_smile:.' if d == 0 else '.'
        msg = ' : **{0}** hours'.format(int(h)) + msg.replace('.', '') + e
    if d != 0:
        e = ' :smiley:.' if w == 0 else '.'
        msg = ' : **{0}** days'.format(int(d)) + msg.replace('.', '').replace(':slight_smile:', '') + e
    if w != 0:
        # BUGFIX: the format string previously contained a stray '{1}' with no
        # matching argument, raising IndexError once uptime reached one week.
        msg = ' : **{0}** weeks '.format(int(w)) + msg.replace('.', '') + ' :joy: :joy: :joy: :joy: :joy: :joy: :joy: :joy: :joy: :joy:.'
    if m == 0:
        msg = ' '+msg
    else:
        # Strip the leading ' :' separator left by the prefixing above
        msg = msg[2:]
    await self.bot.say(":clock4: Online for{0}".format(msg))
#https://github.com/notcake/hal9000.plugins.private/tree/master/steamapi
# Initialise the shared Steam Web API connection at class-definition time.
# NOTE(review): api_key is empty -- Steam lookups will fail until a key is set.
steamapi.core.APIConnection(api_key="")
@commands.command(pass_context=True)
async def steam(self, stem:str):
    """Returns Steam information of inputed SteamID/Custom URL/Etc"""
    try:
        # Try every SteamID representation in turn until one parses;
        # fall back to resolving a custom (vanity) profile URL last.
        steamId = None
        steamProfile = None
        if steamId is None:
            steamId = SteamId.fromSteamId("{0}".format(stem))
        if steamId is None:
            steamId = SteamId.fromSteamId3(stem)
        if steamId is None:
            steamId = SteamId.fromSteamId64(stem)
        if steamId is None:
            steamId = SteamId.fromProfileUrl(stem)
        if steamId is None:
            steamProfile = SteamProfile.fromCustomProfileUrl(stem)
            if steamProfile is None:
                # Nothing matched: not a recognisable Steam identifier
                await self.bot.say("bad steam id")
                return
            steamId = steamProfile.steamId
        else:
            steamProfile = SteamProfile.fromSteamId(steamId)
        msg = ""
        if steamProfile is not None and \
           steamProfile.displayName is not None:
            msg += "Username: " + steamProfile.displayName + "\n"
        steam_user = steamapi.user.SteamUser(steamId.steamId64)
        # Map the numeric persona state to a readable status line
        if steam_user.state == 0:
            msg += "Status: Offline\n"
        elif steam_user.state == 1:
            msg += "Status: Online\n"
        elif steam_user.state == 2:
            msg += "Status: Busy\n"
        elif steam_user.state == 3:
            msg += "Status: Away\n"
        elif steam_user.state == 4:
            msg += "Status: Snooze\n"
        elif steam_user.state == 5:
            msg += "Status: Looking to Trade\n"
        elif steam_user.state == 6:
            msg += "Status: Looking to Play\n"
        msg += "Avatar: \"{0}\"\n".format(str(steam_user.avatar_full))
        if steam_user.level != None:
            msg += "Level: {0}\n".format(str(steam_user.level))
        if steam_user.currently_playing != None:
            msg += "Currently Playing: {0}\n".format(str(steam_user.currently_playing))
        elif steam_user.recently_played != []:
            msg += "Recently Played: {0}\n".format(str(steam_user.recently_played).replace("<SteamApp ", "").replace(">", "").replace("[", "").replace("]", ""))
        msg += "Created: {0}\n".format(str(steam_user.time_created))
        msg += "Steam ID: " + steamId.steamId + "\n"
        msg += "Steam ID 64: " + str(steamId.steamId64) + "\n"
        msg += "Permanent Link: \"" + steamId.profileUrl + "\"\n"
        if steamProfile != None and \
           steamProfile.customProfileUrl != None:
            msg += "Link: \"" + steamProfile.customProfileUrl + "\"\n"
        # Replace apostrophes with a prime so the ```xl``` block renders safely
        msg = msg.replace("'", "′")
        await self.bot.say(cool.format(msg))
    except Exception as e:
        # Report the failure (e.g. network/API errors) back to the channel
        await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))
@commands.command(pass_context=True)
async def cinfo(self, ctx):
"""Return Channel Information"""
msg = "Channel Name: {0}\n".format(ctx.message.channel.name)
msg += "Channel ID: {0}\n".format(ctx.message.channel.id)
msg += "Channel Created: {0}\n".format(ctx.message.channel.created_at)
await self.bot.say(cool.format(msg))
@commands.command(pass_context=True, alias='binfo')
async def botinfo(self, ctx):
"""Bot Information"""
msg = "NotSoBot\n"
msg += "Creator: @NotSoSuper#8800\n"
msg += "Creator Steam: http://steamcommunity.com/id/suck\n"
msg += "Library: Discord.py\n"
msg += "Code: https://github.com/NotSoSuper/notsosuper_bot/\n"
await self.bot.say(cool.format(msg))
@commands.command()
async def botc(self, *, text:str):
txt = text.split()
msg = "https://github.com/NotSoSuper/notsosuper_bot/search?q={0}".format("+".join(txt))
await self.bot.say(msg)
@commands.command(pass_context=True)
@commands.cooldown(1, 120, commands.BucketType.server)
async def stats(self, ctx):
try:
sql = "SELECT `messages_id` FROM `messages` WHERE action=0 ORDER BY `messages_id` DESC LIMIT 1"
message_count = self.cursor.execute(sql).fetchall()[0]['messages_id']
sql = "SELECT COUNT(`action`) FROM `messages` WHERE action=1"
message_delete_count = self.cursor.execute(sql).fetchall()[0]['COUNT(`action`)']
sql = "SELECT COUNT(`action`) FROM `messages` WHERE action=2"
message_edit_count = self.cursor.execute(sql).fetchall()[0]['COUNT(`action`)']
sql = "SELECT COUNT(`command`) FROM `command_logs`"
command_count = self.cursor.execute(sql).fetchall()[0]['COUNT(`command`)']
sql = 'SELECT COUNT(`command`) FROM `command_logs` WHERE server={0}'.format(ctx.message.server.id)
server_command_count = self.cursor.execute(sql).fetchall()[0]['COUNT(`command`)']
sql = "SELECT `command`, COUNT(`command`) AS magnitude FROM `command_logs` GROUP BY `command` ORDER BY magnitude DESC LIMIT 6"
command_magnitude = self.cursor.execute(sql).fetchall()
sql = 'SELECT `server`, `server_name`, COUNT(`command`) AS magnitude FROM `command_logs` GROUP BY `server` ORDER BY magnitude DESC LIMIT 5'
server_magnitude = self.cursor.execute(sql).fetchall()
sql = 'SELECT * FROM `stats`'
results = self.cursor.execute(sql).fetchall()
server_list = []
for shard in results:
server_list.append([shard['largest_member_server_name'], shard['largest_member_server']])
count = 0
counts = []
for x in server_list:
count = int(x[1])
counts.append(count)
max_ = int(max(counts))
max_index = int(counts.index(max_))
biggest_server_name = server_list[max_index][0]
biggest_server_count = server_list[max_index][1]
magnitude_table = texttable.Texttable(max_width=90)
# magnitude_table.set_cols_width([90, 90])
for x in server_magnitude:
magnitude_table.add_rows([["Server", "Commands"], [x['server_name'][:25], x['magnitude']]])
magnitude_msg = magnitude_table.draw()
command_table = texttable.Texttable(max_width=90)
# command_table.set_cols_width([90, 90])
for x in command_magnitude:
command_table.add_rows([["Command", "Count"], [x['command'], x['magnitude']]])
command_msg = command_table.draw()
command_stats_msg = magnitude_msg+'\n\n'+command_msg
text_channels = 0
voice_channels = 0
for shard in results:
text_channels += int(shard['text_channels'])
voice_channels += int(shard['voice_channels'])
user_count = 0
unique_users = 0
server_count = 0
for shard in results:
server_count += int(shard['servers'])
user_count += int(shard['users'])
unique_users += int(shard['unique_users'])
seconds = time.time() - start_time
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
w, d = divmod(d, 7)
if s != 0:
uptime = '**{0}** seconds.'.format(int(s))
if m != 0:
uptime = ' : **{0}** minutes : '.format(int(m)) + uptime
if h != 0:
uptime = ' : **{0}** hours'.format(int(h)) + uptime
if d != 0:
uptime = ' : **{0}** days'.format(int(d)) + uptime
if w != 0:
uptime = ' : **{0}** weeks {1}'.format(int(w)) + uptime
if m == 0:
uptime = ' '+uptime
else:
uptime = uptime[2:]
msg = ":bar_chart: **User/Bot Statistics**\n"
msg += "> Uptime: "+uptime+"\n"
msg += "> On **{0}** Servers\n".format(server_count)
msg += "> **{0}** Text channels | **{1}** Voice\n".format(text_channels, voice_channels)
msg += "> Serving **{0}** Users\n".format(user_count)
msg += "> Unique Users: **{0}**\n".format(unique_users)
msg += "> Who've messaged **{0}** times ".format(message_count)
msg += "where **{0}** of them have been edited ".format(message_edit_count)
msg += "and **{0}** deleted.\n".format(message_delete_count)
msg += "> In total **{0}** commands have been called, __{1}__ from this server.\n".format(command_count, server_command_count)
msg += ':keyboard: **Command Statistics**\n'
msg += "```\n{0}```".format(command_stats_msg)
msg += ':desktop: **Server Statistics**\n'
msg += '> Largest Server: **{0}** (Users: **{1}**)\n'.format(biggest_server_name, biggest_server_count)
msg += '> Most used on: **{0}** (Commands: **{1}**/{2})\n'.format(server_magnitude[0]['server_name'], server_magnitude[0]['magnitude'], command_count)
# msg += '> Server with most messages: *{0}* (Messages: **{1}/{2}**)'
await self.bot.say(msg)
except Exception as e:
print(e)
@commands.command(pass_context=True, aliases=['so', 'stack', 'csearch', 'stacko', 'stackoverflow'])
async def sof(self, ctx, *, text:str):
try:
api = 'https://api.stackexchange.com/2.2/search?order=desc&sort=votes&site=stackoverflow&intitle={0}'.format(text)
r = await self.get_json(api)
q_c = len(r['items'])
if q_c == 0:
api = 'https://api.stackexchange.com/2.2/similar?order=desc&sort=votes&site=stackoverflow&title={0}'.format(text)
r = await self.get_json(api)
q_c = len(r['items'])
if q_c == 0:
api = 'https://api.stackexchange.com/2.2/search/excerpts?order=desc&sort=votes&site=stackoverflow&q={0}'.format(text)
r = await self.get_json(api)
q_c = len(r['items'])
if q_c == 0:
api = 'https://api.stackexchange.com/2.2/search/advanced?order=desc&sort=votes&site=stackoverflow&q={0}'.format(text)
r = await self.get_json(api)
q_c = len(r['items'])
if q_c == 0:
await self.bot.say(":warning: `No results found on` <https://stackoverflow.com>")
return
if q_c > 5:
msg = "**First 5 Results For: `{0}`**\n".format(text)
else:
msg = "**First {0} Results For: `{1}`**\n".format(str(q_c), text)
count = 0
for s in r['items']:
if q_c > 5:
if count == 5:
break
else:
if count == q_c:
break
epoch = int(s['creation_date'])
date | |
self.dbapi.idisk_update(ipv['disk_or_part_uuid'],
{'foripvid': None})
except exception.DiskNotFound:
pass
elif ipv['pv_type'] == constants.PV_TYPE_PARTITION:
if not ipv.get('disk_or_part_uuid'):
return
try:
ihost = self.dbapi.ihost_get(ipv.get('forihostid'))
values = {'foripvid': None}
if ihost['invprovision'] == constants.PROVISIONED:
values.update(
{'status': constants.PARTITION_READY_STATUS})
self.dbapi.partition_update(ipv['disk_or_part_uuid'], values)
except exception.DiskPartitionNotFound:
pass
# TODO(rchurch): Update this for cinder disk removal
def _ipv_handle_phys_storage_removal(self, ipv, storage):
""" Remove a PV from a missing disk or partition"""
if ipv['lvm_pv_name'] == constants.CINDER_DRBD_DEVICE:
# Special Case: combo node /dev/drbd4 for cinder will
# not show up in the disk list so allow it to remain.
return
# For any other system type & VG the removal is done automatically
# as users don't have the option (yet).
try:
self._prepare_for_ipv_removal(ipv)
self.dbapi.ipv_destroy(ipv.id)
except Exception:
LOG.exception("Remove ipv for missing %s failed" % storage)
def update_partition_config(self, context, partition):
"""Configure the partition with the supplied data.
:param context: an admin context.
:param partition: data about the partition
"""
LOG.debug("PART conductor-manager partition: %s" % str(partition))
# Get host.
host_uuid = partition.get('ihost_uuid')
try:
db_host = self.dbapi.ihost_get(host_uuid)
except exception.ServerNotFound:
LOG.exception("Invalid host_uuid %s" % host_uuid)
return
personalities = [db_host.personality]
config_uuid = self._config_update_hosts(context,
personalities,
host_uuids=[host_uuid],
reboot=False)
config_dict = {
"host_uuids": [host_uuid],
'personalities': personalities,
"classes": ['platform::partitions::runtime'],
"idisk_uuid": partition.get('idisk_uuid'),
"partition_uuid": partition.get('uuid'),
puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_DISK_PARTITON_CONFIG
}
# Currently sysinv agent does not create the needed partition during nova-local
# configuration without the existence of the initial_config_complete flag.
# During Ansible bootstrap, force manifest apply as the generation of this
# file is deferred until host unlock where full controller manifest is applied.
force_apply = False if cutils.is_initial_config_complete() else True
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict,
force=force_apply)
    def ipartition_update_by_ihost(self, context,
                                   ihost_uuid, ipart_dict_array):
        """Update existing partition information based on information received
        from the agent.

        :param context: an admin context (not referenced in this method)
        :param ihost_uuid: uuid of the host whose partitions are reported
        :param ipart_dict_array: partition dicts as reported by the agent
        """
        LOG.debug("PART ipartition_update_by_ihost %s ihost_uuid "
                  "ipart_dict_array: %s" % (ihost_uuid, str(ipart_dict_array)))

        # Get host.
        # NOTE(review): str.strip() returns a new string, so this call has
        # no effect — presumably `ihost_uuid = ihost_uuid.strip()` was
        # intended; confirm before changing.
        ihost_uuid.strip()
        try:
            db_host = self.dbapi.ihost_get(ihost_uuid)
        except exception.ServerNotFound:
            LOG.exception("Invalid ihost_uuid %s" % ihost_uuid)
            return

        # Get the id of the host.
        forihostid = db_host['id']

        # Obtain the partitions, disks and physical volumes that are currently
        # present in the DB.
        db_parts = self.dbapi.partition_get_by_ihost(ihost_uuid)
        db_disks = self.dbapi.idisk_get_by_ihost(ihost_uuid)

        # Check that the DB partitions are in sync with the DB disks and PVs.
        for db_part in db_parts:
            if not db_part.device_path:
                # Should not happen unless pyudev gives us wrong data
                # or we have a programming error.
                LOG.error("PART ipartition_update_by_ihost: "
                          "Disk partition %s is missing its "
                          "device path, ignoring!" % db_part.uuid)
                continue

            # Obtain the disk the partition is on (matched by the disk's
            # device path being a prefix/substring of the partition's).
            part_disk = next((d for d in db_disks
                              if d.device_path in db_part.device_path), None)

            if not part_disk:
                # Should not happen as we only store partitions associated
                # with a disk.
                LOG.error("PART ipartition_update_by_ihost: "
                          "Disk for partition %s is not "
                          "present in database, ignoring!" % db_part.uuid)
                continue

            partition_dict = {'forihostid': forihostid}
            partition_update_needed = False

            if part_disk.uuid != db_part['idisk_uuid']:
                # TO DO: What happens when a disk is replaced
                partition_update_needed = True
                partition_dict.update({'idisk_uuid': part_disk.uuid})
                LOG.info("Disk for partition %s has changed." %
                         db_part['uuid'])

            if partition_update_needed:
                self.dbapi.partition_update(db_part['uuid'],
                                            partition_dict)
                LOG.debug("PART conductor - partition needs to be "
                          "updated.")

        # Go through the partitions reported by the agent and make needed
        # modifications.
        for ipart in ipart_dict_array:
            # Not to add ceph osd related partitions
            if (ipart['type_guid'] == constants.CEPH_DATA_PARTITION or
                    ipart['type_guid'] == constants.CEPH_JOURNAL_PARTITION):
                continue

            part_dict = {
                'forihostid': forihostid,
                'status': constants.PARTITION_IN_USE_STATUS,  # Be conservative here
            }

            part_dict.update(ipart)

            found = False

            # If the paths match, then the partition already exists in the DB.
            for db_part in db_parts:
                if ipart['device_path'] == db_part.device_path:
                    found = True

                    # Keep the device node in sync (it may be re-enumerated).
                    if ipart['device_node'] != db_part.device_node:
                        LOG.info("PART update part device node")
                        self.dbapi.partition_update(
                            db_part.uuid,
                            {'device_node': ipart['device_node']})
                    LOG.debug("PART conductor - found partition: %s" %
                              db_part.device_path)

                    self._fill_partition_info(db_part, ipart)

                    # Try to resize the underlying FS.
                    if db_part.foripvid:
                        pv = self.dbapi.ipv_get(db_part.foripvid)
                        if (pv and pv.lvm_vg_name == constants.LVG_CINDER_VOLUMES):
                            try:
                                self._resize_cinder_volumes(delayed=True)
                            except retrying.RetryError:
                                LOG.info("Cinder volumes resize failed")
                    break

            # If we've found no matching path, then this is a new partition.
            if not found:
                LOG.debug("PART conductor - partition not found, adding...")
                # Complete disk info. The for/else below only warns when no
                # backing disk row matched the reported partition path.
                for db_disk in db_disks:
                    if db_disk.device_path in ipart['device_path']:
                        part_dict.update({'idisk_id': db_disk.id,
                                          'idisk_uuid': db_disk.uuid})
                        LOG.debug("PART conductor - disk - part_dict: %s " %
                                  str(part_dict))

                        new_part = None
                        try:
                            LOG.info("Partition create on host: %s. Details: %s" % (forihostid, part_dict))
                            new_part = self.dbapi.partition_create(
                                forihostid, part_dict)
                        except Exception as e:
                            LOG.exception("Partition creation failed on host: %s. "
                                          "Details: %s" % (forihostid, str(e)))

                        # If the partition has been successfully created, update its status.
                        if new_part:
                            if new_part.type_guid != constants.USER_PARTITION_PHYSICAL_VOLUME:
                                status = {'status': constants.PARTITION_IN_USE_STATUS}
                            else:
                                status = {'status': constants.PARTITION_READY_STATUS}
                            self.dbapi.partition_update(new_part.uuid, status)
                        break
                else:
                    # This shouldn't happen as disks are reported before partitions
                    LOG.warning("Found partition not associated with any disks, "
                                "underlying disk should be created on next inventory "
                                "reporting, ignoring for now. Details: ihost_uuid: %s "
                                "ipart_dict_array: %s" % (ihost_uuid, part_dict))

        # Check to see if partitions have been removed.
        for db_part in db_parts:
            found = False
            for ipart in ipart_dict_array:
                if db_part.device_path:
                    if ipart['device_path'] == db_part.device_path:
                        found = True
                        break

            # PART - TO DO - Maybe some extra checks will be needed here,
            # depending on the status.
            if not found:
                delete_partition = True

                # If it's still used by a PV, don't remove the partition yet.
                if db_part.foripvid:
                    delete_partition = False
                # If the partition is in creating state, don't remove it.
                elif (db_part.status ==
                        constants.PARTITION_CREATE_ON_UNLOCK_STATUS or
                        db_part.status ==
                        constants.PARTITION_CREATE_IN_SVC_STATUS):
                    delete_partition = False
                elif not cutils.is_partition_the_last(self.dbapi,
                                                      db_part.as_dict()):
                    delete_partition = False
                    LOG.debug("Partition %s(%s) is missing, but it cannot "
                              "be deleted since it's not the last "
                              "partition on disk." %
                              (db_part.uuid, db_part.device_path))

                if delete_partition:
                    LOG.info("Deleting missing partition %s - %s" %
                             (db_part.uuid, db_part.device_path))
                    self.dbapi.partition_destroy(db_part.uuid)
                else:
                    LOG.warn("Partition missing: %s - %s" %
                             (db_part.uuid, db_part.device_path))
def ipv_update_by_ihost(self, context,
ihost_uuid, ipv_dict_array):
"""Create or update ipv for an ihost with the supplied data.
This method allows records for a physical volume for ihost to be
created, or updated.
:param context: an admin context
:param ihost_uuid: ihost uuid unique id
:param ipv_dict_array: initial values for a physical volume objects
:returns: pass or fail
"""
def is_same_disk(idisk, ipv):
if 'disk_or_part_device_path' in ipv:
if ipv.get('disk_or_part_device_path') is not None:
if idisk.device_path == ipv.get('disk_or_part_device_path'):
return True
else:
return False
return False
ihost_uuid.strip()
try:
ihost = self.dbapi.ihost_get(ihost_uuid)
except exception.ServerNotFound:
LOG.exception("Invalid ihost_uuid %s" % ihost_uuid)
return
forihostid = ihost['id']
ipvs = self.dbapi.ipv_get_by_ihost(ihost_uuid)
ilvgs = self.dbapi.ilvg_get_by_ihost(ihost_uuid)
idisks = self.dbapi.idisk_get_by_ihost(ihost_uuid)
partitions = self.dbapi.partition_get_by_ihost(ihost_uuid)
# Cinder is now optional. A PV must be defined for it as part of
# provisioning. When looking for disk re-enumerations, identify it so
# when the DRBD device is reported by the agent we can reconcile the PV
# entry.
cinder_pv_id = None
# Timeout for PV operations
# In case of major failures (e.g. sysinv restart, system reset)
# PVs may remain stuck in adding or removing. Semantic checks
# will then prevent any other operation on the PVs
# First remove any invalid timeout (i.e. PV was removed)
ipv_uuids = [i['uuid'] for i in ipvs]
for k in self._pv_op_timeouts.keys():
if k not in ipv_uuids:
del self._pv_op_timeouts[k]
# Make sure that the Physical Volume to Disk info is still valid
for ipv in ipvs:
# Handle the case where the disk has been
# removed/replaced/re-enumerated.
pv_disk_is_present = False
if ipv['pv_type'] == constants.PV_TYPE_DISK:
for idisk in idisks:
if is_same_disk(idisk, ipv):
pv_disk_is_present = True
ipv_update_needed = False
pv_dict = {'forihostid': forihostid}
# Disk has been removed/replaced => UUID has changed.
if idisk.uuid != ipv['disk_or_part_uuid']:
ipv_update_needed = True
pv_dict.update({'disk_or_part_uuid': idisk.uuid})
LOG.info("Disk for ipv %s has changed." % ipv['uuid'])
# Disk has been re-enumerated.
if idisk.device_node != ipv['disk_or_part_device_node']:
ipv_update_needed = True
# If the PV name contained the device node, replace
# it accordingly.
new_lvm_pv_name = ipv['lvm_pv_name']
if ipv['disk_or_part_device_node'] in ipv['lvm_pv_name']:
new_lvm_pv_name = new_lvm_pv_name.replace(
ipv['disk_or_part_device_node'],
idisk.device_node)
# Update PV dictionary containing changes.
pv_dict.update({
'disk_or_part_device_node': idisk.device_node,
'lvm_pv_name': new_lvm_pv_name
})
# Update current PV object.
ipv.disk_or_part_device_node = idisk.device_node
ipv.lvm_pv_name = new_lvm_pv_name
LOG.info("Disk for ipv %s has been re-enumerated." %
ipv['uuid'])
if ipv_update_needed:
try:
self.dbapi.ipv_update(ipv['uuid'], pv_dict)
except Exception:
LOG.exception("Update ipv for changed idisk "
"details failed.")
break
| |
from __future__ import unicode_literals
import sys, os, shutil, re, getpass, io, certifi, plura_dl
from plura_dl import PluraDL
from plura_dl.utils import ExtractorError, DownloadError
# Abort early: the rest of the script relies on Python 3 semantics.
if sys.version_info[0] <3:
    raise Exception("Must be using Python 3")
# Point the SSL stack at certifi's CA bundle so HTTPS requests verify.
certpath = os.path.abspath(certifi.where())
os.environ["SSL_CERT_FILE"] = certpath
# IMPORTANT SETTINGS TO PREVENT SPAM BLOCKING OF YOUR ACCOUNT/IP AT PLURALSIGHT # # # #
SLEEP_INTERVAL = 40 # minimum sleep time # #
SLEEP_OFFSET = 120 # adding random sleep time # Change this at your own risk. #
RATE_LIMIT = 10**6 # download rate in bytes/s # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Global defaults, filled in by the credential/path setup code at runtime.
DLPATH, USERNAME, PASSWORD = "", "", ""
INPROGRESSPATH, FINISHPATH, FAILPATH, INTERRUPTPATH = "", "", "", ""
PDL_OPTS = {}
SUBTITLE_OFF = False
# plura_dl output filename template and course URL prefix.
FILENAME_TEMPLATE = r"%(playlist_index)s-%(chapter_number)s-%(title)s-%(resolution)s.%(ext)s"
PLURAURL = r"https://app.pluralsight.com/library/courses/"
SCRIPTPATH = os.path.dirname(os.path.abspath(sys.argv[0]))
COOKIEPATH = os.path.join(SCRIPTPATH, 'cookies')
COOKIEFILE = os.path.join(COOKIEPATH, 'cookies.txt')
# Start every run with a clean cookie jar: keep the directory, drop any
# stale cookies.txt left over from a previous session.
if not os.path.exists(COOKIEPATH):
    os.mkdir(COOKIEPATH)
elif os.path.exists(COOKIEFILE):
    os.remove(COOKIEFILE)
class Logger(object):
    """Handling logging mechanism of PluraDL.

    Every message is echoed to stdout and appended to the logfile.

    Arguments:
        logpath {str} -- Path to logfile (truncated on construction)
    """

    def __init__(self, logpath):
        self.logpath = logpath
        # Truncate/create the logfile up front so later appends start clean.
        # (The original `with open(...) as f: f.close` referenced f.close
        # without calling it — dead code; the context manager closes f.)
        open(self.logpath, 'wt').close()

    def _write(self, msg):
        # Shared sink for all severities: echo and append.
        print(msg)
        with open(self.logpath, 'at') as f:
            f.write(msg + '\n')

    def debug(self, msg):
        self._write(msg)

    def warning(self, msg):
        self._write(msg)

    def error(self, msg):
        self._write(msg)
def set_playlist_options(digits):
    """Using appended digits in courselist.txt to set playlist option.

    Arguments:
        digits {[int]} -- List with playlist indicies (sorted by caller)
    """
    global PDL_OPTS
    count = len(digits)
    if count == 1:
        # A single digit caps the playlist at that index.
        print("Downloading video indicies up to",digits[0],"to")
        PDL_OPTS["playlistend"] = digits[0]
    elif count == 2:
        # Two digits define an inclusive range.
        print("Downloading video indicies from",digits[0],"up to and including",digits[1])
        PDL_OPTS["playliststart"] = digits[0]
        PDL_OPTS["playlistend"] = digits[1]
    elif count > 2:
        # Three or more digits select exactly those indices.
        print("Downloading specific video indicies", digits)
        PDL_OPTS["playlist_items"] = ','.join(str(x) for x in digits)
def move_content(pdl, course_id, coursepath, completionpath):
    """Moves course content to its completion path.

    Arguments:
        pdl {PluraDL} -- PluraDL object (used only for stdout reporting)
        course_id {str} -- Course identifier; becomes the final dir name
        coursepath {str} -- Temporary in-progress directory to move from
        completionpath {str} -- Path where to store content
    """
    finalpath = os.path.join(completionpath, course_id)
    pdl.to_stdout("Moving content to " + finalpath)
    # NOTE: set_directory also chdirs into completionpath as a side effect.
    set_directory(completionpath)
    try:
        # Replace any previous copy of the course at the destination.
        if os.path.exists(finalpath):
            shutil.rmtree(finalpath)
        shutil.move(coursepath,finalpath)
        # NOTE(review): this removes the entire in-progress root, not just
        # this course's subdirectory — confirm that is intended.
        if os.path.exists(INPROGRESSPATH):
            shutil.rmtree(INPROGRESSPATH)
    except PermissionError:
        print("Directory still in use, leaving it. Will be fixed in future releases.")
def invoke_download(course_id, course_url, coursepath):
    """Using plura_dl API to invoke download requests with associated parameters.

    Arguments:
        course_id {str} -- Course identifier
        course_url {str} -- Playlist url
        coursepath {str} -- Local temporary course storage path

    Returns:
        Bool -- Validation of completion level. True means "handled, move
        on to the next course" (even for extraction/download errors);
        False only when the user interrupted the download.
    """
    with PluraDL(PDL_OPTS) as pdl:
        try:
            # Invoke download
            set_directory(coursepath)
            pdl.download([course_url])
            # Moving content to _finished destination path if the download was successful
            pdl.to_stdout("The course '" + course_id + "' was downloaded successfully.")
            move_content(pdl, course_id, coursepath, FINISHPATH)
            return True
        except ExtractorError:
            # Handling the case of invalid download requests
            pdl.to_stdout("The course '" + course_id + "' may not be a part of your current licence.")
            pdl.to_stdout("Visit " + course_url + " for more information.\n")
            # Moving content to _failed destination
            move_content(pdl, course_id, coursepath, FAILPATH)
            return True
        except DownloadError:
            # Handling the more general case of download error
            pdl.to_stdout("Something went wrong.")
            pdl.to_stdout("The download request for '" + course_id + "' was forced to terminate.")
            pdl.to_stdout("Double check that " + course_url)
            pdl.to_stdout("exists or that your subscription is valid for accessing its content.\n")
            # Moving content to _failed destination path
            move_content(pdl, course_id, coursepath, FAILPATH)
            return True
        except KeyboardInterrupt:
            # Handling the case of user interruption
            pdl.to_stdout("\n\nThe download stream for '" + course_id + "' was canceled by user.")
            # Moving content to _canceled destination
            move_content(pdl, course_id, coursepath, INTERRUPTPATH)
            return False
def pluradl(course):
    """Handling the video downloading requests for a single course.

    Arguments:
        course {(str, [])} -- Course identifier and playlist parameters

    Returns:
        bool -- True when the course completed (or was already downloaded),
        False when the user interrupted the download. (The previous
        docstring claimed a CLI command string was returned; it is not.)
    """
    global PDL_OPTS
    # Course metadata
    course_id = course[0]
    pl_digits = course[1]
    set_playlist_options(pl_digits)
    course_url = PLURAURL + course_id
    # OS parameters - Setting up paths metadata
    coursepath = os.path.join(INPROGRESSPATH,course_id)
    # Invoking download if not already finished
    if not os.path.exists(os.path.join(FINISHPATH, course_id)):
        # Setting progress structure
        if not os.path.exists(INPROGRESSPATH):
            os.mkdir(INPROGRESSPATH)
        set_directory(coursepath)
        # Setting up logging metadata; the Logger tees plura_dl output to
        # a per-course logfile inside the course directory.
        logfile = course_id + ".log"
        logpath = os.path.join(coursepath,logfile)
        PDL_OPTS["logger"] = Logger(logpath)
        return invoke_download(course_id, course_url, coursepath)
    else:
        print("Course", course_id, "already downloaded")
        return True
def flag_parser():
    """Argument handling of 4 or more arguments, interpreting arguments
    as flags with associated values.

    Sets the module globals USERNAME and PASSWORD when both a username
    flag and a password flag with values are present.

    Returns:
        Bool -- Validation of argument input
    """
    if len(sys.argv) < 5:
        return False
    global USERNAME
    global PASSWORD

    def _check_flags(key, flag_states, arg_string=' '.join(sys.argv[1:])):
        # Return the token(s) following the first recognized flag for `key`.
        for flag in flag_states[key][1:]:
            if flag in all_flags:
                lookaroundflag = r'(?<=' + flag + ' ' + ')'
                lookaroundflag += r".*?[\S]+"
                return re.findall(lookaroundflag, arg_string)

    def _check_inputs(key, user_inputs):
        for user_input in user_inputs:
            user_input = user_input.strip()
            if user_input not in (all_flags):
                flag_inputs[key] = user_input
                break  # will take the first valid input

    usn_psw_flag_state = False
    flag_states = {"usn": [False], "psw": [False]}
    flag_inputs = {}
    username_flags = ("--user", "--username", "-u")
    password_flags = ("--pass", "--password", "-p")
    all_flags = (username_flags + password_flags)
    # Record which flags occurred (and which spelling was used).
    for arg in sys.argv[1:]:
        if arg in username_flags:
            flag_states["usn"][0] = True
            flag_states["usn"].append(arg)
        if arg in password_flags:
            flag_states["psw"][0] = True
            flag_states["psw"].append(arg)
    if flag_states["usn"][0] and flag_states["psw"][0]:
        usn_psw_flag_state = True
    for key in flag_states.keys():
        if flag_states[key][0]:
            user_inputs = _check_flags(key, flag_states)
            if user_inputs:
                _check_inputs(key, user_inputs)
    # Both flags must have yielded an actual value.
    if (not "usn" in flag_inputs.keys()) or (not "psw" in flag_inputs.keys()):
        usn_psw_flag_state = False
    if usn_psw_flag_state:
        USERNAME = flag_inputs["usn"]
        # BUG FIX: this previously read flag_inputs["<PASSWORD>"] (a
        # placeholder key that never exists), raising KeyError whenever
        # both flags were supplied correctly.
        PASSWORD = flag_inputs["psw"]
        return True
    else:
        return False
def arg_parser():
    """Handling of simple username and password argument input.

    Returns:
        Bool -- Validation of argument input
    """
    global USERNAME
    global PASSWORD
    # Need at least two positional arguments after the script name.
    if len(sys.argv) < 3:
        return False
    candidate_user = sys.argv[1]
    candidate_pass = sys.argv[2]
    # Values starting with '-' look like flags, not credentials.
    if candidate_user[0] != '-' and candidate_pass[0] != '-':
        USERNAME = candidate_user
        PASSWORD = candidate_pass
        return True
    return False
def get_usr_pw():
    """Requesting credentials from the user.

    Returns:
        (str, str) -- the username and password entered interactively.
        NOTE(review): USERNAME/PASSWORD assigned here are locals that
        shadow the module globals; callers must use the returned tuple.

    Raises:
        ValueError: User enters an empty password too many times
    """
    print("Enter you Pluralsight credentials")
    # Three attempts in total. Note that an empty password sends the user
    # back to the *username* prompt on the next attempt.
    for attempt in ["First","Second","Last"]:
        u0 = input("Enter username: ")
        if u0 == "":
            print("Username cannot be empty, enter username again")
            print(attempt, "attempt failed")
            continue
        else:
            USERNAME = u0
            print("Enter password (will not be displayed)")
            p0 = getpass.getpass(': ')
            if p0 != "":
                PASSWORD = p0
                return USERNAME, PASSWORD
            else:
                print('Password cannot be empty, enter password again')
    # for/else: all attempts exhausted without a valid pair.
    else:
        raise ValueError('Username or password was not given.')
def set_subtitle():
    """Determines whether subtitle parameters should be turned on or not.
    """
    global SUBTITLE_OFF
    recognized = ("--sub", "--subtitle", "-s",
                  "--SUB", "--SUBTITLE", "-S")
    # Any recognized flag among the CLI arguments disables subtitles.
    for candidate in sys.argv[1:]:
        if candidate in recognized:
            SUBTITLE_OFF = True
            print("Subtitles will not be appended to videoclips")
def set_directory(path):
    """Create *path* if needed and make it the current working directory.

    Arguments:
        path {str} -- Full path to directory
    """
    # makedirs(..., exist_ok=True) also creates missing parent directories
    # and avoids the exists()/mkdir() race of the previous implementation.
    os.makedirs(path, exist_ok=True)
    os.chdir(path)
def get_courses(scriptpath):
    """Parsing courselist.txt separating course data.

    Arguments:
        scriptpath {str} -- Absolute path to script directory

    Returns:
        [(str, [int])] -- List of course identifiers exposed by
        courselist.txt; an empty list when courselist.txt is missing.
    """
    def _parse_line(line):
        # A line carries one course id (any token containing a non-digit)
        # plus optional playlist indices (pure-digit tokens).
        course_id = ""
        digits = []
        for chunk in re.findall(r'\S+', line):
            if re.search(r'\D', chunk):
                course_id = chunk
            else:
                digits.append(int(chunk))
        digits.sort()
        return course_id, digits

    # courses textfile prelocated inside script directory
    filepath = os.path.join(scriptpath, "courselist.txt")
    courses = []
    try:
        # BUG FIX: open read-only; 'r+' needlessly required write
        # permission on the file.
        with open(filepath, 'r') as file:
            for line in file.readlines():
                if re.search(r'\S', line):
                    courses.append(_parse_line(line))
        return courses
    except FileNotFoundError:
        print("There is no courselist.txt in script path. Terminating script ...")
        # BUG FIX: previously fell through returning None, which made the
        # caller crash with a TypeError when iterating the result.
        return courses
def download_courses(courses):
"""Dowloading all courses listed in courselist.txt.
Arguments:
courses {[(str,[])]} -- List of tuples with course ID and playlist parameters.
"""
# General PluraDL settings
global PDL_OPTS
# Globals
PDL_OPTS["username"] = USERNAME
PDL_OPTS["password"] = PASSWORD
PDL_OPTS["sleep_interval"] = SLEEP_INTERVAL
PDL_OPTS["max_sleep_interval"] = SLEEP_INTERVAL + SLEEP_OFFSET
PDL_OPTS["ratelimit"] = RATE_LIMIT
PDL_OPTS["outtmpl"] = FILENAME_TEMPLATE
PDL_OPTS["cookiefile"] = COOKIEFILE
# Tweaks
PDL_OPTS["verbose"] = True
PDL_OPTS["restrictfilenames"] = True
PDL_OPTS["format"] = "bestaudio/best"
PDL_OPTS["writesubtitles"] = True
PDL_OPTS["allsubtitles"] = True
PDL_OPTS["subtitlesformat"] = r'srt'
PDL_OPTS["verbose"] = True
if SUBTITLE_OFF:
PDL_OPTS["writesubtitles"] = False
PDL_OPTS["allsubtitles"] = False
for course in courses:
if pluradl(course):
| |
<filename>yoapi/accounts.py
# -*- coding: utf-8 -*-
"""Account management package."""
import random
import pytz
import sys
from base64 import b64decode
from uuid import uuid4
import re
import requests
from bson import ObjectId
from datetime import datetime
from flask import current_app, g, request
from mongoengine import NotUniqueError, DoesNotExist, Q
from pytz import UnknownTimeZoneError
from parse_rest.user import User as ParseUser
from phonenumbers.phonenumberutil import NumberParseException
from requests.exceptions import RequestException
from .async import async_job
from .constants.regex import USERNAME_REGEX
from .core import cache, s3, twilio, sendgrid, redis, facebook
from .errors import APIError
from .helpers import (random_string, get_usec_timestamp, get_remote_addr,
random_number_string, clean_phone_number,
get_location_data)
from .models import (User, AuthToken, SignupLocation, Device,
NotificationEndpoint)
from .permissions import (assert_view_permission, assert_account_permission,
assert_admin_permission)
from .services import low_rq
from .urltools import UrlHelper
# Pylint rules regarding variable names that are not in PEP8.
# https://www.python.org/dev/peps/pep-0008/#global-variable-names
# pylint: disable=invalid-name
sms_redis_prefix = 'yoapi:sms:'
@async_job(rq=low_rq)
def add_email_to_mailchimp(email):
    """If the user has an email address on file add it to the mailchimp
    list"""
    config = current_app.config
    # double_optin=False subscribes the address immediately, without a
    # confirmation email.
    payload = {
        'apikey': config.get('MAILCHIMP_API_KEY'),
        'id': config.get('MAILCHIMP_LIST_ID'),
        'email': {'email': email},
        'double_optin': False
    }
    endpoint_url = '%s/%s' % (config.get('MAILCHIMP_SERVER'),
                              'lists/subscribe.json')
    requests.post(endpoint_url, json=payload)
def clear_profile_picture(user):
    """Clears the profile picture for a user.

    Deletes the stored S3 image when the photo field holds an S3 key (an
    external http(s) URL is left alone), then clears the field and
    invalidates the user cache.
    """
    if user.photo and not user.photo.startswith('http'):
        try:
            s3.delete_image(user.photo)
        except Exception:
            # Best-effort delete: log whatever went wrong and still clear
            # the field. BUG FIX: a bare `except:` previously also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            current_app.log_exception(sys.exc_info())
    user.photo = None
    user.save()
    # Always clear the cache after modifying a user object.
    clear_get_user_cache(user)
def clear_get_facebook_user_cache(facebook_id):
    """Clears the cache for the given facebook user's id"""
    # _get_facebook_user is memoized on facebook_id; drop that entry only.
    cache.delete_memoized(_get_facebook_user, facebook_id)
def clear_get_user_cache(user):
    """A convenience method to clear the _get_user cache"""
    # Both lookup paths (by user id and by username) are memoized
    # separately and must be cleared together.
    cache.delete_memoized(_get_user, user_id=str(user.user_id))
    cache.delete_memoized(_get_user_by_username, str(user.username))
    # Only users linked to Facebook have an entry in the facebook cache.
    if user.facebook_id:
        clear_get_facebook_user_cache(user.facebook_id)
def write_through_user_cache(user):
    """Write the changes on a user object directly to the cache
    and the database to reduce the number of calls needed.

    First save the user so that the changes are put into the db.
    Second, reload the user so that any changes made directly to
    the db will be present in cache.
    Last, write the new user object directly to the cache.
    """
    user.save()
    user.reload()

    # Overwrite the memoized _get_user entry for this user id.
    _func = _get_user
    cache_key = _func.make_cache_key(_func.uncached,
                                     user_id=user.user_id)
    cache.cache.set(cache_key, user)

    # Overwrite the memoized _get_user_by_username entry as well.
    _func = _get_user_by_username
    cache_key = _func.make_cache_key(_func.uncached,
                                     str(user.username))
    cache.cache.set(cache_key, user)

    # CONSISTENCY FIX: guard on facebook_id like clear_get_user_cache does,
    # instead of clearing a bogus None-keyed entry for users that have no
    # linked Facebook account.
    if user.facebook_id:
        clear_get_facebook_user_cache(user.facebook_id)
def complete_account_verification_by_sms(user, token, number):
    """Verifies token and marks account as verified.

    Arguments: user
               token
               number: must be the "clean" phone number that starts with +

    Raises APIError when no code is set or the token is invalid/used/expired.
    """
    if not user.temp_token:
        raise APIError('No verification code set.')
    # Raises APIError on wrong, already-used or expired tokens.
    assert_valid_temp_token(user, token)
    user.verified = True
    # Record when the one-time token was consumed so it cannot be reused.
    user.temp_token.used = get_usec_timestamp()
    # this should be unnecessary.
    user.phone = number
    user.save()
    # Always clear the cache after modifying a user object.
    clear_get_user_cache(user)
def confirm_password_reset(user, token, new_password):
    """Sets a new password for a user if the auth token matches.

    Raises APIError (via assert_valid_temp_token) when the token is
    missing, wrong, already used or expired.
    """
    assert_valid_temp_token(user, token)
    # Mark the one-time token as consumed before storing the new password.
    user.temp_token.used = get_usec_timestamp()
    # BUG FIX: the argument had been mangled into a `<PASSWORD>` placeholder
    # (not even valid syntax); pass the caller's new_password through.
    user.set_password(new_password)
    user.save()
    # Always clear the cache after modifying a user object.
    clear_get_user_cache(user)
def assert_valid_temp_token(user, token):
    """Verifies a temporary token against what is stored in the user object.

    Raises:
        APIError: if the user has no stored token, the supplied token does
            not match, or the stored token was already used or has expired.
    """
    if not user.temp_token:
        # Ensure the user has a token
        # TODO: Validate under what circumstance this can happen.
        raise APIError('Verification token missing.')
    if token != user.temp_token.token:
        # BUG FIX: removed a stray debug `print` here that leaked the
        # secret token to stdout (and used Python 2 print syntax).
        raise APIError('Verification code incorrect.')
    # Check if the token has already been used.
    if user.temp_token.used:
        raise APIError('Verification code already used.')
    # Check if the token has expired. The default expiration time should
    # be one day.
    if user.temp_token.expires < get_usec_timestamp():
        raise APIError('Verification code expired.')
def make_valid_username(name):
    '''Ensures that a username will pass the validation regex in the form.

    Returns the name unchanged when it is already valid; otherwise strips
    it down to uppercase alphanumerics (max 50 chars, starting from the
    first ascii letter). Falls back to a random "YOUSER######" name when
    nothing usable remains.
    '''
    # if the username is already valid, don't mutate it.
    if re.match(USERNAME_REGEX, name):
        return name
    # convert to 7-bit ascii so that isalpha() and isalnum()
    # will behave themselves
    # NOTE(review): under Python 2 (this module uses a print statement
    # elsewhere) encode() returns a str; do not port as-is to Python 3.
    name = name.encode('ascii', 'ignore')
    username = ''
    first_letter_match = re.search('[A-Za-z]', name)
    if first_letter_match:
        f_l_index = first_letter_match.start(0)
        # Keep at most 50 characters starting at the first letter.
        trimmed = name[f_l_index:f_l_index + 50]
        username = ''.join(c.upper() for c in trimmed if c.isalnum())
    # FIX: the random fallback was duplicated in two branches; a single
    # consolidated fallback covers both "no letter" and "no alnum" cases.
    if not username:
        username = '%s%s' % ('YOUSER', random_number_string(length=6))
    return username
def make_username_unique(original_username, random_length=4, use_letters=False):
    """Return a valid username derived from original_username that no
    existing user holds.

    If the cleaned name is taken, random padding (digits, or uppercase
    letters when use_letters is set) is appended, trimming the base so
    the result stays within 50 characters.
    """
    candidate = make_valid_username(original_username)
    try:
        _get_user_by_username(candidate)
    except APIError:
        # Lookup failed, so nobody owns this name: use it as-is.
        return candidate
    if use_letters:
        suffix = random_string(length=random_length).upper()
    else:
        suffix = random_number_string(length=random_length)
    return '%s%s' % (candidate[:50 - random_length], suffix)
def link_facebook_account(token):
    """Links the facebook account provided via token to the current user.

    If the facebook id is already attached to a different user, that
    link is removed before attaching it to the current user.

    Arguments:
        token: a facebook access token for the account to link.

    Returns:
        The updated user object from update_user.

    Raises:
        APIError: if the token does not resolve to a facebook id.
    """
    user = g.identity.user
    # BUG FIX: 'age_range' and 'birthday' were fused into one bogus
    # 'age_range,birthday' entry by a missing comma (implicit string
    # concatenation), so neither field was actually requested.
    fields = ['id', 'email', 'first_name', 'last_name', 'name', 'gender',
              'age_range', 'birthday']
    facebook_profile = facebook.get_profile(token, fields=fields)
    facebook_id = facebook_profile.get('id')
    if not facebook_id:
        raise APIError('Invalid facebook id')
    try:
        facebook_user = _get_facebook_user(facebook_id)
        if facebook_user != user:
            # Detach the facebook id from whichever user held it before.
            update_user(facebook_user, facebook_id=None,
                        ignore_permission=True)
    except DoesNotExist:
        pass
    full_name = facebook_profile.get('name', '')
    first_name = facebook_profile.get('first_name', '')
    last_name = facebook_profile.get('last_name', '')
    email = facebook_profile.get('email')
    birthday = facebook_profile.get('birthday')
    gender = facebook_profile.get('gender')
    age_range = facebook_profile.get('age_range')
    # Collapse facebook's {min, max} age range into a compact string,
    # e.g. '21+' or '17-'.
    age_range_str = None
    if age_range:
        if age_range.get('min'):
            age_range_str = '%s+' % age_range.get('min')
        elif age_range.get('max'):
            age_range_str = '%s-' % age_range.get('max')
    image_data = None
    try:
        picture_data = facebook.get_profile_picture(token)
        if not picture_data.get('is_silhouette'):
            req = requests.get(picture_data.get('url'))
            if req.headers.get('content-type').startswith('image'):
                image_data = req.content
    except Exception:
        # There seem to be some weird ssl issues here. For now
        # send an email but make sure the call succeeds.
        # (Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.)
        current_app.log_exception(sys.exc_info())
    return update_user(user, facebook_id=facebook_id,
                       name=full_name, email=email, first_name=first_name,
                       last_name=last_name, photo_data=image_data,
                       gender=gender, age_range=age_range_str,
                       birthday=birthday)
def find_users_by_numbers(numbers, country_code_if_missing='1',
                          user_phone=None, include_pseudo=False):
    """Yields (original_number, user) pairs for verified users matching
    any of the given phone numbers.

    Because contacts are often stored without a country code prefix, we
    add the user's own country code where applicable. This seems like a
    better method than matching friends against local numbers since
    false positives are very likely to occur.

    Args:
        numbers: An array of phone numbers (possibly "dirty").
        country_code_if_missing: country code to assume when absent.
        user_phone: the searching user's own number, used as a hint
            when cleaning.
        include_pseudo: when True, pseudo users are also yielded.
    """
    # Map cleaned number -> the original dirty input it came from.
    cleaned_to_original = {}
    for raw_number in numbers:
        try:
            cleaned = clean_phone_number(raw_number, country_code_if_missing,
                                         user_phone)
        except NumberParseException:
            # Number invalid so we can't include it in the search.
            continue
        cleaned_to_original[cleaned] = raw_number
    matched = User.objects(phone__in=cleaned_to_original.keys(), verified=True)
    for matched_user in matched:
        if include_pseudo or not matched_user.is_pseudo:
            yield cleaned_to_original[matched_user.phone], matched_user
def upsert_pseudo_user(phone_number, created_by_group=False):
    """Gets a user by phone number, or creates a pseudo user.

    phone_number may be "dirty"; after cleaning it will start with +
    and include a country code.

    Raises:
        APIError: if the number cannot be parsed.
    """
    default_country_code = g.identity.user.country_code or '1'
    try:
        phone_number = clean_phone_number(phone_number,
                                          default_country_code,
                                          g.identity.user.phone)
    except NumberParseException:
        raise APIError('Invalid phone number')
    # find_users_by_numbers yields (phone number, user) pairs; among the
    # matches, prefer the most recently seen user that is neither a
    # child account nor in-store.
    best_match = None
    for _, candidate in find_users_by_numbers([phone_number],
                                              include_pseudo=True):
        if candidate.parent or candidate.in_store:
            continue
        if best_match is None or candidate.last_seen_time > best_match.last_seen_time:
            best_match = candidate
    if best_match is None:
        best_match = create_user(username=phone_number[1:], phone=phone_number,
                                 is_pseudo=True, verified=True)
        event_data = {'event': 'pseudo_user_created',
                      'phone': phone_number,
                      'creator': g.identity.user.username,
                      'is_group': created_by_group}
        current_app.log_analytics(event_data)
    return best_match
def upsert_facebook_user(token):
    """Gets or creates a facebook user.

    Looks up the user by the facebook id resolved from the token; an
    existing user has their profile details refreshed, otherwise a new
    account is created with a unique username derived from the facebook
    name.

    Arguments:
        token: a facebook access token.

    Returns:
        The updated or newly created user object.

    Raises:
        APIError: if the token does not resolve to a facebook id.
    """
    # BUG FIX: 'age_range' and 'birthday' were fused into one bogus
    # 'age_range,birthday' entry by a missing comma (implicit string
    # concatenation), so neither field was actually requested.
    fields = ['id', 'email', 'first_name', 'last_name', 'name', 'gender',
              'age_range', 'birthday']
    facebook_profile = facebook.get_profile(token, fields=fields)
    facebook_id = facebook_profile.get('id')
    if not facebook_id:
        raise APIError('Invalid facebook id')
    try:
        user = _get_facebook_user(facebook_id)
    except DoesNotExist:
        user = None
    full_name = facebook_profile.get('name', '')
    first_name = facebook_profile.get('first_name', '')
    last_name = facebook_profile.get('last_name', '')
    name = full_name or '%s%s' % (first_name, last_name)
    if user:
        username = user.username
    else:
        username = make_username_unique(name)
    email = facebook_profile.get('email')
    birthday = facebook_profile.get('birthday')
    gender = facebook_profile.get('gender')
    age_range = facebook_profile.get('age_range')
    # Collapse facebook's {min, max} age range into '21+' / '17-' form.
    age_range_str = None
    if age_range:
        if age_range.get('min'):
            age_range_str = '%s+' % age_range.get('min')
        elif age_range.get('max'):
            age_range_str = '%s-' % age_range.get('max')
    picture_data = facebook.get_profile_picture(token)
    image_data = None
    if picture_data and not picture_data.get('is_silhouette'):
        try:
            req = requests.get(picture_data.get('url'))
            if req.headers.get('content-type').startswith('image'):
                image_data = req.content
        except RequestException:
            url = picture_data.get('url')
            current_app.log_exception(sys.exc_info(),
                                      fb_picture_url=url)
    if user:
        # Existing account: refresh profile details from facebook.
        return update_user(user, name=full_name, email=email,
                           first_name=first_name, last_name=last_name,
                           photo_data=image_data, gender=gender,
                           age_range=age_range_str, birthday=birthday,
                           ignore_permission=True)
    current_app.log_analytics({'event': 'facebook_user_created',
                               'username': username,
                               'facebook_id': facebook_id})
    return create_user(username=username, facebook_id=facebook_id,
                       name=full_name, email=email, first_name=first_name,
                       last_name=last_name, photo_data=image_data,
                       gender=gender, age_range=age_range_str,
                       birthday=birthday)
def create_user(**kwargs):
"""Registers a new user with the backend.
Args:
username: A username.
password: A password.
kwargs: Additional data to be stored with the account.
Returns:
A user object.
"""
# If a password is provided then it should be set through a method hashing
# it using bcrypt.
password = kwargs.pop('password', None)
# If we are setting a photo then we need to pop the image data before
# creating the model.
b64_image = None
if 'photo' in kwargs:
b64_image = kwargs.pop('photo')
image_data = None
if 'photo_data' in kwargs:
image_data = kwargs.pop('photo_data')
# Validate url's early
if kwargs.get('welcome_link'):
welcome_link = kwargs.get('welcome_link')
kwargs.update({'welcome_link': UrlHelper(welcome_link).get_url()})
# Validate | |
# repo: richardangell/pitci
import numpy as np
import pandas as pd
import re
from pitci.base import LeafNodeScaledConformalPredictor
import pitci
import pytest
from unittest.mock import Mock
class DummyLeafNodeScaledConformalPredictor(LeafNodeScaledConformalPredictor):
    """Minimal concrete subclass of LeafNodeScaledConformalPredictor used
    to exercise the base class's behaviour in tests.
    """

    def __init__(self, model="abcd"):
        super().__init__(model=model)

    def _generate_predictions(self, data):
        """Return an all-zero prediction array with one entry per row of data."""
        return np.zeros(data.shape[0])

    def _generate_leaf_node_predictions(self, data):
        """Deliberately unimplemented; tests that need it patch or override it."""
        raise NotImplementedError(
            "_generate_leaf_node_predictions not implemented in DummyLeafNodeScaledConformalPredictor"
        )
class TestCalibrate:
    """Tests for the LeafNodeScaledConformalPredictor.calibrate method."""
    # NOTE(review): the dmatrix_* fixtures appear to provide objects with a
    # get_label() method (presumably xgboost DMatrix) — confirm in conftest.py.
    @pytest.mark.parametrize("alpha", [(-0.0001), (-1), (1.0001), (2), (55)])
    def test_alpha_value_error(self, dmatrix_2x1_with_label, alpha):
        """Test an exception is raised if alpha is below 0 or greater than 1."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            ValueError, match=re.escape("alpha must be in range [0 ,1]")
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha=alpha, response=np.array([0, 1])
            )
    def test_alpha_incorrect_type_error(self, dmatrix_2x1_with_label):
        """Test an exception is raised if alpha is not an int or float."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            TypeError,
            match=re.escape(
                f"alpha is not in expected types {[int, float]}, got {str}"
            ),
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha="abc", response=np.array([0, 1])
            )
    def test_response_incorrect_type_error(self, dmatrix_2x1_with_label):
        """Test an exception is raised if response is not a pd.Series or np.ndarray."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        with pytest.raises(
            TypeError,
            match=re.escape(
                f"response is not in expected types {[pd.Series, np.ndarray]}, got {bool}"
            ),
        ):
            dummy_confo_model.calibrate(
                data=dmatrix_2x1_with_label, alpha=0.5, response=False
            )
    def test_calibrate_calls_no_train_data(self, mocker, dmatrix_2x1_with_label):
        """Test the calls to _calibrate_interval and _calibrate_leaf_node_counts methods
        when train_data is None.
        """
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # mock_manager has both patched methods attached so the relative
        # ordering of calls can be asserted on.
        mock_manager = Mock()
        mocked = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_leaf_node_counts"
        )
        mocked2 = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_interval"
        )
        mock_manager.attach_mock(mocked, "the_calibrate_leaf_node_counts")
        mock_manager.attach_mock(mocked2, "the_calibrate_interval")
        dummy_confo_model.calibrate(
            data=dmatrix_2x1_with_label,
            alpha=0.1,
            response=dmatrix_2x1_with_label.get_label(),
            train_data=None,
        )
        # test each function is called the correct number of times
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _calibrate_leaf_node_counts"
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _calibrate_interval"
        # test the order of calls to functions
        assert (
            mock_manager.mock_calls[0][0] == "the_calibrate_leaf_node_counts"
        ), "_calibrate_leaf_node_counts not called first"
        assert (
            mock_manager.mock_calls[1][0] == "the_calibrate_interval"
        ), "_calibrate_interval not called second"
        # test the arguments in the _calibrate_leaf_node_counts call
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_leaf_node_counts call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_leaf_node_counts call"
        # test the arguments in the _calibrate_interval call
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_interval call"
        np.testing.assert_array_equal(
            call_kwargs["response"], dmatrix_2x1_with_label.get_label()
        )
        assert (
            call_kwargs["alpha"] == 0.1
        ), "alpha arg incorrect in _calibrate_interval call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_interval call"
    def test_calibrate_calls_with_train_data(
        self, mocker, dmatrix_2x1_with_label, dmatrix_2x1_with_label_gamma
    ):
        """Test the calls to _calibrate_interval and _calibrate_leaf_node_counts methods
        when train_data is specified.
        """
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        mock_manager = Mock()
        mocked = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_leaf_node_counts"
        )
        mocked2 = mocker.patch.object(
            pitci.base.LeafNodeScaledConformalPredictor, "_calibrate_interval"
        )
        mock_manager.attach_mock(mocked, "the_calibrate_leaf_node_counts")
        mock_manager.attach_mock(mocked2, "the_calibrate_interval")
        dummy_confo_model.calibrate(
            data=dmatrix_2x1_with_label,
            alpha=0.1,
            response=dmatrix_2x1_with_label.get_label(),
            train_data=dmatrix_2x1_with_label_gamma,
        )
        # test each function is called the correct number of times
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _calibrate_leaf_node_counts"
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _calibrate_interval"
        # test the order of calls to functions
        assert (
            mock_manager.mock_calls[0][0] == "the_calibrate_leaf_node_counts"
        ), "_calibrate_leaf_node_counts not called first"
        assert (
            mock_manager.mock_calls[1][0] == "the_calibrate_interval"
        ), "_calibrate_interval not called second"
        # test the arguments in the _calibrate_leaf_node_counts call
        # (with train_data given, leaf node counts use train_data, not data)
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_leaf_node_counts call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label_gamma
        ), "data arg incorrect in _calibrate_leaf_node_counts call"
        # test the arguments in the _calibrate_interval call
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _calibrate_interval call"
        np.testing.assert_array_equal(
            call_kwargs["response"], dmatrix_2x1_with_label.get_label()
        )
        assert (
            call_kwargs["alpha"] == 0.1
        ), "alpha arg incorrect in _calibrate_interval call"
        assert (
            call_kwargs["data"] == dmatrix_2x1_with_label
        ), "data arg incorrect in _calibrate_interval call"
class TestCalculateScalingFactors:
    """Tests for the LeafNodeScaledConformalPredictor._calculate_scaling_factors method."""
    def test_leaf_node_counts_exception(self):
        """Test an exception is raised if the leaf_node_counts attribute does not exist."""
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # sanity check: the attribute must be absent before the call
        assert not hasattr(
            dummy_confo_model, "leaf_node_counts"
        ), "dummy_confo_model already has leaf_node_counts attribute"
        with pytest.raises(
            AttributeError,
            match="leaf_node_counts attribute missing, run calibrate first.",
        ):
            dummy_confo_model._calculate_scaling_factors(np.array([0, 1, 3, -9]))
    def test_generate_leaf_node_predictions(self, mocker):
        """Test _generate_leaf_node_predictions is called with the data arg and the output
        from this method is passed to the _count_leaf_node_visits_from_calibration
        method.
        """
        leaf_nodes_return_value = np.array([1, 0, 1 / 3, 2])
        # set return value from _generate_leaf_node_predictions
        mocked = mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_generate_leaf_node_predictions",
            return_value=leaf_nodes_return_value,
        )
        mocked2 = mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_count_leaf_node_visits_from_calibration",
            return_value=np.array([1]),
        )
        # set a dummy value for leaf_node_counts attribute as
        # _count_leaf_node_visits_from_calibration is mocked
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model.leaf_node_counts = 1234
        data_arg = np.array([0, 1, 3, -9])
        dummy_confo_model._calculate_scaling_factors(data_arg)
        # test the call to _generate_leaf_node_predictions
        assert (
            mocked.call_count == 1
        ), "incorrect number of calls to _generate_leaf_node_predictions"
        call_args = mocked.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_kwargs == {}
        ), "keyword args incorrect in _generate_leaf_node_predictions call"
        assert len(call_pos_args) == 1, "incorrect number of positional args"
        np.testing.assert_array_equal(call_pos_args[0], data_arg)
        # test _count_leaf_node_visits_from_calibration called with
        # _generate_leaf_node_predictions outputs
        assert (
            mocked2.call_count == 1
        ), "incorrect number of calls to _count_leaf_node_visits_from_calibration"
        call_args = mocked2.call_args_list[0]
        call_pos_args = call_args[0]
        call_kwargs = call_args[1]
        assert (
            call_pos_args == ()
        ), "positional args incorrect in _count_leaf_node_visits_from_calibration call"
        assert list(call_kwargs.keys()) == [
            "leaf_node_predictions"
        ], "incorrect kwargs in _count_leaf_node_visits_from_calibration call"
        np.testing.assert_array_equal(
            call_kwargs["leaf_node_predictions"], leaf_nodes_return_value
        )
    def test_expected_output(self, mocker):
        """Test that the output from the function is calculated as 1 / _count_leaf_node_visits_from_calibration
        method output.
        """
        count_leaf_nodes_return_value = np.array([-4, 0, 1 / 3, 2])
        # set return value from _count_leaf_node_visits_from_calibration
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_count_leaf_node_visits_from_calibration",
            return_value=count_leaf_nodes_return_value,
        )
        # mock _generate_leaf_node_predictions so it doesn't run
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor, "_generate_leaf_node_predictions"
        )
        # NOTE: the 0 element makes 1 / x produce np.inf (with a numpy
        # RuntimeWarning) on both sides of the comparison below.
        expected_results = 1 / count_leaf_nodes_return_value
        # set a dummy value for leaf_node_counts attribute as
        # _count_leaf_node_visits_from_calibration is mocked
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model.leaf_node_counts = 1234
        results = dummy_confo_model._calculate_scaling_factors(np.array([0]))
        np.testing.assert_array_equal(results, expected_results)
class TestCountLeafNodeVisitsFromCalibration:
    """Tests for the LeafNodeScaledConformalPredictor._count_leaf_node_visits_from_calibration method."""
    def test_sum_dict_values(self, mocker):
        """Test that _sum_dict_values is applied to every row in the passed
        leaf_node_predictions args.
        """
        mocked = mocker.patch.object(
            LeafNodeScaledConformalPredictor, "_sum_dict_values"
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # set leaf_node_counts attribute so np.apply_along_axis can run
        dummy_confo_model.leaf_node_counts = {"a": 1}
        leaf_node_predictions_value = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        dummy_confo_model._count_leaf_node_visits_from_calibration(
            leaf_node_predictions_value
        )
        # expect one _sum_dict_values call per row of leaf_node_predictions
        assert (
            mocked.call_count == leaf_node_predictions_value.shape[0]
        ), "incorrect number of calls to _sum_dict_values"
        for call_no in range(leaf_node_predictions_value.shape[0]):
            call_args = mocked.call_args_list[call_no]
            call_pos_args = call_args[0]
            call_kwargs = call_args[1]
            assert call_kwargs == {
                "counts": dummy_confo_model.leaf_node_counts
            }, f"keyword args in _sum_dict_values call {call_no} incorrect"
            assert (
                len(call_pos_args) == 1
            ), f"number of positional args in _sum_dict_values call {call_no} incorrect"
            np.testing.assert_array_equal(
                call_pos_args[0], leaf_node_predictions_value[call_no, :]
            )
    def test_sum_dict_values_returned(self, mocker):
        """Test the output of running _sum_dict_values on each row is returned from the method."""
        # set the return value from _sum_dict_values calls
        sum_dict_values_return_values = [-2, 1, 0]
        mocker.patch.object(
            LeafNodeScaledConformalPredictor,
            "_sum_dict_values",
            side_effect=sum_dict_values_return_values,
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        # set leaf_node_counts attribute so np.apply_along_axis can run
        dummy_confo_model.leaf_node_counts = {"a": 1}
        # set leaf_node_predictions arg so _sum_dict_values will be called 3 times
        leaf_node_predictions_value = np.array([[1], [2], [3]])
        results = dummy_confo_model._count_leaf_node_visits_from_calibration(
            leaf_node_predictions_value
        )
        np.testing.assert_array_equal(results, np.array(sum_dict_values_return_values))
class TestCalibrateLeafNodeCounts:
    """Tests for the LeafNodeScaledConformalPredictor._calibrate_leaf_node_counts method."""
    def test_leaf_node_counts_calculated_correctly(self, mocker):
        """Test that leaf_node_counts are calculated as expected."""
        leaf_node_preds = np.array(
            [[1, 2, 3, 1, 3], [2, 2, 4, 2, 1], [1, 2, 5, 1, 7], [1, 2, 0, -4, 1]]
        )
        # set return value from _generate_leaf_node_predictions
        mocker.patch.object(
            DummyLeafNodeScaledConformalPredictor,
            "_generate_leaf_node_predictions",
            return_value=leaf_node_preds,
        )
        dummy_confo_model = DummyLeafNodeScaledConformalPredictor()
        dummy_confo_model._calibrate_leaf_node_counts(np.array([0]))
        # leaf_node_counts should be a tabulation of each column in leaf_node_preds
        # (one {value: frequency} dict per column)
        expected_leaf_node_counts = [
            {1: 3, 2: 1},
            {2: 4},
            {0: 1, 3: 1, 4: 1, 5: 1},
            {-4: 1, 1: 2, 2: 1},
            {1: 2, 3: 1, 7: 1},
        ]
        assert (
            dummy_confo_model.leaf_node_counts == expected_leaf_node_counts
        ), "leaf_node_counts not calculated correctly"
class TestSumDictValues:
"""Tests for the LeafNodeScaledConformalPredictor._sum_dict_values method."""
@pytest.mark.parametrize(
"arr, counts, expected_output",
[
(np.array([1]), {0: {1: 123}}, 123),
(
np.array([1, 1, 1]),
{0: {1: 123, 0: 21}, 1: {3: -1, 1: 100}, 2: {1: 5}},
228,
),
(
np.array([1, 2, 3]),
{0: {1: | |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
class Queries:
browser_env_90 = """
SELECT
Partner,
Advertiser,
Advertiser_Id,
Insertion_Order,
Insertion_Order_Id,
Campaign,
Campaign_Id,
CASE
when Line_Item_Type = "TrueView" THEN "TrueView"
when Line_Item_Type != "TrueView" and Browser_Detail = "Other" THEN "Other"
ELSE Browser
END AS Browser,
Environment,
Device,
CASE
when Line_Item_Type = "TrueView" THEN "TrueView"
ELSE Concat(Device,"-",Environment)
END AS Device_Environment,
Line_item_type,
Week_start,
SUM(Impressions) AS Impressions
FROM
`{dataset}.DV360_Browser_Report_Clean`
WHERE
DATE_DIFF(CURRENT_DATE(), Week_start, WEEK) < 12
GROUP BY
Partner,
Advertiser,
Advertiser_Id,
Insertion_Order,
Insertion_Order_Id,
Campaign,
Campaign_Id,
Browser,
Environment,
Device,
Device_Environment,
Week_start,
Line_item_type
"""
browser_2_year = """
SELECT
Partner,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
CASE
when Line_Item_Type = "TrueView" THEN "TrueView"
when Line_Item_Type != "TrueView" and Browser_Detail = "Other" THEN "Other"
ELSE Browser
END AS Browser,
Browser_detail as Browser_Detail,
Environment,
Device,
CASE
when Line_Item_Type = "TrueView" THEN "TrueView"
ELSE Concat(Device,"-",Environment)
END AS Device_Environment,
Week,
Month,
Year,
Week_start,
Line_Item_Type,
Segment1,
Segment2,
Segment3,
SUM(Impressions) AS Impressions,
SUM(Total_Conversions) AS Total_Conversions,
SUM(Post_Click_Conversions) AS Post_Click_Conversions,
SUM(Post_View_Conversions) AS Post_View_Conversions,
SUM(Revenue_Adv_Currency) AS Revenue_Adv_Currency,
SUM(Media_Cost_Advertiser_Currency) AS Media_Cost_Advertiser_Currency,
SUM(CM_Post_View_Revenue) AS CM_Post_View_Revenue,
SUM(CM_Post_Click_Revenue) AS CM_Post_Click_Revenue
FROM
`{dataset}.z_DV360_Browser_Report_Clean`
GROUP BY
Partner,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Browser,
Browser_detail,
Environment,
Device,
Device_Environment,
Week,
Month,
Year,
Line_Item_Type,
Segment1,
Segment2,
Segment3,
Week_start
"""
browser_share_multichart = """
WITH
filtered AS
(
SELECT
segment1,
segment2,
segment3,
browser,
sum(Impressions) AS imps,
IF(UPPER(browser)='SAFARI', SUM(Impressions), 0) AS safari_imps,
Line_Item_Type,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Device_Environment,
Week_start
FROM `{dataset}.DV3_Browser`
GROUP BY
segment1,
segment2,
segment3,
browser,
Line_Item_Type,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Device_Environment,
Week_start
),
subtotals AS
(
SELECT
1 AS segment_number,
segment1 AS segment,
browser,
SUM(imps) AS imps,
SUM(safari_imps) AS safari_imps,
Line_Item_Type,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Device_Environment,
Week_start
FROM filtered
WHERE segment1 IS NOT NULL
GROUP BY 1, 2, 3, Line_Item_Type, Advertiser, Advertiser_ID, Campaign,Insertion_Order,Device_Environment, Week_start
UNION ALL
SELECT
2 AS segment_number,
segment2 AS segment,
browser,
SUM(imps) AS segment_browser_imps,
SUM(safari_imps) AS safari_imps,
Line_Item_Type,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Device_Environment,
Week_start
FROM filtered
WHERE Segment2 IS NOT NULL
GROUP BY 1, 2, 3, Line_Item_Type, Advertiser, Advertiser_ID, Campaign,Insertion_Order,Device_Environment, Week_start
UNION ALL
SELECT
3 AS segment_number,
segment3 AS segment,
browser,
SUM(imps) AS segment_browser_imps,
SUM(safari_imps) AS safari_imps,
Line_Item_Type,
Advertiser,
Advertiser_ID,
Campaign,
Insertion_Order,
Device_Environment,
Week_start
FROM filtered
WHERE Segment3 IS NOT NULL
GROUP BY 1, 2, 3, Line_Item_Type, Advertiser, Advertiser_ID, Campaign,Insertion_Order,Device_Environment,Week_start
),
grand_total AS
(
SELECT
segment_number,
SUM(imps) AS imps
FROM subtotals
GROUP BY 1
)
SELECT
segment_number,
segment,
SUM(sbt.imps) AS impressions,
SUM(sbt.safari_imps) AS safari_impressions,
sbt.Line_Item_Type as Line_Item_Type,
sbt.browser as Browser,
sbt.Advertiser as Advertiser,
sbt.Advertiser_ID as Advertiser_ID,
sbt.Campaign as Campaign,
sbt.Insertion_Order as Insertion_Order,
sbt.Device_Environment as Device_Environment,
sbt.Week_start as Week_start
FROM `subtotals` sbt
LEFT OUTER JOIN `grand_total` AS gt USING (segment_number)
GROUP BY 1, 2, Line_Item_Type, Browser, Advertiser, Advertiser_ID, Campaign,Insertion_Order,Device_Environment,Week_start
ORDER BY 1, 2
"""
clean_browser_report = """
SELECT
DV3_reporting.Partner AS Partner_clean,
DV3_reporting.Partner_Id,
CONCAT(DV3_reporting.Partner," - ",CAST(DV3_reporting.Partner_Id AS STRING)) AS Partner,
DV3_reporting.Advertiser AS Advertiser_clean,
DV3_reporting.Advertiser_Id,
CONCAT(DV3_reporting.Advertiser," - ",CAST(DV3_reporting.Advertiser_Id AS STRING)) AS Advertiser,
DV3_reporting.Advertiser_Currency,
DV3_reporting.Insertion_Order AS Insertion_Order_clean,
DV3_reporting.Insertion_Order_Id,
CONCAT(DV3_reporting.Insertion_Order," - ",CAST(DV3_reporting.Insertion_Order_Id AS STRING)) AS Insertion_Order,
DV3_reporting.Campaign AS Campaign_clean,
DV3_reporting.Campaign_Id,
CONCAT(DV3_reporting.Campaign," - ",CAST(DV3_reporting.Campaign_Id AS STRING)) AS Campaign,
DV3_reporting.Line_Item AS Line_Item_clean,
DV3_reporting.Line_Item_Id,
CONCAT(DV3_reporting.Line_Item," - ",CAST(DV3_reporting.Line_Item AS STRING)) AS Line_Item,
DV3_reporting.Browser AS Browser_Detail,
Bro.Browser_Platform_clean AS Browser,
DV3_reporting.Environment AS Environment_Detail,
Env.Environment_clean AS Environment,
Dev.Device_Type AS Device_Type,
Dev.Device AS Device,
seg.Segment1 AS Segment1,
seg.Segment2 AS Segment2,
seg.Segment3 AS Segment3,
DV3_reporting.Week,
CAST(PARSE_DATE('%Y/%m/%d', SPLIT(DV3_reporting.Week,'-')[OFFSET(0)]) AS DATE) AS Week_start,
DV3_reporting.Month,
DV3_reporting.Year,
DV3_reporting.Line_Item_Type,
SUM(DV3_reporting.Impressions) AS Impressions,
SUM(DV3_reporting.Clicks) AS Clicks,
SUM(DV3_reporting.Total_Conversions) AS Total_Conversions,
SUM(DV3_reporting.Post_Click_Conversions) AS Post_Click_Conversions,
SUM(DV3_reporting.Post_View_Conversions) AS Post_View_Conversions,
SUM(DV3_reporting.CM_Post_View_Revenue) AS CM_Post_View_Revenue,
SUM(DV3_reporting.CM_Post_Click_Revenue) AS CM_Post_Click_Revenue,
SUM(DV3_reporting.Revenue_Adv_Currency) AS Revenue_Adv_Currency,
SUM(DV3_reporting.Media_Cost_Advertiser_Currency) AS Media_Cost_Advertiser_Currency,
SAFE_DIVIDE(SUM(DV3_reporting.Revenue_Adv_Currency), SAFE_DIVIDE(SUM(DV3_reporting.Impressions), 1000)) AS CPM,
SAFE_DIVIDE(SUM(DV3_reporting.Revenue_Adv_Currency), SUM(DV3_reporting.Total_Conversions)) AS CPA,
SAFE_DIVIDE(SUM(DV3_reporting.CM_Post_View_Revenue + DV3_reporting.CM_Post_Click_Revenue), SUM(DV3_reporting.Revenue_Adv_Currency)) AS ROAS_Total,
SAFE_DIVIDE(SUM(DV3_reporting.CM_Post_View_Revenue), SUM(DV3_reporting.Revenue_Adv_Currency)) AS ROAS_View,
SAFE_DIVIDE(SUM(DV3_reporting.CM_Post_Click_Revenue), SUM(DV3_reporting.Revenue_Adv_Currency)) AS ROAS_Click
FROM
`{dataset}.z_Dv360_Browser_Report_Dirty` AS DV3_reporting
LEFT JOIN
`{dataset}.z_Browser` AS Bro
ON
DV3_reporting.Browser = Bro.Browser_Platform
LEFT JOIN
`{dataset}.z_Environment` AS Env
ON
DV3_reporting.Environment = Env.Environment
LEFT JOIN
`{dataset}.z_Device_Type` AS Dev
ON
DV3_reporting.Device_Type = Dev.Device_Type
LEFT JOIN
`{dataset}.z_Custom_Segments` AS seg
ON
DV3_reporting.Line_Item_Id = seg.Line_Item_Id
GROUP BY
DV3_reporting.Partner,
DV3_reporting.Partner_Id,
DV3_reporting.Advertiser,
DV3_reporting.Advertiser_Id,
DV3_reporting.Advertiser_Currency,
DV3_reporting.Insertion_Order,
DV3_reporting.Insertion_Order_Id,
DV3_reporting.Campaign,
DV3_reporting.Campaign_Id,
DV3_reporting.Line_Item,
DV3_reporting.Line_Item_Id,
DV3_reporting.Browser,
Bro.Browser_Platform_clean,
DV3_reporting.Environment,
Env.Environment_clean,
DV3_reporting.Week,
DV3_reporting.Month,
DV3_reporting.Year,
DV3_reporting.Line_Item_Type,
Dev.Device,
Device_Type,
Week_start,
seg.Segment1,
seg.Segment2,
seg.Segment3
"""
cm_floodlight_join = """
SELECT
Flood.Floodlight_Attribution_Type AS Floodlight_Attribution_Type,
Att.Attribution_Type AS Attribution_Type,
CMBrowser.Browser_Platform AS Browser_Platform,
CMBrowser.Browser_Platform_detail AS Browser_Platform_detail,
CMBrowser.Browser_Platform_clean AS Browser_Platform_clean,
SUM(Total_Conversions) AS Total_Conversions,
SUM(Click_Through_Conversions) AS Click_Through_Conversions,
SUM(View_Through_Conversions) AS View_Through_Conversions
FROM
`{dataset}.z_Floodlight_CM_Report` AS Flood
JOIN
`{dataset}.z_Floodlight_Attribution` AS Att
ON
Flood.Floodlight_Attribution_Type = Att.Floodlight_Attribution_Type
LEFT JOIN
`{dataset}.z_CM_Browser_lookup` AS CMBrowser
ON
Flood.Browser_Platform = CMBrowser.Browser_Platform
GROUP BY
Flood.Floodlight_Attribution_Type,
Att.Attribution_Type,
Browser_Platform,
Browser_Platform_detail,
Browser_Platform_clean
"""
cm_floodlight_multichart = """
WITH
attrtype_browser_total AS
(
SELECT
IFNULL(att.Attribution_Type, '[missing]') as attribution_type,
IFNULL(cm_br.Browser_Platform_clean, '[missing]') as browser_platform,
SUM(fl.Total_Conversions) as convs
FROM `{dataset}.z_CM_Floodlight` fl
LEFT OUTER JOIN `{dataset}.z_Floodlight_Attribution` AS att USING(Floodlight_Attribution_Type)
LEFT OUTER JOIN `{dataset}.z_CM_Browser_lookup` AS cm_br USING(Browser_Platform_clean)
WHERE cm_br.Browser_Platform_clean IN ('Chrome', 'Safari', 'FF', 'MSFT')
GROUP BY 1, 2
),
grand_total AS
(
SELECT SUM(convs) AS convs
FROM attrtype_browser_total
)
SELECT
browser_platform,
SAFE_DIVIDE(SUM(IF(UPPER(attribution_type)='ATTRIBUTED', abt.convs, 0)),
SUM(abt.convs)) AS percent_attributed,
SAFE_DIVIDE(SUM(IF(UPPER(attribution_type)='UNATTRIBUTED', abt.convs, 0)),
SUM(abt.convs)) AS percent_unattributed,
SAFE_DIVIDE(SUM(abt.convs), ANY_VALUE(gt.convs)) share_of_floodlight_conversions
FROM attrtype_browser_total abt
CROSS JOIN grand_total gt
GROUP BY 1
ORDER BY 4 DESC
"""
cm_segmentation = """
SELECT
CONCAT(CM.Advertiser," - ",CAST(CM.Advertiser_Id AS STRING)) AS Advertiser,
CONCAT(CM.Campaign," - ",CAST(CM.Campaign_Id AS STRING)) AS Campaign,
CM.Site_Dcm,
CM.Browser_Platform AS Browser_Platform,
CMBrowser.Browser_Platform_detail AS Browser_Platform_detail,
CMBrowser.Browser_Platform_clean AS Browser_Platform_clean,
Platform_Type,
SiteSeg.Site_Type AS Site_Type,
CAST(Week AS DATE) AS Week,
SUM(CM.Impressions) AS Impressions,
SUM(Clicks) AS Clicks,
SUM(Total_Conversions) AS Total_Conversions,
SUM(Click_Through_Conversions) AS Click_Through_Conversions,
SUM(View_Through_Conversions) AS View_Through_Conversions
FROM
`{dataset}.z_CM_Browser_Report_Dirty` AS CM
LEFT JOIN
`{dataset}.z_CM_Site_Segmentation` AS SiteSeg
ON
CM.Site_Dcm = SiteSeg.Site_Dcm
LEFT JOIN
`{dataset}.z_CM_Browser_lookup` AS CMBrowser
ON
CM.Browser_Platform = CMBrowser.Browser_Platform
GROUP BY
Advertiser,
Campaign,
Site_Dcm,
Browser_Platform,
Browser_Platform_detail,
Browser_Platform_clean,
Platform_Type,
Site_Type,
Week
"""
cm_site_segmentation = """
SELECT
r.Site_Dcm,
Sum(r.Impressions) AS Impressions,
s.Site_Type
FROM `{dataset}.z_CM_Browser_Report_Dirty` as r
left join `{dataset}.z_CM_Site_Segmentation_Sheet` as s
on r.Site_Dcm = s.Site_Dcm
Group By
Site_Dcm,
Site_Type
Order By
Impressions desc
"""
dv360_custom_segments = """
SELECT DISTINCT
c.Advertiser,
c.Advertiser_Id,
c.Campaign,
c.Campaign_Id,
c.Insertion_Order,
c.Insertion_Order_Id,
c.Line_Item,
c.Line_Item_Id,
c.Line_Item_Type,
sum(c.Impressions),
s.Segment1,
s.Segment2,
s.Segment3
from
`{dataset}.z_Dv360_Browser_Report_Dirty` as c
left join
`{dataset}.z_Custom_Segments_Sheet` as s
on
c.Line_Item_Id = s.Line_Item_Id
where
c.Line_Item_Type != "TrueView"
Group By
c.Advertiser,
c.Advertiser_Id,
c.Campaign,
c.Campaign_Id,
c.Insertion_Order,
c.Insertion_Order_Id,
c.Line_Item,
c.Line_Item_Id,
c.Line_Item_Type,
s.Segment1,
s.Segment2,
s.Segment3
"""
safari_distribution_90days = """
SELECT
Partner,
Advertiser,
Campaign,
Environment,
Device,
CASE
when Line_Item_Type = "TrueView" THEN "TrueView"
ELSE Concat(Device,"-",Environment)
END AS Device_Environment,
Week_start,
Line_item_type,
CASE
WHEN Browser_detail="Safari" THEN "Safari 12+13"
WHEN Browser_detail="Safari 12" THEN "Safari 12+13"
WHEN Browser_detail="Safari 11" THEN "Safari 11"
ELSE
"Other Safari"
END
AS Browser_Rollup,
SUM(Impressions) AS Impressions
FROM
`{dataset}.z_DV360_Browser_Report_Clean`
WHERE
Browser = 'Safari'
AND
DATE_DIFF(CURRENT_DATE(),Week_start,WEEK)<12
GROUP BY
Browser_Rollup,
Partner,
Advertiser,
Campaign,
Environment,
Device,
Device_Environment,
Line_item_type,
Week_start
ORDER BY
Impressions DESC
"""
sdf_join = """
SELECT
C.Campaign_Id as Campaign_Id,
C.Advertiser_Id as Advertiser_Id,
C.Name as Campaign_Name,
IO.Io_Id as IO_Id,
IO.Name as IO_Name,
IO.Io_Type as IO_Type,
IO.Pacing as IO_Pacing,
IO.Performance_Goal_Type as IO_Performance_Goal_Type,
IO.Performance_Goal_Value as IO_Performance_Goal_Value,
IO.Budget_Type as IO_Budget_Type,
IO.Budget_Segments as IO_Budget_Segments,
IO.Auto_Budget_Allocation as IO_Auto_Budget_Allocation,
LI.Line_Item_Id as LI_Id,
LI.Type as LI_Type,
LI.Subtype as LI_Subtype,
LI.Name as LI_Name,
LI.Budget_Type as LI_Budget_Type,
LI.Budget_Amount as LI_Budget_Amount,
LI.Pacing as LI_Pacing,
LI.Pacing_Rate as LI_Pacing_Rate,
LI.Bid_Strategy_Type as LI_Bid_Strategy_Type,
LI.Bid_Strategy_Do_Not_Exceed as LI_Bid_Strategy_Do_Not_Exceed,
LI.Geography_Targeting_Include as LI_Geography_Targeting_Include,
LI.Geography_Targeting_Exclude as LI_Geography_Targeting_Exclude,
LI.Device_Targeting_Include as LI_Device_Targeting_Include,
LI.Device_Targeting_Exclude as LI_Device_Targeting_Exclude,
LI.Browser_Targeting_Include as LI_Browser_Targeting_Include,
LI.Browser_Targeting_Exclude as LI_Browser_Targeting_Exclude,
LI.Third_Party_Verification_Services as LI_Third_Party_Verification_Services,
LI.Third_Party_Verification_Labels as LI_Third_Party_Verification_Labels,
LI.Channel_Targeting_Include as LI_Channel_Targeting_Include,
LI.Channel_Targeting_Exclude as LI_Channel_Targeting_Exclude,
LI.Site_Targeting_Include as LI_Site_Targeting_Include,
LI.Site_Targeting_Exclude as LI_Site_Targeting_Exclude,
LI.App_Targeting_Include as LI_App_Targeting_Include,
LI.App_Targeting_Exclude as LI_App_Targeting_Exclude,
LI.App_Collection_Targeting_Include as LI_App_Collection_Targeting_Include,
LI.App_Collection_Targeting_Exclude as LI_App_Collection_Targeting_Exclude,
LI.Category_Targeting_Include as LI_Category_Targeting_Include,
LI.Category_Targeting_Exclude as LI_Category_Targeting_Exclude,
LI.Keyword_Targeting_Include as LI_Keyword_Targeting_Include,
LI.Keyword_Targeting_Exclude as LI_Keyword_Targeting_Exclude,
LI.Keyword_List_Targeting_Exclude as LI_Keyword_List_Targeting_Exclude,
LI.Audience_Targeting_Similar_Audiences as LI_Audience_Targeting_Similar_Audiences,
LI.Audience_Targeting_Include as LI_Audience_Targeting_Include,
LI.Audience_Targeting_Exclude as LI_Audience_Targeting_Exclude,
LI.Affinity_In_Market_Targeting_Include as LI_Affinity_In_Market_Targeting_Include,
LI.Affinity_In_Market_Targeting_Exclude as LI_Affinity_In_Market_Targeting_Exclude,
LI.Custom_List_Targeting as LI_Custom_List_Targeting,
LI.Daypart_Targeting as LI_Daypart_Targeting,
LI.Daypart_Targeting_Time_Zone as LI_Daypart_Targeting_Time_Zone,
LI.Environment_Targeting as LI_Environment_Targeting,
LI.Demographic_Targeting_Gender as LI_Demographic_Targeting_Gender,
LI.Demographic_Targeting_Age as LI_Demographic_Targeting_Age,
LI.Demographic_Targeting_Household_Income as LI_Demographic_Targeting_Household_Income,
LI.Demographic_Targeting_Parental_Status as LI_Demographic_Targeting_Parental_Status
FROM
`{dataset}.SDF_campaign` AS C
LEFT JOIN
| |
# <gh_stars>0
# coding: utf-8
# # CIG96 - Deep optical reduction and calibration
#
# ### Observation date: 11sep12
# ### Telescope: CAHA2.2m
#
# #I. GENERAL INFORMATION
#
# Notebook with the full data reduction and calibration plus external IRAF steps.
#
# - Further info from this CAHA campaign and log can be found <a href="http://amiga.iaa.es:8888/display/science/CAHA+2.2m+Observations+11sep12">here</a>.
#
# - Notes on the data reduction and calibration can be found <a href="http://amiga.iaa.es:8888/display/science/Data+reduction+CIG96+CAHA2.2">here</a>.
#
# - Information about CIG96 paper (2016) can be found <a href="http://amiga.iaa.es:8888/display/science/CIG96+-+Deep+optical+and+HI+images#CIG96-DeepopticalandHIimages-Paperstructureandcomments">here</a>.
#
# #II. PRE-REDUCTION SETUP
#
# **NOTE**: when a box is previously labeled as ***IRAF***, it means the notebook needs an external computation in IRAF before proceeding with further cells.
#
# ##a) Import necessary modules
# In[1]:
import matplotlib as mpl
import repipy.astroim as astroim
import repipy.rename as rename
import repipy.utilities as utilities
import repipy.combine as combine
import repipy.create_masks as create_masks
import repipy.remove_cosmics as remove_cosmics
import repipy.arith as arith
import repipy.find_sky as find_sky
import repipy.scale_to_ref as scale_to_ref
import astropy.io.fits as fits
import lemon.astrometry as astrometry
import lemon.photometry as photometry
import numpy as np
import os
import sys
import shutil
import random
import subprocess
import warnings
import pandas
import sqlite3
import repipy.extract_mag_airmass_common as extract
import glob as glob
import matplotlib.pyplot as plt
from glob import glob as ls
from astropy.io import fits
import pyregion
import scipy
from scipy import stats
from stsci import convolve
#import seaborn as sns
# In[2]:
# Render interactive figures inline in the notebook (IPython magic call,
# as produced by `jupyter nbconvert --to script`).
get_ipython().magic(u'matplotlib nbagg')
# Alternative backend: render figures as static PNG instead.
#%matplotlib inline
# In[3]:
# Default figure size (width, height in inches) for this session.
mpl.pylab.rcParams['figure.figsize'] = 14, 7 # that's default image size for this interactive session
# ##b) Raw data and work directories definition
# In[4]:
# Directory where data are (raw and work dirs point to the same path here,
# i.e. the reduction is done in place on this campaign's folder):
raw_data_dir = "/home/prm/Desktop/optical/optical/CAHA/cig96_jun16"
# Working directory:
work_dir = "/home/prm/Desktop/optical/optical/CAHA/cig96_jun16"
# NOTE: Python 2 print statements — this notebook predates Python 3.
print "Raw data path: ", raw_data_dir
print "Work path: ", work_dir
# Switch to working directory; expandvars resolves any $VARS in the path.
os.chdir(os.path.expandvars(work_dir))
print "\nYou will be working on the following path: ", work_dir
# ## IMPORTANT NOTE: if previously reduced:
#
# - AVOID repeating steps 1 to 9 (included) from Section III.
#
# - **MUST REPEAT ONLY** steps **12 and 16** from Section III.
# #III. DATA REDUCTION and CALIBRATION
#
# ##1. Headers fix
# Unfortunately, many observatories include WCS keywords in the headers, **but not the proper WCS info**. So, not only you have useless, and sometimes old-fashion keywords in the header, but they confuse programs trying to read the headers AFTER you have used astrometry.net. Best solution is to remove them first. Also, some **keywords** might be doubled or show ambiguous names (e.g. skyflats show up as "flat" instead of "skyflats").
#
# We make corrections for all these.
#
# ###1.1 WCS removal
# In[5]:
# Sanity check: print the current working directory from a bash cell.
get_ipython().run_cell_magic(u'bash', u'', u'pwd')
# In[6]:
# Strip stale WCS keywords with the external wipe_wcs.py tool so they do not
# clash with the fresh astrometry.net solution added later.
get_ipython().run_cell_magic(u'bash', u'', u'# removing WCS from all fits files\nwipe_wcs.py cig/cig96_def_crop-noWCS.fits')
# ###1.2 Proper keywords reading
# In[56]:
# makes a list containing all fits images; `ls` is glob.glob
# (aliased at the top of the file: `from glob import glob as ls`).
image_list = ls(os.path.join(work_dir, "*.fits"))
# Open the first image to probe the header keyword conventions.
# NOTE: raises IndexError if no *.fits files are present in work_dir.
im = astroim.Astroim(image_list[0])
# Now we read from the object astroim the keywords we need.
# In[55]:
# Assign the telescope-specific header keyword names to variables, so later
# cells can look up values without hard-coding CAHA/CAFOS keyword spellings.
filterk = im.header.filterk          # filter keyword name
exptimek = im.header.exptimek        # exposure time keyword name
objectk = im.header.objectk          # target/object keyword name
airmassk = im.header.airmassk        # airmass keyword name
datek = im.header.datek              # observation date keyword name
telescope = im.header.telescope      # telescope identifier
gaink = im.header.gaink              # CCD gain keyword name
read_noisek = im.header.ccdronk      # read noise keyword (note: from ccdronk)
timek = im.header.timek              # observation time keyword name
filtersysk = im.header.filtersysk    # filter system keyword name
# Python 2 prints: quick visual check of the detected keyword names.
print exptimek
print filterk
# ###1.3 'OBJECT' keyword renaming in skyflats
# All the flats are actually sky flats this night, so we will rebrand them to make sure we have all the information.
# In[8]:
# Rebrand any frame whose OBJECT contains "flat" (but not already "sky") as
# a "skyflat".  The context manager guarantees each FITS file is flushed and
# closed even if an exception fires mid-loop; the original explicit
# flush()/close() pair leaked the handle on error, and flush() before
# close() was redundant (closing an update-mode file writes pending changes).
for im_name in image_list:
    with fits.open(im_name, mode='update') as im:
        object_im = im[0].header["OBJECT"]
        if 'flat' in object_im.lower() and 'sky' not in object_im.lower():
            im[0].header["OBJECT"] = "skyflat"
# ##2. WCS restoration
# Proper astrometry is added again to all the images using astrometry.net. This will take some minutes, so take it easy ;).
# In[ ]:
# Solve astrometry in place for every FITS file (external astrom.py wrapper
# around astrometry.net; --radius 1 limits the search to 1 degree).
get_ipython().run_cell_magic(u'bash', u'', u'astrom.py --overwrite --radius 1 *.fits')
# ##3. Filter specification
# 'CousR' filter is not recognized by passband.py subroutine so the **filter keyword is changed to 'Cousins R'**, something that can be read.
# Also, a **new keyword named "FILTER" is added** since "INSFLNAM" is not usually recognized as the filter keyword by *any* normal routine.
# In[10]:
# NOTE(review): assumes single-HDU FITS files — fits.getdata/writeto only
# round-trip the primary HDU; confirm no extensions are present.
# NOTE(review): `clobber=` is deprecated in astropy >= 1.3 in favour of
# `overwrite=`; update if the astropy version is ever upgraded.
for fitsim in image_list:
    data, header = fits.getdata(fitsim, header=True)
    header["INSFLNAM"] = "Cousins R" # change filter keyword to 'Cousins R' so rename.py can read it correctly
    header.set("FILTER","Cousins R") # add a redundant filter keyword ("FILTER") just in case INSFLANM is not identified
    fits.writeto(fitsim, data, header, clobber=True)
# ##4. Renaming all the files
# Now we rename the files with a recognizable filter.
# In[11]:
# External rename.py: copies files into per-type folders with normalized names.
get_ipython().run_cell_magic(u'bash', u'', u'rename.py --copy --overwrite .')
# ###4.1 Manual check and deletion of useless files
# Among the dataset there might be bad images or images from other targets observed during the night. All are manually removed:
# In[12]:
get_ipython().run_cell_magic(u'bash', u'', u"# CIG1019 images in 'cig' folder\nrm -Rf cig/*cig1019*")
# ## 5. Mask images
# Mask out pixels with values above a maximum number of counts
# In[13]:
# create_masks.py builds circular masks flagging pixels above 55000 counts
# (near CCD saturation) for the skyflat and science frames.
get_ipython().run_cell_magic(u'bash', u'', u'pwd\ncreate_masks.py --circular --max_val 55000 skyflat/*.fits \ncreate_masks.py --circular --max_val 55000 cig/*.fits \n#create_masks.py --circular --max_val 55000 standards/*.fits\n#create_masks.py --circular --max_val 10000 blanks/*.fits # for these, the max counts number can be lowered drastically')
# ## 6. Bias
# We will median combine all the bias images to get a master bias. Since it is a bias, no scaling is necessary, and we have chosen the median for the combination because even at zero exposure time you get some cosmic rays in each image. The "--all_together" keyword will tell the program combine.py to ignore the filters of the images, and combine all the images together no matter what filters they have.
#
# ###6.1 Bias combination
# In[14]:
# Median-combine all bias frames into bias/masterbias.fits (no scaling).
get_ipython().run_cell_magic(u'bash', u'', u'cd bias\ncombine.py --output "masterbias.fits" --average "median" --scale "none" --all_together bias*.fits')
# ###6.2 Bias subtraction
# Given that the bias has some small amount of structure, we will remove the whole image instead of an average. We will remove it from all other images, which include cig images, flats, standards, bias and even unknown objects, in case they are needed later. We will add a suffix "-b" before the fits extension. The message "masterbias image removed" will be included in the header and the subtraction will not make use of the keywords "--median" or "--mean", which would subtract only the median or mean of the second image.
# In[15]:
# Subtract the 2-D master bias from science, skyflat and bias frames,
# writing "-b"-suffixed copies (standards/blanks lines kept commented out).
get_ipython().run_cell_magic(u'bash', u'', u'arith.py --suffix " -b" --message "Masterbias image removed" cig/cig*.fits - bias/masterbias.fits\narith.py --suffix " -b" --message "Masterbias image removed" skyflat/skyflat_*.fits - bias/masterbias.fits\narith.py --suffix " -b" --message "Masterbias image removed" bias/bias*.fits - bias/masterbias.fits\n#arith.py --suffix " -b" --message "Masterbias image removed" standards/*.fits - bias/masterbias.fits\n#arith.py --suffix " -b" --message "Masterbias image removed" blanks/blank*.fits - bias/masterbias.fits')
# ##7. Overscan subtraction
# The difference between the median level of the overscan and the rest of the image is usually a constant. Despite the mean/median amount of counts of a bias image may vary throughout the night, such increase or decrease is usually the same for both the bias itself and the overscan region. This way, after subtracting the master-bias, we can subtract the overscan value to avoid any over/infrasubtraction of the real bias values due to those variations throughout the night.
#
# For CAFOS at CAHA2.2m, the overscan region is located on the left side of the image. Its physical extension (in X, Y pixels) is a little larger than the values specified below: [1028:1080,6:1020], but these are good enough for a full subtraction.
# In[16]:
# Subtract the overscan level (region x:1028-1080, y:6-1020) from the
# bias-corrected frames, writing "-o"-suffixed copies.
get_ipython().run_cell_magic(u'bash', u'', u'subtract_overscan.py --region 1028 1080 6 1020 --suffix " -o" cig/*-b.fits\nsubtract_overscan.py --region 1028 1080 6 1020 --suffix " -o" skyflat/*-b.fits\nsubtract_overscan.py --region 1028 1080 6 1020 --suffix " -o" bias/*-b.fits\n#subtract_overscan.py --region 1028 1080 6 1020 --suffix " -o" standards/*-b.fits\n#subtract_overscan.py --region 1028 1080 6 1020 --suffix " -o" blanks/*-b.fits')
# ## 8. Flatfields
# ### 8.1 Flatfields combination
# All flats are combined. We do not need to separate them by filters, the routine combine will take care of that. We just need to give all the bias-subtracted flats (xxx-b.fits) as input and it will separate them by filter and rename them accordingly.
# In[17]:
# Median-combine the bias+overscan-corrected skyflats, scaling each frame by
# its median before combination, into skyflat/masterflats.fits.
get_ipython().run_cell_magic(u'bash', u'', u'pwd\ncd skyflat/\ncombine.py --output "masterflats.fits" --average "median" --scale "median" skyflat*-o.fits ')
# ### 8.2 Flatfield correction
# One by one, we will flat-field correct all the cig images, standards and skyflats. This would be easiest to do using a program, but we prefer to explicitly show the operations. A lot of "RuntimeWarning: divide by zero [...]" happens in this operation, because it's true, there are a lot of zeroes in the outer parts of the flats images. This will cause no
display_name
body['email'] = email
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['payment_method_id'] = payment_method_id
body['payment_terms_id'] = payment_terms_id
body['phone_number'] = phone_number
body['shipment_method_id'] = shipment_method_id
body['tax_area_display_name'] = tax_area_display_name
body['tax_area_id'] = tax_area_id
body['tax_liable'] = tax_liable
body['tax_registration_number'] = tax_registration_number
body['type'] = type_
body['website'] = website
body['currency'] = currency
body['payment_method'] = payment_method
body['payment_term'] = payment_term
body['picture'] = picture
body['shipment_method'] = shipment_method
return client.update_customer(company_id=company_id,
sales_credit_memo_id=sales_credit_memo_id,
body=body)
def financials_financial_company_sale_credit_memo_update_payment_term(
        client,
        company_id,
        sales_credit_memo_id,
        id_=None,
        calculate_discount_on_credit_memos=None,
        code=None,
        discount_date_calculation=None,
        discount_percent=None,
        display_name=None,
        due_date_calculation=None,
        last_modified_date_time=None):
    """Update the payment term of a sales credit memo.

    Every body field is forwarded as given (unset fields are sent as None).
    """
    payload = {
        'id': id_,
        'calculate_discount_on_credit_memos': calculate_discount_on_credit_memos,
        'code': code,
        'discount_date_calculation': discount_date_calculation,
        'discount_percent': discount_percent,
        'display_name': display_name,
        'due_date_calculation': due_date_calculation,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_payment_term(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_update_sale_credit_memo_line(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        id_=None,
        account_id=None,
        amount_excluding_tax=None,
        amount_including_tax=None,
        description=None,
        discount_amount=None,
        discount_applied_before_tax=None,
        discount_percent=None,
        document_id=None,
        invoice_discount_allocation=None,
        item_id=None,
        line_type=None,
        net_amount=None,
        net_amount_including_tax=None,
        net_tax_amount=None,
        quantity=None,
        sequence=None,
        shipment_date=None,
        tax_code=None,
        tax_percent=None,
        total_tax_amount=None,
        unit_of_measure_id=None,
        unit_price=None,
        account=None,
        microsoft_graph_entity_id=None,
        base_unit_of_measure_id=None,
        blocked=None,
        display_name=None,
        gtin=None,
        inventory=None,
        item_category_code=None,
        item_category_id=None,
        last_modified_date_time=None,
        number=None,
        price_includes_tax=None,
        tax_group_code=None,
        tax_group_id=None,
        type_=None,
        unit_cost=None,
        number_unit_price=None,
        item_category=None,
        picture=None):
    """Update one line of a sales credit memo.

    Line-level fields go into the top-level body; the ``item``-prefixed
    arguments are folded into a nested ``item`` object (its ``id`` comes
    from ``microsoft_graph_entity_id`` and its ``unit_price`` from
    ``number_unit_price`` to avoid clashing with the line-level names).
    """
    payload = {
        'id': id_,
        'account_id': account_id,
        'amount_excluding_tax': amount_excluding_tax,
        'amount_including_tax': amount_including_tax,
        'description': description,
        'discount_amount': discount_amount,
        'discount_applied_before_tax': discount_applied_before_tax,
        'discount_percent': discount_percent,
        'document_id': document_id,
        'invoice_discount_allocation': invoice_discount_allocation,
        'item_id': item_id,
        'line_type': line_type,
        'net_amount': net_amount,
        'net_amount_including_tax': net_amount_including_tax,
        'net_tax_amount': net_tax_amount,
        'quantity': quantity,
        'sequence': sequence,
        'shipment_date': shipment_date,
        'tax_code': tax_code,
        'tax_percent': tax_percent,
        'total_tax_amount': total_tax_amount,
        'unit_of_measure_id': unit_of_measure_id,
        'unit_price': unit_price,
        'account': account,
        'item': {
            'id': microsoft_graph_entity_id,
            'base_unit_of_measure_id': base_unit_of_measure_id,
            'blocked': blocked,
            'display_name': display_name,
            'gtin': gtin,
            'inventory': inventory,
            'item_category_code': item_category_code,
            'item_category_id': item_category_id,
            'last_modified_date_time': last_modified_date_time,
            'number': number,
            'price_includes_tax': price_includes_tax,
            'tax_group_code': tax_group_code,
            'tax_group_id': tax_group_id,
            'type': type_,
            'unit_cost': unit_cost,
            'unit_price': number_unit_price,
            'item_category': item_category,
            'picture': picture,
        },
    }
    return client.update_sales_credit_memo_lines(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_create_picture(
        client,
        company_id,
        sales_credit_memo_id,
        content_type,
        id_=None,
        content=None,
        height=None,
        width=None):
    """Create a picture on a credit memo's customer (content_type required)."""
    payload = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.create_picture(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_delete_currency(
        client,
        company_id,
        sales_credit_memo_id,
        if_match=None):
    """Delete the currency of the credit memo's customer (if_match = ETag)."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'if_match': if_match,
    }
    return client.delete_currency(**kwargs)
def financials_financial_company_sale_credit_memo_customer_delete_payment_method(
        client,
        company_id,
        sales_credit_memo_id,
        if_match=None):
    """Delete the payment method of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'if_match': if_match,
    }
    return client.delete_payment_method(**kwargs)
def financials_financial_company_sale_credit_memo_customer_delete_payment_term(
        client,
        company_id,
        sales_credit_memo_id,
        if_match=None):
    """Delete the payment term of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'if_match': if_match,
    }
    return client.delete_payment_term(**kwargs)
def financials_financial_company_sale_credit_memo_customer_delete_picture(
        client,
        company_id,
        sales_credit_memo_id,
        picture_id,
        if_match=None):
    """Delete one picture of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'picture_id': picture_id,
        'if_match': if_match,
    }
    return client.delete_picture(**kwargs)
def financials_financial_company_sale_credit_memo_customer_delete_shipment_method(
        client,
        company_id,
        sales_credit_memo_id,
        if_match=None):
    """Delete the shipment method of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'if_match': if_match,
    }
    return client.delete_shipment_method(**kwargs)
def financials_financial_company_sale_credit_memo_customer_list_picture(
        client,
        company_id,
        sales_credit_memo_id,
        orderby=None,
        select=None,
        expand=None):
    """List the pictures of the credit memo's customer (OData options pass through)."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_picture(**kwargs)
def financials_financial_company_sale_credit_memo_customer_set_picture_content(
        client,
        company_id,
        sales_credit_memo_id,
        picture_id,
        data):
    """Upload raw content for one customer picture."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'picture_id': picture_id,
        'data': data,
    }
    return client.set_picture_content(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_currency(
        client,
        company_id,
        sales_credit_memo_id,
        select=None,
        expand=None):
    """Get the currency of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'select': select,
        'expand': expand,
    }
    return client.get_currency(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_payment_method(
        client,
        company_id,
        sales_credit_memo_id,
        select=None,
        expand=None):
    """Get the payment method of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'select': select,
        'expand': expand,
    }
    return client.get_payment_method(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_payment_term(
        client,
        company_id,
        sales_credit_memo_id,
        select=None,
        expand=None):
    """Get the payment term of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'select': select,
        'expand': expand,
    }
    return client.get_payment_term(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_picture(
        client,
        company_id,
        sales_credit_memo_id,
        picture_id,
        select=None,
        expand=None):
    """Get one picture of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'picture_id': picture_id,
        'select': select,
        'expand': expand,
    }
    return client.get_picture(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_picture_content(
        client,
        company_id,
        sales_credit_memo_id,
        picture_id):
    """Download the raw content of one customer picture."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'picture_id': picture_id,
    }
    return client.get_picture_content(**kwargs)
def financials_financial_company_sale_credit_memo_customer_show_shipment_method(
        client,
        company_id,
        sales_credit_memo_id,
        select=None,
        expand=None):
    """Get the shipment method of the credit memo's customer."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'select': select,
        'expand': expand,
    }
    return client.get_shipment_method(**kwargs)
def financials_financial_company_sale_credit_memo_customer_update_currency(
        client,
        company_id,
        sales_credit_memo_id,
        id_=None,
        amount_decimal_places=None,
        amount_rounding_precision=None,
        code=None,
        display_name=None,
        last_modified_date_time=None,
        symbol=None):
    """Update the currency of the credit memo's customer."""
    payload = {
        'id': id_,
        'amount_decimal_places': amount_decimal_places,
        'amount_rounding_precision': amount_rounding_precision,
        'code': code,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
        'symbol': symbol,
    }
    return client.update_currency(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_update_payment_method(
        client,
        company_id,
        sales_credit_memo_id,
        id_=None,
        code=None,
        display_name=None,
        last_modified_date_time=None):
    """Update the payment method of the credit memo's customer."""
    payload = {
        'id': id_,
        'code': code,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_payment_method(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_update_payment_term(
        client,
        company_id,
        sales_credit_memo_id,
        id_=None,
        calculate_discount_on_credit_memos=None,
        code=None,
        discount_date_calculation=None,
        discount_percent=None,
        display_name=None,
        due_date_calculation=None,
        last_modified_date_time=None):
    """Update the payment term of the credit memo's customer."""
    payload = {
        'id': id_,
        'calculate_discount_on_credit_memos': calculate_discount_on_credit_memos,
        'code': code,
        'discount_date_calculation': discount_date_calculation,
        'discount_percent': discount_percent,
        'display_name': display_name,
        'due_date_calculation': due_date_calculation,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_payment_term(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_update_picture(
        client,
        company_id,
        sales_credit_memo_id,
        picture_id,
        content_type,
        id_=None,
        content=None,
        height=None,
        width=None):
    """Update one picture of the credit memo's customer (content_type required)."""
    payload = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.update_picture(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        picture_id=picture_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_customer_update_shipment_method(
        client,
        company_id,
        sales_credit_memo_id,
        id_=None,
        code=None,
        display_name=None,
        last_modified_date_time=None):
    """Update the shipment method of the credit memo's customer."""
    payload = {
        'id': id_,
        'code': code,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_shipment_method(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_delete_account(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        if_match=None):
    """Delete the account linked to a credit memo line."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'if_match': if_match,
    }
    return client.delete_account(**kwargs)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_delete_item(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        if_match=None):
    """Delete the item linked to a credit memo line."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'if_match': if_match,
    }
    return client.delete_item(**kwargs)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_show_account(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        select=None,
        expand=None):
    """Get the account linked to a credit memo line."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_account(**kwargs)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_show_item(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        select=None,
        expand=None):
    """Get the item linked to a credit memo line."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_item(**kwargs)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_update_account(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        id_=None,
        blocked=None,
        category=None,
        display_name=None,
        last_modified_date_time=None,
        number=None,
        sub_category=None):
    """Update the account linked to a credit memo line."""
    payload = {
        'id': id_,
        'blocked': blocked,
        'category': category,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
        'number': number,
        'sub_category': sub_category,
    }
    return client.update_account(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_update_item(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        id_=None,
        base_unit_of_measure_id=None,
        blocked=None,
        display_name=None,
        gtin=None,
        inventory=None,
        item_category_code=None,
        item_category_id=None,
        last_modified_date_time=None,
        number=None,
        price_includes_tax=None,
        tax_group_code=None,
        tax_group_id=None,
        type_=None,
        unit_cost=None,
        unit_price=None,
        item_category=None,
        picture=None):
    """Update the item linked to a credit memo line.

    ``type_`` is sent as the ``type`` body field (trailing underscore avoids
    shadowing the builtin).
    """
    payload = {
        'id': id_,
        'base_unit_of_measure_id': base_unit_of_measure_id,
        'blocked': blocked,
        'display_name': display_name,
        'gtin': gtin,
        'inventory': inventory,
        'item_category_code': item_category_code,
        'item_category_id': item_category_id,
        'last_modified_date_time': last_modified_date_time,
        'number': number,
        'price_includes_tax': price_includes_tax,
        'tax_group_code': tax_group_code,
        'tax_group_id': tax_group_id,
        'type': type_,
        'unit_cost': unit_cost,
        'unit_price': unit_price,
        'item_category': item_category,
        'picture': picture,
    }
    return client.update_item(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_create_picture(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        content_type,
        id_=None,
        content=None,
        height=None,
        width=None):
    """Create a picture on the item of a credit memo line (content_type required)."""
    payload = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.create_picture(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        body=payload,
    )
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_delete_item_category(
        client,
        company_id,
        sales_credit_memo_id,
        sales_credit_memo_line_id,
        if_match=None):
    """Delete the item category of a credit memo line's item."""
    kwargs = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'if_match': if_match,
    }
    return client.delete_item_category(**kwargs)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_delete_picture(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        picture_id, if_match=None):
    """Delete a picture from the item on a sales credit memo line."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'picture_id': picture_id,
        'if_match': if_match,
    }
    return client.delete_picture(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_list_picture(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        orderby=None, select=None, expand=None):
    """List pictures of the item on a sales credit memo line."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_picture(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_set_picture_content(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        picture_id, data):
    """Upload binary content for a picture of the item on a sales credit memo line."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'picture_id': picture_id,
        'data': data,
    }
    return client.set_picture_content(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_show_item_category(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        select=None, expand=None):
    """Get the item category of the item on a sales credit memo line."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_item_category(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_show_picture(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        picture_id, select=None, expand=None):
    """Get one picture of the item on a sales credit memo line."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'picture_id': picture_id,
        'select': select,
        'expand': expand,
    }
    return client.get_picture(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_show_picture_content(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        picture_id):
    """Download the binary content of a picture on a sales credit memo line item."""
    params = {
        'company_id': company_id,
        'sales_credit_memo_id': sales_credit_memo_id,
        'sales_credit_memo_line_id': sales_credit_memo_line_id,
        'picture_id': picture_id,
    }
    return client.get_picture_content(**params)
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_update_item_category(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        id_=None, code=None, display_name=None, last_modified_date_time=None):
    """Update the item category of the item on a sales credit memo line."""
    body = {
        'id': id_,
        'code': code,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_item_category(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        body=body,
    )
def financials_financial_company_sale_credit_memo_sale_credit_memo_line_item_update_picture(
        client, company_id, sales_credit_memo_id, sales_credit_memo_line_id,
        picture_id, content_type, id_=None, content=None, height=None,
        width=None):
    """Update a picture of the item on a sales credit memo line."""
    body = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.update_picture(
        company_id=company_id,
        sales_credit_memo_id=sales_credit_memo_id,
        sales_credit_memo_line_id=sales_credit_memo_line_id,
        picture_id=picture_id,
        body=body,
    )
def financials_financial_company_sale_invoice_line_delete_account(
        client, company_id, sales_invoice_line_id, if_match=None):
    """Delete the account linked to a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'if_match': if_match,
    }
    return client.delete_account(**params)
def financials_financial_company_sale_invoice_line_delete_item(
        client, company_id, sales_invoice_line_id, if_match=None):
    """Delete the item linked to a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'if_match': if_match,
    }
    return client.delete_item(**params)
def financials_financial_company_sale_invoice_line_show_account(
        client, company_id, sales_invoice_line_id, select=None, expand=None):
    """Get the account linked to a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_account(**params)
def financials_financial_company_sale_invoice_line_show_item(
        client, company_id, sales_invoice_line_id, select=None, expand=None):
    """Get the item linked to a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_item(**params)
def financials_financial_company_sale_invoice_line_update_account(
        client, company_id, sales_invoice_line_id, id_=None, blocked=None,
        category=None, display_name=None, last_modified_date_time=None,
        number=None, sub_category=None):
    """Update the account linked to a sales invoice line."""
    body = {
        'id': id_,
        'blocked': blocked,
        'category': category,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
        'number': number,
        'sub_category': sub_category,
    }
    return client.update_account(
        company_id=company_id,
        sales_invoice_line_id=sales_invoice_line_id,
        body=body,
    )
def financials_financial_company_sale_invoice_line_update_item(
        client, company_id, sales_invoice_line_id, id_=None,
        base_unit_of_measure_id=None, blocked=None, display_name=None,
        gtin=None, inventory=None, item_category_code=None,
        item_category_id=None, last_modified_date_time=None, number=None,
        price_includes_tax=None, tax_group_code=None, tax_group_id=None,
        type_=None, unit_cost=None, unit_price=None, item_category=None,
        picture=None):
    """Update the item linked to a sales invoice line."""
    # The trailing underscores of id_/type_ avoid shadowing builtins; the
    # request body uses the API property names 'id' and 'type'.
    body = {
        'id': id_,
        'base_unit_of_measure_id': base_unit_of_measure_id,
        'blocked': blocked,
        'display_name': display_name,
        'gtin': gtin,
        'inventory': inventory,
        'item_category_code': item_category_code,
        'item_category_id': item_category_id,
        'last_modified_date_time': last_modified_date_time,
        'number': number,
        'price_includes_tax': price_includes_tax,
        'tax_group_code': tax_group_code,
        'tax_group_id': tax_group_id,
        'type': type_,
        'unit_cost': unit_cost,
        'unit_price': unit_price,
        'item_category': item_category,
        'picture': picture,
    }
    return client.update_item(
        company_id=company_id,
        sales_invoice_line_id=sales_invoice_line_id,
        body=body,
    )
def financials_financial_company_sale_invoice_line_item_create_picture(
        client, company_id, sales_invoice_line_id, content_type, id_=None,
        content=None, height=None, width=None):
    """Create a picture on the item of a sales invoice line."""
    body = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.create_picture(
        company_id=company_id,
        sales_invoice_line_id=sales_invoice_line_id,
        body=body,
    )
def financials_financial_company_sale_invoice_line_item_delete_item_category(
        client, company_id, sales_invoice_line_id, if_match=None):
    """Delete the item category of the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'if_match': if_match,
    }
    return client.delete_item_category(**params)
def financials_financial_company_sale_invoice_line_item_delete_picture(
        client, company_id, sales_invoice_line_id, picture_id, if_match=None):
    """Delete a picture from the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'picture_id': picture_id,
        'if_match': if_match,
    }
    return client.delete_picture(**params)
def financials_financial_company_sale_invoice_line_item_list_picture(
        client, company_id, sales_invoice_line_id, orderby=None, select=None,
        expand=None):
    """List pictures of the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'orderby': orderby,
        'select': select,
        'expand': expand,
    }
    return client.list_picture(**params)
def financials_financial_company_sale_invoice_line_item_set_picture_content(
        client, company_id, sales_invoice_line_id, picture_id, data):
    """Upload binary content for a picture of the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'picture_id': picture_id,
        'data': data,
    }
    return client.set_picture_content(**params)
def financials_financial_company_sale_invoice_line_item_show_item_category(
        client, company_id, sales_invoice_line_id, select=None, expand=None):
    """Get the item category of the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'select': select,
        'expand': expand,
    }
    return client.get_item_category(**params)
def financials_financial_company_sale_invoice_line_item_show_picture(
        client, company_id, sales_invoice_line_id, picture_id, select=None,
        expand=None):
    """Get one picture of the item on a sales invoice line."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'picture_id': picture_id,
        'select': select,
        'expand': expand,
    }
    return client.get_picture(**params)
def financials_financial_company_sale_invoice_line_item_show_picture_content(
        client, company_id, sales_invoice_line_id, picture_id):
    """Download the binary content of a picture on a sales invoice line item."""
    params = {
        'company_id': company_id,
        'sales_invoice_line_id': sales_invoice_line_id,
        'picture_id': picture_id,
    }
    return client.get_picture_content(**params)
def financials_financial_company_sale_invoice_line_item_update_item_category(
        client, company_id, sales_invoice_line_id, id_=None, code=None,
        display_name=None, last_modified_date_time=None):
    """Update the item category of the item on a sales invoice line."""
    body = {
        'id': id_,
        'code': code,
        'display_name': display_name,
        'last_modified_date_time': last_modified_date_time,
    }
    return client.update_item_category(
        company_id=company_id,
        sales_invoice_line_id=sales_invoice_line_id,
        body=body,
    )
def financials_financial_company_sale_invoice_line_item_update_picture(
        client, company_id, sales_invoice_line_id, picture_id, content_type,
        id_=None, content=None, height=None, width=None):
    """Update a picture of the item on a sales invoice line."""
    body = {
        'id': id_,
        'content': content,
        'content_type': content_type,
        'height': height,
        'width': width,
    }
    return client.update_picture(
        company_id=company_id,
        sales_invoice_line_id=sales_invoice_line_id,
        picture_id=picture_id,
        body=body,
    )
def financials_financial_company_sale_invoice_cancel(
        client, company_id, sales_invoice_id):
    """Cancel a sales invoice."""
    params = {
        'company_id': company_id,
        'sales_invoice_id': sales_invoice_id,
    }
    return client.cancel(**params)
def financials_financial_company_sale_invoice_cancel_and_send(
        client, company_id, sales_invoice_id):
    """Cancel a sales invoice and send the cancellation."""
    params = {
        'company_id': company_id,
        'sales_invoice_id': sales_invoice_id,
    }
    return client.cancel_and_send(**params)
def financials_financial_company_sale_invoice_create_sale_invoice_line(client,
company_id,
sales_invoice_id,
id_=None,
account_id=None,
amount_excluding_tax=None,
| |
import pycropml.transpiler.antlr_py.grammars
from pycropml.transpiler.antlr_py.grammars.CSharpLexer import CSharpLexer
from pycropml.transpiler.antlr_py.grammars.CSharpParser import CSharpParser
from pycropml.transpiler.antlr_py.grammars.Fortran90Lexer import Fortran90Lexer
from pycropml.transpiler.antlr_py.grammars.Fortran90Parser import Fortran90Parser
from pycropml.transpiler.antlr_py.csharp import csharp_generate_tree
from pycropml.transpiler.antlr_py.fortran import fortran_generate_tree
from antlr4 import *
import warnings
import inspect
from typing import Dict, Optional, List, Union, Type, Any, Callable
from functools import reduce
from collections import OrderedDict, namedtuple
from ast import AST, NodeTransformer
from antlr4.Token import CommonToken
from antlr4 import CommonTokenStream, ParseTreeVisitor, ParserRuleContext, RuleContext
from antlr4.tree.Tree import ErrorNode, TerminalNodeImpl, ParseTree
from antlr4.error.ErrorListener import ErrorListener, ConsoleErrorListener
from operator import methodcaller
from antlr4 import InputStream
# Language keys understood by the transpiler front end: C# dialects
# ('cs', 'bioma') and Fortran dialects ('f90', 'dssat').
languages = ['cs',"bioma", 'f90', 'dssat']
# Maps each language key to the tree-generator family used for it.
gen = {'cs':"csharp","bioma":"csharp", 'f90':"fortran", 'dssat':"fortran"}
# Maps each language key to the ANTLR grammar class-name prefix (e.g. 'CSharp'
# -> CSharpLexer/CSharpParser).
# NOTE(review): 'sirius' appears here but not in `languages` — confirm intended.
NAMES = {'cs':'CSharp','sirius':'CSharp',"bioma":"CSharp", 'f90':'Fortran90', 'dssat':'Fortran90'}
def langLexerParser(ant):
    """Resolve the per-language ANTLR class named by *ant*.

    Parameters:
        ant: either "Lexer" or "Parser" — the suffix of the grammar class
            to look up (e.g. CSharpLexer, Fortran90Parser).

    Returns:
        dict mapping each key in `languages` to the corresponding class
        resolved from pycropml.transpiler.antlr_py.grammars.
    """
    # `lang` replaces the original loop variable `format`, which shadowed the
    # builtin of the same name.
    return {
        lang: getattr(
            getattr(
                pycropml.transpiler.antlr_py.grammars,
                '%s%s' % (NAMES[lang], ant)),
            '%s%s' % (NAMES[lang], ant))
        for lang in languages
    }
# Per-language ANTLR lexer and parser classes, keyed by language code.
LexersGenerators = langLexerParser("Lexer")
ParsersGenerators = langLexerParser("Parser")
# Per-language tree-generation helper modules (csharp_generate_tree /
# fortran_generate_tree), resolved from pycropml.transpiler.antlr_py.<family>.
genTree= {
    format: getattr(
        getattr(
            pycropml.transpiler.antlr_py,
            '%s' % (gen[format])),
        '%s_generate_tree' % (gen[format]))
    for format in languages
}
def parsef(code, language,
           start="compilation_unit",
           strict = "False",
           transform: Union[str, Callable] = None,
           error_listener: ErrorListener = None,
           ):
    """Lex and parse *code* for *language* and return the generated tree.

    Parameters:
        code: source text to parse.
        language: key into LexersGenerators/ParsersGenerators
            ('cs', 'bioma', 'f90', 'dssat').
        start: NOTE(review) unused — the start rule is chosen by the
            per-language generate() helper in genTree.
        strict: NOTE(review) unused; also note the default is the *string*
            "False", not the boolean.
        transform: NOTE(review) unused in this implementation.
        error_listener: NOTE(review) unused; a LexerErrorListener is always
            installed on the lexer instead.

    Returns:
        whatever genTree[language].generate(parser) produces.
    """
    input_stream = InputStream(code) #encoding="utf-8"
    lexer = LexersGenerators[language](input_stream)
    lexer.removeErrorListeners()
    lexer.addErrorListener(LexerErrorListener())
    stream = CommonTokenStream(lexer)
    parser = ParsersGenerators[language](stream)
    #tree = parser.compilation_unit()
    tree = genTree[language].generate(parser)
    # NOTE(review): assigned *after* the tree was generated, so it cannot
    # affect the parse above (True is also the ANTLR default).
    parser.buildParseTrees = True # default
    return tree
"""
from antlr-ast
It allows you to use ANTLR grammars and use the parser output to generate an abstract syntax tree (AST).
https://github.com/datacamp/antlr-ast/blob/master/README.md
"""
class CaseTransformInputStream(InputStream):
    """Support case insensitive languages
    https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach
    """

    UPPER = "upper"
    LOWER = "lower"

    def __init__(self, *args, transform=None, **kwargs):
        # Resolve the transform before super().__init__, which loads the
        # string through _loadString below.
        if transform is None:
            self.transform = lambda ch: ch
        elif transform == self.UPPER:
            self.transform = methodcaller("upper")
        elif transform == self.LOWER:
            self.transform = methodcaller("lower")
        elif callable(transform):
            self.transform = transform
        else:
            raise ValueError("Invalid transform")
        super().__init__(*args, **kwargs)

    def _loadString(self):
        # Apply the case transform character-by-character while converting
        # the backing string to code points.
        self._index = 0
        self.data = [ord(self.transform(ch)) for ch in self.strdata]
        self._size = len(self.data)

    def __repr__(self):
        return "<{} {}>".format(type(self).__name__, self.transform)
def dump_node(node, node_class=AST):
    """Recursively convert an AST into plain ``{"type", "data"}`` dicts.

    Fields whose value is None are omitted; lists are dumped element-wise;
    anything else (strings, numbers, ...) is returned unchanged.
    """
    if isinstance(node, node_class):
        data = OrderedDict(
            (name, dump_node(getattr(node, name), node_class=node_class))
            for name in node._fields
            if getattr(node, name, None) is not None
        )
        return {"type": type(node).__name__, "data": data}
    if isinstance(node, list):
        return [dump_node(item, node_class=node_class) for item in node]
    return node
FieldSpec = namedtuple("FieldSpec", ["name", "origin"])
def parse_field_spec(spec: str) -> FieldSpec:
# parse mapping for = and . # old: and indices [] -----
name, *origin = [part.strip() for part in spec.split("=")]
origin = name if not origin else origin[0]
origin = origin.split(".")
return FieldSpec(name, origin)
class AstNodeMeta(type):
    """Metaclass deriving ``_fields`` from a class's ``_fields_spec``.

    Duplicate names are removed while the first-seen order is preserved.
    """

    @property
    def _fields(cls):
        names = (parse_field_spec(spec).name for spec in cls._fields_spec)
        return tuple(OrderedDict.fromkeys(names))
# Speaker class ---------------------------------------------------------------
class Speaker:
    """Produce human-friendly descriptions of AST nodes from a config."""

    def __init__(self, **cfg):
        """Initialize speaker instance, for a set of AST nodes.
        Arguments:
            nodes: dictionary of node names, and their human friendly names.
                   Each entry for a node may also be a dictionary containing
                   name: human friendly name, fields: a dictionary to override
                   the field names for that node.
            fields: dictionary of human friendly field names, used as a default
                    for each node.
        """
        self.node_names = cfg["nodes"]
        self.field_names = cfg.get("fields", {})

    def describe(self, node, fmt="{node_name}", field=None, **kwargs):
        """Render *fmt* with {node}, {node_name} and {field_name} available."""
        cls_name = node.__class__.__name__
        if field:
            def_field_name = self.field_names.get(field) or field.replace("_", " ")
        else:
            def_field_name = ""
        node_cfg = self.node_names.get(cls_name, cls_name)
        node_name, field_names = self.get_info(node_cfg)
        context = {
            "node": node,
            "field_name": field_names.get(field, def_field_name),
            "node_name": node_name.format(node=node),
        }
        return fmt.format(**context, **kwargs)

    @staticmethod
    def get_info(node_cfg):
        """Return a tuple with the verbal name of a node, and a dict of field names."""
        if not isinstance(node_cfg, dict):
            node_cfg = {"name": node_cfg}
        return node_cfg.get("name"), node_cfg.get("fields", {})
# Error Listener ------------------------------------------------------------------
# from antlr4.error.Errors import RecognitionException
class AntlrException(Exception):
    """Carries an ANTLR error message plus the original recognition exception."""

    def __init__(self, msg, orig):
        self.msg = msg
        self.orig = orig
class StrictErrorListener(ErrorListener):
    """Listener that turns syntax errors into AntlrException and silently
    ignores ambiguity / full-context / context-sensitivity reports."""

    def syntaxError(self, recognizer, badSymbol, line, col, msg, e):
        # The recognizer will be the parser instance.
        formatted = "line {line}:{col} {msg}".format(
            badSymbol=badSymbol, line=line, col=col, msg=msg
        )
        raise AntlrException(formatted, e)

    def reportAmbiguity(
        self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs
    ):
        # Intentionally ignored.
        return

    def reportAttemptingFullContext(
        self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs
    ):
        # Intentionally ignored.
        return

    def reportContextSensitivity(
        self, recognizer, dfa, startIndex, stopIndex, prediction, configs
    ):
        # Intentionally ignored.
        return
class LexerErrorListener(ConsoleErrorListener):
    """Console listener that appends the input stream's repr to lexer error
    messages when the stream is a CaseTransformInputStream (so the applied
    case transform is visible in the diagnostic)."""

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        # `e` can be None for some recognition errors, and not every
        # exception type carries an `input` attribute — guard both before
        # dereferencing to avoid masking the real error with AttributeError.
        if e is not None and isinstance(getattr(e, "input", None), CaseTransformInputStream):
            msg = msg + " " + repr(e.input)
        super().syntaxError(recognizer, offendingSymbol, line, column, msg, e)
# Parse Tree Visitor ----------------------------------------------------------
# TODO: visitor inheritance not really needed, but indicates compatibility
# TODO: make general node (Terminal) accessible in class property (.subclasses)?
# Mapping from a field/label name to the index (or list of indices) of the
# corresponding child node(s) in a node's children list.
IndexReferences = Dict[str, Union[int, List[int]]]
class BaseNode(AST):
    """AST is subclassed so we can use Python ast module visiting and walking on the custom AST"""
    def __init__(
        self,
        children: list,
        field_references: IndexReferences,
        label_references: IndexReferences,
        ctx: Optional[ParserRuleContext] = None,
        position: Optional[dict] = None,
        text: Optional[str] = None,
    ):
        """Store children plus name->index maps for grammar fields and labels.

        `materialize` (defined elsewhere in this module) resolves the index
        maps into actual child nodes for dict-style access.
        """
        self.children = children
        self._field_references = field_references
        self.children_by_field = materialize(self._field_references, self.children)
        self._label_references = label_references
        self.children_by_label = materialize(self._label_references, self.children)
        # Original ANTLR context (if any); used by get_text/get_position.
        self._ctx = ctx
        self.position = position
        self.text = text
    _fields = ()
    # whether to descend for selection (greater descends into lower)
    _priority = 2
    # getattr: return None or raise for nonexistent attr
    # in Transformer conditionals:
    # - getattr(obj, attr, None) works with both
    # - hasattr(obj, attr) if strict
    # - obj.attr if not strict
    _strict = False
    @classmethod
    def create(
        cls,
        ctx: ParserRuleContext,
        children: Optional[list] = None,
        registry: Optional["BaseNodeRegistry"] = None,
    ) -> "BaseNode":
        """Build a BaseNode subclass instance from an ANTLR parse context.

        The subclass is named after the context class (``FooContext`` -> ``Foo``)
        and is obtained from (or created in) the given registry.
        """
        if registry is None:
            registry = BaseNodeRegistry()
        if children is None:
            children = ctx.children
        # get_field_names/get_field_references/get_label_names are module-level
        # helpers defined elsewhere in this file.
        field_names = get_field_names(ctx)
        children_by_field = get_field_references(ctx, field_names)
        label_names = get_label_names(ctx)
        children_by_label = get_field_references(ctx, label_names)
        cls_name = type(ctx).__name__.split("Context")[0]
        subclass = registry.get_cls(cls_name, tuple(field_names))
        return subclass(children, children_by_field, children_by_label, ctx)
    @classmethod
    def create_cls(cls, cls_name: str, field_names: tuple) -> Type["BaseNode"]:
        """Dynamically create a BaseNode subclass with the given ``_fields``."""
        return type(cls_name, (cls,), {"_fields": field_names})
    def __getattr__(self, name):
        """Resolve unknown attributes from labels first, then fields.

        When ``_strict`` is False (default), unknown names yield None instead
        of raising, which lets transformer code probe optional children.
        """
        try:
            result = self.children_by_label.get(name) or self.children_by_field[name]
        except KeyError:
            if self._strict:
                raise AttributeError(
                    "{}.{} is invalid.".format(self.__class__.__name__, name)
                )
            else:
                result = None
        return result
    @classmethod
    def combine(cls, *fields: "BaseNode") -> List["BaseNode"]:
        """Combine fields
        Creates a list field from other fields
        Filters None and combines other elements in a flat list
        Use in transformer methods.
        """
        result = reduce(cls.extend_node_list, fields, [])
        return result
    @staticmethod
    def extend_node_list(
        acc: List["BaseNode"], new: Union[List["BaseNode"], "BaseNode"]
    ) -> List["BaseNode"]:
        """Extend accumulator with Node(s) from new"""
        if new is None:
            new = []
        elif not isinstance(new, list):
            new = [new]
        return acc + new
    def get_text(self, full_text: str = None) -> Optional[str]:
        """Return the source text for this node.

        Prefers the ANTLR context: with *full_text* given, slices it using the
        context's start/stop (rule) or symbol (terminal) offsets; otherwise
        asks the context directly. Falls back to the stored ``self.text``.
        """
        # TODO implement as __str__?
        #  + easy to combine with str/Terminal
        #  + use Python instead of custom interface
        #  (-) very different from repr / json
        text = None
        if isinstance(self._ctx, (TerminalNodeImpl, RuleContext)):
            if full_text is None:
                text = self._ctx.getText()
            elif getattr(self._ctx, "start", None) and getattr(self._ctx, "stop", None):
                text = full_text[self._ctx.start.start : self._ctx.stop.stop + 1]
            elif (
                getattr(self._ctx, "symbol", None)
                and getattr(self._ctx.symbol, "start", None)
                and getattr(self._ctx.symbol, "stop", None)
            ):
                text = full_text[self._ctx.symbol.start : self._ctx.symbol.stop + 1]
        if text is None and self.text:
            text = self.text
        return text
    def get_position(self) -> Optional[Dict[str, int]]:
        """Return line/column start and end for this node.

        Derived from the ANTLR context when available (terminal symbol or rule
        start/stop tokens); otherwise falls back to the stored ``self.position``.
        """
        position = None
        ctx = self._ctx
        if ctx is not None:
            if isinstance(ctx, TerminalNodeImpl):
                position = {
                    "line_start": ctx.symbol.line,
                    "column_start": ctx.symbol.column,
                    "line_end": ctx.symbol.line,
                    "column_end": ctx.symbol.column
                    + (ctx.symbol.stop - ctx.symbol.start),
                }
            elif getattr(ctx, "start", None) and getattr(ctx, "stop", None):
                position = {
                    "line_start": ctx.start.line,
                    "column_start": ctx.start.column,
                    "line_end": ctx.stop.line,
                    "column_end": ctx.stop.column + (ctx.stop.stop - ctx.stop.start),
                }
        return position or self.position
    def __repr__(self):
        return str({**self.children_by_field, **self.children_by_label})
# TODO:
AstNode = BaseNode
class Terminal(BaseNode):
    """This is a thin node wrapper for a string.
    The node is transparent when not in debug mode.
    In debug mode, it keeps the link to the corresponding ANTLR node.
    """
    _fields = tuple(["value"])
    DEBUG = True
    # NOTE(review): this class-level list grows for every Terminal created
    # while DEBUG is on and is never cleared here — confirm intended.
    DEBUG_INSTANCES = []
    def __new__(cls, *args, **kwargs):
        # In debug mode create a real Terminal and remember it; otherwise the
        # constructor is "transparent" and returns the bare token string
        # (args[0] is the children list, args[0][0] its first element).
        instance = super().__new__(cls, *args, **kwargs)
        if cls.DEBUG:
            cls.DEBUG_INSTANCES.append(instance)
            return instance
        else:
            return args[0][0]
    @classmethod
    def from_text(cls, text: str, ctx: Optional[ParserRuleContext] = None):
        """Build a Terminal whose single child is *text*."""
        return cls([text], {"value": 0}, {}, ctx)
    def __eq__(self, other):
        # Compares by the wrapped string, so Terminal("x") == "x".
        return self.value == other
    def __str__(self):
        # currently just used for better formatting in debugger
        return self.value
    def __repr__(self):
        return "'{}'".format(self.value)
class AliasNode(BaseNode, metaclass=AstNodeMeta):
# TODO: look at AstNode methods
# defines class properties
# - as a property name to copy from ANTLR nodes
# - as a property name defined in terms of (nested) ANTLR node properties
# the field will be set to the first definition that is not undefined
_fields_spec = []
_fields = AstNodeMeta._fields
# Defines which ANTLR nodes to convert to this node. Elements can | |
Shotel+4",
402905: "Chaos Shotel+5",
403000: "Jagged Ghost Blade",
403001: "Jagged Ghost Blade+1",
403002: "Jagged Ghost Blade+2",
403003: "Jagged Ghost Blade+3",
403004: "Jagged Ghost Blade+4",
403005: "Jagged Ghost Blade+5",
405000: "Painting Guardian Sword",
405001: "Painting Guardian Sword+1",
405002: "Painting Guardian Sword+2",
405003: "Painting Guardian Sword+3",
405004: "Painting Guardian Sword+4",
405005: "Painting Guardian Sword+5",
405006: "Painting Guardian Sword+6",
405007: "Painting Guardian Sword+7",
405008: "Painting Guardian Sword+8",
405009: "Painting Guardian Sword+9",
405010: "Painting Guardian Sword+10",
405011: "Painting Guardian Sword+11",
405012: "Painting Guardian Sword+12",
405013: "Painting Guardian Sword+13",
405014: "Painting Guardian Sword+14",
405015: "Painting Guardian Sword+15",
405100: "Crys. Ptg. Guardian Sword",
405101: "Crys. Ptg. Guardian Sword+1",
405102: "Crys. Ptg. Guardian Sword+2",
405103: "Crys. Ptg. Guardian Sword+3",
405104: "Crys. Ptg. Guardian Sword+4",
405105: "Crys. Ptg. Guardian Sword+5",
405200: "Ltng. Ptg. Guardian Sword",
405201: "Ltng. Ptg. Guardian Sword+1",
405202: "Ltng. Ptg. Guardian Sword+2",
405203: "Ltng. Ptg. Guardian Sword+3",
405204: "Ltng. Ptg. Guardian Sword+4",
405205: "Ltng. Ptg. Guardian Sword+5",
405300: "Raw Painting Guardian Sword",
405301: "Raw Ptg. Guardian Sword+1",
405302: "Raw Ptg. Guardian Sword+2",
405303: "Raw Ptg. Guardian Sword+3",
405304: "Raw Ptg. Guardian Sword+4",
405305: "Raw Ptg. Guardian Sword+5",
405400: "Magic Ptg. Guardian Sword",
405401: "Magic Ptg. Guardian Sword+1",
405402: "Magic Ptg. Guardian Sword+2",
405403: "Magic Ptg. Guardian Sword+3",
405404: "Magic Ptg. Guardian Sword+4",
405405: "Magic Ptg. Guardian Sword+5",
405406: "Magic Ptg. Guardian Sword+6",
405407: "Magic Ptg. Guardian Sword+7",
405408: "Magic Ptg. Guardian Sword+8",
405409: "Magic Ptg. Guardian Sword+9",
405410: "Mag. Ptg. Guardian Sword+10",
405500: "Ench. Ptg. Guardian Sword",
405501: "Ench. Ptg. Guardian Sword+1",
405502: "Ench. Ptg. Guardian Sword+2",
405503: "Ench. Ptg. Guardian Sword+3",
405504: "Ench. Ptg. Guardian Sword+4",
405505: "Ench. Ptg. Guardian Sword+5",
405600: "Div. Ptg. Guardian Sword",
405601: "Div. Ptg. Guardian Sword+1",
405602: "Div. Ptg. Guardian Sword+2",
405603: "Div. Ptg. Guardian Sword+3",
405604: "Div. Ptg. Guardian Sword+4",
405605: "Div. Ptg. Guardian Sword+5",
405606: "Div. Ptg. Guardian Sword+6",
405607: "Div. Ptg. Guardian Sword+7",
405608: "Div. Ptg. Guardian Sword+8",
405609: "Div. Ptg. Guardian Sword+9",
405610: "Div. Ptg. Guardian Sword+10",
405700: "Occ. Ptg. Guardian Sword",
405701: "Occ. Ptg. Guardian Sword+1",
405702: "Occ. Ptg. Guardian Sword+2",
405703: "Occ. Ptg. Guardian Sword+3",
405704: "Occ. Ptg. Guardian Sword+4",
405705: "Occ. Ptg. Guardian Sword+5",
405800: "Fire Ptg. Guardian Sword",
405801: "Fire Ptg. Guardian Sword+1",
405802: "Fire Ptg. Guardian Sword+2",
405803: "Fire Ptg. Guardian Sword+3",
405804: "Fire Ptg. Guardian Sword+4",
405805: "Fire Ptg. Guardian Sword+5",
405806: "Fire Ptg. Guardian Sword+6",
405807: "Fire Ptg. Guardian Sword+7",
405808: "Fire Ptg. Guardian Sword+8",
405809: "Fire Ptg. Guardian Sword+9",
405810: "Fire Ptg. Guardian Sword+10",
405900: "Chaos Ptg. Guardian Sword",
405901: "Chaos Ptg. Guardian Sword+1",
405902: "Chaos Ptg. Guardian Sword+2",
405903: "Chaos Ptg. Guardian Sword+3",
405904: "Chaos Ptg. Guardian Sword+4",
405905: "Chaos Ptg. Guardian Sword+5",
406000: "Quelaag's Furysword",
406001: "Quelaag's Furysword+1",
406002: "Quelaag's Furysword+2",
406003: "Quelaag's Furysword+3",
406004: "Quelaag's Furysword+4",
406005: "Quelaag's Furysword+5",
406100: "Quelaag's Furysword",
406101: "Quelaag's Furysword+1",
406102: "Quelaag's Furysword+2",
406103: "Quelaag's Furysword+3",
406104: "Quelaag's Furysword+4",
406105: "Quelaag's Furysword+5",
406200: "Quelaag's Furysword",
406201: "Quelaag's Furysword+1",
406202: "Quelaag's Furysword+2",
406203: "Quelaag's Furysword+3",
406204: "Quelaag's Furysword+4",
406205: "Quelaag's Furysword+5",
406300: "Quelaag's Furysword",
406301: "Quelaag's Furysword+1",
406302: "Quelaag's Furysword+2",
406303: "Quelaag's Furysword+3",
406304: "Quelaag's Furysword+4",
406305: "Quelaag's Furysword+5",
406400: "Quelaag's Furysword",
406401: "Quelaag's Furysword+1",
406402: "Quelaag's Furysword+2",
406403: "Quelaag's Furysword+3",
406404: "Quelaag's Furysword+4",
406405: "Quelaag's Furysword+5",
406500: "Quelaag's Furysword",
406501: "Quelaag's Furysword+1",
406502: "Quelaag's Furysword+2",
406503: "Quelaag's Furysword+3",
406504: "Quelaag's Furysword+4",
406505: "Quelaag's Furysword+5",
450000: "Server",
450001: "Server+1",
450002: "Server+2",
450003: "Server+3",
450004: "Server+4",
450005: "Server+5",
450006: "Server+6",
450007: "Server+7",
450008: "Server+8",
450009: "Server+9",
450010: "Server+10",
450011: "Server+11",
450012: "Server+12",
450013: "Server+13",
450014: "Server+14",
450015: "Server+15",
450100: "Crystal Server",
450101: "Crystal Server+1",
450102: "Crystal Server+2",
450103: "Crystal Server+3",
450104: "Crystal Server+4",
450105: "Crystal Server+5",
450200: "Lightning Server",
450201: "Lightning Server+1",
450202: "Lightning Server+2",
450203: "Lightning Server+3",
450204: "Lightning Server+4",
450205: "Lightning Server+5",
450300: "Raw Server",
450301: "Raw Server+1",
450302: "Raw Server+2",
450303: "Raw Server+3",
450304: "Raw Server+4",
450305: "Raw Server+5",
450400: "Magic Server",
450401: "Magic Server+1",
450402: "Magic Server+2",
450403: "Magic Server+3",
450404: "Magic Server+4",
450405: "Magic Server+5",
450406: "Magic Server+6",
450407: "Magic Server+7",
450408: "Magic Server+8",
450409: "Magic Server+9",
450410: "Magic Server+10",
450500: "Enchanted Server",
450501: "Enchanted Server+1",
450502: "Enchanted Server+2",
450503: "Enchanted Server+3",
450504: "Enchanted Server+4",
450505: "Enchanted Server+5",
450600: "Divine Server",
450601: "Divine Server+1",
450602: "Divine Server+2",
450603: "Divine Server+3",
450604: "Divine Server+4",
450605: "Divine Server+5",
450606: "Divine Server+6",
450607: "Divine Server+7",
450608: "Divine Server+8",
450609: "Divine Server+9",
450610: "Divine Server+10",
450700: "Occult Server",
450701: "Occult Server+1",
450702: "Occult Server+2",
450703: "Occult Server+3",
450704: "Occult Server+4",
450705: "Occult Server+5",
450800: "Fire Server",
450801: "Fire Server+1",
450802: "Fire Server+2",
450803: "Fire Server+3",
450804: "Fire Server+4",
450805: "Fire Server+5",
450806: "Fire Server+6",
450807: "Fire Server+7",
450808: "Fire Server+8",
450809: "Fire Server+9",
450810: "Fire Server+10",
450900: "Chaos Server",
450901: "Chaos Server+1",
450902: "Chaos Server+2",
450903: "Chaos Server+3",
450904: "Chaos Server+4",
450905: "Chaos Server+5",
451000: "Murakumo",
451001: "Murakumo+1",
451002: "Murakumo+2",
451003: "Murakumo+3",
451004: "Murakumo+4",
451005: "Murakumo+5",
451006: "Murakumo+6",
451007: "Murakumo+7",
451008: "Murakumo+8",
451009: "Murakumo+9",
451010: "Murakumo+10",
451011: "Murakumo+11",
451012: "Murakumo+12",
451013: "Murakumo+13",
451014: "Murakumo+14",
451015: "Murakumo+15",
451100: "Crystal Murakumo",
451101: "Crystal Murakumo+1",
451102: "Crystal Murakumo+2",
451103: "Crystal Murakumo+3",
451104: "Crystal Murakumo+4",
451105: "Crystal Murakumo+5",
451200: "Lightning Murakumo",
451201: "Lightning Murakumo+1",
451202: "Lightning Murakumo+2",
451203: "Lightning Murakumo+3",
451204: "Lightning Murakumo+4",
451205: "Lightning Murakumo+5",
451300: "Raw Murakumo",
451301: "Raw Murakumo+1",
451302: "Raw Murakumo+2",
451303: "Raw Murakumo+3",
451304: "Raw Murakumo+4",
451305: "Raw Murakumo+5",
451400: "Magic Murakumo",
451401: "Magic Murakumo+1",
451402: "Magic Murakumo+2",
451403: "Magic Murakumo+3",
451404: "Magic Murakumo+4",
451405: "Magic Murakumo+5",
451406: "Magic Murakumo+6",
451407: "Magic Murakumo+7",
451408: "Magic Murakumo+8",
451409: "Magic Murakumo+9",
451410: "Magic Murakumo+10",
451500: "Enchanted Murakumo",
451501: "Enchanted Murakumo+1",
451502: "Enchanted Murakumo+2",
451503: "Enchanted Murakumo+3",
451504: "Enchanted Murakumo+4",
451505: "Enchanted Murakumo+5",
451600: "Divine Murakumo",
451601: "Divine Murakumo+1",
451602: "Divine Murakumo+2",
451603: "Divine Murakumo+3",
451604: "Divine Murakumo+4",
451605: "Divine Murakumo+5",
451606: "Divine Murakumo+6",
451607: "Divine Murakumo+7",
451608: "Divine Murakumo+8",
451609: "Divine Murakumo+9",
451610: "Divine Murakumo+10",
451700: "Occult Murakumo",
451701: "Occult Murakumo+1",
451702: "Occult Murakumo+2",
451703: "Occult Murakumo+3",
451704: "Occult Murakumo+4",
451705: "Occult Murakumo+5",
451800: "Fire Murakumo",
451801: "Fire Murakumo+1",
451802: "Fire Murakumo+2",
451803: "Fire Murakumo+3",
451804: "Fire Murakumo+4",
451805: "Fire Murakumo+5",
451806: "Fire | |
and fat, trimmed to 1/8" fat, choice, raw
17247: [], # Lamb, shoulder, arm, separable lean and fat, trimmed to 1/8" fat, choice, cooked, braised
17248: [], # Lamb, shoulder, arm, separable lean and fat, trimmed to 1/8" fat, cooked, broiled
17249: [], # Lamb, shoulder, arm, separable lean and fat, trimmed to 1/8" fat, choice, roasted
17250: [], # Lamb, shoulder, blade, separable lean and fat, trimmed to 1/8" fat, choice, raw
17251: [], # Lamb, shoulder, blade, separable lean and fat, trimmed to 1/8" fat, choice, cooked, braised
17252: [], # Lamb, shoulder, blade, separable lean and fat, trimmed to 1/8" fat, choice, cooked, broiled
17253: [], # Lamb, shoulder, blade, separable lean and fat, trimmed to 1/8" fat, choice, cooked, roasted
17254: [], # Lamb, New Zealand, imported, frozen, composite of trimmed retail cuts, separable lean and fat, trimmed to 1/8" fat, raw
17255: [], # Lamb, New Zealand, imported, frozen, composite of trimmed retail cuts, separable lean and fat, trimmed to 1/8" fat, cooked
17256: [], # Lamb, New Zealand, imported, frozen, foreshank, separable lean and fat, trimmed to 1/8" fat, raw
17257: [], # Lamb, New Zealand, imported, frozen, foreshank, separable lean and fat, trimmed to 1/8" fat, cooked, braised
17258: [], # Lamb, New Zealand, imported, frozen, leg, whole (shank and sirloin), separable lean and fat, trimmed to 1/8" fat, raw
17259: [], # Lamb, New Zealand, imported, frozen, leg, whole (shank and sirloin), separable lean and fat, trimmed to 1/8" fat, cooked, roasted
17260: [], # Lamb, New Zealand, imported, frozen, loin, separable lean and fat, trimmed to 1/8" fat, raw
17261: [], # Lamb, New Zealand, imported, frozen, loin, separable lean and fat, trimmed to 1/8" fat, cooked, broiled
    17262: [], # Lamb, New Zealand, imported, frozen, rib, separable lean and fat, trimmed to 1/8" fat, raw
17263: [], # Lamb, New Zealand, imported, frozen, rib, separable lean and fat, trimmed to 1/8" fat, cooked, roasted
17264: [], # Lamb, New Zealand, imported, frozen, shoulder, whole (arm and blade), separable lean and fat, trimmed to 1/8" fat, raw
17265: [], # Lamb, New Zealand, imported, frozen, shoulder, whole (arm and blade), separable lean and fat, trimmed to 1/8" fat, cooked, braised
17267: [], # Game meat, bison, top sirloin, separable lean only, trimmed to 0" fat, raw
17268: [], # Game meat, bison, ribeye, separable lean only, trimmed to 0" fat, raw
17269: [], # Game meat, bison, shoulder clod, separable lean only, trimmed to 0" fat, raw
17270: [], # Veal, breast, separable fat, cooked
17271: [
"Veal breast",
"boneless whole",
], # Veal, breast, whole, boneless, separable lean and fat, raw
17272: [], # Veal, breast, whole, boneless, separable lean and fat, cooked, braised
17273: [], # Veal, breast, plate half, boneless, separable lean and fat, cooked, braised
17274: [], # Veal, breast, point half, boneless, separable lean and fat, cooked, braised
17275: [], # Veal, breast, whole, boneless, separable lean only, cooked, braised
17276: [], # Veal, shank (fore and hind), separable lean and fat, raw
17277: [], # Veal, shank (fore and hind), separable lean and fat, cooked, braised
17278: [], # Veal, shank (fore and hind), separable lean only, raw
17279: [], # Veal, shank (fore and hind), separable lean only, cooked, braised
17280: [], # Lamb, Australian, imported, fresh, composite of trimmed retail cuts, separable lean and fat, trimmed to 1/8" fat, raw
17281: [], # Lamb, Australian, imported, fresh, composite of trimmed retail cuts, separable lean and fat, trimmed to 1/8" fat, cooked
17282: [], # Lamb, Australian, imported, fresh, composite of trimmed retail cuts, separable lean only, trimmed to 1/8" fat, raw
17283: [], # Lamb, Australian, imported, fresh, composite of trimmed retail cuts, separable lean only, trimmed to 1/8" fat, cooked
17284: [], # Lamb, Australian, imported, fresh, separable fat, raw
17285: [], # Lamb, Australian, imported, fresh, separable fat, cooked
17286: [], # Lamb, Australian, imported, fresh, foreshank, separable lean and fat, trimmed to 1/8" fat, raw
17287: [], # Lamb, Australian, imported, fresh, foreshank, separable lean and fat, trimmed to 1/8" fat, cooked, braised
17288: [], # Lamb, Australian, imported, fresh, foreshank, separable lean only, trimmed to 1/8" fat, raw
17289: [], # Lamb, Australian, imported, fresh, foreshank, separable lean only, trimmed to 1/8" fat, cooked, braised
17290: [], # Lamb, Australian, imported, fresh, leg, whole (shank and sirloin), separable lean and fat, trimmed to 1/8" fat, raw
17291: [], # Lamb, Australian, imported, fresh, leg, whole (shank and sirloin), separable lean and fat, trimmed to 1/8" fat, cooked, roasted
17292: [], # Lamb, Australian, imported, fresh, leg, whole (shank and sirloin), separable lean only, trimmed to 1/8" fat, raw
17293: [], # Lamb, Australian, imported, fresh, leg, whole (shank and sirloin), separable lean only, trimmed to 1/8" fat, cooked, roasted
17294: [], # Lamb, Australian, imported, fresh, leg, shank half, separable lean and fat, trimmed to 1/8" fat, raw
17295: [], # Lamb, Australian, imported, fresh, leg, shank half, separable lean and fat, trimmed to 1/8" fat, cooked, roasted
17296: [], # Lamb, Australian, imported, fresh, leg, shank half, separable lean only, trimmed to 1/8" fat, raw
17297: [], # Lamb, Australian, imported, fresh, leg, shank half, separable lean only, trimmed to 1/8" fat, cooked, roasted
17298: [], # Lamb, Australian, imported, fresh, leg, sirloin half, boneless, separable lean and fat, trimmed to 1/8" fat, raw
17299: [], # Lamb, Australian, imported, fresh, leg, sirloin half, boneless, separable lean and fat, trimmed to 1/8" fat, cooked, roasted
17300: [], # Lamb, Australian, imported, fresh, leg, sirloin half, boneless, separable lean only, trimmed to 1/8" fat, raw
17301: [], # Lamb, Australian, imported, fresh, leg, sirloin half, boneless, separable lean only, trimmed to 1/8" fat, cooked, roasted
17302: [], # Lamb, Australian, imported, fresh, leg, sirloin chops, boneless, separable lean and fat, trimmed to 1/8" fat, raw
17303: [], # Lamb, Australian, imported, fresh, leg, sirloin chops, boneless, separable lean and fat, trimmed to 1/8" fat, cooked, broiled
17304: [], # Lamb, Australian, imported, fresh, leg, sirloin chops, boneless, separable lean only, trimmed to 1/8" fat, raw
17305: [], # Lamb, Australian, imported, fresh, leg, sirloin chops, boneless, separable lean only, trimmed to 1/8" fat, cooked, broiled
17306: [], # Lamb, Australian, imported, fresh, leg, center slice, bone-in, separable lean and fat, trimmed to 1/8" fat, raw
17307: [], # Lamb, Australian, imported, fresh, leg, center slice, bone-in, separable lean and fat, trimmed to 1/8" fat, cooked, broiled
17308: [], # Lamb, Australian, imported, fresh, leg, center slice, bone-in, separable lean only, trimmed to 1/8" fat, raw
17309: [], # Lamb, Australian, imported, fresh, leg, center slice, bone-in, separable lean only, trimmed to 1/8" fat, cooked, broiled
17310: [], # Lamb, Australian, imported, fresh, loin, separable lean and fat, trimmed to 1/8" fat, raw
17311: [], # Lamb, Australian, imported, fresh, loin, separable lean and fat, trimmed to 1/8" fat, cooked, broiled
17312: [], # Lamb, Australian, imported, fresh, loin, separable lean only, trimmed to 1/8" fat, raw
17313: [], # Lamb, Australian, imported, fresh, loin, separable lean only, trimmed to 1/8" fat, cooked, broiled
17314: [], # Lamb, Australian, imported, fresh, rib chop/rack roast, frenched, bone-in, separable lean and fat, trimmed to 1/8" fat, raw
17315: [], # Lamb, Australian, imported, fresh, rib chop, frenched, bone-in, separable lean and fat, trimmed to 1/8" fat, cooked, grilled
17316: [], # Lamb, Australian, imported, fresh, rib chop/rack roast, frenched, bone-in, separable lean only, trimmed to 1/8" fat, raw
17317: [], # Lamb, Australian, imported, fresh, rib chop, frenched, bone-in, separable lean only, trimmed to 1/8" fat, cooked, grilled
17318: [], # Lamb, Australian, imported, fresh, shoulder, whole (arm and blade), separable lean | |
import base64
import json
import re
import time
import urllib
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
class TokenInvalid(Exception):
    """Raised when authentication fails or no captcha answer is available."""
    pass
class FofaError(Exception):
    """Error reported by the Fofa API, carrying its numeric code and message."""

    def __init__(self, code, message=""):
        super(FofaError, self).__init__(message)
        self.code = code
        self.message = message
class FofaClient(object):
"""docstring for FofaClient"""
def __init__(self,proxies = None, user_agent =None , captcha_model_path = None):
super(FofaClient, self).__init__()
self.API_ENDPOINT = "https://api.fofa.so/v1"
self.captcha_model_path = captcha_model_path
self.proxies = proxies
if user_agent:
self.ua = user_agent
else:
self.ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0"
self.username = None
self.password = <PASSWORD>
self._display_captcha = False
self.session = self.__create_session()
    def __captcha(self,gif):
        """Best-effort automatic captcha solver.

        Feeds the rucaptcha GIF bytes through a TFLite model and returns the
        predicted text over the alphabet 'abcdfhijklmnopqrstuvwxyz'.  Returns
        None when any optional dependency (tensorflow/tflite_runtime, Pillow,
        importlib resources) is missing so login() can fall back to manual
        captcha entry.
        """
        import os
        # Silence TensorFlow's C++ logging before the library is imported.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        try:
            # tensorflow full with lite model
            import tensorflow.lite as tflite
        except ImportError:
            try:
                # tensorflow lite with lite model
                # lite from: pip3 install --index-url https://google-coral.github.io/py-repo/ tflite_runtime
                # Note: no package for python3.9 now (as of 2021.5.31)
                import tflite_runtime.interpreter as tflite
            except ImportError:
                return None
        try:
            from PIL import Image
        except ImportError:
            return None
        try:
            import importlib_resources
        except ImportError:
            try:
                import importlib.resources as importlib_resources
            except:
                return None
        import numpy as np
        from io import BytesIO
        '''
        lite model
        // full
        import tensorflow.lite as tflite
        //lite from: pip3 install --index-url https://google-coral.github.io/py-repo/ tflite_runtime
        import tflite_runtime.interpreter as tflite
        img = Image.open(BytesIO(gif)).convert("L")
        img = np.array(img) / 255.0
        np.reshape( img , input_details[0]['shape'] ).astype('float32')
        interpreter.set_tensor(input_details[0]['index'] , input_data )
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])
        "".join([ 'abcdfhijklmnopqrstuvwxyz'[i] for i in output_data.argmax(axis=-1)[0] ] )
        '''
        # model = tf.keras.models.load_model("rucaptcha/rucaptcha_model")
        # img = Image.open(BytesIO(gif))
        # img_array = np.array(img) /15.0 # NOTE!!! should same as the data used in training model!!!!
        # res = model(np.array([ img_array ]))
        # return "".join([ CHARLIST[i] for i in res.numpy().argmax(axis = -1)[0]])
        CHARLIST = 'abcdfhijklmnopqrstuvwxyz'
        img = Image.open(BytesIO(gif))
        # Prefer a user-supplied model; otherwise extract the bundled one from
        # the package data so it also works when installed as a zip.
        if self.captcha_model_path:
            interpreter = tflite.Interpreter(model_path=self.captcha_model_path)
        else:
            ref = importlib_resources.files("fofaclient") / "model" / "rucaptcha.tflite"
            with importlib_resources.as_file(ref) as path:
                # todo use pkg_resources
                interpreter = tflite.Interpreter(model_path=str(path))
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        # Since the model was trained on 16-bit color data, we do not normalize here.
        img_array = np.array(img)
        input_data = np.array(img_array.reshape(input_details[0]['shape'])).astype('float32')
        interpreter.set_tensor(input_details[0]['index'], input_data)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])
        # argmax over the last axis gives one class index per character slot.
        return "".join([ CHARLIST[i] for i in output_data.argmax(axis = -1)[0]])
def __create_session(self):
s = requests.session()
retry = Retry(total=5, status_forcelist=[429, 500, 502, 503, 504],backoff_factor = 0.1)
s.mount("https",HTTPAdapter(max_retries= retry ))
s.proxies = self.proxies
s.headers.update({"User-Agent": self.ua})
return s
    def login(self , username,password, display_captcha_if_auto_failed = False):
        """Log in through the i.nosec.org SSO form and capture Fofa tokens.

        Solves the captcha automatically via __captcha() when possible.  When
        ``display_captcha_if_auto_failed`` is True, the captcha GIF is written
        to a temp file and the answer read from stdin instead of failing.
        Retries itself on a wrong captcha answer; raises TokenInvalid when no
        captcha answer is available or the login is rejected.
        """
        tmp_session = self.__create_session()
        prelogin = tmp_session.get("https://i.nosec.org/login?service=https%3A%2F%2Ffofa.so%2Flogin")
        # Collect all hidden form fields except authenticity_token (handled below).
        para = re.findall(r'''type="hidden" name="((?!authenticity_token).*?)".*value="(.*?)"''',prelogin.text)
        # authenticity_token from csrf-token not from type="hidden" name="authenticity_token"
        authenticity_token = re.findall(r'''"csrf-token".*?content="(.*?)"''',prelogin.text)[0]
        gif = tmp_session.get("https://i.nosec.org/rucaptcha").content
        captcha = self.__captcha(gif)
        if not captcha:
            if display_captcha_if_auto_failed:
                import tempfile
                with tempfile.NamedTemporaryFile(prefix="fofa",suffix=".gif") as f:
                    f.write(gif)
                    print("Open ",f.name, " to see the gif")
                    captcha = input(">")
            else:
                raise TokenInvalid("no captcha")
        login_dict = {"username":username,"password":password , "_rucaptcha": captcha ,"utf8": "✓" }
        for i in para:
            login_dict.update({i[0]:i[1]})
        login_dict["authenticity_token"] = authenticity_token
        resp = tmp_session.post("https://i.nosec.org/login" , data = login_dict)
        # A successful login redirects (303) before landing on a 200 page.
        if resp.status_code == 200 and len(resp.history) >=1 and resp.history[0].status_code == 303:
            self.access_token = tmp_session.cookies['fofa_token']
            self._userinfo = json.loads(urllib.parse.unquote_plus(tmp_session.cookies['user']))
            self.refresh_token = tmp_session.cookies['refresh_token']
            self.username = username
            self.password = password
            self._display_captcha = display_captcha_if_auto_failed
        elif "登录验证码错误" in resp.text:
            # Page says "captcha wrong" — retry the whole login with a fresh captcha.
            # NOTE(review): this recursive retry skips tmp_session.close() below.
            return self.login(username,password,display_captcha_if_auto_failed)
            pass
        else:
            # "用户名或密码错误" ("wrong username or password") in resp.text:
            # "您的登录请求没有包含有效的登录授权" ("request lacks valid login
            # authorization") — the captcha/token presumably expires after ~2 min.
            raise TokenInvalid("login failed")
        tmp_session.close()
        return self
def userinfo(self):
'''
id:
mid: ???
is_admin
username:
nickname
email:
avatar_medium / avatar_thumb
key: API_KEY // not appear in cookie["user"] ,only appear via APIENDPOINT /me/
rank_name:
rank_level: 0-> 注册用户 1-> 普通会员 2-> 高级会员 3-> 企业会员
company_name:
coins:
credits:
expiration: "-"
login_at: 0 via /me/ , real in cookie
'''
return self._userinfo
def login_with_refresh_token(self , refresh_token):
access_token_info = self.trade_access_token_with_refresh_token(refresh_token)
self.access_token = access_token_info["access_token"]
self._userinfo = access_token_info["info"]
self.refresh_token = refresh_token
return self
def trade_access_token_with_refresh_token(self,refresh_token):
mid = refresh_token.split(".")[1]
mid_raw = json.loads(base64.b64decode(mid+'==') )# padding
assert mid_raw["iss"] == "refresh"
return self._get_unauth("/users/refresh", extra_headers = {"Authorization": refresh_token})
def search_count(self,q,full=False):
'''
only get how many records we can fetch.
'''
return self.search(q,full = full)["page"]["total"]
def __search_limit(self):
# limit from https://fofa.so/static_pages/vip
if self._userinfo["rank_level"] > 0:
PAGE_SIZE = 20
MAX_COUNT = 10_000
else:
# if you use 20 , then you can only visit 2 page , 2*20 results . else you can visit 5 pages , 5 * 10 results
PAGE_SIZE = 10
MAX_COUNT = 50
return PAGE_SIZE , MAX_COUNT
'''
In Fofa Web
the first query is not XHR and condition is not quoted like test and asn!=1123
but the next query is XHR and condition is quoted "test" and asn!="1123"
Tested: q in response will be modified automatically
Returns results, info
results max count of items of your level (list or iterable).
info dict keys:
max_total: max count globally at any level (number)
q: normalized query (string)
full: full result or not (bool)
mode: normal/extend
is_ipq
took
'''
    def search_all(self , q , full=False , iterable = False):
        """Fetch as many results for ``q`` as the account's rank allows.

        Returns (results, info): ``results`` is a list, or a generator when
        ``iterable`` is True; ``info`` carries max_total (global hit count),
        the normalized query ``q``, mode, is_ipq, took, and ``total`` (the
        count actually fetchable at this rank).
        """
        PAGE_SIZE , MAX_COUNT = self.__search_limit()
        pg1 = self.search(q,ps = PAGE_SIZE ,full = full)
        max_total = pg1["page"]["total"]
        info = {"max_total":max_total, "q": pg1["q"], "mode": pg1["mode"], "is_ipq": pg1["is_ipq"], "took": pg1["took"]}
        # mode , is_ipq ..... infos
        # Clamp to the rank limit; remaining pages are fetched lazily or eagerly.
        total = MAX_COUNT if max_total > MAX_COUNT else max_total
        info["total"] = total
        if iterable:
            def _iter():
                # Yield page 1 (already fetched), then page through the rest.
                start = 2
                for i in pg1["assets"]:
                    yield i
                yield_count = len(pg1["assets"])
                while total - yield_count > 0:
                    page = self.search(q, pn = start , ps = PAGE_SIZE , full = full )
                    for i in page["assets"]:
                        yield i
                    yield_count += len(page["assets"])
                    start += 1
                pass
            return _iter(), info
        else:
            assets_all = pg1["assets"]
            start = 2
            while total - len(assets_all) > 0:
                page = self.search(q, pn = start , ps = PAGE_SIZE , full = full )
                assets_all += page["assets"]
                start += 1
            return assets_all , info
'''
Request:
q: query content
qbase64: base64 of q
full: show result older than 1 yr if True
ps: page size 10/20 two choice
pn: page number
'''
'''
Response:
data{
took: "spended time in ms"
q:
qbase64
mode: normal / extended
is_ipq: false // is unique ip?
"page":{ "num":"pagenum" ,"size":"pagesize", "total":"total" }
assets: [
{
"mtime": "2021-05-25 15:00:40",
type:"subdomain" // two kind 类型分布(网站 , 协议) subdomain ,service
"app_servers": [
{
"name": "apache",
"code": "YXBwc2VydmVyPSJhcGFjaGUi"
}
],
"asn_no": 1234,
"asn_org": "xxxx",
'banner': "HTTP/1.1 200 OK\r\nSContent-Type: text/html; charset=UTF-8\r\nContent-Length: 125",
"base_protocol": "tcp",
"cert": "Version: v3\nSerial Number: xxxx\nSignature Algorithm: "
} "certs_is_valid": true,
"certs_issuer_cn": "DigiCert SHA2 Secure Server CA",
"certs_issuer_org": "DigiCert Inc",
"certs_not_after": "1111-11-11 11:11:11",
"certs_not_before": "1212-12-12 12:12:12",
"certs_subject_cn": "xxx.xxx.xx",
"certs_subject_org": "xxx.xxx.xx",
"certs_valid_type": "", //?? maybe text
"city": "Boydton",
"city_code": "",
"country": "美国",
"country_code2": "US",
"country_qcode": "Y291bnRyeT0iVVMi",
"domain": "", // rPTR
"favicon": "https://xxxx/favicon.ico",
"favicon_hash": -1234,
"header": "HTTP/1.1 200 OK\r\nConnection: close\r\n",
"host": "https://xxxx",
"icp": "", //beian hao?
"id": "https://xxxx",
"ip": "xxxx",
"is_fraud": false,
"is_honeypot": false,
"isp": "", //?
"link": "https://xxxx",
"os": [], //?
"port": 443,
"protocol": "https",
"region": "xxx",
"server": "Apache",
"struct_info": [],
"title": "xxx",
@20210601 new added?
"dom_hash":
"dom_sim_hash"
]
}
'''
def search(self,q , pn =1 , ps = 10 , full=False ):
params = { "q":q , "qbase64":base64.b64encode(q.encode("utf-8")) , "ps":ps , "pn":pn , "full": "true" if full else "false"}
data = self._get("/search", params)
return data
'''
Request
mode: normal/extended see: https://fofa.so/static_pages/api_help mode section
'''
'''
since sign= is return via url_key in page from server
so you need visit page to get that url_key
qbase64=x&mode=normal&full=false&ts=x&app_id=9e9fb94330d97833acfbc041ee1a76793f1bc691&sign=E6
qbase64 will be modified (yes!)? mode is dependes on the input "q" ,
'''
    def stats(self, q , full = False):
        """Fetch aggregate statistics for a query via /search/stats.

        The stats endpoint needs a server-generated ``sign=`` value that is
        only embedded (as ``url_key``) in the rendered web result page, so a
        throwaway session first loads https://fofa.so/result to scrape it.
        """
        with self.__create_session() as s:
            resp = s.get("https://fofa.so/result" , params = { "qbase64": base64.b64encode(q.encode("utf-8")) ,"full": "true" if full else "false"})
            # url_key holds the pre-signed query string; decode the JS string escapes.
            params = re.findall(r'''url_key.*?:.*?"(.*?)"''',resp.text)[0].encode().decode("unicode_escape")
            return self._get("/search/stats",params)
def rules_all(self,keyword):
result_all = []
page_one = self.rules(keyword)
total = page_one["page"]["total"]
result_all += page_one["rules"]
start = 2
while total - len(result_all) > 0:
page = self.rules(keyword,pn = start)
result_all += page["rules"]
start += 1
return result_all
'''
Rule is a related item recommendation , (can replace rules/categories api in https://fofa.so/library?cid=0&keyword)
Request
keyword:
ps: page size
pn: page number
'''
'''
Repsonse
Note: in page 1 always only 3 items for display
Note: ps param not work
data:{
"rules":[
{ "name": xxx ,
"code":"base64 query args"}
],
"page":{ "num":"pagenum" ,"size":"pagesize", "total":"total" | |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import argparse
import datetime
from distutils.dir_util import copy_tree
import os
import shutil
import sys
import operator
import tempfile
import traceback
from seedot.compiler.converter.converter import Converter
import seedot.config as config
from seedot.compiler.compiler import Compiler
from seedot.predictor import Predictor
import seedot.util as Util
class Main:
    def __init__(self, algo, version, target, trainingFile, testingFile, modelDir, sf, maximisingMetric, dataset, numOutputs, source):
        """Hold configuration and cross-stage state for one SeeDot run.

        Parameters mirror the CLI options: algorithm, code version
        (float/fixed), target backend (x86/arduino), dataset file paths,
        model directory, initial scale factor, the metric to maximise during
        exploration, dataset name, number of model outputs, and the model
        source format (seedot/onnx/tf).
        """
        self.algo, self.version, self.target = algo, version, target
        self.trainingFile, self.testingFile, self.modelDir = trainingFile, testingFile, modelDir
        self.sf = sf
        self.dataset = dataset
        # Maps scale factor -> metrics tuple, filled by runAll().
        self.accuracy = {}
        self.maximisingMetric = maximisingMetric
        self.numOutputs = numOutputs
        self.source = source
        self.variableSubstitutions = {} #evaluated during profiling code run
        self.scalesForX = {} #populated for multiple code generation
        self.scalesForY = {} #populated for multiple code generation
        self.problemType = config.ProblemType.default
        self.variableToBitwidthMap = {} #Populated during profiling code run
        self.sparseMatrixSizes = {} #Populated during profiling code run
        self.varDemoteDetails = [] #Populated during variable demotion in VBW mode
        self.flAccuracy = -1 #Populated during profiling code run
        self.allScales = {} #Eventually populated with scale assignments in final code
        self.demotedVarsList = [] #Populated in VBW mode after exploration completed
        self.demotedVarsOffsets = {} #Populated in VBW mode after exploration completed
def setup(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
copy_tree(os.path.join(curr_dir, "Predictor"), os.path.join(config.tempdir, "Predictor"))
for fileName in ["arduino.ino", "config.h", "predict.h"]:
srcFile = os.path.join(curr_dir, "arduino", fileName)
destFile = os.path.join(config.outdir, fileName)
shutil.copyfile(srcFile, destFile)
def get_input_file(self):
if self.source == config.Source.seedot:
return os.path.join(self.modelDir, "input.sd")
elif self.source == config.Source.onnx:
return os.path.join(self.modelDir, "input.onnx")
else:
return os.path.join(self.modelDir, "input.pb")
    # Generate the fixed-point code using the input generated from the
    # Converter project
    def compile(self, version, target, sf, generateAllFiles=True, id=None, printSwitch=-1, scaleForX=None, variableToBitwidthMap=None, demotedVarsList=[], demotedVarsOffsets={}):
        """Run the SeeDot Compiler once and harvest its scale/bitwidth outputs.

        When config.ddsEnabled, ``scaleForX`` (not ``sf``) drives scaling.
        ``id`` distinguishes one code variant among many; with id=None the
        resulting X/Y scales are stored directly, otherwise keyed by id.
        Returns True on success.
        NOTE(review): demotedVarsList/demotedVarsOffsets are mutable default
        arguments; they appear to be only read here, but verify before relying
        on that.
        """
        print("Generating code...", end='')
        if variableToBitwidthMap is None:
            variableToBitwidthMap = dict(self.variableToBitwidthMap)
        # Set input and output files
        inputFile = self.get_input_file()
        profileLogFile = os.path.join(
            config.tempdir, "Predictor", "output", "float", "profile.txt")
        logDir = os.path.join(config.outdir, "output")
        os.makedirs(logDir, exist_ok=True)
        # Log file name encodes the (absolute) scale used for this variant.
        if version == config.Version.floatt:
            outputLogFile = os.path.join(logDir, "log-float.txt")
        else:
            if config.ddsEnabled:
                outputLogFile = os.path.join(logDir, "log-fixed-" + str(abs(scaleForX)) + ".txt")
            else:
                outputLogFile = os.path.join(logDir, "log-fixed-" + str(abs(sf)) + ".txt")
        if target == config.Target.arduino:
            outdir = os.path.join(config.outdir, str(config.wordLength), self.algo, self.dataset)
            os.makedirs(outdir, exist_ok=True)
            outputDir = os.path.join(outdir)
        elif target == config.Target.x86:
            outputDir = os.path.join(config.tempdir, "Predictor")
        obj = Compiler(self.algo, version, target, inputFile, outputDir,
                       profileLogFile, sf, self.source, outputLogFile,
                       generateAllFiles, id, printSwitch, self.variableSubstitutions,
                       scaleForX,
                       variableToBitwidthMap, self.sparseMatrixSizes, demotedVarsList, demotedVarsOffsets)
        obj.run()
        # Capture the scale assignment chosen by the compiler for this variant.
        self.allScales = dict(obj.varScales)
        if version == config.Version.floatt:
            # The float (profiling) run discovers substitutions and the set of
            # independent variables whose bitwidths can later be explored.
            self.variableSubstitutions = obj.substitutions
            self.variableToBitwidthMap = dict.fromkeys(obj.independentVars, config.wordLength)
        self.problemType = obj.problemType
        if id is None:
            self.scaleForX = obj.scaleForX
            self.scaleForY = obj.scaleForY
        else:
            self.scalesForX[id] = obj.scaleForX
            self.scalesForY[id] = obj.scaleForY
        print("completed")
        return True
    # Run the converter project to generate the input files using reading the
    # training model
    def convert(self, version, datasetType, target, varsForBitwidth={}, demotedVarsOffsets={}):
        """Run the Converter to produce model/dataset input files.

        Variables listed in ``demotedVarsOffsets`` are converted at half the
        native word length.  Returns True on success, False when the
        Converter raised (the traceback is printed, not re-raised).
        NOTE(review): varsForBitwidth/demotedVarsOffsets are mutable default
        arguments; varsForBitwidth is copied before mutation, so this appears
        safe, but it is a pattern worth removing.
        """
        print("Generating input files for %s %s dataset..." %
              (version, datasetType), end='')
        # Create output dirs
        if target == config.Target.arduino:
            outputDir = os.path.join(config.outdir, "input")
            datasetOutputDir = outputDir
        elif target == config.Target.x86:
            outputDir = os.path.join(config.tempdir, "Predictor")
            datasetOutputDir = os.path.join(config.tempdir, "Predictor", "input")
        else:
            assert False
        os.makedirs(datasetOutputDir, exist_ok=True)
        os.makedirs(outputDir, exist_ok=True)
        inputFile = self.get_input_file()
        try:
            # Copy before mutating so the caller's dict is untouched.
            varsForBitwidth = dict(varsForBitwidth)
            for var in demotedVarsOffsets:
                varsForBitwidth[var] = config.wordLength // 2
            obj = Converter(self.algo, version, datasetType, target, self.source,
                            datasetOutputDir, outputDir, varsForBitwidth, self.allScales, self.numOutputs)
            obj.setInput(inputFile, self.modelDir,
                         self.trainingFile, self.testingFile)
            obj.run()
            if version == config.Version.floatt:
                self.sparseMatrixSizes = obj.sparseMatrixSizes
        except Exception as e:
            traceback.print_exc()
            return False
        print("done\n")
        return True
    # Build and run the Predictor project
    def predict(self, version, datasetType):
        """Build and run the native Predictor over the dataset.

        Returns the Predictor's execution map (code id -> metrics) or None
        on failure.
        NOTE(review): this temporarily chdirs into the Predictor directory
        and is not exception-safe — if obj.run() raises, the process is left
        in the wrong working directory.
        """
        outputDir = os.path.join("output", version)
        curDir = os.getcwd()
        os.chdir(os.path.join(config.tempdir, "Predictor"))
        obj = Predictor(self.algo, version, datasetType,
                        outputDir, self.scaleForX, self.scalesForX, self.scaleForY, self.scalesForY, self.problemType, self.numOutputs)
        execMap = obj.run()
        os.chdir(curDir)
        return execMap
# Compile and run the generated code once for a given scaling factor
def partialCompile(self, version, target, scale, generateAllFiles, id, printSwitch, variableToBitwidthMap=None, demotedVarsList=[], demotedVarsOffsets={}):
if config.ddsEnabled:
res = self.compile(version, target, None, generateAllFiles, id, printSwitch, scale, variableToBitwidthMap, demotedVarsList, demotedVarsOffsets)
else:
res = self.compile(version, target, scale, generateAllFiles, id, printSwitch, None, variableToBitwidthMap, demotedVarsList, demotedVarsOffsets)
if res == False:
return False
else:
return True
    def runAll(self, version, datasetType, codeIdToScaleFactorMap, demotedVarsToOffsetToCodeId=None, doNotSort=False):
        """Run the Predictor and record metrics for every generated code variant.

        Returns a (success, exit) pair; exit=True means prediction failed and
        the caller should abort.  Two modes:
        - ``codeIdToScaleFactorMap`` given: record metrics per scale factor
          (writing a 'res' summary file for arduino test runs);
        - otherwise ``demotedVarsToOffsetToCodeId`` maps each candidate set of
          demoted variables to an offset->code-id table; the best offset per
          set (by the configured maximising metric) is kept in
          self.varDemoteDetails.
        """
        execMap = self.predict(version, datasetType)
        if execMap == None:
            return False, True
        if self.algo == config.Algo.test:
            for codeId, sf in codeIdToScaleFactorMap.items():
                self.accuracy[sf] = execMap[str(codeId)]
                print("The 95th percentile error for sf" + str(sf) + "with respect to dataset is " + str(execMap[str(codeId)][0]) + "%.")
                print("The 95th percentile error for sf" + str(sf) + "with respect to float execution is " + str(execMap[str(codeId)][1]) + "%.")
                print("\n")
            return True,False
        if codeIdToScaleFactorMap is not None:
            for codeId, sf in codeIdToScaleFactorMap.items():
                self.accuracy[sf] = execMap[str(codeId)]
                print("Accuracy at scale factor %d is %.3f%%, Disagreement Count is %d, Reduced Disagreement Count is %d\n" % (sf, execMap[str(codeId)][0], execMap[str(codeId)][1], execMap[str(codeId)][2]))
            # For arduino test runs, persist a summary next to the generated code.
            if datasetType == config.DatasetType.testing and self.target == config.Target.arduino:
                outdir = os.path.join(config.outdir, str(config.wordLength), self.algo, self.dataset)
                os.makedirs(outdir, exist_ok=True)
                file = open(os.path.join(outdir, "res"), "w")
                file.write("Demoted Vars:\n")
                file.write(str(self.demotedVarsOffsets) if hasattr(self, 'demotedVarsOffsets') else "")
                file.write("\nAll scales:\n")
                file.write(str(self.allScales))
                file.write("\nAccuracy at scale factor %d is %.3f%%, Disagreement Count is %d, Reduced Disagreement Count is %d\n" % (sf, execMap[str(codeId)][0], execMap[str(codeId)][1], execMap[str(codeId)][2]))
                file.close()
        else:
            # Sort key: rank by the configured maximising metric, breaking
            # ties with the remaining metrics.
            def getMaximisingMetricValue(a):
                if self.maximisingMetric == config.MaximisingMetric.accuracy:
                    return (a[1][0], -a[1][1], -a[1][2])
                elif self.maximisingMetric == config.MaximisingMetric.disagreements:
                    return (-a[1][1], -a[1][2], a[1][0])
                elif self.maximisingMetric == config.MaximisingMetric.reducedDisagreements:
                    return (-a[1][2], -a[1][1], a[1][0])
            allVars = []
            for demotedVars in demotedVarsToOffsetToCodeId:
                offsetToCodeId = demotedVarsToOffsetToCodeId[demotedVars]
                print("Demoted vars: %s\n" % str(demotedVars))
                x = [(i, execMap[str(offsetToCodeId[i])]) for i in offsetToCodeId]
                x.sort(key=getMaximisingMetricValue, reverse=True)
                # Keep only the best offset for this demoted-variable set.
                allVars.append(((demotedVars, x[0][0]), x[0][1]))
                for offset in offsetToCodeId:
                    codeId = offsetToCodeId[offset]
                    print("Offset %d (Code ID %d): Accuracy %.3f%%, Disagreement Count %d, Reduced Disagreement Count %d\n" %(offset, codeId, execMap[str(codeId)][0], execMap[str(codeId)][1], execMap[str(codeId)][2]))
            if not doNotSort:
                allVars.sort(key=getMaximisingMetricValue, reverse=True)
            self.varDemoteDetails = allVars
        return True, False
# Iterate over multiple scaling factors and store their accuracies
def performSearch(self):
start, end = config.maxScaleRange
lastStageAcc = -1
fixedPointCounter = 0
while True:
fixedPointCounter += 1
if config.fixedPointVbwIteration:
print("Will compile until conversion to fixed point. Iteration %d"%fixedPointCounter)
highestValidScale = start
firstCompileSuccess = False
while firstCompileSuccess == False:
if highestValidScale == end:
print("Compilation not possible for any Scale Factor. Abort")
return False
                # Refactor and remove this try/catch block in the future
try:
firstCompileSuccess = self.partialCompile(config.Version.fixed, config.Target.x86, highestValidScale, True, None, 0, dict(self.variableToBitwidthMap), list(self.demotedVarsList), dict(self.demotedVarsOffsets))
except:
firstCompileSuccess = False
if firstCompileSuccess:
break
highestValidScale -= 1
lowestValidScale = end + 1
firstCompileSuccess = False
while firstCompileSuccess == False:
try:
firstCompileSuccess = self.partialCompile(config.Version.fixed, config.Target.x86, lowestValidScale, True, None, 0, dict(self.variableToBitwidthMap), list(self.demotedVarsList), dict(self.demotedVarsOffsets))
except:
firstCompileSuccess = False
if firstCompileSuccess:
break
lowestValidScale += 1
#Ignored
self.partialCompile(config.Version.fixed, config.Target.x86, lowestValidScale, True, None, -1, dict(self.variableToBitwidthMap), list(self.demotedVarsList), dict(self.demotedVarsOffsets))
# The iterator logic is as follows:
# Search begins when the first valid scaling factor is found (runOnce returns True)
# Search ends when the execution fails on a particular scaling factor (runOnce returns False)
# This is the window where valid scaling factors exist and we
# select the one with the best accuracy
numCodes = highestValidScale - lowestValidScale + 1
codeId = 0
codeIdToScaleFactorMap = {}
for i in range(highestValidScale, lowestValidScale - 1, -1):
if config.ddsEnabled:
print("Testing with DDS and scale of X as " + str(i))
else:
print("Testing with max scale factor of " + str(i))
codeId += 1
try:
compiled = self.partialCompile(
config.Version.fixed, config.Target.x86, i, False, codeId, -1 if codeId != numCodes else codeId, dict(self.variableToBitwidthMap), list(self.demotedVarsList), dict(self.demotedVarsOffsets))
except: #If some code in the middle fails to compile
codeId -=1
continue
if compiled == False:
return False
codeIdToScaleFactorMap[codeId] = i
res, exit = self.runAll(config.Version.fixed, config.DatasetType.training, codeIdToScaleFactorMap)
if exit == True or res == False:
return False
print("\nSearch completed\n")
print("----------------------------------------------")
print("Best performing scaling factors with accuracy, disagreement, reduced disagreement:")
self.sf = self.getBestScale()
if self.accuracy[self.sf][0] != lastStageAcc:
lastStageAcc = self.accuracy[self.sf][0]
elif config.fixedPointVbwIteration:
print("No difference in iteration %d Stage 2 and iteration %d Stage 1. Stopping search"%(fixedPointCounter-1, fixedPointCounter))
break
if config.vbwEnabled:
assert config.ddsEnabled, "Currently VBW on maxscale not supported"
if config.wordLength != 16:
assert False, "VBW mode only supported if native bitwidth is 16"
print("Scales computed in native bitwidth. Starting exploration over other bitwidths.")
attemptToDemote = [var for var in self.variableToBitwidthMap if (var[-3:] != "val" and var not in self.demotedVarsList)]
numCodes = 3 * len(attemptToDemote) + (6 if 'X' in attemptToDemote else 0) # 9 offsets tried | |
targetfiletype = 'image'
argvalues = {}
arginfo = []
argBox = None
    def __init__(self, parent, uiProfile, scModel, targetfiletype, end_im, name, description=None):
        """
        Modal dialog for capturing an operation description on a model edge.

        :param parent: Tk parent window.
        :param uiProfile: UI profile configuration object.
        :param scModel: project model the description belongs to.
        :param targetfiletype: file type produced by the operation.
        :param end_im: resulting image of the operation.
        :param name: window title.
        :param description: existing Modification to edit, or None for a new one.
        @type scModel: ImageProjectModel
        """
        self.dir = scModel.get_dir()
        self.uiProfile = uiProfile
        self.end_im = end_im
        self.start_im = scModel.startImage()
        self.parent = parent
        self.scModel = scModel
        self.sourcefiletype = scModel.getStartType()
        self.targetfiletype = targetfiletype
        # Pre-fill argument values when editing an existing description.
        self.argvalues = description.arguments if description is not None else {}
        self.description = description if description is not None else Modification('', '')
        self.softwareLoader = SoftwareLoader()
        Toplevel.__init__(self, parent)
        self.withdraw()  # remain invisible for now
        # If the master is not viewable, don't
        # make the child transient, or else it
        # would be opened withdrawn
        if parent.winfo_viewable():
            self.transient(parent)
        self.title(name)
        self.parent = parent
        self.result = None
        body = Frame(self)
        self.initial_focus = self.body(body)
        self.buttonbox()
        body.pack(padx=5, pady=5)
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Position the dialog slightly offset from the parent window.
        if self.parent is not None:
            self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
                                      parent.winfo_rooty() + 50))
        self.deiconify()  # become visible now
        self.initial_focus.focus_set()
        # wait for window to appear on screen before calling grab_set
        self.wait_visibility()
        self.grab_set()
        # Block until the dialog is closed (modal behaviour).
        self.wait_window(self)
def destroy(self):
'''Destroy the window'''
self.initial_focus = None
Toplevel.destroy(self)
def newsoftware(self, event):
sname = self.e4.get()
self.e5.set_completion_list(self.softwareLoader.get_versions(sname,software_type=self.sourcefiletype),
initialValue=self.softwareLoader.get_preferred_version(name=sname))
    def buildArgBox(self, opname):
        """
        Rebuild the parameter PropertyFrame for the operation *opname*.

        Converts each (name, definition) tuple in self.arginfo into a
        ProjectProperty, seeding the current value from self.argvalues.
        Sets self.argBox as a side effect and returns None.
        """
        if self.argBox is not None:
            # Remove the frame built for the previously selected operation.
            self.argBox.destroy()
        properties = [ProjectProperty(name=argumentTuple[0],
                                      description=argumentTuple[0],
                                      information=argumentTuple[1]['description'] if 'description' in argumentTuple[1] else '',
                                      type=argumentTuple[1]['type'],
                                      values=argumentTuple[1]['values'] if 'values' in argumentTuple[1] else [],
                                      value=self.argvalues[argumentTuple[0]] if argumentTuple[
                                          0] in self.argvalues else None) \
                      for argumentTuple in self.arginfo]
        self.argBox= PropertyFrame(self.argBoxMaster, properties,
                                   propertyFunction=EdgePropertyFunction(properties, self.scModel),
                                   changeParameterCB=self.changeParameter,
                                   extra_args={'end_im': self.end_im,
                                               'start_im':self.start_im,
                                               'model': self.scModel,
                                               'op': opname},
                                   dir=self.dir)
        self.argBox.grid(row=self.argBoxRow, column=0, columnspan=2, sticky=E + W)
        self.argBox.grid_propagate(1)
def newcommand(self, event):
op = getOperationWithGroups(self.e2.get())
self.arginfo = []
if op is not None:
for k, v in op.mandatoryparameters.iteritems():
if 'source' in v and v['source'] != self.sourcefiletype:
continue
if 'target' in v and v['target'] != self.targetfiletype:
continue
self.arginfo.append((k, v))
for k, v in op.optionalparameters.iteritems():
if 'source' in v and v['source'] != self.sourcefiletype:
continue
if 'target' in v and v['target'] != self.targetfiletype:
continue
self.arginfo.append((k, v))
self.buildArgBox(self.e2.get())
if self.okButton is not None:
self.okButton.config(state=ACTIVE if self.__checkParams() else DISABLED)
def organizeOperationsByCategory(self):
return getOperationsByCategoryWithGroups(self.sourcefiletype, self.targetfiletype)
def newcategory(self, event):
opByCat = self.organizeOperationsByCategory()
if self.e1.get() in opByCat:
oplist = opByCat[self.e1.get()]
self.e2.set_completion_list(oplist)
self.newcommand(event)
else:
self.e2.set_completion_list([])
def group_remove(self):
self.listbox.delete(ANCHOR)
def group_add(self):
d = SelectDialog(self, "Set Semantic Group", 'Select a semantic group for these operations.',
getSemanticGroups())
res = d.choice
if res is not None:
self.listbox.insert(END,res)
def listBoxHandler(self,evt):
# Note here that Tkinter passes an event object to onselect()
w = evt.widget
x = w.winfo_rootx()
y = w.winfo_rooty()
if w.curselection() is not None and len(w.curselection()) > 0:
index = int(w.curselection()[0])
self.group_to_remove = index
try:
self.popup.tk_popup(x, y, 0)
finally:
# make sure to release the grab (Tk 8.0a1 only)
self.popup.grab_release()
    def body(self, master):
        """
        Lay out the dialog widgets: preview canvas, category/operation pickers,
        description text, software pickers, semantic-group list and the
        parameter box; returns the widget that should take initial focus.
        """
        self.okButton = None
        # 250x250 preview of the resulting image.
        self.photo = ImageTk.PhotoImage(fixTransparency(imageResize(self.end_im, (250, 250))).toPIL())
        self.c = Canvas(master, width=250, height=250)
        self.c.create_image(125, 125, image=self.photo, tag='imgd')
        self.c.grid(row=0, column=0, columnspan=2)
        Label(master, text="Category:").grid(row=1, sticky=W)
        Label(master, text="Operation:").grid(row=2, sticky=W)
        # self.attachImage = ImageTk.PhotoImage(file="icons/question.png")
        self.b = Button(master, bitmap='info', text="Help", command=self.help, borderwidth=0, relief=FLAT)
        self.b.grid(row=2, column=3)
        Label(master, text="Description:").grid(row=3, sticky=W)
        Label(master, text="Software Name:").grid(row=4, sticky=W)
        Label(master, text="Software Version:").grid(row=5, sticky=W)
        #Label(master, text='Semantic Groups:', anchor=W, justify=LEFT).grid(row=6, column=0)
        # Right-click style popup offering Add/Remove of semantic groups.
        self.popup = Menu(master, tearoff=0)
        self.popup.add_command(label="Add", command=self.group_add)
        self.popup.add_command(label="Remove",command=self.group_remove) #
        self.collapseFrame = Accordion(master) #,height=100,width=100)
        self.groupFrame = Chord(self.collapseFrame,title='Semantic Groups' )
        self.gscrollbar = Scrollbar(self.groupFrame, orient=VERTICAL)
        self.listbox = Listbox(self.groupFrame, yscrollcommand=self.gscrollbar.set,height=3)
        self.listbox.config(yscrollcommand=self.gscrollbar.set)
        self.listbox.bind("<<ListboxSelect>>", self.listBoxHandler)
        self.listbox.grid(row=0, column=0,columnspan=3,sticky=E+W)
        self.gscrollbar.config(command=self.listbox.yview)
        # NOTE(review): 'stick' relies on Tcl's unique-abbreviation matching
        # of the 'sticky' option - consider spelling it out.
        self.gscrollbar.grid(row=0, column=1, stick=N + S)
        self.collapseFrame.append_chords([self.groupFrame])
        self.collapseFrame.grid(row=6,column=0,columnspan=3,sticky=W)
        row = 8
        Label(master, text='Parameters:', anchor=W, justify=LEFT).grid(row=row, column=0, columnspan=2)
        row += 1
        self.argBoxRow = row
        self.argBoxMaster = master
        # NOTE(review): buildArgBox() sets self.argBox itself and returns
        # None, so this assignment resets self.argBox to None - verify the
        # intended behavior (the stale frame is then never destroy()ed).
        self.argBox = self.buildArgBox(None)
        row += 1
        cats = self.organizeOperationsByCategory()
        catlist = list(cats.keys())
        catlist.sort()
        oplist = cats[catlist[0]] if len(cats) > 0 else []
        self.e1 = MyDropDown(master, catlist, command=self.newcategory)
        self.e2 = MyDropDown(master, oplist, command=self.newcommand)
        self.e4 = MyDropDown(master, sorted(self.softwareLoader.get_names(self.sourcefiletype), key=str.lower), command=self.newsoftware)
        self.e5 = AutocompleteEntryInText(master, values=[], takefocus=False, width=40)
        self.e1.bind("<Return>", self.newcategory)
        self.e1.bind("<<ComboboxSelected>>", self.newcategory)
        self.e2.bind("<Return>", self.newcommand)
        self.e2.bind("<<ComboboxSelected>>", self.newcommand)
        self.e4.bind("<Return>", self.newsoftware)
        self.e4.bind("<<ComboboxSelected>>", self.newsoftware)
        self.e3 = Text(master, height=2, width=40, font=('Times', '14'), relief=RAISED, borderwidth=2)
        self.e1.grid(row=1, column=1, sticky=EW)
        self.e2.grid(row=2, column=1, sticky=EW)
        self.e3.grid(row=3, column=1, sticky=EW)
        self.e4.grid(row=4, column=1, sticky=EW)
        self.e5.grid(row=5, column=1)
        # Pre-populate the widgets from an existing description, when editing.
        if self.description is not None:
            if self.description.semanticGroups is not None:
                pos = 1
                for grp in self.description.semanticGroups:
                    self.listbox.insert(pos,grp)
                    pos += 1
            if (self.description.inputMaskName is not None):
                self.inputMaskName = self.description.inputMaskName
            if self.description.operationName is not None and len(self.description.operationName) > 0:
                selectCat = getCategory(self.description)
                self.e1.set_completion_list(catlist, initialValue=selectCat)
                oplist = cats[selectCat] if selectCat in cats else []
                self.e2.set_completion_list(oplist, initialValue=self.description.operationName)
                if (self.description.additionalInfo is not None):
                    self.e3.delete(1.0, END)
                    self.e3.insert(1.0, self.description.additionalInfo)
                self.newcommand(None)
            if self.description.software is not None:
                self.e4.set_completion_list(sorted(self.softwareLoader.get_names(self.sourcefiletype), key=str.lower),
                                            initialValue=self.description.software.name)
                self.e5.set_completion_list(sorted(self.softwareLoader.get_versions(self.description.software.name,
                                                                                    software_type=self.sourcefiletype,
                                                                                    version=self.description.software.version)),
                                            initialValue=self.description.software.version)
            else:
                # No prior software: fall back to the loader's preferences.
                self.e4.set_completion_list(sorted(self.softwareLoader.get_names(self.sourcefiletype), key=str.lower),
                                            initialValue=self.softwareLoader.get_preferred_name())
                self.e5.set_completion_list(
                    sorted(self.softwareLoader.get_versions(self.softwareLoader.get_preferred_name(),
                                                            software_type=self.sourcefiletype)),
                    initialValue=self.softwareLoader.get_preferred_version(self.softwareLoader.get_preferred_name()))
        return self.e1  # initial focus
def __getinfo(self,name):
for k,v in self.arginfo:
if k == name:
return v
return None
def __checkParams(self):
ok = True
for k,v in self.argvalues.iteritems():
info = self.__getinfo(k)
if info is None:
continue
cv,error = checkValue(k,info['type'],v)
if v is not None and cv is None:
ok = False
ok &= checkMandatory(self.e2.get(),self.sourcefiletype,self.targetfiletype,self.argvalues)
return ok
def buttonbox(self):
box = Frame(self)
self.okButton = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE,
state=ACTIVE if self.__checkParams() else DISABLED)
self.okButton.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Escape>", self.cancel)
box.pack(side=BOTTOM)
def ok(self, event=None):
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
self.cancel()
def changeParameter(self, name, type, value):
self.argvalues[name] = value
if name == 'inputmaskname' and value is not None:
self.inputMaskName = value
if self.okButton is not None:
self.okButton.config(state=ACTIVE if self.__checkParams() else DISABLED)
def help(self):
op = getOperationWithGroups(self.e2.get())
if op is not None:
tkMessageBox.showinfo(op.name, op.description if op.description is not None and len(
op.description) > 0 else 'No description')
def cancel(self):
if self.cancelled:
self.description = None
# put focus back to the parent window
if self.parent is not None:
self.parent.focus_set()
self.destroy()
    def apply(self):
        """
        Commit widget state into self.description and persist new software.

        Called from ok(); clears the cancelled flag so cancel() keeps the
        description alive.
        """
        self.cancelled = False
        self.description.setOperationName(self.e2.get())
        self.description.setAdditionalInfo(self.e3.get(1.0, END).strip())
        # NOTE(review): self.inputMaskName is only assigned in body() /
        # changeParameter() when a mask exists - confirm a default is defined
        # upstream, otherwise this may raise AttributeError.
        self.description.setInputMaskName(self.inputMaskName)
        self.description.semanticGroups = list(self.listbox.get(0,END))
        # Persist only non-empty values belonging to the current operation.
        self.description.setArguments(
            {k: v for (k, v) in self.argvalues.iteritems() if v is not None and len(str(v)) > 0 and (k in [x[0] for x in self.arginfo])})
        self.description.setSoftware(Software(self.e4.get(), self.e5.get()))
        if (self.softwareLoader.add(self.description.software)):
            self.softwareLoader.save()
class ItemDescriptionCaptureDialog(Toplevel):
    """
    Edit properties of a graph item (node, edge, etc.)

    Modal dialog: construction blocks in wait_window() until the user hits
    OK, Cancel, or closes the window.  On OK the edited values remain in
    self.argvalues; on a true cancel self.argvalues is set to None.
    """
    cancelled = True
    # NOTE: argvalues is re-created per instance in __init__; keeping the
    # class attribute only for backward compatibility.
    argvalues = {}
    argBox = None

    def __init__(self, parent, dictionary, properties, name):
        """
        :param parent: parent frame
        :param dictionary: current values of the items to inspect/edit
        :param properties: definitions (type/values/description) of the items
        :param name: title of window
        """
        self.parent = parent
        self.properties = properties
        # BUG FIX: argvalues was a shared class-level dict, so edits leaked
        # across dialog instances; seed a fresh per-instance dict instead.
        self.argvalues = {}
        for prop_name in self.properties:
            if prop_name in dictionary:
                self.argvalues[prop_name] = dictionary[prop_name]
        Toplevel.__init__(self, parent)
        self.withdraw()  # remain invisible while widgets are laid out
        # If the master is not viewable, don't make the child transient,
        # or else it would be opened withdrawn.
        if parent.winfo_viewable():
            self.transient(parent)
        self.title(name)
        self.result = None
        body = Frame(self)
        self.initial_focus = self.body(body)
        self.buttonbox()
        body.pack(padx=5, pady=5)
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        if self.parent is not None:
            # Offset slightly from the parent's top-left corner.
            self.geometry("+%d+%d" % (parent.winfo_rootx() + 50,
                                      parent.winfo_rooty() + 50))
        self.deiconify()  # become visible now
        self.initial_focus.focus_set()
        # wait for window to appear on screen before calling grab_set
        self.wait_visibility()
        self.grab_set()
        self.wait_window(self)

    def destroy(self):
        '''Destroy the window, releasing the focus reference first.'''
        self.initial_focus = None
        Toplevel.destroy(self)

    def buildArgBox(self, opname):
        """
        (Re)build the PropertyFrame showing one row per property.

        Sets self.argBox and returns it so callers can use it for focus.
        """
        if self.argBox is not None:
            self.argBox.destroy()
        disp_properties = [ProjectProperty(name=prop_name,
                                           description=prop_name,
                                           information=prop_def['description'],
                                           type=prop_def['type'],
                                           values=prop_def['values'] if 'values' in prop_def else [],
                                           value=self.argvalues[prop_name] if prop_name in self.argvalues else None) \
                           for prop_name, prop_def in self.properties.iteritems()]
        self.argBox = PropertyFrame(self.argBoxMaster, disp_properties,
                                    propertyFunction=NodePropertyFunction(self.argvalues),
                                    changeParameterCB=self.changeParameter,
                                    dir='.')
        self.argBox.grid(row=self.argBoxRow, column=0, columnspan=2, sticky=E + W)
        self.argBox.grid_propagate(1)
        # BUG FIX: the original returned None, so body()'s assignment
        # clobbered self.argBox and the initial focus fell back to the dialog.
        return self.argBox

    def body(self, master):
        """Create the dialog body; return the widget given initial focus."""
        self.okButton = None
        row = 0
        Label(master, text='Parameters:', anchor=W, justify=LEFT).grid(row=row, column=0, columnspan=2)
        row += 1
        self.argBoxRow = row
        self.argBoxMaster = master
        self.argBox = self.buildArgBox(None)
        row += 1
        return self.argBox  # initial focus

    def buttonbox(self):
        """Add the standard OK/Cancel button row."""
        box = Frame(self)
        self.okButton = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE,
                               state=ACTIVE)
        self.okButton.pack(side=LEFT, padx=5, pady=5)
        w = Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=LEFT, padx=5, pady=5)
        self.bind("<Escape>", self.cancel)
        box.pack(side=BOTTOM)

    def ok(self, event=None):
        """Accept the edits: hide the window, apply, then close."""
        self.withdraw()
        self.update_idletasks()
        try:
            self.apply()
        finally:
            self.cancel()

    def changeParameter(self, name, type, value):
        """PropertyFrame callback: record an edited property value."""
        self.argvalues[name] = value

    def cancel(self):
        """Close the dialog; a true cancel discards the collected values."""
        if self.cancelled:
            self.argvalues = None
        # put focus back to the parent window
        if self.parent is not None:
            self.parent.focus_set()
        self.destroy()

    def apply(self):
        """Mark the dialog accepted so cancel() preserves argvalues."""
        self.cancelled = False
class DescriptionViewDialog(tkSimpleDialog.Dialog):
description = None
metadiff = None
metaBox = None
def __init__(self, parent, scModel, name, description=None, metadiff=None):
"""
:param parent:
:param scModel:
:param im: end image
:param name:
:param description:
:param metadiff:
| |
<reponame>Jay4C/API
import unittest
from mollie.api.client import Client
import requests
# https://github.com/mollie/mollie-api-python
# https://docs.mollie.com/reference/v2/payments-api/create-payment
class UnitTestsPaymentsAPI(unittest.TestCase):
    """Exercises the Mollie v2 Payments API via the official Python client.

    These are integration probes that print raw responses; a real API key
    must be supplied to set_api_key() before running.
    """

    # https://docs.mollie.com/reference/v2/payments-api/create-payment
    def test_create_payment(self):
        print('test_create_payment')
        mollie_client = Client()
        mollie_client.set_api_key('')
        payment = mollie_client.payments.create({
            'amount': {
                'currency': 'EUR',
                'value': '10.00'
            },
            'description': 'Order #12345',
            # BUG FIX: redirectUrl/webhookUrl were swapped.  The customer is
            # redirected to the order page; Mollie POSTs the status change to
            # the webhook endpoint.
            'redirectUrl': 'https://webshop.example.org/order/12345/',
            'webhookUrl': 'https://webshop.example.org/payments/webhook/',
            'locale': 'string optional',
            'method': 'string array optional',
            'metadata': {
                'order_id': '12345'
            },
            'sequenceType': 'string optional',
            'customerId': 'string optional',
            'mandateId': 'string optional',
            'restrictPaymentMethodsToCountry': 'string optional',
            # Payment method-specific parameters for Credit Card
            'billingAddress': {
                'streetAndNumber': 'string optional',
                'postalCode': 'string optional',
                'city': 'string optional',
                'region': 'string optional',
                'country': 'string optional'
            },
            'cardToken': 'string optional',
            'shippingAddress': {
                'streetAndNumber': 'string optional',
                'postalCode': 'string optional',
                'city': 'string optional',
                'region': 'string optional',
                'country': 'string optional'
            },
            # Access token parameters
            'profileId': 'string required',
            'testmode': 'boolean optional',
            # Mollie Connect parameters
            'applicationFee': {
                'amount': {
                    'currency': 'string required',
                    'value': 'string required'
                },
                'description': 'string required'
            },
            'routing': [
                {
                    'amount': {
                        'currency': 'string required',
                        'value': 'string required'
                    },
                    'destination': {
                        'type': 'string required',
                        'organizationId': 'string optional'
                    },
                    'releaseDate': 'date optional'
                }
            ]
        })
        # NOTE(review): mollie resources are dict-like; confirm that .json()
        # exists on this client version.
        response = payment.json()
        print(str(response))

    # https://docs.mollie.com/reference/v2/payments-api/get-payment
    def test_get_payment(self):
        print('test_get_payment')
        mollie_client = Client()
        mollie_client.set_api_key('')
        payment = mollie_client.payments.get('tr_WDqYK6vllg')
        response = payment.json()
        print(str(response))

    # https://docs.mollie.com/reference/v2/payments-api/update-payment
    def test_update_payment(self):
        print('test_update_payment')
        mollie_client = Client()
        mollie_client.set_api_key('')
        payment = mollie_client.payments.update("tr_7UhSN1zuXS", {
            'description': 'Order #98765',
            # BUG FIX: redirectUrl/webhookUrl were swapped (see create test).
            'redirectUrl': 'https://webshop.example.org/order/98765/',
            'webhookUrl': 'https://webshop.example.org/payments/webhook/',
            'metadata': {'order_id': '98765'}
        })
        response = payment.json()
        print(str(response))

    # https://docs.mollie.com/reference/v2/payments-api/cancel-payment
    def test_cancel_payment(self):
        print('test_cancel_payment')
        mollie_client = Client()
        mollie_client.set_api_key('')
        canceled_payment = mollie_client.payments.delete('tr_WDqYK6vllg')
        response = canceled_payment.json()
        print(str(response))

    # https://docs.mollie.com/reference/v2/payments-api/list-payments
    def test_list_payments(self):
        print('test_list_payments')
        mollie_client = Client()
        mollie_client.set_api_key('')
        # get the first page
        payments = mollie_client.payments.list()
        response = payments.json()
        print(str(response))
class UnitTestsMethodsAPI(unittest.TestCase):
    """Exercises the Mollie v2 Methods API via the official Python client."""

    # https://docs.mollie.com/reference/v2/methods-api/list-methods
    def test_list_payment_methods(self):
        print('test_list_payment_methods')
        client = Client()
        client.set_api_key('')
        # Methods enabled for the Payments API
        methods = client.methods.list()
        # Methods for the Orders API
        # methods = client.methods.list(resource='orders')
        print(str(methods.json()))

    # https://docs.mollie.com/reference/v2/methods-api/list-all-methods
    def test_list_all_payment_methods(self):
        print('test_list_all_payment_methods')
        client = Client()
        client.set_api_key('')
        # Every method, including ones not enabled on the profile
        methods = client.methods.all()
        # Methods for the Orders API
        # methods = client.methods.list(resource='orders')
        print(str(methods.json()))

    # https://docs.mollie.com/reference/v2/methods-api/get-method
    def test_get_payment_method(self):
        print('test_get_payment_method')
        client = Client()
        client.set_api_key('')
        ideal = client.methods.get('ideal', include='issuers,pricing')
        print(str(ideal.json()))
class UnitTestsRefundsAPI(unittest.TestCase):
    """Exercises the Mollie v2 Refunds API via the official Python client."""

    # https://docs.mollie.com/reference/v2/refunds-api/create-refund
    def test_create_payment_refund(self):
        print('test_create_payment_refund')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        refund = client.payment_refunds.on(target).create({
            'amount': {
                'value': '5.95',
                'currency': 'EUR'
            }
        })
        print(refund.json())

    # https://docs.mollie.com/reference/v2/refunds-api/get-refund
    def test_get_payment_refund(self):
        print('test_get_payment_refund')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        refund = client.payment_refunds.on(target).get('re_4qqhO89gsT')
        print(refund.json())

    # https://docs.mollie.com/reference/v2/refunds-api/cancel-refund
    def test_cancel_payment_refund(self):
        print('test_cancel_payment_refund')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        cancelled = client.payment_refunds.on(target).delete('re_4qqhO89gsT')
        print(cancelled.json())

    # https://docs.mollie.com/reference/v2/refunds-api/list-refunds
    def test_list_refunds(self):
        print('test_list_refunds')
        client = Client()
        client.set_api_key('')
        refunds = client.payments.get('tr_WDqYK6vllg').refunds
        print(refunds.json())
class UnitTestsChargebacksAPI(unittest.TestCase):
    """Exercises the Mollie v2 Chargebacks API via the official Python client."""

    # https://docs.mollie.com/reference/v2/chargebacks-api/get-chargeback
    def test_get_chargeback(self):
        print('test_get_chargeback')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        chargeback = client.payment_chargebacks.on(target).get('chb_n9z0tp')
        print(chargeback.json())

    # https://docs.mollie.com/reference/v2/chargebacks-api/list-chargebacks
    def test_list_chargebacks(self):
        print('test_list_chargebacks')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        print(target.chargebacks.json())
class UnitTestsCapturesAPI(unittest.TestCase):
    """Exercises the Mollie v2 Captures API via the official Python client."""

    # https://docs.mollie.com/reference/v2/captures-api/get-capture
    def test_get_capture(self):
        print('test_get_capture')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        capture = client.captures.on(target).get('cpt_4qqhO89gsT')
        print(capture.json())

    # https://docs.mollie.com/reference/v2/captures-api/list-captures
    def test_list_captures(self):
        print('test_list_captures')
        client = Client()
        client.set_api_key('')
        target = client.payments.get('tr_WDqYK6vllg')
        captures = client.captures.on(target).list()
        print(captures.json())
class UnitTestsPaymentLinksAPI(unittest.TestCase):
    """Exercises the Mollie v2 Payment links API with raw HTTP requests.

    NOTE: a real bearer token must be placed in the Authorization headers.
    """

    # https://docs.mollie.com/reference/v2/payment-links-api/create-payment-link
    def test_create_payment_link(self):
        print('test_create_payment_link')
        headers = {
            'Authorization': 'Bearer ',
        }
        data = {
            'amount[currency]': 'EUR',
            'amount[value]': '24.95',
            'description': 'Bicycle tires',
            # BUG FIX: the '+' of the UTC offset was lost ("...11:00:00 00:00"),
            # producing an invalid ISO 8601 timestamp.
            'expiresAt': '2021-06-06T11:00:00+00:00',
            'redirectUrl': 'https://webshop.example.org/thanks',
            'webhookUrl': 'https://webshop.example.org/payment-links/webhook/'
        }
        response = requests.post('https://api.mollie.com/v2/payment-links', headers=headers, data=data)
        print(response)

    # https://docs.mollie.com/reference/v2/payment-links-api/get-payment-link
    def test_get_payment_link(self):
        print('test_get_payment_link')
        headers = {
            'Authorization': 'Bearer ',
        }
        response = requests.get('https://api.mollie.com/v2/payment-links/pl_4Y0eZitmBnQ6IDoMqZQKh', headers=headers)
        print(response)

    # https://docs.mollie.com/reference/v2/payment-links-api/list-payment-links
    def test_list_payment_links(self):
        print('test_list_payment_links')
        headers = {
            'Authorization': 'Bearer ',
        }
        params = (
            ('limit', '5'),
        )
        response = requests.get('https://api.mollie.com/v2/payment-links', headers=headers, params=params)
        print(response)
class UnitTestsOrdersAPI(unittest.TestCase):
    """Exercises the Mollie v2 Orders API with raw HTTP requests.

    NOTE: a real bearer token must be placed in the Authorization headers.
    The <EMAIL>/<NAME> placeholders are redactions in the sample payloads.
    """
    # https://docs.mollie.com/reference/v2/orders-api/create-order
    def test_create_order(self):
        """POST a full two-line LEGO sample order (doc example payload)."""
        print("test_create_order")
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "amount": { "value": "1027.99", "currency": "EUR" }, "billingAddress": ' \
               '{ "organizationName": "Mollie B.V.", "streetAndNumber": "Keizersgracht 126", ' \
               '"city": "Amsterdam", "region": "Noord-Holland", "postalCode": "1234AB", ' \
               '"country": "NL", "title": "Dhr", "givenName": "Piet", "familyName": ' \
               '"Mondriaan", "email": "<EMAIL>", "phone": "+31208202070" }, ' \
               '"shippingAddress": { "organizationName": "<NAME>.V.", "streetAndNumber": ' \
               '"Prinsengracht 126", "streetAdditional": "4th floor", "city": "Haarlem", ' \
               '"region": "Noord-Holland", "postalCode": "5678AB", "country": "NL", "title": ' \
               '"Mr", "givenName": "Chuck", "familyName": "Norris", "email": ' \
               '"<EMAIL>" }, "metadata": { "order_id": "1337", ' \
               '"description": "Lego cars" }, "consumerDateOfBirth": "1958-01-31", "locale": ' \
               '"nl_NL", "orderNumber": "1337", "redirectUrl": "https://example.org/redirect", ' \
               '"webhookUrl": "https://example.org/webhook", "method": "klarnapaylater", ' \
               '"lines": [ { "type": "physical", "category": "gift", "sku": "5702016116977", ' \
               '"name": "LEGO 42083 Bugatti Chiron", "productUrl": ' \
               '"https://shop.lego.com/nl-NL/Bugatti-Chiron-42083", "imageUrl": ' \
               '"https://sh-s7-live-s.legocdn.com/is/image//LEGO/42083_alt1?$main$", ' \
               '"metadata": { "order_id": "1337", "description": "Bugatti Chiron" }, ' \
               '"quantity": 2, "vatRate": "21.00", "unitPrice": { "currency": "EUR", "value": ' \
               '"399.00" }, "totalAmount": { "currency": "EUR", "value": "698.00" }, ' \
               '"discountAmount": { "currency": "EUR", "value": "100.00" }, "vatAmount": ' \
               '{ "currency": "EUR", "value": "121.14" } }, { "type": "physical", "category": ' \
               '"gift", "sku": "5702015594028", "name": "LEGO 42056 Porsche 911 GT3 RS", ' \
               '"productUrl": "https://shop.lego.com/nl-NL/Porsche-911-GT3-RS-42056", "imageUrl": ' \
               '"https://sh-s7-live-s.legocdn.com/is/image/LEGO/42056?$PDPDefault$", "quantity": 1, ' \
               '"vatRate": "21.00", "unitPrice": { "currency": "EUR", "value": "329.99" }, "totalAmount": ' \
               '{ "currency": "EUR", "value": "329.99" }, "vatAmount": { "currency": "EUR", ' \
               '"value": "57.27" } } ] }'
        response = requests.post('https://api.mollie.com/v2/orders', headers=headers, data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/get-order
    def test_get_order(self):
        """GET one order, embedding its payments and refunds."""
        print('test_get_order')
        headers = {
            'Authorization': 'Bearer ',
        }
        params = (
            ('embed', 'payments,refunds'),
        )
        response = requests.get('https://api.mollie.com/v2/orders'
                                '/ord_kEn1PlbGa', headers=headers, params=params)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/update-order
    def test_update_order(self):
        """PATCH the billing address of an existing order."""
        print('test_update_order')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "billingAddress": { "organizationName": "<NAME>.", "streetAndNumber": ' \
               '"Keizersgracht 126", "city": "Amsterdam", "region": "Noord-Holland", ' \
               '"postalCode": "1234AB", "country": "NL", "title": "Dhr", "givenName": ' \
               '"Piet", "familyName": "Mondriaan", "email": "<EMAIL>", "phone": ' \
               '"+31208202070" } }'
        response = requests.patch('https://api.mollie.com/v2/orders/ord_kEn1PlbGa', headers=headers, data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/update-orderline
    def test_update_order_line(self):
        """PATCH a single order line's product data and amounts."""
        print('test_update_order_line')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "name": "LEGO 71043 Hogwarts\u2122 Castle", "productUrl": ' \
               '"https://shop.lego.com/en-GB/product/Hogwarts-Castle-71043", ' \
               '"imageUrl": "https://sh-s7-live-s.legocdn.com/is/image//LEGO/71043_alt1?$main$",' \
               ' "quantity": 2, "vatRate": "21.00", "unitPrice": { "currency": "EUR", "value": "349.00" }, ' \
               '"totalAmount": { "currency": "EUR", "value": "598.00" }, "discountAmount": ' \
               '{ "currency": "EUR", "value": "100.00" }, "vatAmount": { "currency": "EUR", ' \
               '"value": "103.79" } }'
        response = requests.patch('https://api.mollie.com/v2/orders/ord_pbjz8x/lines/odl_dgtxyl', headers=headers,
                                  data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/cancel-order
    def test_cancel_order(self):
        """DELETE (cancel) an entire order."""
        print('test_cancel_order')
        headers = {
            'Authorization': 'Bearer ',
        }
        response = requests.delete('https://api.mollie.com/v2/orders/ord_8wmqcHMN4U', headers=headers)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/list-orders
    def test_list_orders(self):
        """GET the first page of orders."""
        print('test_list_orders')
        headers = {
            'Authorization': 'Bearer ',
        }
        response = requests.get('https://api.mollie.com/v2/orders', headers=headers)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/cancel-order-lines
    def test_cancel_order_lines(self):
        """DELETE specific lines (or quantities) from an order."""
        print('test_cancel_order_lines')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "lines": [ { "id": "odl_dgtxyl", "quantity": 1 }, { "id": "odl_jp31jz" } ] }'
        response = requests.delete('https://api.mollie.com/v2/orders/ord_8wmqcHMN4U'
                                   '/lines', headers=headers, data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/create-order-payment
    def test_create_order_payment(self):
        """POST an additional payment attempt for an order."""
        print('test_create_order_payment')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "method": "banktransfer" }'
        response = requests.post('https://api.mollie.com/v2/orders/ord_stTC2WHAuS'
                                 '/payments', headers=headers, data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/create-order-refund
    def test_create_order_refund(self):
        """POST a partial refund for one order line."""
        print('test_create_order_refund')
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ',
        }
        data = '{ "lines": [ { "id": "odl_dgtxyl", "quantity": 1 } ], ' \
               '"description": "Required quantity not in stock, refunding one photo book.", ' \
               '"metadata": { "bookkeeping_id": 12345 } }'
        response = requests.post('https://api.mollie.com/v2/orders/ord_stTC2WHAuS'
                                 '/refunds', headers=headers, data=data)
        print(response)
    # https://docs.mollie.com/reference/v2/orders-api/list-order-refunds
    def test_list_order_refunds(self):
        """GET every refund attached to an order."""
        print('test_list_order_refunds')
        headers = {
            'Authorization': 'Bearer ',
        }
        response = requests.get('https://api.mollie.com/v2/orders/ord_pbjz8x/refunds', headers=headers)
        print(response)
class UnitTestsShipmentsAPI(unittest.TestCase):
# https://docs.mollie.com/reference/v2/shipments-api/create-shipment
def test_create_shipment(self):
print('test_create_shipment')
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ',
}
data = {
'{ "lines": [ { "id": "odl_dgtxyl", "quantity": 1 }, { "id": "odl_jp31jz" } ], '
'"tracking": { "carrier": "PostNL", "code": "3SKABA000000000", '
'"url": "http://postnl.nl/tracktrace/?B': '3SKABA000000000',
'P': '1015CW',
'D': 'NL',
'T': 'C" } }'
}
response = requests.post('https://api.mollie.com/v2/orders'
'/ord_kEn1PlbGa/shipments', headers=headers, data=data)
print(response)
# https://docs.mollie.com/reference/v2/shipments-api/get-shipment
def test_get_shipment(self):
print('test_get_shipment')
headers = {
'Authorization': 'Bearer ',
}
response = requests.get('https://api.mollie.com/v2/orders/ord_kEn1PlbGa/shipments/shp_3wmsgCJN4U',
headers=headers)
print(response)
# https://docs.mollie.com/reference/v2/shipments-api/list-shipments
def test_list_shipments(self):
print('test_list_shipments')
headers = {
'Authorization': 'Bearer ',
}
response = requests.get('https://api.mollie.com/v2/order/ord_kEn1PlbGa/shipments', headers=headers)
print(response)
# https://docs.mollie.com/reference/v2/shipments-api/update-shipment
def test_update_shipment(self):
print('test_update_shipment')
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ',
}
data = {
'{ "tracking": { "carrier": "PostNL", "code": "3SKABA000000000", '
'"url": "http://postnl.nl/tracktrace/?B': '3SKABA000000000',
| |
truncated tunnel information, we still would
# like to know. This is due to the special tunnel handling
# needed for port matching.
#
result[RecvUpcall.get_ovs_key_attr_str(nla_type)] = bytes()
else:
result[RecvUpcall.get_ovs_key_attr_str(nla_type)] = nla_data
if dump:
print("{}nla_len {}, nla_type {}[{}], data: {}{}".format(
' ' * indent, nla_len,
RecvUpcall.get_ovs_key_attr_str(nla_type),
nla_type,
"".join("{:02x} ".format(b) for b in nla_data), trunc))
if trunc != "":
if dump:
print("{}WARN: decode truncated; nla_len > msg_len[{}] ".
format(' ' * indent, bytes_left))
break
# Update next offset, but make sure it's aligned correctly.
next_offset = (nla_len + 3) & ~(3)
msg = msg[next_offset:]
bytes_left -= next_offset
return result
def get_ovs_key_attr_str(attr):
ovs_key_attr = ["OVS_KEY_ATTR_UNSPEC",
"OVS_KEY_ATTR_ENCAP",
"OVS_KEY_ATTR_PRIORITY",
"OVS_KEY_ATTR_IN_PORT",
"OVS_KEY_ATTR_ETHERNET",
"OVS_KEY_ATTR_VLAN",
"OVS_KEY_ATTR_ETHERTYPE",
"OVS_KEY_ATTR_IPV4",
"OVS_KEY_ATTR_IPV6",
"OVS_KEY_ATTR_TCP",
"OVS_KEY_ATTR_UDP",
"OVS_KEY_ATTR_ICMP",
"OVS_KEY_ATTR_ICMPV6",
"OVS_KEY_ATTR_ARP",
"OVS_KEY_ATTR_ND",
"OVS_KEY_ATTR_SKB_MARK",
"OVS_KEY_ATTR_TUNNEL",
"OVS_KEY_ATTR_SCTP",
"OVS_KEY_ATTR_TCP_FLAGS",
"OVS_KEY_ATTR_DP_HASH",
"OVS_KEY_ATTR_RECIRC_ID",
"OVS_KEY_ATTR_MPLS",
"OVS_KEY_ATTR_CT_STATE",
"OVS_KEY_ATTR_CT_ZONE",
"OVS_KEY_ATTR_CT_MARK",
"OVS_KEY_ATTR_CT_LABELS",
"OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4",
"OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6",
"OVS_KEY_ATTR_NSH"]
if attr < 0 or attr > len(ovs_key_attr):
return "<UNKNOWN>"
return ovs_key_attr[attr]
def handle_event(event):
#
# For us, only upcalls with a packet, flow_key, and upcall_type
# DPIF_UC_MISS are of interest.
#
if event.pkt_size <= 0 or event.key_size <= 0 or \
event.upcall_type != 0:
return
if event.key_size < options.flow_key_size:
key_len = event.key_size
else:
key_len = options.flow_key_size
if event.pkt_size < options.packet_size:
pkt_len = event.pkt_size
else:
pkt_len = options.packet_size
try:
event = RecvUpcall(event.ts, event.pid, event.comm.decode("utf-8"),
event.cpu, event.dpif_name.decode("utf-8"),
bytes(event.key)[:key_len],
bytes(event.pkt)[:pkt_len],
event.pkt_size)
except LookupError:
return None
if not options.quiet:
print(event)
return event
#
# op_flow_execute event class
#
class OpFlowExecute(Event):
    """Event emitted when a flow's actions are executed on a packet."""

    def __init__(self, ts, pid, comm, cpu, pkt, pkt_len):
        super(OpFlowExecute, self).__init__(ts, pid, comm, cpu,
                                            EventType.OP_FLOW_EXECUTE)
        self.pkt = pkt          # truncated packet bytes
        self.pkt_len = pkt_len  # original (untruncated) packet length

    def __str__(self):
        """Render the event in the shared one-line tabular format."""
        return "[{:<22}] {:<16} {:8} [{:03}] {:18.9f}: " \
               "{:<17} {:4} {:4}".format(EventType.short_name(self.event_type),
                                         self.comm,
                                         self.pid,
                                         self.cpu,
                                         self.ts / 1000000000,
                                         "",
                                         "",
                                         self.pkt_len)

    def handle_event(event):
        """Build an OpFlowExecute from a raw BPF event; drop empty packets."""
        # Clamp the captured packet to the configured size.
        pkt_len = min(event.pkt_size, options.packet_size)
        pkt_data = bytes(event.pkt)[:pkt_len]
        if len(pkt_data) <= 0 or event.pkt_size == 0:
            return
        event = OpFlowExecute(event.ts, event.pid, event.comm.decode("utf-8"),
                              event.cpu, pkt_data, event.pkt_size)
        if not options.quiet:
            print(event)
        return event
#
# get_dp_mapping()
#
def get_dp_mapping(dp, port, return_map=False, dp_map=None):
    """Map a datapath name plus port name to the datapath port number.

    The parsed output of "ovs-appctl dpctl/show" is cached on the function
    object, so the external command only runs once per process.

    Args:
        dp: Datapath name as reported by dpctl/show (without "system@").
        port: Port (netdev) name to look up.
        return_map: If True, return the full {dp: {port: number}} map.
        dp_map: Optional pre-built map used to seed/replace the cache.

    Returns:
        The port number, the whole map when return_map is set, or None
        when the dp/port combination is unknown.
    """
    if options.unit_test:
        return port

    if dp_map is not None:
        get_dp_mapping.dp_port_map_cache = dp_map

    #
    # Build a cache, so we do not have to execute the ovs command each time.
    #
    if not hasattr(get_dp_mapping, "dp_port_map_cache"):
        try:
            output = subprocess.check_output(['ovs-appctl', 'dpctl/show'],
                                             encoding='utf8').split("\n")
        except subprocess.CalledProcessError:
            # Fix: use an empty list rather than "" (iterating a string
            # yields characters, not lines) and drop the dead `pass`.
            output = []

        current_dp = None
        get_dp_mapping.dp_port_map_cache = {}

        for line in output:
            match = re.match("^system@(.*):$", line)
            if match is not None:
                current_dp = match.group(1)

            match = re.match("^ port ([0-9]+): ([^ /]*)", line)
            if match is not None and current_dp is not None:
                # setdefault() replaces the original try/except KeyError
                # dance for first-seen datapaths.
                get_dp_mapping.dp_port_map_cache.setdefault(
                    current_dp, {})[match.group(2)] = int(match.group(1))

    if return_map:
        return get_dp_mapping.dp_port_map_cache

    if dp not in get_dp_mapping.dp_port_map_cache or \
       port not in get_dp_mapping.dp_port_map_cache[dp]:
        return None

    return get_dp_mapping.dp_port_map_cache[dp][port]
#
# event_to_dict()
#
def event_to_dict(event):
    """Serialize a ctypes event structure into a plain {field: value} dict.

    Integer and bytes fields are copied verbatim; other (array) fields are
    converted to bytes.  Zero-sized "key"/"pkt" fields become empty bytes
    instead of a dump of the full, unused backing buffer.
    """
    result = {}
    for name, _ in event._fields_:
        value = getattr(event, name)
        if isinstance(value, (int, bytes)):
            result[name] = value
        elif (name == "key" and event.key_size == 0) or \
             (name == "pkt" and event.pkt_size == 0):
            result[name] = bytes()
        else:
            result[name] = bytes(value)
    return result
#
# receive_event_bcc()
#
def receive_event_bcc(ctx, data, size):
    """BCC perf-buffer callback: decode the raw event and dispatch it.

    Args:
        ctx:  Opaque callback context supplied by BCC (unused).
        data: Pointer to the raw event data.
        size: Size of the raw data (unused; the event layout is fixed).
    """
    global events_received
    events_received += 1
    # Decode the raw buffer into the ctypes structure; `b` is presumably
    # the global BCC BPF object owning the 'events' buffer — defined
    # elsewhere in this file.
    event = b['events'].event(data)
    if export_file is not None:
        # Record the raw event so a capture can be replayed later.
        export_file.write("event = {}\n".format(event_to_dict(event)))
    receive_event(event)
#
# receive_event()
#
def receive_event(event):
    """Dispatch a decoded event to its type-specific handler and count it.

    Events are tallied in the global event_count dict ('total' for all
    received, 'valid' for those a handler accepted); accepted events are
    appended to the global trace_data list.
    """
    global event_count

    # Map the trace event id to its handler; ids 2 and 4 have no
    # specialized class and use the generic Event handler.
    if event.event == 0:
        trace_event = RecvUpcall.handle_event(event)
    elif event.event == 1:
        trace_event = DpUpcall.handle_event(event)
    elif event.event == 2:
        trace_event = Event.handle_event(event)
    elif event.event == 3:
        trace_event = OpFlowExecute.handle_event(event)
    elif event.event == 4:
        trace_event = Event.handle_event(event)
    else:
        # Fix: unknown event ids previously left trace_event unbound,
        # raising NameError below instead of being counted and skipped.
        trace_event = None

    try:
        event_count['total'][EventType.from_trace(event.event)] += 1
    except KeyError:
        event_count['total'][EventType.from_trace(event.event)] = 1
        event_count['valid'][EventType.from_trace(event.event)] = 0

    if trace_event is not None:
        event_count['valid'][EventType.from_trace(event.event)] += 1
        trace_data.append(trace_event)
#
# collect_event_sets()
#
def collect_event_sets(events, collect_stats=False, profile=False,
spinner=False):
t1_time = 0
def t1_start():
nonlocal t1_time
t1_time = process_time()
def t1_stop(description):
print("* PROFILING: {:<50}: {:.06f} seconds".format(
description, process_time() - t1_time))
warn_parcial_match = False
warn_frag = False
if profile:
t1_start()
#
# First let's create a dict of per handler thread events.
#
threads = {}
threads_result = {}
for idx, event in enumerate(events):
if event.event_type == EventType.DP_UPCALL:
continue
if event.pid not in threads:
threads[event.pid] = []
threads[event.pid].append([idx, event])
if profile:
t1_stop("Creating per thread dictionary")
t1_start()
#
# Now spit them in per upcall sets, but remember that
# RecvUpcall event can be batched.
#
batch_stats = []
for thread, items in threads.items():
thread_set = []
batch = []
ovs_pkt_exec_set = []
batching = True
collecting = 0
has_flow_put = False
has_flow_exec = False
def next_batch():
nonlocal batching, batch, collecting, has_flow_put, has_flow_exec
nonlocal ovs_pkt_exec_set, thread_set
if len(batch) > 0:
#
# If we are done with the batch, see if we need to match up
# any batched OVS_PKT_EXEC events.
#
for event in batch:
if len(ovs_pkt_exec_set) <= 0:
break
if any(isinstance(item,
OpFlowExecute) for item in event[2]):
event[2].append(ovs_pkt_exec_set.pop(0))
#
# Append the batch to the thread-specific set.
#
thread_set = thread_set + batch
if collect_stats:
batch_stats.append(len(batch))
batching = True
batch = []
ovs_pkt_exec_set = []
has_flow_put = False
has_flow_exec = False
collecting = 0
def next_batch_set():
nonlocal has_flow_put, has_flow_exec, collecting
has_flow_put = False
has_flow_exec = False
collecting += 1
for item in items:
idx, event = item
if batching:
if event.event_type == EventType.RECV_UPCALL:
batch.append(item + [[]])
elif len(batch) > 0:
batching = False
collecting = 0
else:
continue
if not batching:
if event.event_type == EventType.RECV_UPCALL:
next_batch()
batch.append(item + [[]])
else:
if event.event_type == EventType.OP_FLOW_PUT:
if has_flow_put:
next_batch_set()
if collecting >= len(batch):
next_batch()
continue
batch[collecting][2].append(item[1])
has_flow_put = True
elif event.event_type == EventType.OP_FLOW_EXECUTE:
if has_flow_exec:
next_batch_set()
if collecting >= len(batch):
next_batch()
continue
if (event.pkt_len == batch[collecting][1].pkt_len
and event.pkt == batch[collecting][1].pkt):
batch[collecting][2].append(item[1])
has_flow_put = True
has_flow_exec = True
else:
#
# If we end up here it could be that an upcall in a
# batch did not generate an EXECUTE and we are out
# of sync. Try to match it to the next batch entry.
#
next_idx = collecting + 1
while True:
if next_idx >= len(batch):
next_batch()
break
if (event.pkt_len == batch[next_idx][1].pkt_len
and event.pkt == batch[next_idx][1].pkt):
batch[next_idx][2] = batch[collecting][2]
batch[collecting][2] = []
collecting = next_idx
batch[collecting][2].append(item[1])
has_flow_put = True
has_flow_exec = True
break
next_idx += 1
elif event.event_type == EventType.OVS_PKT_EXEC:
#
# The OVS_PKT_EXEC might also be batched, so we keep
# them in a separate list and assign them to the
# correct set when completing the set.
#
ovs_pkt_exec_set.append(item[1])
continue
if collecting >= len(batch):
next_batch()
next_batch()
threads_result[thread] = thread_set
if profile:
t1_stop("Creating upcall sets")
t1_start()
#
# Move thread results from list to dictionary
#
thread_stats = {}
for thread, sets in threads_result.items():
if len(sets) > 0:
thread_stats[sets[0][1].comm] = len(sets)
threads_result[thread] = {}
for upcall in sets:
threads_result[thread][upcall[0]] = [upcall[1]] + upcall[2]
if profile:
t1_stop("Moving upcall list to dictionary")
t1_start()
if options.debug & 0x4000000 != 0:
print()
for thread, sets in threads_result.items():
for idx, idx_set in sets.items():
print("DBG: {}".format(idx_set))
#
# Create two lists on with DP_UPCALLs and RECV_UPCALLs
#
dp_upcall_list = []
recv_upcall_list = []
for idx, event in enumerate(events):
if event.event_type == EventType.DP_UPCALL:
dp_upcall_list.append([idx, event])
elif event.event_type == EventType.RECV_UPCALL:
recv_upcall_list.append([idx, event])
if profile:
t1_stop("Creating DP_UPCALL and RECV_UPCALL lists")
t1_start()
if options.debug & 0x4000000 != 0:
print()
for dp_upcall in dp_upcall_list:
print("DBG: {}".format(dp_upcall))
print()
for recv_upcall in recv_upcall_list:
print("DBG: {}".format(recv_upcall))
#
# Now find the matching DP_UPCALL and RECV_UPCALL events
#
event_sets = []
if spinner:
print()
with alive_bar(len(dp_upcall_list),
title="- Matching DP_UPCALLs to RECV_UPCALLs",
spinner=None, disable=not spinner) as bar:
for (idx, event) in dp_upcall_list:
remove_indexes = []
this_set = None
#
# TODO: This part needs some optimization, as it's slow in the
# PVP test scenario. This is because a lot of DP_UPCALLS
# will not have a matching RECV_UPCALL leading to walking
# the entire recv_upcall_list list.
#
# Probably some dictionary, but in the PVP scenario packets
# come from a limited set of ports, and the length is all the
# same. So we do need the key to be recv.dport +
# len(recv.pkt) + recv.pkt, however, the recv.pkt compare
# needs to happen on min(len(event.pkt), len(recv.pkt)).
#
for idx_in_list, (idx_recv, recv) in enumerate(recv_upcall_list):
match = False
if idx_recv < idx:
| |
# repo: vamshi98/salt-formulas
# -*- coding: utf-8 -*-
'''
Module for returning various status data about a minion.
These data can be useful for compiling into stats later.
'''
from __future__ import absolute_import
# Import python libs
import os
import re
import fnmatch
import collections
from salt.ext.six.moves import range
# Import salt libs
import salt.utils
from salt.utils.network import remote_port_tcp as _remote_port_tcp
from salt.utils.network import host_to_ip as _host_to_ip
import salt.utils.event
import salt.config
__opts__ = {}
# TODO: Make this module support windows hosts
def __virtual__():
    '''
    Only load this module on non-Windows minions.
    '''
    return not salt.utils.is_windows()
def _number(text):
'''
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
'''
if text.isdigit():
return int(text)
try:
return float(text)
except ValueError:
return text
def procs():
    '''
    Return the process data

    CLI Example:

    .. code-block:: bash

        salt '*' status.procs
    '''
    ret = {}
    plines = __salt__['cmd.run'](__grains__['ps']).splitlines()
    header = plines.pop(0).split()

    def _col(*names):
        # Column positions differ between ps implementations; fall back
        # to column 0 when no known header name is present.
        for name in names:
            if name in header:
                return header.index(name)
        return 0

    user_col = _col('USER', 'UID')
    pid_col = _col('PID')
    cmd_col = _col('COMMAND', 'CMD')

    for line in plines:
        if not line:
            continue
        fields = line.split()
        ret[fields[pid_col]] = {'user': fields[user_col],
                                'cmd': ' '.join(fields[cmd_col:])}
    return ret
def custom():
    '''
    Return a custom composite of status data and info for this minion,
    based on the minion config file. An example config like might be::

        status.cpustats.custom: [ 'cpu', 'ctxt', 'btime', 'processes' ]

    Where status refers to status.py, cpustats is the function
    where we get our data, and custom is this function It is followed
    by a list of keys that we want returned.

    This function is meant to replace all_status(), which returns
    anything and everything, which we probably don't want.

    By default, nothing is returned. Warning: Depending on what you
    include, there can be a LOT here!

    CLI Example:

    .. code-block:: bash

        salt '*' status.custom
    '''
    ret = {}
    conf = __salt__['config.dot_vals']('status')
    for key, val in conf.items():
        # Look the function up in this module's namespace instead of
        # building a string and eval()-ing it: eval on config-derived
        # strings is an injection risk and harder to debug.
        func = globals()[key.split('.')[1]]
        vals = func()
        for item in val:
            ret[item] = vals[item]
    return ret
def uptime():
    '''
    Return the uptime for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.uptime
    '''
    # Defer to the system's uptime(1); output format is platform-defined.
    cmd = 'uptime'
    return __salt__['cmd.run'](cmd)
def loadavg():
    '''
    Return the load averages for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.loadavg
    '''
    # os.getloadavg() yields the 1, 5 and 15 minute averages in order.
    return dict(zip(('1-min', '5-min', '15-min'), os.getloadavg()))
def cpustats():
    '''
    Return the CPU stats for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.cpustats
    '''
    def linux_cpustats():
        '''
        linux specific implementation of cpustats
        '''
        procf = '/proc/stat'
        if not os.path.isfile(procf):
            return {}
        # Fix: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with salt.utils.fopen(procf, 'r') as fp_:
            stats = fp_.read().splitlines()
        ret = {}
        for line in stats:
            if not line:
                continue
            comps = line.split()
            if comps[0] == 'cpu':
                # /proc/stat cpu column order: user nice system idle
                # iowait irq softirq steal (matches the indices below).
                ret[comps[0]] = {'idle': _number(comps[4]),
                                 'iowait': _number(comps[5]),
                                 'irq': _number(comps[6]),
                                 'nice': _number(comps[2]),
                                 'softirq': _number(comps[7]),
                                 'steal': _number(comps[8]),
                                 'system': _number(comps[3]),
                                 'user': _number(comps[1])}
            elif comps[0] == 'intr':
                ret[comps[0]] = {'total': _number(comps[1]),
                                 'irqs': [_number(x) for x in comps[2:]]}
            elif comps[0] == 'softirq':
                ret[comps[0]] = {'total': _number(comps[1]),
                                 'softirqs': [_number(x) for x in comps[2:]]}
            else:
                ret[comps[0]] = _number(comps[1])
        return ret

    def freebsd_cpustats():
        '''
        freebsd specific implementation of cpustats
        '''
        vmstat = __salt__['cmd.run']('vmstat -P').splitlines()
        vm0 = vmstat[0].split()
        cpu0loc = vm0.index('cpu0')
        vm1 = vmstat[1].split()
        usloc = vm1.index('us')
        vm2 = vmstat[2].split()
        cpuctr = 0
        ret = {}
        # Each CPU contributes three columns (us/sy/id) after cpu0loc.
        for cpu in vm0[cpu0loc:]:
            ret[cpu] = {'us': _number(vm2[usloc + 3 * cpuctr]),
                        'sy': _number(vm2[usloc + 1 + 3 * cpuctr]),
                        'id': _number(vm2[usloc + 2 + 3 * cpuctr]), }
            cpuctr += 1
        return ret

    # dict that return a function that does the right thing per platform
    get_version = {
        'Linux': linux_cpustats,
        'FreeBSD': freebsd_cpustats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
def meminfo():
    '''
    Return the memory info for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.meminfo
    '''
    def linux_meminfo():
        '''
        linux specific implementation of meminfo
        '''
        procf = '/proc/meminfo'
        if not os.path.isfile(procf):
            return {}
        # Fix: use a context manager so the handle is closed promptly
        # rather than leaked until garbage collection.
        with salt.utils.fopen(procf, 'r') as fp_:
            stats = fp_.read().splitlines()
        ret = {}
        for line in stats:
            if not line:
                continue
            comps = line.split()
            comps[0] = comps[0].replace(':', '')
            ret[comps[0]] = {
                'value': comps[1],
            }
            # Most entries carry a unit (e.g. 'kB') as a third column.
            if len(comps) > 2:
                ret[comps[0]]['unit'] = comps[2]
        return ret

    def freebsd_meminfo():
        '''
        freebsd specific implementation of meminfo
        '''
        sysctlvm = __salt__['cmd.run']('sysctl vm').splitlines()
        sysctlvm = [x for x in sysctlvm if x.startswith('vm')]
        sysctlvm = [x.split(':') for x in sysctlvm]
        sysctlvm = [[y.strip() for y in x] for x in sysctlvm]
        sysctlvm = [x for x in sysctlvm if x[1]]  # If x[1] not empty
        ret = {}
        for line in sysctlvm:
            ret[line[0]] = line[1]
        # Special handling for vm.total as it's especially important
        sysctlvmtot = __salt__['cmd.run']('sysctl -n vm.vmtotal').splitlines()
        sysctlvmtot = [x for x in sysctlvmtot if x]
        ret['vm.vmtotal'] = sysctlvmtot
        return ret

    # dict that return a function that does the right thing per platform
    get_version = {
        'Linux': linux_meminfo,
        'FreeBSD': freebsd_meminfo,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
def cpuinfo():
    '''
    Return the CPU info for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.cpuinfo
    '''
    def linux_cpuinfo():
        '''
        linux specific cpuinfo implementation
        '''
        procf = '/proc/cpuinfo'
        if not os.path.isfile(procf):
            return {}
        # Fix: close the file deterministically instead of leaking the
        # handle until garbage collection.
        with salt.utils.fopen(procf, 'r') as fp_:
            stats = fp_.read().splitlines()
        ret = {}
        for line in stats:
            if not line:
                continue
            comps = line.split(':')
            comps[0] = comps[0].strip()
            if comps[0] == 'flags':
                # The flags line is a space separated list of features.
                ret[comps[0]] = comps[1].split()
            else:
                ret[comps[0]] = comps[1].strip()
        return ret

    def freebsd_cpuinfo():
        '''
        freebsd specific cpuinfo implementation
        '''
        freebsd_cmd = 'sysctl hw.model hw.ncpu'
        ret = {}
        for line in __salt__['cmd.run'](freebsd_cmd).splitlines():
            if not line:
                continue
            comps = line.split(':')
            comps[0] = comps[0].strip()
            ret[comps[0]] = comps[1].strip()
        return ret

    # dict that returns a function that does the right thing per platform
    get_version = {
        'Linux': linux_cpuinfo,
        'FreeBSD': freebsd_cpuinfo,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
def diskstats():
    '''
    Return the disk stats for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.diskstats
    '''
    def linux_diskstats():
        '''
        linux specific implementation of diskstats
        '''
        procf = '/proc/diskstats'
        if not os.path.isfile(procf):
            return {}
        # Fix: use a context manager so the handle is closed promptly
        # rather than leaked until garbage collection.
        with salt.utils.fopen(procf, 'r') as fp_:
            stats = fp_.read().splitlines()
        ret = {}
        for line in stats:
            if not line:
                continue
            comps = line.split()
            # NOTE: 'device' stores comps[2] (the device name); _number()
            # passes non-numeric strings through unchanged.
            ret[comps[2]] = {'major': _number(comps[0]),
                             'minor': _number(comps[1]),
                             'device': _number(comps[2]),
                             'reads_issued': _number(comps[3]),
                             'reads_merged': _number(comps[4]),
                             'sectors_read': _number(comps[5]),
                             'ms_spent_reading': _number(comps[6]),
                             'writes_completed': _number(comps[7]),
                             'writes_merged': _number(comps[8]),
                             'sectors_written': _number(comps[9]),
                             'ms_spent_writing': _number(comps[10]),
                             'io_in_progress': _number(comps[11]),
                             'ms_spent_in_io': _number(comps[12]),
                             'weighted_ms_spent_in_io': _number(comps[13])}
        return ret

    def freebsd_diskstats():
        '''
        freebsd specific implementation of diskstats
        '''
        ret = {}
        iostat = __salt__['cmd.run']('iostat -xzd').splitlines()
        header = iostat[1]
        for line in iostat[2:]:
            comps = line.split()
            ret[comps[0]] = {}
            for metric, value in zip(header.split()[1:], comps[1:]):
                ret[comps[0]][metric] = _number(value)
        return ret

    # dict that return a function that does the right thing per platform
    get_version = {
        'Linux': linux_diskstats,
        'FreeBSD': freebsd_diskstats,
    }

    errmsg = 'This method is unsupported on the current operating system!'
    return get_version.get(__grains__['kernel'], lambda: errmsg)()
def diskusage(*args):
'''
Return the disk usage for this minion
Usage::
salt '*' status.diskusage [paths and/or filesystem types]
CLI Example:
.. code-block:: bash
salt '*' status.diskusage # usage for all filesystems
salt '*' status.diskusage / /tmp # usage for / and /tmp
salt '*' status.diskusage ext? # usage for ext[234] filesystems
salt '*' status.diskusage / ext? # usage for / and all ext filesystems
'''
selected = set()
fstypes = set()
if not args:
# select all filesystems
fstypes.add('*')
else:
for arg in args:
if arg.startswith('/'):
# select path
selected.add(arg)
else:
# select fstype
fstypes.add(arg)
if fstypes:
# determine which mount points host the specified fstypes
regex = re.compile(
'|'.join(
fnmatch.translate(fstype).format('(%s)') for fstype in fstypes
)
)
# ifile source of data varies with OS, otherwise all the same
if __grains__['kernel'] == 'Linux':
procf = '/proc/mounts'
if not os.path.isfile(procf):
return {}
ifile = salt.utils.fopen(procf, 'r').readlines()
elif __grains__['kernel'] == 'FreeBSD':
ifile = __salt__['cmd.run']('mount -p').splitlines()
for line in ifile:
comps = line.split()
if len(comps) >= 3:
mntpt = comps[1]
fstype = comps[2]
if regex.match(fstype):
selected.add(mntpt)
# query the filesystems disk | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for merge composite."""
import tvm
from tvm import relay, tir
from tvm.relay.dataflow_pattern import TupleGetItemPattern, is_op, wildcard
from tvm.relay.testing import run_opt_pass
"""
The merge composite pass is designed to merge multiple relay operators, that
match a given pattern, and combine them into a single relay function.
For example suppose we have the graph:
conv2d
| (merge composite pass)
bias_add ====> conv2d_bias_relu
| (our target)
relu
Our Relay IR before the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%data, %kernel, kernel_size=[1, 1])
/* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
Our Relay IR after the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%2 = fn (%x: Tensor[(1, 512, 28, 28), float32], %y: Tensor[(256, 512, 1, 1), float32],
%z: Tensor[(256), float32], Primitive=1, Composite="conv2d_bias_relu") ->
Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%x, %y, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %z) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
};
%2(%data, %kernel, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
As you can see in the second relay example, the pattern we specified has been wrapped
in a function. The function is then called, producing the same result as the first relay
example.
One convenient use for this pass is to offload multiple operators to a single external
codegen function.
"""
def make_add_sub_mul_pattern():
    r"""Create a pattern to match the following graph.

     add  sub
      \   /
       \ /
       mul
    """
    x = wildcard()
    y = wildcard()
    add_node = x + y
    sub_node = x - y
    return add_node * sub_node
def make_add_relu_pattern():
    r"""Create a pattern to match the following graph.

      add
       |
     relu
    """
    return is_op('nn.relu')(wildcard() + wildcard())
def make_conv_bias_relu_pattern():
    r"""Create a pattern to match the following graph.

      conv2d
         |
     bias_add
         |
       relu
    """
    conv_node = is_op('nn.conv2d')(wildcard(), wildcard())
    bias_node = is_op('nn.bias_add')(conv_node, wildcard())
    return is_op('nn.relu')(bias_node)
def make_pattern_with_optional():
    r"""Create a pattern to match the following graph. Note that relu is optional.

      conv2d
         |
     bias_add
         |
      (relu)
    """
    conv_node = is_op('nn.conv2d')(wildcard(), wildcard())
    bias_node = is_op('nn.bias_add')(conv_node, wildcard())
    return bias_node.optional(lambda inp: is_op('nn.relu')(inp))
def make_add_add_add_pattern():
    r"""Create a pattern to match the following graph.

    Useful for testing re-using a call node.

        x    y
      /  \  /
      |  add
       \  | \
        add |
         | /
        add
    """
    x = wildcard()
    y = wildcard()
    first_add = is_op('add')(x, y)
    second_add = is_op('add')(x, first_add)
    return is_op('add')(second_add, first_add)
def make_bn_relu_pattern():
    r"""Create a pattern to match the following graph.

       batch_norm
           |
     TupleGetItem(0)
           |
         relu
    """
    bn_node = is_op('nn.batch_norm')(wildcard(), wildcard(), wildcard(),
                                     wildcard(), wildcard())
    # batch_norm returns a tuple; the pattern selects its first output.
    bn_out = TupleGetItemPattern(bn_node, 0)
    return is_op('nn.relu')(bn_out)
def check_result(pattern_table, graph, expected_graph):
    """Run MergeComposite over `graph` and compare with `expected_graph`.

    Raises an AssertionError when the merged graph contains free
    variables or is not structurally equal to the expected graph.
    """
    merged = run_opt_pass(graph, relay.transform.MergeComposite(pattern_table))
    assert not relay.analysis.free_vars(merged), \
        "Found free vars in the result graph: {0}".format(str(merged))
    expected = run_opt_pass(expected_graph, relay.transform.InferType())
    assert tvm.ir.structural_equal(merged, expected, map_free_vars=True), \
        "Graph mismatch: output vs. expected\n{0}\n=====\n{1}".format(
            str(merged), str(expected))
def test_simple_merge():
    r"""Test composite function is correctly produced from simple graph.

    We could expect the pattern `make_add_relu_pattern` to be merged
    into a single op `add_relu`.

        a  b
        \ /               a  b
        add    ====>      \ /
         |              add_relu
        relu
    """
    pattern_table = [
        ("add_relu", make_add_relu_pattern())
    ]

    def before():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))
        return relay.Function([a, b], relay.nn.relu(relay.add(a, b)))

    def expected():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))

        # add_relu function
        in_1 = relay.var('in_1', shape=(10, 10))
        in_2 = relay.var('in_2', shape=(10, 10))
        add_relu = relay.Function([in_1, in_2],
                                  relay.nn.relu(relay.add(in_1, in_2)))
        add_relu = add_relu.with_attr("Composite", "add_relu")
        add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")

        # merged function
        return relay.Function([a, b], relay.Call(add_relu, [a, b]))

    check_result(pattern_table, before(), expected())
def test_branch_merge():
    r"""Test composite function is correctly produced from branching graph.

    We would expect the pattern `make_add_sub_mul_pattern` to be merged
    into a single op `add_sub_mul`.  The pattern occurs twice, so the
    pass must produce two separate composite function instances.

       a  b  a  b
        \/    \/
        add  sub                       a  b
         \   /                          \/
          \ /                      add_sub_mul
          mul     c    |                |
          /  \    \    |                |
       c /  c |    ====>           add_sub_mul
        \/   \|                         |
        add  sub                        |
         \   /                        relu
          \ /
          mul
           |
           |
          relu
    """
    pattern_table = [
        ("add_sub_mul", make_add_sub_mul_pattern())
    ]
    def before():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))
        c = relay.var('c', shape=(10, 10))
        # First occurrence of the pattern, on (a, b).
        add_node = relay.add(a, b)
        sub_node = relay.subtract(a, b)
        mul_node = relay.multiply(add_node, sub_node)
        # Second occurrence, on (c, result of the first).
        add_node_2 = relay.add(c, mul_node)
        sub_node_2 = relay.subtract(c, mul_node)
        mul_node_2 = relay.multiply(add_node_2, sub_node_2)
        r = relay.nn.relu(mul_node_2)
        return relay.Function([a, b, c], r)
    def expected():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))
        c = relay.var('c', shape=(10, 10))
        # add_sub_mul function
        in_1 = relay.var('in_1', shape=(10, 10))
        in_2 = relay.var('in_2', shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        sub_node = relay.subtract(in_1, in_2)
        mul_node = relay.multiply(add_node, sub_node)
        add_sub_mul = relay.Function([in_1, in_2], mul_node)
        add_sub_mul = add_sub_mul.with_attr("Composite", "add_sub_mul")
        add_sub_mul = add_sub_mul.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
        # add_sub_mul1 function
        in_3 = relay.var('in_3', shape=(10, 10))
        in_4 = relay.var('in_4', shape=(10, 10))
        add_node_1 = relay.add(in_3, in_4)
        sub_node_1 = relay.subtract(in_3, in_4)
        mul_node_1 = relay.multiply(add_node_1, sub_node_1)
        add_sub_mul_1 = relay.Function([in_3, in_4], mul_node_1)
        add_sub_mul_1 = add_sub_mul_1.with_attr("Composite", "add_sub_mul")
        add_sub_mul_1 = add_sub_mul_1.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
        # merged function: the two composites are chained, then relu'd.
        m_add_sub_mul_1 = relay.Call(add_sub_mul, [a, b])
        m_add_sub_mul_2 = relay.Call(add_sub_mul_1, [c, m_add_sub_mul_1])
        r = relay.nn.relu(m_add_sub_mul_2)
        return relay.Function([a, b, c], r)
    check_result(pattern_table, before(), expected())
def test_reuse_call_merge():
    r"""Test composite function is correctly produced from simple graph
    which re-uses call nodes.

    We could expect the pattern `make_add_add_add` to be merged
    into a single op `add_add_add`.

        x     y
         \   / \
          sub  |           x     y
        /  |  /  \          \   / |
        |  add    ====>      sub  |
         \  |  \              \   |
          add   |          add_add_add
           |   /
          add
    """
    pattern_table = [
        ("add_add_add", make_add_add_add_pattern())
    ]

    def before():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))
        diff = relay.subtract(a, b)
        # pattern
        inner = relay.add(diff, b)
        outer = relay.add(diff, inner)
        return relay.Function([a, b], relay.add(outer, inner))

    def expected():
        a = relay.var('a', shape=(10, 10))
        b = relay.var('b', shape=(10, 10))

        # add_add_add composite function
        in_1 = relay.var('in_1', shape=(10, 10))
        in_2 = relay.var('in_2', shape=(10, 10))
        add_one = relay.add(in_1, in_2)
        add_two = relay.add(in_1, add_one)
        add_three = relay.add(add_two, add_one)
        add_add_add = relay.Function([in_1, in_2], add_three)
        add_add_add = add_add_add.with_attr("Composite", "add_add_add")
        add_add_add = add_add_add.with_attr("PartitionedFromPattern",
                                            "add_add_add_")

        # merged function
        diff = relay.subtract(a, b)
        call = relay.Call(add_add_add, [diff, b])
        return relay.Function([a, b], call)

    check_result(pattern_table, before(), expected())
def test_multiple_patterns():
r"""Test different patterns are merged correctly in the graph.
We would expect the pattern `make_conv_bias_relu_pattern` to be merged
into a single op `conv_bias_relu`. We would also expect `make_add_relu_pattern`
to be merged into a single op `add_relu`.
data kernel
\ /
\ /
conv2d data kernel bias
| \ | /
| bias conv2d_bias_relu
| / |
bias_add ====> | a
| | /
relu a add_relu
\ / |
add | b
| | /
relu b mul
| /
mul
"""
pattern_table = [
("conv2d_bias_relu", make_conv_bias_relu_pattern()),
("add_relu", make_add_relu_pattern())
]
def before():
data | |
# repo: kiss2u/google-research
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines standard networks layers that train using variational dropout."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from state_of_sparsity.layers.utils import layer_utils
from state_of_sparsity.layers.variational_dropout import common
def _verify_variational_params(variational_params):
"""Verifies that the format of the input `variational_params`.
Checks that the input parameters is a 2-tuple of tensors of equal shape.
Args:
variational_params: The parameters to check.
Raises:
RuntimeError: If the input is not a 2-tuple of tensors with equal shape.
Returns:
The input `variational_parameters`.
"""
if len(variational_params) != 2:
raise RuntimeError("Incorrect number of variational parameters.")
if variational_params[0].shape != variational_params[1].shape:
raise RuntimeError("Variational parameters must be the same shape.")
return variational_params
def matmul_train(
        x,
        variational_params,
        transpose_a=False,
        transpose_b=False,
        clip_alpha=None,
        eps=common.EPSILON):
    R"""Training computation for a variational matmul.

    In variational dropout we train a Bayesian neural network where we assume
    a fully-factorized Gaussian posterior and log uniform prior over the
    weights. Rather than sampling a weight matrix per batch element, the
    local reparameterization trick computes the mean and standard deviation
    of the distribution over the pre-activations analytically and draws one
    sample per element of the input batch.

    Args:
      x: 2D Tensor representing the input batch.
      variational_params: 2-tuple of Tensors, where the first tensor is the
        \theta values and the second contains the log of the \sigma^2 values.
      transpose_a: If True, a is transposed before multiplication.
      transpose_b: If True, b is transposed before multiplication.
      clip_alpha: Int or None. If integer, we clip the log \alpha values to
        [-clip_alpha, clip_alpha]. If None, don't clip the values.
      eps: Small constant value to use in log and sqrt operations to avoid
        NaNs.

    Returns:
      Output Tensor of the matmul operation.

    Raises:
      RuntimeError: If the variational_params argument is not a 2-tuple.
    """
    # We expect a 2D input tensor, as is standard in fully-connected layers.
    x.get_shape().assert_has_rank(2)
    theta, log_sigma2 = _verify_variational_params(variational_params)

    if clip_alpha is not None:
        # Clip on the log-alpha magnitudes, then recover log_sigma2 from
        # the clipped values.
        log_alpha = common.compute_log_alpha(log_sigma2, theta, eps,
                                             clip_alpha)
        log_sigma2 = common.compute_log_sigma2(log_alpha, theta, eps)

    # Mean and standard deviation of the distribution over pre-activations.
    mu_activation = tf.matmul(x, theta,
                              transpose_a=transpose_a,
                              transpose_b=transpose_b)
    var_activation = tf.matmul(tf.square(x), tf.exp(log_sigma2),
                               transpose_a=transpose_a,
                               transpose_b=transpose_b)
    std_activation = tf.sqrt(var_activation + eps)

    # Draw a single sample per batch element.
    return mu_activation + std_activation * tf.random_normal(
        tf.shape(std_activation))
def matmul_eval(
    x,
    variational_params,
    transpose_a=False,
    transpose_b=False,
    threshold=3.0,
    eps=common.EPSILON):
  R"""Evaluation computation for a variational matmul.

  At eval time the learned posterior mean `theta` is used as the weight
  matrix. Weights whose log \alpha value reaches `threshold` are pruned
  (set to zero) before the product is taken.

  Args:
    x: 2D Tensor representing the input batch.
    variational_params: 2-tuple of Tensors, where the first tensor is the
      \theta values and the second contains the log of the \sigma^2 values.
    transpose_a: If True, a is transposed before multiplication.
    transpose_b: If True, b is transposed before multiplication.
    threshold: Weights with a log \alpha_{ij} value greater than this will
      be set to zero.
    eps: Small constant value to use in log and sqrt operations to avoid
      NaNs.

  Returns:
    Output Tensor of the variational matmul operation.

  Raises:
    RuntimeError: If the variational_params argument is not a 2-tuple.
  """
  # Fully-connected layers operate on rank-2 inputs.
  x.get_shape().assert_has_rank(2)
  theta, log_sigma2 = _verify_variational_params(variational_params)
  # Binary mask keeping only weights whose log \alpha stays below threshold.
  log_alpha = common.compute_log_alpha(log_sigma2, theta, eps, value_limit=None)
  keep_mask = tf.cast(tf.less(log_alpha, threshold), tf.float32)
  pruned_theta = theta * keep_mask
  return tf.matmul(
      x, pruned_theta, transpose_a=transpose_a, transpose_b=transpose_b)
def broadcast_matmul_train(
    x,
    variational_params,
    clip_alpha=None,
    eps=common.EPSILON):
  R"""Training computation for VD matrix multiplication with N input matrices.

  Multiplies an N-D tensor `x` (rank >= 2) with a set of 2D parameters. Each
  2D matrix `x[i, :, :]` in the input tensor is multiplied independently with
  the parameters, resulting in an output tensor with shape
  `x.shape[:-1] + [weight_parameters[0].shape[1]]`.

  Args:
    x: Tensor of rank >= 2 representing the input batch.
    variational_params: 2-tuple of Tensors, where the first tensor is the
      \theta (posterior mean) values and the second contains the log of the
      \sigma^2 values.
    clip_alpha: Int or None. If integer, we clip the log \alpha values to
      [-clip_alpha, clip_alpha]. If None, don't clip the values.
    eps: Small constant value to use in log and sqrt operations to avoid NaNs.

  Returns:
    Output Tensor of the batched matmul operation.

  Raises:
    RuntimeError: If the variational_params argument is not a 2-tuple.
  """
  theta, log_sigma2 = _verify_variational_params(
      variational_params)
  theta.get_shape().assert_has_rank(2)
  log_sigma2.get_shape().assert_has_rank(2)
  # The input data must have be rank 2 or greater
  assert x.get_shape().ndims >= 2
  input_rank = x.get_shape().ndims
  if clip_alpha is not None:
    # Compute the log_alphas and then compute the
    # log_sigma2 again so that we can clip on the
    # log alpha magnitudes
    log_alpha = common.compute_log_alpha(log_sigma2, theta, eps, clip_alpha)
    log_sigma2 = common.compute_log_sigma2(log_alpha, theta, eps)
  # Compute the mean and standard deviation of the distributions over the
  # activations (local reparameterization trick over the last axis of x).
  mu_activation = tf.tensordot(x, theta, [[input_rank-1], [0]])
  var_activation = tf.tensordot(
      tf.square(x),
      tf.exp(log_sigma2),
      [[input_rank-1], [0]])
  std_activation = tf.sqrt(var_activation + eps)
  # Reshape the output back to the rank of the input
  input_shape = x.get_shape().as_list()
  weight_shape = theta.get_shape().as_list()
  output_shape = input_shape[:-1] + [weight_shape[1]]
  mu_activation.set_shape(output_shape)
  std_activation.set_shape(output_shape)
  # NOTE: We sample noise for each weight in theta, which will be shared by
  # each matrix product that was done. This is equivalent to sampling the same
  # set of weights for all matrix products done by this op in an iteration.
  # The element-wise multiply below broadcasts.
  num_pad_dims = len(output_shape) - 2
  padding = [tf.constant(1, dtype=tf.int32) for _ in range(num_pad_dims)]
  # NOTE: On GPU, the first dim may not be defined w/ the Transformer. Create
  # a tf.Tensor from the list shape and TF should match the first dim
  # appropriately
  batch_size = tf.shape(x)[0]
  data_dim = tf.shape(theta)[-1]
  noise_shape = tf.stack([batch_size] + padding + [data_dim], axis=0)
  output = mu_activation + std_activation * tf.random_normal(noise_shape)
  return output
def broadcast_matmul_eval(
x,
variational_params,
threshold=3.0,
eps=common.EPSILON):
R"""Evaluation computation for VD matrix multiplication with N input matrices.
Multiplies a 3D tensor `x` with a set of 2D parameters. Each 2D matrix
`x[i, :, :]` in the input tensor is multiplied indendently with the
parameters, resulting in a 3D output tensor with shape
`x.shape[:2] + weight_parameters[0].shape[1]`.
Args:
x: 3D Tensor representing the input batch.
variational_params: 2-tuple of Tensors, where the first tensor is the
unscaled weight values and the second is the log of the alpha values
for the hard concrete distribution.
threshold: Weights with a log \alpha_{ij} value greater than this will be
set to zero.
eps: Small constant value to use in log and sqrt operations to avoid NaNs.
Returns:
Output Tensor of the batched matmul operation.
Raises:
RuntimeError: If the variational_params argument is not a 2-tuple.
"""
theta, log_sigma2 = _verify_variational_params(
variational_params)
theta.get_shape().assert_has_rank(2)
log_sigma2.get_shape().assert_has_rank(2)
# The input data must have be rank 2 or greater
assert x.get_shape().ndims >= 2
input_rank = x.get_shape().ndims
# Compute the weights mask by thresholding on the log-space alpha values
log_alpha = common.compute_log_alpha(log_sigma2, theta, eps, value_limit=None)
weight_mask = tf.cast(tf.less(log_alpha, threshold), tf.float32)
output | |
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("dark")
plt.rcParams['figure.figsize'] = 16, 12
import pandas as pd
from tqdm import tqdm_notebook, tqdm
import io
from PIL import Image
from glob import glob
from collections import defaultdict
import os
import pickle
from optparse import OptionParser
from datetime import datetime
import json
import sys
import time
from shutil import copyfile
import cv2
cv2.ocl.setUseOpenCL(True)
import random
import imgaug as ia
from imgaug import augmenters as iaa
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from kaggle_camera_model_id_lib.utils import PechkaBot, ImageList, NpzFolder, NCrops, MultiDataset
from kaggle_camera_model_id_lib.models import VggHead, StyleVggHead, IEEEfcn, ResNetFC, ResNetX, FatNet1
from kaggle_camera_model_id_lib.models import InceptionResNetV2fc, InceptionResNetV2fcSmall, InceptionResNetV2
from kaggle_camera_model_id_lib.models import ResNetDense, ResNetDenseFC
from kaggle_camera_model_id_lib.utils import jpg_compress, equalize_v_hist, hsv_convert
from kaggle_camera_model_id_lib.utils import scale_crop_pad, gamma_correction
from kaggle_camera_model_id_lib.utils import patch_quality_dich, n_random_crops, n_pseudorandom_crops
_bot = PechkaBot()
def log(txt):
    """Echo *txt* to stdout and forward the same text to the notification bot."""
    message = txt
    print(message)
    _bot.send_message(message)
def train_pass(train_loader, model, criterion, optimizer):
    """Run one training epoch; returns (mean_loss, mean_accuracy) over batches.

    NOTE(review): relies on module-level globals `batch_size_train`,
    `ix_epoch` and `options` defined in the __main__ section -- confirm they
    are set before calling. Written against the pre-0.4 PyTorch API
    (Variable, `loss.data[0]`).
    """
    loss_train_batch = 0
    acc_train_batch = 0
    for ix_batch, (X, Y) in tqdm(
            enumerate(train_loader),
            total=int(len(train_loader.dataset.imgs)/batch_size_train),
            desc='Train #%i' % ix_epoch):
        # Each sample yields `ncrops` crops; flatten them into the batch dim.
        bs, ncrops, c, h, w = X.shape
        X = X.view(-1, c, h, w)
        Y = Y.view(ncrops*bs)
        X_var = Variable(X.cuda())
        Y_var = Variable(Y.cuda())
        log_p = model(X_var)
        loss = criterion(log_p, Y_var)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_train_batch += loss.data[0]
        # Batch accuracy: fraction of crops whose argmax matches the label.
        acc_train_batch += ((log_p.max(1)[1] == Y_var).float().sum()/Y_var.shape[0]).data[0]
        if options.is_debug and ix_batch > 50:
            break
    # Move the last batch back to CPU and drop references to free GPU memory.
    X_var = X_var.cpu()
    del(X_var)
    Y_var = Y_var.cpu()
    del(Y_var)
    loss_train_batch /= ix_batch + 1
    acc_train_batch /= ix_batch + 1
    return loss_train_batch, acc_train_batch
def val_pass(val_loader, model, criterion):
    """Run one validation epoch; returns (mean_loss, mean_accuracy).

    NOTE(review): depends on module-level globals `batch_size_val`,
    `ix_epoch` and `options` set in the __main__ section. Uses the pre-0.4
    PyTorch API (volatile Variables, `loss.data[0]`).
    """
    loss_val_batch = 0
    acc_val_batch = 0
    for ix_batch, (X, Y) in tqdm(
            enumerate(val_loader),
            total=int(len(val_loader.dataset.imgs)/batch_size_val),
            desc='Val #%i' % ix_epoch):
        # Flatten the per-sample crops into the batch dimension.
        bs, ncrops, c, h, w = X.shape
        X = X.view(-1, c, h, w)
        Y = Y.view(ncrops*bs)
        # volatile=True disables autograd history for inference (torch < 0.4).
        X_var = Variable(X.cuda(), volatile=True)
        Y_var = Variable(Y.cuda(), volatile=True)
        log_p = model(X_var)
        loss = criterion(log_p, Y_var)
        loss_val_batch += loss.data[0]
        acc_val_batch += ((log_p.max(1)[1] == Y_var).float().sum()/Y_var.shape[0]).data[0]
        if options.is_debug and ix_batch > 50:
            break
    # Release GPU references before returning.
    X_var = X_var.cpu()
    del(X_var)
    Y_var = Y_var.cpu()
    del(Y_var)
    loss_val_batch /= ix_batch + 1
    acc_val_batch /= ix_batch + 1
    return loss_val_batch, acc_val_batch
# Registry mapping the config's `model_type` string to a constructor taking
# the number of classes. Keys encode the backbone, head variant, pooling,
# and whether ImageNet-pretrained weights are loaded.
model_factory = {
    'Vgg19Head_E_2b_bn': lambda n_classes: VggHead(num_classes=n_classes, vgg_key='E_2b', load_vgg_bn=True, batch_norm=True),
    'Vgg19Head_E_3b_bn': lambda n_classes: VggHead(num_classes=n_classes, vgg_key='E_3b', load_vgg_bn=True, batch_norm=True),
    'Vgg19Head_E_bn': lambda n_classes: VggHead(num_classes=n_classes, load_vgg_bn=True, vgg_key='E', batch_norm=True),
    'Vgg11Head_A_bn': lambda n_classes: VggHead(num_classes=n_classes, load_vgg_bn=True, vgg_key='A', batch_norm=True),
    'Vgg11Head_A': lambda n_classes: VggHead(num_classes=n_classes, load_vgg_bn=True, vgg_key='A', batch_norm=False),
    'StyleVggHead_bn': lambda n_classes: StyleVggHead(num_classes=n_classes, load_vgg_bn=True),
    'IEEEfcn': lambda n_classes: IEEEfcn(n_classes),
    'resnet18fc_pretrained': lambda n_classes: ResNetFC(
        models.resnet.BasicBlock, [2, 2, 2, 2], num_classes=n_classes, load_resnet='resnet18'),
    'resnet18fc': lambda n_classes: ResNetFC(
        models.resnet.BasicBlock, [2, 2, 2, 2], num_classes=n_classes, load_resnet=None),
    'resnet18X_pretrained': lambda n_classes: ResNetX(
        models.resnet.BasicBlock, [2, 2, 2, 2], num_classes=n_classes, load_resnet='resnet18'),
    'InceptionResNetV2fc_5_10_4': lambda n_classes: InceptionResNetV2fc(
        num_classes=n_classes, nun_block35=5, num_block17=10, num_block8=4),
    'InceptionResNetV2fcSmall_5_10': lambda n_classes: InceptionResNetV2fcSmall(
        num_classes=n_classes, nun_block35=5, num_block17=10),
    'resnet34fc_pretrained': lambda n_classes: ResNetFC(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34'),
    'resnet34fc_pretrained_maxpool': lambda n_classes: ResNetFC(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34', pool_type='max'),
    'resnet50fc_pretrained': lambda n_classes: ResNetFC(
        models.resnet.Bottleneck, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet50'),
    'FatNet1': lambda n_classes: FatNet1(n_classes),
    'resnet34X_pretrained_maxpool': lambda n_classes: ResNetX(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34', pool_type='max'),
    'resnet50X_pretrained_maxpool': lambda n_classes: ResNetX(
        models.resnet.Bottleneck, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet50', pool_type='max'),
    'InceptionResNetV2': lambda n_classes: InceptionResNetV2(num_classes=n_classes),
    'ResNetDense34_pretrained': lambda n_classes: ResNetDense(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34'),
    'ResNetDenseFC34_pretrained': lambda n_classes: ResNetDenseFC(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34',
        zero_first_center=False),
    'ResNetDenseFC34_pretrained_zfc': lambda n_classes: ResNetDenseFC(
        models.resnet.BasicBlock, [3, 4, 6, 3], num_classes=n_classes, load_resnet='resnet34',
        zero_first_center=True)
}
def create_CELoss(prms):
    """Build an ``nn.CrossEntropyLoss`` from a config dict.

    Args:
        prms: dict of CrossEntropyLoss keyword arguments (a 'weight' entry
            given as a plain list is converted to a FloatTensor), or None
            for an unweighted loss with default settings.

    Returns:
        A configured nn.CrossEntropyLoss instance.
    """
    if prms is None:
        return nn.CrossEntropyLoss()
    # Work on a shallow copy so the caller's config dict (loaded from JSON
    # and also logged/reused elsewhere) is not mutated with a tensor.
    prms = dict(prms)
    if 'weight' in prms:
        prms['weight'] = torch.FloatTensor(prms['weight'])
    return nn.CrossEntropyLoss(**prms)
# Maps the config's `criterion_type` string to a loss constructor; `prms`
# comes straight from the JSON config (None is allowed for CrossEntropyLoss).
criterion_factory = {
    'CrossEntropyLoss': lambda prms: create_CELoss(prms),
    'MultiMarginLoss': lambda prms: nn.MultiMarginLoss(**prms)
}
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-c',
'--config',
dest='cfg_path',
help='config path')
parser.add_option('-d',
'--debug',
action="store_true",
dest="is_debug")
(options, args) = parser.parse_args()
if options.cfg_path is None:
sys.exit('cfg_path is not provided')
if options.is_debug:
log('DEBUG MODE ON')
log('-----------\n\nStarting training process: \n %s\n %s' % (str(datetime.now()), __file__))
log('config: %s' % options.cfg_path)
with open(options.cfg_path) as f:
cfg = json.load(f)
log('Config:')
for k, v in cfg.items():
log(' %s = %s' % (k, v))
train_list_path = cfg['train_list_path']
val_path = cfg['val_path']
out_dir = cfg['out_dir']
model_path = cfg['model_path']
crop_size = cfg['crop_size']
step_crop_val = cfg['step_crop_val']
n_crops_train = cfg['n_crops_train']
batch_size_train = cfg['batch_size_train']
batch_size_val = cfg['batch_size_val']
workers = cfg['workers']
n_epoches = cfg['n_epoches']
model_type = cfg['model_type']
n_classes = cfg['n_classes']
learning_rate = cfg['learning_rate']
momentum = cfg['momentum']
lr_scheduler_step_size = cfg['lr_scheduler_step_size']
lr_scheduler_gamma = cfg['lr_scheduler_gamma']
weight_decay = cfg['weight_decay']
optim_type = cfg['optim_type']
crop_center_size = cfg['crop_center_size']
do_random_aug_kaggle = cfg['do_random_aug_kaggle']
p_random_aug_kaggle_train = cfg['p_random_aug_kaggle_train']
p_random_aug_kaggle_val = cfg['p_random_aug_kaggle_val']
do_hard_aug = cfg['do_hard_aug']
p_hard_aug_train = cfg['p_hard_aug_train']
p_hard_aug_val = cfg['p_hard_aug_val']
criterion_type = cfg['criterion_type']
criterion_params = cfg['criterion_params']
n_crops_search_train = cfg['n_crops_search_train']
train_list_pseudo_npz = cfg['train_list_pseudo_npz']
train_list_flickr = cfg['train_list_flickr']
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
random_crop = transforms.RandomCrop(crop_size)
center_crop = transforms.CenterCrop(crop_center_size)
rvf = transforms.RandomVerticalFlip()
rhf = transforms.RandomHorizontalFlip()
random_flip = lambda img: rvf(rhf(img))
scale_05 = lambda img: scale_crop_pad(img, 0.5)
scale_08 = lambda img: scale_crop_pad(img, 0.8)
scale_15 = lambda img: scale_crop_pad(img, 1.5)
scale_20 = lambda img: scale_crop_pad(img, 2.0)
gamma_08 = lambda img: gamma_correction(img, 0.8)
gamma_12 = lambda img: gamma_correction(img, 1.2)
jpg_70 = lambda img: jpg_compress(img, (70, 71))
jpg_90 = lambda img: jpg_compress(img, (90, 91))
augs = [scale_05, scale_08, scale_15, scale_20, gamma_08, gamma_12, jpg_70, jpg_90]
def random_aug_kaggle(img, p=0.5):
if np.random.rand() < p:
return random.choice(augs)(img)
return img
blur = iaa.GaussianBlur(sigma=(0, 2))
sharpen = iaa.Sharpen(alpha=(0, 1), lightness=(0.5, 2))
emboss = iaa.Emboss(alpha=(0, 1), strength=(0, 2))
contrast_normalization = iaa.ContrastNormalization(alpha=(0.7, 1.3))
hard_aug = iaa.OneOf([blur, sharpen, emboss, contrast_normalization])
sometimes_train = iaa.Sometimes(p_hard_aug_train, hard_aug)
sometimes_val = iaa.Sometimes(p_hard_aug_val, hard_aug)
def aug_train(img):
#if min(img.size) > crop_center_size:
# return random_flip(random_crop(center_crop(img)))
#img_np = np.array(img)
#if img_np.shape[0] < crop_center_size and img_np.shape[1] > crop_center_size:
# n = np.random.randint(img_np.shape[1] - crop_center_size)
# return random_flip(random_crop(Image.fromarray(img_np[:, n:(n + crop_center_size), :])))
#if img_np.shape[1] < crop_center_size and img_np.shape[0] > crop_center_size:
# n = np.random.randint(img_np.shape[0] - crop_center_size)
# return random_flip(random_crop(Image.fromarray(img_np[n:(n + crop_center_size), :, :])))
return random_flip(random_crop(img))
def aug_train_fscore(img):
if min(img.size) > crop_center_size:
img_np = np.array(center_crop(img))
else:
img_np = np.array(img)
if img_np.shape[0] < crop_center_size and img_np.shape[1] > crop_center_size:
n = np.random.randint(img_np.shape[1] - crop_center_size)
img_np = img_np[:, n:(n + crop_center_size), :]
if img_np.shape[1] < crop_center_size and img_np.shape[0] > crop_center_size:
n = np.random.randint(img_np.shape[0] - crop_center_size)
img_np = img_np[n:(n + crop_center_size), :, :]
crops = n_pseudorandom_crops(img_np, crop_size, n_crops_train, n_crops_search_train, patch_quality_dich)
for img in crops:
yield random_flip(random_crop(Image.fromarray(img)))
def aug_optional_train(img):
if do_hard_aug:
img = Image.fromarray(sometimes_train.augment_image(np.array(img)))
if do_random_aug_kaggle:
img = random_aug_kaggle(img, p_random_aug_kaggle_train)
return img
def aug_optional_val(img):
if do_hard_aug:
img = Image.fromarray(sometimes_val.augment_image(np.array(img)))
if do_random_aug_kaggle:
img = random_aug_kaggle(img, p_random_aug_kaggle_val)
return img
if n_crops_search_train is None:
log(' -> default transform_train is selected')
transform_train = transforms.Compose([
transforms.Lambda(lambda img: [
aug_optional_train(aug_train(img))
for i in range(n_crops_train)
]),
transforms.Lambda(lambda crops: torch.stack([normalize(to_tensor(crop)) for crop in crops]))
])
else:
log(' -> dich fscore transform_train is selected')
transform_train = transforms.Compose([
transforms.Lambda(lambda img: [
aug_optional_train(img) for img in aug_train_fscore(img)
]),
transforms.Lambda(lambda crops: torch.stack([normalize(to_tensor(crop)) for crop in crops]))
])
ds_train = ImageList(
train_list_path,
transform=transform_train,
target_transform=transforms.Compose([
transforms.Lambda(lambda y: [y]*n_crops_train),
transforms.Lambda(lambda ylist: torch.LongTensor(ylist))
]))
train_ds_list = []
if train_list_pseudo_npz is not None:
ds_train_pseudo = NpzFolder(
train_list_pseudo_npz,
transform=transforms.Compose([
transforms.Lambda(lambda img: [
aug_train(Image.fromarray(img))
for i in range(n_crops_train)
]),
transforms.Lambda(lambda crops: torch.stack([normalize(to_tensor(crop)) for crop in crops]))
]),
target_transform=transforms.Compose([
transforms.Lambda(lambda y: [y]*n_crops_train),
transforms.Lambda(lambda ylist: torch.LongTensor(ylist))
]))
train_ds_list.append(ds_train_pseudo)
log(' -> pseudo dataset is loaded')
if train_list_flickr is not None:
ds_train_flickr = ImageList(
train_list_flickr,
transform=transform_train,
target_transform=transforms.Compose([
transforms.Lambda(lambda y: [y]*n_crops_train),
transforms.Lambda(lambda ylist: torch.LongTensor(ylist))
]))
train_ds_list.append(ds_train_flickr)
log(' -> flickr dataset is loaded')
if len(train_ds_list) > 0:
train_ds_list = [ds_train] + train_ds_list
ds_train = MultiDataset(train_ds_list)
log(' -> MultiDataset is created: %i' % len(train_ds_list))
#for ds in train_ds_list:
# print('; '.join(['%s: %i' % (k, v) for (k, v) in sorted(ds.class_to_idx.items(), key=lambda t: t[1])]))
#sys.exit('DEBUG EXIT')
train_loader = torch.utils.data.DataLoader(
ds_train,
batch_size=batch_size_train,
shuffle=True,
num_workers=workers,
pin_memory=True)
log('train_loader.size: %i' % len(train_loader.dataset.imgs))
if val_path is not None:
ds_val = NpzFolder(
val_path,
transform=transforms.Compose([
transforms.Lambda(lambda img: NCrops(img, crop_size=crop_size, step=step_crop_val)),
transforms.Lambda(lambda crops: torch.stack([normalize(to_tensor(aug_optional_val(Image.fromarray(crop))))
for crop in crops]))
]),
target_transform=transforms.Compose([
transforms.Lambda(lambda y: [y]*int(np.floor(1 + (512 - crop_size)/step_crop_val))**2),
transforms.Lambda(lambda ylist: torch.LongTensor(ylist))
]))
val_loader = torch.utils.data.DataLoader(
ds_val,
batch_size=batch_size_val,
shuffle=False,
num_workers=workers,
pin_memory=True)
log('val_loader.size: %i' % len(val_loader.dataset.imgs))
model = model_factory[model_type](n_classes)
if model_path is not None:
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['model'])
| |
# -*- coding: utf-8 -*-
# python 3.7.2
# 2019/07/16 by sryml.
import os
import io
import struct
import threading
import json
import shutil
from argparse import ArgumentParser
from binascii import crc32
from time import sleep
from math import ceil
from timeit import timeit
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Manager
from queue import Queue
from ctypes import windll
from sys import stdout
#
from PIL import Image
# -------------------
CPU_COUNT = max(os.cpu_count()-1, 1)
TIMER = None
# -------------------
class Unbuffered(object):
    """File-like wrapper that flushes the underlying stream after every write.

    Any attribute other than ``write`` is delegated to the wrapped stream.
    """

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        underlying = self.stream
        underlying.write(data)
        underlying.flush()

    def __getattr__(self, attr):
        # Invoked only for attributes not found on the wrapper itself.
        return getattr(self.stream, attr)
stdout = Unbuffered(stdout)
def GenerateName(root, type_='dir', n=0):
    """Return the first path ``root/_tmp <i>`` (i >= n) that is not taken yet.

    Args:
        root: directory in which the candidate name is generated.
        type_: 'dir' probes against existing directories, 'file' against
            existing files. Any other value returns None (original behavior).
        n: index to start probing from.

    Returns:
        The first unused candidate path, or None for an unknown ``type_``.
    """
    if type_ == 'dir':
        taken = os.path.isdir
    elif type_ == 'file':
        taken = os.path.isfile
    else:
        return None
    # Iterate instead of recursing: the original recursed once per existing
    # '_tmp N' entry and could hit the recursion limit in crowded folders.
    while True:
        name = os.path.join(root, '_tmp {}'.format(n))
        if not taken(name):
            return name
        n += 1
def image_convert(img, mode):
    """Convert *img* to PIL mode *mode*; a no-op when it is already in it.

    Palette output ('P') uses an adaptive 256-color palette; RGBA input is
    flattened to RGB first before the palette conversion.
    """
    if img.mode == mode:
        return img
    if mode != 'P':
        return img.convert(mode)
    source = img.convert('RGB') if img.mode == 'RGBA' else img
    return source.convert(mode, palette=Image.ADAPTIVE, colors=256)
def IMG_resize(img, maxsize):
    """Shrink *img* so its longest side equals *maxsize*, keeping aspect ratio.

    Returns:
        (1, resized_image) when a resize happened, or (0, original_image)
        when the image already fits within *maxsize*.
    """
    width, height = img.size
    longest = max(width, height)
    if longest <= maxsize:
        return (0, img)
    if width == height:
        target = [maxsize, maxsize]
    else:
        factor = maxsize / longest
        idx = (width, height).index(longest)
        # Longest side becomes maxsize; the other side is scaled and rounded.
        target = [maxsize, round((width, height)[1 - idx] * factor)]
        if idx == 1:
            target.reverse()
    return (1, img.resize(target, Image.ANTIALIAS))
def progress_bar(maximum, q, fix_count=None, run=1):
    """Render/refresh a console progress bar fed by queue *q*.

    Re-arms itself via the global TIMER every `period` seconds until the
    coordinator zeroes TIMER.interval, at which point one final draw is done
    with run=0 and the chain stops.

    Args:
        maximum: total number of expected work items.
        q: queue whose qsize() equals the number of completed items.
        fix_count: optional queue carrying counts of items that were skipped
            (e.g. invalid archives) and must be subtracted from *maximum*.
        run: internal flag; 0 marks the final (cleanup) draw.
    """
    global TIMER
    if fix_count and not fix_count.empty():
        num = fix_count.get()
        maximum -= num
        if maximum <= 0:
            # Nothing left to track: wipe the bar line and stop re-arming.
            print ('\r',' '*60, end='')
            return
    period = 1/40
    block = 0.05 # 100%/20%
    current = q.qsize()
    bar1 = '\r %3d%% ['
    bar2 = '%s'
    bar3 = '%s'
    bar4 = '] %{}d/{}'.format(len(str(maximum)),maximum)
    ratio = min(current/maximum, 1.0)
    num_up = int(ratio/block)
    # 20-cell bar: filled cells for completed fraction, shaded for the rest.
    up = '█' * num_up
    down = '▓' * (20-num_up) #▓□
    r = ratio * 100
    #
    cmd_font.SetColor(cmd_font.LightGreen)
    stdout.write(bar1 % (r,))
    stdout.write(bar2 % (up,))
    cmd_font.SetColor()
    stdout.write(bar3 % (down,))
    cmd_font.SetColor(cmd_font.LightGreen)
    stdout.write(bar4 % (current,))
    #
    if not run:
        # Final draw: clear an unfinished bar, or keep a completed one.
        if r < 100:
            print ('\r',' '*60, end='')
        else:
            print ('\n')
        return
    if not TIMER.interval:
        # Coordinator zeroed the interval: do one last draw and exit.
        progress_bar(maximum, q, fix_count, run=0)
        return
    TIMER = threading.Timer(period, progress_bar, (maximum, q, fix_count))
    TIMER.start()
def progress_bar2(str_, n=0):
    """Spin an ASCII activity indicator after *str_* until TIMER is stopped.

    *n* cycles 0..3 through the spinner glyphs; the expression
    ``n-3 and n+1`` yields n+1 while n < 3 and wraps back to 0 at n == 3.
    """
    global TIMER
    if not TIMER.interval:
        print (str_,'done.', end='')
        return
    period = 1/10
    lst = ("\\", "|", "/", "-")
    print ('{}{}'.format(str_, lst[n]), end='')
    n = n-3 and n+1
    TIMER = threading.Timer(period, progress_bar2, (str_, n))
    TIMER.start()
def read_file(file, seek, size):
    """Seek to offset *seek* in *file* and read *size* bytes.

    Replaces the original ``[file.seek(...)] and file.read(...)`` trick with
    two plain statements -- same behavior, readable.
    """
    file.seek(seek)
    return file.read(size)
# Counter of names that could not be decoded/encoded; used to build unique
# fallback placeholder names.
ERROR_NAME = 0
def str_codec(str_, method='decode'):
    """Decode bytes (or encode str) trying ISO-8859-1 first, then UTF-8.

    Falls back to a unique ``ErrorName_N`` placeholder when every codec
    fails, so unpacking can continue even with corrupt name fields.

    Args:
        str_: bytes to decode or str to encode.
        method: 'decode' or 'encode' -- the str/bytes method to invoke.
    """
    global ERROR_NAME
    for codec in ('ISO-8859-1', 'utf-8'):
        try:
            # getattr instead of eval: same dynamic dispatch, no code
            # injection risk and no bare except hiding unrelated errors.
            return getattr(str_, method)(codec)
        except Exception:
            pass
    ERROR_NAME += 1
    return 'ErrorName_{}'.format(ERROR_NAME)
#################################################
class mmp_convert(object):
getmode = {1:'P' , 2:'L' , 3:'P' , 4:'RGB' , 5:'RGBA'}
bpp2mode = {'8':'P' , '24':'RGB' , '32':'RGBA' , 'Alpha':'L'}
gettype = {'P':1 , 'L':2 , 'PaletteAlpha':3 , 'RGB':4 , 'RGBA':5}
Palette = 1
Alpha = 2
PaletteAlpha = 3
TrueColour = 4
TrueColourAlpha = 5
valid_format = ('.bmp','.png','.jpg','.jpeg','.webp')
# -------------------
    def __init__(self):
        """Initialize conversion state; options are filled in by the caller/CLI."""
        self.bpp = None        # target bit depth key into bpp2mode, or None
        self.output = None     # presumably an output location option -- TODO confirm
        self.maxsize = None    # max image side length used by IMG_resize -- TODO confirm
        self.scale = None      # scale option; not used in the visible methods
        self.nTextures = 0     # total textures counted during pre-parsing
        self.overwrite = False # overwrite textures already present in an archive
        self.mmp_paths = []    # .mmp archive paths queued for unpacking
        self.img_paths = []    # individual image paths (not used in visible code)
        self.dir_paths = []    # (dir, files) pairs queued for packing
#######################
# mmp unpacking.
#######################
    def process_unpacking(self, params, FLAG='init'):
        """Unpack .mmp texture archives into per-archive image directories.

        Dispatches on FLAG:
          'init'    -- params=(paths, cmd): collect .mmp files, count the
                       textures, then fan archives out to a process pool.
          'Process' -- params=(file, q, fix_count): parse one archive's
                       texture table and hand each texture to a thread pool.
          'Thread'  -- params=((name, type, w, h, seek), q, dir, fh, lock):
                       read one texture's bytes and save it as an image.

        Progress is reported through the shared queue *q*; *fix_count*
        carries texture counts to subtract when an archive is invalid.
        """
        cpu = CPU_COUNT
        if FLAG == 'init':
            global TIMER
            paths,cmd = params
            str_ = '\rFiles pre-parsing...'
            print (str_, end='')
            if cmd:
                TIMER = threading.Timer(0.01, progress_bar2, (str_,))
                TIMER.start()
            self.mmp_paths = []
            self.nTextures = 0
            # Collect .mmp files from the given paths (recursing into dirs).
            for p in paths:
                if os.path.isdir(p):
                    for root, dirs, files in os.walk(p):
                        files = [os.path.join(root,i) for i in files if os.path.splitext(i)[1].lower() == '.mmp']
                        self.mmp_paths.extend(files)
                elif os.path.splitext(p)[1].lower() == '.mmp':
                    self.mmp_paths.append(p)
            # Each archive starts with a little-endian uint32 texture count.
            for file in self.mmp_paths:
                with open(file,'rb') as f:
                    self.nTextures += struct.unpack('<I', f.read(4))[0]
            if TIMER:
                TIMER.interval = 0
                sleep(0.2)
            print ('\n')
            if not self.nTextures:
                print ('No mmp file!')
                return
            #--------------------------------
            print ('mmp unpacking...\n')
            # Manager queues for cross-process communication.
            manager = Manager()
            q = manager.Queue()
            fix_count = manager.Queue()
            # Create the progress bar when running from the console.
            if cmd:
                TIMER = threading.Timer(0.1, progress_bar, (self.nTextures, q, fix_count))
                TIMER.start()
            # Distribute one archive per process-pool task.
            pool = ProcessPoolExecutor(cpu)
            futures = []
            for task in self.mmp_paths:
                future = pool.submit(self.process_unpacking, (task,q,fix_count), FLAG='Process')
                futures.append(future)
            pool.shutdown()
            if TIMER:
                TIMER.interval = 0
                sleep(0.2)
            cmd_font.SetColor()
            qsize = q.qsize()
            length = len(self.mmp_paths)
            print ('\r%d mmp files unpacking done! Generate %d images.' % (length, qsize))
            # Surface any per-archive warnings/errors returned by workers.
            for future in futures:
                results = future.result()
                if results:
                    for msg in results:
                        print (msg)
            print ('')
        elif FLAG == 'Process':
            file, q, fix_count = params
            error_msg = []
            mmp_file= open(file,'rb')
            # Little-endian uint32: number of textures in this archive.
            nTextures= struct.unpack('<I', mmp_file.read(4))[0]
            MMP_MAP = []
            EOF = os.path.getsize(file)
            for i in range(nTextures):
                # Record header: uint16 marker (always 2), crc32 checksum,
                # payload size, and the length of the name that follows.
                two,checksum,size,name_len\
                = struct.unpack('<HIII', mmp_file.read(14))
                if two != 2:
                    # Invalid archive: tell the progress bar to skip all of
                    # this file's textures and bail out with an error.
                    fix_count.put(nTextures)
                    mmp_file.close()
                    str_ = 'Error: "{}" Invalid file.'.format(os.path.split(file)[1])
                    return [str_]
                name = mmp_file.read(name_len)
                im_type,width,height\
                = struct.unpack('<III', mmp_file.read(12))
                start_seek = mmp_file.tell()
                end_seek = mmp_file.seek(size-12, 1)
                name = str_codec(name)
                MMP_MAP.append(
                    (
                    ''.join([name,'.bmp']),
                    im_type, width, height,
                    (start_seek, end_seek - start_seek)
                    )
                )
                if mmp_file.tell() >= EOF:
                    # Archive ended early: adjust the expected texture count.
                    nCurrents = i+1
                    if nCurrents != nTextures:
                        fix_count.put(nTextures-nCurrents)
                        str_ = 'Warning: {} file show {}, get {}.'.format(os.path.split(file)[1], nTextures, nCurrents)
                        error_msg.append(str_)
                    break
            unpack_dir= os.path.splitext(file)[0]
            if not os.path.exists(unpack_dir):
                os.makedirs(unpack_dir)
            # Hand each texture's byte range to a thread for decode + save.
            lock = Queue(maxsize=1)
            pool = ThreadPoolExecutor(4)
            futures = []
            for task in MMP_MAP:
                future = pool.submit(
                    self.process_unpacking,
                    (
                    task,
                    q,
                    unpack_dir,
                    mmp_file,
                    lock
                    ),
                    FLAG='Thread'
                )
                futures.append(future)
            pool.shutdown()
            mmp_file.close()
            return error_msg
        elif FLAG == 'Thread':
            bpp = self.bpp
            name,im_type,width,height,data_seek = params[0]
            q, unpack_dir, mmp_file, lock = params[1:]
            # One-slot queue acts as a mutex around the shared file handle.
            lock.put(1)
            data = read_file(mmp_file, data_seek[0], data_seek[1])
            lock.get()
            if im_type == self.Palette:
                img= Image.frombytes(self.getmode[im_type],(width,height),data[:-768])
                # Palette entries are stored at quarter brightness; multiply
                # by 4 (clamped to 255) to restore them.
                palette= map(lambda i:min(i<<2 , 255),data[-768:])
                img.putpalette(palette)
            else:
                img= Image.frombytes(self.getmode[im_type],(width,height),data)
            if bpp in self.bpp2mode:
                img = image_convert(img, self.bpp2mode[bpp])
            im_path = os.path.join(unpack_dir, name)
            img.save(im_path)
            q.put(1)
    def unpacking(self, paths=[], bpp=None, cmd=False):
        """Entry point for .mmp extraction; prints the total elapsed time.

        Args:
            paths: files/dirs to scan for .mmp archives (ignored when cmd).
            bpp: optional output bit-depth key ('8', '24', '32', 'Alpha').
            cmd: True when invoked from the CLI; arguments are then taken
                from the module-level `parse_args` object (NOTE(review):
                assumed to be populated by the argument parser -- confirm).

        NOTE(review): the mutable default ``paths=[]`` is shared across
        calls; it is only read here, so it is harmless as written.
        """
        if cmd:
            paths = parse_args.path
            self.bpp = parse_args.bpp
        else:
            self.bpp = bpp
        sec = timeit(lambda:self.process_unpacking((paths,cmd)), number=1)
        print ('Time used: {:.2f} sec\n'.format(sec))
#######################
# Image packing.
#######################
def process_packing(self, params, FLAG='init'):
cpu = CPU_COUNT
if FLAG == 'init':
global TIMER
paths, cmd = params
str_ = '\rFiles pre-parsing...'
print (str_, end='')
if cmd:
TIMER = threading.Timer(0.01, progress_bar2, (str_,))
TIMER.start()
self.dir_paths = []
self.nTextures = 0
for p in paths:
for root, dirs, files in os.walk(p):
files = [i for i in files if os.path.splitext(i)[1].lower() in self.valid_format]
if files:
self.dir_paths.append((root,files))
self.nTextures += len(files)
if TIMER:
TIMER.interval = 0
sleep(0.2)
print ('\n')
if not self.nTextures:
print ('No Image!')
return
#------------------------------
print ('bmp packing...\n')
# 多进程通信管理
manager = Manager()
q = manager.Queue()
# 控制台模式下创建进度条
if cmd:
TIMER = threading.Timer(0.1, progress_bar, (self.nTextures, q))
TIMER.start()
# 开启多进程任务分配
pool = ProcessPoolExecutor(cpu)
futures = []
for task in self.dir_paths:
future = pool.submit(self.process_packing, (task,q), FLAG='Process')
futures.append(future)
pool.shutdown()
if TIMER:
TIMER.interval = 0
sleep(0.2)
cmd_font.SetColor()
qsize = q.qsize()
length = len(self.dir_paths)
print ('\r%d images processed done! A total of %d mmp files:\n' % (qsize, length))
hr = 0
for i in futures:
result = i.result()
if result:
if not hr:
cmd_font.SetColor(cmd_font.Aqua)
stdout.write(''.join(['-'*79, '\n']))
hr = 1
stdout.write (''.join([result, '\n']))
if hr:
stdout.write(''.join(['-'*79, '\n']))
cmd_font.SetColor()
print ('')
elif FLAG == 'Process':
root, files = params[0] # abs path, files name
q = params[1]
old_nTextures = [0,0]
nOverwrites = 0
nIgnores = 0
results = ['new "', os.path.split(root)[1], '.mmp"', ": ", "add textures ", "{add}", "."]
mmp_name = ''.join([root,'.mmp'])
# 如果mmp文件已存在则继续添加贴图
if os.path.exists(mmp_name):
mmp_file = open(mmp_name,'rb+')
if os.path.getsize(mmp_name) >= 4:
old_nTextures = [struct.unpack('<I', mmp_file.read(4))[0]] * 2
# mmp文件图像数据区段映射
MMP_MAP = []
for i in range(old_nTextures[1]):
start_seek = mmp_file.tell()
two,checksum,size,name_len = struct.unpack('<HIII', mmp_file.read(14))
name = mmp_file.read(name_len)
end_seek = mmp_file.seek(size,1) # current_pos + size
MMP_MAP.append((str_codec(name), start_seek, end_seek - start_seek))
FILES_LOWER = [os.path.splitext(i)[0].lower() for i in files]
MMP_MAP_LOWER = [i[0].lower() for i in MMP_MAP]
repeats = set()
if self.overwrite:
for idx,name in enumerate(MMP_MAP_LOWER):
if name in FILES_LOWER:
repeats.add(MMP_MAP[idx])
nOverwrites += 1
MMP_MAP = set(MMP_MAP) - repeats
old_nTextures[1] = old_nTextures[0] - nOverwrites
if nOverwrites:
mmp_tmp = mmp_name+'_tmp_'
with open(mmp_tmp,'wb+') as tmp:
os.popen('attrib +h "{}"'.format(mmp_tmp))
tmp.write(struct.pack('<I', old_nTextures[1]))
for i in MMP_MAP:
mmp_file.seek(i[1])
tmp.write(mmp_file.read(i[2]))
mmp_file.close()
os.remove(mmp_name)
os.rename(mmp_tmp,mmp_name)
os.popen('attrib -h "{}"'.format(mmp_name))
mmp_file = open(mmp_name,'rb+')
results[-1] = ', overwrite {}.'.format(nOverwrites)
else:
for idx,name in enumerate(FILES_LOWER):
if name in MMP_MAP_LOWER:
repeats.add(files[idx])
nIgnores += 1
q.put(1)
files = set(files) - repeats
results[-1] = ', ignore {}.'.format(nIgnores)
mmp_file.seek(0,2)
results[0] = 'old "'
results[3] = ' has {}, now {}: '.format(old_nTextures[0], '{now}')
else:
mmp_file= open(mmp_name,'wb+')
if old_nTextures[0] == 0:
mmp_file.seek(0)
mmp_file.write(struct.pack('<I', 0)) #保留头4个字节
# -------------------
# 将每个图像文件分配给多线程编码为mmp字节
nTextures = 0
new_nTextures = old_nTextures[1]
if files:
| |
= 'https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}'
self.engine_name = "Baidu"
self.MAX_DOMAINS = 2
self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.querydomain = self.domain
self.q = q
return
    def extract_domains(self, resp):
        """Parse Baidu result HTML and collect new subdomains of self.domain.

        Side effects: appends newly seen subdomains to self.subdomains; when
        a page yields nothing new, rotates self.querydomain to the two most
        common subdomains seen so the next query can dig deeper.

        Returns:
            The raw list of links extracted from the page.
        """
        links = list()
        found_newdomain = False
        subdomain_list = []
        # Baidu wraps each result URL in an <a class="c-showurl"> element.
        link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
        try:
            links = link_regx.findall(resp)
            for link in links:
                # Strip residual markup left inside the anchor text.
                link = re.sub('<.*?>|>|<| ', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain.endswith(self.domain):
                    subdomain_list.append(subdomain)
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        found_newdomain = True
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        if not found_newdomain and subdomain_list:
            self.querydomain = self.findsubs(subdomain_list)
        return links
def findsubs(self, subdomains):
    """Return the two most frequent entries of *subdomains* as a tuple.

    The second element is the empty string when only one distinct host
    was observed.
    """
    tally = Counter(subdomains)
    first = max(tally, key=tally.get)
    tally.pop(first, None)
    second = max(tally, key=tally.get) if tally else ''
    return (first, second)
def check_response_errors(self, resp):
    """Baidu responses are never treated as fatal; always report success."""
    return True
def should_sleep(self):
    """Pause a random 2-5 seconds between requests to avoid rate limiting."""
    delay = random.randint(2, 5)
    time.sleep(delay)
    return
def generate_query(self):
    """Build the Baidu search query, excluding hosts already harvested."""
    if not self.subdomains or self.querydomain == self.domain:
        return "site:{domain} -site:www.{domain}".format(domain=self.domain)
    # Exclude the most frequent hosts found so far to surface new ones.
    excluded = ' -site:'.join(self.querydomain)
    return "site:{domain} -site:www.{domain} -site:{found} ".format(domain=self.domain, found=excluded)
class NetcraftEnum(enumratorBaseThreaded):
    """Subdomain enumerator that scrapes searchdns.netcraft.com result pages."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata and delegate shared state to the threaded base."""
        subdomains = subdomains or []
        self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
        self.engine_name = "Netcraft"
        self.lock = threading.Lock()
        super(NetcraftEnum, self).__init__(self.base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return

    def req(self, url, cookies=None):
        """GET *url* with the session headers and *cookies*; return None on failure."""
        cookies = cookies or {}
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies, verify=False)
        except Exception as e:
            self.print_(e)
            resp = None
        return resp

    def should_sleep(self):
        """Short random delay between page fetches to stay under rate limits."""
        time.sleep(random.randint(1, 2))
        return

    def get_next(self, resp):
        """Extract the absolute URL of the 'Next Page' link from *resp*.

        Callers must only invoke this after checking 'Next Page' is in *resp*,
        otherwise the [0] lookup raises IndexError.
        """
        link_regx = re.compile('<a.*?href="(.*?)">Next Page')
        link = link_regx.findall(resp)
        url = 'http://searchdns.netcraft.com' + link[0]
        return url

    def create_cookies(self, cookie):
        """Build the cookie dict Netcraft expects, answering the JS challenge."""
        cookies = dict()
        cookies_list = cookie[0:cookie.find(';')].split("=")
        cookies[cookies_list[0]] = cookies_list[1]
        # hashlib.sha1 requires utf-8 encoded str
        cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1]).encode('utf-8')).hexdigest()
        return cookies

    def get_cookies(self, headers):
        """Return challenge cookies from response *headers*, or an empty dict."""
        if 'set-cookie' in headers:
            cookies = self.create_cookies(headers['set-cookie'])
        else:
            cookies = {}
        return cookies

    def enumerate(self):
        """Walk the paginated results, collecting subdomains until the last page."""
        start_url = self.base_url.format(domain='example.com')
        resp = self.req(start_url)
        if resp is None:
            # Fix: the original dereferenced resp.headers unconditionally and
            # crashed with AttributeError when the initial request failed.
            return self.subdomains
        cookies = self.get_cookies(resp.headers)
        url = self.base_url.format(domain=self.domain)
        while True:
            resp = self.get_response(self.req(url, cookies))
            self.extract_domains(resp)
            if 'Next Page' not in resp:
                # Fix: removed an unreachable `break` that followed this return.
                return self.subdomains
            url = self.get_next(resp)
            self.should_sleep()

    def extract_domains(self, resp):
        """Collect subdomains of self.domain from the results table in *resp*."""
        links_list = list()
        link_regx = re.compile('<a class="results-table__host" href="(.*?)"')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                subdomain = urlparse.urlparse(link).netloc
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
class DNSdumpster(enumratorBaseThreaded):
    """Subdomain enumerator backed by dnsdumpster.com, with DNS liveness checks."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata; the semaphore bounds concurrent DNS lookups."""
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []
        self.engine_name = "DNSdumpster"
        self.threads = 70
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.q = q
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def check_host(self, host):
        """Resolve *host* (A record); record it in live_subdomains if it resolves.

        Returns True when the host resolved, False otherwise.
        """
        is_valid = False
        Resolver = dns.resolver.Resolver()
        Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
        self.lock.acquire()
        try:
            ip = Resolver.query(host, 'A')[0].to_text()
            if ip:
                if self.verbose:
                    self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                is_valid = True
                self.live_subdomains.append(host)
        except Exception:
            # Fix: narrowed the bare `except:`; resolution failure just means
            # the host is not live.
            pass
        finally:
            # Fix: release in `finally` so an exception cannot leak the
            # semaphore slot and starve the remaining worker threads.
            self.lock.release()
        return is_valid

    def req(self, req_method, url, params=None):
        """Issue a GET or POST with the dnsdumpster Referer header."""
        params = params or {}
        headers = dict(self.headers)
        headers['Referer'] = 'https://dnsdumpster.com'
        try:
            if req_method == 'GET':
                resp = self.session.get(url, headers=headers, timeout=self.timeout, verify=False)
            else:
                resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout, verify=False)
        except Exception as e:
            self.print_(e)
            resp = None
        return self.get_response(resp)

    def get_csrftoken(self, resp):
        """Extract the Django CSRF token from the landing-page HTML."""
        csrf_regex = re.compile('<input type="hidden" name="csrfmiddlewaretoken" value="(.*?)">', re.S)
        token = csrf_regex.findall(resp)[0]
        return token.strip()

    def enumerate(self):
        """Query dnsdumpster, then verify each found subdomain concurrently."""
        resp = self.req('GET', self.base_url)
        token = self.get_csrftoken(resp)
        params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
        post_resp = self.req('POST', self.base_url, params)
        self.extract_domains(post_resp)
        # Fix: the original joined each thread immediately after starting it,
        # which serialized the checks. Start all workers first, then join;
        # concurrency is bounded by the BoundedSemaphore inside check_host.
        workers = []
        for subdomain in self.subdomains:
            t = threading.Thread(target=self.check_host, args=(subdomain,))
            t.start()
            workers.append(t)
        for t in workers:
            t.join()
        return self.live_subdomains

    def extract_domains(self, resp):
        """Parse the 'Host Records' table and collect matching subdomains."""
        tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
        link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
        links = []
        try:
            results_tbl = tbl_regex.findall(resp)[0]
        except IndexError:
            # No host-record table in the response; nothing to extract.
            results_tbl = ''
        links_list = link_regex.findall(results_tbl)
        links = list(set(links_list))
        for link in links:
            subdomain = link.strip()
            if not subdomain.endswith(self.domain):
                continue
            if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                self.subdomains.append(subdomain.strip())
        return links
class Virustotal(enumratorBaseThreaded):
    """Subdomain enumerator that pages through the VirusTotal UI endpoint."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata and the first paging URL."""
        subdomains = subdomains or []
        base_url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'
        self.engine_name = "Virustotal"
        self.lock = threading.Lock()
        self.q = q
        super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.url = self.base_url.format(domain=self.domain)
        return

    # the main send_req need to be rewritten
    def send_req(self, url):
        """Fetch *url*; return the processed response text (None-safe)."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, verify=False)
        except Exception as e:
            self.print_(e)
            resp = None
        return self.get_response(resp)

    # once the send_req is rewritten we don't need to call this function, the stock one should be ok
    def enumerate(self):
        """Follow the paginated 'links.next' cursor until exhausted or blocked."""
        while self.url != '':
            payload = json.loads(self.send_req(self.url))
            if 'error' in payload:
                self.print_(Y + "[!] Error: Virustotal probably now is blocking our requests" + W)
                break
            # Advance the cursor first, then harvest this page's data.
            if 'links' in payload and 'next' in payload['links']:
                self.url = payload['links']['next']
            else:
                self.url = ''
            self.extract_domains(payload)
        return self.subdomains

    def extract_domains(self, resp):
        """Collect subdomains from an already-parsed JSON response."""
        # resp is already parsed as json
        try:
            for item in resp['data']:
                if item['type'] != 'domain':
                    continue
                subdomain = item['id']
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
class ThreatCrowd(enumratorBaseThreaded):
    """Subdomain enumerator backed by the ThreatCrowd domain-report API."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata and delegate to the threaded base class."""
        subdomains = subdomains or []
        base_url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}'
        self.engine_name = "ThreatCrowd"
        self.lock = threading.Lock()
        self.q = q
        super(ThreatCrowd, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """Fetch *url*, swallowing transport errors; return processed response."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, verify=False)
        except Exception:
            resp = None
        return self.get_response(resp)

    def enumerate(self):
        """Single-shot enumeration: one API call, then parse the result."""
        resp = self.req(self.base_url.format(domain=self.domain))
        self.extract_domains(resp)
        return self.subdomains

    def extract_domains(self, resp):
        """Collect subdomains from the JSON 'subdomains' array in *resp*."""
        try:
            for entry in json.loads(resp)['subdomains']:
                candidate = entry.strip()
                if not candidate.endswith(self.domain):
                    continue
                if candidate not in self.subdomains and candidate != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, candidate))
                    self.subdomains.append(candidate.strip())
        except Exception:
            pass
class CrtSearch(enumratorBaseThreaded):
    """Subdomain enumerator that scrapes certificate-transparency logs on crt.sh."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata and delegate to the threaded base class."""
        subdomains = subdomains or []
        base_url = 'https://crt.sh/?q=%25.{domain}'
        self.engine_name = "SSL Certificates"
        self.lock = threading.Lock()
        self.q = q
        super(CrtSearch, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """Fetch *url*, swallowing transport errors; return processed response."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, verify=False)
        except Exception:
            resp = None
        return self.get_response(resp)

    def enumerate(self):
        """Query crt.sh once and parse the HTML table when a response arrived."""
        resp = self.req(self.base_url.format(domain=self.domain))
        if resp:
            self.extract_domains(resp)
        return self.subdomains

    def extract_domains(self, resp):
        """Harvest subdomains from crt.sh HTML table cells."""
        cell_re = re.compile('<TD>(.*?)</TD>')
        try:
            for cell in cell_re.findall(resp):
                cell = cell.strip()
                # A single cell may list several names separated by <BR>.
                candidates = cell.split('<BR>') if '<BR>' in cell else [cell]
                for cand in candidates:
                    if not cand.endswith(self.domain) or '*' in cand:
                        continue
                    if '@' in cand:
                        # Certificates may embed e-mail addresses; keep the host part.
                        cand = cand[cand.find('@') + 1:]
                    if cand != self.domain and cand not in self.subdomains:
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, cand))
                        self.subdomains.append(cand.strip())
        except Exception as e:
            print(e)
            pass
class PassiveDNS(enumratorBaseThreaded):
    """Subdomain enumerator backed by the api.sublist3r.com passive-DNS service."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        """Set up engine metadata and delegate to the threaded base class."""
        subdomains = subdomains or []
        base_url = 'https://api.sublist3r.com/search.php?domain={domain}'
        self.engine_name = "PassiveDNS"
        self.lock = threading.Lock()
        self.q = q
        super(PassiveDNS, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """Fetch *url*, swallowing transport errors; return processed response."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, verify=False)
        except Exception:
            resp = None
        return self.get_response(resp)

    def enumerate(self):
        """Single API call; parse the JSON list of subdomains when present."""
        resp = self.req(self.base_url.format(domain=self.domain))
        if resp:
            self.extract_domains(resp)
        return self.subdomains

    def extract_domains(self, resp):
        """Collect subdomains from a JSON array response."""
        try:
            for candidate in json.loads(resp):
                if candidate not in self.subdomains and candidate != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, candidate))
                    self.subdomains.append(candidate.strip())
        except Exception:
            pass
def main(domain, threads, savefile, silent, verbose, engines):
search_list = set()
if is_windows:
subdomains_queue = list()
else:
subdomains_queue = multiprocessing.Manager().list()
# Validate domain
domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
if not domain_check.match(domain):
if not silent:
print(R + "Error: Please enter a valid domain" + W)
return []
if not domain.startswith('http://') or not domain.startswith('https://'):
domain = 'http://' + domain
parsed_domain = urlparse.urlparse(domain)
if not silent:
print(W + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)
if verbose and not silent:
print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
supported_engines = {'baidu': BaiduEnum,
'yahoo': YahooEnum,
'google': GoogleEnum,
'bing': BingEnum,
'ask': AskEnum,
'netcraft': NetcraftEnum,
'dnsdumpster': DNSdumpster,
'virustotal': Virustotal,
'threatcrowd': ThreatCrowd,
'ssl': CrtSearch,
'passivedns': PassiveDNS
}
chosenEnums = []
if engines is None:
chosenEnums = [
BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
CrtSearch, PassiveDNS
]
else:
engines = engines.split(',')
for engine in engines:
if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()])
# Start the | |
11390625 * uk_161
+ 961875 * uk_162
+ 81225 * uk_163
+ 6859 * uk_164
+ 3025 * uk_17
+ 5665 * uk_18
+ 1045 * uk_19
+ 55 * uk_2
+ 880 * uk_20
+ 9405 * uk_21
+ 12375 * uk_22
+ 1045 * uk_23
+ 10609 * uk_24
+ 1957 * uk_25
+ 1648 * uk_26
+ 17613 * uk_27
+ 23175 * uk_28
+ 1957 * uk_29
+ 103 * uk_3
+ 361 * uk_30
+ 304 * uk_31
+ 3249 * uk_32
+ 4275 * uk_33
+ 361 * uk_34
+ 256 * uk_35
+ 2736 * uk_36
+ 3600 * uk_37
+ 304 * uk_38
+ 29241 * uk_39
+ 19 * uk_4
+ 38475 * uk_40
+ 3249 * uk_41
+ 50625 * uk_42
+ 4275 * uk_43
+ 361 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 264958946983 * uk_47
+ 48875922259 * uk_48
+ 41158671376 * uk_49
+ 16 * uk_5
+ 439883300331 * uk_50
+ 578793816225 * uk_51
+ 48875922259 * uk_52
+ 153424975 * uk_53
+ 287323135 * uk_54
+ 53001355 * uk_55
+ 44632720 * uk_56
+ 477012195 * uk_57
+ 627647625 * uk_58
+ 53001355 * uk_59
+ 171 * uk_6
+ 538077871 * uk_60
+ 99257083 * uk_61
+ 83584912 * uk_62
+ 893313747 * uk_63
+ 1175412825 * uk_64
+ 99257083 * uk_65
+ 18309559 * uk_66
+ 15418576 * uk_67
+ 164786031 * uk_68
+ 216823725 * uk_69
+ 225 * uk_7
+ 18309559 * uk_70
+ 12984064 * uk_71
+ 138767184 * uk_72
+ 182588400 * uk_73
+ 15418576 * uk_74
+ 1483074279 * uk_75
+ 1951413525 * uk_76
+ 164786031 * uk_77
+ 2567649375 * uk_78
+ 216823725 * uk_79
+ 19 * uk_8
+ 18309559 * uk_80
+ 166375 * uk_81
+ 311575 * uk_82
+ 57475 * uk_83
+ 48400 * uk_84
+ 517275 * uk_85
+ 680625 * uk_86
+ 57475 * uk_87
+ 583495 * uk_88
+ 107635 * uk_89
+ 2572416961 * uk_9
+ 90640 * uk_90
+ 968715 * uk_91
+ 1274625 * uk_92
+ 107635 * uk_93
+ 19855 * uk_94
+ 16720 * uk_95
+ 178695 * uk_96
+ 235125 * uk_97
+ 19855 * uk_98
+ 14080 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 114180 * uk_100
+ 148500 * uk_101
+ 67980 * uk_102
+ 1646095 * uk_103
+ 2140875 * uk_104
+ 980045 * uk_105
+ 2784375 * uk_106
+ 1274625 * uk_107
+ 583495 * uk_108
+ 27000 * uk_109
+ 1521570 * uk_11
+ 92700 * uk_110
+ 10800 * uk_111
+ 155700 * uk_112
+ 202500 * uk_113
+ 92700 * uk_114
+ 318270 * uk_115
+ 37080 * uk_116
+ 534570 * uk_117
+ 695250 * uk_118
+ 318270 * uk_119
+ 5224057 * uk_12
+ 4320 * uk_120
+ 62280 * uk_121
+ 81000 * uk_122
+ 37080 * uk_123
+ 897870 * uk_124
+ 1167750 * uk_125
+ 534570 * uk_126
+ 1518750 * uk_127
+ 695250 * uk_128
+ 318270 * uk_129
+ 608628 * uk_13
+ 1092727 * uk_130
+ 127308 * uk_131
+ 1835357 * uk_132
+ 2387025 * uk_133
+ 1092727 * uk_134
+ 14832 * uk_135
+ 213828 * uk_136
+ 278100 * uk_137
+ 127308 * uk_138
+ 3082687 * uk_139
+ 8774387 * uk_14
+ 4009275 * uk_140
+ 1835357 * uk_141
+ 5214375 * uk_142
+ 2387025 * uk_143
+ 1092727 * uk_144
+ 1728 * uk_145
+ 24912 * uk_146
+ 32400 * uk_147
+ 14832 * uk_148
+ 359148 * uk_149
+ 11411775 * uk_15
+ 467100 * uk_150
+ 213828 * uk_151
+ 607500 * uk_152
+ 278100 * uk_153
+ 127308 * uk_154
+ 5177717 * uk_155
+ 6734025 * uk_156
+ 3082687 * uk_157
+ 8758125 * uk_158
+ 4009275 * uk_159
+ 5224057 * uk_16
+ 1835357 * uk_160
+ 11390625 * uk_161
+ 5214375 * uk_162
+ 2387025 * uk_163
+ 1092727 * uk_164
+ 3025 * uk_17
+ 1650 * uk_18
+ 5665 * uk_19
+ 55 * uk_2
+ 660 * uk_20
+ 9515 * uk_21
+ 12375 * uk_22
+ 5665 * uk_23
+ 900 * uk_24
+ 3090 * uk_25
+ 360 * uk_26
+ 5190 * uk_27
+ 6750 * uk_28
+ 3090 * uk_29
+ 30 * uk_3
+ 10609 * uk_30
+ 1236 * uk_31
+ 17819 * uk_32
+ 23175 * uk_33
+ 10609 * uk_34
+ 144 * uk_35
+ 2076 * uk_36
+ 2700 * uk_37
+ 1236 * uk_38
+ 29929 * uk_39
+ 103 * uk_4
+ 38925 * uk_40
+ 17819 * uk_41
+ 50625 * uk_42
+ 23175 * uk_43
+ 10609 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 77172508830 * uk_47
+ 264958946983 * uk_48
+ 30869003532 * uk_49
+ 12 * uk_5
+ 445028134253 * uk_50
+ 578793816225 * uk_51
+ 264958946983 * uk_52
+ 153424975 * uk_53
+ 83686350 * uk_54
+ 287323135 * uk_55
+ 33474540 * uk_56
+ 482591285 * uk_57
+ 627647625 * uk_58
+ 287323135 * uk_59
+ 173 * uk_6
+ 45647100 * uk_60
+ 156721710 * uk_61
+ 18258840 * uk_62
+ 263231610 * uk_63
+ 342353250 * uk_64
+ 156721710 * uk_65
+ 538077871 * uk_66
+ 62688684 * uk_67
+ 903761861 * uk_68
+ 1175412825 * uk_69
+ 225 * uk_7
+ 538077871 * uk_70
+ 7303536 * uk_71
+ 105292644 * uk_72
+ 136941300 * uk_73
+ 62688684 * uk_74
+ 1517968951 * uk_75
+ 1974237075 * uk_76
+ 903761861 * uk_77
+ 2567649375 * uk_78
+ 1175412825 * uk_79
+ 103 * uk_8
+ 538077871 * uk_80
+ 166375 * uk_81
+ 90750 * uk_82
+ 311575 * uk_83
+ 36300 * uk_84
+ 523325 * uk_85
+ 680625 * uk_86
+ 311575 * uk_87
+ 49500 * uk_88
+ 169950 * uk_89
+ 2572416961 * uk_9
+ 19800 * uk_90
+ 285450 * uk_91
+ 371250 * uk_92
+ 169950 * uk_93
+ 583495 * uk_94
+ 67980 * uk_95
+ 980045 * uk_96
+ 1274625 * uk_97
+ 583495 * uk_98
+ 7920 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 154000 * uk_100
+ 198000 * uk_101
+ 26400 * uk_102
+ 1684375 * uk_103
+ 2165625 * uk_104
+ 288750 * uk_105
+ 2784375 * uk_106
+ 371250 * uk_107
+ 49500 * uk_108
+ 2985984 * uk_109
+ 7303536 * uk_11
+ 622080 * uk_110
+ 331776 * uk_111
+ 3628800 * uk_112
+ 4665600 * uk_113
+ 622080 * uk_114
+ 129600 * uk_115
+ 69120 * uk_116
+ 756000 * uk_117
+ 972000 * uk_118
+ 129600 * uk_119
+ 1521570 * uk_12
+ 36864 * uk_120
+ 403200 * uk_121
+ 518400 * uk_122
+ 69120 * uk_123
+ 4410000 * uk_124
+ 5670000 * uk_125
+ 756000 * uk_126
+ 7290000 * uk_127
+ 972000 * uk_128
+ 129600 * uk_129
+ 811504 * uk_13
+ 27000 * uk_130
+ 14400 * uk_131
+ 157500 * uk_132
+ 202500 * uk_133
+ 27000 * uk_134
+ 7680 * uk_135
+ 84000 * uk_136
+ 108000 * uk_137
+ 14400 * uk_138
+ 918750 * uk_139
+ 8875825 * uk_14
+ 1181250 * uk_140
+ 157500 * uk_141
+ 1518750 * uk_142
+ 202500 * uk_143
+ 27000 * uk_144
+ 4096 * uk_145
+ 44800 * uk_146
+ 57600 * uk_147
+ 7680 * uk_148
+ 490000 * uk_149
+ 11411775 * uk_15
+ 630000 * uk_150
+ 84000 * uk_151
+ 810000 * uk_152
+ 108000 * | |
<reponame>ianhussey/destigmatize-suicide-bot<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to resolve double redirects, and to delete broken redirects.
Requires access to MediaWiki's maintenance pages or to a XML dump file.
Delete function requires adminship.
Syntax:
python pwb.py redirect action [-arguments ...]
where action can be one of these:
double Fix redirects which point to other redirects.
do Shortcut action command is "do".
broken Tries to fix redirects which point to nowhere by using the last
br moved target of the destination page. If this fails and the
-delete option is set, it either deletes the page or marks it
for deletion depending on whether the account has admin rights.
It will mark the redirect not for deletion if there is no speedy
deletion template available. Shortcut action command is "br".
both Both of the above. Retrieves redirect pages from live wiki,
not from a special page.
and arguments can be:
-xml Retrieve information from a local XML dump
(https://download.wikimedia.org). Argument can also be given as
"-xml:filename.xml". Cannot be used with -fullscan or -moves.
-fullscan Retrieve redirect pages from live wiki, not from a special page
Cannot be used with -xml.
-moves Use the page move log to find double-redirect candidates. Only
works with action "double", does not work with -xml.
NOTE: You may use only one of these options above.
If neither of -xml -fullscan -moves is given, info will be
loaded from a special page of the live wiki.
-page:title Work on a single page
-namespace:n Namespace to process. Can be given multiple times, for several
namespaces. If omitted, only the main (article) namespace is
treated.
-offset:n With -moves, the number of hours ago to start scanning moved
pages. With -xml, the number of the redirect to restart with
(see progress). Otherwise, ignored.
-start:title The starting page title in each namespace. Page need not exist.
-until:title The possible last page title in each namespace. Page need not
exist.
-total:n The maximum count of redirects to work upon. If omitted, there
is no limit.
-delete Prompt the user whether broken redirects should be deleted (or
marked for deletion if the account has no admin rights) instead
of just skipping them.
-sdtemplate:x Add the speedy deletion template string including brackets.
This enables overriding the default template via i18n or
to enable speedy deletion for projects other than wikipedias.
-always Don't prompt you for each replacement.
"""
#
# (C) <NAME>, 2004
# (C) <NAME>, 2009
# (C) xqt, 2009-2017
# (C) Pywikibot team, 2004-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import datetime
import sys
import pywikibot
from pywikibot import i18n, xmlreader
from pywikibot.bot import OptionHandler, SingleSiteBot
from pywikibot.exceptions import ArgumentDeprecationWarning
from pywikibot.textlib import extract_templates_and_params_regex_simple
from pywikibot.tools.formatter import color_format
from pywikibot.tools import issue_deprecation_warning
if sys.version_info[0] > 2:
basestring = (str, )
def space_to_underscore(link):
    """Return the canonical title of *link* with spaces replaced by underscores."""
    # previous versions weren't expecting spaces but underscores
    title = link.canonical_title()
    return title.replace(' ', '_')
class RedirectGenerator(OptionHandler):
"""Redirect generator."""
availableOptions = {
'fullscan': False,
'moves': False,
'namespaces': [0],
'offset': -1,
'page': None,
'start': None,
'total': None,
'until': None,
'xml': None,
}
def __init__(self, action, **kwargs):
    """Constructor.

    Args:
        action: one of 'double', 'broken' or 'both'; selects which
            generator method backs iteration over this object.
        kwargs: option values forwarded to OptionHandler; see
            availableOptions for the recognized keys.
    """
    super(RedirectGenerator, self).__init__(**kwargs)
    self.site = pywikibot.Site()
    self.use_api = self.getOption('fullscan')
    self.use_move_log = self.getOption('moves')
    self.namespaces = self.getOption('namespaces')
    self.offset = self.getOption('offset')
    self.page_title = self.getOption('page')
    self.api_start = self.getOption('start')
    self.api_number = self.getOption('total')
    self.api_until = self.getOption('until')
    self.xmlFilename = self.getOption('xml')
    # connect the generator selected by 'action' parameter
    # NOTE(review): __iter__ is assigned on the *class*, so the most
    # recently constructed instance decides iteration for all instances —
    # confirm only one RedirectGenerator is alive at a time.
    cls = self.__class__
    if action == 'double':
        cls.__iter__ = lambda slf: slf.retrieve_double_redirects()
    elif action == 'broken':
        cls.__iter__ = lambda slf: slf.retrieve_broken_redirects()
    elif action == 'both':
        cls.__iter__ = lambda slf: slf.get_redirects_via_api(maxlen=2)
def get_redirects_from_dump(self, alsoGetPageTitles=False):
    """
    Extract redirects from dump.

    Load a local XML dump file, look at all pages which have the
    redirect flag set, and find out where they're pointing at. Return
    a dictionary where the redirect names are the keys and the redirect
    targets are the values (both with spaces converted to underscores).

    When alsoGetPageTitles is True, additionally return the set of all
    page titles seen in the dump as a second value.
    """
    xmlFilename = self.xmlFilename
    redict = {}
    # open xml dump and read page titles out of it
    dump = xmlreader.XmlDump(xmlFilename)
    redirR = self.site.redirectRegex()
    readPagesCount = 0
    if alsoGetPageTitles:
        pageTitles = set()
    for entry in dump.parse():
        readPagesCount += 1
        # always print status message after 10000 pages
        if readPagesCount % 10000 == 0:
            pywikibot.output(u'%i pages read...' % readPagesCount)
        # Skip pages outside the configured namespaces.
        if len(self.namespaces) > 0:
            if pywikibot.Page(self.site, entry.title).namespace() \
                    not in self.namespaces:
                continue
        if alsoGetPageTitles:
            pageTitles.add(space_to_underscore(pywikibot.Link(entry.title,
                                                              self.site)))
        m = redirR.match(entry.text)
        if m:
            target = m.group(1)
            # There might be redirects to another wiki. Ignore these.
            target_link = pywikibot.Link(target, self.site)
            try:
                target_link.parse()
            except pywikibot.SiteDefinitionError as e:
                pywikibot.log(e)
                pywikibot.output(
                    u'NOTE: Ignoring {0} which is a redirect ({1}) to an '
                    u'unknown site.'.format(entry.title, target))
                target_link = None
            else:
                if target_link.site != self.site:
                    pywikibot.output(
                        'NOTE: Ignoring {0} which is a redirect to '
                        'another site {1}.'
                        .format(entry.title, target_link.site))
                    target_link = None
            # if the redirect does not link to another wiki
            if target_link and target_link.title:
                source = pywikibot.Link(entry.title, self.site)
                if target_link.anchor:
                    # A section anchor survives in target_link but is not
                    # kept in the mapping below.
                    pywikibot.output(
                        u'HINT: %s is a redirect with a pipelink.'
                        % entry.title)
                redict[space_to_underscore(source)] = (
                    space_to_underscore(target_link))
    if alsoGetPageTitles:
        return redict, pageTitles
    else:
        return redict
def get_redirect_pages_via_api(self):
    """Yield redirect Pages from the API, namespace by namespace."""
    for namespace in self.namespaces:
        pages = self.site.allpages(start=self.api_start,
                                   namespace=namespace,
                                   filterredir=True)
        if self.api_number:
            pages.set_maximum_items(self.api_number)
        for page in pages:
            # Stop entirely once a title reaches the configured upper bound.
            if self.api_until and \
                    page.title(withNamespace=False) >= self.api_until:
                return
            yield page
def _next_redirect_group(self):
"""Generator that yields batches of 500 redirects as a list."""
apiQ = []
for page in self.get_redirect_pages_via_api():
apiQ.append(str(page.pageid))
if len(apiQ) >= 500:
yield apiQ
apiQ = []
if apiQ:
yield apiQ
def get_redirects_via_api(self, maxlen=8):
    """
    Return a generator that yields tuples of data about redirect Pages.

    0 - page title of a redirect page
    1 - type of redirect:
        0 - broken redirect, target page title missing
        1 - normal redirect, target page exists and is not a
            redirect
        2..maxlen - start of a redirect chain of that many redirects
            (currently, the API seems not to return sufficient
            data to make these return values possible, but
            that may change)
        maxlen+1 - start of an even longer chain, or a loop
            (currently, the API seems not to return sufficient
            data to allow this return values, but that may
            change)
        None - start of a redirect chain of unknown length, or
            loop
    2 - target page title of the redirect, or chain (may not exist)
    3 - target page of the redirect, or end of chain, or page title
        where chain or loop detecton was halted, or None if unknown
    """
    for apiQ in self._next_redirect_group():
        # One 'query' API request per batch of up to 500 page ids.
        gen = pywikibot.data.api.Request(
            site=self.site, parameters={'action': 'query',
                                        'redirects': True,
                                        'pageids': apiQ})
        data = gen.submit()
        if 'error' in data:
            raise RuntimeError("API query error: %s" % data)
        if data == [] or 'query' not in data:
            raise RuntimeError("No results given.")
        redirects = {}
        pages = {}
        # Map each redirect source title to its immediate target title.
        redirects = dict((x['from'], x['to'])
                         for x in data['query']['redirects'])
        for pagetitle in data['query']['pages'].values():
            # False marks a missing target page (broken-redirect candidate).
            if 'missing' in pagetitle and 'pageid' not in pagetitle:
                pages[pagetitle['title']] = False
            else:
                pages[pagetitle['title']] = True
        for redirect in redirects:
            target = redirects[redirect]
            result = 0
            final = None
            try:
                if pages[target]:
                    final = target
                    try:
                        # Follow the chain: result counts the hops taken,
                        # final holds the last title reached before the
                        # lookup falls out of the redirect map (KeyError).
                        while result <= maxlen:
                            result += 1
                            final = redirects[final]
                        # result = None
                    except KeyError:
                        pass
            except KeyError:
                # Target title absent from 'pages': chain length unknown.
                result = None
                pass
            yield (redirect, result, target, final)
def retrieve_broken_redirects(self):
    """Yield titles (or Pages) of redirects whose target does not exist.

    The data source depends on the configured options: live API scan,
    local XML dump, a single page title, or the wiki's special page.
    """
    if self.use_api:
        yielded = 0
        for (pagetitle, redir_type, target, final) \
                in self.get_redirects_via_api(maxlen=2):
            if redir_type != 0:
                continue
            yield pagetitle
            if self.api_number:
                yielded += 1
                if yielded >= self.api_number:
                    break
    elif self.xmlFilename:
        # retrieve information from XML dump
        pywikibot.output(
            u'Getting a list of all redirects and of all page titles...')
        redirs, pageTitles = self.get_redirects_from_dump(
            alsoGetPageTitles=True)
        for (key, value) in redirs.items():
            if value not in pageTitles:
                yield key
    elif self.page_title:
        yield self.page_title
    else:
        pywikibot.output('Retrieving broken redirect special page...')
        for page in self.site.preloadpages(self.site.broken_redirects()):
            yield page
def retrieve_double_redirects(self):
"""Retrieve double redirects."""
if self.use_move_log:
gen = self.get_moved_pages_redirects()
for redir_page in gen:
yield redir_page.title()
elif self.use_api:
count = 0
for (pagetitle, type, target, final) \
in self.get_redirects_via_api(maxlen=2):
if type != 0 and type != 1:
yield pagetitle
if self.api_number:
count += 1
if count >= self.api_number:
break
elif self.xmlFilename:
redict = self.get_redirects_from_dump()
num = 0
for (key, value) in redict.items():
num += 1
# | |
j, k] * self.lsmhe.nuk_mhe[i, k] for k in self.lsmhe.ykk_mhe)
for j in self.lsmhe.ykk_mhe) for i in self.lsmhe.fe_t))
expr_u_obf = 0
for i in self.lsmhe.fe_t:
for u in self.u:
var_w = getattr(self.lsmhe, "w_" + u + "_mhe") #: u_noise
expr_u_obf += self.lsmhe.U_mhe[i, u] * var_w[i] ** 2
self.lsmhe.U_e_mhe = Expression(expr=0.5 * expr_u_obf) # how about this
self.lsmhe.Arrival_e_mhe = Expression(
expr=0.5 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) *
sum(self.lsmhe.PikN_mhe[j, k] * (self.xkN_l[k] - self.lsmhe.x_0_mhe[k]) for k in self.lsmhe.xkNk_mhe)
for j in self.lsmhe.xkNk_mhe))
self.lsmhe.Arrival_dummy_e_mhe = Expression(
expr=100000.0 * sum((self.xkN_l[j] - self.lsmhe.x_0_mhe[j]) ** 2 for j in self.lsmhe.xkNk_mhe))
self.lsmhe.obfun_dum_mhe_deb = Objective(sense=minimize, expr=1.0)
#: no arrival
self.lsmhe.obfun_dum_mhe = Objective(sense=minimize,
expr=self.lsmhe.R_e_mhe + self.lsmhe.Q_e_mhe + self.lsmhe.U_e_mhe)
self.lsmhe.obfun_dum_mhe.deactivate()
self.lsmhe.obfun_mhe_first = Objective(sense=minimize,
expr=self.lsmhe.Arrival_dummy_e_mhe)
self.lsmhe.obfun_mhe_first.deactivate()
self.lsmhe.obfun_mhe = Objective(sense=minimize,
expr=self.lsmhe.Arrival_e_mhe +
self.lsmhe.R_e_mhe +
self.lsmhe.Q_e_mhe +
self.lsmhe.U_e_mhe)
self.lsmhe.obfun_mhe.deactivate()
self._PI = {} #: Container of the KKT matrix
self.xreal_W = {}
self.curr_m_noise = {} #: Current measurement noise
self.curr_y_offset = {} #: Current offset of measurement
self.curr_u_offset = {} #: Current offset of the input
for y in self.y:
for j in self.y_vars[y]:
self.curr_m_noise[(y, j)] = 0.0
self.curr_y_offset[(y, j)] = 0.0
self.curr_meas[(y, j)] = 0.0
for u in self.u:
self.curr_u_offset[u] = 0.0
self.s_estimate = {}
self.s_real = {}
for x in self.x_noisy:
self.s_estimate[x] = []
self.s_real[x] = []
self.y_estimate = {}
self.y_real = {}
self.y_noise_jrnl = {}
self.yk0_jrnl = {}
for y in self.y:
self.y_estimate[y] = []
self.y_real[y] = []
self.y_noise_jrnl[y] = []
self.yk0_jrnl[y] = []
with open("res_mhe_label_" + self.res_file_suf + ".txt", "w") as f:
for x in self.x_noisy:
for j in self.x_vars[x]:
jth = (x, j)
jth = str(jth)
f.write(jth)
f.write('\t')
f.close()
f = open("timings_mhe_kaug_sens.txt", "a")
f.write('\n' + '-' * 30 + '\n')
f.write(str(self.int_file_mhe_suf))
f.write('\n')
f.close()
f = open("timings_mhe_kaug_cov.txt", "a")
f.write('\n' + '-' * 30 + '\n')
f.write(str(self.int_file_mhe_suf))
f.write('\n')
f.close()
f = open("timings_mhe_dot.txt", "a")
f.write('\n' + '-'*30 +'\n')
f.write(str(self.int_file_mhe_suf))
f.write('\n')
f.close()
def init_lsmhe_prep(self, ref, update=True):
    # type: (ConcreteModel, bool) -> None
    """Initializes the lsmhe in preparation phase.

    Simulates the horizon element by element with the helper model
    self.dum_mhe, loads each solution into self.lsmhe, solves the full
    horizon once, and (optionally) switches the model from simulation
    mode to estimation mode.

    Args:
        ref (ConcreteModel): The reference model used for the initial guess.
        update (bool): If true, activate the noisy-state continuity
            constraints and the MHE objective after initialization.
    """
    self.journalist("I", self._iteration_count, "init_lsmhe_prep", "Preparation phase MHE")
    dum = self.dum_mhe
    # Discretize the helper model once, if it has not been discretized yet.
    if not 'tau_points' in dum.t.get_discretization_info().keys():
        aug_discretization(self.dum_mhe, 1, self.ncp_tmhe)
    #: Load current solution
    load_iguess(ref, dum, 0, 0)
    self.load_init_state_gen(dum, src_kind="mod", ref=ref, fe=0)
    #: Patching of finite elements
    t0ncp = t_ij(self.lsmhe.t, 0, self.ncp_tmhe)
    for finite_elem in range(0, self.nfe_tmhe):
        #: Cycle ICS — carry the end state of one element as the next
        #: element's initial condition.
        for i in self.states:
            pn = i + "_ic"
            p = getattr(dum, pn)
            vs = getattr(dum, i)
            for ks in p.keys():
                p[ks].value = value(vs[(t0ncp,) + (ks,)])
        if finite_elem == 0:
            # The very first element also seeds the ICs of the MHE model.
            for i in self.states:
                pn = i + "_ic"
                p = getattr(self.lsmhe, pn)  #: Target
                vs = getattr(dum, i)  #: Source
                for ks in p.keys():
                    p[ks].value = value(vs[(t0ncp,) + (ks,)])
        self.patch_meas_mhe(self.PlantSample, fe=finite_elem)
        #: Solve
        self.solve_dyn(dum, o_tee=True)
        #: Patch
        load_iguess(dum, self.lsmhe, 0, finite_elem)
        self.patch_input_mhe("mod", src=dum, fe=finite_elem)
    self.lsmhe.name = "Preparation MHE"  #: Pretty much simulation
    tst = self.solve_dyn(self.lsmhe,
                         o_tee=True,
                         skip_update=False,
                         max_cpu_time=600,
                         jacobian_regularization_value=1e-06,
                         jacobian_regularization_exponent=2.,
                         halt_on_ampl_error=True,
                         output_file="prep_mhe.txt",
                         mu_strategy="adaptive",
                         ma57_pre_alloc=5)
    # A nonzero status means the full-horizon solve failed; dump the
    # problem for post-mortem and abort the run.
    if tst != 0:
        self.lsmhe.write_nl(name="failed_mhe.nl")
        sys.exit()
    self.lsmhe.name = "LSMHE (Least-Squares MHE)"
    if update:
        cut_off_time = t_ij(self.lsmhe.t, self.lsmhe.nfe_t - 1, 0)
        for i in self.x_noisy:  # only deactivate the relevant noisy-state continuity conditions
            cp_con = getattr(self.lsmhe, i + "dot_disc_eq")
            for ii in self.x_vars[i]:
                for t in self.lsmhe.t:
                    # Keep the constraint at t == 0 and past the cut-off.
                    if t >= cut_off_time or t == 0:
                        continue
                    cp_con[(t,) + ii].deactivate()
        self.lsmhe.noisy_cont.activate()  # activate new noisy-state continuity conditions
        self.lsmhe.obfun_dum_mhe_deb.deactivate()
        self.lsmhe.obfun_dum_mhe.activate()
        # self.deact_icc_mhe()
        self.lsmhe.hyk_c_mhe.activate()
        for u in self.u:
            # need to keep both!!
            cc = getattr(self.lsmhe, u + "_cdummy")  #: Get the constraint for input
            con_w = getattr(self.lsmhe, u + "_cdummy_mhe")  #: Get the constraint-noisy
            cc.deactivate()
            con_w.activate()
    self.journalist("I", self._iteration_count, "initialize_lsmhe", "Attempting to initialize lsmhe Done")
    def preparation_phase_mhe(self, as_strategy=False):
        """Prepare the MHE problem for the next solve: shift the horizon,
        update inputs and measurements, and initialize the last finite element.

        Args:
            as_strategy (bool): if True (advanced-step MHE), patch the
                measurement into the second-to-last finite element and
                predict measurements; otherwise patch at the default
                (last) position and skip prediction.
        """
        self.shift_mhe()
        self.shift_measurement_input_mhe()
        #: if as_ patch to a different position
        if as_strategy:
            # advanced-step variant: measurement goes to fe = nfe_tmhe - 2
            self.patch_meas_mhe(None, use_dict=True, fe=self.nfe_tmhe - 2)
            self.patch_input_mhe(src_kind="dict")
            self.init_step_mhe(patch_pred_y=True)
            self.journalist("I", self._iteration_count, "preparation_phase_mhe", "asMHE: Ready")
        else:
            self.patch_meas_mhe(None, use_dict=True)
            self.patch_input_mhe(src_kind="dict")  #: At this point it doesn't matter if this is the wrong input
            self.init_step_mhe(patch_pred_y=False)  #: Just for initialization purposes
            self.journalist("I", self._iteration_count, "preparation_phase_mhe", "idMHE: Ready")
def patch_meas_mhe(self, src, **kwargs):
"""Mechanism to assign a value of y0 to the current mhe from the dynamic model
By default load the measurement to the last finite element of the lsmhe
Args:
src(ConcreteModel): The reference model
Returns:
meas_dict (dict): A dictionary containing the measurements list by meas_var
"""
y0dest = getattr(self.lsmhe, "yk0_mhe") #: Param containing data.
fe = kwargs.pop("fe", self.nfe_tmhe - 1)
use_dict = kwargs.pop("use_dict", False)
#: Override patching
if use_dict:
self.journalist("I", self._iteration_count, "patch_meas_mhe", "use_dict")
for y in self.y:
for j in self.y_vars[y]:
k = self.yk_key[(y,) + j]
y0dest[fe, k].value = self.curr_meas[(y, j)]
return dict()
skip_update = kwargs.pop("skip_update", False)
noisy = kwargs.pop("noisy", True)
cp = getattr(src, "ncp_t")
cpa = cp #: From the source
meas_dic = dict.fromkeys(self.y)
l = []
tcpa = t_ij(src.t, 0, cpa)
for y in self.y:
lm = []
var = getattr(src, y)
for j in self.y_vars[y]:
k = self.yk_key[(y,) + j]
lm.append(value(var[(tcpa,) + j]))
y0dest[fe, k].value = value(var[(tcpa,) + j])
meas_dic[y] = lm
# if not skip_update: #: Update the mhe model
self.journalist("I", self._iteration_count, "patch_meas_mhe", "Measurement to:" + str(fe))
return meas_dic
def adjust_nu0_mhe(self):
"""Adjust the initial guess for the nu variable"""
for t in self.lsmhe.fe_t:
k = 0
for i in self.y:
for j in self.y_vars[i]:
kk = self.yk_key[(i,) + j]
target = value(self.lsmhe.yk0_mhe[t, kk]) - value(self.yk_l[t][kk])
self.lsmhe.nuk_mhe[t, kk].set_value(target)
k += 1
def adjust_w_mhe(self):
for i in range(0, self.nfe_tmhe - 1):
j = 0
for x in self.x_noisy:
x_var = getattr(self.lsmhe, x)
for k in self.x_vars[x]:
x1pvar_val = value(x_var[(i+1, 0), k])
x1var_val = value(x_var[(i, self.ncp_tmhe), k])
if self.IgnoreProcessNoise:
pass
else:
self.lsmhe.wk_mhe[i, j].set_value(x1pvar_val - x1var_val)
j += 1
def set_covariance_meas(self, cov_dict):
"""Sets covariance(inverse) for the measurements.
Args:
cov_dict (dict): a dictionary with the following key structure [(meas_name, j), (meas_name, k), time]
Returns:
None
"""
rtarget = getattr(self.lsmhe, "R_mhe")
if self.diag_Q_R:
for i in range(0, self.nfe_tmhe):
for y in self.y:
for jth in self.y_vars[y]: #: the jth variable
v_i = self.yk_key[(y,) + jth]
if cov_dict[y] == 0:
raise ZeroDivisionError('wrong covariance')
rtarget[i, v_i] = 1 / cov_dict[y]
else:
sys.exit(1)
def set_covariance_disturb(self, cov_dict):
"""Assign values to the covariance of the disturbance.
For now we only take diagonal covariance(variance) matrices.
Args:
cov_dict (dict): The values of the covariance
Returns:
None:
"""
qtarget = getattr(self.lsmhe, "Q_mhe")
if self.diag_Q_R:
for i in range(0, self.nfe_tmhe - 1):
for x in self.x_noisy:
for jth in self.x_vars[x]: #: the jth variable
v_i = self.xkN_key[(x,) + jth]
if cov_dict != 0.0:
qtarget[i, v_i] = 1 / cov_dict[x]
else:
raise ZeroDivisionError
else:
raise Exception("Not yet implemented [set_covariance_disturb]")
def set_covariance_u(self, cov_dict):
"""Sets covariance(inverse) for the states.
Args:
cov_dict (dict): a dictionary with the following key structure [(state_name, j), (state_name, k), time]
Returns:
None
"""
qtarget = getattr(self.lsmhe, "U_mhe")
for key in qtarget:
_t = key[0]
vni = key[1]
if cov_dict[vni] == 0.0:
raise ZeroDivisionError
qtarget[_t, vni] = 1 / cov_dict[vni]
    def shift_mhe(self):
        """Shifts current initial guesses of variables for the mhe problem by one finite element.

        For every active time-indexed Var of the lsmhe model, the value at
        finite element i+1 (same collocation point) is copied into element
        i; the last element keeps its old values.  Relies on Pyomo's
        private ``_implicit_subsets`` to decide whether time is the only
        index set or one of several.
        """
        for v in self.lsmhe.component_objects(Var, active=True):
            if v._implicit_subsets is None:
                if v.index_set() is self.lsmhe.t: #: time is the only set
                    for i in range(0, self.nfe_tmhe - 1):
                        for j in range(0, self.ncp_tmhe + 1):
                            t_dash_i = t_ij(self.lsmhe.t, i, j)
                            t = t_ij(self.lsmhe.t, i + 1, j)
                            val = value(v[t])
                            v[t_dash_i].set_value(val)
                else:
                    continue
            else:
                if self.lsmhe.t in v._implicit_subsets:
                    # remaining_set = set(product(v._implicit_subsets[1:], repeat=len(v._implicit_subsets[1:])+1))
                    # build the cross-product of every non-time index set
                    remaining_set = v._implicit_subsets[1]
                    for j in range(2, len(v._implicit_subsets)):
                        remaining_set *= v._implicit_subsets[j]
                    for index in remaining_set:
                        for i in range(0, self.nfe_tmhe - 1):
                            for j in range(0, self.ncp_tmhe + 1):
                                t_dash_i = t_ij(self.lsmhe.t, i, j)
                                t = t_ij(self.lsmhe.t, i + 1, j)
                                index = index if isinstance(index, tuple) else (index,)  #: Transform to tuple
                                val = value(v[(t,) + index])
                                v[(t_dash_i,) + index].set_value(val)
                else:
                    continue
def shift_measurement_input_mhe(self):
"""Shifts current measurements for the mhe problem"""
y0 = getattr(self.lsmhe, "yk0_mhe")
#: Start from the second fe
for i in range(1, self.nfe_tmhe):
for j in self.lsmhe.yk0_mhe.keys():
| |
remaining -= 1
cur_address += 1
if start > end and cur_address > max_address:
needs_wrap = False
cur_address = 0
def _format_disassembly(self, address, length, disasm):
cur_address = address
max_address = (2 ** self._mpu.ADDR_WIDTH) - 1
bytes_remaining = length
dump = ''
while bytes_remaining:
if cur_address > max_address:
cur_address = 0
dump += self.byteFmt % self._mpu.memory[cur_address] + " "
cur_address += 1
bytes_remaining -= 1
fieldwidth = 1 + int(1 + self.byteWidth / 4) * 3
fieldfmt = "%%-%ds" % fieldwidth
return "$" + self.addrFmt % address + " " + fieldfmt % dump + disasm
def help_disassemble(self):
self._output("disassemble <address_range>")
self._output("Disassemble instructions in the address range.")
self._output('Range is specified like "<start>:<end>".')
def help_step(self):
self._output("step")
self._output("Single-step through instructions.")
    def do_step(self, args):
        """Execute a single instruction, then disassemble at the new PC."""
        self._mpu.step()
        self.do_disassemble(self.addrFmt % self._mpu.pc)
def help_return(self):
self._output("return")
self._output("Continues execution and returns to the monitor just")
self._output("before the next RTS or RTI is executed.")
def do_return(self, args):
returns = [0x60, 0x40] # RTS, RTI
self._run(stopcodes=returns)
def help_goto(self):
self._output("goto <address>")
self._output("Change the PC to address and continue execution.")
def do_goto(self, args):
if args == '':
return self.help_goto()
self._mpu.pc = self._address_parser.number(args)
stps = [0xdb] # STP
self._run(stopcodes=stps)
    def _run(self, stopcodes):
        """Run the MPU until it is about to execute one of *stopcodes*
        (opcode values) or reaches a breakpoint.

        The stop test happens BEFORE each step, so the stopping
        instruction itself is never executed.
        """
        stopcodes = set(stopcodes)
        breakpoints = set(self._breakpoints)
        mpu = self._mpu
        mem = self._mpu.memory
        # Switch to immediate (noncanonical) no-echo input mode on POSIX
        # operating systems. This has no effect on Windows.
        console.noncanonical_mode(self.stdin)
        if not breakpoints:
            # fast path: no breakpoint membership test in the inner loop
            while True:
                if mem[mpu.pc] in stopcodes:
                    break
                mpu.step()
        else:
            while True:
                pc = mpu.pc
                if mem[pc] in stopcodes:
                    break
                if pc in breakpoints:
                    msg = "Breakpoint %d reached."
                    self._output(msg % self._breakpoints.index(pc))
                    break
                mpu.step()
        # Switch back to the previous input mode.
        console.restore_mode()
def help_radix(self):
self._output("radix [H|D|O|B]")
self._output("Set default radix to hex, decimal, octal, or binary.")
self._output("With no argument, the current radix is printed.")
    def help_cycles(self):
        """Print usage for the ``cycles`` command."""
        self._output("Display the total number of cycles executed.")
    def do_cycles(self, args):
        """Print the total number of cycles the MPU has executed."""
        self._output(str(self._mpu.processorCycles))
def do_radix(self, args):
radixes = {'Hexadecimal': 16, 'Decimal': 10, 'Octal': 8, 'Binary': 2}
if args != '':
new = args[0].lower()
changed = False
for name, radix in radixes.items():
if name[0].lower() == new:
self._address_parser.radix = radix
changed = True
if not changed:
self._output("Illegal radix: %s" % args)
for name, radix in radixes.items():
if self._address_parser.radix == radix:
self._output("Default radix is %s" % name)
def help_tilde(self):
self._output("~ <number>")
self._output("Display a number in decimal, hex, octal, and binary.")
    def do_tilde(self, args):
        """Display *args* as decimal, hex, octal, and binary."""
        if args == '':
            return self.help_tilde()
        try:
            num = self._address_parser.number(args)
            self._output("+%u" % num)
            self._output("$" + self.byteFmt % num)
            self._output("%04o" % num)
            self._output(itoa(num, 2).zfill(8))
        except KeyError:
            # raised by the address parser for an unknown label
            self._output("Bad label: %s" % args)
        except OverflowError:
            # number wider than the address space
            self._output("Overflow error: %s" % args)
def help_registers(self):
self._output("registers[<name>=<value> [, <name>=<value>]*]")
self._output("Assign respective registers. With no parameters,")
self._output("display register values.")
    def do_registers(self, args):
        """Assign MPU registers from 'name=value' pairs in *args*.

        Values go through the address parser, so labels and the current
        radix are honored.  Every register except ``pc`` is additionally
        range-checked against the byte mask.  With empty *args* this is a
        no-op (the register display is handled elsewhere).
        """
        if args == '':
            return
        pairs = re.findall('([^=,\s]*)=([^=,\s]*)', args)
        if pairs == []:
            return self._output("Syntax error: %s" % args)
        for register, value in pairs:
            if register not in ('pc', 'sp', 'a', 'x', 'y', 'p'):
                self._output("Invalid register: %s" % register)
            else:
                try:
                    intval = self._address_parser.number(value)
                except KeyError as exc:  # label not found
                    self._output(exc.args[0])
                    continue
                except OverflowError as exc:  # wider than address space
                    msg = "Overflow: %r too wide for register %r"
                    self._output(msg % (value, register))
                    continue
                if register != 'pc':
                    # pc may be address-width; all other registers are one byte
                    if intval != (intval & self.byteMask):
                        msg = "Overflow: %r too wide for register %r"
                        self._output(msg % (value, register))
                        continue
                setattr(self._mpu, register, intval)
def help_cd(self):
self._output("cd <directory>")
self._output("Change the working directory.")
    def do_cd(self, args):
        """Change the working directory, then print the (possibly
        unchanged) current directory."""
        if args == '':
            return self.help_cd()
        try:
            os.chdir(args)
        except OSError as exc:
            msg = "Cannot change directory: [%d] %s" % (exc.errno,
                                                        exc.strerror)
            self._output(msg)
        self.do_pwd()
    def help_pwd(self):
        """Print usage for the ``pwd`` command."""
        self._output("Show the current working directory.")
def do_pwd(self, args=None):
cwd = os.getcwd()
self._output(cwd)
def help_load(self):
self._output("load <filename|url> <address|top>")
self._output("Load a file into memory at the specified address.")
self._output('An address of "top" loads into the top of memory.')
self._output("Commodore-style load address bytes are ignored.")
def do_load(self, args):
split = shlex.split(args)
if len(split) not in (1, 2):
self._output("Syntax error: %s" % args)
return
filename = split[0]
if "://" in filename:
try:
f = urlopen(filename)
bytes = f.read()
f.close()
except Exception as exc:
msg = "Cannot fetch remote file: %s" % str(exc)
self._output(msg)
return
else:
try:
f = open(filename, 'rb')
bytes = f.read()
f.close()
except (OSError, IOError) as exc:
msg = "Cannot load file: [%d] %s" % (exc.errno, exc.strerror)
self._output(msg)
return
if len(split) == 2:
if split[1] == "top":
# load a ROM to top of memory
top_address = self.addrMask
program_size = len(bytes) // (self.byteWidth // 8)
start = top_address - program_size + 1
else:
start = self._address_parser.number(split[1])
else:
start = self._mpu.pc
if self.byteWidth == 8:
if isinstance(bytes, str):
bytes = map(ord, bytes)
else: # Python 3
bytes = [ b for b in bytes ]
elif self.byteWidth == 16:
def format(msb, lsb):
if isinstance(bytes, str):
return (ord(msb) << 8) + ord(lsb)
else: # Python 3
return (msb << 8) + lsb
bytes = list(map(format, bytes[0::2], bytes[1::2]))
self._fill(start, start, bytes)
def help_save(self):
self._output("save \"filename\" <start> <end>")
self._output("Save the specified memory range as a binary file.")
self._output("Commodore-style load address bytes are not written.")
def do_save(self, args):
split = shlex.split(args)
if len(split) != 3:
self._output("Syntax error: %s" % args)
return
filename = split[0]
start = self._address_parser.number(split[1])
end = self._address_parser.number(split[2])
mem = self._mpu.memory[start:end + 1]
try:
f = open(filename, 'wb')
for m in mem:
# output each octect from msb first
for shift in range(self.byteWidth - 8, -1, -8):
f.write(bytearray([(m >> shift) & 0xff]))
f.close()
except (OSError, IOError) as exc:
msg = "Cannot save file: [%d] %s" % (exc.errno, exc.strerror)
self._output(msg)
return
self._output("Saved +%d bytes to %s" % (len(mem), filename))
def help_fill(self):
self._output("fill <address_range> <data_list>")
self._output("Fill memory in the address range with the data in")
self._output("<data_list>. If the size of the address range is")
self._output("greater than the size of the data_list, the data_list ")
self._output("is repeated.")
def do_fill(self, args):
split = shlex.split(args)
if len(split) < 2:
return self.help_fill()
try:
start, end = self._address_parser.range(split[0])
filler = list(map(self._address_parser.number, split[1:]))
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
else:
self._fill(start, end, filler)
def _fill(self, start, end, filler):
address = start
length, index = len(filler), 0
if start == end:
end = start + length - 1
if (end > self.addrMask):
end = self.addrMask
while address <= end:
address &= self.addrMask
self._mpu.memory[address] = (filler[index] & self.byteMask)
index += 1
if index == length:
index = 0
address += 1
fmt = (end - start + 1, start, end)
starttoend = "$" + self.addrFmt + " to $" + self.addrFmt
self._output(("Wrote +%d bytes from " + starttoend) % fmt)
def help_mem(self):
self._output("mem <address_range>")
self._output("Display the contents of memory.")
self._output('Range is specified like "<start:end>".')
def do_mem(self, args):
split = shlex.split(args)
if len(split) != 1:
return self.help_mem()
start, end = self._address_parser.range(split[0])
line = self.addrFmt % start + ":"
for address in range(start, end + 1):
byte = self._mpu.memory[address]
more = " " + self.byteFmt % byte
exceeded = len(line) + len(more) > self._width
if exceeded:
self._output(line)
line = self.addrFmt % address + ":"
line += more
self._output(line)
def help_add_label(self):
self._output("add_label <address> <label>")
self._output("Map a given address to a label.")
def do_add_label(self, args):
split = shlex.split(args)
if len(split) != 2:
self._output("Syntax error: %s" % args)
return self.help_add_label()
try:
address = self._address_parser.number(split[0])
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
except OverflowError:
self._output("Overflow error: %s" % args)
else:
label = split[1]
self._address_parser.labels[label] = address
def help_show_labels(self):
self._output("show_labels")
self._output("Display current label mappings.")
def do_show_labels(self, args):
values = list(self._address_parser.labels.values())
keys = list(self._address_parser.labels.keys())
byaddress = list(zip(values, keys))
byaddress.sort()
for address, label in byaddress:
self._output(self.addrFmt % address + ": " + label)
def help_delete_label(self):
self._output("delete_label <label>")
self._output("Remove the specified label from the label tables.")
def do_delete_label(self, args):
if args == '':
return self.help_delete_label()
if args in self._address_parser.labels:
del self._address_parser.labels[args]
def do_width(self, args):
if args != '':
try:
new_width = int(args)
if new_width >= 10:
self._width = new_width
else:
self._output("Minimum terminal width is 10")
except ValueError:
self._output("Illegal width: %s" % args)
self._output("Terminal width is %d" % self._width)
def | |
#!/usr/local/bin/python
##################
# import modules #
##################
from math import sqrt,exp
from random import random,randint,seed
####################
# define constants #
####################
nAtoms = 40             # number of atoms
Radius = 25.0           # disk radius; beware: must be in a sensible range
                        # (w.r.t. nAtoms) so that all atoms can be placed
Rmin = 2 * Radius       # distance at which the LJ potential is at its minimum
BoxDim = [500,500]      # box dimensions [x, y]
Atom_Coord = []         # per-atom [x, y, charge] entries; shape [NMAX][2]
deltaRmax = 50.0        # maximum per-step particle displacement
Epsilon = 20.0          # LJ well depth
Dielec = 1.0            # dielectric constant
qat = Radius            # atom absolute charge
frac_neg = 0.5          # fraction of negative charges
frac_swap = 0.2         # fraction of MC steps with charge swapping
OverlapFr = 0.0         # fraction of overlap allowed when placing atoms
CutOff = 250            # non-bonded cutoff distance
CutOffSquare = CutOff**2
speed = 50              # canvas update speed
cstboltz = 8.3502E-03   # Boltzmann's constant in J/mol/K
#cstboltz = 0.00198722  # Boltzmann's constant in cal/mol/K
Temperature = 300.0     # temperature in K
Seed = 100              # random seed
##################
# some functions #
##################
def dist(A,B):
    """Euclidean distance between the first two coordinates of A and B."""
    dx = A[0] - B[0]
    dy = A[1] - B[1]
    return sqrt(dx**2 + dy**2)
# change sign
def SignR(a,b):
    """Return a carrying the sign of b (negative when b <= 0)."""
    return a if b > 0 else -a
# generate a random rgb color like #xxxxxx (xx should be an hexadecimal nb)
def random_color():
    """Return a random '#rrggbb' color string (lowercase hex digits)."""
    return "#" + "".join(hex(randint(0, 15))[-1] for _ in range(6))
# generate a rgb color based on charge like #xxxxxx (xx should be an hexadecimal nb)
def charge_color(charge,qat):
    """Map a charge to a display color: white for exactly +qat, dark grey
    for anything else (including -qat).

    The original initialized ``tmp = "#111111"`` and then unconditionally
    overwrote it in both branches — dead code, removed here.
    """
    return "#FFFFFF" if charge == qat else "#333333"
#########################
# initialize parameters #
#########################
# generates random coordinates
def InitConf(n,dim,radius,qat,frac_neg):
    """Place n non-overlapping disks of the given radius at random in the box.

    The first int(n * frac_neg) atoms receive charge -qat, the rest +qat,
    so the returned list of [x, y, charge] entries is negatives-first.

    Raises:
        SystemExit: when no non-overlapping position is found after 100000
            trials (radius too large for n atoms in the box).  The
            original called ``sys.exit()`` without ever importing ``sys``,
            which raised NameError instead — fixed by raising SystemExit
            directly (what sys.exit() does anyway).
    """
    print("Initializing box, please wait...")
    tmp_coord = []
    ntrial = 0
    nneg = int(float(n) * frac_neg)   # number of negative charges
    npos = n - nneg
    # the first atom is placed unconditionally
    x = random() * (dim[0] - radius) + radius
    y = random() * (dim[1] - radius) + radius
    tmp_coord.append([x, y, qat if npos == n else -qat])
    i = 1
    while i < n:
        x = random() * (dim[0] - radius) + radius
        y = random() * (dim[1] - radius) + radius
        # check whether the new particle overlaps an existing one
        # (placeable == 1 means NO overlap was detected)
        placeable = 1
        for j in range(i):
            if dist(tmp_coord[j], [x, y]) < (1 - OverlapFr) * 2 * radius:
                placeable = 0
        if placeable:
            # negatives first, then positives (same order as the original
            # two separate placement loops)
            charge = -qat if i < nneg else qat
            tmp_coord.append([x, y, charge])
            i += 1
        ntrial = ntrial + 1
        if ntrial > 100000:
            print("initialisation failed")
            print("==> reduce radius or number of atoms")
            raise SystemExit
    return tmp_coord
# generates random charges
def InitCharge(n,dim,qat,frac_neg):
    """Reassign charges in the global Atom_Coord list: the first
    int(n * frac_neg) atoms become -qat, the remainder +qat."""
    global Atom_Coord
    print("Initializing charges, please wait...")
    nneg = int(float(n) * frac_neg)
    for idx in range(n):
        Atom_Coord[idx][2] = -qat if idx < nneg else qat
####################
# calculate energy #
####################
# classical LJ
def LJ(r,epsilon,rmin):
    """Classical 12-6 Lennard-Jones potential at distance r."""
    sr = rmin / r
    return epsilon * (sr ** 12 - sr ** 6)
# classical Coulomb
def Coulomb(r,dielec,qa,qb):
    """Classical Coulomb energy of charges qa and qb at distance r."""
    return (qa * qb) / (dielec * r)
# classical Coulomb2
def Coulomb2(r,dielec,qa,qb):
    """Coulomb energy where r is the *squared* distance."""
    return (qa * qb) / (dielec * sqrt(r))
# version without boundary conditions
def Calc_Ene2(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Total LJ (+ optional Coulomb) energy, without boundary conditions
    or cutoff.

    Bug fix: ``distsquare`` is now reset for every pair.  The original
    initialized it once before the pair loops, so squared distances
    accumulated across all pairs (the sibling Calc_Ene resets it per pair).
    """
    Ene = 0.0
    rmin_exp6 = rmin**6
    # doubly nested loop over all particle pairs
    for i in range(len(coord)-1):
        for j in range(i+1,len(coord)):
            # calculate the squared atomic distance (reset each pair)
            distsquare = 0
            for k in range(2):
                tmp = coord[j][k] - coord[i][k]
                distsquare += tmp**2
            qa = coord[i][2]
            qb = coord[j][2]
            Ene += LJ2(distsquare, epsilon, rmin_exp6)
            if (elec): Ene += Coulomb2(distsquare,dielec,qa,qb)
    return Ene
# version without boundary conditions, single particles
def Calc_Ene2Single(nat,coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Energy of atom *nat* with every other atom; no boundaries, no cutoff.

    Bug fixes vs. the original:
    * ``distsquare`` is reset for each partner (it used to accumulate
      across the loop);
    * the loop now covers ALL other atoms — ``range(len(coord)-1)``
      silently skipped the interaction with the last atom whenever *nat*
      was not the last one.
    """
    Ene = 0.0
    rmin_exp6 = rmin**6
    i = nat
    for j in range(len(coord)):
        if j == i:
            continue
        # squared atomic distance (reset each partner)
        distsquare = 0
        for k in range(2):
            tmp = coord[j][k] - coord[i][k]
            distsquare += tmp**2
        qa = coord[i][2]
        qb = coord[j][2]
        Ene += LJ2(distsquare, epsilon, rmin_exp6)
        if (elec): Ene += Coulomb2(distsquare,dielec,qa,qb)
    return Ene
# calculate LJ from the squared distance
def LJ2(distsquare, epsilon, rmin_exp6):
    """LJ energy computed from a squared distance; rmin_exp6 = rmin**6."""
    Z = (1 / distsquare) ** 3 * rmin_exp6  # (rmin/r)**6
    return epsilon * Z * (Z - 1)
# version with boundary conditions
def Calc_Ene(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Total LJ (+ optional Coulomb) energy with minimum-image periodic
    boundary conditions and a squared non-bonded cutoff.

    coord holds [x, y, charge] entries; boxdim the box lengths; rmin the
    LJ minimum distance; cutoffsquare the squared cutoff; elec toggles the
    electrostatic term.
    """
    Ene = 0.0 ; distsquare = 0
    rmin_exp6 = rmin**6
    # doubly nested loop over all particule pairs
    for i in range(len(coord)-1):
        for j in range(i+1,len(coord)):
            # calculate the squared atomic distance
            distsquare = 0
            for k in range(2):
                tmp = coord[j][k] - coord[i][k]
                # chooses the nearest image
                halfbox = boxdim[k]/2
                tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
                distsquare += tmp**2
            # compute vdw and Coulomb energy (inside the cutoff only)
            if distsquare < cutoffsquare:
                qa = coord[i][2]
                qb = coord[j][2]
                Ene += LJ2(distsquare, epsilon, rmin_exp6)
                if (elec): Ene += Coulomb2(distsquare,dielec,qa,qb)
    return Ene
# version with boundary conditions, single particle
def Calc_EneSingle(nat,coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
    """Energy of atom *nat* with every other atom, using minimum-image
    periodic boundaries and a squared cutoff.

    Bug fix vs. the original: the loop now covers ALL other atoms —
    ``range(len(coord)-1)`` silently skipped the interaction with the
    last atom whenever *nat* was not the last one, making the Monte-Carlo
    energy difference inconsistent with Calc_Ene.
    """
    Ene = 0.0
    rmin_exp6 = rmin**6
    i = nat
    for j in range(len(coord)):
        if j == i:
            continue
        # squared minimum-image distance
        distsquare = 0
        for k in range(2):
            tmp = coord[j][k] - coord[i][k]
            # chooses the nearest image
            halfbox = boxdim[k]/2
            tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
            distsquare += tmp**2
        # compute vdw and Coulomb energy (inside the cutoff only)
        if distsquare < cutoffsquare:
            qa = coord[i][2]
            qb = coord[j][2]
            Ene += LJ2(distsquare, epsilon, rmin_exp6)
            if (elec): Ene += Coulomb2(distsquare,dielec,qa,qb)
    return Ene
############################
# move particules in a MC #
############################
def Go(*args):
global Atom_Coord,Radius,deltaRmax,BoxDim,Epsilon,Rmin,CutOffSquare,Iterations,Ene,Accepted
global Color,sttext0,entext1,ptext2,tmove,paccept,Dielec,root,canevas,proba_boltzmann,xx
if Iterations > 0:
sttext0.destroy()
entext1.destroy()
ptext2.destroy()
tmove.destroy()
# calculate Energy
Ene = Calc_Ene(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
mynewtext="step %d" % Iterations
sttext0=Label(top,text=mynewtext)
sttext0.pack(side='left')
mynewtext="E=%.3f" % Ene
entext1=Label(top,text=mynewtext)
entext1.pack(side='left')
mynewtext="Paccept= %.2f" % paccept
ptext2=Label(top,text=mynewtext)
ptext2.pack(side='left')
mynewtext=" "
tmove=Label(top,text=mynewtext)
tmove.pack(side='left')
ACCEPTED = 0
frac_simple_move=1-frac_swap
calc_elec=1
if qat == 0:
frac_simple_move=1.0
calc_elec=0
while (not ACCEPTED):
Iterations += 1
# select a coordinate randomly
RANDOM_atom = randint(0,len(Atom_Coord)-1)
RANDOM_coord = randint(0,1)
EneSingle = Calc_EneSingle(RANDOM_atom,Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim,calc_elec)
# save old coordinates of that atom
Xold = Atom_Coord[RANDOM_atom][0]
Yold = Atom_Coord[RANDOM_atom][1]
xx=random()
if xx<frac_simple_move:
type = "move"
# move the particule
factor = ((2*random() - 1) * deltaRmax)
Atom_Coord[RANDOM_atom][RANDOM_coord] += factor
# apply boudary conditions
for i in range(2): # i=0 -> case x coordinate ; i=1 -> case y coordinate
halfbox = BoxDim[i]/2
Z = Atom_Coord[RANDOM_atom][RANDOM_coord]
Atom_Coord[RANDOM_atom][RANDOM_coord] = Z - SignR(halfbox,Z) - SignR(halfbox,Z-BoxDim[i])
EneSingle_new = Calc_EneSingle(RANDOM_atom,Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim,calc_elec)
deltaE = EneSingle_new - EneSingle
if deltaE < 0.0:
proba_boltzmann = 1.0
ACCEPTED = 1 ; Accepted += 1 ; Ene = Ene + deltaE
else:
proba_boltzmann = exp(-deltaE/(cstboltz*Temperature))
xx = random()
if proba_boltzmann > xx:
ACCEPTED = 1 ; Accepted += 1 ; Ene = Ene + deltaE
else:
ACCEPTED = 0
# get back the orginal coordinate
Atom_Coord[RANDOM_atom][RANDOM_coord] -= factor
paccept = 0.01*ACCEPTED+0.99*paccept;
if Iterations > 0:
sttext0.destroy()
entext1.destroy()
ptext2.destroy()
tmove.destroy()
mynewtext="step %d" % Iterations
sttext0=Label(top,text=mynewtext)
sttext0.pack(side='left')
mynewtext="E= %.3f" % Ene
entext1=Label(top,text=mynewtext)
entext1.pack(side='left')
mynewtext="Paccept= %.2f" % paccept
ptext2=Label(top,text=mynewtext)
ptext2.pack(side='left')
mynewtext="move"
tmove=Label(top,text=mynewtext)
tmove.pack(side='left')
else:
type="swap"
RANDOM_atom2 = randint(0,len(Atom_Coord)-1)
Atom_Coord[RANDOM_atom][0] = Atom_Coord[RANDOM_atom2][0]
Atom_Coord[RANDOM_atom][1] = Atom_Coord[RANDOM_atom2][1]
Atom_Coord[RANDOM_atom2][0] = Xold
Atom_Coord[RANDOM_atom2][1] = Yold
Ene_new = Calc_Ene(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim,calc_elec)
if Ene_new < Ene:
proba_boltzmann = 1.0
ACCEPTED = 1 ; Accepted += 1 ; Ene = Ene_new
else:
deltaE = Ene_new - Ene
proba_boltzmann = exp(-deltaE/(cstboltz*Temperature))
xx = random()
if proba_boltzmann > xx:
ACCEPTED = 1 ; Accepted += 1 ; Ene = Ene_new
else:
ACCEPTED = 0
# get back the orginal coordinate
Atom_Coord[RANDOM_atom2][0] = Atom_Coord[RANDOM_atom][0]
Atom_Coord[RANDOM_atom2][1] = Atom_Coord[RANDOM_atom][1]
Atom_Coord[RANDOM_atom][0] = Xold
Atom_Coord[RANDOM_atom][1] = Yold
paccept = 0.01*ACCEPTED+0.99*paccept;
if Iterations > 0:
sttext0.destroy()
entext1.destroy()
ptext2.destroy()
tmove.destroy()
mynewtext="step %d" % Iterations
sttext0=Label(top,text=mynewtext)
sttext0.pack(side='left')
mynewtext="E= %.3f" % Ene
entext1=Label(top,text=mynewtext)
entext1.pack(side='left')
mynewtext="Paccept= %.2f" % paccept
ptext2=Label(top,text=mynewtext)
ptext2.pack(side='left')
mynewtext="swap"
tmove=Label(top,text=mynewtext)
tmove.pack(side='left')
if (float(Iterations)%50.0) == 0.0:
print("step %6i E= %f Paccepted= %5.3f Boltzmann-factor= %10.8f random= %6.4f" % (Iterations,Ene,(Accepted/float(Iterations)),proba_boltzmann,xx))
elif (proba_boltzmann < 1.0 and proba_boltzmann > xx):
print("step %6i E= %f Paccepted= %5.3f Boltzmann-factor= %10.8f random= %6.4f" % (Iterations,Ene,(Accepted/float(Iterations)),proba_boltzmann,xx))
if type=="move":
| |
for bp in bit_position:
if not self.bit_position_limits[0] <= bp <= self.bit_position_limits[1]:
raise ValueError('Data generator pattern bit position needs to be between 0 and 35.')
if not isinstance(pattern_array, np.ndarray):
raise TypeError('Data generator pattern needs to be numpy.ndarray type')
if np.any(pattern_array < 0) or np.any(pattern_array > 1):
raise ValueError('Data generator pattern elements need to be integer 0 or 1')
if not self.start_address_limits[0] <= start_address <= self.start_address_limits[1]:
raise ValueError('Data generator pattern starting address needs to be between 0 and 65535.')
if length is None:
length = pattern_array.size
elif length != pattern_array.size:
raise ValueError('Data generator pattern array length must be the same as the given pattern length.')
if not self.memory_length_limits[0] <= length <= self.memory_length_limits[1]:
raise ValueError('Data generator pattern length needs to be between 1 and 65536.')
# convert numbers to strings
bit_position = ["{:.0f}".format(bp) for bp in bit_position]
start_address = "{:.0f}".format(start_address)
length = "{:.0f}".format(length)
digits = "{:.0f}".format(len(length)) # length is a string, so its len() gives the number of digits as an int.
pattern_string = self.pattern_array_to_string(pattern_array)
command = [self.pattern_cmd + ' ' + bp + ',' + start_address + ',' + length +
',#' + digits + length + pattern_string for bp in bit_position]
command = '\n' + '\n'.join(command) + '\n'
return self.instrument.write(command)
# return [self.simple_write(cmd) for cmd in command] does not work
# the write function does not like to be saved somewhere :/
def get_pattern(self, bit_position, start_address=0, length=None):
if not self.bit_position_limits[0] <= bit_position <= self.bit_position_limits[1]: # hardware limits
raise ValueError('Data generator pattern bit position needs to be between 0 and 35.')
if not self.start_address_limits[0] <= start_address <= self.start_address_limits[1]: # hardware limits
raise ValueError('Data generator pattern starting address needs to be between 0 and 65535.')
if length is None:
length = self.get_memory()
elif not self.memory_length_limits[0] <= length <= self.memory_length_limits[1]: # hardware limits
raise ValueError('Data generator pattern length needs to be between 1 and 65536.')
# convert numbers to strings
bit_position = "{:.0f}".format(bit_position)
start_address = "{:.0f}".format(start_address)
length = "{:.0f}".format(length)
command = self.pattern_cmd + '? ' + bit_position + ',' + start_address + ',' + length
self.simple_write(command)
read_line = self.simple_read()
# The output is of the form ':[CMD] [BITPOS],[STARTADD],[LEN] #[DIGITSOFPATTERN][LEN][PATTERN]\n'
# Since we know the length of the pattern we requested, we can take ony the last digits we care about
# (minus the new line)
pattern_string = read_line[-int(length)-1:-1] # we take only the last pattern length digits
return self.pattern_string_to_array(pattern_string)
def is_running(self):
self.simple_write(self.running_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.running_cmd)
return bool(int(value))
    def add_sequence_step(self, name, line_number=0, repetitions=1, jump_to_line_number=0, wait_on_trigger=0,
                          event_jump=0, infinite_loop=1):
        """Append/overwrite one line of the data generator's sequence table.

        Sequences only matter when the mode state is ENHANCED.  To reuse a
        group of blocks with shared attributes, define a subsequence of
        those blocks and put the subsequence under the sequence; blocks,
        subsequences and sequences can each be repeated independently.

        Args:
            name (str): subsequence or block name to place on this line.
            line_number (int): sequence line to write (usually line 0 is
                simply overwritten).  NOTE(review): not range-checked here.
            repetitions (int): times the step repeats when the sequence is
                not an infinite loop (1..65536).
            jump_to_line_number (int): line jumped to on an event.
                NOTE(review): not range-checked here.
            wait_on_trigger (int): 0/1 — wait for a trigger before running.
            event_jump (int): 0/1 — whether an event jumps to
                ``jump_to_line_number``.
            infinite_loop (int): 0/1 — run indefinitely instead of
                ``repetitions`` times.
        """
        if not isinstance(name, str):
            raise TypeError('Data generator subsequence/block name to be appended in the sequence should be a string.')
        if not self.sequence_repetition_limits[0] <= repetitions <= self.sequence_repetition_limits[1]:
            raise ValueError('Data generator sequence step repetitions should be between 1 and 65536.')
        if wait_on_trigger != 0 and wait_on_trigger != 1:
            raise ValueError('Data generator sequence step wait_one_trigger should be either 0 or 1.')
        if event_jump != 0 and event_jump != 1:
            raise ValueError('Data generator sequence step event_jump should be either 0 or 1.')
        if infinite_loop != 0 and infinite_loop != 1:
            raise ValueError('Data generator sequence step infinite_loop should be either 0 or 1.')
        # convert name to right format
        name = "\"" + name + "\""
        # convert numbers to strings
        line_number = "{:.0f}".format(line_number)
        repetitions = "{:.0f}".format(repetitions)
        jump_to_line_number = "{:.0f}".format(jump_to_line_number)
        wait_on_trigger = "{:.0f}".format(wait_on_trigger)
        event_jump = "{:.0f}".format(event_jump)
        infinite_loop = "{:.0f}".format(infinite_loop)
        command = self.sequence_add_cmd + ' ' + line_number + ',' + name.upper() + ',' + repetitions + ',' + \
            jump_to_line_number + ',' + wait_on_trigger + ',' + event_jump + ',' + infinite_loop
        return self.simple_write(command)
    def define_sequence(self, names, repetitions=None, jump_to_line_number=None, wait_on_trigger=None, event_jump=None,
                        infinite_loop=None):
        # Define a multi-step data generator sequence, one step per entry in
        # 'names'. Sequences only matter if the mode state is ENHANCED.
        # When several blocks share the attributes below, define a subsequence
        # containing those blocks and put the subsequence in the sequence.
        # Different blocks of patterns can be repeated different amounts of
        # times; the same holds for subsequences and sequences.
        # Per-step semantics:
        # - Repetitions: times the step repeats before stopping, if the mode
        #   state is NOT 'ENHANCED' and the step is not an infinite loop.
        # - Jump to line number: sequence line to jump to in case of an event.
        # - Wait on trigger: presumably waits for a trigger before running the
        #   step -- TODO confirm against the instrument programmer manual.
        # - Event_jump: whether to jump on an event (to 'jump_to_line_number').
        # - Infinite loop: if 1 the step runs indefinitely; if 0 it repeats
        #   'repetitions' times before stopping.
        # All per-step arguments default to one entry per name:
        # repetitions=1, jump_to_line_number=0, wait_on_trigger=0,
        # event_jump=0, infinite_loop=1.
        if isinstance(names, str):
            names = [names]
        if not np.all([isinstance(n, str) for n in names]):
            raise TypeError('Data generator subsequence/block_names to be appended in sequence step must be a string or'
                            ' a list of strings.')
        if repetitions is None:
            repetitions = [1]*len(names)
        if not np.all([self.sequence_repetition_limits[0] <= rep <= self.sequence_repetition_limits[1]
                       for rep in repetitions]):
            raise ValueError('One or more data generator subsequence/block repetitions are not between 1 and 65536.')
        if jump_to_line_number is None:
            jump_to_line_number = [0]*len(names)
        # NOTE(review): this checks jump_to_line_number (0..65535) but the
        # message says 'repetitions' -- fix the literal in a code change.
        if not np.all([0 <= jtln <= 65535 for jtln in jump_to_line_number]):
            raise ValueError('One or more data generator subsequence/block repetitions are not between 1 and 65535.')
        if wait_on_trigger is None:
            wait_on_trigger = [0]*len(names)
        if not np.all([(wot == 0 or wot == 1) for wot in wait_on_trigger]):
            raise ValueError('One or more data generator subsequence/block wait_on_trigger values are neither 0 nor 1.')
        if event_jump is None:
            event_jump = [0]*len(names)
        if not np.all([(ej == 0 or ej == 1) for ej in event_jump]):
            raise ValueError('One or more data generator subsequence/block event_jump values are neither 0 nor 1.')
        if infinite_loop is None:
            infinite_loop = [1]*len(names)
        if not np.all([(il == 0 or il == 1) for il in infinite_loop]):
            raise ValueError('One or more data generator subsequence/block infinite_loop values are neither 0 nor 1.')
        # convert numbers to strings
        repetitions = [str(r) for r in repetitions]
        jump_to_line_number = [str(jtln) for jtln in jump_to_line_number]
        wait_on_trigger = [str(wot) for wot in wait_on_trigger]
        event_jump = [str(ej) for ej in event_jump]
        infinite_loop = [str(il) for il in infinite_loop]
        # One newline-separated line per step:
        # NAME,repetitions,jump_to_line_number,wait_on_trigger,event_jump,infinite_loop
        subsequence_string = '\n'.join([names[i].upper() + ',' + repetitions[i] + ',' + jump_to_line_number[i] + ',' +
                                        wait_on_trigger[i] + ',' + event_jump[i] + ',' + infinite_loop[i]
                                        for i in range(len(names))])
        definition_length = str(len(subsequence_string)) # | |
# This config runs the parallelism benchmarks from the Savina suite.
#
# Each experiment only has one sequence parameter, they are
# intended to produce box plots and not line diagrams.
#
# It runs the Akka implementation and LF C++ implementations.
#
# Recommended plotter module: plotterBoxerrorbar
import os
########### INTERFACE ########################################################
# Change parameters listed here according to your environment and likings.
# All paths are relative to the current working directory when running
# this script.
#
# Requirements:
# - Python module cog, install with 'pip3 install cogapp'
# - java version 1.8 for Savina, test with 'java -version'
# - Savina repository from https://github.com/shamsimam/savina
# - Executable lfc in PATH.
# Where are the .lf files of the Savina Cpp implementation?
lfCppSourceFilePathBase = '../../benchmark/Cpp/Savina'
# Path to the jar file from the Savina benchmark suite:
savinaJarPath = '../../../savina-original-var-threads/target/savina-0.0.1-SNAPSHOT-jar-with-dependencies.jar'
# The package path inside of Savina to the benchmarks:
savinaPackagePathBase = 'edu.rice.habanero.benchmarks'
# Executables used to preprocess (cog), compile (lfc) and run (java) benchmarks:
pythonExe = 'python3'
lfcExe = 'lfc'
javaExe = 'java'
# Options for the benchmarks (number of measured iterations per benchmark):
numIterationsDefault = 12
numIterationsAkka = 12
########### INTERFACE ########################################################
# predefined global variables for convenience
# Output parser module per sequence (benchmark target).
# NOTE(review): 'lf-cpp-2' is declared here and in the plot settings, but no
# experiment below defines run commands for it -- confirm whether intentional.
parsers = {
    'savina-akka-default': 'parserSavina',
    'lf-cpp-1': 'parserLfCpp',
    'lf-cpp-2': 'parserLfCpp',
    'lf-cpp-4': 'parserLfCpp',
    'lf-cpp-8': 'parserLfCpp',
    'lf-cpp-16': 'parserLfCpp',
    'lf-c-1': 'parserLfC'
}
# Plot color index per sequence.
sequenceColors = {
    'savina-akka-default': '1',
    'lf-cpp-1': '2',
    'lf-cpp-2': '3',
    'lf-cpp-4': '4',
    'lf-cpp-8': '5',
    'lf-cpp-16': '6',
    'lf-c-1': '7'
}
# Human-readable plot label per sequence.
sequenceNames = {
    'lf-cpp-1': 'LF1',
    'lf-cpp-2': 'LF2',
    'lf-cpp-4': 'LF4',
    'lf-cpp-8': 'LF8',
    'lf-cpp-16': 'LF16',
    'savina-akka-default': 'AK',
    'lf-c-1': 'LF C (1 thread)'
}
# Order in which sequences appear in the plots.
arrangementSequences = [
    'savina-akka-default',
    'lf-cpp-1',
    'lf-cpp-2',
    'lf-cpp-4',
    'lf-cpp-8',
    'lf-cpp-16',
    'lf-c-1',
]
# Shell command(s) run after each experiment to remove generated artifacts.
cleanup = [
    "rm -rf build include lib share src-gen bin"
]
# parameters for the different benchmarks to run
numThreadsLfCpp1 = 1
numThreadsLfCpp2 = 2
numThreadsLfCpp4 = 4
numThreadsLfCpp8 = 8
numThreadsLfCpp16 = 16
# Filled below, one entry per benchmark.
experiments = {}
# --- Apsp: All Pairs Shortest Path ------------------------------------------
# The run/pre parameter slots are shared module globals reassigned per section;
# empty slots vanish when the command strings are tokenized with .split().
binName = 'ApspBenchmarkGenerator'
lfSrcPath = f'apsp/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.apsp.ApspAkkaActorBenchmark'
runParamNameLf1 = f'--maxEdgeWeight'
runParamValue1 = '100'
runParamNameLf2 = f''
runParamValue2 = ''
runParamNameLf3 = f''
runParamValue3 = ''
runParamNameLf4 = f''
runParamValue4 = ''
# Compile-time parameters injected into the .lf source via cog (-D name=value).
preParamNameLf1 = f'numNodes'
preParamValue1 = '300'
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f'blockSize'
preParamValue2 = '50'
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp16} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdAkka = f'{javaExe} -classpath {savinaJarPath} {akkaPkgPath} -iter {numIterationsAkka} -n {preParamValue1} -b {preParamValue2} -w {runParamValue1}'
experiments['Apsp'] = {
    'description': f'All Pairs Shortest Path benchmark from the Savina suite.',
    'plotTitle': f'All Pairs Shortest Path',
    'plotYAxisLabel': 'Execution time in ms (median)',
    'plotSequenceColors': sequenceColors,
    'plotSequenceNames': sequenceNames,
    'plotArrangementSequences': arrangementSequences,
    'parsers': parsers,
    'finalCleanupExperiment': cleanup,
    'sequences': [
        ('1', {
            # Only 'lf-cpp-1' preprocesses and compiles the binary; the other
            # LF thread counts reuse it and just run with different --threads.
            'lf-cpp-1': [ f'{pythonExe} -m cogapp -r {preParamString1} {preParamString2} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          f'{lfcExe} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          runCmdLf1.split() ],
            'lf-cpp-4': [ runCmdLf4.split() ],
            'lf-cpp-8': [ runCmdLf8.split() ],
            'lf-cpp-16': [ runCmdLf16.split() ],
            'savina-akka-default': [ f'{runCmdAkka}'.split() ]
        })
    ]
}
# --- Astar: guided graph search ---------------------------------------------
binName = 'GuidedSearchBenchmarkGenerator'
lfSrcPath = f'astar/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.astar.GuidedSearchAkkaActorBenchmark'
runParamNameLf1 = f'--threshold'
runParamValue1 = '1024'
runParamNameLf2 = f'--gridSize'
runParamValue2 = '30'
runParamNameLf3 = f'--priorities'
runParamValue3 = '30'
runParamNameLf4 = f''
runParamValue4 = ''
preParamNameLf1 = f'numWorkers'
preParamValue1 = '20'
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f''
preParamValue2 = ''
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp16} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdAkka = f'{javaExe} -classpath {savinaJarPath} {akkaPkgPath} -iter {numIterationsAkka} -w {preParamValue1} -t {runParamValue1} -g {runParamValue2} -p {runParamValue3}'
experiments['Astar'] = {
    'description': f'A-star benchmark from the Savina suite.',
    'plotTitle': f'A-star',
    'plotYAxisLabel': 'Execution time in ms (median)',
    'plotSequenceColors': sequenceColors,
    'plotSequenceNames': sequenceNames,
    'plotArrangementSequences': arrangementSequences,
    'parsers': parsers,
    'finalCleanupExperiment': cleanup,
    'sequences': [
        ('1', {
            'lf-cpp-1': [ f'{pythonExe} -m cogapp -r {preParamString1} {preParamString2} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          f'{lfcExe} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          runCmdLf1.split() ],
            'lf-cpp-4': [ runCmdLf4.split() ],
            'lf-cpp-8': [ runCmdLf8.split() ],
            'lf-cpp-16': [ runCmdLf16.split() ],
            'savina-akka-default': [ f'{runCmdAkka}'.split() ]
        })
    ]
}
# --- NQueens: N queens, first k solutions ------------------------------------
binName = 'NQueensBenchmarkGenerator'
lfSrcPath = f'nqueenk/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.nqueenk.NQueensAkkaActorBenchmark'
runParamNameLf1 = f'--size'
runParamValue1 = '12'
runParamNameLf2 = f'--threshold'
runParamValue2 = '4'
runParamNameLf3 = f'--solutionsLimit'
runParamValue3 = '1500000'
runParamNameLf4 = f'--priorities'
runParamValue4 = '10'
preParamNameLf1 = f'numWorkers'
preParamValue1 = '20'
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f''
preParamValue2 = ''
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp16} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdAkka = f'{javaExe} -classpath {savinaJarPath} {akkaPkgPath} -iter {numIterationsAkka} -n {runParamValue1} -t {runParamValue2} -w {preParamValue1} -s {runParamValue3} -p {runParamValue4}'
experiments['NQueens'] = {
    'description': f'N Queens k Solutions benchmark from the Savina suite.',
    'plotTitle': f'N Queens k Solutions',
    'plotYAxisLabel': 'Execution time in ms (median)',
    'plotSequenceColors': sequenceColors,
    'plotSequenceNames': sequenceNames,
    'plotArrangementSequences': arrangementSequences,
    'parsers': parsers,
    'finalCleanupExperiment': cleanup,
    'sequences': [
        ('1', {
            'lf-cpp-1': [ f'{pythonExe} -m cogapp -r {preParamString1} {preParamString2} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          f'{lfcExe} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          runCmdLf1.split() ],
            'lf-cpp-4': [ runCmdLf4.split() ],
            'lf-cpp-8': [ runCmdLf8.split() ],
            'lf-cpp-16': [ runCmdLf16.split() ],
            'savina-akka-default': [ f'{runCmdAkka}'.split() ]
        })
    ]
}
# --- MatrixMultiplication: recursive matrix multiplication -------------------
binName = 'MatMulBenchmarkGenerator'
lfSrcPath = f'recmatmul/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.recmatmul.MatMulAkkaActorBenchmark'
runParamNameLf1 = f'--dataLength'
runParamValue1 = '1024'
runParamNameLf2 = f'--blockThreshold'
runParamValue2 = '16384'
runParamNameLf3 = f'--priorities'
runParamValue3 = '10'
runParamNameLf4 = f''
runParamValue4 = ''
preParamNameLf1 = f'numWorkers'
preParamValue1 = '20'
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f''
preParamValue2 = ''
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp16} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdAkka = f'{javaExe} -classpath {savinaJarPath} {akkaPkgPath} -iter {numIterationsAkka} -n {runParamValue1} -t {runParamValue2} -w {preParamValue1} -p {runParamValue3}'
experiments['MatrixMultiplication'] = {
    'description': f'Recursive Matrix Multiplication benchmark from the Savina suite.',
    'plotTitle': f'Recursive Matrix Multiplication',
    'plotYAxisLabel': 'Execution time in ms (median)',
    'plotSequenceColors': sequenceColors,
    'plotSequenceNames': sequenceNames,
    'plotArrangementSequences': arrangementSequences,
    'parsers': parsers,
    'finalCleanupExperiment': cleanup,
    'sequences': [
        ('1', {
            'lf-cpp-1': [ f'{pythonExe} -m cogapp -r {preParamString1} {preParamString2} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          f'{lfcExe} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          runCmdLf1.split() ],
            'lf-cpp-4': [ runCmdLf4.split() ],
            'lf-cpp-8': [ runCmdLf8.split() ],
            'lf-cpp-16': [ runCmdLf16.split() ],
            'savina-akka-default': [ f'{runCmdAkka}'.split() ]
        })
    ]
}
# --- RadixSort ---------------------------------------------------------------
# No compile-time (cog) parameters here, so 'lf-cpp-1' only compiles and runs.
binName = 'RadixSortBenchmark'
lfSrcPath = f'radixsort/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.radixsort.RadixSortAkkaActorBenchmark'
runParamNameLf1 = f'--dataSize'
runParamValue1 = '100000'
runParamNameLf2 = f'--maxValue'
runParamValue2 = '1152921504606846976'
runParamNameLf3 = f'--seed'
runParamValue3 = '2048'
runParamNameLf4 = f''
runParamValue4 = ''
preParamNameLf1 = f''
preParamValue1 = ''
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f''
preParamValue2 = ''
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp16} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdAkka = f'{javaExe} -classpath {savinaJarPath} {akkaPkgPath} -iter {numIterationsAkka} -n {runParamValue1} -m {runParamValue2} -s {runParamValue3}'
experiments['RadixSort'] = {
    'description': f'Radix Sort benchmark from the Savina suite.',
    'plotTitle': f'Radix Sort',
    'plotYAxisLabel': 'Execution time in ms (median)',
    'plotSequenceColors': sequenceColors,
    'plotSequenceNames': sequenceNames,
    'plotArrangementSequences': arrangementSequences,
    'parsers': parsers,
    'finalCleanupExperiment': cleanup,
    'sequences': [
        ('1', {
            'lf-cpp-1': [ f'{lfcExe} {os.path.join(lfCppSourceFilePathBase, lfSrcPath)}'.split(),
                          runCmdLf1.split() ],
            'lf-cpp-4': [ runCmdLf4.split() ],
            'lf-cpp-8': [ runCmdLf8.split() ],
            'lf-cpp-16': [ runCmdLf16.split() ],
            'savina-akka-default': [ f'{runCmdAkka}'.split() ]
        })
    ]
}
# --- FilterBank --------------------------------------------------------------
binName = 'FilterBankBenchmarkGenerator'
lfSrcPath = f'filterbank/{binName}.lf'
akkaPkgPath = f'{savinaPackagePathBase}.filterbank.FilterBankAkkaActorBenchmark'
runParamNameLf1 = f'--numSimulations'
runParamValue1 = '34816'
runParamNameLf2 = f'--numColumns'
runParamValue2 = '16384'
runParamNameLf3 = f''
runParamValue3 = ''
runParamNameLf4 = f''
runParamValue4 = ''
preParamNameLf1 = f'numChannels'
preParamValue1 = '8'
preParamString1 = f'-D {preParamNameLf1}={preParamValue1}' if len(preParamNameLf1) > 0 else ''
preParamNameLf2 = f''
preParamValue2 = ''
preParamString2 = f'-D {preParamNameLf2}={preParamValue2}' if len(preParamNameLf2) > 0 else ''
runCmdLf1 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp1} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf4 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp4} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf8 = f'bin/{binName} --fast --numIterations {numIterationsDefault} --threads {numThreadsLfCpp8} {runParamNameLf1} {runParamValue1} {runParamNameLf2} {runParamValue2} {runParamNameLf3} {runParamValue3} {runParamNameLf4} {runParamValue4}'
runCmdLf16 = f'bin/{binName} | |
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "UKM_hadcrut.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def ViBlGrWhYeOrRe(self):
cname = "ViBlGrWhYeOrRe"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ViBlGrWhYeOrRe.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def ViBlGrWhYeOrRe_r(self):
cname = "ViBlGrWhYeOrRe_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "ViBlGrWhYeOrRe.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhBlGrYeRe(self):
cname = "WhBlGrYeRe"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhBlGrYeRe.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhBlGrYeRe_r(self):
cname = "WhBlGrYeRe_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhBlGrYeRe.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhBlReWh(self):
cname = "WhBlReWh"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhBlReWh.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhBlReWh_r(self):
cname = "WhBlReWh_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhBlReWh.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhViBlGrYeOrRe(self):
cname = "WhViBlGrYeOrRe"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhViBlGrYeOrRe.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhViBlGrYeOrRe_r(self):
cname = "WhViBlGrYeOrRe_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhViBlGrYeOrRe.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhViBlGrYeOrReWh(self):
cname = "WhViBlGrYeOrReWh"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhViBlGrYeOrReWh.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhViBlGrYeOrReWh_r(self):
cname = "WhViBlGrYeOrReWh_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhViBlGrYeOrReWh.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteBlue(self):
cname = "WhiteBlue"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteBlue.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteBlue_r(self):
cname = "WhiteBlue_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteBlue.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteBlueGreenYellowRed(self):
cname = "WhiteBlueGreenYellowRed"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteBlueGreenYellowRed.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteBlueGreenYellowRed_r(self):
cname = "WhiteBlueGreenYellowRed_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteBlueGreenYellowRed.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteGreen(self):
cname = "WhiteGreen"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteGreen.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteGreen_r(self):
cname = "WhiteGreen_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteGreen.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteYellowOrangeRed(self):
cname = "WhiteYellowOrangeRed"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteYellowOrangeRed.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def WhiteYellowOrangeRed_r(self):
cname = "WhiteYellowOrangeRed_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "WhiteYellowOrangeRed.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg(self):
cname = "amwg"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg_r(self):
cname = "amwg_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg256(self):
cname = "amwg256"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg256.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg256_r(self):
cname = "amwg256_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg256.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg_blueyellowred(self):
cname = "amwg_blueyellowred"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg_blueyellowred.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def amwg_blueyellowred_r(self):
cname = "amwg_blueyellowred_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg_blueyellowred.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_9step(self):
cname = "cb_9step"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_9step.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_9step_r(self):
cname = "cb_9step_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_9step.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_rainbow(self):
cname = "cb_rainbow"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_rainbow.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_rainbow_r(self):
cname = "cb_rainbow_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_rainbow.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_rainbow_inv(self):
cname = "cb_rainbow_inv"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_rainbow_inv.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cb_rainbow_inv_r(self):
cname = "cb_rainbow_inv_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cb_rainbow_inv.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_0(self):
cname = "circular_0"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_0.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_0_r(self):
cname = "circular_0_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_0.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_1(self):
cname = "circular_1"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_1.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_1_r(self):
cname = "circular_1_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_1.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_2(self):
cname = "circular_2"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_2.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def circular_2_r(self):
cname = "circular_2_r"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "circular_2.rgb")
cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cividis(self):
cname = "cividis"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cividis.rgb")
cmap = Colormap(self._coltbl(cmap_file), name=cname)
matplotlib.cm.register_cmap(name=cname, cmap=cmap)
return cmap
@property
def cividis_r(self):
    """Reversed NCL 'cividis' colormap, registered with matplotlib on first use."""
    cname = "cividis_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cividis.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_b2r(self):
    """NCL 'cmp_b2r' colormap, registered with matplotlib on first use."""
    cname = "cmp_b2r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_b2r.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_b2r_r(self):
    """Reversed NCL 'cmp_b2r' colormap, registered with matplotlib on first use."""
    cname = "cmp_b2r_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_b2r.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_flux(self):
    """NCL 'cmp_flux' colormap, registered with matplotlib on first use."""
    cname = "cmp_flux"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_flux.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_flux_r(self):
    """Reversed NCL 'cmp_flux' colormap, registered with matplotlib on first use."""
    cname = "cmp_flux_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_flux.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_haxby(self):
    """NCL 'cmp_haxby' colormap, registered with matplotlib on first use."""
    cname = "cmp_haxby"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_haxby.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cmp_haxby_r(self):
    """Reversed NCL 'cmp_haxby' colormap, registered with matplotlib on first use."""
    cname = "cmp_haxby_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cmp_haxby.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cosam(self):
    """NCL 'cosam' colormap, registered with matplotlib on first use."""
    cname = "cosam"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cosam.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cosam_r(self):
    """Reversed NCL 'cosam' colormap, registered with matplotlib on first use."""
    cname = "cosam_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cosam.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cosam12(self):
    """NCL 'cosam12' colormap, registered with matplotlib on first use."""
    cname = "cosam12"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cosam12.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cosam12_r(self):
    """Reversed NCL 'cosam12' colormap, registered with matplotlib on first use."""
    cname = "cosam12_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cosam12.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cyclic(self):
    """NCL 'cyclic' colormap, registered with matplotlib on first use."""
    cname = "cyclic"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cyclic.rgb")
    cmap = Colormap(self._coltbl(cmap_file), name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def cyclic_r(self):
    """Reversed NCL 'cyclic' colormap, registered with matplotlib on first use."""
    cname = "cyclic_r"
    # Public lookup instead of the private matplotlib.cm._cmap_registry attribute.
    try:
        return matplotlib.cm.get_cmap(cname)
    except ValueError:
        pass  # not registered yet -- build and register below
    cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "cyclic.rgb")
    cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)
    matplotlib.cm.register_cmap(name=cname, cmap=cmap)
    return cmap
@property
def default(self):
cname = "default"
if cname in matplotlib.cm._cmap_registry:
return matplotlib.cm.get_cmap(cname)
cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "default.rgb")
| |
<filename>dali/python/nvidia/dali/plugin/pytorch.py
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend import TensorGPU, TensorListGPU
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
from nvidia.dali import types
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
import torch
import torch.utils.dlpack as torch_dlpack
import ctypes
import math
import numpy as np
# Mapping from NumPy dtypes (as reported by DALI tensors) to the matching
# torch dtypes, used when allocating destination tensors for copies.
to_torch_type = {
    np.dtype(np_dtype): torch_dtype
    for np_dtype, torch_dtype in [
        (np.float32, torch.float32),
        (np.float64, torch.float64),
        (np.float16, torch.float16),
        (np.uint8, torch.uint8),
        (np.int8, torch.int8),
        (np.int16, torch.int16),
        (np.int32, torch.int32),
        (np.int64, torch.int64),
    ]
}
def feed_ndarray(dali_tensor, arr, cuda_stream=None):
    """
    Copy contents of DALI tensor to PyTorch's Tensor.

    Parameters
    ----------
    `dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU
        Tensor from which to copy.
    `arr` : torch.Tensor
        Destination of the copy.
    `cuda_stream` : torch.cuda.Stream, cudaStream_t or any value that can be
        cast to cudaStream_t. CUDA stream to be used for the copy (if not
        provided, an internal user stream will be selected). In most cases,
        using pytorch's current stream is expected (for example, if we are
        copying to a tensor allocated with torch.zeros(...)).

    Returns
    -------
    torch.Tensor
        The destination tensor ``arr``.
    """
    src_shape = dali_tensor.shape()
    dst_shape = list(arr.size())
    assert src_shape == dst_shape, \
        "Shapes do not match: DALI tensor has size {0}, but PyTorch Tensor has size {1}".format(
            src_shape, dst_shape)
    raw_stream = types._raw_cuda_stream(cuda_stream)
    # Raw integer address of the destination buffer as a C void pointer.
    dst_ptr = ctypes.c_void_p(arr.data_ptr())
    if not isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
        dali_tensor.copy_to_external(dst_ptr)
    else:
        stream_ptr = None if raw_stream is None else ctypes.c_void_p(raw_stream)
        dali_tensor.copy_to_external(dst_ptr, stream_ptr)
    return arr
class DALIGenericIterator(_DaliBaseIterator):
"""
General DALI iterator for PyTorch. It can return any number of
outputs from the DALI pipeline in the form of PyTorch's Tensors.
Please keep in mind that Tensors returned by the iterator are
still owned by DALI. They are valid till the next iterator call.
If the content needs to be preserved please copy it to another tensor.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str
List of strings which maps consecutive outputs
of DALI pipelines to user specified name.
Outputs will be returned from iterator as dictionary
of those names.
Each name should be distinct
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy`, `last_batch_padded` and
`auto_reset` don't work in such case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried to the shard size, number of shards and
all other properties necessary to count properly the number of relevant and padded
samples that iterator needs to deal with. It automatically sets `last_batch_policy` to
PARTIAL when the FILL is used, and `last_batch_padded` accordingly to match
the reader's configuration
auto_reset : bool, optional, default = False
Whether the iterator resets itself for the next epoch
or it requires reset() to be called separately.
dynamic_shape: bool, optional, default = False
Whether the shape of the output of the DALI pipeline can
change during execution. If True, the pytorch tensor will be resized accordingly
if the shape of DALI returned tensors changes during execution.
If False, the iterator will fail in case of change.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy : default = FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In the conjunction with ``last_batch_policy`` it tells
if the iterator returning last batch with data only partially filled with
data from the current epoch is dropping padding samples or samples from
the next epoch (it doesn't literally drop but sets ``pad`` field of ndarray
so the following code could use it to drop the data). If set to ``False`` next
epoch will end sooner as data from it was consumed but dropped. If set to
True next epoch would be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = PARTIAL, last_batch_padded = True -> last batch = ``[7]``, next iteration will return ``[1, 2]``
last_batch_policy = PARTIAL, last_batch_padded = False -> last batch = ``[7]``, next iteration will return ``[2, 3]``
last_batch_policy = FILL, last_batch_padded = True -> last batch = ``[7, 7]``, next iteration will return ``[1, 2]``
last_batch_policy = FILL, last_batch_padded = False -> last batch = ``[7, 1]``, next iteration will return ``[2, 3]``
last_batch_policy = DROP, last_batch_padded = True -> last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = DROP, last_batch_padded = False -> last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
"""
def __init__(self,
             pipelines,
             output_map,
             size=-1,
             reader_name=None,
             auto_reset=False,
             fill_last_batch=None,
             dynamic_shape=False,
             last_batch_padded=False,
             last_batch_policy=LastBatchPolicy.FILL,
             prepare_first_batch=True):
    """
    Set up iterator state and optionally prefetch the first batch.

    See the class docstring for the meaning of all parameters.
    """
    # Validate output names before _DaliBaseIterator.__init__, because the
    # base-class constructor already runs the prefetch.
    assert len(set(output_map)) == len(output_map), "output_map names should be distinct"
    self.output_map = output_map
    self._output_categories = set(output_map)
    _DaliBaseIterator.__init__(self,
                               pipelines,
                               size,
                               reader_name,
                               auto_reset,
                               fill_last_batch,
                               last_batch_padded,
                               last_batch_policy,
                               prepare_first_batch=prepare_first_batch)
    self._dynamic_shape = dynamic_shape
    # One slot per GPU -- batches are double-buffered per pipeline.
    self._data_batches = [None] * self._num_gpus
    self._first_batch = None
    if self._prepare_first_batch:
        try:
            self._first_batch = DALIGenericIterator.__next__(self)
        except StopIteration:
            assert False, "It seems that there is no data in the pipeline. This may happen if `last_batch_policy` is set to PARTIAL and the requested batch size is greater than the shard size."
def __next__(self):
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
outputs = self._get_outputs()
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# initialize dict for all output categories
category_outputs = dict()
# segregate outputs into categories
for j, out in enumerate(outputs[i]):
category_outputs[self.output_map[j]] = out
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_shapes = dict()
for category, out in category_outputs.items():
category_tensors[category] = out.as_tensor()
category_shapes[category] = category_tensors[category].shape()
# If we did not yet allocate memory for that batch, do it now
if self._data_batches[i] is None:
category_torch_type = dict()
category_device = dict()
torch_gpu_device = None
torch_cpu_device = torch.device('cpu')
# check category and device
for category in self._output_categories:
category_torch_type[category] = to_torch_type[np.dtype(category_tensors[category].dtype())]
if type(category_tensors[category]) is TensorGPU:
if not torch_gpu_device:
torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
pyt_tensors = dict()
for category in self._output_categories:
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=category_torch_type[category],
device=category_device[category])
self._data_batches[i] = pyt_tensors
else:
pyt_tensors = self._data_batches[i]
# Copy data from DALI Tensors to torch tensors
for category, tensor in category_tensors.items():
if self._dynamic_shape and tensor.shape() != list(pyt_tensors[category].size()):
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=pyt_tensors[category].dtype,
device=pyt_tensors[category].device)
if isinstance(tensor, (TensorGPU, TensorListGPU)):
# Using same cuda_stream used by torch.zeros to set the memory
stream = torch.cuda.current_stream(device=pyt_tensors[category].device)
feed_ndarray(tensor, pyt_tensors[category], cuda_stream=stream)
else:
feed_ndarray(tensor, pyt_tensors[category])
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output | |
"""
Basic greyscale analysis of aligned frames and original (non-aligned)
frame stacks.
Contains:
- analyze_frame(): function that displays the analysis results
- class FrameSeries: getting and analysing aligned and original frames
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
#from past.utils import old_div
__version__ = "$Revision$"
import re
import os
import subprocess
import logging
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pyto
from .serial_em import SerialEM
from pyto.grey.image import Image
def analyze_frames(
serialem_dir, serialem_projection_dir=None, align_program=None,
orig_frames_dir=None, aligned_frames_dir=None, aligned_prefix=None,
ucsf_translations_path=None, ucsf_serial=None,
apixel=None, counte=None, gain_corr_acquisition=True,
print_stats=True, print_summary_stats=True, plot_frames=True,
plot_projections=True, bins=list(range(200)), names_only=False):
"""
Calculates basic pixel value stats for all aligned frames (projection)
belonging to a tomo series, as well as for non-aligned (original)
frames. Also prints total tomo series stats.
The behavior depends on the arguments specified:
At least arg serialem_dir has to be specified. In this case counts per
pixel are calculated for the projections of the SerialEM generated stack.
If arg counte is specified, counts are converted to electrons.
If arg aligned_frames_dir is specified and arg frames_dir is None,
counts per pixel are calculated for aligned frames (requires arg
align_prefix). Furthermore, if arg counte is specified,
counts are converted to electrons. Aligned frames files have to have the
same name as the corresponding frame stacks prefixed with arg
aligned_prefix, except that the extension can be the same or mrc (useful
in case frame stacks are saved as tifs and aligned frames as mrcs.
If gain correction was done during acquision by SerialEM, arg
gain_corr_acquisition should be True. If the frames are available
(orig_frames_dir is not None, currently possible for mrc but not
tiff frames) arg counte should be None. In that case counts per pixel
(self.counte) is determined from frames and self.serialem.counte
is set to the same value. If frames are not available, arg
counte should be set (19.2 for MPI Titan2-K2).
If gain correction was not done during acquision by SerialEM, arg
gain_corr_acquisition should be False. In that case, self.counte
will be set to 1. If arg counte is specified (preferred), that value
will be used to set self.serialem.counte. Otherwise the default
value (19.2) will be used.
If arg align_program is specified, frame alignment shifts (translations)
are shown.
If arg serialem_projection_dir is not None, projections from serialem
stack are written as separate files in mrc format.
Intended for use in an environment such as Jupyter notebook.
Uses FrameSeries.projections() to get images and calculate stats. Check
that method for the required file and directory name conventions.
Arguments:
- serialem_dir: directory where serial em mdoc and stacks are
- serialem_projection_dir: directory where individual projections
from serialem are written, or None if these should not be written
- orig_frames_dir: directory where original (unaligned) frames are
- aligned_frames_dir: directory where aligned frames are located
- align_program: alignment program used ('dm', 'dimi' or 'ucsf')
- aligned_prefix: prefix added to (unaligned) frames to make aligned
frames file names
- ucsf_translations_path: file path for the ucsf alignment log file
(sent to stdout by ucsf alignment program)
- ucsf_serial: specifies whether ucsf alignment (motioncorr
was run in the serial mode (True), in the individual mode
(False), or the mode is determined from the log file by
split_ucsf_translations() method (None).
- apixel: pixel size in A
- counte: number of counts per electron; None to use the value present
in frame stack mrc files, otherwise should be set to the factor
SerialEM uses to multiply pixel values
- gain_corr_acquisition: flag indicating whether gain correction was
done during tomo acquisition
- print_stats: flag indicating whether stats for individual projections
should be printed
- print_summary_stats: flag indicating whether total series stats
should be printed
- bins: bind for counts histogram plots
- plot_frames: flag indicating whether counts histograms for individual
frames should be plotted
- plot_projections: flag indicating whether counts histograms for
projections (aligned frames and serialem projections)
- names_only: if True only file names are shown, stats are not calculated
not plotted, for testing
"""
# initialize and make aligned frames (projections) iterator
fs = FrameSeries(
serialem_dir=serialem_dir, orig_frames_dir=orig_frames_dir,
serialem_projection_dir=serialem_projection_dir,
aligned_frames_dir=aligned_frames_dir, align=align_program,
aligned_prefix=aligned_prefix,
ucsf_translations_path=ucsf_translations_path,
ucsf_serial=ucsf_serial, counte=counte,
gain_corr_acquisition=gain_corr_acquisition)
projection_iter = fs.projections(
serialem_stack=True, translations=True, names_only=names_only,
projections=None)
# print table header
if (print_stats or print_summary_stats) and not names_only:
print()
#print(' mean std min max mean mean ')
#print(' c/pix c/pix c/pix c/pix e/A^2 e/(pix s)')
print(
' mean std min max mean ')
print(
' c/pix c/pix c/pix c/pix e/A^2')
# loop over aligned frames (projections)
for proj in projection_iter:
# print tilt angle
if print_stats:
print("Tilt angle: %6.2f" % proj['tilt_angle'])
# get images
original = proj.get('original', None)
original_flat = proj.get('original_flat', None)
aligned = proj.get('aligned', None)
translations = proj.get('translations', None)
serialem = proj.get('serialem', None)
# write serialem projections
if not names_only:
if serialem_projection_dir is not None:
try:
os.makedirs(serialem_projection_dir)
except OSError: pass
serialem.write(
file=proj['serialem_projection'], pixel=fs.apixel)
# plot, print data
if not names_only:
# plot count histogram count for frames of this projection
if plot_frames and (original is not None):
plt.figure()
plt.hist(original.data.flatten(), bins=bins, label='frames')
plt.yscale('log')
plt.xlabel('SerialEM counts')
plt.ylabel('N pixels')
plt.legend()
plt.figure()
# plot count histograms for this projection
if fs.counte == fs.serialem.counte:
serialem_adjust = 1
else:
serialem_adjust = fs.counte / fs.serialem.counte
if plot_projections and (original_flat is not None):
plt.hist(
original_flat.data.flatten() / serialem_adjust,
bins=bins, label='flat frames')
if plot_projections and (aligned is not None):
plt.hist(
aligned.data.flatten() / serialem_adjust,
bins=bins, label='aligned frames')
if plot_projections and (serialem is not None):
plt.hist(
serialem.data.flatten(), bins=bins,
label='serialem')
if plot_frames or plot_projections:
#plt.axis([plt.axis()[0], plt.axis()[1], 0, 10**7])
plt.xlabel('SerialEM counts')
plt.ylabel('N pixels')
plt.legend()
plt.show()
# check if counts per electron known
#if (original_flat is not None) or (counte is not None):
# counte_known = True
#else:
# counte_known = False
counte_known = True
# print basic greyscale stats
if print_stats and (original is not None):
print(
'Frames %s: %5.1f %5.1f %5.1f %6.1f %5.3f %5.2f ' %
(proj['original_name'],
original.mean, originl.std, original_min, original_max,
original.mean_ea, original.mean/proj['exposure_time']))
if print_stats and (original_flat is not None):
print(
'Flat frames %s: %5.1f %5.1f %5.1f %6.1f %5.3f' %
(proj['original_name'], original_flat.mean,
original_flat.std, original_flat.min, original_flat_max,
original_flat.mean_ea))
if print_stats and (aligned is not None):
if counte_known:
print('Aligned %s: %5.1f %5.1f %5.1f %6.1f %5.3f' %
(proj['aligned_name'], aligned.mean,
aligned.std, aligned.min, aligned.max,
aligned.mean_ea))
else:
print(
'Aligned %s: %5.1f %5.1f %5.1f %6.1f' %
(proj['aligned_name'], aligned.mean, aligned.std,
aligned.min, aligned.max))
if print_stats and (serialem is not None):
if counte_known:
print(
'SerialEM %s: %5.1f %5.1f %5.1f %6.1f %5.3f' %
(proj['serialem_name'], serialem.mean,
serialem.std, serialem.min,
serialem.max, serialem.mean_ea))
else:
print(
'SerialEM %s: %5.1f %5.1f %5.1f %6.1f' %
(proj['serialem_name'], serialem.mean, serialem.std,
serialem.min, serialem.max))
# print translations
if translations is not None:
print("Frame translations: ")
for trans in translations:
print("\t %7.3f %7.3f" % (trans[0], trans[1]))
if print_stats: print()
else:
# only print file names
if print_stats and (original_flat is not None):
print('\t Frames %s:' % proj['original_name'])
if print_stats and (aligned_frames_dir is not None):
print('\t Aligned %s:' % proj['aligned_name'])
if print_stats: print(
'\t SerialEM %s:' % proj['serialem_name'])
# print summary stats
if print_summary_stats and not names_only:
if original_flat is not None:
print(
'Total original: %6.1f c/pix = %6.2f e/pix = %6.2f e/A^2' %
(fs.original_total, fs.original_total / fs.counte,
fs.original_total_ea))
if aligned_frames_dir is not None:
if counte_known:
print(
'Total aligned: %6.1f c/pix = %6.2f e/pix = %6.2f e/A^2' %
(fs.aligned_total, fs.aligned_total / fs.counte,
fs.aligned_total_ea))
else:
print('Total aligned: %6.1f c/pix ' % (fs.aligned_total, ))
if serialem is not None:
if counte_known:
print(
'Total serialem: %6.1f c/pix = %6.2f e/pix = %6.2f e/A^2' %
(fs.serialem_total, fs.serialem_total / fs.serialem.counte,
fs.serialem_total_ea))
else:
print('Total serialem: %6.1f c/pix ' % (fs.serialem_total, ))
print()
if counte_known:
| |
(optional) The content of the document to ingest. The
maximum supported file size when adding a file to a collection is 50
megabytes, the maximum supported file size when testing a configuration is
1 megabyte. Files larger than the supported size are rejected.
:param str filename: (optional) The filename for file.
:param str file_content_type: (optional) The content type of file.
:param str metadata: (optional) The maximum supported metadata file size is
1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` {
"Creator": "<NAME>",
"Subject": "Apples"
} ```.
:param bool x_watson_discovery_force: (optional) When `true`, the uploaded
document is added to the collection even if the data for that collection is
shared with other collections.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
if collection_id is None:
raise ValueError('collection_id must be provided')
if document_id is None:
raise ValueError('document_id must be provided')
headers = {'X-Watson-Discovery-Force': x_watson_discovery_force}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='update_document')
headers.update(sdk_headers)
params = {'version': self.version}
form_data = []
if file:
if not filename and hasattr(file, 'name'):
filename = basename(file.name)
if not filename:
raise ValueError('filename must be provided')
form_data.append(('file', (filename, file, file_content_type or
'application/octet-stream')))
if metadata:
metadata = str(metadata)
form_data.append(('metadata', (None, metadata, 'text/plain')))
url = '/v2/projects/{0}/collections/{1}/documents/{2}'.format(
*self._encode_path_vars(project_id, collection_id, document_id))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
def delete_document(self,
                    project_id: str,
                    collection_id: str,
                    document_id: str,
                    *,
                    x_watson_discovery_force: bool = None,
                    **kwargs) -> 'DetailedResponse':
    """
    Delete a document.

    If the given document ID is invalid, or if the document is not found, a
    success response is still returned (HTTP status code `200`) with the
    status set to 'deleted'.

    **Note:** This operation only works on collections created to accept direct
    file uploads. It cannot be used to modify a collection that connects to an
    external source such as Microsoft SharePoint.

    **Note:** Segments of an uploaded document cannot be deleted individually.
    Delete all segments by deleting using the `parent_document_id` of a segment
    result.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param str collection_id: The ID of the collection.
    :param str document_id: The ID of the document.
    :param bool x_watson_discovery_force: (optional) When `true`, the operation
           is applied even if the data for that collection is shared with other
           collections.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # All three path parameters are mandatory.
    for arg_name, arg_value in (('project_id', project_id),
                                ('collection_id', collection_id),
                                ('document_id', document_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))
    headers = {'X-Watson-Discovery-Force': x_watson_discovery_force}
    headers.update(kwargs.get('headers', {}))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='delete_document'))
    params = {'version': self.version}
    url = '/v2/projects/{0}/collections/{1}/documents/{2}'.format(
        *self._encode_path_vars(project_id, collection_id, document_id))
    request = self.prepare_request(method='DELETE',
                                   url=url,
                                   headers=headers,
                                   params=params)
    return self.send(request)
#########################
# Training data
#########################
def list_training_queries(self, project_id: str,
                          **kwargs) -> 'DetailedResponse':
    """
    List training queries.

    Return the training queries defined for the specified project.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if project_id is None:
        raise ValueError('project_id must be provided')
    headers = {}
    headers.update(kwargs.get('headers', {}))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='list_training_queries'))
    params = {'version': self.version}
    url = '/v2/projects/{0}/training_data/queries'.format(
        *self._encode_path_vars(project_id))
    request = self.prepare_request(method='GET',
                                   url=url,
                                   headers=headers,
                                   params=params)
    return self.send(request)
def delete_training_queries(self, project_id: str,
                            **kwargs) -> 'DetailedResponse':
    """
    Delete training queries.

    Remove all training queries for the specified project.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if project_id is None:
        raise ValueError('project_id must be provided')
    headers = {}
    headers.update(kwargs.get('headers', {}))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='delete_training_queries'))
    params = {'version': self.version}
    url = '/v2/projects/{0}/training_data/queries'.format(
        *self._encode_path_vars(project_id))
    request = self.prepare_request(method='DELETE',
                                   url=url,
                                   headers=headers,
                                   params=params)
    return self.send(request)
def create_training_query(self,
                          project_id: str,
                          natural_language_query: str,
                          examples: List['TrainingExample'],
                          *,
                          filter: str = None,
                          **kwargs) -> 'DetailedResponse':
    """
    Create training query.

    Add a query to the training data for this project. The query can contain a
    filter and natural language query.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param str natural_language_query: The natural text query for the training
           query.
    :param List[TrainingExample] examples: Array of training examples.
    :param str filter: (optional) The filter used on the collection before the
           **natural_language_query** is applied.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if project_id is None:
        raise ValueError('project_id must be provided')
    if natural_language_query is None:
        raise ValueError('natural_language_query must be provided')
    if examples is None:
        raise ValueError('examples must be provided')
    # Serialize TrainingExample model objects into plain dicts for the body.
    examples = [self._convert_model(example) for example in examples]
    headers = {}
    headers.update(kwargs.get('headers', {}))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='create_training_query'))
    params = {'version': self.version}
    data = {
        'natural_language_query': natural_language_query,
        'examples': examples,
        'filter': filter
    }
    url = '/v2/projects/{0}/training_data/queries'.format(
        *self._encode_path_vars(project_id))
    request = self.prepare_request(method='POST',
                                   url=url,
                                   headers=headers,
                                   params=params,
                                   data=data)
    return self.send(request)
def get_training_query(self, project_id: str, query_id: str,
                       **kwargs) -> 'DetailedResponse':
    """
    Get a training data query.

    Get details for a specific training data query, including the query string
    and all examples.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param str query_id: The ID of the query used for training.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # Fail fast on any missing required argument.
    for arg_name, arg_value in (('project_id', project_id),
                                ('query_id', query_id)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))

    # Caller-supplied headers first, then the SDK analytics headers on top.
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='get_training_query'))

    params = {'version': self.version}

    url = '/v2/projects/{0}/training_data/queries/{1}'.format(
        *self._encode_path_vars(project_id, query_id))
    request = self.prepare_request(method='GET',
                                   url=url,
                                   headers=headers,
                                   params=params)
    return self.send(request)
def update_training_query(self,
                          project_id: str,
                          query_id: str,
                          natural_language_query: str,
                          examples: List['TrainingExample'],
                          *,
                          filter: str = None,
                          **kwargs) -> 'DetailedResponse':
    """
    Update a training query.

    Updates an existing training query and its examples.

    :param str project_id: The ID of the project. This information can be found
           from the deploy page of the Discovery administrative tooling.
    :param str query_id: The ID of the query used for training.
    :param str natural_language_query: The natural text query for the training
           query.
    :param List[TrainingExample] examples: Array of training examples.
    :param str filter: (optional) The filter used on the collection before the
           **natural_language_query** is applied.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # Fail fast on any missing required argument.
    for arg_name, arg_value in (('project_id', project_id),
                                ('query_id', query_id),
                                ('natural_language_query',
                                 natural_language_query),
                                ('examples', examples)):
        if arg_value is None:
            raise ValueError('{0} must be provided'.format(arg_name))
    # Serialize the TrainingExample models into plain dicts.
    examples = [self._convert_model(example) for example in examples]

    # Caller-supplied headers first, then the SDK analytics headers on top.
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(
        get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                        service_version='V2',
                        operation_id='update_training_query'))

    params = {'version': self.version}
    data = {
        'natural_language_query': natural_language_query,
        'examples': examples,
        'filter': filter
    }

    url = '/v2/projects/{0}/training_data/queries/{1}'.format(
        *self._encode_path_vars(project_id, query_id))
    request = self.prepare_request(method='POST',
                                   url=url,
                                   headers=headers,
                                   params=params,
                                   data=data)
    return self.send(request)
class AddDocumentEnums(object):
class FileContentType(Enum):
"""
The content type of file.
"""
APPLICATION_JSON = 'application/json'
APPLICATION_MSWORD = 'application/msword'
APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
APPLICATION_PDF = 'application/pdf'
TEXT_HTML = 'text/html'
APPLICATION_XHTML_XML = | |
# Repository: harrispirie/stmpy
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.colors import Colormap
from matplotlib.widgets import AxesWidget
from .misc import format_eng, grab_bg, blit_bg, volumetype
class SpecLinkedCursors:
    '''
    Two linked cursors in two separate images/axes. Image shapes/extents must be the same.
    Created with the help of matplotlib.widget.MultiCursor

    The widget links a topography image (axtopo), a spectroscopy map slice
    (axmap) and a spectrum plot (axspec): dragging the crosshair in either
    image updates the spectrum shown below, scrolling over the map pages
    through bias slices, and scrolling over the topo pages through channels.
    '''
    # Class-level appearance/behavior constants shared by all instances.
    epsilon = 10 # tolerance in pixels for enabling cursor dragging
    cursor_color = 'gold'
    bias_color = 'forestgreen'
    cursor_lw = 0.5
    title_fontdict = dict(fontsize=12)
    def __init__(self, axtopo, axmap, axspec, tcbar, mcbar, fit_didv=None, use_blit=True):
        '''
        Inputs:
        axtopo, axmap, axspec - Axes instances in the figure
        tcbar, mcbar - RangeSlider instances, adjustable colorbars for the images
        fit_didv - 3d array, to store fit data for spectrum plot overlay
        use_blit - bool, whether to use blitting, default is True
        '''
        # axmap has attributes: volume = map array; index = bias index to show; ch_names = list of channel names(default = [])
        # (these attributes are set by ScrollableMap before this widget is built)
        self.axtopo = axtopo
        self.axmap = axmap
        self.axspec = axspec
        self.tcbar = tcbar # RangeSlider instance, to update color limits after mouse scroll
        self.mcbar = mcbar # RangeSlider instance, to update color limits after mouse scroll
        self.fig = axtopo.figure
        self.canvas = self.fig.canvas
        self.im1 = axtopo.get_images()[-1] # image on top
        self.im2 = axmap.get_images()[-1]
        self.spec = axspec.get_lines()[0] # bottom plot is spectrum plot
        self.en = self.spec.get_data()[0]
        self.en_ind = axmap.index # for bias scroll
        self.z_ind = axtopo.index # for topo channel scroll
        extent = self.im1.get_extent() # set by scan info
        shape = self.im1.get_array().shape
        # Sanity checks only: mismatched inputs are reported but not rejected.
        if not np.allclose(extent, self.im2.get_extent()):
            print('WARNING: two images have different extents! Please check input arrays!')
        if not shape == self.im2.get_array().shape:
            print('WARNING: two images have different shapes! Please check input arrays!')
        ### initialize plots ###
        # Pixel size in data units, derived from the image extent and shape.
        self.dx = (extent[1]-extent[0])/shape[-1]
        self.dy = (extent[3]-extent[2])/shape[-2]
        self.x = np.arange(extent[0]+self.dx/2, extent[1], self.dx) # half pixel shift in imshow
        self.y = np.arange(extent[2]+self.dy/2, extent[3], self.dy)
        self._last_indices = [int(shape[-1]/2), int(shape[-2]/2)] # save cursor position in xy indices (snap to data points)
        # Show the spectrum at the initial (center) cursor position.
        self.spec.set_ydata(axmap.volume[:, self._last_indices[1], self._last_indices[0]])
        self.fit_didv = fit_didv
        if self.fit_didv is not None:
            self.spec_fit, = self.axspec.plot(self.en, self.fit_didv[:, self._last_indices[1], self._last_indices[0]], 'tomato', lw=1, alpha=0.7,)
        # Vertical line marking the current bias on the spectrum plot.
        self.bline = axspec.axvline(self.en[self.en_ind], color=self.bias_color, lw=self.cursor_lw)
        self.btext = axmap.set_title('Bias='+format_eng(self.en[self.en_ind])+'V', fontdict=self.title_fontdict)
        self.btext.set_color(self.bias_color)
        self.ttext = axtopo.set_title(self.axtopo.ch_names[self.z_ind], fontdict=self.title_fontdict)
        axspec.relim()
        axspec.autoscale_view()
        ### plot cursors ###
        # Crosshair cursors drawn at the same data position on both images.
        xc, yc = self.x[self._last_indices[0]], self.y[self._last_indices[1]]
        self.hline1 = axtopo.axhline(yc, color=self.cursor_color, lw=self.cursor_lw)
        self.vline1 = axtopo.axvline(xc, color=self.cursor_color, lw=self.cursor_lw)
        self.hline2 = axmap.axhline(yc, color=self.cursor_color, lw=self.cursor_lw)
        self.vline2 = axmap.axvline(xc, color=self.cursor_color, lw=self.cursor_lw)
        ### events initialization ###
        self.drag = False
        self.cid_press = self.canvas.mpl_connect('button_press_event', self.on_button_press)
        self.cid_bscroll = self.canvas.mpl_connect('scroll_event', self.bias_scroll)
        if self.axtopo.volume.shape[0] > 1: # suppress scroll event if volume has only one image
            self.cid_tscroll = self.canvas.mpl_connect('scroll_event', self.topo_scroll)
        self.use_blit = use_blit
        if self.use_blit:
            self._bg = None
    def on_button_press(self, event):
        '''Begin a cursor drag if the left button goes down near the cursor.'''
        if event.inaxes not in [self.axtopo, self.axmap]:
            return
        if event.button != 1:
            return
        index = self._last_indices
        xy_disp = event.inaxes.transData.transform((self.x[index[0]], self.y[index[1]])) # transform cursor position in display pixels
        d = np.sqrt((xy_disp[0] - event.x)**2 + (xy_disp[1] - event.y)**2) # event.x, event.y - mouse position in display coordinate
        if d <= self.epsilon: # if mouse position is within the display pixel range
            self.drag = True
            # Only connect to mouse movement when the left mouse button REMAINS pressed
            self.cid_release = self.canvas.mpl_connect('button_release_event', self.on_button_release)
            self.cid_move = self.canvas.mpl_connect('motion_notify_event', self.drag_cursors)
    def on_button_release(self, event):
        '''Disconnect events when release the left mouse button'''
        if event.button != 1:
            return
        self.drag = False
        self.canvas.mpl_disconnect(self.cid_move)
        self.canvas.mpl_disconnect(self.cid_release)
    def bias_scroll(self, event):
        '''mouse scroll events in the map window: page through bias slices'''
        if event.inaxes != self.axmap:
            return
        else:
            # Scroll up decreases the bias index; down increases it, clamped
            # to the valid range of self.en.
            if event.button == 'up':
                ind = self.en_ind - 1
                if ind >= 0:
                    self.bias_scroll_update(ind)
            elif event.button == 'down':
                ind = self.en_ind + 1
                if ind < len(self.en):
                    self.bias_scroll_update(ind)
    def bias_scroll_update(self, ind):
        '''
        Lots to do when scroll:
        1. move axvline correspondingly in the spectrum window
        2. update slice in the map window
        3. update clims of the map window, and colorbar indicators
        4. update title of the map window
        '''
        if self.use_blit:
            self._bg = grab_bg(self.canvas, [self.bline, self.btext, self.im2, self.vline2, self.hline2])
            self.canvas.restore_region(self._bg)
        # NOTE(review): scalar argument to set_xdata; matplotlib >= 3.7
        # expects a sequence here — confirm the targeted mpl version.
        self.bline.set_xdata(self.en[ind])
        self.im2.set_array(self.axmap.volume[ind])
        # Rescale the colormap to the new slice and sync the slider handles.
        newmin, newmax = self.axmap.volume[ind].min(), self.axmap.volume[ind].max()
        self.im2.set_clim(newmin, newmax)
        self.mcbar.set_val(newmin, 0)
        self.mcbar.set_val(newmax, 1)
        self.btext = self.axmap.set_title('Bias='+format_eng(self.en[ind])+'V', fontdict=self.title_fontdict)
        self.en_ind = ind
        if self.use_blit:
            blit_bg(self.canvas, self._bg, [self.bline, self.btext, self.im2, self.vline2, self.hline2])
        else:
            self.canvas.draw_idle()
    def map_clim_update(self, vals):
        '''update clims of the map window. vals = (new_min, new_max).'''
        if self.use_blit:
            self._bg = grab_bg(self.canvas, [self.im2, self.vline2, self.hline2])
            self.canvas.restore_region(self._bg)
        newmin, newmax = vals[0], vals[1]
        self.im2.set_clim(newmin, newmax)
        if self.use_blit:
            blit_bg(self.canvas, self._bg, [self.im2, self.vline2, self.hline2])
        else:
            self.canvas.draw_idle()
    def topo_scroll(self, event):
        '''mouse scroll events in the topo window: page through channels'''
        if event.inaxes != self.axtopo:
            return
        else:
            # Scroll up decreases the channel index; down increases it,
            # clamped to the number of channels in the volume.
            if event.button == 'up':
                ind = self.z_ind - 1
                if ind >= 0:
                    self.topo_scroll_update(ind)
            elif event.button == 'down':
                ind = self.z_ind + 1
                if ind < self.axtopo.volume.shape[0]:
                    self.topo_scroll_update(ind)
    def topo_scroll_update(self, ind):
        '''
        Lots to do when scroll:
        1. update display channel in the topo window
        2. update clims of the topo window, and colorbar indicators
        3. update title of the topo window
        '''
        if self.use_blit:
            self._bg = grab_bg(self.canvas, [self.ttext, self.im1, self.vline1, self.hline1])
            self.canvas.restore_region(self._bg)
        self.im1.set_array(self.axtopo.volume[ind])
        newmin, newmax = self.axtopo.volume[ind].min(), self.axtopo.volume[ind].max()
        # NOTE(review): newrange is computed but never used — candidate for removal.
        newrange = newmax - newmin
        self.im1.set_clim(newmin, newmax)
        # Unlike the map slider, the topo slider range is re-fitted per channel
        # because different channels can have very different value scales.
        self.tcbar.set_valmin_valmax(newmin, newmax, update=True)
        self.tcbar.set_val(newmin, 0)
        self.tcbar.set_val(newmax, 1)
        self.ttext = self.axtopo.set_title(self.axtopo.ch_names[ind], fontdict=self.title_fontdict)
        self.z_ind = ind
        if self.use_blit:
            blit_bg(self.canvas, self._bg, [self.ttext, self.im1, self.vline1, self.hline1])
        else:
            self.canvas.draw_idle()
    def topo_clim_update(self, vals):
        '''update clims of the topo window. vals = (new_min, new_max).'''
        if self.use_blit:
            self._bg = grab_bg(self.canvas, [self.im1, self.vline1, self.hline1])
            self.canvas.restore_region(self._bg)
        newmin, newmax = vals[0], vals[1]
        self.im1.set_clim(newmin, newmax)
        if self.use_blit:
            blit_bg(self.canvas, self._bg, [self.im1, self.vline1, self.hline1])
        else:
            self.canvas.draw_idle()
    def drag_cursors(self, event):
        '''
        update cursor position (always snapped to data points), and spectrum plot(s)
        Condition: mouse in the topo or map window, clicked on/near the current cursor position, while no navigation tools are enabled.
        '''
        if event.inaxes not in [self.axtopo, self.axmap]:
            self.drag = False
            return
        else:
            for ax in self.fig.axes:
                navmode = ax.get_navigate_mode()
                # avoid possible conflicts between drag and navigation toolbar
                if navmode is not None:
                    self.drag = False
                    break
        if self.drag:
            x, y = event.xdata, event.ydata # mouse position in data coordinate
            # Snap the mouse position to the nearest data pixel, clamped to
            # the image bounds.
            indices = [min(np.searchsorted(self.x+self.dx/2, x), len(self.x) - 1), min(np.searchsorted(self.y+self.dy/2, y), len(self.y) - 1)]
            if np.allclose(indices, self._last_indices):
                return # still on the same data point. Nothing to do.
            self._last_indices = indices
            x = self.x[indices[0]]
            y = self.y[indices[1]]
            # update the cursor positions
            if self.use_blit:
                artists = [self.hline1, self.hline2, self.vline1, self.vline2, self.spec, self.bline,]
                if self.fit_didv is not None:
                    artists.append(self.spec_fit)
                self._bg = grab_bg(self.canvas, artists)
                self.canvas.restore_region(self._bg)
            # NOTE(review): scalar args to set_xdata/set_ydata; matplotlib
            # >= 3.7 expects sequences — confirm the targeted mpl version.
            self.hline1.set_ydata(y)
            self.vline1.set_xdata(x)
            self.hline2.set_ydata(y)
            self.vline2.set_xdata(x)
            # Refresh the spectrum (and optional fit overlay) for the new pixel.
            self.spec.set_ydata(self.axmap.volume[:, indices[1], indices[0]])
            if self.fit_didv is not None:
                self.spec_fit.set_ydata(self.fit_didv[:, self._last_indices[1], self._last_indices[0]])
            self.axspec.autoscale(enable=True, axis='both')
            self.axspec.relim()
            self.axspec.autoscale_view()
            if self.use_blit:
                blit_bg(self.canvas, self._bg, artists)
            else:
                self.canvas.draw_idle()
class ScrollableMap:
    '''An extension of imshow to 3d data.

    Stores the 3d array on the Axes as ``ax.volume`` together with
    ``ax.ch_names`` (one display name per channel) and ``ax.index`` (the
    channel currently shown). The scroll events that page through the volume
    are defined in the SpecLinkedCursors class.
    '''
    def __init__(self, volume, ax, ch_names=None, **kwargs):
        '''
        Inputs:
        volume - a 2D array, a 3D array, or a list of 2D arrays
        ax - Axes instance to draw into
        ch_names - list of channel names (one per 2D slice); optional for a
                   single-channel volume
        kwargs - forwarded to ax.imshow
        '''
        self.ax = ax
        self.canvas = ax.figure.canvas
        # Avoid the mutable-default-argument pitfall: normalize to a list here.
        if ch_names is None:
            ch_names = []
        ### check volume type and channel names ###
        # Classify the input once instead of re-calling volumetype per branch.
        vtype = volumetype(volume)
        if vtype == 'a 3D array':
            self.ax.volume = volume
            self.ax.ch_names = [] # kill channel name for map (you should know the channel name)
        elif vtype == 'list of 2D arrays':
            self.ax.volume = np.asarray(volume)
            if ch_names:
                if len(volume) == len(ch_names):
                    self.ax.ch_names = ch_names
                else:
                    raise ValueError('ERROR: number of ch_names does not match number of channels in volume!')
            else:
                if len(volume) == 1:
                    self.ax.ch_names = ['Topo']
                else:
                    raise ValueError('ERROR: please provide %d channel names!'%len(volume))
        elif vtype == 'a 2D array':
            self.ax.volume = volume.reshape(1, volume.shape[0], volume.shape[1])
            if ch_names:
                # BUGFIX: keep ch_names a one-element list. The previous code
                # assigned ch_names[0] (a bare string), so later indexing with
                # ch_names[index] returned a single character instead of the
                # channel name.
                self.ax.ch_names = [ch_names[0]] # only use the first channel name
            else:
                self.ax.ch_names = ['Topo']
                print('WARNING: channel name not provided, use \'Topo\' as default.')
        else:
            raise TypeError('ERROR: volume argument only supports input types of a 2D/3D array or a list of 2D arrays!')
        ### draw the first image in volume ###
        self.ax.index = 0
        self.im = ax.imshow(self.ax.volume[self.ax.index], **kwargs)
class LimLinkedAxes:
'''Link xy limits of two axes so they update together'''
def __init__(self, ax1, ax2):
self.ax1 = ax1
self.ax2 = ax2
self.canvas = ax1.figure.canvas
self.oxy = [self._get_limits(ax) for ax in [ax1, ax2]] # old limits
self.cid1 = self.canvas.mpl_connect('motion_notify_event', self.re_zoom) # for right-click pan/zoom
self.cid2 = self.canvas.mpl_connect('button_release_event',self.re_zoom) # for rectangle-select zoom
def _get_limits(self, ax):
return [list(ax.get_xlim()), list(ax.get_ylim())]
def _set_limits(self, ax, lims):
ax.set_xlim(*(lims[0]))
ax.set_ylim(*(lims[1]))
def re_zoom(self, event):
for ax in [self.ax1, self.ax2]:
navmode = ax.get_navigate_mode()
if navmode is not None:
break
if navmode | |
a comparison in a nice list format
if output='fail' return the number of failed process -- for test--
"""
# Special output for loop processes
if comparison_results[0]['process']['perturbation_couplings']!=[]:
return output_lorentz_inv_loop(comparison_results, output)
proc_col_size = 17
threshold=1e-10
process_header = "Process"
if len(process_header) + 1 > proc_col_size:
proc_col_size = len(process_header) + 1
for proc, values in comparison_results:
if len(proc) + 1 > proc_col_size:
proc_col_size = len(proc) + 1
col_size = 18
pass_proc = 0
fail_proc = 0
no_check_proc = 0
failed_proc_list = []
no_check_proc_list = []
res_str = fixed_string_length(process_header, proc_col_size) + \
fixed_string_length("Min element", col_size) + \
fixed_string_length("Max element", col_size) + \
fixed_string_length("Relative diff.", col_size) + \
"Result"
for one_comp in comparison_results:
proc = one_comp['process'].base_string()
data = one_comp['results']
if data == 'pass':
no_check_proc += 1
no_check_proc_list.append(proc)
continue
values = [data[i]['m2'] for i in range(len(data))]
min_val = min(values)
max_val = max(values)
diff = (max_val - min_val) / abs(max_val)
res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
fixed_string_length("%1.10e" % min_val, col_size) + \
fixed_string_length("%1.10e" % max_val, col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff < threshold:
pass_proc += 1
proc_succeed = True
res_str += "Passed"
else:
fail_proc += 1
proc_succeed = False
failed_proc_list.append(proc)
res_str += "Failed"
#check all the JAMP
# loop over jamp
# Keep in mind that this is not available for loop processes where the
# jamp list is empty
if len(data[0]['jamp'])!=0:
for k in range(len(data[0]['jamp'][0])):
sum = [0] * len(data)
# loop over helicity
for j in range(len(data[0]['jamp'])):
#values for the different lorentz boost
values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
sum = [sum[i] + values[i] for i in range(len(values))]
# Compare the different lorentz boost
min_val = min(sum)
max_val = max(sum)
if not max_val:
continue
diff = (max_val - min_val) / max_val
tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
fixed_string_length("%1.10e" % min_val, col_size) + \
fixed_string_length("%1.10e" % max_val, col_size) + \
fixed_string_length("%1.10e" % diff, col_size)
if diff > 1e-10:
if not len(failed_proc_list) or failed_proc_list[-1] != proc:
fail_proc += 1
pass_proc -= 1
failed_proc_list.append(proc)
res_str += tmp_str + "Failed"
elif not proc_succeed:
res_str += tmp_str + "Passed"
res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
(pass_proc, pass_proc + fail_proc,
fail_proc, pass_proc + fail_proc)
if fail_proc != 0:
res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
if no_check_proc:
res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
if output == 'text':
return res_str
else:
return fail_proc
def output_unitary_feynman(comparison_results, output='text'):
    """Present the results of a unitary vs. Feynman gauge comparison in a
    nice list format.

    :param comparison_results: list whose FIRST element stores the process
        definition (holding 'perturbation_couplings'); the remaining elements
        are per-process dicts with 'process', 'value_unit' and 'value_feynm'.
    :param output: 'text' returns the formatted report string; any other
        value returns the number of failed processes (used by the test suite).
    """
    proc_col_size = 17
    # We use the first element of the comparison_result list to store the
    # process definition object
    pert_coupl = comparison_results[0]['perturbation_couplings']
    comparison_results = comparison_results[1:]
    if pert_coupl:
        process_header = "Process [virt="+" ".join(pert_coupl)+"]"
    else:
        process_header = "Process"
    # Widen the first column to fit the header and the longest process name.
    if len(process_header) + 1 > proc_col_size:
        proc_col_size = len(process_header) + 1
    for data in comparison_results:
        proc = data['process']
        if len(proc) + 1 > proc_col_size:
            proc_col_size = len(proc) + 1
    pass_proc = 0
    fail_proc = 0
    no_check_proc = 0
    failed_proc_list = []
    no_check_proc_list = []
    col_size = 18
    res_str = fixed_string_length(process_header, proc_col_size) + \
              fixed_string_length("Unitary", col_size) + \
              fixed_string_length("Feynman", col_size) + \
              fixed_string_length("Relative diff.", col_size) + \
              "Result"
    for one_comp in comparison_results:
        proc = one_comp['process']
        data = [one_comp['value_unit'], one_comp['value_feynm']]
        # Processes marked 'pass' were skipped by the check altogether.
        if data[0] == 'pass':
            no_check_proc += 1
            no_check_proc_list.append(proc)
            continue
        values = [data[i]['m2'] for i in range(len(data))]
        min_val = min(values)
        max_val = max(values)
        # when max_val is also negative
        # diff will be negative if there is no abs
        diff = (max_val - min_val) / abs(max_val)
        res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
                   fixed_string_length("%1.10e" % values[0], col_size) + \
                   fixed_string_length("%1.10e" % values[1], col_size) + \
                   fixed_string_length("%1.10e" % diff, col_size)
        if diff < 1e-8:
            pass_proc += 1
            proc_succeed = True
            res_str += "Passed"
        else:
            fail_proc += 1
            proc_succeed = False
            failed_proc_list.append(proc)
            res_str += "Failed"
        # check all the JAMP (color-flow amplitudes)
        # This is not available for loop processes where the jamp list returned
        # is empty.
        if len(data[0]['jamp'])>0:
            for k in range(len(data[0]['jamp'][0])):
                # Accumulate |jamp|^2 over helicities, separately per gauge.
                # (Renamed from 'sum', which shadowed the builtin.)
                jamp_totals = [0, 0]
                # loop over helicity
                for j in range(len(data[0]['jamp'])):
                    # values for the two gauges
                    values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
                    jamp_totals = [jamp_totals[i] + values[i] for i in range(len(values))]
                # Compare the two gauges
                min_val = min(jamp_totals)
                max_val = max(jamp_totals)
                if not max_val:
                    continue
                diff = (max_val - min_val) / max_val
                # NOTE(review): the sibling Lorentz-invariance report pads this
                # label with proc_col_size; col_size is kept here on purpose to
                # preserve the existing output layout.
                tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
                          fixed_string_length("%1.10e" % jamp_totals[0], col_size) + \
                          fixed_string_length("%1.10e" % jamp_totals[1], col_size) + \
                          fixed_string_length("%1.10e" % diff, col_size)
                if diff > 1e-10:
                    # Flip the whole process to 'failed' at most once.
                    if not len(failed_proc_list) or failed_proc_list[-1] != proc:
                        fail_proc += 1
                        pass_proc -= 1
                        failed_proc_list.append(proc)
                    res_str += tmp_str + "Failed"
                elif not proc_succeed:
                    res_str += tmp_str + "Passed"
    res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
               (pass_proc, pass_proc + fail_proc,
                fail_proc, pass_proc + fail_proc)
    if fail_proc != 0:
        res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
    if no_check_proc:
        res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
    if output == 'text':
        return res_str
    else:
        return fail_proc
def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
    """Build a suitable filename for saving CMS check results.

    :param extension: file extension (without the dot) for the output file.
    :param cms_res: CMS check results; 'ordered_processes' and
        'perturbation_orders' are used when auto-naming from a single process.
    :param used_model: model object; its 'name' is embedded in auto names.
    :param opts: options dict; 'name', 'analyze' and 'tweak' drive the naming.
    :param output_path: optional directory to prepend to the filename.
    """
    # When re-analyzing saved results in 'auto' mode, reuse the basename of
    # the first analyzed file.
    if opts['name'] == 'auto' and opts['analyze'] != 'None':
        root = os.path.splitext(opts['analyze'].split(',')[0])[0]
        return '%s.%s' % (root, extension)
    if opts['name'] != 'auto':
        # An explicit name was requested; use it verbatim.
        basename = opts['name']
    else:
        prefix = 'cms_check_'
        if len(cms_res['ordered_processes']) == 1:
            # Single process: sanitize its string into filename-safe tokens.
            proc = cms_res['ordered_processes'][0]
            substitutions = (('=>', 'gt'), ('<=', 'lt'), ('/', '_no_'),
                             (' ', ''), ('+', 'p'), ('-', 'm'),
                             ('~', 'x'), ('>', '_'), ('=', 'eq'),
                             ('^2', 'squared'))
            # Drop any perturbation couplings suffix, e.g. '[QCD]'.
            bracket = proc.find('[')
            if bracket != -1:
                proc = proc[:bracket]
            for old, new in substitutions:
                proc = proc.replace(old, new)
            pert = cms_res['perturbation_orders']
            order_tag = '_' + '_'.join(pert) if pert != [] else ''
            basename = '%s%s_%s_%s' % (prefix, proc,
                                       used_model.get('name'), order_tag)
        else:
            # Several processes: fall back to a timestamped name.
            stamp = datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
            basename = prefix + stamp
    tweak_name = opts['tweak']['name']
    suffix = '_%s' % tweak_name if tweak_name != '' else ''
    filename = '%s%s.%s' % (basename, suffix, extension)
    if output_path:
        return pjoin(output_path, filename)
    else:
        return filename
def output_complex_mass_scheme(result,output_path, options, model, output='text'):
""" Outputs nicely the outcome of the complex mass scheme check performed
by varying the width in the offshell region of resonances found for eahc process.
Output just specifies whether text should be returned or a list of failed
processes. Use 'concise_text' for a consise report of the results."""
pert_orders=result['perturbation_orders']
######## CHECK PARAMETERS #########
#
# diff_lambda_power choses the power by which one should divide the difference
# curve. The test should only work with 1, but it is useful for the LO
# check to see the difference has O(\lambda) contribution by setting this
# parameter to 2. If the Born does not have O(\lambda) contributions
# (i.e. if the test still pas with diff_lambda_power=2) then the NLO test
# will not be sensitive to the CMS implementation details.
diff_lambda_power = options['diff_lambda_power']
# DISLAIMER:
# The CMS check is non trivial to automate and it is actually best done
# manually by looking at plots for various implementation of the CMS.
# The automatic check performed here with the default parameters below
# should typically capture the main features of the CMS implementation.
# There will always be exceptions however.
#
if 'has_FRdecay' in result:
has_FRdecay = result['has_FRdecay']
else:
has_FRdecay = False
# be tighter at LO
if not pert_orders:
CMS_test_threshold = 1e-3
else:
# AT NLO, a correct cancellation is typically of the order of 2% with
# a lowest lambda value of 10^-4. It is clear that the threshold should
# scale with the minimum lambda value because any little offset in the
# LO width value for example (acceptable when less than one 1% if the
# widths were computed numerically) will lead to an inaccuracy of the
# cancellation scaling with lambda.
if not has_FRdecay | |
any normalization function kwargs we've set
if formatspec['lcnorm_kwargs'] is not None:
normkwargs = formatspec['lcnorm_kwargs']
else:
normkwargs = None
# finally, get the magsarefluxes key and fileglob key
magsarefluxes = formatspec['magsarefluxes']
fileglob = formatspec['fileglob']
if not ('neighbors' in checkplotdict and
checkplotdict['neighbors'] and
len(checkplotdict['neighbors']) > 0):
LOGWARNING('no neighbors for %s, not updating...' %
(checkplotdict['objectid']))
return checkplotdict
# if there are actually neighbors, go through them in order
for nbr in checkplotdict['neighbors']:
objectid, ra, decl, dist, lcfpath = (nbr['objectid'],
nbr['ra'],
nbr['decl'],
nbr['dist'],
nbr['lcfpath'])
# get the light curve
if readerkwargs is not None:
lcdict = readerfunc(lcfpath, **readerkwargs)
else:
lcdict = readerfunc(lcfpath)
if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict):
lcdict = lcdict[0]
# normalize using the special function if specified
if normfunc is not None:
if normkwargs is not None:
lcdict = normfunc(lcdict,**normkwargs)
else:
lcdict = normfunc(lcdict)
# get the times, mags, and errs
# dereference the columns and get them from the lcdict
if '.' in timecol:
timecolget = timecol.split('.')
else:
timecolget = [timecol]
times = dict_get(lcdict, timecolget)
if '.' in magcol:
magcolget = magcol.split('.')
else:
magcolget = [magcol]
mags = dict_get(lcdict, magcolget)
if '.' in errcol:
errcolget = errcol.split('.')
else:
errcolget = [errcol]
errs = dict_get(lcdict, errcolget)
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=4.0)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
stimes, smags,
magsarefluxes=magsarefluxes
)
xtimes, xmags, xerrs = ntimes, nmags, serrs
else:
xtimes, xmags, xerrs = stimes, smags, serrs
#
# now we can start doing stuff
#
# 1. make an unphased mag-series plot
nbrdict = _pkl_magseries_plot(xtimes,
xmags,
xerrs,
magsarefluxes=magsarefluxes)
# update the nbr
nbr.update(nbrdict)
# for each lspmethod in the checkplot, make a corresponding plot for
# this neighbor
for lspt in PFMETHODS:
if lspt in checkplotdict:
# initialize this lspmethod entry
nbr[lspt] = {}
# we only care about the best period and its options
operiod, oepoch = (checkplotdict[lspt][0]['period'],
checkplotdict[lspt][0]['epoch'])
(ophasewrap, ophasesort, ophasebin,
ominbinelems, oplotxlim) = (
checkplotdict[lspt][0]['phasewrap'],
checkplotdict[lspt][0]['phasesort'],
checkplotdict[lspt][0]['phasebin'],
checkplotdict[lspt][0]['minbinelems'],
checkplotdict[lspt][0]['plotxlim'],
)
# make the phasedlc plot for this period
nbr = _pkl_phased_magseries_plot(
nbr,
lspt,
0,
xtimes, xmags, xerrs,
operiod, oepoch,
phasewrap=ophasewrap,
phasesort=ophasesort,
phasebin=ophasebin,
minbinelems=ominbinelems,
plotxlim=oplotxlim,
magsarefluxes=magsarefluxes,
verbose=verbose
)
# at this point, this neighbor's dict should be up to date with all
# info, magseries plot, and all phased LC plots
# return the updated checkplotdict
return checkplotdict
########################
## RUNNING CHECKPLOTS ##
########################
def runcp(pfpickle,
          outdir,
          lcbasedir,
          lclistpkl=None,
          nbrradiusarcsec=30.0,
          lcformat='hat-sql',
          timecols=None,
          magcols=None,
          errcols=None,
          mindet=1000):
    '''This runs a checkplot for the given period-finding result pickle
    produced by runpf.

    Parameters:
    pfpickle - path to a period-finding result pickle (optionally .gz)
    outdir - directory where the checkplot pickle(s) are written
    lcbasedir - base directory searched for the object's light curve file
    lclistpkl - optional light-curve list pickle used for neighbor lookups
    nbrradiusarcsec - neighbor search radius in arcseconds
    lcformat - light-curve format key resolved via get_lcformat_spec
    timecols, magcols, errcols - column names to process; None means use
        the defaults from the LC format spec
    mindet - minimum number of detections required for a checkplot

    Returns a list of written checkplot pickle paths, or None if the light
    curve for the object could not be found.
    '''
    from astrobase import checkplot
    # Period-finder result keys that may appear in the pfresults pickle.
    PFMETHODS = ['bls',
                 'gls',
                 'aov',
                 'mav',
                 'pdm',
                 'acf',
                 'win']
    #
    # handle the lcformat
    #
    formatspec = get_lcformat_spec(lcformat)
    # first, import the reader module
    readermod = importlib.import_module(formatspec['lcreader_module'])
    # then, get the function we need to read the lightcurve
    readerfunc = getattr(readermod, formatspec['lcreader_func'])
    # get any default kwargs we've set for this LC format
    if formatspec['lcreader_kwargs'] is not None:
        readerkwargs = formatspec['lcreader_kwargs']
    else:
        readerkwargs = None
    # get the default timecols, magcols, errcols
    if timecols is None:
        timecols = formatspec['timecols']
    if magcols is None:
        magcols = formatspec['magcols']
    if errcols is None:
        errcols = formatspec['errcols']
    # get the normalization module and function if provided
    if formatspec['lcnorm_module'] is not None:
        normmod = importlib.import_module(formatspec['lcnorm_module'])
        normfunc = getattr(normmod, formatspec['lcnorm_func'])
    else:
        normfunc = None
    # get any normalization function kwargs we've set
    # NOTE(review): normfunc/normkwargs are resolved here but never applied
    # below — the light curve is passed to checkplot un-normalized. Confirm
    # whether that is intentional for this pipeline stage.
    if formatspec['lcnorm_kwargs'] is not None:
        normkwargs = formatspec['lcnorm_kwargs']
    else:
        normkwargs = None
    # finally, get the magsarefluxes key and fileglob key
    magsarefluxes = formatspec['magsarefluxes']
    fileglob = formatspec['fileglob']
    # Load the period-finding results (transparently handling gzip).
    if pfpickle.endswith('.gz'):
        infd = gzip.open(pfpickle,'rb')
    else:
        infd = open(pfpickle,'rb')
    pfresults = pickle.load(infd)
    infd.close()
    objectid = pfresults['objectid']
    # find the light curve in lcbasedir
    # assumes LC filenames look like '<objectid>-<fileglob>' — TODO confirm
    # against the format spec's fileglob convention.
    lcfsearchpath = os.path.join(lcbasedir,
                                 '%s-%s' % (objectid, fileglob))
    matching = glob.glob(lcfsearchpath)
    if matching and len(matching) > 0:
        lcfpath = matching[0]
    else:
        LOGERROR('could not find light curve for pfresult %s, objectid %s' %
                 (pfpickle, objectid))
        return None
    if readerkwargs is not None:
        lcdict = readerfunc(lcfpath,**readerkwargs)
    else:
        lcdict = readerfunc(lcfpath)
    # Some readers return (lcdict, ...) tuples; unwrap the dict in that case.
    if isinstance(lcdict, tuple) and isinstance(lcdict[0], dict):
        lcdict = lcdict[0]
    cpfs = []
    # Make one checkplot per (time, mag, err) column triple.
    for tcol, mcol, ecol in zip(timecols, magcols, errcols):
        # dereference the columns and get them from the lcdict
        # ('a.b' keys address nested dicts)
        if '.' in tcol:
            tcolget = tcol.split('.')
        else:
            tcolget = [tcol]
        times = dict_get(lcdict, tcolget)
        if '.' in mcol:
            mcolget = mcol.split('.')
        else:
            mcolget = [mcol]
        mags = dict_get(lcdict, mcolget)
        if '.' in ecol:
            ecolget = ecol.split('.')
        else:
            ecolget = [ecol]
        errs = dict_get(lcdict, ecolget)
        pflist = []
        # pick up all of the period-finding methods in this pfresults pkl
        for pfmethod in PFMETHODS:
            if pfmethod in pfresults[mcolget[-1]]:
                pflist.append(pfresults[mcolget[-1]][pfmethod])
        # generate the output filename
        outfile = os.path.join(outdir,
                               'checkplot-%s-%s.pkl' % (objectid, mcol))
        # make sure the checkplot has a valid objectid
        if 'objectid' not in lcdict['objectinfo']:
            lcdict['objectinfo']['objectid'] = objectid
        # generate the checkplotdict
        cpd = checkplot.checkplot_dict(
            pflist,
            times, mags, errs,
            objectinfo=lcdict['objectinfo'],
            lclistpkl=lclistpkl,
            nbrradiusarcsec=nbrradiusarcsec,
            verbose=False,
            mindet=mindet
        )
        # include any neighbor information as well
        cpdupdated = update_checkplotdict_nbrlcs(
            cpd,
            tcol, mcol, ecol,
            lcformat=lcformat,
            verbose=False
        )
        # write the update checkplot dict to disk
        cpf = checkplot._write_checkplot_picklefile(
            cpdupdated,
            outfile=outfile,
            protocol=pickle.HIGHEST_PROTOCOL,
            outgzip=False
        )
        cpfs.append(cpf)
    LOGINFO('done with %s -> %s' % (objectid, repr(cpfs)))
    return cpfs
def cp2png(checkplotpickle):
    '''
    Convert a checkplot pickle file to a PNG image.

    Thin wrapper around astrobase.checkplot.checkplot_pickle_to_png; the
    output PNG path is derived from the input path by swapping the
    '.pkl' (or '.pkl.gz') extension for '.png'.
    '''
    from astrobase.checkplot import checkplot_pickle_to_png
    # Pick which extension to swap based on whether the pickle is gzipped.
    pickle_ext = '.pkl.gz' if checkplotpickle.endswith('.gz') else '.pkl'
    outfile = checkplotpickle.replace(pickle_ext, '.png')
    return checkplot_pickle_to_png(checkplotpickle, outfile)
##########################
## BINNING LIGHT CURVES ##
##########################
def timebinlc(lcfile,
              binsizesec,
              outdir=None,
              lcformat='hat-sql',
              timecols=None,
              magcols=None,
              errcols=None,
              minbinelems=7):
    '''Bin the light curve in `lcfile` in time using bins of `binsizesec`.

    The LC is read with the reader registered for `lcformat`. For each
    (timecol, magcol, errcol) triple, the magnitude series is binned with
    time_bin_magseries_with_errs (minbinelems is passed straight through to
    it) and the result is stored under lcdict['binned'][<last magcol
    component>]. The updated lcdict is pickled to
    '<objectid>-binned<binsizesec>sec-<lcformat>.pkl' in `outdir` (or next
    to `lcfile` when outdir is None).

    Returns the output pickle path, or None if the input LC already
    contains a 'binned' key.
    '''
    #
    # handle the lcformat
    #
    formatspec = get_lcformat_spec(lcformat)
    # first, import the reader module
    readermod = importlib.import_module(formatspec['lcreader_module'])
    # then, get the function we need to read the lightcurve
    readerfunc = getattr(readermod, formatspec['lcreader_func'])
    # get any default kwargs we've set for this LC format
    if formatspec['lcreader_kwargs'] is not None:
        readerkwargs = formatspec['lcreader_kwargs']
    else:
        readerkwargs = None
    # get the default timecols, magcols, errcols
    if timecols is None:
        timecols = formatspec['timecols']
    if magcols is None:
        magcols = formatspec['magcols']
    if errcols is None:
        errcols = formatspec['errcols']
    # get the normalization module and function if provided
    if formatspec['lcnorm_module'] is not None:
        normmod = importlib.import_module(formatspec['lcnorm_module'])
        normfunc = getattr(normmod, formatspec['lcnorm_func'])
    else:
        normfunc = None
    # get any normalization function kwargs we've set
    if formatspec['lcnorm_kwargs'] is not None:
        normkwargs = formatspec['lcnorm_kwargs']
    else:
        normkwargs = None
    # finally, get the magsarefluxes key and fileglob key
    # NOTE(review): normkwargs and fileglob are computed but never used in
    # this function, and normfunc is only tested for None below (never
    # called) -- presumably format-specific normalization happens at read
    # time. TODO confirm against the other lcproc functions.
    magsarefluxes = formatspec['magsarefluxes']
    fileglob = formatspec['fileglob']
    # get the LC into a dict
    if readerkwargs is not None:
        lcdict = readerfunc(lcfile, **readerkwargs)
    else:
        lcdict = readerfunc(lcfile)
    # some readers return (lcdict, ...) tuples; keep only the dict
    if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict):
        lcdict = lcdict[0]
    # skip already binned light curves
    if 'binned' in lcdict:
        LOGERROR('this light curve appears to be binned already, skipping...')
        return None
    for tcol, mcol, ecol in zip(timecols, magcols, errcols):
        # dereference the columns and get them from the lcdict
        # (dotted names like 'sap.sap_flux' address nested dicts)
        if '.' in tcol:
            tcolget = tcol.split('.')
        else:
            tcolget = [tcol]
        times = dict_get(lcdict, tcolget)
        if '.' in mcol:
            mcolget = mcol.split('.')
        else:
            mcolget = [mcol]
        mags = dict_get(lcdict, mcolget)
        if '.' in ecol:
            ecolget = ecol.split('.')
        else:
            ecolget = [ecol]
        errs = dict_get(lcdict, ecolget)
        # normalize here if not using special normalization
        if normfunc is None:
            ntimes, nmags = normalize_magseries(
                times, mags,
                magsarefluxes=magsarefluxes
            )
            # errs are not touched by the default normalization
            times, mags, errs = ntimes, nmags, errs
        # now bin the mag series as requested
        binned = time_bin_magseries_with_errs(times,
                                              mags,
                                              errs,
                                              binsize=binsizesec,
                                              minbinelems=minbinelems)
        # put this into the special binned key of the lcdict
        # we use mcolget[-1] here so we can deal with dereferenced magcols like
        # sap.sap_flux or pdc.pdc_sapflux
        if 'binned' not in lcdict:
            lcdict['binned'] = {mcolget[-1]: {'times':binned['binnedtimes'],
                                              'mags':binned['binnedmags'],
                                              'errs':binned['binnederrs'],
                                              'nbins':binned['nbins'],
                                              'timebins':binned['jdbins'],
                                              'binsizesec':binsizesec}}
        else:
            lcdict['binned'][mcolget[-1]] = {'times':binned['binnedtimes'],
                                             'mags':binned['binnedmags'],
                                             'errs':binned['binnederrs'],
                                             'nbins':binned['nbins'],
                                             'timebins':binned['jdbins'],
                                             'binsizesec':binsizesec}
    # done with binning for all magcols, now generate the output file
    # this will always be a pickle
    if outdir is None:
        outdir = os.path.dirname(lcfile)
    outfile = os.path.join(outdir, '%s-binned%.1fsec-%s.pkl' %
                           (lcdict['objectid'], binsizesec, lcformat))
    with open(outfile, 'wb') as outfd:
        pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
    return outfile
#####################################
## SUPPORT FOR EXECUTION | |
consumerLevelChecks,
assetLevelChecks)
self.ResetCaches()
def _HasGprimAncestor(self, prim):
from pxr import Sdf, UsdGeom
path = prim.GetPath()
if path in self._hasGprimInPathMap:
return self._hasGprimInPathMap[path]
elif path == Sdf.Path.absoluteRootPath:
self._hasGprimInPathMap[path] = False
return False
else:
val = (self._HasGprimAncestor(prim.GetParent()) or
prim.IsA(UsdGeom.Gprim))
self._hasGprimInPathMap[path] = val
return val
def _FindConnectableAncestor(self, prim):
from pxr import Sdf, UsdShade
path = prim.GetPath()
if path in self._connectableAncestorMap:
return self._connectableAncestorMap[path]
elif path == Sdf.Path.absoluteRootPath:
self._connectableAncestorMap[path] = None
return None
else:
val = self._FindConnectableAncestor(prim.GetParent())
# The GetTypeName() check is to work around a bug in
# ConnectableAPIBehavior registry.
if prim.GetTypeName() and not val:
conn = UsdShade.ConnectableAPI(prim)
if conn:
val = prim
self._connectableAncestorMap[path] = val
return val
    def CheckPrim(self, prim):
        """Validate nesting rules for `prim`.

        Two rules are checked:
        1. A Boundable must not have a Gprim ancestor (failure).
        2. A connectable prim must sit either under a Container connectable
           or under a chain of entirely non-connectable ancestors; a
           connectable under a non-Container connectable is a warning, and a
           non-connectable parent sandwiched below a connectable ancestor is
           a failure.
        """
        from pxr import UsdGeom, UsdShade
        parent = prim.GetParent()
        # Of course we must allow Boundables under other Boundables, so that
        # schemas like UsdGeom.Pointinstancer can nest their prototypes. But
        # we disallow a PointInstancer under a Mesh just as we disallow a Mesh
        # under a Mesh, for the same reason: we cannot then independently
        # adjust visibility for the two objects, nor can we reasonably compute
        # the parent Mesh's extent.
        if prim.IsA(UsdGeom.Boundable):
            if parent:
                if self._HasGprimAncestor(parent):
                    self._AddFailedCheck("Gprim <%s> has an ancestor prim that "
                                         "is also a Gprim, which is not allowed."
                                         % prim.GetPath())
        connectable = UsdShade.ConnectableAPI(prim)
        # The GetTypeName() check is to work around a bug in
        # ConnectableAPIBehavior registry.
        if prim.GetTypeName() and connectable:
            if parent:
                pConnectable = UsdShade.ConnectableAPI(parent)
                # same registry workaround: untyped parents are treated as
                # non-connectable
                if not parent.GetTypeName():
                    pConnectable = None
                if pConnectable and not pConnectable.IsContainer():
                    # XXX This should be a failure as it is a violation of the
                    # UsdShade OM.  But pragmatically, there are many
                    # authoring tools currently producing this structure, which
                    # does not _currently_ perturb Hydra, so we need to start
                    # with a warning
                    self._AddWarning("Connectable %s <%s> cannot reside "
                                     "under a non-Container Connectable %s"
                                     % (prim.GetTypeName(),
                                        prim.GetPath(),
                                        parent.GetTypeName()))
                elif not pConnectable:
                    # it's only OK to have a non-connectable parent if all
                    # the rest of your ancestors are also non-connectable.  The
                    # error message we give is targeted at the most common
                    # infraction, using Scope or other grouping prims inside
                    # a Container like a Material
                    connAnstr = self._FindConnectableAncestor(parent)
                    if connAnstr is not None:
                        self._AddFailedCheck("Connectable %s <%s> can only have"
                                             " Connectable Container ancestors"
                                             " up to %s ancestor <%s>, but its"
                                             " parent %s is a %s." %
                                             (prim.GetTypeName(),
                                              prim.GetPath(),
                                              connAnstr.GetTypeName(),
                                              connAnstr.GetPath(),
                                              parent.GetName(),
                                              parent.GetTypeName()))
def ResetCaches(self):
self._connectableAncestorMap = dict()
self._hasGprimInPathMap = dict()
class NormalMapTextureChecker(BaseRuleChecker):
    """Checks that UsdUVTexture nodes feeding a UsdPreviewSurface's
    inputs:normals are scaled/biased and color-space-tagged correctly for
    8-bit normal-map textures."""

    @staticmethod
    def GetDescription():
        return """UsdUVTexture nodes that feed the _inputs:normals_ of a
UsdPreviewSurface must ensure that the data is encoded and scaled properly.
Specifically:
- Since normals are expected to be in the range [(-1,-1,-1), (1,1,1)],
the Texture node must transform 8-bit textures from their [0..1] range by
setting its _inputs:scale_ to [2, 2, 2, 1] and
_inputs:bias_ to [-1, -1, -1, 0]
- Normal map data is commonly expected to be linearly encoded. However, many
image-writing tools automatically set the profile of three-channel, 8-bit
images to SRGB. To prevent an unwanted transformation, the UsdUVTexture's
_inputs:sourceColorSpace_ must be set to "raw". This program cannot
currently read the texture metadata itself, so for now we emit warnings
about this potential infraction for all 8 bit image formats.
"""

    def __init__(self, verbose, consumerLevelChecks, assetLevelChecks):
        super(NormalMapTextureChecker, self).__init__(verbose,
                                                      consumerLevelChecks,
                                                      assetLevelChecks)

    def _GetShaderId(self, shader):
        # We might someday try harder to find an identifier...
        return shader.GetShaderId()

    def _TextureIs8Bit(self, asset):
        """Return True if the resolved file extension suggests an 8-bit
        image format."""
        # Eventually we hope to leverage HioImage through a plugin system,
        # when Imaging is present, to answer this and other image queries
        # more definitively
        from pxr import Ar
        ext = Ar.GetResolver().GetExtension(asset.resolvedPath)
        # not an exhaustive list, but ones we typically can read
        return ext in ["bmp", "tga", "jpg", "png", "tif"]

    def _GetInputValue(self, shader, inputName):
        """Return the value of the named shader input at the earliest
        timecode, or None if the input does not exist."""
        from pxr import Usd
        # renamed from 'input' to avoid shadowing the builtin
        shaderInput = shader.GetInput(inputName)
        if not shaderInput:
            return None
        return shaderInput.Get(Usd.TimeCode.EarliestTime())

    def CheckPrim(self, prim):
        """If `prim` is a UsdPreviewSurface whose inputs:normals is fed by a
        UsdUVTexture, validate the texture's file, sourceColorSpace, scale,
        and bias settings for 8-bit normal maps."""
        from pxr import UsdShade, Gf
        from pxr.UsdShade import Utils as ShadeUtils
        if not prim.IsA(UsdShade.Shader):
            return
        shader = UsdShade.Shader(prim)
        if not shader:
            self._AddError("Invalid shader prim <%s>." % prim.GetPath())
            return
        shaderId = self._GetShaderId(shader)
        # We may have failed to fetch an identifier for asset/source-based
        # nodes. We are only interested in UsdPreviewSurface nodes identified
        # via info:id, so it's not an error
        if not shaderId or shaderId != NodeTypes.UsdPreviewSurface:
            return
        normalInput = shader.GetInput(ShaderProps.Normal)
        if not normalInput:
            return
        valueProducingAttrs = ShadeUtils.GetValueProducingAttributes(normalInput)
        if not valueProducingAttrs or valueProducingAttrs[0].GetPrim() == prim:
            return
        sourcePrim = valueProducingAttrs[0].GetPrim()
        sourceShader = UsdShade.Shader(sourcePrim)
        if not sourceShader:
            # In theory, could be connected to an interface attribute of a
            # parent connectable... not useful, but not an error
            if UsdShade.ConnectableAPI(sourcePrim):
                return
            # BUGFIX: the format string has three %s placeholders but the
            # tuple previously supplied only two values, which raised a
            # TypeError instead of reporting the failed check.
            self._AddFailedCheck("%s.%s on prim <%s> is connected to a"
                                 " non-Shader prim." % \
                                 (NodeTypes.UsdPreviewSurface,
                                  ShaderProps.Normal,
                                  prim.GetPath()))
            return
        sourceId = self._GetShaderId(sourceShader)
        # We may have failed to fetch an identifier for asset/source-based
        # nodes. OR, we could potentially be driven by a UsdPrimvarReader,
        # in which case we'd have nothing to validate
        if not sourceId or sourceId != NodeTypes.UsdUVTexture:
            return
        texAsset = self._GetInputValue(sourceShader, ShaderProps.File)
        if not texAsset or not texAsset.resolvedPath:
            self._AddFailedCheck("%s prim <%s> has invalid or unresolvable "
                                 "inputs:file of @%s@" % \
                                 (NodeTypes.UsdUVTexture,
                                  sourcePrim.GetPath(),
                                  texAsset.path if texAsset else ""))
            return
        if not self._TextureIs8Bit(texAsset):
            # really nothing more is required for image depths > 8 bits,
            # which we assume FOR NOW, are floating point
            return
        if not self._GetInputValue(sourceShader, ShaderProps.SourceColorSpace):
            self._AddWarning("%s prim <%s> that reads Normal Map @%s@ may need "
                             "to set inputs:sourceColorSpace to 'raw' as some "
                             "8-bit image writers always indicate an SRGB "
                             "encoding." % \
                             (NodeTypes.UsdUVTexture,
                              sourcePrim.GetPath(),
                              texAsset.path))
        bias = self._GetInputValue(sourceShader, ShaderProps.Bias)
        scale = self._GetInputValue(sourceShader, ShaderProps.Scale)
        if not (bias and scale and
                isinstance(bias, Gf.Vec4f) and isinstance(scale, Gf.Vec4f)):
            # XXX This should be a failure, as it results in broken normal
            # maps in Storm and hdPrman, at least.  But for the same reason
            # as the shader-under-shader check, we cannot fail until at least
            # the major authoring tools have been updated.
            self._AddWarning("%s prim <%s> reads 8 bit Normal Map @%s@, "
                             "which requires that inputs:scale be set to "
                             "[2, 2, 2, 1] and inputs:bias be set to "
                             "[-1, -1, -1, 0] for proper interpretation." %\
                             (NodeTypes.UsdUVTexture,
                              sourcePrim.GetPath(),
                              texAsset.path))
            return
        # don't really care about fourth components...
        if (bias[0] != -1 or bias[1] != -1 or bias[2] != -1 or
            scale[0] != 2 or scale[1] != 2 or scale[2] != 2):
            self._AddWarning("%s prim <%s> reads an 8 bit Normal Map, "
                             "but has non-standard inputs:scale and "
                             "inputs:bias values of %s and %s" %\
                             (NodeTypes.UsdUVTexture,
                              sourcePrim.GetPath(),
                              str(scale), str(bias)))
class ARKitPackageEncapsulationChecker(BaseRuleChecker):
    """Verifies that a package-rooted stage only references files that live
    inside the package itself."""

    @staticmethod
    def GetDescription():
        return "If the root layer is a package, then the composed stage "\
               "should not contain references to files outside the package. "\
               "In other words, the package should be entirely self-contained."

    def __init__(self, verbose, consumerLevelChecks, assetLevelChecks):
        super(ARKitPackageEncapsulationChecker, self).__init__(
            verbose, consumerLevelChecks, assetLevelChecks)

    def CheckDependencies(self, usdStage, layerDeps, assetDeps):
        """Flag every loaded layer or referenced asset whose real path falls
        outside the stage's root package."""
        rootLayer = usdStage.GetRootLayer()
        if not _IsPackageOrPackagedLayer(rootLayer):
            return
        packagePath = rootLayer.realPath
        if not packagePath:
            return
        if Ar.IsPackageRelativePath(packagePath):
            packagePath = Ar.SplitPackageRelativePathOuter(packagePath)[0]
        for dep in layerDeps:
            # In-memory layers like session layers (which we must skip when
            # doing this check) won't have a real path.
            if dep.realPath and not dep.realPath.startswith(packagePath):
                self._AddFailedCheck("Found loaded layer '%s' that "
                                     "does not belong to the package '%s'." %
                                     (dep.identifier, packagePath))
        for dep in assetDeps:
            if not dep.startswith(packagePath):
                self._AddFailedCheck("Found asset reference '%s' that "
                                     "does not belong to the package '%s'." %
                                     (dep, packagePath))
class ARKitLayerChecker(BaseRuleChecker):
# Only core USD file formats are allowed.
_allowedLayerFormatIds = ('usd', 'usda', 'usdc', 'usdz')
@staticmethod
def GetDescription():
return "All included layers that participate in composition should"\
" have one of the core supported file formats."
    def __init__(self, verbose, consumerLevelChecks, assetLevelChecks):
        # Plain pass-through to BaseRuleChecker; this checker keeps no state
        # of its own.  (The previous comment here, about prim types, was a
        # copy-paste leftover from another checker.)
        super(ARKitLayerChecker, self).__init__(verbose,
                                                consumerLevelChecks,
                                                assetLevelChecks)
def CheckLayer(self, layer):
self._Msg("Checking layer <%s>." % layer.identifier)
formatId = layer.GetFileFormat().formatId
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases for deploying Virtual Machine using impersonation (passing account and domainId parameters) for shared network
"""
# Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.cloudstackException import CloudstackAclException
from nose.plugins.attrib import attr
# Import System modules
import time
_multiprocess_shared_ = True
class TestSharedNetworkImpersonation(cloudstackTestCase):
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are reqiured for executing impersonation test cases for shared networks:
Under ROOT - create 2 domaind D1 and D2
Under D1 - Create 2 subdomain D11 and D12
Under D11 - Create subdimain D111
Under each of the domain create 1 admin user and couple of regular users.
Create shared network with the following scope:
1. Network with scope="all"
2. Network with scope="domain" with no subdomain access
3. Network with scope="domain" with subdomain access
4. Network with scope="account"
"""
cls.testclient = super(TestSharedNetworkImpersonation, cls).getClsTestClient()
cls.apiclient = cls.testclient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.acldata = cls.testdata["acl"]
cls.domain_1 = None
cls.domain_2 = None
cleanup = []
try:
# backup default apikey and secretkey
cls.default_apikey = cls.apiclient.connection.apiKey
cls.default_secretkey = cls.apiclient.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.apiclient,
cls.acldata["domain1"]
)
cls.domain_11 = Domain.create(
cls.apiclient,
cls.acldata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_111 = Domain.create(
cls.apiclient,
cls.acldata["domain111"],
parentdomainid=cls.domain_11.id,
)
cls.domain_12 = Domain.create(
cls.apiclient,
cls.acldata["domain12"],
parentdomainid=cls.domain_1.id
)
cls.domain_2 = Domain.create(
cls.apiclient,
cls.acldata["domain2"]
)
# Create 1 admin account and 2 user accounts for doamin_1
cls.account_d1 = Account.create(
cls.apiclient,
cls.acldata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.apiclient,
cls.acldata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.apiclient,
cls.acldata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
# Create 1 admin and 2 user accounts for doamin_11
cls.account_d11 = Account.create(
cls.apiclient,
cls.acldata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.apiclient,
cls.acldata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.apiclient,
cls.acldata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
# Create 2 user accounts and 1 admin account for doamin_111
cls.account_d111 = Account.create(
cls.apiclient,
cls.acldata["accountD111"],
admin=True,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d111)
cls.user_d111_apikey = user.apikey
cls.user_d111_secretkey = user.secretkey
cls.account_d111a = Account.create(
cls.apiclient,
cls.acldata["accountD111A"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d111a)
cls.user_d111a_apikey = user.apikey
cls.user_d111a_secretkey = user.secretkey
cls.account_d111b = Account.create(
cls.apiclient,
cls.acldata["accountD111B"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d111b)
cls.user_d111b_apikey = user.apikey
cls.user_d111b_secretkey = user.secretkey
# Create 2 user accounts for doamin_12
cls.account_d12a = Account.create(
cls.apiclient,
cls.acldata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.apiclient,
cls.acldata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account for domain_2
cls.account_d2a = Account.create(
cls.apiclient,
cls.acldata["accountD2"],
admin=False,
domainid=cls.domain_2.id
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_d2a)
cls.user_d2a_apikey = user.apikey
cls.user_d2a_secretkey = user.secretkey
# Create 1 user account and admin account in "ROOT" domain
cls.account_roota = Account.create(
cls.apiclient,
cls.acldata["accountROOTA"],
admin=False,
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_roota)
cls.user_roota_apikey = user.apikey
cls.user_roota_secretkey = user.secretkey
cls.account_root = Account.create(
cls.apiclient,
cls.acldata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.apiclient, cls.account_root)
cls.user_root_apikey = user.apikey
cls.user_root_secretkey = user.secretkey
# create service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.acldata["service_offering"]["small"]
)
cls.zone = get_zone(cls.apiclient, cls.testclient.getZoneForTests())
cls.acldata['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, cls.acldata["ostype"])
## As admin user , create shared network with scope "all","domain" with subdomain access , "domain" without subdomain access and "account"
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
list_shared_network_offerings_response = NetworkOffering.list(
cls.apiclient,
name="DefaultSharedNetworkOffering",
displayText="Offering for Shared networks"
)
# Override name parameter so that there is no overlap with names being used in other shared network test suites
cls.acldata["network_all"]["name"] = cls.acldata["network_all"]["name"] + "-impersonation"
cls.acldata["network_domain_with_no_subdomain_access"]["name"] = cls.acldata["network_domain_with_no_subdomain_access"]["name"] + "-impersonation"
cls.acldata["network_domain_with_subdomain_access"]["name"] = cls.acldata["network_domain_with_subdomain_access"]["name"] + "-impersonation"
cls.acldata["network_account"]["name"] = cls.acldata["network_account"]["name"] + "-impersonation"
cls.shared_network_offering_id = list_shared_network_offerings_response[0].id
cls.shared_network_all = Network.create(
cls.apiclient,
cls.acldata["network_all"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id
)
cls.shared_network_domain_d11 = Network.create(
cls.apiclient,
cls.acldata["network_domain_with_no_subdomain_access"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=False
)
cls.shared_network_domain_with_subdomain_d11 = Network.create(
cls.apiclient,
cls.acldata["network_domain_with_subdomain_access"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_11.id,
subdomainaccess=True
)
cls.shared_network_account_d111a = Network.create(
cls.apiclient,
cls.acldata["network_account"],
networkofferingid=cls.shared_network_offering_id,
zoneid=cls.zone.id,
domainid=cls.domain_111.id,
accountid=cls.account_d111a.user[0].username
)
cls.vmdata = {"name": "test",
"displayname": "test"
}
cls.cleanup = [
cls.account_root,
cls.account_roota,
cls.shared_network_all,
cls.service_offering,
]
except Exception as e:
cls.domain_1.delete(cls.apiclient, cleanup="true")
cls.domain_2.delete(cls.apiclient, cleanup="true")
cleanup_resources(cls.apiclient, cls.cleanup)
raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestSharedNetworkImpersonation, cls).getClsTestClient().getApiClient()
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
cls.domain_1.delete(cls.apiclient, cleanup="true")
cls.domain_2.delete(cls.apiclient, cleanup="true")
cleanup_resources(cls.apiclient, cls.cleanup)
return
    def setUp(cls):
        # NOTE(review): the instance parameter is named 'cls' instead of the
        # conventional 'self' throughout this class; behavior is unaffected.
        # Grab fresh API and DB clients for each test.
        cls.apiclient = cls.testClient.getApiClient()
        cls.dbclient = cls.testClient.getDbConnection()
    def tearDown(cls):
        # restore back default apikey and secretkey so a test that switched
        # to a user's keys cannot leak them into the next test
        cls.apiclient.connection.apiKey = cls.default_apikey
        cls.apiclient.connection.securityKey = cls.default_secretkey
        return
## Test cases relating to deploying Virtual Machine as ROOT admin for other users in shared network with scope=all
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_all_domainuser(self):
"""
Valiate that ROOT admin is able to deploy a VM for other users in a shared network with scope=all
"""
# Deploy VM for a user in a domain under ROOT as admin
self.apiclient.connection.apiKey = self.default_apikey
self.apiclient.connection.securityKey = self.default_secretkey
self.vmdata["name"] = self.acldata["vmD1A"]["name"] + "-shared-scope-all-root-admin"
self.vmdata["displayname"] = self.acldata["vmD1A"]["displayname"] + "-shared-scope-all-root-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d1a.name and vm.domainid == self.account_d1a.domainid,
True,
"ROOT admin is not able to deploy a VM for other users in a shared network with scope=all")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_all_domainadminuser(self):
"""
Valiate that ROOT admin is able to deploy a VM for a domain admin users in a shared network with scope=all
"""
# Deploy VM for an admin user in a domain under ROOT as admin
self.apiclient.connection.apiKey = self.default_apikey
self.apiclient.connection.securityKey = self.default_secretkey
self.vmdata["name"] = self.acldata["vmD1"]["name"] + "-shared-scope-all-root-admin"
self.vmdata["displayname"] = self.acldata["vmD1"]["displayname"] + "-shared-scope-all-root-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d1.name,
domainid=self.account_d1.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d1.name and vm.domainid == self.account_d1.domainid,
True,
"ROOT admin is not able to deploy a VM for a domain admin users in a shared network with scope=all")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_all_subdomainuser(self):
"""
Valiate that ROOT admin is able to deploy a VM for any user in a subdomain in a shared network with scope=all
"""
# Deploy VM as user in a subdomain under ROOT
self.apiclient.connection.apiKey = self.default_apikey
self.apiclient.connection.securityKey = self.default_secretkey
self.vmdata["name"] = self.acldata["vmD11A"]["name"] + "-shared-scope-all-root-admin"
self.vmdata["displayname"] = self.acldata["vmD11A"]["displayname"] + "-shared-scope-all-root-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid,
True,
"ROOT admin is not able to deploy a VM for any user in a subdomain in a shared network with scope=all")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_all_subdomainadminuser(self):
"""
Valiate that ROOT admin is able to deploy a VM for admin user in a domain in a shared network with scope=all
"""
# Deploy VM as an admin user in a subdomain under ROOT
self.apiclient.connection.apiKey = self.default_apikey
self.apiclient.connection.securityKey = self.default_secretkey
self.vmdata["name"] = self.acldata["vmD11"]["name"] + "-shared-scope-all-root-admin"
self.vmdata["displayname"] = self.acldata["vmD11"]["displayname"] + "-shared-scope-all-root-admin"
vm = VirtualMachine.create(
self.apiclient,
self.vmdata,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.shared_network_all.id,
accountid=self.account_d11.name,
domainid=self.account_d11.domainid
)
self.assertEqual(vm.state == "Running" and vm.account == self.account_d11.name and vm.domainid == self.account_d11.domainid,
True,
"ROOT admin is not able to deploy a VM for admin user in a domain in a shared network with scope=all")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_deployVM_in_sharedNetwork_as_admin_scope_all_ROOTuser(self):
"""
| |
"""
An example that uses TensorRT's Python api to make inferences.
"""
import ctypes
import os
import shutil
import random
import argparse
import time
import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
MAX_OUTPUT_BBOX_COUNT = 1000 #must match with 'MAX_OUTPUT_BBOX_COUNT' in the yololayer.h
def get_img_path_batches(batch_size, img_dir):
    """Walk `img_dir` recursively and group the paths of files whose names
    contain '.png' into lists of at most `batch_size` entries.

    Prints the number of images found and returns the list of batches (the
    final batch may be shorter than `batch_size`).
    """
    batches = []
    current = []
    total = 0
    for root, dirs, files in os.walk(img_dir):
        for fname in files:
            if fname.find('.png') == -1:
                continue
            if len(current) == batch_size:
                batches.append(current)
                current = []
            current.append(os.path.join(root, fname))
            total += 1
    print(f'loaded {total} images')
    if current:
        batches.append(current)
    return batches
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """
    description: Plots one bounding box on image img,
                 this function comes from YoLov5 project.
    param:
        x:              a box likes [x1,y1,x2,y2]
        img:            a opencv image object
        color:          color to draw rectangle, such as (0,255,0)
        label:          str
        line_thickness: int
    return:
        no return
    """
    if not line_thickness:
        # scale thickness with image size
        line_thickness = round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    tl = line_thickness
    if color is None:
        color = [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, color,
                  thickness=tl, lineType=cv2.LINE_AA)
    if not label:
        return
    tf = max(tl - 1, 1)  # font thickness
    text_w, text_h = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
    # filled background box for the label text
    label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
    cv2.rectangle(img, top_left, label_corner, color, -1, cv2.LINE_AA)
    cv2.putText(
        img,
        label,
        (top_left[0], top_left[1] - 2),
        0,
        tl / 3,
        [225, 255, 255],
        thickness=tf,
        lineType=cv2.LINE_AA,
    )
class YoLov5TRT(object):
"""
description: A YOLOv5 class that warps TensorRT ops, preprocess and postprocess ops.
"""
    def __init__(self, engine_file_path):
        """Load a serialized TensorRT engine from `engine_file_path` and
        allocate the page-locked host and device buffers for all of its
        bindings."""
        # Create a Context on this device,
        self.ctx = cuda.Device(0).make_context()
        self.stream = cuda.Stream()
        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
        runtime = trt.Runtime(TRT_LOGGER)
        # Deserialize the engine from file
        with open(engine_file_path, "rb") as f:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        self.context = self.engine.create_execution_context()
        self.batch_max_size = self.engine.max_batch_size
        # parallel lists: host_inputs[i] pairs with cuda_inputs[i], etc.
        self.host_inputs = []
        self.cuda_inputs = []
        self.host_outputs = []
        self.cuda_outputs = []
        self.bindings = []
        for binding in self.engine:
            print('bingding:', binding, self.engine.get_binding_shape(binding))
            # buffer sized for the maximum batch the engine supports
            size = trt.volume(self.engine.get_binding_shape(binding)) * self.engine.max_batch_size
            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
            # Allocate host and device buffers
            host_mem = cuda.pagelocked_empty(size, dtype)
            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
            # Append the device buffer to device bindings.
            self.bindings.append(int(cuda_mem))
            # Append to the appropriate list.
            if self.engine.binding_is_input(binding):
                # input binding shape ends with (..., H, W); record both for
                # preprocessing
                self.input_w = self.engine.get_binding_shape(binding)[-1]
                self.input_h = self.engine.get_binding_shape(binding)[-2]
                self.host_inputs.append(host_mem)
                self.cuda_inputs.append(cuda_mem)
            else:
                self.host_outputs.append(host_mem)
                self.cuda_outputs.append(cuda_mem)
def infer(self, raw_image_generator):
    """Run one batched inference pass and draw detections on the input frames.

    param:
        raw_image_generator: iterable yielding BGR HWC frames; at most
            self.batch_max_size frames are expected (buffer is sized for that).
    return:
        batch_image_raw: the input frames with boxes/labels drawn in place
        dict of wall-clock timings in milliseconds: 'pre', 'exec', 'post'
    """
    start = time.time()
    # Make self the active context, pushing it on top of the context stack.
    self.ctx.push()
    # Do image preprocess
    batch_image_raw = []
    batch_origin_h = []
    batch_origin_w = []
    # np.empty defaults to float64; values are implicitly downcast to the
    # engine's input dtype by np.copyto into host_inputs below.
    batch_input_image = np.empty(shape=[self.batch_max_size, 3, self.input_h, self.input_w])
    batch_size = 0
    for i, image_raw in enumerate(raw_image_generator):
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
        batch_image_raw.append(image_raw)
        batch_origin_h.append(origin_h)
        batch_origin_w.append(origin_w)
        np.copyto(batch_input_image[i], input_image)
        batch_size += 1
    batch_input_image = np.ascontiguousarray(batch_input_image)
    end = time.time()
    preproc_time = end - start
    start = time.time()
    # Copy input image to host buffer
    np.copyto(self.host_inputs[0], batch_input_image.ravel())
    # Transfer input data to the GPU.
    cuda.memcpy_htod_async(self.cuda_inputs[0], self.host_inputs[0], self.stream)
    # Run inference.
    self.context.execute_async(batch_size=batch_size, bindings=self.bindings, stream_handle=self.stream.handle)
    # Transfer predictions back from the GPU.
    cuda.memcpy_dtoh_async(self.host_outputs[0], self.cuda_outputs[0], self.stream)
    # Synchronize the stream
    self.stream.synchronize()
    end = time.time()
    exec_time = end - start
    start = time.time()
    # Remove any context from the top of the context stack, deactivating it.
    self.ctx.pop()
    # Here we use the first row of output in that batch_size = 1
    output = self.host_outputs[0]
    # Do postprocess: each image's slice of the flat output buffer is
    # 6*MAX_OUTPUT_BBOX_COUNT + 1 floats (a count followed by 6-float boxes).
    for i in range(batch_size):
        result_boxes, result_scores, result_classid = self.post_process(
            output[i * (6*MAX_OUTPUT_BBOX_COUNT + 1): (i + 1) * (6*MAX_OUTPUT_BBOX_COUNT + 1)], batch_origin_h[i], batch_origin_w[i]
        )
        # Draw rectangles and labels on the original image
        for j in range(len(result_boxes)):
            box = result_boxes[j]
            plot_one_box(
                box,
                batch_image_raw[i],
                label="{}:{:.2f}".format(
                    categories[int(result_classid[j])], result_scores[j]
                ),
            )
    end = time.time()
    postproc_time = end - start
    return batch_image_raw, {'pre':preproc_time*1000, 'exec':exec_time*1000, 'post':postproc_time*1000}
def destroy(self):
    """Deactivate this instance's CUDA context; call once when done inferring."""
    # Remove any context from the top of the context stack, deactivating it.
    self.ctx.pop()
def get_raw_image(self, image_path_batch):
    """
    description: Lazily read each image in the batch from disk with cv2.imread.
    """
    yield from map(cv2.imread, image_path_batch)
def get_raw_image_zeros(self, image_path_batch=None):
    """
    description: Yield one all-zero uint8 HWC frame per batch slot (warmup data).
    """
    blank_shape = (self.input_h, self.input_w, 3)
    # A fresh array per slot, matching the engine's maximum batch size.
    for _slot in range(self.batch_max_size):
        yield np.zeros(blank_shape, dtype=np.uint8)
def preprocess_image(self, image_raw):
    """
    description: Convert a BGR HWC uint8 frame to a normalized NCHW RGB
        float32 tensor. The frame must ALREADY be at the network input
        resolution — this method only asserts the size; it does not resize
        or pad (unlike the letterbox handling assumed by xywh2xyxy).
    param:
        image_raw: BGR image of shape (self.input_h, self.input_w, 3)
    return:
        image: the processed NCHW image, shape (1, 3, H, W)
        image_raw: the original image, unchanged
        h: original height
        w: original width
    """
    h, w, c = image_raw.shape
    assert h==self.input_h
    assert w==self.input_w
    image = image_raw.astype(np.float32)
    # Normalize to [0,1]
    image /= 255.0
    # HWC to CHW format:
    image = np.transpose(image, [2, 0, 1])
    # BGR to RGB: channels are now axis 0, so reversing axis 0 swaps B and R.
    image = image[::-1]
    # CHW to NCHW format
    image = np.expand_dims(image, axis=0)
    # Convert the image to row-major order, also known as "C order":
    image = np.ascontiguousarray(image)
    return image, image_raw, h, w
def xywh2xyxy(self, origin_h, origin_w, x):
    """
    description: Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.
        Also undoes letterboxing: boxes are in network-input coordinates;
        the padding offset along the non-limiting axis is subtracted and the
        result is rescaled by the limiting axis's ratio back to original-image
        coordinates.
    param:
        origin_h: height of original image
        origin_w: width of original image
        x: A boxes numpy, each row is a box [center_x, center_y, w, h]
    return:
        y: A boxes numpy, each row is a box [x1, y1, x2, y2]
    """
    y = np.zeros_like(x)
    r_w = self.input_w / origin_w
    r_h = self.input_h / origin_h
    if r_h > r_w:
        # Width is the limiting dimension: image was scaled by r_w and
        # padded vertically; remove the vertical pad before rescaling.
        y[:, 0] = x[:, 0] - x[:, 2] / 2
        y[:, 2] = x[:, 0] + x[:, 2] / 2
        y[:, 1] = x[:, 1] - x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
        y[:, 3] = x[:, 1] + x[:, 3] / 2 - (self.input_h - r_w * origin_h) / 2
        y /= r_w
    else:
        # Height is the limiting dimension: horizontal pad is removed instead.
        y[:, 0] = x[:, 0] - x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
        y[:, 2] = x[:, 0] + x[:, 2] / 2 - (self.input_w - r_h * origin_w) / 2
        y[:, 1] = x[:, 1] - x[:, 3] / 2
        y[:, 3] = x[:, 1] + x[:, 3] / 2
        y /= r_h
    return y
def post_process(self, output, origin_h, origin_w):
    """
    description: postprocess one image's raw prediction buffer.
    param:
        output: flat numpy buffer [num_boxes, cx,cy,w,h,conf,cls_id, ...]
        origin_h: height of original image
        origin_w: width of original image
    return:
        result_boxes: boxes numpy, each row [x1, y1, x2, y2] (empty if none)
        result_scores: confidence per box (empty if none)
        result_classid: class id per box (empty if none)
    """
    # First float of the buffer is the detection count.
    num_detections = int(output[0])
    # View the remainder as rows of 6 floats, truncated to the real count.
    pred = np.reshape(output[1:], (-1, 6))[:num_detections, :]
    # Confidence filtering + NMS happen inside non_max_suppression.
    kept = self.non_max_suppression(
        pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD
    )
    if len(kept):
        return kept[:, :4], kept[:, 4], kept[:, 5]
    return np.array([]), np.array([]), np.array([])
def bbox_iou(self, box1, box2, x1y1x2y2=True):
"""
description: compute the IoU of two bounding boxes
param:
box1: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
box2: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
x1y1x2y2: select the coordinate format
return:
iou: computed iou
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
# Get the coordinates of the intersection rectangle
inter_rect_x1 = np.maximum(b1_x1, b2_x1)
inter_rect_y1 = np.maximum(b1_y1, b2_y1)
inter_rect_x2 = np.minimum(b1_x2, b2_x2)
inter_rect_y2 = np.minimum(b1_y2, b2_y2)
# Intersection area
inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, | |
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import SequentialSampler
from torch import nn
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import mean_squared_error, roc_auc_score, average_precision_score, f1_score
from lifelines.utils import concordance_index
from scipy.stats import pearsonr
import pickle
torch.manual_seed(2) # reproducible torch:2 np:3
np.random.seed(3)
import copy
from prettytable import PrettyTable
import scikitplot as skplt
import os
from DeepPurpose.utils import *
from DeepPurpose.model_helper import Encoder_MultipleLayers, Embeddings
from DeepPurpose.models import transformer, CNN, CNN_RNN, MLP, MPNN
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Classifier(nn.Sequential):
    """MLP prediction head on top of a drug encoder.

    The encoder output (size config['hidden_dim_drug']) is passed through the
    hidden layers listed in config['cls_hidden_dims'] (ReLU + dropout after
    each) and a final linear layer producing a single scalar per sample.
    """
    def __init__(self, model_drug, **config):
        super(Classifier, self).__init__()
        self.input_dim_drug = config['hidden_dim_drug']
        self.model_drug = model_drug
        self.dropout = nn.Dropout(0.1)
        self.hidden_dims = config['cls_hidden_dims']
        # Layer widths: encoder output -> hidden dims -> scalar output.
        dims = [self.input_dim_drug] + self.hidden_dims + [1]
        self.predictor = nn.ModuleList(
            nn.Linear(in_dim, out_dim) for in_dim, out_dim in zip(dims[:-1], dims[1:])
        )

    def forward(self, v_D):
        # Encode the drug input, then run the MLP head.
        features = self.model_drug(v_D)
        *hidden_layers, head = self.predictor
        for layer in hidden_layers:
            features = F.relu(self.dropout(layer(features)))
        # Final layer: raw score, no activation (sigmoid is applied by callers).
        return head(features)
def model_initialize(**config):
    """Build a fresh Property_Prediction model from keyword config."""
    return Property_Prediction(**config)
def model_pretrained(path_dir = None, model = None):
    """Reconstruct a Property_Prediction model from a saved directory.

    When *model* (a pretrained-model name) is given, it is downloaded first
    and its directory overrides *path_dir*.
    """
    if model is not None:
        path_dir = download_pretrained_model(model)
    loaded = Property_Prediction(**load_dict(path_dir))
    loaded.load_pretrained(path_dir + '/model.pt')
    return loaded
def repurpose(X_repurpose, model, drug_names = None,
              result_folder = "./result/", convert_y = False, output_num_max = 10, verbose = True):
    """Score a list of drugs with a trained model and write a ranked table.

    X_repurpose: a list of SMILES strings; drug_names: parallel list of names
    (rows are only tabulated when it is given). convert_y converts predictions
    from p-scale to nM, and flips the sort to ascending (lower nM = better).
    Returns the raw prediction array.
    NOTE(review): output_num_max is accepted but never used in this body —
    only the first 13 lines of the table are echoed; confirm intent.
    """
    # X_repurpose: a list of SMILES string
    fo = os.path.join(result_folder, "repurposing.txt")
    print_list = []
    with open(fo, 'w') as fout:
        print('repurposing...')
        df_data, _, _ = data_process(X_repurpose, drug_encoding = model.drug_encoding, split_method='repurposing_VS')
        y_pred = model.predict(df_data)
        if convert_y:
            y_pred = convert_y_unit(np.array(y_pred), 'p', 'nM')
        print('---------------')
        if verbose:
            print('Drug Repurposing Result')
        if model.binary:
            table_header = ["Rank", "Drug Name", "Interaction", "Probability"]
        else:
            ### regression
            table_header = ["Rank", "Drug Name", "Binding Score"]
        table = PrettyTable(table_header)
        if drug_names is not None:
            # Column width for the human-readable string: longest name + 1.
            f_d = max([len(o) for o in drug_names]) + 1
            for i in range(len(X_repurpose)):
                if model.binary:
                    # 0.5 probability cut-off for the YES/NO interaction call.
                    if y_pred[i] > 0.5:
                        string_lst = [drug_names[i], "YES", "{0:.2f}".format(y_pred[i])]
                    else:
                        string_lst = [drug_names[i], "NO", "{0:.2f}".format(y_pred[i])]
                else:
                    #### regression
                    #### Rank, Drug Name, Target Name, binding score
                    string_lst = [drug_names[i], "{0:.2f}".format(y_pred[i])]
                    string = 'Drug ' + '{:<{f_d}}'.format(drug_names[i], f_d =f_d) + \
                        ' predicted to have binding affinity score ' + "{0:.2f}".format(y_pred[i])
                    #print_list.append((string, y_pred[i]))
                print_list.append((string_lst, y_pred[i]))
        # nM scale: smaller is better (ascending); p scale: larger is better.
        if convert_y:
            print_list.sort(key = lambda x:x[1])
        else:
            print_list.sort(key = lambda x:x[1], reverse = True)
        print_list = [i[0] for i in print_list]
        for idx, lst in enumerate(print_list):
            lst = [str(idx + 1)] + lst
            table.add_row(lst)
        fout.write(table.get_string())
    if verbose:
        # Echo only the top of the written table to stdout.
        with open(fo, 'r') as fin:
            lines = fin.readlines()
            for idx, line in enumerate(lines):
                if idx < 13:
                    print(line, end = '')
                else:
                    print('checkout ' + fo + ' for the whole list')
                    break
    return y_pred
def mpnn_feature_collate_func(x):
    """Field-wise concatenation of per-sample MPNN feature tensor lists.

    x is a list of samples, each a list of tensors; field i of the result is
    torch.cat of every sample's i-th tensor along dim 0.
    """
    num_fields = len(x[0])
    return [
        torch.cat([sample[field] for sample in x], 0)
        for field in range(num_fields)
    ]


def mpnn_collate_func(x):
    """DataLoader collate_fn for MPNN batches.

    Each sample in x is (mpnn_feature_list, label). Graph features are
    concatenated field-wise; the remaining label entries go through the
    default collate.
    """
    from torch.utils.data.dataloader import default_collate
    graph_features = mpnn_feature_collate_func([sample[0] for sample in x])
    remainder = default_collate([[sample[1]] for sample in x])
    return [graph_features] + remainder
## used in dataloader
class Property_Prediction:
'''
Drug Property Prediction
'''
def __init__(self, **config):
    """Assemble encoder + classifier head from a config dict.

    Required keys depend on the encoder: 'drug_encoding' always;
    'input_dim_drug'/'hidden_dim_drug'/'mlp_hidden_dims_drug' for static
    fingerprints; 'mpnn_depth' for MPNN; plus 'result_folder' and the
    Classifier's keys. Defaults are filled in for 'num_workers' and 'decay'.
    """
    drug_encoding = config['drug_encoding']
    if drug_encoding == 'Morgan' or drug_encoding=='Pubchem' or drug_encoding=='Daylight' or drug_encoding=='rdkit_2d_normalized':
        # Future TODO: support multiple encoding scheme for static input
        self.model_drug = MLP(config['input_dim_drug'], config['hidden_dim_drug'], config['mlp_hidden_dims_drug'])
    elif drug_encoding == 'CNN':
        self.model_drug = CNN('drug', **config)
    elif drug_encoding == 'CNN_RNN':
        self.model_drug = CNN_RNN('drug', **config)
    elif drug_encoding == 'Transformer':
        self.model_drug = transformer('drug', **config)
    elif drug_encoding == 'MPNN':
        self.model_drug = MPNN(config['hidden_dim_drug'], config['mpnn_depth'])
    else:
        raise AttributeError('Please use one of the available encoding method.')
    self.model = Classifier(self.model_drug, **config)
    self.config = config
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.drug_encoding = drug_encoding
    self.result_folder = config['result_folder']
    # os.mkdir (not makedirs): the parent of result_folder must already exist.
    if not os.path.exists(self.result_folder):
        os.mkdir(self.result_folder)
    # binary is switched on later by train() when the labels have 2 classes.
    self.binary = False
    # Back-fill optional config entries so later code can read them blindly.
    if 'num_workers' not in self.config.keys():
        self.config['num_workers'] = 0
    if 'decay' not in self.config.keys():
        self.config['decay'] = 0
def test_(self, data_generator, model, repurposing_mode = False, test = False, verbose = True):
    """Evaluate on a DataLoader and return metrics (or raw scores).

    repurposing_mode=True: return only the prediction list.
    Binary: returns (AUROC, AUPRC, F1, y_pred); regression: returns
    (MSE, Pearson r, p-value, concordance index, y_pred).
    NOTE(review): the forward pass uses self.model, not the `model`
    argument — only eval()/train() are applied to `model`; confirm intended.
    """
    y_pred = []
    y_label = []
    model.eval()
    for i, (v_d, label) in enumerate(data_generator):
        # MPNN/Transformer inputs are structured objects moved to device
        # inside the encoder; only dense encodings are cast/moved here.
        if self.drug_encoding == "MPNN" or self.drug_encoding == 'Transformer':
            v_d = v_d
        else:
            v_d = v_d.float().to(self.device)
        score = self.model(v_d)
        if self.binary:
            # Head emits raw scores; apply sigmoid for probabilities.
            m = torch.nn.Sigmoid()
            logits = torch.squeeze(m(score)).detach().cpu().numpy()
        else:
            logits = torch.squeeze(score).detach().cpu().numpy()
        label_ids = label.to('cpu').numpy()
        y_label = y_label + label_ids.flatten().tolist()
        y_pred = y_pred + logits.flatten().tolist()
    # Hard 0/1 calls at the 0.5 threshold, for F1.
    outputs = np.asarray([1 if i else 0 for i in (np.asarray(y_pred) >= 0.5)])
    model.train()
    if self.binary:
        if repurposing_mode:
            return y_pred
        ## ROC-AUC curve
        if test:
            if verbose:
                roc_auc_file = os.path.join(self.result_folder, "roc-auc.jpg")
                plt.figure(0)
                roc_curve(y_pred, y_label, roc_auc_file, self.drug_encoding)
                plt.figure(1)
                pr_auc_file = os.path.join(self.result_folder, "pr-auc.jpg")
                prauc_curve(y_pred, y_label, pr_auc_file, self.drug_encoding)
        return roc_auc_score(y_label, y_pred), average_precision_score(y_label, y_pred), f1_score(y_label, outputs), y_pred
    else:
        if repurposing_mode:
            return y_pred
        return mean_squared_error(y_label, y_pred), \
            pearsonr(y_label, y_pred)[0], \
            pearsonr(y_label, y_pred)[1], \
            concordance_index(y_label, y_pred), y_pred
def train(self, train, val, test = None, verbose = True):
    """Train with early stopping on the validation metric, then optionally test.

    train/val/test are pandas DataFrames with a 'Label' column; binary mode is
    auto-detected when the training labels take exactly two values. The best
    validation snapshot (highest AUROC / lowest MSE) is restored afterwards.
    NOTE(review): `test_every_X_epoch` is read from config but never used
    below — validation runs every epoch regardless; confirm intended.
    """
    if len(train.Label.unique()) == 2:
        self.binary = True
        self.config['binary'] = True
    lr = self.config['LR']
    decay = self.config['decay']
    BATCH_SIZE = self.config['batch_size']
    train_epoch = self.config['train_epoch']
    if 'test_every_X_epoch' in self.config.keys():
        test_every_X_epoch = self.config['test_every_X_epoch']
    else:
        test_every_X_epoch = 40
    loss_history = []
    self.model = self.model.to(self.device)
    # support multiple GPUs
    if torch.cuda.device_count() > 1:
        if verbose:
            print("Let's use " + str(torch.cuda.device_count()) + " GPUs!")
        self.model = nn.DataParallel(self.model, dim = 0)
    elif torch.cuda.device_count() == 1:
        if verbose:
            print("Let's use " + str(torch.cuda.device_count()) + " GPU!")
    else:
        if verbose:
            print("Let's use CPU/s!")
    # Future TODO: support multiple optimizers with parameters
    opt = torch.optim.Adam(self.model.parameters(), lr = lr, weight_decay = decay)
    if verbose:
        print('--- Data Preparation ---')
    params = {'batch_size': BATCH_SIZE,
              'shuffle': True,
              'num_workers': self.config['num_workers'],
              'drop_last': False}
    # MPNN graph features need the custom field-wise collate.
    if (self.drug_encoding == "MPNN"):
        params['collate_fn'] = mpnn_collate_func
    training_generator = data.DataLoader(data_process_loader_Property_Prediction(train.index.values,
                                                                                 train.Label.values,
                                                                                 train, **self.config),
                                         **params)
    validation_generator = data.DataLoader(data_process_loader_Property_Prediction(val.index.values,
                                                                                   val.Label.values,
                                                                                   val, **self.config),
                                           **params)
    if test is not None:
        info = data_process_loader_Property_Prediction(test.index.values, test.Label.values, test, **self.config)
        # Test loader: deterministic order (no shuffle, sequential sampler).
        params_test = {'batch_size': BATCH_SIZE,
                       'shuffle': False,
                       'num_workers': self.config['num_workers'],
                       'drop_last': False,
                       'sampler':SequentialSampler(info)}
        if (self.drug_encoding == "MPNN"):
            params_test['collate_fn'] = mpnn_collate_func
        testing_generator = data.DataLoader(data_process_loader_Property_Prediction(test.index.values, test.Label.values, test, **self.config), **params_test)
    # early stopping
    if self.binary:
        max_auc = 0
    else:
        # NOTE(review): despite the name, max_MSE tracks the MINIMUM MSE seen.
        max_MSE = 10000
    model_max = copy.deepcopy(self.model)
    valid_metric_record = []
    valid_metric_header = ["# epoch"]
    if self.binary:
        valid_metric_header.extend(["AUROC", "AUPRC", "F1"])
    else:
        valid_metric_header.extend(["MSE", "Pearson Correlation", "with p-value", "Concordance Index"])
    table = PrettyTable(valid_metric_header)
    float2str = lambda x:'%0.4f'%x
    if verbose:
        print('--- Go for Training ---')
    t_start = time()
    for epo in range(train_epoch):
        for i, (v_d, label) in enumerate(training_generator):
            # MPNN/Transformer inputs are structured; only dense encodings
            # are cast/moved to the device here.
            if self.drug_encoding == "MPNN" or self.drug_encoding == 'Transformer':
                v_d = v_d
            else:
                v_d = v_d.float().to(self.device)
            #score = self.model(v_d, v_p.float().to(self.device))
            score = self.model(v_d)
            label = Variable(torch.from_numpy(np.array(label)).float()).to(self.device)
            if self.binary:
                # BCELoss expects probabilities, hence the explicit sigmoid.
                loss_fct = torch.nn.BCELoss()
                m = torch.nn.Sigmoid()
                n = torch.squeeze(m(score), 1)
                loss = loss_fct(n, label)
            else:
                loss_fct = torch.nn.MSELoss()
                n = torch.squeeze(score, 1)
                loss = loss_fct(n, label)
            loss_history.append(loss.item())
            opt.zero_grad()
            loss.backward()
            opt.step()
            # NOTE(review): the inner `if verbose` is redundant with the outer.
            if verbose:
                if (i % 100 == 0):
                    t_now = time()
                    if verbose:
                        print('Training at Epoch ' + str(epo + 1) + ' iteration ' + str(i) + \
                            ' with loss ' + str(loss.cpu().detach().numpy())[:7] +\
                            ". Total time " + str(int(t_now - t_start)/3600)[:7] + " hours")
                        ### record total run time
        ##### validate, select the best model up to now
        with torch.set_grad_enabled(False):
            if self.binary:
                ## binary: ROC-AUC, PR-AUC, F1
                auc, auprc, f1, logits = self.test_(validation_generator, self.model)
                lst = ["epoch " + str(epo)] + list(map(float2str,[auc, auprc, f1]))
                valid_metric_record.append(lst)
                if auc > max_auc:
                    # Snapshot the best-so-far model (early stopping).
                    model_max = copy.deepcopy(self.model)
                    max_auc = auc
                if verbose:
                    print('Validation at Epoch '+ str(epo + 1) + ' , AUROC: ' + str(auc)[:7] + \
                        ' , AUPRC: ' + str(auprc)[:7] + ' , F1: '+str(f1)[:7])
            else:
                ### regression: MSE, Pearson Correlation, with p-value, Concordance Index
                mse, r2, p_val, CI, logits = self.test_(validation_generator, self.model)
                lst = ["epoch " + str(epo)] + list(map(float2str,[mse, r2, p_val, CI]))
                valid_metric_record.append(lst)
                if mse < max_MSE:
                    model_max = copy.deepcopy(self.model)
                    max_MSE = mse
                if verbose:
                    print('Validation at Epoch '+ str(epo + 1) + ' , MSE: ' + str(mse)[:7] + ' , Pearson Correlation: '\
                        + str(r2)[:7] + ' with p-value: ' + str(p_val)[:7] +' , Concordance Index: '+str(CI)[:7])
        table.add_row(lst)
    #### after training
    prettytable_file = os.path.join(self.result_folder, "valid_markdowntable.txt")
    with open(prettytable_file, 'w') as fp:
        fp.write(table.get_string())
    # load early stopped model
    self.model = model_max
    if test is not None:
        if verbose:
            print('--- Go for Testing ---')
        if self.binary:
            auc, auprc, f1, logits = self.test_(testing_generator, model_max, test = True, verbose = verbose)
            test_table = PrettyTable(["AUROC", "AUPRC", "F1"])
            test_table.add_row(list(map(float2str, [auc, auprc, f1])))
            if verbose:
                print('Testing AUROC: ' + str(auc) + ' , AUPRC: ' + str(auprc) + ' , F1: '+str(f1))
        else:
            mse, r2, p_val, CI, logits = self.test_(testing_generator, model_max, test = True, verbose = verbose)
            test_table = PrettyTable(["MSE", "Pearson Correlation", "with p-value", "Concordance Index"])
            test_table.add_row(list(map(float2str, [mse, r2, p_val, CI])))
            if verbose:
                print('Testing MSE: ' + str(mse) + ' , Pearson Correlation: ' + str(r2)
                    + ' with p-value: ' + str(p_val) +' , Concordance Index: '+str(CI))
        np.save(os.path.join(self.result_folder, str(self.drug_encoding)
                + '_logits.npy'), np.array(logits))
        ######### learning record ###########
        ### 1. test results
        prettytable_file = os.path.join(self.result_folder, "test_markdowntable.txt")
        with open(prettytable_file, 'w') as fp:
            fp.write(test_table.get_string())
    if verbose:
        ### 2. learning curve
        fontsize = 16
        iter_num = list(range(1,len(loss_history)+1))
        plt.figure(3)
        plt.plot(iter_num, loss_history, "bo-")
        plt.xlabel("iteration", fontsize = fontsize)
        plt.ylabel("loss value", fontsize = fontsize)
        pkl_file = os.path.join(self.result_folder, "loss_curve_iter.pkl")
        with open(pkl_file, 'wb') as pck:
            pickle.dump(loss_history, pck)
        fig_file = os.path.join(self.result_folder, "loss_curve.png")
        plt.savefig(fig_file)
    if verbose:
        print('--- Training Finished ---')
def predict(self, df_data, verbose = True):
    '''
    Score every row of df_data (a pd.DataFrame with a Label column, e.g.
    produced by utils.data_process_repurpose_virtual_screening) with the
    current model and return the raw score list (repurposing_mode=True).
    '''
    if verbose:
        print('predicting...')
    info = data_process_loader_Property_Prediction(df_data.index.values, df_data.Label.values, df_data, **self.config)
    # NOTE(review): moves the model to the module-level `device`, not
    # self.device — confirm intended.
    self.model.to(device)
    # Deterministic order: no shuffle, sequential sampler.
    params = {'batch_size': self.config['batch_size'],
              'shuffle': False,
              'num_workers': self.config['num_workers'],
              'drop_last': False,
              'sampler':SequentialSampler(info)}
    if (self.drug_encoding == "MPNN"):
        params['collate_fn'] = mpnn_collate_func
    generator = data.DataLoader(info, **params)
    score = self.test_(generator, self.model, repurposing_mode = True)
    # set repurposong mode to true, will return only the scores.
    return score
def save_model(self, path_dir):
    """Persist model weights (model.pt) and config to *path_dir*.

    param:
        path_dir: target directory; created (with parents) if missing.
    """
    # exist_ok avoids the check-then-create race of the old
    # `if not os.path.exists(...): os.makedirs(...)` pattern, and also
    # creates missing parent directories.
    os.makedirs(path_dir, exist_ok=True)
    # os.path.join instead of '/'-concatenation: portable, and equivalent to
    # the path load_pretrained() reads back.
    torch.save(self.model.state_dict(), os.path.join(path_dir, 'model.pt'))
    save_dict(path_dir, self.config)
def load_pretrained(self, path):
if not os.path.exists(path):
os.makedirs(path)
if self.device == 'cuda':
state_dict = torch.load(path)
else:
state_dict = torch.load(path, map_location = torch.device('cpu'))
# to support training from multi-gpus data-parallel:
if next(iter(state_dict))[:7] == 'module.':
# the pretrained model is from data-parallel module
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
state_dict | |
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_505(self):
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertEqual([16], _input(eds, rev_eds, '10000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_506(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_507(self):
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertEqual([32], _input(eds, rev_eds, '100000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_508(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_509(self):
    # embedded blanks within the field width
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertEqual([66], _input(eds, rev_eds, '10 0 00 10 0 0 1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_510(self):
    eds, rev_eds = _parser(_lexer('(B10.1)'))
    self.assertEqual([168], _input(eds, rev_eds, '10101000'))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_511(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([0], _input(eds, rev_eds, '0'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_512(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-0')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_513(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([1], _input(eds, rev_eds, '1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_514(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_515(self):
    # '2' is not a binary digit
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '2')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_516(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_517(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_518(self):
    # only the first 2 characters are read (field width 2)
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '100'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_519(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_520(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '1000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_521(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_522(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_523(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_524(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '100000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_525(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_526(self):
    # embedded blanks within the field width
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10 0 00 10 0 0 1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_527(self):
    eds, rev_eds = _parser(_lexer('(B2.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10101000'))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_528(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([0], _input(eds, rev_eds, '0'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_529(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-0')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_530(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([1], _input(eds, rev_eds, '1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_531(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_532(self):
    # '2' is not a binary digit
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '2')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_533(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_534(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_535(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([4], _input(eds, rev_eds, '100'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_536(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_537(self):
    # only the first 3 characters are read (field width 3)
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([4], _input(eds, rev_eds, '1000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_538(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_539(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([4], _input(eds, rev_eds, '10000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_540(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_541(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([4], _input(eds, rev_eds, '100000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_542(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_543(self):
    # embedded blanks within the field width
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10 0 00 10 0 0 1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_544(self):
    eds, rev_eds = _parser(_lexer('(B3.2)'))
    self.assertEqual([5], _input(eds, rev_eds, '10101000'))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_545(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([0], _input(eds, rev_eds, '0'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_546(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-0')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_547(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([1], _input(eds, rev_eds, '1'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_548(self):
    # negative binary input is rejected
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_549(self):
    # '2' is not a binary digit
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '2')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_550(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([2], _input(eds, rev_eds, '10'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_551(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-10')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_552(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([4], _input(eds, rev_eds, '100'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_553(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-100')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_554(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([8], _input(eds, rev_eds, '1000'))

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_555(self):
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertRaises(ValueError, _input, eds, rev_eds, '-1000')

@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_556(self):
    # only the first 4 characters are read (field width 4)
    eds, rev_eds = _parser(_lexer('(B4.2)'))
    self.assertEqual([8], _input(eds, rev_eds, '10000'))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_557(self):
inp = '''-10000'''
fmt = '''(B4.2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_558(self):
inp = '''100000'''
fmt = '''(B4.2)'''
result = [8]
eds, rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_559(self):
inp = '''-100000'''
fmt = '''(B4.2)'''
result = ['''ERR''']
eds, rev_eds = _parser(_lexer(fmt))
self.assertRaises(ValueError, _input, eds, rev_eds, inp)
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='B')
def test_b_ed_input_560(self):
inp = '''10 0 00 10 0 0 1'''
fmt = '''(B4.2)'''
| |
<reponame>samcaf/JetMonteCarlo
import random
import numpy as np
# Local utils:
# Utilities for parton showering
from jetmontecarlo.utils.vector_utils import *
from jetmontecarlo.utils.montecarlo_utils import *
from jetmontecarlo.utils.partonshower_utils import *
from jetmontecarlo.analytics.QCD_utils import *
# Utilities for analytic expressions
from jetmontecarlo.analytics.radiators import *
from jetmontecarlo.tests.partonshower_tests.test_partonshower_angularities import *
########################################
# Accept-Reject based angular showers:
########################################
def angularity_split_rej(ang_init, beta, jet_type, test_num):
"""A set of splitting methods designed to test
acceptance-rejection sampling in the context of
parton showers.
"""
alpha = alpha_fixed
accept_emission = False
while not accept_emission:
if test_num == -1:
# Control: Larkoski's MLL algorithm.
# ---------------------------------------------
alpha = 1.
r1, r2 = random.random(), random.random()
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(r1))) / 2.
z = (2.*ang_final)**r2 / 2.
theta = (2.*ang_final)**((1-r2) / beta)
# In the MLL case, we use the acceptance-rejection
# method to generate events associated with running coupling.
# Notice that alpha is 1. for the MLL case. This is always
# larger than alpha(freezing scale = 1 GeV) ~ 0.35
cut = alpha_s(z, theta) / alpha
# Note: there is a typo in Eqn 5.4 of the resource I
# cite above the angularity_split method which is
# important here (a stray factor of 2 on the RHS),
# but the conclusion in Equation 5.5, which we use
# here, is correct
if random.random() < cut:
accept_emission = True
# These next two lines of code are the soul of the veto
# algorithm. Rather than generating from scratch, as you
# would for the von Neumann acceptance-rejection algorithm,
# you use this scale as the scale for the next emission.
# This correctly takes into account the exponentiation of
# multiple emissions, as described in the Pythia manual:
# https://arxiv.org/pdf/hep-ph/0603175.pdf#page=66&zoom=150,0,240
else:
ang_init = ang_final
if test_num == 0:
# Control: This is precisely the LL algorithm.
# ---------------------------------------------
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(random.random())
)) / 2.
r = random.random()
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
accept_emission = True
if test_num == 1:
# Accept-reject algorithm on the z and theta
# of the splitting.
# Successful in reproducing LL results.
# ---------------------------------------------
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(random.random())
)) / 2.
thmin = (2.*ang_final)**(1./beta)
theta = random.random()*(1 - thmin) + thmin
z = ang_final / theta**beta
r = random.random()
if r < (1.-thmin)/theta**(beta+1.):
accept_emission = True
if test_num == 2:
# Accept-reject algorithm on final angularity.
# sampling final angularity in lin space.
# Attempts to reproduce LL results.
# This fails by small corrections --
# I believe this is because the pdf_max defined below,
# which is essential in acceptance-rejection sampling,
# is way too small. I think the correct pdf_max
# is actually closer to 10^10 or so.
# ---------------------------------------------
# 1) Generating a final angularity less than the
# given initial value:
ang_final = getLinSample(0, ang_init)
# 2) Defining analytic expressions:
# A useful factor in shortening the LL pdf expression:
f_LL = CR(jet_type)*alpha/(np.pi*beta)
# Analytic expression for the LL cdf and pdf
cdf = np.exp(-f_LL*(np.log(2.*ang_final)**2.
- np.log(2.*ang_init)**2.))
pdf = -2.*f_LL*np.log(2.*ang_final) * cdf / ang_final
pdf_max = 100.
# 3) Implementing the accept-reject algorithm
if random.random() < pdf / pdf_max:
r = random.random()
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
accept_emission = True
if test_num == 3:
# Accept-reject algorithm on final angularity,
# sampling final angularity in log space.
# Successful in reproducing LL results, and
# overcomes the weaknesses of the previous
# algorithm (test_num=2) by going into log space.
# ---------------------------------------------
# 1) Generating a final angularity less than the
# given initial value:
ang_final = getLogSample(0, ang_init)
# 2) Defining analytic expressions:
# A useful factor in shortening the LL pdf expression:
f_LL = CR(jet_type)*alpha/(np.pi*beta)
# Analytic expression for the LL cdf and pdf
cdf = np.exp(-f_LL*(np.log(2.*ang_final)**2.
- np.log(2.*ang_init)**2.))
pdf = -2.*f_LL*np.log(2.*ang_final) * cdf
pdf_max = 10.
# 3) Implementing the accept-reject algorithm
if random.random() < pdf / pdf_max:
r = random.random()
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
accept_emission = True
if test_num == 4:
# Accept-reject algorithm on final angularity,
# sampling final angularity in log space.
# Attempts to reproduce results with running
# coupling, but only the singular pieces of
# splitting functions.
# ---------------------------------------------
# 1) Generating a final angularity less than the
# given initial value:
ang_final = getLogSample(0, ang_init,
epsilon=5e-5)
# 2) Analytic expressions for the MLL pdf,
# normalizing by the CDF at ang_init
rad_f, radprime_f = ang_rad_radprime_MLL(ang_final,
beta=beta,
jet_type=jet_type)
rad_i, _ = ang_rad_radprime_MLL(ang_init,
beta=beta,
jet_type=jet_type)
pdf = (-radprime_f * ang_final
* np.exp(-rad_f)/np.exp(-rad_i)
)
# print("pdf: " + str(pdf))
pdf_max = 3.
# 3) Implementing the accept-reject algorithm
if random.random() < pdf / pdf_max:
r = random.random()
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
accept_emission = True
if test_num == 5:
# Accept-reject algorithm on final angularity,
# sampling final angularity by inverse transform.
# Attempts to reproduce results with running
# coupling, but only the singular pieces of
# splitting functions.
# ---------------------------------------------
# 1) Generating a final angularity using the inverse
# transform method at LL
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(random.random())
)) / 2.
# 2) Analytic expressions for the MLL pdf,
# normalizing by the CDF at ang_init
rad_f, radprime_f = ang_rad_radprime_MLL(ang_final,
beta=beta,
jet_type=jet_type)
rad_i, _ = ang_rad_radprime_MLL(ang_init,
beta=beta,
jet_type=jet_type)
pdf = (-radprime_f * np.exp(-rad_f)/np.exp(-rad_i))
# Analytic expression for the LL cdf and pdf
f_LL = CR(jet_type)*alpha/(np.pi*beta)
cdf_LL = np.exp(-f_LL*(np.log(2.*ang_final)**2.
- np.log(2.*ang_init)**2.))
pdf_LL = -2.*f_LL*np.log(2.*ang_final) * cdf_LL / ang_final
pdf_ratio_max = 3.
# 3) Implementing the accept-reject algorithm
pdf_ratio = pdf / pdf_LL
if random.random() < pdf_ratio / (pdf_ratio_max):
r = random.random()
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
accept_emission = True
if test_num == 6:
# Accept-reject algorithm on final angularity,
# sampling final angularity by inverse transform.
# Attempts to reproduce results with running
# coupling, but only the singular pieces of
# splitting functions.
# Afterwards, samples z and theta using accept-reject
# sampling as well, to take running coupling effects
# into account.
# ---------------------------------------------
# 1) Generating a final angularity using the inverse
# transform method at LL
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(random.random())
)) / 2.
# 2) Analytic expressions for the MLL pdf,
# normalizing by the CDF at ang_init
rad_f, radprime_f = ang_rad_radprime_MLL(ang_final,
beta=beta,
jet_type=jet_type)
rad_i, _ = ang_rad_radprime_MLL(ang_init,
beta=beta,
jet_type=jet_type)
pdf = (-radprime_f * np.exp(-rad_f)/np.exp(-rad_i))
# Analytic expression for the LL cdf and pdf
f_LL = CR(jet_type)*alpha/(np.pi*beta)
cdf_LL = np.exp(-f_LL*(np.log(2.*ang_final)**2.
- np.log(2.*ang_init)**2.))
pdf_LL = -2.*f_LL*np.log(2.*ang_final) * cdf_LL / ang_final
pdf_ratio_max = 3.
# 3) Implementing the accept-reject algorithm
pdf_ratio = pdf / pdf_LL
if random.random() < pdf_ratio / (pdf_ratio_max):
# Once we have a suitable final angularity,
# accept-reject sampling z and theta:
accept_ztheta = False
while not accept_ztheta:
r = random.random()
# Logarithmically sampling z and theta
z = (2.*ang_final)**r / 2.
theta = (2.*ang_final)**((1-r) / beta)
if random.random() < alpha_s(z, theta)/alpha_s(0, 0):
accept_ztheta = True
accept_emission = True
if test_num == 7:
# Accept-reject algorithm on final angularity,
# sampling final angularity by inverse transform.
# Attempts to reproduce results with running
# coupling, but only the singular pieces of
# splitting functions.
# Afterwards, samples z and theta using accept-reject
# sampling as well, to take running coupling effects
# into account.
# ---------------------------------------------
# 1) Generating a final angularity using the inverse
# transform method at LL
ang_final = np.exp(-np.sqrt(np.log(2.*ang_init)**2.
- np.pi*beta/(CR(jet_type)*alpha)
* np.log(random.random())
)) / 2.
# 2) Analytic expressions for the MLL pdf,
# normalizing by the CDF at ang_init
rad_f, radprime_f = ang_rad_radprime_MLL(ang_final,
beta=beta,
jet_type=jet_type)
rad_i, _ = ang_rad_radprime_MLL(ang_init,
beta=beta,
jet_type=jet_type)
pdf = (-radprime_f * np.exp(-rad_f)/np.exp(-rad_i))
# Analytic expression for the LL cdf and pdf
f_LL = CR(jet_type)*alpha/(np.pi*beta)
cdf_LL = np.exp(-f_LL*(np.log(2.*ang_final)**2.
- np.log(2.*ang_init)**2.))
pdf_LL = -2.*f_LL*np.log(2.*ang_final) * cdf_LL / ang_final
pdf_ratio_max = 3.
# 3) Implementing | |
"""Collection of layers that are using producing weight allocations."""
import cvxpy as cp
from cvxpylayers.torch import CvxpyLayer
import torch
from torch.distributions import MultivariateNormal
import torch.nn as nn
from .misc import Cov2Corr, CovarianceMatrix, KMeans
class AnalyticalMarkowitz(nn.Module):
    """Minimum variance and maximum Sharpe ratio portfolios with no constraints.

    Closed-form solutions are known for both problems, so no numerical
    optimizer is required.

    References
    ----------
    [1] http://faculty.washington.edu/ezivot/econ424/portfolioTheoryMatrix.pdf

    """

    def forward(self, covmat, rets=None):
        """Compute optimal portfolio weights analytically.

        Parameters
        ----------
        covmat : torch.Tensor
            Covariance matrix of shape `(n_samples, n_assets, n_assets)`.

        rets : torch.Tensor or None
            If a tensor of shape `(n_samples, n_assets)` of expected returns,
            the maximum Sharpe ratio (tangency) portfolio is computed. If
            None, the minimum variance portfolio is computed instead.

        Returns
        -------
        weights : torch.Tensor
            Of shape (n_samples, n_assets) representing the optimal weights.

        """
        n_samples, n_assets, _ = covmat.shape

        precision = torch.inverse(covmat)  # Sigma^{-1}, batched
        ones = torch.ones(n_samples, n_assets, 1,
                          device=covmat.device, dtype=covmat.dtype)

        # Minimum variance is the special case of a constant "return" vector.
        target = ones if rets is None else rets.view(n_samples, n_assets, 1)

        # Unnormalized solution Sigma^{-1} mu, rescaled to sum to one.
        unscaled = torch.matmul(precision, target)
        normalization = torch.matmul(ones.permute(0, 2, 1), unscaled)
        weights = unscaled / normalization

        return weights.squeeze(-1)
class NCO(nn.Module):
    """Nested cluster optimization.

    This optimization algorithm performs the following steps:

         1. Divide all assets into clusters
         2. Run standard optimization inside of each of these clusters (intra step)
         3. Run standard optimization on the resulting portfolios (inter step)
         4. Compute the final weights

    Parameters
    ----------
    n_clusters : int
        Number of clusters to find in the data. Note that the underlying clustering model is
        KMeans - ``deepdow.layers.KMeans``.

    n_init : int
        Number of runs of the clustering algorithm.

    init : str, {'random', 'k-means++'}
        Initialization strategy of the clustering algorithm.

    random_state : int or None
        Random state passed to the stochastic k-means clustering.

    See Also
    --------
    deepdow.layers.KMeans : k-means clustering algorithm

    References
    ----------
    [1] M. Lopez de Prado.
        "A Robust Estimator of the Efficient Frontier"
        Available at SSRN 3469961, 2019

    """

    def __init__(self, n_clusters, n_init=10, init='random', random_state=None):
        super().__init__()
        self.n_clusters = n_clusters
        self.n_init = n_init
        self.init = init
        self.random_state = random_state

        self.cov2corr_layer = Cov2Corr()
        self.kmeans_layer = KMeans(n_clusters=self.n_clusters,
                                   n_init=self.n_init,
                                   init=self.init,
                                   random_state=self.random_state)

        self.analytical_markowitz_layer = AnalyticalMarkowitz()

    def forward(self, covmat, rets=None):
        """Perform forward pass.

        Parameters
        ----------
        covmat : torch.Tensor
            Covariance matrix of shape `(n_samples, n_assets, n_assets)`.

        rets : torch.Tensor or None
            If tensor then of shape `(n_samples, n_assets)` representing expected returns. If provided triggers
            computation of maximum Sharpe ratio. Else None triggers computation of minimum variance portfolio.

        Returns
        -------
        weights : torch.Tensor
            Of shape (n_samples, n_assets) representing the optimal weights. If `rets` provided, then
            maximum Sharpe ratio portfolio (tangency portfolio) used both on intra and inter cluster level. Otherwise
            minimum variance portfolio.

        Notes
        -----
        Currently there is no batching over the sample dimension - a simple for loop is used.

        """
        n_samples, n_assets, _ = covmat.shape
        dtype, device = covmat.dtype, covmat.device

        # Consistency fix: reuse the layer instantiated in __init__ rather
        # than constructing a fresh Cov2Corr on every forward pass.
        corrmat = self.cov2corr_layer(covmat)

        w_l = []  # we need to iterate over the sample dimension (currently no speedup)
        for i in range(n_samples):
            # 1) Cluster assets by their correlation pattern.
            cluster_ixs, cluster_centers = self.kmeans_layer(corrmat[i])

            w_intra_clusters = torch.zeros((n_assets, self.n_clusters), dtype=dtype, device=device)

            # 2) Intra-cluster optimization: optimal weights inside each cluster.
            for c in range(self.n_clusters):
                in_cluster = torch.where(cluster_ixs == c)[0]  # indices from the same cluster
                intra_covmat = covmat[[i]].index_select(1, in_cluster).index_select(2, in_cluster)  # (1, ?, ?)
                intra_rets = None if rets is None else rets[[i]].index_select(1, in_cluster)  # (1, ?)
                w_intra_clusters[in_cluster, c] = self.analytical_markowitz_layer(intra_covmat, intra_rets)[0]

            # 3) Inter-cluster optimization over the aggregated cluster portfolios.
            inter_covmat = w_intra_clusters.T @ (covmat[i] @ w_intra_clusters)  # (n_clusters, n_clusters)
            inter_rets = None if rets is None else (w_intra_clusters.T @ rets[i]).view(1, -1)  # (1, n_clusters)
            w_inter_clusters = self.analytical_markowitz_layer(inter_covmat.view(1, self.n_clusters, self.n_clusters),
                                                              inter_rets)  # (1, n_clusters)

            # 4) Final weights: weight each intra-cluster portfolio by its cluster weight.
            w_final = (w_intra_clusters * w_inter_clusters).sum(dim=1)  # (n_assets,)

            w_l.append(w_final)

        res = torch.stack(w_l, dim=0)

        return res
class NumericalMarkowitz(nn.Module):
    """Convex optimization layer stylized into portfolio optimization problem.

    Parameters
    ----------
    n_assets : int
        Number of assets.

    max_weight : float
        Upper bound on each individual asset weight. Defaults to 1, which adds
        no restriction beyond the full-investment (weights sum to 1) constraint.

    Attributes
    ----------
    cvxpylayer : CvxpyLayer
        Custom layer used by a third party package called cvxpylayers.

    References
    ----------
    [1] https://github.com/cvxgrp/cvxpylayers

    """

    def __init__(self, n_assets, max_weight=1):
        """Construct."""
        super().__init__()

        # Problem parameters - filled with per-sample values at forward time.
        covmat_sqrt = cp.Parameter((n_assets, n_assets))
        rets = cp.Parameter(n_assets)
        # Declared nonneg so the regularization term keeps the problem DPP-compliant.
        alpha = cp.Parameter(nonneg=True)

        w = cp.Variable(n_assets)
        ret = rets @ w
        risk = cp.sum_squares(covmat_sqrt @ w)
        reg = alpha * (cp.norm(w) ** 2)

        # Maximize risk- and regularization-adjusted return over long-only,
        # fully-invested portfolios with a per-asset weight cap.
        prob = cp.Problem(cp.Maximize(ret - risk - reg),
                          [cp.sum(w) == 1,
                           w >= 0,
                           w <= max_weight
                           ])

        # cvxpylayers can only differentiate through DPP-compliant problems.
        assert prob.is_dpp()

        self.cvxpylayer = CvxpyLayer(prob, parameters=[rets, covmat_sqrt, alpha], variables=[w])

    def forward(self, rets, covmat_sqrt, gamma_sqrt, alpha):
        """Perform forward pass.

        Parameters
        ----------
        rets : torch.Tensor
            Of shape (n_samples, n_assets) representing expected returns (or whatever the feature extractor decided
            to encode).

        covmat_sqrt : torch.Tensor
            Of shape (n_samples, n_assets, n_assets) representing the square of the covariance matrix.

        gamma_sqrt : torch.Tensor
            Of shape (n_samples,) representing the tradeoff between risk and return - where on efficient frontier
            we are.

        alpha : torch.Tensor
            Of shape (n_samples,) representing how much L2 regularization is applied to weights. Note that
            we pass the absolute value of this variable into the optimizer since when creating the problem
            we asserted it is going to be nonnegative.

        Returns
        -------
        weights : torch.Tensor
            Of shape (n_samples, n_assets) representing the optimal weights as determined by the convex optimizer.

        """
        n_samples, n_assets = rets.shape
        # Broadcast the per-sample scalar into an (n_samples, n_assets, n_assets)
        # tensor so it can scale the covariance square root elementwise.
        gamma_sqrt_ = gamma_sqrt.repeat((1, n_assets * n_assets)).view(n_samples, n_assets, n_assets)
        alpha_abs = torch.abs(alpha)  # it needs to be nonnegative

        return self.cvxpylayer(rets, gamma_sqrt_ * covmat_sqrt, alpha_abs)[0]
class Resample(nn.Module):
"""Meta allocator that bootstraps the input expected returns and covariance matrix.
The idea is to take the input covmat and expected returns and view them as parameters of a Multivariate
Normal distribution. After that, we iterate the below steps `n_portfolios` times:
1. Sample `n_draws` from the distribution
2. Estimate expected_returns and covariance matrix
3. Use the `allocator` to compute weights.
This will results in `n_portfolios` portfolios that we simply average to get the final weights.
Parameters
----------
allocator : AnalyticalMarkowitz or NCO or NumericalMarkowitz
Instance of an allocator.
n_draws : int or None
Number of draws. If None then set equal to number of assets to prevent numerical problems.
n_portfolios : int
Number of samples.
sqrt : bool
If True, then the input array represent the square root of the covariance matrix. Else it is the actual
covariance matrix.
random_state : int or None
Random state (forward passes with same parameters will have same results).
References
----------
[1] Michaud, Richard O., and Robert Michaud.
"Estimation error and portfolio optimization: a resampling solution."
Available at SSRN 2658657 (2007)
"""
def __init__(self, allocator, n_draws=None, n_portfolios=5, sqrt=False, random_state=None):
super().__init__()
if not isinstance(allocator, (AnalyticalMarkowitz, NCO, NumericalMarkowitz)):
raise TypeError('Unsupported type of allocator: {}'.format(type(allocator)))
self.allocator = allocator
self.sqrt = sqrt
self.n_draws = n_draws
self.n_portfolios = n_portfolios
self.random_state = random_state
mapper = {'AnalyticalMarkowitz': False,
'NCO': True,
'NumericalMarkowitz': True}
self.uses_sqrt = mapper[allocator.__class__.__name__]
def forward(self, matrix, rets=None, **kwargs):
"""Perform forward pass.
Only accepts keyword arguments to avoid ambiguity.
Parameters
----------
matrix : torch.Tensor
Of shape (n_samples, n_assets, n_assets) representing the square of the covariance matrix if
`self.square=True` else the covariance matrix itself.
rets : torch.Tensor or None
Of shape (n_samples, n_assets) representing expected returns (or whatever the feature extractor decided
to encode). Note that `NCO` and `AnalyticalMarkowitz` allow for `rets=None` (using only minimum variance).
kwargs : dict
All additional input arguments the `self.allocator` needs to perform forward pass.
Returns
-------
weights : torch.Tensor
Of shape (n_samples, n_assets) representing the optimal weights.
"""
if self.random_state is not None:
torch.manual_seed(self.random_state)
n_samples, n_assets, _ = matrix.shape
dtype, device = matrix.dtype, matrix.device
n_draws = self.n_draws or n_assets # make sure that if None then we have the same N=M
covmat = matrix @ matrix if self.sqrt else matrix
dist_rets = torch.zeros(n_samples, n_assets, dtype=dtype, device=device) if rets is None else rets
dist = MultivariateNormal(loc=dist_rets, covariance_matrix=covmat)
portfolios = [] # n_portfolios elements of (n_samples, n_assets)
for _ in range(self.n_portfolios):
draws = dist.sample((n_draws,)) # (n_draws, n_samples, n_assets)
rets_ = draws.mean(dim=0) if rets is not None else None # (n_samples, n_assets)
covmat_ = CovarianceMatrix(sqrt=self.uses_sqrt)(draws.permute(1, 0, 2)) # (n_samples, n_assets, ...)
if isinstance(self.allocator, (AnalyticalMarkowitz, NCO)):
portfolio = self.allocator(covmat=covmat_, rets=rets_)
elif isinstance(self.allocator, NumericalMarkowitz):
gamma = kwargs['gamma']
alpha = kwargs['alpha']
portfolio = self.allocator(rets_, covmat_, gamma, alpha)
portfolios.append(portfolio)
portfolios_t = torch.stack(portfolios, dim=0) # (n_portfolios, n_samples, n_assets)
| |
<gh_stars>0
"""
traits: tools for examining classes, instances, and other python objects
<NAME> <<EMAIL>>
Copyright 2021, <NAME>
License: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contents:
contains
contains_dict
contains_list
contains_set
contains_tuple
contains_parallel
contains_serial
get_annotations
get_attributes
get_contents
get_contents_types
get_methods
get_name
get_properties
get_signatures
get_types
get_variables
has_attributes
has_methods
has_properties
has_signatures
has_traits
has_types
has_types_dict
has_types_list
has_types_sequence
is_class_attribute
is_container
is_function
is_iterable
is_method
is_nested
is_sequence
name_attributes
name_methods
name_parameters
name_properties
name_variables
ToDo:
Adding parsing functionality to signature-related functions to find
equivalence when one signature has subtypes of the other signature
(e.g., one type annotation is 'dict' and the other is 'MutableMapping').
It might be necessary to create a separate Signature-like class to
implement this functionality. This includes fixing or abandoning
'has_annotations' due to issues matching type annotations.
Add support for nagata Kinds once that system is complete.
Add support for types (using type annotations) in the 'contains' function so
that 'contains' can be applied to classes and not just instances.
Add 'dispatcher' framework to 'contains' once the dispatcher framework is
completed in the 'bobbie' package and the Kind system is completed in
the nagata package. This should replace existing usages of python's
singledispatch, which doesn't properly deal with subtypes.
"""
from __future__ import annotations
from collections.abc import (
Collection, Container, Hashable, Iterable, Mapping, MutableSequence,
Sequence, Set)
import functools
import inspect
import types
from typing import Any, Optional, Type, Union
from . import utilities
@functools.singledispatch
def contains(
    item: object,
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether 'item' contains the type(s) in 'contents'.

    Generic entry point of a singledispatch family; concrete implementations
    are registered below for Mapping, MutableSequence, Set, tuple, Sequence,
    and Collection. This base case rejects any unsupported type.

    Args:
        item (object): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check for
            in 'item' contents.

    Raises:
        TypeError: if no registered implementation handles type(item).

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    # Idiom cleanup: removed the commented-out manual dispatch sketch that
    # duplicated the registered implementations below.
    raise TypeError(f'item {item} is not supported by {__name__}')
@contains.register(Mapping)
def contains_dict(
    item: Mapping[Hashable, Any],
    contents: tuple[Union[Type[Any], tuple[Type[Any], ...]],
                    Union[Type[Any], tuple[Type[Any], ...]]]) -> bool:
    """Returns whether dict 'item' contains the type(s) in 'contents'.

    The first entry of 'contents' constrains the keys; the second entry
    constrains the values.

    Args:
        item (Mapping[Hashable, Any]): item to examine.
        contents: two-item tuple of types (or tuples of types) applied to the
            keys and the values respectively.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    if not contains_serial(item = item.keys(), contents = contents[0]):
        return False
    return contains_serial(item = item.values(), contents = contents[1])
@contains.register(MutableSequence)
def contains_list(
    item: MutableSequence[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether list 'item' contains the type(s) in 'contents'.

    Thin wrapper that defers to the generic serial check.

    Args:
        item (MutableSequence[Any]): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check for
            in 'item' contents.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    all_match = contains_serial(item = item, contents = contents)
    return all_match
@contains.register(Set)
def contains_set(
    item: Set[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether set 'item' contains the type(s) in 'contents'.

    Thin wrapper that defers to the generic serial check.

    Args:
        item (Set[Any]): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check for
            in 'item' contents.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    all_match = contains_serial(item = item, contents = contents)
    return all_match
@contains.register(tuple)
def contains_tuple(
    item: tuple[Any, ...],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether tuple 'item' contains the type(s) in 'contents'.

    When 'contents' is itself a tuple of the same length as 'item', the check
    is positional (each element against its own type); otherwise every element
    is checked against 'contents' as a whole.

    Args:
        item (tuple[Any, ...]): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check for
            in 'item' contents.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    lengths_match = isinstance(contents, tuple) and len(item) == len(contents)
    if lengths_match:
        return contains_parallel(item = item, contents = contents)
    return contains_serial(item = item, contents = contents)
@contains.register(Sequence)
def contains_parallel(
    item: Sequence[Any],
    contents: tuple[Type[Any], ...]) -> bool:
    """Returns whether parallel 'item' contains the type(s) in 'contents'.

    The check is positional: item[i] must be an instance of contents[i].
    Callers ('contains_tuple') guarantee equal lengths.

    Args:
        item (Sequence[Any]): item to examine.
        contents (tuple[Type[Any], ...]): types to check for in 'item'
            contents, one per element.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    # Bug fix: the previous implementation iterated 'enumerate(item)' and
    # indexed with the resulting (index, element) tuples, raising TypeError
    # for every non-empty sequence.
    return all(
        isinstance(element, kind) for element, kind in zip(item, contents))
@contains.register(Collection)
def contains_serial(
    item: Collection[Any],
    contents: Union[Type[Any], tuple[Type[Any], ...]]) -> bool:
    """Returns whether serial 'item' contains the type(s) in 'contents'.

    Every element of 'item' must be an instance of 'contents' (a type or a
    tuple of acceptable types).

    Args:
        item (Collection[Any]): item to examine.
        contents (Union[Type[Any], tuple[Type[Any], ...]]): types to check for
            in 'item' contents.

    Returns:
        bool: whether 'item' holds the types in 'contents'.

    """
    for element in item:
        if not isinstance(element, contents):
            return False
    return True
def get_annotations(
    item: object,
    include_private: bool = False) -> dict[str, Type[Any]]:
    """Returns dict of attributes of 'item' with type annotations.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: dict of annotated attributes in 'item' (keys are
            attribute names and values are type annotations).

    """
    annotations = item.__annotations__
    if not include_private:
        annotations = {
            key: value for key, value in annotations.items()
            if not key.startswith('_')}
    return annotations
def get_attributes(
    item: object,
    include_private: bool = False) -> dict[str, Any]:
    """Returns dict of attributes of 'item'.

    Args:
        item (Any): item to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: dict of attributes in 'item' (keys are attribute names
            and values are attribute values).

    """
    names = name_attributes(item = item, include_private = include_private)
    return {name: getattr(item, name) for name in names}
def get_methods(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> dict[str, types.MethodType]:
    """Returns dict of methods of 'item'.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to False.

    Returns:
        dict[str, types.MethodType]: dict of methods in 'item' (keys are method
            names and values are methods).

    """
    methods = name_methods(item = item, include_private = include_private)
    values = [getattr(item, m) for m in methods]
    # Bug fix: previously returned the bare list of bound methods, which
    # contradicted the annotated dict return type and the parallel getters
    # ('get_attributes', 'get_properties', 'get_signatures').
    return dict(zip(methods, values))
def get_name(item: Any, default: Optional[str] = None) -> Optional[str]:
    """Returns str name representation of 'item'.

    Resolution order: 'item' itself if it is a str, then a str 'name'
    attribute, then a snakified '__name__', then a snakified class name,
    and finally 'default'.

    Args:
        item (Any): item to determine a str name.
        default (Optional[str]): fallback name returned when every other
            strategy fails.

    Returns:
        str: a name representation of 'item.'

    """
    if isinstance(item, str):
        return item
    if hasattr(item, 'name') and isinstance(item.name, str):
        return item.name
    try:
        return utilities.snakify(item.__name__)  # type: ignore
    except AttributeError:
        if item.__class__.__name__ is not None:
            return utilities.snakify(item.__class__.__name__)  # type: ignore
        return default
def get_properties(
    item: object,
    include_private: bool = False) -> dict[str, Any]:
    """Returns properties of 'item'.

    Args:
        item (object): instance to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to False.

    Returns:
        dict[str, Any]: dict of properties in 'item' (keys are property names
            and values are property values).

    """
    names = name_properties(item = item, include_private = include_private)
    return {name: getattr(item, name) for name in names}
def get_signatures(
    item: Union[object, Type[Any]],
    include_private: bool = False) -> dict[str, inspect.Signature]:
    """Returns dict of method signatures of 'item'.

    Args:
        item (Union[object, Type[Any]]): class or instance to examine.
        include_private (bool): whether to include items that begin with '_'
            (True) or to exclude them (False). Defaults to False.

    Returns:
        dict[str, inspect.Signature]: dict of method signatures in 'item'
            (keys are method names and values are method signatures).

    """
    method_names = name_methods(item = item, include_private = include_private)
    # Resolve each method by name and introspect its call signature.
    return {
        name: inspect.signature(getattr(item, name))
        for name in method_names}
def get_variables(
item: object,
include_private: bool = False) -> dict[str, Any]:
"""Returns dict of attributes of 'item' that are not methods or properties.
Args:
item (object): instance to examine.
include_private (bool): whether to include items | |
+ 2 + 2*dumpBytesPerLine + 1
sumLine = ' ' * dumpchars + self.line
sumWrap = ""
# if the line is longer and operation exists, a wrap needs to occur
ops = ""
if self.opcodedef is not None and shopOperation:
ops = self.opcodedef.operation.strip()
if len(ops) > 0:
if len(sumLine) >= cwbchars + colPosOperation - 2:
# wrap and ops
sumWrap = sumLine[cwbchars + colPosOperation - 2:]
sumLine = sumLine[:cwbchars + colPosOperation - 2] + "\u2026|" + ops
else:
# fill line and ops
sumLine = sumLine.ljust(cwbchars + colPosOperation - 2) + " |" + ops
# now go into the loop to fill more data
while True:
# prepare hex adress
ha = '{num:{fill}{width}x}'.format(num=self.orgpos + pos, fill='0', width=digitsPerHexAdress) + ':'
# indicator
ic = (indicator.ljust(1))[:1]
# prepare dump bytes
db = ""
for i in range(dumpBytesPerLine):
if pos+i < len(self.bytes):
if pos+i in self.invalidBytePos:
db += "??"
else:
db += "{0:02x}".format(self.bytes[pos+i])
else:
db += " "
# may be compare with bytes provided as external bytearray
cwb = ""
delta = False
if compareWithBytes is not None:
for i in range(dumpBytesPerLine):
if pos+i < len(self.bytes) and self.orgpos - shiftCompare + pos + i < len(compareWithBytes):
cwb += "{0:02x}".format(compareWithBytes[self.orgpos - shiftCompare + pos + i])
if self.bytes[pos+i] != compareWithBytes[self.orgpos - shiftCompare + pos + i]:
delta = True
cwb = cwb.ljust(2 * dumpBytesPerLine) + ("<<" if delta else " ") + ":"
pos += dumpBytesPerLine
# implant data into actual sum
sumLine = cwb + ha + ic + db + "|" + sumLine[dumpchars:]
# print out
print(sumLine)
# when to break?
if pos >= len(self.bytes) and len(sumWrap) < 1:
break
# ok, next line
extras = max(0, cwbchars + colPosOperation - 1 - (dumpchars + len(sumWrap)))
sumLine = ' ' * (dumpchars + extras) + sumWrap
sumWrap = ""
# done
return True
    def synthesis(self, digitsPerHexAdress=4, dumpBytesPerLine=4, indicator=' ', tabWidth=8, justOutput=False):
        """ Generates one or more lines of synthesized listing.

        Re-builds an assembler source line from the parsed instruction parts
        (label in instrParts[0], mnemonic in instrParts[1], up to two operand
        literals in oplit[0..1]) and prints it prefixed with a hex dump of
        the generated bytes, `dumpBytesPerLine` bytes per output line.
        With `justOutput=True` the re-built source line is returned instead
        of printing. Otherwise returns True after printing.
        """
        # synthesized line
        lab = ""
        if len(self.instrParts[0]) > 0:
            lab = self.instrParts[0].strip()+':'
        line = lab.ljust(tabWidth) + self.instrParts[1] + ' '
        for i in (0,1):
            if len(self.oplit[i]) > 0:
                line += self.oplit[i] + ','
        # drop the trailing comma left behind by the operand loop
        line = line.rstrip(',')
        # just asm line output
        if justOutput:
            return line
        # fill the sum with the users input line first
        pos = 0
        # dump column width: address digits + ':' + indicator + hex bytes + '|'
        dumpchars = digitsPerHexAdress + 2 + 2*dumpBytesPerLine + 1
        sumLine = ' ' * dumpchars + line
        # NOTE(review): sumWrap is never assigned a non-empty value in this
        # method (unlike the summary variant with operation wrapping), so the
        # loop terminates as soon as all bytes have been dumped.
        sumWrap = ""
        # now go into the loop to fill more data
        while True:
            # prepare hex adress
            ha = '{num:{fill}{width}x}'.format(num=self.orgpos + pos, fill='0', width=digitsPerHexAdress) + ':'
            # indicator
            ic = (indicator.ljust(1))[:1]
            # prepare dump bytes
            db = ""
            for i in range(dumpBytesPerLine):
                if pos+i < len(self.bytes):
                    if pos+i in self.invalidBytePos:
                        # bytes flagged as invalid render as "??"
                        db += "??"
                    else:
                        db += "{0:02x}".format(self.bytes[pos+i])
                else:
                    # pad missing bytes to keep the dump column aligned
                    db += "  "
            pos += dumpBytesPerLine
            # implant data into actual sum
            sumLine = ha + ic + db + "|" + sumLine[dumpchars:]
            # print out
            print(sumLine)
            # when to break?
            if pos >= len(self.bytes) and len(sumWrap) < 1:
                break
            # ok, next line
            sumLine = ""
        # done
        return True
def compareWithBytes(self, compareWithBytes=None, shiftCompare=0):
""" Expects `compareWithBytes` to be set to alternate binary object.
Will return a tuple `(bytes, invalids, diffs)` as a statistics. """
diffs = 0
invalids = 0
for i in range(len(self.bytes)):
if i in self.invalidBytePos:
invalids += 1
diffs += 1
else:
if self.bytes[i] != compareWithBytes[self.orgpos - shiftCompare + i]:
diffs += 1
return (len(self.bytes), invalids, diffs)
class ListOfAssembleRecords(list):
    """ Some convenience functions over a list of assyrecs. """
    def __init__(self, *args):
        list.__init__(self, *args)
        # maps orgpos (int) -> list of AssembleRecord starting at that address
        self.orgposToRecord = {}
    def add(self, ar: AssembleRecord):
        """ Adds an `AssembleRecord`, making also the book keeping """
        self.append(ar)
        # keep the per-address index in sync with the list contents
        self.orgposToRecord.setdefault(ar.orgpos, [])
        self.orgposToRecord[ar.orgpos].append(ar)
    def findOrgPos(self, orgpos: int):
        """ Returns a list of all `AssembleRecord`, which have a orgpos.
        Returns an empty list when no record starts at that address. """
        if not orgpos in self.orgposToRecord:
            return []
        return self.orgposToRecord[orgpos]
    def sortedListOfIndexAssyRec(self):
        """ Prepares a list of tuples `(orgpos, assyrec)`, which is sorted
        ascending. """
        l = []
        for ar in self:
            l.append((ar.orgpos, ar))
        # sort
        l.sort(key=lambda k: k[0])
        # done
        return l
    def binarySearchEnclosingAssyRec(self, adr: int, l: list, ll=None, ul=None):
        """ Takes the list from `sortedListOfIndexAssyRec` and determines
        the `AssembleRecord` enclosing it and returns it. Else `None` """
        # format l: [ .. (orgpos, assyrec) .. ]
        # start of recursion; easy signature to outside
        if l is None or len(l) < 1:
            return None
        if ll is None or ul is None:
            # outermost call: initialize the search interval to the full list
            ll = 0
            ul = len(l)-1
        OPTIONS.debug(3, "binarySearchEnclosingAssyRec: adr=%d ll=%d ul=%d" % (adr, ll, ul))
        # direct hit or miss?
        if ul - ll < 2:
            for i in (ll,ul):
                # looks funny, but works for both cases ll=ul, ll+1=ul
                # a record encloses adr if orgpos <= adr < orgpos + len(bytes)
                if l[i][1].orgpos <= adr and l[i][1].orgpos + len(l[i][1].bytes) > adr:
                    return l[i][1]
            return None
        # recursion for binary divided interval
        mid = int((ll + ul) / 2)
        if adr < l[mid][1].orgpos:
            return self.binarySearchEnclosingAssyRec(adr, l, ll, mid)
        else:
            return self.binarySearchEnclosingAssyRec(adr, l, mid, ul)
class Assemble:
""" Class to assemble a file. Will hold a byte stream """
    def __init__(self, opcodes: OpCodeDefList, syms: SymDict, labels: Labels):
        """ Initializes an empty assembly (byte stream at origin 0) together
        with the opcode definitions, symbol dictionary and labels used
        while assembling. """
        self.assembly = array.array('B') # TODO check needed
        self.orgpos = 0
        self.opcodes = opcodes
        self.syms = syms
        self.labels = labels
        # collected AssembleRecord results, one per assembled source line
        self.assyrecs = []
        # LATER OPTIMIZATION: use http://code.activestate.com/recipes/59857-filter-a-string-and-only-keep-a-given-set-of-chara/
        # for now: define filter of allowed chars in an operand or expression
        # note: for string constant, there is also a "raw" handling of such strings
        self.allowedOpChars = string.ascii_letters + string.digits + "_$()-+*/" + "'" + '"'
    def clear(self):
        """ Resets the generated byte stream, origin position and the list
        of assemble records to their initial (empty) state. """
        self.assembly = array.array('B')
        self.orgpos = 0
        self.assyrecs = []
def matchSingleOpcodeDef(self, optemplate, am, opis):
""" will match the given op1/2 `optemplate` of an opcode definition
together with the `am` adressing mode spec against an stated
operand input `opis`.
Returns: `None` if not match, `True` for a plain success or
`( variable, 'BP', bitpattern)` if a match with symbol table could be achieved or
`( variable, 'EX', expression)` if a numeric expression were found. """
# debug
OPTIONS.debug(3, "matchSingleOpcodeDef: opt:" + optemplate + " am:" + am + " opis: "+opis)
# clean operand and AM
optemplate = optemplate.strip() # need to distinct upper/ lower here!
am = am.strip().upper()
# clean even more on user input
opisraw = opis.strip()
opis = opis.strip().upper()
opis = ''.join([c for c in opis if c in self.allowedOpChars])
# try to detect a symbolic variable / expression portion
# Note: only ONE {..} is possible :-()
if optemplate.find("{") >= 0:
match = re.search(r'({\w+})', optemplate)
if match is not None:
key = match.group(1)
rawkey = key[1:-1]
(keyStart, keyEnd) = match.span(1)
#
# Try apply SYMBOLIC SUBSTITUTION
#
if " R M RI B ".find(" "+am+" ") >= 0:
# the hard way: find symbol key in symbols and subsequently
# try out all symbolic substitutions on opis ..
OPTIONS.debug(3, " .. try out all symbolic substitutions for sym key:" + key)
foundSyms = self.syms.get(rawkey)
if foundSyms is not None:
# we have a list of tuples (key, symbol, bitpattern)
OPTIONS.debug(3, " .. found # of tuples:", len(foundSyms))
for (foundKey, foundSymbol, foundBitpattern) in foundSyms:
if foundKey != rawkey:
# UUPS!
OPTIONS.error(104, "Found symbol key mismatch with dictionary!")
continue
# we will prepare an optemplate, which has the instantiated symbol in it
opisLit = optemplate.replace(key, foundSymbol)
opisLit = opisLit.upper()
# debug
OPTIONS.debug(3, " .. compare literated op:" + opisLit + " against given op:" + opis)
# in case of success, opisLit and opis should match char-by-char
if opisLit == opis:
# form a tuple to give back
res = (foundKey[0], 'BP', foundBitpattern)
# debug
OPTIONS.debugObject(3, " .. SUCCESS with result:", res)
# success!
return res
# debug
OPTIONS.debug(3, " .. looking for symbolic resolution NOT successful! Aborting!")
return None
#
# Try apply NUMERIC EXPRESSION
#
if " I IE MPZA L E EJ X ".find(" "+am+" ") | |
# Repository: buahaha/aa-freight
import datetime as dt
from unittest.mock import Mock, patch
import grpc
from dhooks_lite import Embed
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils.timezone import now
from esi.errors import TokenExpiredError, TokenInvalidError
from esi.models import Token
from allianceauth.authentication.models import CharacterOwnership
from allianceauth.eveonline.models import EveCharacter, EveCorporationInfo
from allianceauth.eveonline.providers import ObjectNotFound
from allianceauth.tests.auth_utils import AuthUtils
from app_utils.django import app_labels
from app_utils.testing import BravadoOperationStub, NoSocketsTestCase
from ..app_settings import (
FREIGHT_OPERATION_MODE_CORP_IN_ALLIANCE,
FREIGHT_OPERATION_MODE_CORP_PUBLIC,
FREIGHT_OPERATION_MODE_MY_ALLIANCE,
FREIGHT_OPERATION_MODE_MY_CORPORATION,
FREIGHT_OPERATION_MODES,
)
from ..models import (
Contract,
ContractCustomerNotification,
ContractHandler,
EveEntity,
Location,
Pricing,
)
from . import DisconnectPricingSaveHandler
from .testdata import (
characters_data,
contracts_data,
create_contract_handler_w_contracts,
create_entities_from_characters,
create_locations,
)
MODULE_PATH = "freight.models"
PATCH_FREIGHT_OPERATION_MODE = MODULE_PATH + ".FREIGHT_OPERATION_MODE"
class TestPricing(NoSocketsTestCase):
    """Tests for the Pricing model: route naming, creation rules for
    bidirectional/unidirectional routes, price calculation and contract
    price checks.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # shared fixtures: a contract handler plus three well-known locations
        cls.handler, _ = create_contract_handler_w_contracts()
        cls.jita = Location.objects.get(id=60003760)
        cls.amamake = Location.objects.get(id=1022167642188)
        cls.amarr = Location.objects.get(id=60008494)
    @patch(MODULE_PATH + ".FREIGHT_FULL_ROUTE_NAMES", False)
    def test_str(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        expected = "Jita <-> Amamake"
        self.assertEqual(str(p), expected)
    def test_repr(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        expected = (
            "Pricing(pk={}, "
            "name='Jita IV - Moon 4 - Caldari Navy Assembly Plant "
            "<-> Amamake - 3 Time Nearly AT Winners')"
        ).format(p.pk)
        self.assertEqual(repr(p), expected)
    @patch(MODULE_PATH + ".FREIGHT_FULL_ROUTE_NAMES", False)
    def test_name_from_settings_short(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        self.assertEqual(p.name, "Jita <-> Amamake")
    def test_name_short(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        self.assertEqual(p.name_short, "Jita <-> Amamake")
    @patch(MODULE_PATH + ".FREIGHT_FULL_ROUTE_NAMES", True)
    def test_name_from_settings_full(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        self.assertEqual(
            p.name,
            "Jita IV - Moon 4 - Caldari Navy Assembly Plant <-> "
            "Amamake - 3 Time Nearly AT Winners",
        )
    def test_name_full(self):
        p = Pricing(
            start_location=self.jita, end_location=self.amamake, price_base=50000000
        )
        self.assertEqual(
            p.name_full,
            "Jita IV - Moon 4 - Caldari Navy Assembly Plant <-> "
            "Amamake - 3 Time Nearly AT Winners",
        )
    def test_create_pricings(self):
        with DisconnectPricingSaveHandler():
            # first pricing
            Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
            )
            # pricing with different route
            Pricing.objects.create(
                start_location=self.amarr,
                end_location=self.amamake,
                price_base=250000000,
            )
            # pricing with reverse route then pricing 1
            Pricing.objects.create(
                start_location=self.amamake,
                end_location=self.jita,
                price_base=350000000,
            )
    def test_create_pricing_no_2nd_bidirectional_allowed(self):
        with DisconnectPricingSaveHandler():
            Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
                is_bidirectional=True,
            )
            p = Pricing.objects.create(
                start_location=self.amamake,
                end_location=self.jita,
                price_base=500000000,
                is_bidirectional=True,
            )
        with self.assertRaises(ValidationError):
            p.clean()
    def test_create_pricing_no_2nd_unidirectional_allowed(self):
        with DisconnectPricingSaveHandler():
            Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
                is_bidirectional=True,
            )
            p = Pricing.objects.create(
                start_location=self.amamake,
                end_location=self.jita,
                price_base=500000000,
                is_bidirectional=False,
            )
        p.clean()
        # this test case has been temporary inverted to allow users
        # to migrate their pricings
        """
        with self.assertRaises(ValidationError):
            p.clean()
        """
    def test_create_pricing_2nd_must_be_unidirectional_a(self):
        with DisconnectPricingSaveHandler():
            Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
                is_bidirectional=False,
            )
            p = Pricing.objects.create(
                start_location=self.amamake,
                end_location=self.jita,
                price_base=500000000,
                is_bidirectional=True,
            )
        with self.assertRaises(ValidationError):
            p.clean()
    def test_create_pricing_2nd_ok_when_unidirectional(self):
        with DisconnectPricingSaveHandler():
            Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
                is_bidirectional=False,
            )
            p = Pricing.objects.create(
                start_location=self.amamake,
                end_location=self.jita,
                price_base=500000000,
                is_bidirectional=False,
            )
        p.clean()
    def test_name_uni_directional(self):
        p = Pricing(
            start_location=self.jita,
            end_location=self.amamake,
            price_base=50000000,
            is_bidirectional=False,
        )
        self.assertEqual(p.name, "Jita -> Amamake")
    def test_get_calculated_price(self):
        # each Pricing below exercises one combination of price components
        p = Pricing()
        p.price_per_volume = 50
        self.assertEqual(p.get_calculated_price(10, 0), 500)
        p = Pricing()
        p.price_per_collateral_percent = 2
        self.assertEqual(p.get_calculated_price(10, 1000), 20)
        p = Pricing()
        p.price_per_volume = 50
        p.price_per_collateral_percent = 2
        self.assertEqual(p.get_calculated_price(10, 1000), 520)
        p = Pricing()
        p.price_base = 20
        self.assertEqual(p.get_calculated_price(10, 1000), 20)
        p = Pricing()
        p.price_min = 1000
        self.assertEqual(p.get_calculated_price(10, 1000), 1000)
        p = Pricing()
        p.price_base = 20
        p.price_per_volume = 50
        self.assertEqual(p.get_calculated_price(10, 1000), 520)
        p = Pricing()
        p.price_base = 20
        p.price_per_volume = 50
        p.price_min = 1000
        self.assertEqual(p.get_calculated_price(10, 1000), 1000)
        p = Pricing()
        p.price_base = 20
        p.price_per_volume = 50
        p.price_per_collateral_percent = 2
        p.price_min = 500
        self.assertEqual(p.get_calculated_price(10, 1000), 540)
        # negative volume or collateral must be rejected
        with self.assertRaises(ValueError):
            p.get_calculated_price(-5, 0)
        with self.assertRaises(ValueError):
            p.get_calculated_price(50, -5)
        # None volume/collateral are accepted for components not needing them
        p = Pricing()
        p.price_base = 0
        self.assertEqual(p.get_calculated_price(None, None), 0)
        p = Pricing()
        p.price_per_volume = 50
        self.assertEqual(p.get_calculated_price(10, None), 500)
        p = Pricing()
        p.price_per_collateral_percent = 2
        self.assertEqual(p.get_calculated_price(None, 100), 2)
    def test_get_contract_pricing_errors(self):
        # None result means the contract passes the price check
        p = Pricing()
        p.price_base = 50
        self.assertIsNone(p.get_contract_price_check_issues(10, 20, 50))
        p = Pricing()
        p.price_base = 500
        p.volume_max = 300
        self.assertIsNotNone(p.get_contract_price_check_issues(350, 1000))
        p = Pricing()
        p.price_base = 500
        p.volume_min = 100
        self.assertIsNotNone(p.get_contract_price_check_issues(50, 1000))
        p = Pricing()
        p.price_base = 500
        p.collateral_max = 300
        self.assertIsNotNone(p.get_contract_price_check_issues(350, 1000))
        p = Pricing()
        p.price_base = 500
        p.collateral_min = 300
        self.assertIsNotNone(p.get_contract_price_check_issues(350, 200))
        p = Pricing()
        p.price_base = 500
        self.assertIsNotNone(p.get_contract_price_check_issues(350, 200, 400))
        p = Pricing()
        p.price_base = 500
        with self.assertRaises(ValueError):
            p.get_contract_price_check_issues(-5, 0)
        with self.assertRaises(ValueError):
            p.get_contract_price_check_issues(50, -5)
        with self.assertRaises(ValueError):
            p.get_contract_price_check_issues(50, 5, -5)
    def test_collateral_min_allows_zero(self):
        p = Pricing()
        p.price_base = 500
        p.collateral_min = 0
        self.assertIsNone(p.get_contract_price_check_issues(350, 0))
    def test_collateral_min_allows_none(self):
        p = Pricing()
        p.price_base = 500
        self.assertIsNone(p.get_contract_price_check_issues(350, 0))
    def test_zero_collateral_allowed_for_collateral_pricing(self):
        p = Pricing()
        p.collateral_min = 0
        p.price_base = 500
        p.price_per_collateral_percent = 2
        self.assertIsNone(p.get_contract_price_check_issues(350, 0))
        self.assertEqual(p.get_calculated_price(350, 0), 500)
    def test_requires_volume(self):
        self.assertTrue(Pricing(price_per_volume=10000).requires_volume())
        self.assertTrue(Pricing(volume_min=10000).requires_volume())
        self.assertTrue(
            Pricing(price_per_volume=10000, volume_min=10000).requires_volume()
        )
        self.assertFalse(Pricing().requires_volume())
    def test_requires_collateral(self):
        self.assertTrue(Pricing(price_per_collateral_percent=2).requires_collateral())
        self.assertTrue(Pricing(collateral_min=50000000).requires_collateral())
        self.assertTrue(
            Pricing(
                price_per_collateral_percent=2, collateral_min=50000000
            ).requires_collateral()
        )
        self.assertFalse(Pricing().requires_collateral())
    def test_clean_force_error(self):
        # a Pricing without any price component must not validate
        p = Pricing()
        with self.assertRaises(ValidationError):
            p.clean()
    def test_is_fix_price(self):
        # fix price means base price only, with no variable components
        self.assertTrue(Pricing(price_base=50000000).is_fix_price())
        self.assertFalse(
            Pricing(price_base=50000000, price_min=40000000).is_fix_price()
        )
        self.assertFalse(
            Pricing(price_base=50000000, price_per_volume=400).is_fix_price()
        )
        self.assertFalse(
            Pricing(price_base=50000000, price_per_collateral_percent=2).is_fix_price()
        )
        self.assertFalse(Pricing().is_fix_price())
    def test_clean_normal(self):
        p = Pricing(price_base=50000000)
        p.clean()
class TestPricingPricePerVolumeModifier(NoSocketsTestCase):
    """Tests for the handler-wide price-per-volume modifier and how it feeds
    into Pricing price calculation when a pricing opts in via
    use_price_per_volume_modifier.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.handler, _ = create_contract_handler_w_contracts()
    def test_return_none_if_not_set(self):
        p = Pricing()
        self.assertIsNone(p.price_per_volume_modifier())
        self.assertIsNone(p.price_per_volume_eff())
    def test_is_ignored_in_price_calculation_if_not_set(self):
        p = Pricing()
        p.price_per_volume = 50
        self.assertEqual(p.get_calculated_price(10, None), 500)
    def test_returns_none_if_not_set_in_pricing(self):
        # modifier set on the handler, but this pricing does not opt in
        self.handler.price_per_volume_modifier = 10
        self.handler.save()
        p = Pricing()
        p.price_per_volume = 50
        self.assertIsNone(p.price_per_volume_modifier())
    def test_can_calculate_with_plus_value(self):
        self.handler.price_per_volume_modifier = 10
        self.handler.save()
        p = Pricing()
        p.price_per_volume = 50
        p.use_price_per_volume_modifier = True
        self.assertEqual(p.price_per_volume_eff(), 55)
        self.assertEqual(p.get_calculated_price(10, None), 550)
    def test_can_calculate_with_negative_value(self):
        self.handler.price_per_volume_modifier = -10
        self.handler.save()
        p = Pricing()
        p.price_per_volume = 50
        p.use_price_per_volume_modifier = True
        self.assertEqual(p.price_per_volume_eff(), 45)
        self.assertEqual(p.get_calculated_price(10, None), 450)
    def test_calculated_price_is_never_negative(self):
        # a large negative modifier clamps the effective price to zero
        self.handler.price_per_volume_modifier = -200
        self.handler.save()
        p = Pricing()
        p.price_per_volume = 50
        p.use_price_per_volume_modifier = True
        self.assertEqual(p.price_per_volume_eff(), 0)
    def test_returns_none_if_not_set_for_handler(self):
        p = Pricing(price_base=50000000)
        p.use_price_per_volume_modifier = True
        self.assertIsNone(p.price_per_volume_modifier())
    def test_returns_none_if_no_handler_defined(self):
        ContractHandler.objects.all().delete()
        p = Pricing(price_base=50000000)
        p.use_price_per_volume_modifier = True
        self.assertIsNone(p.price_per_volume_modifier())
class TestContract(NoSocketsTestCase):
    """Tests for the Contract model: string representations, derived dates
    and durations, acceptor naming, issue lists and Discord embed generation.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # create all characters with their corporations from test data
        for character in characters_data:
            EveCharacter.objects.create(**character)
            EveCorporationInfo.objects.get_or_create(
                corporation_id=character["corporation_id"],
                defaults={
                    "corporation_name": character["corporation_name"],
                    "corporation_ticker": character["corporation_ticker"],
                    "member_count": 42,
                },
            )
        # 1 user
        cls.character = EveCharacter.objects.get(character_id=90000001)
        cls.corporation = EveCorporationInfo.objects.get(
            corporation_id=cls.character.corporation_id
        )
        cls.organization = EveEntity.objects.create(
            id=cls.character.alliance_id,
            category=EveEntity.Category.ALLIANCE,
            name=cls.character.alliance_name,
        )
        cls.user = User.objects.create_user(
            cls.character.character_name, "<EMAIL>", "password"
        )
        cls.main_ownership = CharacterOwnership.objects.create(
            character=cls.character, owner_hash="x1", user=cls.user
        )
        # Locations
        cls.jita = Location.objects.create(
            id=60003760,
            name="Jita IV - Moon 4 - Caldari Navy Assembly Plant",
            solar_system_id=30000142,
            type_id=52678,
            category_id=3,
        )
        cls.amamake = Location.objects.create(
            id=1022167642188,
            name="Amamake - 3 Time Nearly AT Winners",
            solar_system_id=30002537,
            type_id=35834,
            category_id=65,
        )
        cls.handler = ContractHandler.objects.create(
            organization=cls.organization, character=cls.main_ownership
        )
    def setUp(self):
        # create contracts
        with DisconnectPricingSaveHandler():
            self.pricing = Pricing.objects.create(
                start_location=self.jita,
                end_location=self.amamake,
                price_base=500000000,
            )
        self.contract = Contract.objects.create(
            handler=self.handler,
            contract_id=1,
            collateral=0,
            date_issued=now(),
            date_expired=now() + dt.timedelta(days=5),
            days_to_complete=3,
            end_location=self.amamake,
            for_corporation=False,
            issuer_corporation=self.corporation,
            issuer=self.character,
            reward=50000000,
            start_location=self.jita,
            status=Contract.Status.OUTSTANDING,
            volume=50000,
            pricing=self.pricing,
        )
    def test_str(self):
        expected = "1: Jita -> Amamake"
        self.assertEqual(str(self.contract), expected)
    def test_repr(self):
        excepted = "Contract(contract_id=1, start_location=Jita, end_location=Amamake)"
        self.assertEqual(repr(self.contract), excepted)
    def test_hours_issued_2_completed(self):
        self.contract.date_completed = self.contract.date_issued + dt.timedelta(hours=9)
        self.assertEqual(self.contract.hours_issued_2_completed, 9)
        # without a completion date the duration is undefined
        self.contract.date_completed = None
        self.assertIsNone(self.contract.hours_issued_2_completed)
    def test_date_latest(self):
        # initial contract only had date_issued
        self.assertEqual(self.contract.date_issued, self.contract.date_latest)
        # adding date_accepted to contract
        self.contract.date_accepted = self.contract.date_issued + dt.timedelta(days=1)
        self.assertEqual(self.contract.date_accepted, self.contract.date_latest)
        # adding date_completed to contract
        self.contract.date_completed = self.contract.date_accepted + dt.timedelta(
            days=1
        )
        self.assertEqual(self.contract.date_completed, self.contract.date_latest)
    @patch(MODULE_PATH + ".FREIGHT_HOURS_UNTIL_STALE_STATUS", 24)
    def test_has_stale_status(self):
        # initial contract only had date_issued
        # date_issued is now
        self.assertFalse(self.contract.has_stale_status)
        # date_issued is 30 hours ago
        self.contract.date_issued = self.contract.date_issued - dt.timedelta(hours=30)
        self.assertTrue(self.contract.has_stale_status)
    def test_acceptor_name(self):
        # character name wins over corporation name when both are set
        contract = self.contract
        self.assertIsNone(contract.acceptor_name)
        contract.acceptor_corporation = self.corporation
        self.assertEqual(contract.acceptor_name, self.corporation.corporation_name)
        contract.acceptor = self.character
        self.assertEqual(contract.acceptor_name, self.character.character_name)
    def test_get_issues_list(self):
        self.assertListEqual(self.contract.get_issue_list(), [])
        self.contract.issues = '["one", "two"]'
        self.assertListEqual(self.contract.get_issue_list(), ["one", "two"])
    def test_generate_embed_w_pricing(self):
        x = self.contract._generate_embed()
        self.assertIsInstance(x, Embed)
        self.assertEqual(x.color, Contract.EMBED_COLOR_PASSED)
    def test_generate_embed_w_pricing_issues(self):
        self.contract.issues = ["we have issues"]
        x = self.contract._generate_embed()
        self.assertIsInstance(x, Embed)
        self.assertEqual(x.color, Contract.EMBED_COLOR_FAILED)
    def test_generate_embed_wo_pricing(self):
        self.contract.pricing = None
        x = self.contract._generate_embed()
        self.assertIsInstance(x, Embed)
@patch(MODULE_PATH + ".dhooks_lite.Webhook.execute", spec=True)
class TestContractSendPilotNotification(NoSocketsTestCase):
    """Tests for Contract.send_pilot_notification covering webhook
    configuration, branding, mention settings and error logging.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.handler, _ = create_contract_handler_w_contracts()
        cls.contract = Contract.objects.get(contract_id=149409005)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", None)
    def test_aborts_without_webhook_url(self, mock_webhook_execute):
        # no webhook configured -> no notification is sent
        mock_webhook_execute.return_value.status_ok = True
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 0)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", "url")
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_DISABLE_BRANDING", False)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_MENTIONS", None)
    def test_with_branding_and_wo_mentions(self, mock_webhook_execute):
        mock_webhook_execute.return_value.status_ok = True
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 1)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", "url")
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_DISABLE_BRANDING", True)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_MENTIONS", None)
    def test_wo_branding_and_wo_mentions(self, mock_webhook_execute):
        mock_webhook_execute.return_value.status_ok = True
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 1)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", "url")
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_DISABLE_BRANDING", True)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_MENTIONS", "@here")
    def test_with_branding_and_with_mentions(self, mock_webhook_execute):
        mock_webhook_execute.return_value.status_ok = True
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 1)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", "url")
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_DISABLE_BRANDING", True)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_MENTIONS", True)
    def test_wo_branding_and_with_mentions(self, mock_webhook_execute):
        mock_webhook_execute.return_value.status_ok = True
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 1)
    @patch(MODULE_PATH + ".FREIGHT_DISCORD_WEBHOOK_URL", "url")
    def test_log_error_from_execute(self, mock_webhook_execute):
        # a failing webhook call is logged but still counts as one attempt
        mock_webhook_execute.return_value.status_ok = False
        mock_webhook_execute.return_value.status_code = 404
        self.contract.send_pilot_notification()
        self.assertEqual(mock_webhook_execute.call_count, 1)
if "discord" in app_labels():
from allianceauth.services.modules.discord.models import DiscordUser
@patch(MODULE_PATH + ".dhooks_lite.Webhook.execute", spec=True)
class TestContractSendCustomerNotification(NoSocketsTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.handler, cls.user = create_contract_handler_w_contracts()
cls.character = cls.user.profile.main_character
cls.corporation = cls.character.corporation
cls.contract_1 = Contract.objects.get(contract_id=149409005)
cls.contract_2 = Contract.objects.get(contract_id=149409019)
cls.contract_3 = Contract.objects.get(contract_id=149409118)
cls.jita = Location.objects.get(id=60003760)
cls.amamake = Location.objects.get(id=1022167642188)
cls.amarr | |
145, 163, 170 ],
[ 240, 145, 163, 171 ],
[ 240, 145, 163, 172 ],
[ 240, 145, 163, 173 ],
[ 240, 145, 163, 174 ],
[ 240, 145, 163, 175 ],
[ 240, 145, 163, 176 ],
[ 240, 145, 163, 177 ],
[ 240, 145, 163, 178 ],
[ 240, 146, 144, 128 ],
[ 240, 146, 144, 129 ],
[ 240, 146, 144, 130 ],
[ 240, 146, 144, 131 ],
[ 240, 146, 144, 132 ],
[ 240, 146, 144, 133 ],
[ 240, 146, 144, 134 ],
[ 240, 146, 144, 135 ],
[ 240, 146, 144, 136 ],
[ 240, 146, 144, 137 ],
[ 240, 146, 144, 138 ],
[ 240, 146, 144, 139 ],
[ 240, 146, 144, 140 ],
[ 240, 146, 144, 141 ],
[ 240, 146, 144, 142 ],
[ 240, 146, 144, 143 ],
[ 240, 146, 144, 144 ],
[ 240, 146, 144, 145 ],
[ 240, 146, 144, 146 ],
[ 240, 146, 144, 147 ],
[ 240, 146, 144, 148 ],
[ 240, 146, 144, 149 ],
[ 240, 146, 144, 150 ],
[ 240, 146, 144, 151 ],
[ 240, 146, 144, 152 ],
[ 240, 146, 144, 153 ],
[ 240, 146, 144, 154 ],
[ 240, 146, 144, 155 ],
[ 240, 146, 144, 156 ],
[ 240, 146, 144, 157 ],
[ 240, 146, 144, 158 ],
[ 240, 146, 144, 159 ],
[ 240, 146, 144, 160 ],
[ 240, 146, 144, 161 ],
[ 240, 146, 144, 162 ],
[ 240, 146, 144, 163 ],
[ 240, 146, 144, 164 ],
[ 240, 146, 144, 165 ],
[ 240, 146, 144, 166 ],
[ 240, 146, 144, 167 ],
[ 240, 146, 144, 168 ],
[ 240, 146, 144, 169 ],
[ 240, 146, 144, 170 ],
[ 240, 146, 144, 171 ],
[ 240, 146, 144, 172 ],
[ 240, 146, 144, 173 ],
[ 240, 146, 144, 174 ],
[ 240, 146, 144, 175 ],
[ 240, 146, 144, 176 ],
[ 240, 146, 144, 177 ],
[ 240, 146, 144, 178 ],
[ 240, 146, 144, 179 ],
[ 240, 146, 144, 180 ],
[ 240, 146, 144, 181 ],
[ 240, 146, 144, 182 ],
[ 240, 146, 144, 183 ],
[ 240, 146, 144, 184 ],
[ 240, 146, 144, 185 ],
[ 240, 146, 144, 186 ],
[ 240, 146, 144, 187 ],
[ 240, 146, 144, 188 ],
[ 240, 146, 144, 189 ],
[ 240, 146, 144, 190 ],
[ 240, 146, 144, 191 ],
[ 240, 146, 145, 128 ],
[ 240, 146, 145, 129 ],
[ 240, 146, 145, 130 ],
[ 240, 146, 145, 131 ],
[ 240, 146, 145, 132 ],
[ 240, 146, 145, 133 ],
[ 240, 146, 145, 134 ],
[ 240, 146, 145, 135 ],
[ 240, 146, 145, 136 ],
[ 240, 146, 145, 137 ],
[ 240, 146, 145, 138 ],
[ 240, 146, 145, 139 ],
[ 240, 146, 145, 140 ],
[ 240, 146, 145, 141 ],
[ 240, 146, 145, 142 ],
[ 240, 146, 145, 143 ],
[ 240, 146, 145, 144 ],
[ 240, 146, 145, 145 ],
[ 240, 146, 145, 146 ],
[ 240, 146, 145, 147 ],
[ 240, 146, 145, 148 ],
[ 240, 146, 145, 149 ],
[ 240, 146, 145, 150 ],
[ 240, 146, 145, 151 ],
[ 240, 146, 145, 152 ],
[ 240, 146, 145, 153 ],
[ 240, 146, 145, 154 ],
[ 240, 146, 145, 155 ],
[ 240, 146, 145, 156 ],
[ 240, 146, 145, 157 ],
[ 240, 146, 145, 158 ],
[ 240, 146, 145, 159 ],
[ 240, 146, 145, 160 ],
[ 240, 146, 145, 161 ],
[ 240, 146, 145, 162 ],
[ 240, 146, 145, 163 ],
[ 240, 146, 145, 164 ],
[ 240, 146, 145, 165 ],
[ 240, 146, 145, 166 ],
[ 240, 146, 145, 167 ],
[ 240, 146, 145, 168 ],
[ 240, 146, 145, 169 ],
[ 240, 146, 145, 170 ],
[ 240, 146, 145, 171 ],
[ 240, 146, 145, 172 ],
[ 240, 146, 145, 173 ],
[ 240, 146, 145, 174 ],
[ 240, 150, 169, 160 ],
[ 240, 150, 169, 161 ],
[ 240, 150, 169, 162 ],
[ 240, 150, 169, 163 ],
[ 240, 150, 169, 164 ],
[ 240, 150, 169, 165 ],
[ 240, 150, 169, 166 ],
[ 240, 150, 169, 167 ],
[ 240, 150, 169, 168 ],
[ 240, 150, 169, 169 ],
[ 240, 150, 173, 144 ],
[ 240, 150, 173, 145 ],
[ 240, 150, 173, 146 ],
[ 240, 150, 173, 147 ],
[ 240, 150, 173, 148 ],
[ 240, 150, 173, 149 ],
[ 240, 150, 173, 150 ],
[ 240, 150, 173, 151 ],
[ 240, 150, 173, 152 ],
[ 240, 150, 173, 153 ],
[ 240, 150, 173, 155 ],
[ 240, 150, 173, 156 ],
[ 240, 150, 173, 157 ],
[ 240, 150, 173, 158 ],
[ 240, 150, 173, 159 ],
[ 240, 150, 173, 160 ],
[ 240, 150, 173, 161 ],
[ 240, 157, 141, 160 ],
[ 240, 157, 141, 161 ],
[ 240, 157, 141, 162 ],
[ 240, 157, 141, 163 ],
[ 240, 157, 141, 164 ],
[ 240, 157, 141, 165 ],
[ 240, 157, 141, 166 ],
[ 240, 157, 141, 167 ],
[ 240, 157, 141, 168 ],
[ 240, 157, 141, 169 ],
[ 240, 157, 141, 170 ],
[ 240, 157, 141, 171 ],
[ 240, 157, 141, 172 ],
[ 240, 157, 141, 173 ],
[ 240, 157, 141, 174 ],
[ 240, 157, 141, 175 ],
[ 240, 157, 141, 176 ],
[ 240, 157, 141, 177 ],
[ 240, 157, 159, 142 ],
[ 240, 157, 159, 143 ],
[ 240, 157, 159, 144 ],
[ 240, 157, 159, 145 ],
[ 240, 157, 159, 146 ],
[ 240, 157, 159, 147 ],
[ 240, 157, 159, 148 ],
[ 240, 157, 159, 149 ],
[ 240, 157, 159, 150 ],
[ 240, 157, 159, 151 ],
[ 240, 157, 159, 152 ],
[ 240, 157, 159, 153 ],
[ 240, 157, 159, 154 ],
[ 240, 157, 159, 155 ],
[ 240, 157, 159, 156 ],
[ 240, 157, 159, 157 ],
[ 240, 157, 159, 158 ],
[ 240, 157, 159, 159 ],
[ 240, 157, 159, 160 ],
[ 240, 157, 159, 161 ],
[ 240, 157, 159, 162 ],
[ 240, 157, 159, 163 ],
[ 240, 157, 159, 164 ],
[ 240, 157, 159, 165 ],
[ 240, 157, 159, 166 ],
[ 240, 157, 159, 167 ],
[ 240, 157, 159, 168 ],
[ 240, 157, 159, 169 ],
[ 240, 157, 159, 170 ],
[ 240, 157, 159, 171 ],
[ 240, 157, 159, 172 ],
[ 240, 157, 159, 173 ],
[ 240, 157, 159, 174 ],
[ 240, 157, 159, 175 ],
[ 240, 157, 159, 176 ],
[ 240, 157, 159, 177 ],
[ 240, 157, 159, 178 ],
[ 240, 157, 159, 179 ],
[ 240, 157, 159, 180 ],
[ 240, 157, 159, 181 ],
[ 240, 157, 159, 182 ],
[ 240, 157, 159, 183 ],
[ 240, 157, 159, 184 ],
[ 240, 157, 159, 185 ],
[ 240, 157, 159, 186 ],
[ 240, 157, 159, 187 ],
[ 240, 157, 159, 188 ],
[ 240, 157, 159, 189 ],
[ 240, 157, 159, | |
Minimum Stock Quantity. Usually blank, unless a user override to the'
'automatically calculated minimum, (reorder point, as viewed in the Product'
'Analysis) is entered in this field.'),
Column('Max_Stock_Qty',
Numeric(precision=19, scale=4),
nullable=True,
doc='Maximum Stock Quantity',
comment='The maximum stock quantity. Generally not used. The purchasing system projects'
'a maximum based on lead time, demand, and desired inventory turns for the'
'vendor.'),
schema='product')
# noinspection PyPep8Naming
class prod_purch_02_archive(server_utils.mysql_base):
    """Archived (historical) snapshots of product purchasing attributes.

    Each row is a point-in-time snapshot keyed by ``Date_Time_Stamp``; the
    companion ``prod_purch_01_current`` table holds the latest values.
    """
    # NOTE: multi-line ``comment=`` values rely on Python implicit string
    # concatenation, so every fragment but the last must end with a trailing
    # space — otherwise adjacent words run together in the stored DB comment.
    __table__ = Table('prod_purch_02_archive', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Prod_Num',
                             String(length=25),
                             nullable=True,
                             index=True,
                             doc='Product (25)',
                             comment='The Product Number limited to a column 25 characters wide'),
                      Column('Seasonal',
                             String(length=8),
                             nullable=True,
                             doc='Seasonal',
                             comment='This Y/N field indicates if the Seasonal box is checked. Seasonal items use the '
                                     'upcoming three months of demand when projecting order quantities.'),
                      Column('Purch_History_Link_Prod',
                             String(length=20),
                             nullable=True,
                             doc='Purchase History Link Product',
                             comment='The product ID that is linked for purchase history. Projected Purchase order '
                                     'adds the demand from this superseded item to the parent.'),
                      Column('Weight',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Weight',
                             comment='The product weight, generally entered in pounds.'),
                      Column('Volume',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Volume',
                             comment='Cubic Volume'),
                      Column('Length',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Length',
                             comment="The product's length value"),
                      Column('Width',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Width',
                             comment="The product's width value."),
                      Column('Height',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Height',
                             comment="The product's height value"),
                      Column('Box_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Box Qty',
                             comment='The box quantity for the product'),
                      Column('Carton_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Carton Quantity',
                             comment='The Carton Quantity for the product.'),
                      Column('Pallet_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Pallet Quantity',
                             comment='The pallet quantity for the product, as set on the purchasing tab.'),
                      Column('Pallet_Layer',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Pallet Layer',
                             comment='The pallet layer for the product, as set on the purchasing tab.'),
                      Column('Min_PO_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Minimum PO Quantity',
                             comment='The minimum Purchase Order quantity'),
                      Column('Min_Stock_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Minimum Stock Qty',
                             comment='The Minimum Stock Quantity. Usually blank, unless a user override to the '
                                     'automatically calculated minimum, (reorder point, as viewed in the Product '
                                     'Analysis) is entered in this field.'),
                      Column('Max_Stock_Qty',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Maximum Stock Quantity',
                             comment='The maximum stock quantity. Generally not used. The purchasing system projects '
                                     'a maximum based on lead time, demand, and desired inventory turns for the '
                                     'vendor.'),
                      schema='product')
# noinspection PyPep8Naming
class prod_replenishments_01_current(server_utils.mysql_base):
    """Current per-warehouse automatic warehouse replenishment (AWR) settings.

    One row per product/warehouse pair; the companion
    ``prod_replenishments_02_archive`` table keeps historical snapshots.
    """
    # NOTE: multi-line ``comment=`` values rely on Python implicit string
    # concatenation, so every fragment but the last must end with a trailing
    # space — otherwise adjacent words run together in the stored DB comment.
    __table__ = Table('prod_replenishments_01_current', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Prod_Num',
                             String(length=25),
                             nullable=True,
                             index=True,
                             doc='Product (25)',
                             comment='The Product Number limited to a column 25 characters wide'),
                      Column('Whse_Num',
                             Integer,
                             nullable=True,
                             doc='Whse',
                             comment='The two digit reference code for the warehouse that the information pertains '
                                     'to. Use this field in conjunction with fields such as Avail, On PO, COMM, etc. '
                                     'that present data pertaining to multiple warehouses'),
                      Column('AWR_Whse',
                             String(length=14),
                             nullable=True,
                             doc='AWR Whse',
                             comment='This field indicates which warehouse the product is set to be replenished from'),
                      Column('AWR_Min',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='AWR Min',
                             comment='The minimum number of weeks that should be replenished from one warehouse to '
                                     'another for the product.'),
                      Column('AWR_Max',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='AWR Max',
                             comment='The maximum number of weeks that should be replenished from one warehouse to '
                                     'another for the product.'),
                      Column('AWR_Break_Case',
                             String(length=15),
                             nullable=True,
                             doc='AWR Break Case',
                             comment='This Y/N field indicates if the product is designated to break cases when '
                                     'transferring between warehouses in a warehouse replenishment setup'),
                      schema='product')
# noinspection PyPep8Naming
class prod_replenishments_02_archive(server_utils.mysql_base):
    """Archived (historical) per-warehouse replenishment (AWR) settings.

    Schema mirrors ``prod_replenishments_01_current``; rows are snapshots
    keyed by ``Date_Time_Stamp``.
    """
    # NOTE: multi-line ``comment=`` values rely on Python implicit string
    # concatenation, so every fragment but the last must end with a trailing
    # space — otherwise adjacent words run together in the stored DB comment.
    __table__ = Table('prod_replenishments_02_archive', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Prod_Num',
                             String(length=25),
                             nullable=True,
                             index=True,
                             doc='Product (25)',
                             comment='The Product Number limited to a column 25 characters wide'),
                      Column('Whse_Num',
                             Integer,
                             nullable=True,
                             doc='Whse',
                             comment='The two digit reference code for the warehouse that the information pertains '
                                     'to. Use this field in conjunction with fields such as Avail, On PO, COMM, etc. '
                                     'that present data pertaining to multiple warehouses'),
                      Column('AWR_Whse',
                             String(length=14),
                             nullable=True,
                             doc='AWR Whse',
                             comment='This field indicates which warehouse the product is set to be replenished from'),
                      Column('AWR_Min',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='AWR Min',
                             comment='The minimum number of weeks that should be replenished from one warehouse to '
                                     'another for the product.'),
                      Column('AWR_Max',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='AWR Max',
                             comment='The maximum number of weeks that should be replenished from one warehouse to '
                                     'another for the product.'),
                      Column('AWR_Break_Case',
                             String(length=15),
                             nullable=True,
                             doc='AWR Break Case',
                             comment='This Y/N field indicates if the product is designated to break cases when '
                                     'transferring between warehouses in a warehouse replenishment setup'),
                      schema='product')
# noinspection PyPep8Naming
class prod_rollup_01_current(server_utils.mysql_base):
    """Current price rollup values and rollup schedule sequences per product."""
    # NOTE: multi-line ``comment=`` values rely on Python implicit string
    # concatenation, so every fragment but the last must end with a trailing
    # space — otherwise adjacent words run together in the stored DB comment.
    __table__ = Table('prod_rollup_01_current', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Prod_Num',
                             String(length=25),
                             nullable=True,
                             index=True,
                             doc='Product (25)',
                             comment='The Product Number limited to a column 25 characters wide'),
                      Column('Rollup_C1',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup C1',
                             comment='Lists all historical rollup values for C1. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_C4',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup C4',
                             comment='Lists all historical rollup values for C4. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_C6',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup C6',
                             comment='Lists all historical rollup values for C6. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_C7',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup C7',
                             comment='Lists all historical rollup values for C7. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_L1',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup L1',
                             comment='Lists all historical rollup values for L1. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_L2',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup L2',
                             comment='Lists all historical rollup values for L2. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_L3',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup L3',
                             comment='Lists all historical rollup values for L3. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_L4',
                             Numeric(precision=19, scale=4),
                             nullable=True,
                             doc='Rollup L4',
                             comment='Lists all historical rollup values for L4. This is the total amount of the '
                                     'field after the rollup, not the formula. Pair this field with the ROLLUP.Date '
                                     'field.'),
                      Column('Rollup_Date',
                             Date,
                             nullable=True,
                             doc='Rollup Date',
                             comment='The date that the rollup change occurred.'),
                      Column('Rollup_Seq_1',
                             String(length=20),
                             nullable=True,
                             doc='Rollup Sequence 1',
                             comment="The first sequence of an item's Price Rollup Schedule"),
                      Column('Rollup_Seq_2',
                             String(length=20),
                             nullable=True,
                             doc='Rollup Sequence 2',
                             comment="The second sequence of an item's Price Rollup Schedule"),
                      Column('Rollup_Seq_3',
                             String(length=20),
                             nullable=True,
                             doc='Rollup Sequence 3',
                             comment="The third sequence of an item's Price Rollup Schedule"),
                      Column('Rollup_Seq_4',
                             String(length=20),
                             nullable=True,
                             doc='Rollup Sequence 4',
                             comment="The fourth sequence of an item's Price Rollup Schedule"),
                      Column('Rollup_Seq_5',
                             String(length=20),
                             nullable=True,
                             doc='Rollup Sequence 5',
                             comment="The fifth sequence of an item's Price Rollup Schedule"),
                      schema='product')
# noinspection PyPep8Naming
class prod_rollup_02_archive(server_utils.mysql_base):
__table__ = Table('prod_rollup_02_archive', server_utils.mysql_base.metadata,
Column('ID',
Integer,
primary_key=True,
unique=True,
index=True,
autoincrement=True,
doc='ID',
comment='N/A'),
Column('Date_Time_Stamp',
DateTime,
nullable=True,
doc='Date Time Stamp',
comment='N/A'),
Column('Prod_Num',
String(length=25),
nullable=True,
index=True,
doc='Product (25)',
comment='The Product Number limited to a column 25 characters wide'),
Column('Rollup_C1',
Numeric(precision=19, scale=4),
nullable=True,
doc='Rollup C1',
comment='Lists all historical rollup values for C1. This is the total amount of the'
'field after the rollup, no the formula. Pair this field with the ROLLUP.Date'
'field.'),
Column('Rollup_C4',
Numeric(precision=19, scale=4),
nullable=True,
doc='Rollup C4',
comment='Lists all historical rollup values for C4. This is the total amount of the'
'field after the rollup, no the formula. Pair this field with the ROLLUP.Date'
'field.'),
Column('Rollup_C6',
Numeric(precision=19, scale=4),
nullable=True,
doc='Rollup C6',
comment='Lists all historical rollup values for C6. This is the total amount of the'
'field after the rollup, no the formula. Pair this field with the ROLLUP.Date'
'field.'),
Column('Rollup_C7',
Numeric(precision=19, scale=4),
nullable=True,
doc='Rollup C7',
comment='Lists all historical rollup values for C7. This is the total amount of the'
'field after the rollup, no the formula. Pair this field with the ROLLUP.Date'
'field.'),
Column('Rollup_L1',
Numeric(precision=19, scale=4),
nullable=True,
doc='Rollup L1',
comment='Lists all historical rollup values for L1. This is the total amount of the'
'field after the rollup, no the formula. Pair this field with the | |
x s y t a f c b k e c s s w w p w o p n n g
... sample3 e b s w t l f c b n e c s s w w p w o p n n m
... sample4 p x y w t p f c n n e e s s w w p w o p k s u
... sample5 e x s g f n f w b k t e s s w w p w o e n a g
... sample6 e x y y t a f c b n e c s s w w p w o p k n g
... sample7 e b s w t a f c b g e c s s w w p w o p k n m
... sample8 e b y w t l f c b n e c s s w w p w o p n s m
... sample9 p x y w t p f c n p e e s s w w p w o p k v g
... sample10 e b s y t a f c b g e c s s w w p w o p k s m
... '''
>>>
>>> # Column delimiter is spaces or tabs, i.e., sep='\s+'
... # No header rows available, i.e., header_row=None (default).
... # Use all columns, i.e., usecols=None (default).
... # Column '1' contains target values. Encode the target values, i.e., encode_target=True (default).
... main.read_data_csv(StringIO(data), sep='\s+', header_row=None, target_col=1)
info: columns [0] was/were inferred as 'string' or 'date' type feature(s) and dropped
>>>
>>> #Print the processed data samples. Note: Column '0' was inferred as 'string' type feature and dropped.
... print(main.data)
[[ 1. 0. 1. 1. 3. 0. 0. 1. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 2.]
[ 1. 0. 3. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 0.]
[ 0. 0. 2. 1. 1. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1.]
[ 1. 1. 2. 1. 3. 0. 0. 1. 2. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 2.]
[ 1. 0. 0. 0. 2. 0. 1. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[ 1. 1. 3. 1. 0. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0.]
[ 0. 0. 2. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 1.]
[ 0. 1. 2. 1. 1. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 2. 1.]
[ 1. 1. 2. 1. 3. 0. 0. 1. 3. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 3. 0.]
[ 0. 0. 3. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 1.]]
>>>
>>> # Print the names of columns in data
... print(main.columns_)
Int64Index([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype='int64')
>>>
>>> # Print the target values, i.e, Column '1' values.
... print(main.target)
[1, 0, 0, 1, 0, 0, 0, 0, 1, 0]
>>>
>>> # Print the distinct (original) classes in target values
... print(main.classes_)
['e', 'p']
"""
dataset = pd.read_csv(filepath_or_buffer=file, sep=sep, skiprows=skiprows, header=header_row, usecols=usecols, index_col=target_col, na_values=na_values, skipinitialspace=True, nrows=nrows, **kargs)
dataset.dropna(axis='index', how='any', inplace=True)
# column index (or names) in data
self.columns_ = dataset.columns
columns_dtypes = dataset.dtypes.values
data, target = dataset.values, None if target_col is None else np.array(dataset.index)
del dataset
if target is not None:
# Distinct (original) classes in target values
self.classes_ = None
if encode_target:
target_labelEncoder = LabelEncoder()
target = target_labelEncoder.fit_transform(target)
self.classes_ = target_labelEncoder.classes_.tolist()
del target_labelEncoder
# Column name indexed dictionary of distinct (original) categories in the data columns. Defaults to ``None`` for numeric (non-categorical) valued columns.
self.columns_categories_ = dict.fromkeys(self.columns_)
# using array of absolute (zero-)indices of columns for ``catergorical_cols`` parameter
if isinstance(categorical_cols, str) and categorical_cols.casefold()=="infer":
n_samples, n_features = data.shape
selected_columns = np.array([True]*n_features)
# maximum number of classes in a column to be "infered" as "categorical (nominal)"
max_infer_nominal_classes = max_classes_nominal(n_samples)
self._nominal_columns = []
for column_index in np.where(columns_dtypes==np.object)[0]:
column_labelEncoder = LabelEncoder()
column_labelEncoder.fit(data.T[column_index])
if len(column_labelEncoder.classes_) <= max_infer_nominal_classes:
self._nominal_columns.append(self.columns_[column_index])
self.columns_categories_[self.columns_[column_index]] = column_labelEncoder.classes_.tolist()
data.T[column_index] = column_labelEncoder.transform(data.T[column_index])
else:
selected_columns[column_index] = False
del self.columns_categories_[self.columns_[column_index]]
del column_labelEncoder
if self._nominal_columns:
logger.info("Columns %s was/were inferred as 'nominal' categorical feature(s) and encoded", self._nominal_columns)
if not selected_columns.all():
logger.info("Columns %s was/were inferred as 'string' or 'date' type feature(s) and dropped", self.columns_[np.where(selected_columns==False)].tolist())
self.columns_ = self.columns_[selected_columns]
data = data.T[selected_columns].T
elif isinstance(categorical_cols, str) and categorical_cols.casefold()=='all':
self._nominal_columns = self.columns_.copy()
for column_index in range(self.columns_.shape[0]):
column_labelEncoder = LabelEncoder()
data.T[column_index] = column_labelEncoder.fit_transform(data.T[column_index])
self.columns_categories_[self.columns_[column_index]] = column_labelEncoder.classes_.tolist()
del column_labelEncoder
elif isinstance(categorical_cols, list) or isinstance(categorical_cols, int) or isinstance(categorical_cols, str):
if isinstance(categorical_cols, int) or isinstance(categorical_cols, str):
categorical_cols = [categorical_cols]
self._nominal_columns = categorical_cols.copy()
# TODO: Process each column in a seperate thread
for column_name in categorical_cols:
column_index, = np.where(self.columns_==column_name)
if column_index.shape == (1,):
column_labelEncoder = LabelEncoder()
data.T[column_index[0]] = column_labelEncoder.fit_transform(data.T[column_index[0]])
self.columns_categories_[column_name] = column_labelEncoder.classes_.tolist()
del column_labelEncoder
else:
logger.warning("Column '%s' could not be (uniquely) identified and was skipped", column_name)
self._nominal_columns.remove(column_name)
continue
elif categorical_cols is None:
self._nominal_columns = None
else:
# print("error: Invalid argument for parameter 'categorical_cols'. Accepted arguments: {list of names (or indices) of nominal columns, 'infer', 'all', None}")
logger.error("Invalid argument for parameter 'categorical_cols'. Accepted arguments: {list of names (or indices) of nominal columns, 'infer', 'all', None}")
raise TypeError("invalid argument for parameter 'categorical_cols'")
try:
data = data.astype(np.number)
except ValueError as err:
# print("warning: Data contains 'string' (or 'date') type features and could not be casted to 'numerical' type")
logger.warning("Data contains 'string' (or 'date') type features and could not be casted to 'numerical' type")
self.data, self.target = data, target
self.n_samples, self.n_features = self.data.shape
def read_data_libsvm(self, file, type='classification', dtype=np.float, n_features=None, **kargs):
"""Read data from LIBSVM format file
Parameters:
file (str or open file or int): Path to LIBSVM data file or ``open file`` object or file descriptor
type ({'classification','regression','ranking'}, default='classification'): Type of dataset
dtype (datatypes, default=``np.float``): Datatype of data array
n_features (int, default= ``None``): Number of features to use. ``None`` implies infer from data.
**kargs: Other Keyword arguments accepted by :func:`sklearn.datasets.load_svmlight_file` (Keyword arguments : offset, length, multilabel ...)
Note:
* ``file-like`` objects passed to 'file' parameter must be opened in binary mode.
* Learning to Rank('ranking' type) datasets are not currently supported
* ``dtype`` parameter accepts only numerical datatypes
* The LIBSVM data file is assumed to have been already preprocessed, i.e., encoding categorical features and removal of missing values.
Examples:
Illustration of **Reading from LIBSVM data file** ::
>>> from automs import eda
>>> main = eda.EDA()
>>>
>>> from io import BytesIO
>>>
>>> # First 10 samples from dataset Breast Cancer (Source: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/breast-cancer)
... data = b'''
... 2.000000 1:1000025.000000 2:5.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1002945.000000 2:5.000000 3:4.000000 4:4.000000 5:5.000000 6:7.000000 7:10.000000 8:3.000000 9:2.000000 10:1.000000
... 2.000000 1:1015425.000000 2:3.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:2.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1016277.000000 2:6.000000 3:8.000000 4:8.000000 5:1.000000 6:3.000000 7:4.000000 8:3.000000 9:7.000000 10:1.000000
... 2.000000 1:1017023.000000 2:4.000000 3:1.000000 4:1.000000 5:3.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 4.000000 1:1017122.000000 2:8.000000 3:10.000000 4:10.000000 5:8.000000 6:7.000000 7:10.000000 8:9.000000 9:7.000000 10:1.000000
... 2.000000 1:1018099.000000 2:1.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:10.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1018561.000000 2:2.000000 3:1.000000 4:2.000000 5:1.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1033078.000000 2:2.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 8:1.000000 9:1.000000 10:5.000000
... 2.000000 1:1033078.000000 2:4.000000 3:2.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 | |
the absolute visual magnitude (Mv) from the richness
by transforming the isochrone in the SDSS system and using the
g,r -> V transform equations from Jester 2005
[astro-ph/0506022].
TODO: ADW If richness not specified, should use self.richness
Parameters:
-----------
richness : isochrone normalization parameter
steps : number of isochrone sampling steps
Returns:
--------
abs_mag : Absolute magnitude (Mv)
"""
# Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
# for stars with R-I < 1.15
# V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01
# Create a copy of the isochrone in the SDSS system
params = {k:v.value for k,v in self._params.items()}
params.update(band_1='g',band_2='r',survey='sdss')
iso = self.__class__(**params)
# g, r are absolute magnitude
mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps=steps)
V = jester_mag_v(sdss_g,sdss_r)
# Sum the V-band absolute magnitudes
return sum_mags(V,weights=mass_pdf*richness)
#V = g - 0.59*(g - r) - 0.01
#flux = np.sum(mass_pdf*10**(-V/2.5))
#Mv = -2.5*np.log10(richness*flux)
#return Mv
def absolute_magnitude_martin(self, richness=1, steps=1e4, n_trials=1000, mag_bright=None, mag_faint=23., alpha=0.32, seed=None):
    """
    Calculate the absolute magnitude (Mv) of the isochrone using
    the prescription of Martin et al. 2008.

    ADW: Seems like the faint and bright limits should depend on the survey maglim?

    Parameters:
    -----------
    richness : Isochrone normalization factor
    steps : Number of steps for sampling the isochrone.
    n_trials : Number of bootstrap samples
    mag_bright : Bright magnitude limit [SDSS g-band] for luminosity calculation
        NOTE(review): accepted but never referenced in this body — confirm intent.
    mag_faint : Faint magnitude limit [SDSS g-band] for luminosity calculation
    alpha : Output confidence interval (1-alpha)
    seed : Random seed

    Returns:
    --------
    med,lo,hi : Total absolute magnitude interval
    """
    # ADW: This function is not quite right. It should restrict
    # the catalog to the observable space using the mask in each
    # pixel. This becomes even more complicated when we transform
    # the isochrone into SDSS g,r...
    # Seed the global numpy RNG so both the analytic sample and the
    # bootstrap trials below are reproducible.
    if seed is not None: np.random.seed(seed)
    # Create a copy of the isochrone in the SDSS system; the copy shares all
    # parameters except the bands/survey, which are forced to SDSS g,r.
    params = {k:v.value for k,v in self._params.items()}
    params.update(band_1='g',band_2='r',survey='sdss')
    iso = self.__class__(**params)
    # Analytic part (below detection threshold)
    # g, r are absolute magnitudes
    mass_init, mass_pdf, mass_act, sdss_g, sdss_r = iso.sample(mass_steps = steps)
    V = jester_mag_v(sdss_g, sdss_r)
    # Select isochrone points whose apparent g-magnitude is fainter than the
    # detection limit; their flux is summed analytically (PDF-weighted).
    cut = ( (sdss_g + iso.distance_modulus) > mag_faint)
    mag_unobs = sum_mags(V[cut], weights = richness * mass_pdf[cut])
    # Stochastic part (above detection threshold): bootstrap over simulated
    # realizations of the observable stars.
    abs_mag_v = np.zeros(n_trials)
    for i in range(n_trials):
        if i%100==0: logger.debug('%i absolute magnitude trials'%i)
        # g,r are apparent magnitudes
        sdss_g, sdss_r = iso.simulate(richness * iso.stellar_mass())
        cut = (sdss_g < mag_faint)
        # V is absolute magnitude
        V = jester_mag_v(sdss_g[cut]-iso.distance_modulus,
                         sdss_r[cut]-iso.distance_modulus)
        mag_obs = sum_mags(V)
        # Combine the simulated observable flux with the analytic unobserved flux.
        abs_mag_v[i] = sum_mags([mag_obs,mag_unobs])
    # ADW: Careful, fainter abs mag is larger (less negative) number
    # Percentiles are taken on magnitudes, so the ordering of (hi, med, lo)
    # is intentionally reversed relative to the percentile order.
    q = [100*alpha/2., 50, 100*(1-alpha/2.)]
    hi,med,lo = np.percentile(abs_mag_v,q)
    return ugali.utils.stats.interval(med,lo,hi)
def simulate(self, stellar_mass, distance_modulus=None, **kwargs):
    """
    Simulate a set of stellar magnitudes (no uncertainty) for a
    satellite of a given stellar mass and distance.

    Parameters:
    -----------
    stellar_mass : the total stellar mass of the system (Msun)
    distance_modulus : distance modulus of the system (if None takes from isochrone)
    kwargs : passed to iso.imf.sample

    Returns:
    --------
    mag_1, mag_2 : simulated magnitudes with length stellar_mass/iso.stellar_mass()
    """
    if distance_modulus is None:
        distance_modulus = self.distance_modulus
    # Number of stars scales with the requested total stellar mass.
    nstars = int(round(stellar_mass / self.stellar_mass()))
    # Interpolators mapping initial stellar mass onto each magnitude band.
    interp_mag_1 = scipy.interpolate.interp1d(self.mass_init, self.mag_1)
    interp_mag_2 = scipy.interpolate.interp1d(self.mass_init, self.mag_2)
    # Draw initial masses from the IMF over the isochrone's mass range.
    masses = self.imf.sample(nstars, np.min(self.mass_init),
                             np.max(self.mass_init), **kwargs)
    # Shift absolute magnitudes to apparent magnitudes at the given distance.
    return (interp_mag_1(masses) + distance_modulus,
            interp_mag_2(masses) + distance_modulus)
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
    """
    Compute observable fraction of stars with masses greater than mass_min in each
    pixel in the interior region of the mask.

    ADW: Careful, this function is fragile! The selection here should
    be the same as mask.restrictCatalogToObservable space. However,
    for technical reasons it is faster to do the calculation with
    broadcasting here.
    ADW: Could this function be even faster / more readable?
    ADW: Should this include magnitude error leakage?

    Parameters
    ----------
    mask : survey mask object (provides roi, mask_1/mask_2, solid_angle_cmd)
    distance_modulus : distance modulus applied to the isochrone magnitudes
    mass_min : minimum initial stellar mass to include

    Returns
    -------
    observable_fraction : array over the interior ROI pixels
    """
    mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
    # Detection magnitude is whichever band is flagged for detection.
    mag = mag_1_array if self.band_1_detection else mag_2_array
    color = mag_1_array - mag_2_array
    # ADW: Only calculate observable fraction over interior pixels...
    pixels = mask.roi.pixels_interior
    mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
    mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]
    # ADW: Restrict mag and color to range of mask with sufficient solid angle
    cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
                                         mask.roi.bins_color, mask.roi.bins_mag) > 0
    # Pre-apply these cuts to the 1D mass_pdf_array to save time
    mass_pdf_cut = mass_pdf_array*cmd_cut
    # Create 2D arrays of cuts for each pixel: isochrone points (rows)
    # broadcast against per-pixel magnitude limits (columns).
    mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
    mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
    mask_cut_repeat = mask_1_cut & mask_2_cut
    # PDF-weighted sum over isochrone points gives the fraction per pixel.
    observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
    return observable_fraction
def observableFractionCMD(self, mask, distance_modulus, mass_min=0.1):
    """
    Compute observable fraction of stars with masses greater than mass_min in each
    pixel in the interior region of the mask.

    ADW: Careful, this function is fragile! The selection here should
    be the same as mask.restrictCatalogToObservable space. However,
    for technical reasons it is faster to do the calculation with
    broadcasting here.
    ADW: Could this function be even faster / more readable?
    ADW: Should this include magnitude error leakage?

    Parameters
    ----------
    mask : survey mask object (provides roi, mask_roi_unique, solid_angle_cmd)
    distance_modulus : distance modulus applied to the isochrone magnitudes
        (falls back to self.distance_modulus when None)
    mass_min : minimum initial stellar mass to include

    Returns
    -------
    observable_fraction : array over the interior ROI pixels, scaled by the
        interior coverage fraction
    """
    if distance_modulus is None: distance_modulus = self.distance_modulus
    mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
    # Detection magnitude is whichever band is flagged for detection.
    mag = mag_1 if self.band_1_detection else mag_2
    color = mag_1 - mag_2
    # ADW: Only calculate observable fraction for unique mask values
    # (cheaper than per-pixel; results are expanded back out via mask_roi_digi).
    mag_1_mask,mag_2_mask = mask.mask_roi_unique.T
    # ADW: Restrict mag and color to range of mask with sufficient solid angle
    cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
                                         mask.roi.bins_color, mask.roi.bins_mag) > 0
    # Pre-apply these cuts to the 1D mass_pdf_array to save time
    mass_pdf_cut = mass_pdf*cmd_cut
    # Create 2D arrays of cuts for each pixel: isochrone points (rows)
    # broadcast against unique magnitude limits (columns).
    mask_1_cut = (mag_1+distance_modulus)[:,np.newaxis] < mag_1_mask
    mask_2_cut = (mag_2+distance_modulus)[:,np.newaxis] < mag_2_mask
    mask_cut_repeat = (mask_1_cut & mask_2_cut)
    # Condense back into one per digi
    observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
    # Expand to the roi and multiply by coverage fraction
    return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]] * mask.frac_interior_sparse
def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
    """
    Compute observable fraction of stars with masses greater than mass_min in each
    pixel in the interior region of the mask. Incorporates simplistic
    photometric errors.

    ADW: Careful, this function is fragile! The selection here should
    be the same as mask.restrictCatalogToObservable space. However,
    for technical reasons it is faster to do the calculation with
    broadcasting here.

    ADW: This function is currently a rate-limiting step in the likelihood
    calculation. Could it be faster?

    Parameters
    ----------
    mask : survey mask object (provides roi, mask_roi_unique, photo_err_1/2,
        mask_1/mask_2 completeness)
    distance_modulus : distance modulus applied to the isochrone magnitudes
    mass_min : minimum initial stellar mass to include

    Returns
    -------
    observable_fraction : array over the interior ROI pixels
    """
    # Completeness model used when folding in the detection efficiency.
    method = 'step'
    mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)
    # Shift isochrone magnitudes to apparent magnitudes.
    mag_1 = mag_1+distance_modulus
    mag_2 = mag_2+distance_modulus
    mask_1,mask_2 = mask.mask_roi_unique.T
    # Photometric error as a function of distance from the maglim,
    # broadcast over (unique mask values) x (isochrone points).
    mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
    mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
    # "upper" bound set by maglim
    delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
    delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2
    # "lower" bound set by bins_mag (maglim shouldn't be 0)
    delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
    delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2
    # Probability that a star scatters into the observable magnitude window
    # in each band, assuming Gaussian photometric errors.
    cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
    cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
    cdf = cdf_1*cdf_2
    # NOTE(review): the ``== True`` / ``== False`` / else chain below is a
    # deliberate three-way dispatch (band_1_detection may be neither True nor
    # False, in which case completeness from both bands is applied).
    if method is None or method == 'none':
        comp_cdf = cdf
    elif self.band_1_detection == True:
        comp = mask.mask_1.completeness(mag_1, method=method)
        comp_cdf = comp*cdf
    elif self.band_1_detection == False:
        comp =mask.mask_2.completeness(mag_2, method=method)
        comp_cdf = comp*cdf
    else:
        comp_1 = mask.mask_1.completeness(mag_1, method=method)
        comp_2 = mask.mask_2.completeness(mag_2, method=method)
        comp_cdf = comp_1*comp_2*cdf
    # PDF-weighted sum over isochrone points, then expand to interior pixels.
    observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
    return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
def observableFractionMMD(self, mask, distance_modulus, mass_min=0.1):
    """Observable fraction derived by collapsing the signal MMD.

    NOTE: ``mass_min`` is accepted for interface parity with the other
    ``observableFraction*`` methods but is not forwarded to ``signalMMD``.
    """
    # This can be done faster...
    logger.info('Calculating observable fraction from MMD')
    mmd = self.signalMMD(mask, distance_modulus)
    # Collapse both magnitude axes, then select the interior ROI pixels.
    per_pixel = mmd.sum(axis=-1).sum(axis=-1)
    return per_pixel[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
# Default implementation used for the observable-fraction calculation.
observable_fraction = observableFractionCMD
# Backwards-compatible camelCase alias for the same method.
observableFraction = observable_fraction
def signalMMD(self, mask, distance_modulus, mass_min=0.1, nsigma=5, delta_mag=0.03, mass_steps=1000, method='step'):
roi = mask.roi
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_steps=mass_steps,mass_min=mass_min,full_data_range=False)
mag_1 = mag_1+distance_modulus
mag_2 = mag_2+distance_modulus
mask_1,mask_2 = mask.mask_roi_unique.T
mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)
# Set mag_err for mask==0 to epsilon
mag_err_1[mask_1==0] *= -np.inf
mag_err_2[mask_2==0] *= -np.inf
#edges_mag = np.arange(mask.roi.bins_mag[0] - (0.5*delta_mag),
# mask.roi.bins_mag[-1] + (0.5*delta_mag),
# delta_mag)
#nedges = edges_mag.shape[0]
nedges = np.rint((roi.bins_mag[-1]-roi.bins_mag[0])/delta_mag)+1
edges_mag,delta_mag = np.linspace(roi.bins_mag[0],roi.bins_mag[-1],nedges,retstep=True)
edges_mag_1 = edges_mag_2 = edges_mag
nbins = nedges - 1
mag_err_1_max = mag_err_1.max(axis=0)
mag_err_2_max = mag_err_2.max(axis=0)
max_idx_1 = np.searchsorted(edges_mag[:-1],mag_1+nsigma*mag_err_1_max)
min_idx_1 = np.searchsorted(edges_mag[:-1],mag_1-nsigma*mag_err_1_max)
max_idx_2 = np.searchsorted(edges_mag[:-1],mag_2+nsigma*mag_err_1_max)
min_idx_2 = np.searchsorted(edges_mag[:-1],mag_2-nsigma*mag_err_1_max)
# Select only isochrone values that will contribute to the MMD space
sel = (max_idx_1>0)&(min_idx_1<nbins)&(max_idx_2>0)&(min_idx_2<nbins)
if sel.sum() == 0:
msg = 'No isochrone points in magnitude selection range'
raise Exception(msg)
mag_1,mag_2 = mag_1[sel],mag_2[sel]
mag_err_1,mag_err_2 = mag_err_1[:,sel],mag_err_2[:,sel]
mass_pdf = mass_pdf[sel]
mag_err_1_max = mag_err_1.max(axis=0)
mag_err_2_max = mag_err_2.max(axis=0)
| |
- Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
timeseries_dump_dir = "/tmp/skyline/boundary/" + algorithm # nosec
self.mkdir_p(timeseries_dump_dir)
timeseries_dump_file = timeseries_dump_dir + "/" + metric_name + ".json"
with open(timeseries_dump_file, 'w+') as f:
f.write(str(timeseries))
f.close()
# Check if a metric has its own unique BOUNDARY_METRICS alert
# tuple, this allows us to paint an entire metric namespace with
# the same brush AND paint a unique metric or namespace with a
# different brush or scapel
has_unique_tuple = False
run_tupple = False
boundary_metric_tuple = (base_name, algorithm, metric_expiration_time, metric_min_average, metric_min_average_seconds, metric_trigger, alert_threshold, metric_alerters)
wildcard_namespace = True
for metric_tuple in BOUNDARY_METRICS:
if not has_unique_tuple:
CHECK_MATCH_PATTERN = metric_tuple[0]
check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
pattern_match = check_match_pattern.match(base_name)
if pattern_match:
if metric_tuple[0] == base_name:
wildcard_namespace = False
if not has_unique_tuple:
if boundary_metric_tuple == metric_tuple:
has_unique_tuple = True
run_tupple = True
if ENABLE_BOUNDARY_DEBUG:
logger.info('unique_tuple:')
logger.info('boundary_metric_tuple: %s' % str(boundary_metric_tuple))
logger.info('metric_tuple: %s' % str(metric_tuple))
if not has_unique_tuple:
if wildcard_namespace:
if ENABLE_BOUNDARY_DEBUG:
logger.info('wildcard_namespace:')
logger.info('boundary_metric_tuple: %s' % str(boundary_metric_tuple))
run_tupple = True
else:
if ENABLE_BOUNDARY_DEBUG:
logger.info('wildcard_namespace: BUT WOULD NOT RUN')
logger.info('boundary_metric_tuple: %s' % str(boundary_metric_tuple))
if ENABLE_BOUNDARY_DEBUG:
logger.info('WOULD RUN run_selected_algorithm = %s' % run_tupple)
if run_tupple:
# @added 20181126 - Task #2742: Update Boundary
# Feature #2034: analyse_derivatives
# Convert the values of metrics strictly increasing monotonically
# to their deriative products
known_derivative_metric = False
try:
derivative_metrics = list(self.redis_conn.smembers('derivative_metrics'))
except:
derivative_metrics = []
redis_metric_name = '%s%s' % (settings.FULL_NAMESPACE, str(base_name))
if redis_metric_name in derivative_metrics:
known_derivative_metric = True
if known_derivative_metric:
try:
non_derivative_monotonic_metrics = settings.NON_DERIVATIVE_MONOTONIC_METRICS
except:
non_derivative_monotonic_metrics = []
skip_derivative = in_list(redis_metric_name, non_derivative_monotonic_metrics)
if skip_derivative:
known_derivative_metric = False
if known_derivative_metric:
try:
derivative_timeseries = nonNegativeDerivative(timeseries)
timeseries = derivative_timeseries
except:
logger.error('error :: nonNegativeDerivative failed')
# Submit the timeseries and settings to run_selected_algorithm
anomalous, ensemble, datapoint, metric_name, metric_expiration_time, metric_min_average, metric_min_average_seconds, metric_trigger, alert_threshold, metric_alerters, algorithm = run_selected_algorithm(
timeseries, metric_name,
metric_expiration_time,
metric_min_average,
metric_min_average_seconds,
metric_trigger,
alert_threshold,
metric_alerters,
autoaggregate,
autoaggregate_value,
algorithm
)
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: analysed - %s' % (metric_name))
# @added 20171214 - Bug #2232: Expiry boundary last_seen keys appropriately
# If it's not anomalous, add it to list
if not anomalous:
not_anomalous_metric = [datapoint, metric_name, metric_expiration_time, metric_min_average, metric_min_average_seconds, metric_trigger, alert_threshold, metric_alerters, algorithm]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous_metrics.append(not_anomalous_metric)
try:
self.redis_conn.sadd('boundary.not_anomalous_metrics', str(not_anomalous_metric))
except Exception as e:
logger.error('error :: could not add %s to Redis set boundary.not_anomalous_metrics: %s' % (
str(not_anomalous_metric), e))
else:
anomalous = False
if ENABLE_BOUNDARY_DEBUG:
logger.info('debug :: more unique metric tuple not analysed - %s' % (metric_name))
# If it's anomalous, add it to list
if anomalous:
anomalous_metric = [datapoint, metric_name, metric_expiration_time, metric_min_average, metric_min_average_seconds, metric_trigger, alert_threshold, metric_alerters, algorithm]
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(anomalous_metric)
try:
self.redis_conn.sadd('boundary.anomalous_metrics', str(anomalous_metric))
except Exception as e:
logger.error('error :: could not add %s to Redis set boundary.anomalous_metrics: %s' % (
str(anomalous_metric), e))
# Get the anomaly breakdown - who returned True?
triggered_algorithms = []
for index, value in enumerate(ensemble):
if value:
anomaly_breakdown[algorithm] += 1
triggered_algorithms.append(algorithm)
# If Crucible or Panorama are enabled determine details
determine_anomaly_details = False
if settings.ENABLE_CRUCIBLE and settings.BOUNDARY_CRUCIBLE_ENABLED:
determine_anomaly_details = True
if settings.PANORAMA_ENABLED:
determine_anomaly_details = True
if determine_anomaly_details:
metric_timestamp = str(int(timeseries[-1][0]))
from_timestamp = str(int(timeseries[1][0]))
timeseries_dir = base_name.replace('.', '/')
# If Panorama is enabled - create a Panorama check
if settings.PANORAMA_ENABLED:
# Note:
# The values are enclosed is single quoted intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
# @modified 20171216 - Task #2236: Change Boundary to only send to Panorama on alert
# added_at now passedas an argument to spin_process so that the panaroma_anomaly_file
# can be moved from SKYLINE_TMP_DIR to the PANORAMA_CHECK_PATH
# added_at = str(int(time()))
source = 'graphite'
panaroma_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = [\'%s\']\n' \
'triggered_algorithms = [\'%s\']\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
metric_timestamp, str(algorithm), str(algorithm),
skyline_app, source, this_host, added_at)
# Create an anomaly file with details about the anomaly
# @modified 20171214 - Task #2236: Change Boundary to only send to Panorama on alert
# Only send to Panorama IF Boundary is going to alert,
# so here the file is written to SKYLINE_TMP_DIR
# instead and moved in def run() if an alert is sent
# panaroma_anomaly_file = '%s/%s.%s.txt' % (
# settings.PANORAMA_CHECK_PATH, added_at,
# base_name)
# @modified 20171228 - Task #2236: Change Boundary to only send to Panorama on alert
# Added algorithm as it is required if the metric has
# multiple rules covering a number of algorithms
tmp_panaroma_anomaly_file = '%s/%s.%s.%s.panorama_anomaly.txt' % (
settings.SKYLINE_TMP_DIR, added_at, str(algorithm),
base_name)
try:
write_data_to_file(
skyline_app, tmp_panaroma_anomaly_file, 'w',
panaroma_anomaly_data)
logger.info('added tmp panorama anomaly file :: %s' % (tmp_panaroma_anomaly_file))
except:
logger.error('error :: failed to add tmp panorama anomaly file :: %s' % (tmp_panaroma_anomaly_file))
logger.info(traceback.format_exc())
# If crucible is enabled - save timeseries and create a
# crucible check
if settings.ENABLE_CRUCIBLE and settings.BOUNDARY_CRUCIBLE_ENABLED:
crucible_anomaly_dir = settings.CRUCIBLE_DATA_FOLDER + '/' + timeseries_dir + '/' + metric_timestamp
if not os.path.exists(crucible_anomaly_dir):
if python_version == 2:
mode_arg = int('0755')
os.makedirs(crucible_anomaly_dir, mode_arg)
if python_version == 3:
os.makedirs(crucible_anomaly_dir, mode=0o755)
# Note:
# Due to only one algorithm triggering here the
# algorithm related arrays here are a different format
# to there output format in analyzer
# Note:
# The value is enclosed is single quoted intentionally
# as the imp.load_source used in crucible results in a
# shift in the decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
crucible_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'anomaly_dir = \'%s\'\n' \
'graphite_metric = True\n' \
'run_crucible_tests = False\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(datapoint), from_timestamp,
metric_timestamp, str(algorithm),
triggered_algorithms, crucible_anomaly_dir,
skyline_app, metric_timestamp)
# Create an anomaly file with details about the anomaly
crucible_anomaly_file = '%s/%s.txt' % (crucible_anomaly_dir, base_name)
with open(crucible_anomaly_file, 'w') as fh:
fh.write(crucible_anomaly_data)
if python_version == 2:
os.chmod(crucible_anomaly_file, 0644)
if python_version == 3:
os.chmod(crucible_anomaly_file, mode=0o644)
logger.info('added crucible anomaly file :: %s/%s.txt' % (crucible_anomaly_dir, base_name))
# Create timeseries json file with the timeseries
json_file = '%s/%s.json' % (crucible_anomaly_dir, base_name)
timeseries_json = str(timeseries).replace('[', '(').replace(']', ')')
with open(json_file, 'w') as fh:
# timeseries
fh.write(timeseries_json)
if python_version == 2:
os.chmod(json_file, 0644)
if python_version == 3:
os.chmod(json_file, mode=0o644)
logger.info('added crucible timeseries file :: %s/%s.json' % (crucible_anomaly_dir, base_name))
# Create a crucible check file
crucible_check_file = '%s/%s.%s.txt' % (settings.CRUCIBLE_CHECK_PATH, metric_timestamp, base_name)
with open(crucible_check_file, 'w') as fh:
fh.write(crucible_anomaly_data)
if python_version == 2:
os.chmod(crucible_check_file, 0644)
if python_version == 3:
os.chmod(crucible_check_file, mode=0o644)
logger.info('added crucible check :: %s,%s' % (base_name, metric_timestamp))
# It could have been deleted by the Roomba
except TypeError:
exceptions['DeletedByRoomba'] += 1
except TooShort:
exceptions['TooShort'] += 1
except Stale:
exceptions['Stale'] += 1
except Boring:
exceptions['Boring'] += 1
except:
exceptions['Other'] += 1
logger.info("exceptions['Other'] traceback follows:")
logger.info(traceback.format_exc())
# Add values to the queue so the parent process can collate
for key, value in anomaly_breakdown.items():
self.anomaly_breakdown_q.put((key, value))
for key, value in exceptions.items():
self.exceptions_q.put((key, value))
def run(self):
"""
Called when the process intializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now | |
# -*- coding: utf-8 -*-
# Copyright (c) Vlachos Group, Jung Group
# GNU v3.0 license
__author__ = '<NAME>'
__copyright__ = "Copyright 2019, Vlachos Group, Jung Group"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "July 31, 2019"
import numpy as np
import json
import os
from collections import defaultdict
import logging
from time import time
from datetime import datetime
from pymatgen import Element, Structure, Molecule, Lattice
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
import networkx as nx
import networkx.algorithms.isomorphism as iso
from scipy.spatial.distance import cdist,pdist,squareform
from .Data import DataLoader
__all__ = [
'UniversalLoader',
'UniversalLoaderInputWriter'
]
# TQDM substitute for stdout
class Progress(object):
    """Lightweight progress reporter for iterables — a tqdm substitute
    that writes through the ``logging`` module instead of a console bar.

    Wrap an iterator together with its expected item count; every
    ``step`` items a line with throughput and a crude ETA is logged.
    """

    def __init__(self, iterator, ndata, step=100):
        # Force the root logger to INFO so progress lines are visible.
        logging.getLogger().setLevel(logging.INFO)
        self.iter = iterator.__iter__()
        self.t = time()      # start time, basis for rate / ETA
        self.ndata = ndata   # expected total number of items
        self.step = step     # emit a log line every `step` items
        self.i = 0           # items consumed so far
        logging.info('%s 0 / %d' % (datetime.now().strftime("[%H:%M:%S]"),
                                    self.ndata))

    def __iter__(self):
        return self

    def __next__(self):
        self.i += 1
        if self.i % self.step == 0:
            elapsed = time() - self.t
            logging.info('%s %d / %d | %.2f/%d sec/data | ~%.2f sec left'
                         % (datetime.now().strftime("[%H:%M:%S]"),
                            self.i, self.ndata,
                            elapsed / self.i * self.step, self.step,
                            (self.ndata - self.i) / self.i * elapsed))
        return next(self.iter)
def InputReader(path):
    """Read a primitive-cell or datum input file.

    The primitive-cell format is::

        [comment]
        [ax][ay][az][pbc]
        [bx][by][bz][pbc]
        [cx][cy][cz][pbc]
        [number of spectator site type][number of active site type]
        [os1][os2][os3]...
        [number sites]
        [site1a][site1b][site1c][site type]
        ...
        [number of data]
        [datum 1 name]
        ...

    - ax, ay, ... are the cell basis vectors
    - pbc is T or F, indicating the periodic boundary condition
    - os# is the name of a possible occupation state (interpreted as string)
    - site coordinates are scaled (fractional) coordinates
    - site type is S1, S2, ... (spectator) or A1, A2, ... (active),
      with its index appended

    A datum file is similar, but its first line must start with ``#y=``
    followed by the trained property value, the basis-vector lines carry
    no pbc flag, the count/occupation/data-name sections are absent, and
    each active-site line carries a trailing occupation state.

    Parameters
    ----------
    path : str
        Path to the input file.

    Returns
    -------
    tuple
        Datum file:        ``(y, cell, coord, st, oss)``
        Primitive cell:    ``(cell, pbc, coord, st, ns, na, aos, datanames)``
        where ``y`` is the property value, ``cell`` a 3x3 array, ``pbc``
        a length-3 bool array, ``coord`` an (nS, 3) array of scaled
        positions, ``st`` the site-type strings, ``oss`` the occupation
        states of active sites, ``ns``/``na`` the spectator/active
        site-type counts, ``aos`` the possible occupation states and
        ``datanames`` the data-point names.
    """
    with open(path) as f:
        # Strip both Unix and Windows line endings: the original only
        # stripped '\n', leaving '\r' attached to data names on CRLF files.
        lines = [line.rstrip('\r\n') for line in f]
    nl = 0
    # A datum file announces itself with '#y=<value>' on the first line;
    # a primitive-cell file starts with a free-form comment.
    if '#y=' in lines[nl]:
        y = float(lines[nl][3:])
        datum = True
    else:
        y = None
        datum = False
    nl += 1
    # Cell basis vectors; primitive-cell files append a T/F periodic
    # boundary flag to each vector.
    cell = np.zeros((3, 3))
    pbc = np.array([True, True, True])
    for i in range(3):
        tokens = lines[nl].split()
        cell[i, :] = [float(tok) for tok in tokens[0:3]]
        if not datum and tokens[3] == 'F':
            pbc[i] = False
        nl += 1
    # Site-type counts and allowed occupation states (primitive cell only).
    if not datum:
        tokens = lines[nl].split()
        ns = int(tokens[0])
        na = int(tokens[1])
        nl += 1
        aos = lines[nl].split()
        nl += 1
    # Scaled site coordinates, site types and (datum only) occupancies.
    nS = int(lines[nl])
    nl += 1
    coord = np.zeros((nS, 3))
    st = []
    oss = []
    for i in range(nS):
        tokens = lines[nl].split()
        coord[i, :] = [float(tok) for tok in tokens[0:3]]
        st.append(tokens[3])
        # Active-site lines in a datum carry a fifth occupation token.
        if datum and len(tokens) == 5:
            oss.append(tokens[4])
        nl += 1
    # Data-point names (primitive cell only).
    if not datum:
        nd = int(lines[nl])
        nl += 1
        datanames = []
        for i in range(nd):
            datanames.append(lines[nl])
            nl += 1
    if datum:
        return y, cell, coord, st, oss
    return cell, pbc, coord, st, ns, na, aos, datanames
def UniversalLoaderInputWriter(path, y, cell, coord, st, oss):
    """Writes datum into file.

    This can be used to print out the input format of the datum you have.

    Bug fix: the original tested ``ss == 'S1'`` to detect spectator
    sites, so any other spectator type (S2, S3, ...) fell into the
    active branch and wrongly consumed an occupation state from *oss*.
    Spectator sites are now recognized by their 'S' prefix, matching the
    documented S#/A# site-type convention.

    Parameters
    ----------
    path : string. path to file for writing.
    y : float. target property value
    cell : 3 x 3. list of list of float. cell basis vectors
    coord : ns x 3. list of list of float. scaled positions of each site.
        ns is the number of sites.
    st : ns. list of string. site type for each site ('S#' or 'A#').
    oss : nsa. list of string. occupancy of each active site, in order
        of appearance in coord. nsa is the number of active sites.
    """
    out = ['#y=%e' % y]
    for v in cell:
        out.append('%15.8e %15.8e %15.8e' % (v[0], v[1], v[2]))
    out.append(str(len(st)))
    n = 0  # index into oss: advances only on active sites
    for xyz, site_type in zip(coord, st):
        if site_type.startswith('S'):
            # Spectator sites carry no occupation state.
            out.append('%15.12f %15.12f %15.12f %s'
                       % (xyz[0], xyz[1], xyz[2], site_type))
        else:
            out.append('%15.12f %15.12f %15.12f %s %s'
                       % (xyz[0], xyz[1], xyz[2], site_type, oss[n]))
            n += 1
    with open(path, 'w') as f:
        f.write('\n'.join(out) + '\n')
class SiteEnvironment(object):
def __init__(self, pos, sitetypes, env2config, permutations, cutoff,
             Grtol=0.0, Gatol=0.01, rtol=0.01, atol=0.0, tol=0.01, grtol=0.01):
    """ Initialize site environment

    This class contains local site environment information. This is used
    to find the neighborlist in the datum (see GetMapping).

    Parameters
    ----------
    pos : n x 3 list or numpy array of (non-scaled) positions. n is the
        number of atoms.
    sitetypes : n list of string. String must be S or A followed by a
        number. S indicates a spectator site and A indicates an active
        site.
    permutations : p x n list of list of integer. p is the permutation
        index and n is the number of sites.
    cutoff : float. cutoff used for pooling neighbors. for aesthetics only
    Grtol : relative tolerance in distance for forming an edge in graph
    Gatol : absolute tolerance in distance for forming an edge in graph
    rtol : relative tolerance in rmsd in distance for graph matching
    atol : absolute tolerance in rmsd in distance for graph matching
    tol : maximum tolerance of position RMSD to decide whether two
        environments are the same
    grtol : tolerance for deciding symmetric nodes

    NOTE(review): the ``grtol`` argument is currently ignored — the body
    hard-codes ``self.grtol = 1e-3`` below; confirm which value is
    intended.  ``atol`` likewise does not reach the edge matcher (a
    literal 0 is passed instead).
    """
    self.pos = pos
    self.sitetypes = sitetypes
    # Indices of the active ('A*') sites; used when mapping environments
    # back to configurations.
    self.activesiteidx = [i for i, s in enumerate(self.sitetypes) if 'A' in s]
    # Count of each site type, chemical-formula style.
    self.formula = defaultdict(int)
    for s in sitetypes:
        self.formula[s] += 1
    self.permutations = permutations
    self.env2config = env2config
    self.cutoff = cutoff
    # Set up site environment matcher
    self.tol = tol
    # Graphical option
    self.Grtol = Grtol
    self.Gatol = Gatol
    # tolerance for grouping nodes
    # NOTE(review): hard-codes 1e-3 and shadows the `grtol` parameter
    # (default 0.01) — confirm whether the parameter should be honored.
    self.grtol = 1e-3
    # determine minimum distance between sitetypes.
    # This is used to determine the existence of an edge
    dists = squareform(pdist(pos))
    mindists = defaultdict(list)
    for i, row in enumerate(dists):
        # Collect, per site-type pair, this site's distances to every
        # other site, then keep the minimum for this row.
        row_dists = defaultdict(list)
        for j in range(0, len(sitetypes)):
            if i == j:
                continue
            # Sort by bond
            row_dists[frozenset((sitetypes[i], sitetypes[j]))].append(dists[i, j])
        for pair in row_dists:
            mindists[pair].append(np.min(row_dists[pair]))
    # You want to maximize this in order to make sure every node gets an edge
    self.mindists = {}
    for pair in mindists:
        self.mindists[pair] = np.max(mindists[pair])
    # construct graph
    self.G = self._ConstructGraph(pos, sitetypes)
    # matcher options: nodes must agree on their 'n' label; edges match
    # on distance attribute 'd' within relative tolerance rtol.
    self._nm = iso.categorical_node_match('n', '')
    self._em = iso.numerical_edge_match('d', 0, rtol, 0)
def _ConstructGraph(self, pos, sitetypes):
    """Returns local environment graph using networkx and
    tolerance specified.

    Nodes are labeled by (distance rank from the centroid + site type),
    so symmetric sites that sit at the same radius get the same label;
    edges connect site pairs whose distance is at or below the
    characteristic minimum distance recorded for their site-type pair.

    parameters
    ----------
    pos: ns x 3. coordinates of positions. ns is the number of sites.
    sitetypes: ns. sitetype for each site

    return
    ------
    networkx graph used for matching site positions in
    datum.
    """
    # construct graph
    G = nx.Graph()
    # Distance of every site from the environment centroid.
    dists = cdist([[0, 0, 0]], pos - np.mean(pos, 0))[0]
    sdists = np.sort(dists)
    # Deduplicate the sorted radii within grtol, so near-equal radii
    # collapse to a single shell.
    #https://stackoverflow.com/questions/37847053/uniquify-an-array-list-with-a-tolerance-in-python-uniquetol-equivalent
    uniquedists = sdists[~(np.triu(np.abs(sdists[:, None] - sdists) <= self.grtol, 1)).any(0)]
    # Rank (shell index) of each site among the unique radii.
    orderfromcenter = np.digitize(dists, uniquedists)
    # Add nodes
    for i, o in enumerate(orderfromcenter):
        G.add_node(i, n=str(o) + sitetypes[i])
    # Add edge. distance is edge attribute
    dists = pdist(pos); n = 0
    for i in range(len(sitetypes)):
        for j in range(i + 1, len(sitetypes)):
            # Edge when the pair distance is below the per-pair minimum,
            # or equal to it within the Gatol/Grtol tolerances
            # (an isclose-style test).
            if dists[n] < self.mindists[frozenset((sitetypes[i], sitetypes[j]))] or\
               (abs(self.mindists[frozenset((sitetypes[i], sitetypes[j]))] - dists[n]) <= self.Gatol + self.Grtol * abs(dists[n])):
                G.add_edge(i, j, d=dists[n])
            n += 1
    return G
def __repr__(self):
s = '<' + self.sitetypes[0]+\
'|%i active neighbors'%(len([s for s in self.sitetypes if 'A' in s])-1)+\
'|%i spectator neighbors'%len([s for s in self.sitetypes if 'S' in s])+\
'|%4.2f Ang Cutoff'%self.cutoff + '| %i permutations>'%len(self.permutations)
return s
def __eq__(self,o):
"""Local environment comparison is done by comparing represented site
"""
if not isinstance(o,SiteEnvironment):
raise ValueError
return self.sitetypes[0] == o.sitetypes[0]
def __ne__(self,o):
"""Local environment comparison is done by comparing represented site
"""
if isinstance(o,SiteEnvironment):
raise ValueError
return not self.__eq__(o)
def GetMapping(self,env,path=None):
| |
<filename>epistasis/__init__.py
__author__ = 'Paul. Made OO by MF'
__version__ = '1.3'
__date__ = '3 Sept 2020'
import numpy as np
#from scipy.stats import ttest_ind_from_stats as ttest
from warnings import warn
import math, random
from .epi_aux_mixin import EpiAuxMixin as _EA
from .epi_base_mixin import EpiBaseMixin as _EB
from typing import List, Tuple, Union, Dict, Iterable
class Epistatic(_EA, _EB):
"""
The original functionality of the script is retained as the class method `user_input` which will ask for input.
The altered usage has a way of creating the scheme thusly:
Epistatic.create_input_scheme('C', '3', '3', 'test.xlsx')
Running from file and calculating and saving:
Epistatic.from_file('C', 'raw.xlsx').calculate().save('wow.xlsx')
Running from panda table:
Epistatic.from_pandas('C',table)
Running from values:
Epistatic(your_study, mutation_number,replicate_number,replicate_list,mutation_names, mutant_list,foundment_values,data_array,replicate_matrix)
Methods:
* create_combination
* mean_and_sd_maker
* origin_finder
* please_more_combinations
* table_filler
* theoretical_stats
* value_list_sorter
* what_epistasis_sign
Class method: user_input for interactive input. (no parameters! `Epistasis.user_input()`)
Attributes:
TODO
The output can be accessed via .theoretical_results and .experimental_results pandas dataframes.
"""
# ============== Central method ====================================================================================
def calculate(self):
    """Run the full epistasis pipeline on the loaded data.

    Computes per-mutant experimental means/standard errors, enumerates
    and orders the mutation combinations, builds the final sign/label
    tables and the theoretical statistics, then returns ``self`` so
    calls can be chained (``Epistatic.from_file(...).calculate().save(...)``).

    :raises AssertionError: if no data has been loaded.
    :return: self
    """
    # Bug fix: the original guard was `type(self.foundment_values) is None`,
    # which is always False (type() never returns None), so missing data
    # was never detected.
    if self.foundment_values is None:
        raise AssertionError('No data')
    # This function gives a tuple (dictionary of mutants associated with mean and std, array of mean and std)
    if not self.mean_and_sd_dic:
        self.mean_and_sd_dic, array_mean_and_sd = self.mean_and_sd_maker()
    else:
        # Bug fix: the original left `array_mean_and_sd` unbound on this
        # path (NameError below); rebuild it from the cached dict, which
        # is exactly what mean_and_sd_maker returns as its second value.
        array_mean_and_sd = list(self.mean_and_sd_dic.values())
    # here we just take the first element of the tuple,
    # which is the dictionarry. I frankly don't even remember why I did a tuple and not just the dictionary but hey)
    # line with Mutant_number
    # self.mean_and_sd_array = np.reshape(self.mean_and_sd_maker(data_array)[1], ((Mutant_number), 2))
    self.mean_and_sd_array = np.reshape(array_mean_and_sd, (len(self.create_combination()), 2))
    origins = self.origin_finder()
    all_combinations = self.please_more_combinations(origins)
    # ## here will be made the combinations table
    count_list = []
    for elt in all_combinations:
        count_list.append((elt[0]).count(1))
    count_list.sort()  # this is just a variable coresponding to the number of combinations
    ordered_combs = []
    for elt in count_list:
        for elt2 in all_combinations:
            # NOTE(review): removing from `all_combinations` while
            # iterating it skips the element after each removal; the
            # result appears to rely on `count_list` repeating counts —
            # confirm before restructuring.
            if list(elt2[0]).count(1) == elt:
                all_combinations.remove(elt2)
                ordered_combs.append(elt2)
    # I think this was to remove any potential duplicate of combinations that somehow ended up in the list
    self.comb_index = [f"Combination n°{elt}" for elt in range(1, len(ordered_combs) + 1)]
    # this line is important for the final table, it gives a proper name to each combination
    self.combs_only = [elt[1] for elt in ordered_combs]
    # this gives a list of the mutant combinations only
    signs_only = []
    for elt in ordered_combs:
        signs_only.append(elt[0])
    # same as above but for the signs only
    reshaped_signs = np.reshape(signs_only, ((len(signs_only), (len(self.mutation_names)))))
    # in the case of 2 mutants only the math needs a hack or (2,1) => (1,1) fails. MF
    # reshaped_combs normally is a np.array of tuples... but gets cast "incorrectly" when there's only one.
    if len(signs_only) != 1:  # more than 2
        reshaped_combs = np.reshape(self.combs_only, (len(signs_only), 1))
    else:
        reshaped_combs = np.zeros((1, 1)).astype(object)
        reshaped_combs[0, 0] = self.combs_only[0]
    # reshaping everything to have a good format for the final table
    # so a method (the origin one) was altering foundament and here is reverted.
    # I made a copy of it as it was a fishy piece of code,
    # so no reconversion needed.
    self.final_comb_table = np.c_[reshaped_signs, reshaped_combs]  # .astype('object')
    self.final_comb_table[self.final_comb_table == 1] = "+"
    self.final_comb_table[self.final_comb_table == 0] = "-"
    temp = np.zeros(self.foundment_values.shape, dtype=str)  # purity of dtype
    temp[self.foundment_values == 1] = "+"
    temp[self.foundment_values == 0] = "-"  # reconverting all 1 and 0 into + and -
    self.foundment_values = np.c_[temp, self.mean_and_sd_array]
    # we also add the averages and standard deviation (experimental) to the sign matrix
    # this time for conversion, which is a little different albeit very close.
    # the "selectivity" or "conversion" difference (self.your_study) is handled now by the avgWT dyn property
    self.stats = self.get_theoretical_stats()  # List[Dict[str, Union[str, float]]]
    # this all_of_it value is all the data we need, across the program we complete it as it goes
    return self
# ============== property methods ==================================================================================
_avgWT = None  # cache for the avgWT property (computed on first access)

@property
def avgWT(self):
    """Reference level subtracted from mutant means.

    Zero in selectivity (S) mode; the wild-type experimental mean in
    wildtype-centred / conversion (C) mode.  The value is computed once
    and cached in ``_avgWT``.
    """
    if self._avgWT is None:
        if self.wildtype_centred:
            # C-mode: centre everything on the wild-type mean.
            self._avgWT = self.mean_and_sd_dic[self.WT][0]
        else:
            # S-mode: selectivity values need no offset.
            self._avgWT = 0
    return self._avgWT
@property
def WT(self):
    """The wild-type genotype key: all '-' signs, one per mutation.

    Asserts that the wild type is actually present in the measured data.
    """
    wildtype = '-' * self.mutation_number
    assert wildtype in self.mean_and_sd_dic, 'Please fill in WT or make a mutant the WT and remove datapoints without it.'
    return wildtype
# ============== Dependant methods =================================================================================
def create_combination(self) -> List[dict]:
    """Enumerate the +/- sign combinations over the mutations.

    Returns one dict per entry of ``mutant_list``, mapping each mutation
    name to '+' or '-', with all combinations distinct.

    Bug fix / robustness: the original drew random sign combinations and
    rejected duplicates, which was nondeterministic, could return MORE
    combinations than ``mutant_list`` has entries (the inner loop kept
    appending after the target was reached), and never terminated when
    ``len(mutant_list) > 2**mutation_number``.  This version enumerates
    deterministically with itertools.product and returns exactly
    ``len(mutant_list)`` combinations.

    :return: list of dicts
    """
    from itertools import islice, product
    names = self.mutation_names[:self.mutation_number]
    # Deterministic enumeration of every distinct sign assignment,
    # truncated to the number of mutants observed.
    all_signs = product("+-", repeat=self.mutation_number)
    wanted = len(self.mutant_list)
    return [dict(zip(names, signs)) for signs in islice(all_signs, wanted)]
def value_list_sorter(self, value_list):
    """Return *value_list* stably sorted by the number of '+' signs.

    Elements may be strings or lists of sign characters.  Equal counts
    keep their first-occurrence order (stable sort), matching the
    original dict-insertion-order behavior for unique inputs.

    Bug fix: the original round-tripped through a dict keyed on
    ``str(value)``, which silently DROPPED duplicate entries (the output
    was shorter than the input) and ran in O(n^2); a stable key sort
    preserves duplicates.

    :param value_list: iterable of sign combinations
    :return: new list, sorted ascending by '+' count
    """
    return sorted(value_list, key=lambda value: value.count("+"))
def table_filler(self, final_table1, final_value_list):
    """Copy the first ``mutation_number`` sign columns of
    *final_value_list* into *final_table1*, row by row, for every mutant.

    :param final_table1: target matrix (mutated in place)
    :param final_value_list: ordered sign rows to copy from
    :return: the filled *final_table1*
    """
    for col in range(self.mutation_number):
        for row in range(len(self.mutant_list)):
            final_table1[row][col] = final_value_list[row][col]
    return final_table1
def mean_and_sd_maker(self):
    """Compute the experimental average and spread for every mutant.

    Each row of ``data_array`` holds ``mutation_number`` sign columns
    followed by replicate measurements.  NaN replicates are ignored; the
    centre is the nan-mean (or nan-median when ``self.median`` is set)
    and the spread is the nan-std divided by sqrt(N valid replicates),
    i.e. a standard error.  Rows with no valid replicates get NaNs.
    In the final table these appear as "experimental average" and
    "experimental standard deviation".

    :return: (dict mutant-string -> [average, spread],
              list of the same [average, spread] pairs in insertion order)
    """
    stats_by_mutant = {}
    for row in self.data_array:  # e.g. [0. 0. 0. 40.4 37.2 35.8]
        replicates = np.array(row[self.mutation_number:]).astype(np.float64)
        mutant_key = self.strigify(row[:self.mutation_number])
        if self.median:
            center = float(np.nanmedian(replicates))
        else:
            center = float(np.nanmean(replicates))
        n_valid = np.count_nonzero(~np.isnan(replicates))
        if n_valid:  # non-empty row
            spread = float(np.nanstd(replicates)) / math.sqrt(n_valid)
            stats_by_mutant[mutant_key] = [center, spread]
        else:
            stats_by_mutant[mutant_key] = [np.nan, np.nan]
    return stats_by_mutant, list(stats_by_mutant.values())
def origin_finder(self):
"""
this is the first function that will permit to find possible combinations between mutqnts.
This one is useful to find double mutqnts. For exqmple [+ - + -] and [- + - +].
Returns a list of tuples of
* +-+ as a 1,0 list and the combination
[([1, 1, 0, 0], (2, 3)), ([1, 0, 1, 0], (2, 4)),
:param foundment_values: 2D array of + - +
:return:
"""
# I don't know why but this method alters foundment_values, which may not be intended? MF
# actually this makes a shallow copy... so shmeh
foundment_values = self.foundment_values ## 2D array of + - +
additivity_list = []
# foundment_values is a np.array of 1/0. however, user may have given a +/-
if foundment_values.dtype == np.dtype('<U1') or foundment_values.dtype == np.dtype('object'):
# formerly: np.any(foundment_values == '+') (FutureWarning)
foundment_values[foundment_values == "+"] = 1
# here I change the + and - for 1 and 0. This is useful for calculations
foundment_values[foundment_values == "-"] = 0
else:
pass
# print('Not +-', foundment_values.dtype)
i = 1
while i < len(foundment_values) - 1: # I go through the sign mqtrix
j = i
while j < len(
foundment_values) - 1: # and a second time, so I cqn isolqte two combinqtions qt q time qnd compare them
| |
<filename>lib/oeqa/selftest/cases/updater_qemux86_64.py
# pylint: disable=C0111,C0325
import os
import logging
import re
import subprocess
import unittest
from time import sleep
from uuid import uuid4
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
from testutils import qemu_launch, qemu_send_command, qemu_terminate, \
metadir, akt_native_run, verifyNotProvisioned, verifyProvisioned, \
qemu_bake_image, qemu_boot_image
class GeneralTests(OESelftestTestCase):
    """Build-level checks that do not require booting an image."""

    def test_credentials(self):
        """core-image-minimal must embed the SOTA provisioning
        credentials zip when SOTA_PACKED_CREDENTIALS is configured."""
        logger = logging.getLogger("selftest")
        logger.info('Running bitbake to build core-image-minimal')
        self.append_config('SOTA_CLIENT_PROV = "aktualizr-shared-prov"')
        # note: this also tests ostreepush/garagesign/garagecheck which are
        # omitted from other test cases
        bitbake('core-image-minimal')
        credentials = get_bb_var('SOTA_PACKED_CREDENTIALS')
        if credentials is None:
            # Nothing to verify without configured credentials.
            raise unittest.SkipTest("Variable 'SOTA_PACKED_CREDENTIALS' not set.")
        self.assertTrue(os.path.isfile(credentials), "File %s does not exist" % credentials)
        deploy_dir = get_bb_var('DEPLOY_DIR_IMAGE')
        image_name = get_bb_var('IMAGE_LINK_NAME', 'core-image-minimal')
        # The packed credentials zip must ship inside the image tarball.
        result = runCmd('tar -jtvf %s/%s.tar.bz2 | grep sota_provisioning_credentials.zip' %
                        (deploy_dir, image_name), ignore_status=True)
        self.assertEqual(result.status, 0, "Status not equal to 0. output: %s" % result.output)
class AktualizrToolsTests(OESelftestTestCase):
    """Tests for the aktualizr-native host tools (no image boot needed)."""

    @classmethod
    def setUpClass(cls):
        # Build the native tools once for all tests in this class.
        super(AktualizrToolsTests, cls).setUpClass()
        logger = logging.getLogger("selftest")
        logger.info('Running bitbake to build aktualizr-native tools')
        bitbake('aktualizr-native aktualizr-device-prov')
        bitbake('build-sysroots -c build_native_sysroot')

    def test_cert_provider_help(self):
        """The cert-provider binary should at least run and print help."""
        akt_native_run(self, 'aktualizr-cert-provider --help')

    def test_cert_provider_local_output(self):
        """Run aktualizr-cert-provider with local output and check that the
        client certificate, private key and root CA files exist and are
        non-empty."""
        bb_vars = get_bb_vars(['SOTA_PACKED_CREDENTIALS', 'T'], 'aktualizr-native')
        creds = bb_vars['SOTA_PACKED_CREDENTIALS']
        temp_dir = bb_vars['T']
        bb_vars_prov = get_bb_vars(['WORKDIR', 'libdir'], 'aktualizr-device-prov')
        config = bb_vars_prov['WORKDIR'] + '/sysroot-destdir' + bb_vars_prov['libdir'] + '/sota/conf.d/20-sota-device-cred.toml'
        akt_native_run(self, 'aktualizr-cert-provider -c {creds} -r -l {temp} -g {config}'
                       .format(creds=creds, temp=temp_dir, config=config))
        # Might be nice if these names weren't hardcoded.
        cert_path = temp_dir + '/var/sota/import/client.pem'
        self.assertTrue(os.path.isfile(cert_path), "Client certificate not found at %s." % cert_path)
        self.assertTrue(os.path.getsize(cert_path) > 0, "Client certificate at %s is empty." % cert_path)
        pkey_path = temp_dir + '/var/sota/import/pkey.pem'
        self.assertTrue(os.path.isfile(pkey_path), "Private key not found at %s." % pkey_path)
        self.assertTrue(os.path.getsize(pkey_path) > 0, "Private key at %s is empty." % pkey_path)
        ca_path = temp_dir + '/var/sota/import/root.crt'
        # BUG FIX: these two messages previously said "Client certificate",
        # copy-pasted from the client.pem checks above.
        self.assertTrue(os.path.isfile(ca_path), "Root CA certificate not found at %s." % ca_path)
        self.assertTrue(os.path.getsize(ca_path) > 0, "Root CA certificate at %s is empty." % ca_path)
class SharedCredProvTests(OESelftestTestCase):
    """Boot a qemux86-64 image built with shared-credential provisioning
    and verify the device provisions itself against the server."""

    def setUpLocal(self):
        # Add the qemu machine layer only if it is not already configured,
        # and remember whether we added it so tearDown can undo it.
        layer = "meta-updater-qemux86-64"
        result = runCmd('bitbake-layers show-layers')
        if re.search(layer, result.output) is None:
            self.meta_qemu = metadir() + layer
            runCmd('bitbake-layers add-layer "%s"' % self.meta_qemu)
        else:
            self.meta_qemu = None
        self.append_config('MACHINE = "qemux86-64"')
        self.append_config('SOTA_CLIENT_PROV = " aktualizr-shared-prov "')
        self.append_config('IMAGE_FSTYPES_remove = "ostreepush garagesign garagecheck"')
        self.append_config('SOTA_HARDWARE_ID = "plain_reibekuchen_314"')
        self.qemu, self.s = qemu_launch(machine='qemux86-64')

    def tearDownLocal(self):
        qemu_terminate(self.s)
        # Only remove the layer if this test added it.
        if self.meta_qemu:
            runCmd('bitbake-layers remove-layer "%s"' % self.meta_qemu, ignore_status=True)

    def qemu_command(self, command):
        """Run a shell command on the booted device over ssh."""
        return qemu_send_command(self.qemu.ssh_port, command)

    def test_provisioning(self):
        """Check the device hostname matches MACHINE, then verify it has
        provisioned with the configured hardware ID."""
        print('Checking machine name (hostname) of device:')
        stdout, stderr, retcode = self.qemu_command('hostname')
        self.assertEqual(retcode, 0, "Unable to check hostname. " +
                         "Is an ssh daemon (such as dropbear or openssh) installed on the device?")
        machine = get_bb_var('MACHINE', 'core-image-minimal')
        self.assertEqual(stderr, b'', 'Error: ' + stderr.decode())
        # Strip off line ending.
        value = stdout.decode()[:-1]
        self.assertEqual(value, machine,
                         'MACHINE does not match hostname: ' + machine + ', ' + value)
        hwid = get_bb_var('SOTA_HARDWARE_ID')
        verifyProvisioned(self, machine, hwid)
class SharedCredProvTestsNonOSTree(SharedCredProvTests):
    """Same provisioning check as SharedCredProvTests but on a plain poky
    (non-OSTree) image; inherits test_provisioning and tearDownLocal."""

    def setUpLocal(self):
        layer = "meta-updater-qemux86-64"
        result = runCmd('bitbake-layers show-layers')
        if re.search(layer, result.output) is None:
            self.meta_qemu = metadir() + layer
            runCmd('bitbake-layers add-layer "%s"' % self.meta_qemu)
        else:
            self.meta_qemu = None
        self.append_config('MACHINE = "qemux86-64"')
        self.append_config('SOTA_CLIENT_PROV = ""')
        self.append_config('IMAGE_FSTYPES_remove = "ostreepush garagesign garagecheck"')
        self.append_config('SOTA_HARDWARE_ID = "plain_reibekuchen_314"')
        # Plain poky distro with systemd; aktualizr installed explicitly
        # since the sota distro features are not active here.
        self.append_config('DISTRO = "poky"')
        self.append_config('DISTRO_FEATURES_append = " systemd"')
        self.append_config('VIRTUAL-RUNTIME_init_manager = "systemd"')
        self.append_config('PREFERRED_RPROVIDER_network-configuration ??= "networkd-dhcp-conf"')
        self.append_config('PACKAGECONFIG_pn-aktualizr = ""')
        self.append_config('SOTA_DEPLOY_CREDENTIALS = "1"')
        self.append_config('IMAGE_INSTALL_append += "aktualizr aktualizr-info aktualizr-shared-prov"')
        # No OSTree, so boot without u-boot.
        self.qemu, self.s = qemu_launch(machine='qemux86-64', uboot_enable='no')
class ManualControlTests(OESelftestTestCase):
    """Run aktualizr manually (systemd service disabled) and check that a
    single `aktualizr once` invocation fetches metadata."""

    def setUpLocal(self):
        layer = "meta-updater-qemux86-64"
        result = runCmd('bitbake-layers show-layers')
        if re.search(layer, result.output) is None:
            self.meta_qemu = metadir() + layer
            runCmd('bitbake-layers add-layer "%s"' % self.meta_qemu)
        else:
            self.meta_qemu = None
        self.append_config('MACHINE = "qemux86-64"')
        self.append_config('SOTA_CLIENT_PROV = " aktualizr-shared-prov "')
        # Keep the aktualizr service from starting on boot.
        self.append_config('SYSTEMD_AUTO_ENABLE_aktualizr = "disable"')
        self.append_config('IMAGE_FSTYPES_remove = "ostreepush garagesign garagecheck"')
        self.qemu, self.s = qemu_launch(machine='qemux86-64')

    def tearDownLocal(self):
        qemu_terminate(self.s)
        if self.meta_qemu:
            runCmd('bitbake-layers remove-layer "%s"' % self.meta_qemu, ignore_status=True)

    def qemu_command(self, command):
        """Run a shell command on the booted device over ssh."""
        return qemu_send_command(self.qemu.ssh_port, command)

    def test_manual_run_mode_once(self):
        """
        Disable the systemd service then run aktualizr manually
        """
        # Give the image time to boot; the service must NOT have created
        # its database yet.
        sleep(20)
        stdout, stderr, retcode = self.qemu_command('aktualizr-info')
        self.assertIn(b'Can\'t open database', stderr,
                      'Aktualizr should not have run yet' + stderr.decode() + stdout.decode())
        stdout, stderr, retcode = self.qemu_command('aktualizr once')
        stdout, stderr, retcode = self.qemu_command('aktualizr-info')
        self.assertIn(b'Fetched metadata: yes', stdout,
                      'Aktualizr should have run' + stderr.decode() + stdout.decode())
class DeviceCredProvTests(OESelftestTestCase):
    """Device-credential provisioning: image ships without credentials,
    then aktualizr-cert-provider pushes them onto the running device."""

    def setUpLocal(self):
        layer = "meta-updater-qemux86-64"
        result = runCmd('bitbake-layers show-layers')
        if re.search(layer, result.output) is None:
            self.meta_qemu = metadir() + layer
            runCmd('bitbake-layers add-layer "%s"' % self.meta_qemu)
        else:
            self.meta_qemu = None
        self.append_config('MACHINE = "qemux86-64"')
        self.append_config('SOTA_CLIENT_PROV = " aktualizr-device-prov "')
        # Credentials are deliberately NOT deployed into the image.
        self.append_config('SOTA_DEPLOY_CREDENTIALS = "0"')
        self.append_config('IMAGE_FSTYPES_remove = "ostreepush garagesign garagecheck"')
        self.qemu, self.s = qemu_launch(machine='qemux86-64')
        bitbake('build-sysroots -c build_native_sysroot')

    def tearDownLocal(self):
        qemu_terminate(self.s)
        if self.meta_qemu:
            runCmd('bitbake-layers remove-layer "%s"' % self.meta_qemu, ignore_status=True)

    def qemu_command(self, command):
        """Run a shell command on the booted device over ssh."""
        return qemu_send_command(self.qemu.ssh_port, command)

    def test_provisioning(self):
        """Device must be unprovisioned after boot, then provision once the
        cert provider has pushed credentials over ssh."""
        print('Checking machine name (hostname) of device:')
        stdout, stderr, retcode = self.qemu_command('hostname')
        self.assertEqual(retcode, 0, "Unable to check hostname. " +
                         "Is an ssh daemon (such as dropbear or openssh) installed on the device?")
        machine = get_bb_var('MACHINE', 'core-image-minimal')
        self.assertEqual(stderr, b'', 'Error: ' + stderr.decode())
        # Strip off line ending.
        value = stdout.decode()[:-1]
        self.assertEqual(value, machine,
                         'MACHINE does not match hostname: ' + machine + ', ' + value)
        verifyNotProvisioned(self, machine)
        # Run aktualizr-cert-provider.
        bb_vars = get_bb_vars(['SOTA_PACKED_CREDENTIALS'], 'aktualizr-native')
        creds = bb_vars['SOTA_PACKED_CREDENTIALS']
        bb_vars_prov = get_bb_vars(['WORKDIR', 'libdir'], 'aktualizr-device-prov')
        config = bb_vars_prov['WORKDIR'] + '/sysroot-destdir' + bb_vars_prov['libdir'] + '/sota/conf.d/20-sota-device-cred.toml'
        # BUG FIX: message previously read "Provisining".
        print('Provisioning at root@localhost:%d' % self.qemu.ssh_port)
        akt_native_run(self, 'aktualizr-cert-provider -c {creds} -t root@localhost -p {port} -s -u -r -g {config}'
                       .format(creds=creds, port=self.qemu.ssh_port, config=config))
        verifyProvisioned(self, machine)
class DeviceCredProvHsmTests(OESelftestTestCase):
    """Device-credential provisioning backed by a SoftHSM token: verify the
    HSM is uninitialized at boot, provision, then cross-check the slot
    reported by pkcs11-tool against softhsm2-util."""

    def setUpLocal(self):
        layer = "meta-updater-qemux86-64"
        result = runCmd('bitbake-layers show-layers')
        if re.search(layer, result.output) is None:
            self.meta_qemu = metadir() + layer
            runCmd('bitbake-layers add-layer "%s"' % self.meta_qemu)
        else:
            self.meta_qemu = None
        self.append_config('MACHINE = "qemux86-64"')
        self.append_config('SOTA_CLIENT_PROV = "aktualizr-device-prov-hsm"')
        self.append_config('SOTA_DEPLOY_CREDENTIALS = "0"')
        self.append_config('SOTA_CLIENT_FEATURES = "hsm"')
        self.append_config('IMAGE_INSTALL_append = " softhsm-testtoken"')
        self.append_config('IMAGE_FSTYPES_remove = "ostreepush garagesign garagecheck"')
        self.qemu, self.s = qemu_launch(machine='qemux86-64')
        bitbake('build-sysroots -c build_native_sysroot')

    def tearDownLocal(self):
        qemu_terminate(self.s)
        if self.meta_qemu:
            runCmd('bitbake-layers remove-layer "%s"' % self.meta_qemu, ignore_status=True)

    def qemu_command(self, command):
        """Run a shell command on the booted device over ssh."""
        return qemu_send_command(self.qemu.ssh_port, command)

    def test_provisioning(self):
        """End-to-end HSM provisioning flow; see class docstring."""
        print('Checking machine name (hostname) of device:')
        stdout, stderr, retcode = self.qemu_command('hostname')
        self.assertEqual(retcode, 0, "Unable to check hostname. " +
                         "Is an ssh daemon (such as dropbear or openssh) installed on the device?")
        machine = get_bb_var('MACHINE', 'core-image-minimal')
        self.assertEqual(stderr, b'', 'Error: ' + stderr.decode())
        # Strip off line ending.
        value = stdout.decode()[:-1]
        self.assertEqual(value, machine,
                         'MACHINE does not match hostname: ' + machine + ', ' + value)
        verifyNotProvisioned(self, machine)
        # Verify that HSM is not yet initialized.
        pkcs11_command = 'pkcs11-tool --module=/usr/lib/softhsm/libsofthsm2.so -O'
        stdout, stderr, retcode = self.qemu_command(pkcs11_command)
        self.assertNotEqual(retcode, 0, 'pkcs11-tool succeeded before initialization: ' +
                            stdout.decode() + stderr.decode())
        softhsm2_command = 'softhsm2-util --show-slots'
        stdout, stderr, retcode = self.qemu_command(softhsm2_command)
        self.assertNotEqual(retcode, 0, 'softhsm2-tool succeeded before initialization: ' +
                            stdout.decode() + stderr.decode())
        # Run aktualizr-cert-provider.
        bb_vars = get_bb_vars(['SOTA_PACKED_CREDENTIALS'], 'aktualizr-native')
        creds = bb_vars['SOTA_PACKED_CREDENTIALS']
        bb_vars_prov = get_bb_vars(['WORKDIR', 'libdir'], 'aktualizr-device-prov-hsm')
        config = bb_vars_prov['WORKDIR'] + '/sysroot-destdir' + bb_vars_prov['libdir'] + '/sota/conf.d/20-sota-device-cred-hsm.toml'
        akt_native_run(self, 'aktualizr-cert-provider -c {creds} -t root@localhost -p {port} -r -s -u -g {config}'
                       .format(creds=creds, port=self.qemu.ssh_port, config=config))
        # Verify that HSM is able to initialize.
        # Poll with increasing delays (~30 s total); for-else: fail only if
        # the loop never broke out.
        for delay in [5, 5, 5, 5, 10]:
            sleep(delay)
            p11_out, p11_err, p11_ret = self.qemu_command(pkcs11_command)
            hsm_out, hsm_err, hsm_ret = self.qemu_command(softhsm2_command)
            if (p11_ret == 0 and hsm_ret == 0 and hsm_err == b'' and
                    b'X.509 cert' in p11_out and b'present token' in p11_err):
                break
        else:
            self.fail('pkcs11-tool or softhsm2-tool failed: ' + p11_err.decode() +
                      p11_out.decode() + hsm_err.decode() + hsm_out.decode())
        self.assertIn(b'Initialized: yes', hsm_out, 'softhsm2-tool failed: ' +
                      hsm_err.decode() + hsm_out.decode())
        self.assertIn(b'User PIN init.: yes', hsm_out, 'softhsm2-tool failed: ' +
                      hsm_err.decode() + hsm_out.decode())
        # Check that pkcs11 output matches sofhsm output.
        p11_p = re.compile(r'Using slot [0-9] with a present token \((0x[0-9a-f]*)\)\s')
        p11_m = p11_p.search(p11_err.decode())
        self.assertTrue(p11_m, 'Slot number not found with pkcs11-tool: ' + p11_err.decode() + p11_out.decode())
        self.assertGreater(p11_m.lastindex, 0, 'Slot number not found with pkcs11-tool: ' +
                           p11_err.decode() + p11_out.decode())
        hsm_p = re.compile(r'Description:\s*SoftHSM slot ID (0x[0-9a-f]*)\s')
        hsm_m = hsm_p.search(hsm_out.decode())
        self.assertTrue(hsm_m, 'Slot number not found with softhsm2-tool: ' + hsm_err.decode() + hsm_out.decode())
        self.assertGreater(hsm_m.lastindex, 0, 'Slot number not found with softhsm2-tool: ' +
                           hsm_err.decode() + hsm_out.decode())
        self.assertEqual(p11_m.group(1), hsm_m.group(1), 'Slot number does not match: ' +
                         p11_err.decode() + p11_out.decode() + hsm_err.decode() + hsm_out.decode())
        verifyProvisioned(self, machine)
class IpSecondaryTests(OESelftestTestCase):
class Image:
def __init__(self, imagename, binaryname, machine='qemux86-64', bake=True, **kwargs):
self.machine = machine
self.imagename = imagename
self.boot_kwargs = kwargs
self.binaryname = binaryname
self.stdout = ''
self.stderr = ''
self.retcode = 0
if bake:
self.bake()
def bake(self):
self.configure()
qemu_bake_image(self.imagename)
def send_command(self, cmd, timeout=60):
stdout, stderr, retcode = qemu_send_command(self.qemu.ssh_port, cmd, timeout=timeout)
return str(stdout), str(stderr), retcode
def __enter__(self):
self.qemu, | |
dx * dx
x = nx
w += s * dx
if nx == nxL:
iL += 1
if nx == nxU:
iU += 1
if iL >= len(L) - 1:
break
if iU >= len(U) - 1:
break
return na
def func_f407b7da6ff34d83b31a2d34af60a647(U, cuts, G, part, L):
    """Sweep the region between lower chain L and upper chain U and append
    x-positions to ``cuts`` so that each vertical slice holds ``part`` of
    the doubled area; returns the first G-1 cut positions.

    U, L: polylines of (x, y) pairs covering the same x-range.
    part: target doubled area per slice. ``cuts`` is mutated in place.
    """
    # BUG FIX: iL/iU (segment indices into L and U) were never initialised,
    # so the function raised NameError on first use.
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]  # current vertical gap between the chains
    a = 0                  # doubled area accumulated in the open slice
    x = 0
    while True:
        # Slopes of the active segments of each chain.
        sL = (L[iL + 1][1] - L[iL][1]) * 1.0 / (L[iL + 1][0] - L[iL][0])
        sU = (U[iU + 1][1] - U[iU][1]) * 1.0 / (U[iU + 1][0] - U[iU][0])
        s = sU - sL            # rate at which the gap grows with x
        nxL = L[iL + 1][0]
        nxU = U[iU + 1][0]
        nx = min(nxL, nxU)     # next breakpoint of either chain
        # Doubled area of the trapezoid from x to nx.
        na = 2 * w * (nx - x) + s * (nx - x) * (nx - x)
        if a + na >= part:
            # The cut lies inside this trapezoid: solve the quadratic for dx.
            dx = (part - a) * 1.0 / (w + math.sqrt(w * w + (part - a) * s))
            x += dx
            a += 2 * w * dx + s * dx * dx
            cuts.append(x)
            w += s * dx
            a = 0
        else:
            dx = nx - x
            a += 2 * w * dx + s * dx * dx
            x = nx
            w += s * dx
            if nx == nxL:
                iL += 1
            if nx == nxU:
                iU += 1
            if iL >= len(L) - 1:
                break
            if iU >= len(U) - 1:
                break
    return cuts[:G - 1]
def func_7798a99b1d304c0990dddc58b2d2d71f(U, G, L):
    """Auto-generated slice: compute the doubled area between chains L and
    U and the per-slice target, then return the initial lower-chain index
    (always 0)."""
    area = 0
    # Doubled trapezoid areas: lower chain subtracts, upper chain adds.
    for p, q in zip(L, L[1:]):
        area -= (q[0] - p[0]) * (q[1] + p[1])
    for p, q in zip(U, U[1:]):
        area += (q[0] - p[0]) * (q[1] + p[1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    return iL
def func_adf883fbe0f046da83a4ffb31e0624a6(U, G, L):
    """Auto-generated slice: return the per-slice doubled-area target
    ``part`` for the region between chains L and U split into G parts."""
    area = 0
    # Doubled trapezoid areas: lower chain subtracts, upper chain adds.
    for p, q in zip(L, L[1:]):
        area -= (q[0] - p[0]) * (q[1] + p[1])
    for p, q in zip(U, U[1:]):
        area += (q[0] - p[0]) * (q[1] + p[1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    return part
def func_c5e363e31ef2458f990361e4b5a8d13e(U, G, L):
    """Auto-generated slice: compute area bookkeeping and return the (still
    empty) list of cut positions."""
    area = 0
    # Doubled trapezoid areas: lower chain subtracts, upper chain adds.
    for p, q in zip(L, L[1:]):
        area -= (q[0] - p[0]) * (q[1] + p[1])
    for p, q in zip(U, U[1:]):
        area += (q[0] - p[0]) * (q[1] + p[1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    return cuts
def func_13a5cfad57c34eef86d8bf2b18497c5d(U, G, L):
    """Auto-generated slice: return the doubled area enclosed between lower
    chain L and upper chain U."""
    area = 0
    # Doubled trapezoid areas: lower chain subtracts, upper chain adds.
    for p, q in zip(L, L[1:]):
        area -= (q[0] - p[0]) * (q[1] + p[1])
    for p, q in zip(U, U[1:]):
        area += (q[0] - p[0]) * (q[1] + p[1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    return area
def func_e8f537425d1b4adab2e94f28094e05bb(U, G, L):
    """Auto-generated slice: compute area bookkeeping and return the last
    loop index used (len(U) - 2 when U has at least two points).

    NOTE(review): raises NameError when both chains have < 2 points, same
    as the original — presumably never called that way.
    """
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    return i
def func_a9898413b0f44b0f88c55d5e5045e6bd(U, G, L):
    """Auto-generated slice: doubled area between chains L (subtracted)
    and U (added) split into G parts; returns the per-slice target."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return part
def func_26cbea671a79461f833f5c98977edd90(U, G, L):
    """Auto-generated slice: area bookkeeping; returns the initial
    lower-chain index (always 0)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return iL
def func_32b20e3845354332a25a63c72136f125(U, G, L):
    """Auto-generated slice: area bookkeeping; returns the initial
    upper-chain index (always 0)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return iU
def func_0a4b03284696468292755f0e96501686(U, G, L):
    """Auto-generated slice: returns the doubled area between chains
    L (subtracted) and U (added)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return area
def func_fec3ac37780549c08b5bf7af3bf3ac48(U, G, L):
    """Auto-generated slice: area bookkeeping; returns the last loop index
    used (len(U) - 2 when U has at least two points)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return i
def func_a32838c066694bbca29c8a40849f58ce(U, G, L):
    """Auto-generated slice: area bookkeeping; returns the (still empty)
    list of cut positions."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(L) - 1):
        area -= (L[i + 1][0] - L[i][0]) * (L[i + 1][1] + L[i][1])
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    return cuts
def func_e30d3f089b2d4fb5b802563327a16a87(U, G, L):
    """Auto-generated slice (upper-chain-only variant — the lower-chain
    subtraction is absent here); returns the initial upper-chain index."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return iU
def func_a3809f8721a2470288dbdf8c17bdfe44(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the initial
    lower-chain index (always 0)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return iL
def func_1e0a8bdda05e41c18ace45f7407674d5(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the
    per-slice target 'part' computed from the upper-chain area only."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return part
def func_9b76fd68a38548ebba8cb99bde146869(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the last
    loop index (len(U) - 2 when U has at least two points)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return i
def func_f6388289613d48ebb76fd7d6587883f1(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the initial
    vertical gap between the chains (U[0].y - L[0].y)."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return w
def func_6f83482e27ce45529a43073c4342d4f6(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the doubled
    area under the upper chain."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return area
def func_8e1a827d353f499087287f603b516b69(U, G, L):
    """Auto-generated slice (upper-chain-only variant); returns the (still
    empty) list of cut positions."""
    # BUG FIX: 'area' was used before assignment (NameError); start at 0.
    area = 0
    for i in range(len(U) - 1):
        area += (U[i + 1][0] - U[i][0]) * (U[i + 1][1] + U[i][1])
    part = area * 1.0 / G
    cuts = []
    iL = 0
    iU = 0
    w = U[0][1] - L[0][1]
    return cuts
def func_f8250d806f304b95b9588695dc6b4f0b(U, area, G, L):
    """Auto-generated slice: initialise sweep state for a precomputed
    'area' split into G parts; returns the initial vertical gap
    (U[0].y - L[0].y)."""
    part = area * 1.0 / G  # per-slice target (unused in this variant)
    cuts, iL, iU, a = [], 0, 0, 0
    w = U[0][1] - L[0][1]
    return w
def func_87fb9285e6044ffbbab636889ba08709(U, area, G, L):
    """Auto-generated slice: initialise sweep state for a precomputed
    'area'; returns the initial lower-chain index (always 0)."""
    part = area * 1.0 / G  # per-slice target (unused in this variant)
    cuts, iU, a = [], 0, 0
    w = U[0][1] - L[0][1]
    iL = 0
    return iL
def func_62af612fe0aa40698062ba467dfe231d(U, area, G, L):
part = area * 1.0 / G
cuts = []
iL = 0
iU = 0
| |
<filename>test/some-old-test-cases.py
#!/bin/env python3
import asyncio
import sys
from pprint import pprint
import random
import pyfca
import scramjet.utils as utils
from scramjet.ansi_color_codes import *
# Short aliases for the shared test utilities.
log = utils.LogWithTimer.log
fmt = utils.print_formatted
# Deterministic pseudo-random delays across runs.
random.seed('Pyfca')
# Use to change delays mocking async function execution
SLOMO_FACTOR = float(sys.argv[1]) if len(sys.argv) > 1 else 0.01
# Upper bound (seconds, before SLOMO scaling) for a random mock delay.
MAX_DELAY = 0.3
# Transformation functions and utilities
def log_results(results):
    """Pretty-print the collected results list under a timestamped header."""
    log('Results:')
    pprint(results)
def log_drain_status(drain, item):
    """Log whether the drain future is resolved, with the last written chunk."""
    log(f'Drain status: {blue}{drain.done()}{reset} '
        f'{grey}(last write: {utils.pprint_chunk(item)}){reset}')
async def mock_delay(data):
    """Pretend that we run some async operations that take some time.

    The delay comes from (in priority order) a `.delay` attribute, a
    'delay' dict key, or an int chunk; otherwise it is randomised.
    """
    if hasattr(data, 'delay'):
        delay = data.delay
    elif type(data) is dict:
        delay = data['delay'] if 'delay' in data else 0
    elif type(data) is int:
        delay = data
    else:
        delay = 0
    if not delay:
        delay = random.uniform(0, MAX_DELAY)
    await asyncio.sleep(delay * SLOMO_FACTOR)
def identity(x):
    """Synchronous pass-through transformation (logs the chunk it sees)."""
    log(f'{yellow}identity:{reset} {x}')
    return x
async def async_identity(x):
    """Async pass-through transformation with a mocked processing delay."""
    log(f'{yellow}identity start:{reset} {x}')
    await mock_delay(x)
    log(f'{yellow}identity end:{reset} -> {x}')
    return x
def transform_dict_or_num(description, data, function):
    """Apply `function` to data['value'] for dict chunks, or to the chunk
    itself otherwise; log the outcome under `description`."""
    if type(data) is dict and 'value' in data:
        data['value'] = function(data['value'])
        if data['value'] is pyfca.DropChunk:
            # dropping value means dropping the whole chunk
            result = pyfca.DropChunk
        else:
            result = data
    else:
        result = function(data)
    log(f'{yellow}{description}:{reset} -> {result}')
    return result
def increment(x):
    # Add 1 to the chunk (or to its 'value' field for dict chunks).
    return transform_dict_or_num('increment', x, lambda x: x+1)
async def async_increment(x):
    # Same as increment() but preceded by a mocked async delay.
    await mock_delay(x)
    return increment(x)
def double(x):
    # Multiply the chunk (or its 'value' field) by 2.
    return transform_dict_or_num('double', x, lambda x: x*2)
async def async_double(x):
    # Same as double() but preceded by a mocked async delay.
    await mock_delay(x)
    return double(x)
def keep_even(x):
    # Keep even values, drop odd chunks via the DropChunk sentinel.
    func = lambda x: x if x % 2 == 0 else pyfca.DropChunk
    return transform_dict_or_num('keep_even', x, func)
async def async_keep_even(x):
    # Same as keep_even() but preceded by a mocked async delay.
    await mock_delay(x)
    return keep_even(x)
# Test cases
# Base payload values shared by the fixture lists below.
TEST_SEQUENCE = [1,2,1,3,2,4]
# Chunks whose mocked processing time is proportional to their value.
objects_with_delays = [
    {'id': count, 'delay': 0.1 * value}
    for count, value
    in enumerate(TEST_SEQUENCE)
]
# Chunks carrying their payload under 'value'.
objects_with_values = [
    {'id': count, 'value': value}
    for count, value
    in enumerate(TEST_SEQUENCE)
]
# Concurrency limit used for every Pyfca instance in these tests.
MAX_PARALLEL = 4
def monotonic_sequence(n):
    """Return n chunks of the form {'id': i} with consecutive ids 0..n-1."""
    chunks = []
    for i in range(n):
        chunks.append({'id': i})
    return chunks
async def test_write_then_read_concurrently(input_data):
    """Write everything first, then await all reads concurrently."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    for x in input_data:
        p.write(x)
    reads = [p.read() for _ in input_data]
    results = await asyncio.gather(*reads)
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_write_then_read_sequentially(input_data):
    """Write everything first, then await each read one at a time."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    for x in input_data:
        p.write(x)
    results = [await p.read() for _ in input_data]
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_write_and_read_in_turn(input_data):
    """Interleave each write with creating its matching read."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    reads = []
    for x in input_data:
        p.write(x)
        reads.append(p.read())
    results = await asyncio.gather(*reads)
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_reads_before_write(input_data):
    """Create all reads before any chunk is written."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    reads = [p.read() for _ in input_data]
    for x in input_data:
        p.write(x)
    results = await asyncio.gather(*reads)
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_reads_exceeding_writes(input_data):
    """Request more reads than there are writes, then end the input."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    for x in input_data:
        p.write(x)
    reads = [p.read() for _ in range(len(input_data) + 4)]
    p.end()
    results = await asyncio.gather(*reads)
    log_results(results)
    # Reads exceeding writes should return None (if accepting input stops).
    assert results == input_data + [None]*4
async def test_reads_after_end(input_data):
    """Create the reads only after the input was ended."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    for x in input_data:
        p.write(x)
    p.end()
    reads = [p.read() for _ in range(len(input_data) + 4)]
    results = await asyncio.gather(*reads)
    log_results(results)
    # It should be possible to read after pyfca stopped accepting input.
    # Reads exceeding writes should return None.
    assert results == input_data + [None]*4
# If the number of items being processed is below limit, write() should return
# a future that resolves immediately (and therefore code that awaits it should
# actually run synchronously).
async def test_synchronous_draining(input_data):
    """Below-limit writes must resolve without entering the event loop;
    the write that hits MAX_PARALLEL must not."""
    p = pyfca.Pyfca(MAX_PARALLEL, identity)
    # None until a sync check starts; set True by the next loop iteration.
    event_loop_flag = None

    def start_sync_check():
        log('The following sequence of instructions should be synchronous.')
        nonlocal event_loop_flag
        event_loop_flag = False

        def update_flag():
            log('Next event loop iteration.')
            nonlocal event_loop_flag
            event_loop_flag = True
        # schedule for next event loop iteration
        asyncio.get_event_loop().call_soon(update_flag)

    def check_async(expected):
        log(f'Did next iteration of event loop start already? '
            f'{cyan}{event_loop_flag}{reset}')
        assert event_loop_flag == expected

    async def write_below_limit():
        for _ in range(MAX_PARALLEL - 1):
            item = input_data.pop(0)
            drain = p.write(item)
            log_drain_status(drain, item)
            # Writes up till MAX_PARALLEL-1 should report below limit
            assert drain.done() == True
            # This should resolve synchronously
            await drain

    # Note that we run the test twice because the results may differ for the
    # first MAX_PARALLEL items (e.g. the algorithm may return placeholders etc.)
    for i in range(2):
        log(f'Start batch #{i+1}')
        start_sync_check()
        # Writes up till MAX_PARALLEL-1 should resolve immediately
        await write_below_limit()
        check_async(False)
        # Create readers so the queue won't get stuck. This is still synchronous.
        reads = [p.read() for _ in range(MAX_PARALLEL)]
        check_async(False)
        # MAX_PARALLEL-th write should reach the limit and awaiting on it
        # should trigger entering event loop and processing previous items
        await p.write(input_data.pop(0))
        check_async(True)
        # clean up the queue.
        await asyncio.gather(*reads)
async def read_with_debug(pyfca, live_results=None):
    """Log received result and update result list immediately."""
    result = await pyfca.read()
    log(f'{green}Got result:{reset} {result}')
    # Appending here (not after a gather) lets callers observe progress.
    if live_results is not None:
        live_results.append(result)
    return result
async def test_limit_waiting_until_items_are_processed(input_data):
    """Each resolved drain implies enough older items were already read."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    results = []
    reads = [read_with_debug(p, results) for _ in input_data]
    read_futures = asyncio.gather(*reads)

    def check(written_count, expected_len):
        # BUG FIX: the log message used the enclosing-scope 'expected',
        # which may have advanced by the time call_soon fires; use the
        # argument that was bound when the check was scheduled.
        log(f'Drain after {written_count} items written, '
            f'at least {expected_len} results should be ready')
        assert len(results) >= expected_len

    for items_written, x in enumerate(input_data, start=1):
        drain = p.write(x)
        await drain
        log_drain_status(drain, x)
        expected = items_written - MAX_PARALLEL + 1
        # wait one event loop iteration so that appropriate read is evaluated
        asyncio.get_event_loop().call_soon(check, items_written, expected)
    await read_futures
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_limit_waiting_for_reads(input_data):
    """The drain future must stay unresolved until a read consumes an item,
    even after processing has finished."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    for x in input_data[:MAX_PARALLEL-1]:
        drain = p.write(x)
        await drain
        log_drain_status(drain, x)

    def check_drain(expected):
        log_drain_status(drain, next_item)
        assert drain.done() == expected

    next_item = input_data[MAX_PARALLEL-1]
    drain = p.write(next_item)
    # Pyfca should report that the limit was reached.
    check_drain(False)
    # Wait until all items are processed (we need to first ensure that
    # last_chunk_status is up-to-date).
    await asyncio.sleep(0)
    await p.last_chunk_status
    # We should still not be drained because there were no reads yet.
    check_drain(False)
    first_result = await read_with_debug(p)
    # Drain status should update after next run of event loop
    await asyncio.sleep(0)
    check_drain(True)
async def test_writing_above_limit(input_data):
    """Writes past MAX_PARALLEL must not block; they return pending futures."""
    p = pyfca.Pyfca(MAX_PARALLEL, identity)
    # Writing shouldn't block if we exceed the limit.
    writes = [p.write(x) for x in input_data]
    assert len(writes) > MAX_PARALLEL
    # First writes should report that they were below the limit
    for drain in writes[:MAX_PARALLEL-1]:
        assert drain.done() == True
    # After reaching the limit write() should return an unresolved future
    for drain in writes[MAX_PARALLEL-1:]:
        assert drain.done() == False
    # collect results to avoid CancelledError and "coroutine was never awaited"
    reads = [p.read() for _ in input_data]
    await asyncio.gather(*reads)
async def test_empty_transformation_chain(input_data):
    """A Pyfca with no transformation should pass chunks through untouched."""
    p = pyfca.Pyfca(MAX_PARALLEL)
    for x in input_data:
        p.write(x)
    results = [await p.read() for _ in input_data]
    log_results(results)
    # items should appear in the output unchanged and in the same order
    assert results == input_data
async def test_multitransform(input_data):
    """Chained async transforms (identity -> double -> increment) should all
    apply, preserving chunk order."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_identity)
    p.add_transform(async_double)
    p.add_transform(async_increment)
    for x in input_data:
        p.write(x)
    reads = [p.read() for _ in input_data]
    results = await asyncio.gather(*reads)
    log_results(results)
    # multiple transformations should be applied to each element, and they
    # should arrive in the same order they were written in.
    assert results == [
        {'id': 0, 'value': 3},
        {'id': 1, 'value': 5},
        {'id': 2, 'value': 3},
        {'id': 3, 'value': 7},
        {'id': 4, 'value': 5},
        {'id': 5, 'value': 9},
    ]
async def test_sync_chain(input_data):
    """Synchronous transforms (increment -> double) should work too."""
    p = pyfca.Pyfca(MAX_PARALLEL, increment)
    p.add_transform(double)
    for x in input_data:
        p.write(x)
    reads = [p.read() for _ in input_data]
    results = await asyncio.gather(*reads)
    log_results(results)
    # Using synchronous functions as transformations should work.
    assert results == [
        {'id': 0, 'value': 4},
        {'id': 1, 'value': 6},
        {'id': 2, 'value': 4},
        {'id': 3, 'value': 8},
        {'id': 4, 'value': 6},
        {'id': 5, 'value': 10},
    ]
async def test_filtering_should_drop_items(input_data):
    """Chunks filtered out by keep_even must disappear from the output;
    surplus reads after end() return None."""
    p = pyfca.Pyfca(MAX_PARALLEL, async_keep_even)
    for x in input_data:
        p.write(x)
    p.end()
    results = [await p.read() for _ in input_data]
    log_results(results)
    assert results == [
        {'id': 1, 'value': 2},
        {'id': 4, 'value': 2},
        {'id': 5, 'value': 4},
        None,
        None,
        None,
    ]
async def test_filtering_reads_before_end(input_data):
p = pyfca.Pyfca(MAX_PARALLEL, async_keep_even)
for x in input_data:
p.write(x)
reads = [p.read() for _ in input_data]
p.end()
results = await asyncio.gather(*reads)
log_results(results)
# even though the reads were performed before .end(), they should return
# Nones for filtered out items, and with correct ordering
assert | |
import configparser
import logging
import random
import sys
from copy import deepcopy
import discord
import GameTimer
def load_config():
    """Read config.ini and return the game settings.

    Returns a dict with key 't': round duration in seconds; falls back to
    1200 when the value is missing or invalid.
    """
    c = configparser.ConfigParser()
    c.read('config.ini')
    try:
        time = c.getint('game', 'time')
    except (ValueError, configparser.Error):
        # BUG FIX: a missing file/section/option raises configparser.Error,
        # which the old 'except ValueError' did not catch.
        # NOTE(review): 'logger' is assumed to be a module-level logger
        # defined elsewhere in this file — confirm.
        logger.error(
            "В config.ini неверно указано значение time. Значение установлено на 1200")
        time = 1200
    return {'t': time}
class DiscordClient(discord.Client):
def __init__(self, **kwargs):
discord.Client.__init__(self, **kwargs)
self.paused = False
self.debaters_list = []
self.debater_names = []
self.guesser_attempts = {}
self.guessers_list = []
self.guesser_names = []
self.guesser_points = {}
self.guesser_last_turn = {}
self.guesser_messages = 0
self.debater_cards = {}
self.pack = {}
self.discard = []
try:
config = load_config()
self.t = config["t"]
except:
self.t = 1200
logger.error(
"Файл config.ini отсуствует или содержит некорретные данные, были загруженны настройки по умолчанию.")
print(self.t)
self.game_timer = GameTimer.RenewableTimer(self.t, self.end)
self.started = False
    async def on_ready(self):
        """Discord hook: reset game state and report the logged-in account."""
        await self.__reset__()
        print('Logged in as')
        # NOTE(review): uses the module-level 'client' rather than 'self';
        # presumably the same object — confirm.
        print(client.user.name)
        print(client.user.id)
        print('------')
    async def __reset__(self):
        """Clear all per-game state (a coroutine so it can be scheduled
        as a task from the timer callback)."""
        self.paused = False
        self.debaters_list.clear()
        self.debater_names.clear()
        self.guesser_attempts.clear()
        self.guessers_list.clear()
        self.guesser_names.clear()
        self.guesser_points.clear()
        self.guesser_last_turn.clear()
        self.guesser_messages = 0
    def end(self):
        # Timer callback: schedule the game-over announcement and then the
        # state reset on the client's event loop.
        # NOTE(review): the reset task may clear the player lists while
        # end_game() is still awaiting message sends — confirm the intended
        # ordering between the two tasks.
        self.loop.create_task(self.end_game())
        self.loop.create_task(self.__reset__())
async def end_game(self):
self.started = False
max_points = 0
winners = []
# Определяет отгадчика с максимальным количеством очков
for guesser in self.guesser_points:
if self.guesser_points[guesser] > max_points:
max_points = self.guesser_points[guesser]
winner = guesser
winners = [winner.name]
elif self.guesser_points[guesser] == max_points:
winners.append(guesser.name)
if len(self.guesser_points) < 2:
end_game_message = "Победитель **{}**".format(guesser.name)
elif len(winners) < 2:
end_game_message = "Победитель **{}**".format(winner.name)
elif len(winners) > 1:
end_game_message = "Победители {}".format("**" + "**, **".join(winners) + "**")
score = self.current_score()
for user in self.guessers_list + self.debaters_list:
await user.send("{0}\n{1}\nИгра закончилась".format(score, end_game_message))
print("Игра закончилась")
def current_score(self):
score_message = "Общий счёт (Игрок: очки | попытки):\n"
for guesser in self.guesser_points:
score_message += "**{0}**: {1} | {2} \n".format(guesser.name, self.guesser_points[guesser],
self.guesser_attempts[guesser])
return score_message
async def add_guesser(self, member):
if member not in self.guessers_list:
self.guessers_list.append(member)
self.guesser_names.append(member.name)
guessers = "**" + "**, **".join(self.guesser_names) + "**"
for guesser in self.guessers_list:
if guesser != member:
await guesser.send(
"Игрок {0} добавлен в группу отгадчиков\n"
"Группа отгадчиков: {1}\n"
"Общее количество отгадчиков: **{2}**".format(member.name, guessers,
len(self.guessers_list)))
else:
await guesser.send(
"Вы добавлены в группу отгадчиков\n"
"Группа отгадчиков: {0}\n"
"Общее количество отгадчиков: **{1}**".format(guessers,
len(self.guessers_list)))
elif member in self.guessers_list:
guessers = "**" + "**, **".join(self.guesser_names) + "**"
await member.send(
'Вы уже в группе отгадчиков \nГруппа отгадчиков: {0}\nОбщее количество '
'отгадчиков: **{1}**'.format(guessers, len(self.guessers_list)))
async def remove_guesser(self, member):
if member in self.guessers_list:
self.guessers_list.remove(member)
self.guesser_names.remove(member.name)
guessers = "**" + "**, **".join(self.guesser_names) + "**"
await member.send(
"Вы удалены из группы отгадчиков\n"
"Группа отгадчиков: {0}\n"
"Общее количество отгадчиков: **{1}**".format(guessers, len(self.guessers_list)))
for guesser in self.guessers_list:
await guesser.send(
"Игрок {0} удалён из группы отгадчиков\n"
"Группа отгадчиков: {1}\n"
"Общее количество отгадчиков: **{2}**".format(member.name, guessers,
len(self.guessers_list)))
async def add_debater(self, member):
if member not in self.debaters_list:
self.debaters_list.append(member)
self.debater_names.append(member.name)
debaters = "**" + "**, **".join(self.debater_names) + "**"
for debater in self.debaters_list:
if debater != member:
await debater.send(
"Игрок {0} добавлен в группу спорщиков\n"
"Группа спорщиков: {1}\n"
"Общее количество спорщиков: **{2}**".format(member.name, debaters,
len(self.debaters_list)))
else:
await debater.send(
"Вы добавлены в группу спорщиков\n"
"Группа спорщиков: {1}\n"
"Общее количество спорщиков: **{2}**".format(member.name, debaters,
len(self.debaters_list)))
elif member in self.debaters_list:
debaters = "**" + "**, **".join(self.debater_names) + "**"
await member.send(
'Вы уже в группе спорщиков \nГруппа спорщиков: {0}\nОбщее количество '
'спорщиков: **{1}**'.format(debaters, len(self.debaters_list)))
# we do not want the client to reply to itself
if member == discord.Client.user:
return
async def remove_debater(self, member):
if member in self.debaters_list:
self.debaters_list.remove(member)
self.debater_names.remove(member.name)
debaters = "**" + "**, **".join(self.debater_names) + "**"
await member.send(
"Вы удалены из группы спорщиков\n"
"Группа спорщиков: {0}\n"
"Общее количество спорщиков: **{1}**".format(debaters, len(self.debaters_list)))
for debater in self.debaters_list:
await debater.send(
"Игрок {0} удалён из группы спорщиков\n"
"Группа отгадчиков: {1}\n"
"Общее количество спорщиков: **{2}**".format(member.name, debaters,
len(self.debaters_list)))
async def on_message(self, message):
member = message.author
channel = message.channel
if message.content == "!help" or message.content == "!h":
message_to_other_guessers = """```Чат-бот для игры в Fallacymania
Команды:
"!h" или "!help" - Выводит данную справку
"!r" или "!правила" - Выводит правила
"*" или "!софизмы" - Отправляет в ответ лист с софизмами
"!d" или "!спорщик" - Добавляет пользователя в группу спорщиков
"!-d" или "!-спорщик" - Удаляет пользователя из группы спорщиков
"!g" или "!отгадчик" - Добавляет пользователя в группу отгадчиков
"!-g" или "!-отгадчик" - Удаляет пользователя из группы отгадчиков
"!s" или "!старт" - Если указано минимальное количество отгадчиков и спорщиков, то запускает таймер игры
"!p" или "!пазуа" - Приостанавливает таймер игры
"!stop" или "завершить" - Завершает игру о останавливает таймер
"!reset" или "!сброс" - Удаляет всех игроков из групп отгадчиков и спорщиков
"%номер_софизма%" - Ищет у спорщика софизм по номеру, если находит, то забирает и даёт новый (вбивается без знаков процент)
"+" или "-" - Даёт или забирает 1 очко у отгадчика. Пока у отгадчика есть попытки "-" забирает 1 попытку, а не 1 очко.
".." или "!z" - Отменяет последнее действие отгадчика.
```"""
if not self.started:
await channel.send(message_to_other_guessers)
else:
await member.send(message_to_other_guessers)
if message.content == "!d" or message.content == "!спорщик":
await self.add_debater(member)
await self.remove_guesser(member)
if message.content == "!g" or message.content == "!отгадчик":
await client.loop.create_task(self.add_guesser(member))
await client.loop.create_task(self.remove_debater(member))
if message.content == "!-g" or message.content == "!-отгадчик":
await self.remove_guesser(member)
if message.content == "!-d" or message.content == "!-спорщик":
await self.client.loop.create_task(self.remove_debater(member))
# Сбросить параматеры игры
if message.content == "!reset" or message.content == "!сброс":
if not self.started:
if self.debaters_list + self.guessers_list != []:
for user in self.debaters_list + self.guessers_list:
await user.send("Список игроков и их счёт сброшены")
else:
await member.send("Список игроков и их счёт сброшены")
await self.__reset__()
else:
await member.send(""""Игра уже запущена. Чтобы завершить игру введите "!stop""""")
# Завершить игру
if message.content == "!stop" or message.content == "!завершить":
if self.started:
self.game_timer.cancel()
self.end()
else:
member.send("Нельзя остановить ещё не запущенную игру")
#
# Старт игры
if message.content == '!s' or message.content == '!старт':
# Если таймер не запущен и игра не на паузе, есть как минимум 2 спорщика и 1 отгадчик
if not (self.game_timer.timer.isAlive() or self.paused) and len(self.debaters_list) > 1 and len(
self.guessers_list) > 0:
self.game_timer = GameTimer.RenewableTimer(self.t, self.end)
self.debater_cards = {}
self.pack = deepcopy(fallacies)
self.discard = []
# Перемешать колоду
random.shuffle(self.pack)
# Раздать карты спорщикам
for debater in self.debaters_list:
i = 0
card_list = []
cards = ""
while i < 5:
card = self.pack.pop()
cards += card
card_list.append(card)
i += 1
await debater.send(cards)
self.debater_cards.update({debater: card_list})
# • если отгадчиков 1-2, каждый берёт по 15 карт попыток;
# • если отгадчиков 3-4, каждый берёт по 10 карт попыток;
# • если отгадчиков 5-6, каждый берёт по 8 карт попыток;
# • если отгадчиков больше 6, то 50 карт попыток делятся поровну между отгадчиками,
# а остаток убирается обратно в коробку.
if len(self.guessers_list) < 3:
number_attempts = 15
elif len(self.guessers_list) < 5:
number_attempts = 10
elif len(self.guessers_list) < 7:
number_attempts = 8
elif len(self.guessers_list) > 6:
number_attempts = int(50 / len(self.guessers_list))
for guesser in self.guessers_list:
# Раздать лист с софизмами отгадчикам
await guesser.send(
"http://i.imgur.com/ivEjvmi.png\nhttp://i.imgur.com/BukCpJ7.png\nhttp://i.imgur.com/s4qav82.png")
# Установить начальное количество попыток и очков для отгадчиков
self.guesser_points.update({guesser: 0})
self.guesser_attempts.update({guesser: number_attempts})
self.guesser_last_turn.update({guesser: None})
self.game_timer.start()
await channel.send("Игра началась")
self.started = True
# Если таймер запущен
elif self.game_timer.timer.isAlive() and not self.paused:
await channel.send("Таймер уже запущен")
self.game_timer.pause()
m, s = divmod(int(self.game_timer.get_actual_time()), 60)
await channel.send("Осталось {0}м {1}с".format(m, s))
self.game_timer.resume()
elif self.paused:
for user in self.guessers_list + self.debaters_list:
m, s = divmod(int(self.game_timer.get_actual_time()), 60)
await user.send("Игра продолжается\nОсталось {0}м {1}с".format(m, s))
self.game_timer.resume()
self.paused = False
elif len(self.debaters_list) < 2:
await channel.send("Нужно указать как минимум 2 спорщиков")
elif len(self.guessers_list) < 1:
await channel.send("Нужно указать как минимум 1 отгадчика")
# Пауза
if message.content == '!p' or message.content == '!пауза':
if self.started and not self.paused:
self.game_timer.pause()
self.game_timer.get_actual_time()
self.paused = True
for user in self.guessers_list + self.debaters_list:
m, s = divmod(int(self.game_timer.get_actual_time()), 60)
await user.send("Пауза\nОсталось {0}м {1}с".format(m, s))
elif not self.started:
await channel.send("Игра ещё не запущена")
elif self.paused:
await channel.send("Игра уже на паузе")
# Выдать лист с софизмом
if message.content == '!софизмы' or message.content == '*':
await member.send(
"http://i.imgur.com/ivEjvmi.png\nhttp://i.imgur.com/BukCpJ7.png\nhttp://i.imgur.com/s4qav82.png")
# Начиление очков
if message.content == '+' or message.content == '-':
if not self.started:
return await member.send(
"Игра не запущенна. Проводить манипуляции со счётом до старта игры нельзя.".format(
member))
if member not in self.guesser_points:
return await member.send("'+' или '-' отправленное отгадчиком даёт или отнимает очко у "
"этого отгадчика. **{0}** - не отгадчик".format(member))
if message.content == "+":
self.guesser_points[member] = self.guesser_points[member] + 1
self.guesser_last_turn[member] = "plus_point"
message_to_other_guessers = | |
# eeg_project/plot_data.py
""" some tools for plotting EEG data and doing visual comparison """
from eeg_project.read_data import (my_read_eeg_generic, SAMP_FREQ,
pass_through, accumulate_subject_file_list, files_skip_processing,
sample_file_list, match_types)
import numpy as np
import pandas as pd
import torch
from collections import defaultdict
# from six import text_type
import tqdm
from matplotlib import pyplot, cm
# from mpl_toolkits.mplot3d import Axes3D
# # import ipywidgets as widgets
from IPython.display import clear_output
# Metric-name groups used by plot_train_results to select columns out of
# the recorded train/test metric arrays.
PRF_metrics = ['recall', 'precision', 'f_1_meas']
basic_metrics = ['acc', 'auc']
def plot_train_results(metrics2record, loss_metric,
                       train_metrics, test_metrics):
    """Plot learning curves for a training run.

    Shows the loss curve (log scale), one train-vs-test figure per basic
    metric (acc/auc), and a single combined figure for the
    precision/recall family when any of those metrics were recorded.
    """
    pyplot.figure(figsize=(10, 5))
    lo, hi = np.min(loss_metric), np.max(loss_metric)
    pyplot.plot(loss_metric)
    pyplot.yticks(lo + np.arange(5) * (hi - lo))
    pyplot.title('Loss')
    pyplot.xlabel('Epoch')
    pyplot.yscale('log')
    pyplot.show()
    # One figure per recorded basic metric, train vs. test.
    for metric_name in basic_metrics:
        if metric_name not in metrics2record:
            continue
        col = metrics2record.index(metric_name)
        pyplot.figure(figsize=(10, 5))
        handles = []
        for series, tag in ((train_metrics, 'train'), (test_metrics, 'test')):
            line, = pyplot.plot(series[:, col], label=tag)
            handles.append(line)
        pyplot.legend(handles=handles)
        pyplot.title(metric_name)
        pyplot.xlabel('Epoch')
        pyplot.show()
    # Combined figure for precision/recall/F1, if any were recorded.
    if any(name in PRF_metrics for name in metrics2record):
        pyplot.figure(figsize=(10, 5))
        handles = []
        for series, tag in ((train_metrics, 'train'), (test_metrics, 'test')):
            for metric_name in PRF_metrics:
                if metric_name in metrics2record:
                    col = metrics2record.index(metric_name)
                    line, = pyplot.plot(series[:, col],
                                        label=(metric_name + ':' + tag))
                    handles.append(line)
        pyplot.legend(handles=handles)
        pyplot.title('Precision / Recall')
        pyplot.xlabel('Epoch')
        pyplot.show()
def highlight_correlated_feature_twoclass(
        file_samples=100,
        match_types_in=match_types, figsize=(12, 14),
        process_data=None, pca_vec_to_plot=5, debug=1):
    """ sub-sample the data set and plot correlation and dominant (PCA) feature weight
    information

    For each match type, accumulates the mean sensor correlation and
    covariance over ``file_samples`` files, separately for the alcoholic
    and non-alcoholic classes, then plots the per-class singular values,
    the selected singular vectors, and the class correlation difference.
    Returns the plotted singular-vector slices (alcoholic, non-alcoholic)
    from the last match type processed.

    ``process_data``, when given, transforms each frame (titles are then
    labelled "(frqeuency)"); ``pca_vec_to_plot`` is either a count or a
    (start, stop) tuple of singular-vector indices.
    """
    corr_accum = None
    if process_data is not None:
        data_type = '(frqeuency)'
    else:
        data_type = '(time-domain)'
    for match_type in match_types_in:
        for aidx, is_alcoholic in enumerate([True, False]):
            corr_accum = cov_accum = None
            if debug:
                print(f'getting example data for match_type[{match_type}]'
                      f' and is_alcoholic[{is_alcoholic}]')
            # Balanced random sub-sample of files for this (match, class).
            file_list = sample_file_list(
                limitby=dict(
                    match=match_type,
                    alcoholic=is_alcoholic),
                limit_mult_files=file_samples,
                balance_types=[('subject', 10)], df_type='wide',
                seed=42, debug=max(0, debug - 1))
            for file in tqdm.tqdm(file_list):
                df, info = my_read_eeg_generic(
                    file, orig_tt_indic=('test' in str.lower(file)))
                corr = df.corr()
                cov = df.cov().values
                if corr_accum is None:
                    # First file: remember sensor names/count once.
                    sen_names = corr.columns.levels[
                        corr.columns.names.index('sensor')]
                    nsen = len(sen_names)
                if process_data:
                    # NOTE(review): guarded by truthiness here but by
                    # `is not None` above — presumably the same intent.
                    x, Z, xl, yl = process_data([], df.values, '',
                                                '', fs=SAMP_FREQ)
                    df = pd.DataFrame(Z)
                    corr = df.corr()
                    cov = df.cov().values
                if corr_accum is None:
                    corr_accum = corr.values
                    cov_accum = cov/nsen
                else:
                    corr_accum += corr.values
                    cov_accum += cov/nsen
            corr_accum /= len(file_list)
            # Stash the per-class averages (True is processed first).
            if aidx == 0:
                corr_alcoholic = corr_accum.copy()
                cov_alcoholic = cov_accum.copy()
            else:
                corr_nonalch = corr_accum.copy()
                cov_nonalch = cov_accum.copy()
            if debug > 1:
                pyplot.figure(figsize=figsize)
                pyplot.pcolor(np.flipud(corr_accum))
                pyplot.xticks(np.arange(nsen), sen_names)
                pyplot.yticks(np.arange(nsen), reversed(sen_names))
                pyplot.title(f'corr - across sensors {data_type} - '
                             f'is_alcoholic[{is_alcoholic}] - match[{match_type}]')
                pyplot.colorbar()
                pyplot.show()
        # PCA via SVD of each class's averaged covariance.
        Ua, svs_a, Va = np.linalg.svd(cov_alcoholic, full_matrices=False,
                                      compute_uv=True)
        Una, svs_na, Vna = np.linalg.svd(cov_nonalch, full_matrices=False,
                                         compute_uv=True)
        print('SVec size', Una.shape)
        pyplot.figure(figsize=(figsize[0], 6))
        leg = []
        lg, = pyplot.plot(svs_a, label='alcoholic')
        leg.append(lg)
        lg, = pyplot.plot(svs_na, label='not alcoholic')
        leg.append(lg)
        pyplot.legend(handles=leg)
        pyplot.title(f'PCA decomposition: SVs - across sensors {data_type} - '
                     f'- match[{match_type}]')
        pyplot.show()
        pyplot.figure(figsize=(figsize[0]+4, 8))
        leg = []
        # Normalize pca_vec_to_plot to a (start, stop) index range.
        is_slice = isinstance(pca_vec_to_plot, tuple)
        if is_slice:
            pca_vec_to_plot_count = pca_vec_to_plot[1] - pca_vec_to_plot[0]
        else:
            pca_vec_to_plot_count = pca_vec_to_plot
            pca_vec_to_plot = (0, pca_vec_to_plot)
        pca_vec_to_plot_count = min(pca_vec_to_plot_count, nsen)
        for i, idx in enumerate(range(*pca_vec_to_plot)):
            lg, = pyplot.plot(-i + Ua[:, idx], '-',
                              linewidth=2,
                              label=f'feat[{idx}]: alcoholic')
            leg.append(lg)
        for i, idx in enumerate(range(*pca_vec_to_plot)):
            lg, = pyplot.plot(-i + Una[:, idx], '--',
                              linewidth=2,
                              label=f'feat[{idx}]: not alcoholic')
            leg.append(lg)
        pyplot.xticks(np.arange(nsen), sen_names, fontsize=7)
        pyplot.xlim((0, nsen + 10))
        pyplot.yticks(-np.arange(pca_vec_to_plot_count), [
            f'PCA_dim[{i}]' for i in np.arange(pca_vec_to_plot_count)])
        pyplot.legend(handles=leg)
        ind_str = (f"{pca_vec_to_plot[0]} to {pca_vec_to_plot[1]-1}" if is_slice
                   else f"- first {pca_vec_to_plot_count}")
        pyplot.title(f'PCA decomposition: singular vectors {ind_str} '
                     f'- across sensors {data_type} - '
                     f'match[{match_type}]')
        pyplot.figure(figsize=figsize)
        pyplot.pcolor(np.flipud(corr_alcoholic - corr_nonalch))
        pyplot.xticks(np.arange(nsen), sen_names)
        pyplot.yticks(np.arange(nsen), reversed(sen_names))
        pyplot.title(f'corr - across sensors {data_type} - '
                     f'(alcoholic-nonalcoholic) - match[{match_type}]')
        pyplot.colorbar()
        pyplot.show()
    # NOTE(review): only the vectors from the last match_type survive the loop.
    if is_slice:
        return Ua[:, pca_vec_to_plot[0]:pca_vec_to_plot[1]], Una[:,
               pca_vec_to_plot[0]:pca_vec_to_plot[1]]
    else:
        return Ua[:, :pca_vec_to_plot_count], Una[:, :pca_vec_to_plot_count]
def plot_data_subject_dirs(data_dirs=None, file_list=None,
                           labelby=None, limitby=None, plots=None, figsize=None,
                           transparency=1., yscale='linear', xrange=None,
                           yrange=None, force_axes_same_scale=True,
                           process_data=pass_through, limit_mult_files=np.inf, debug=1):
    """ plot EEG data by searching subject directories with some options

    The file set comes from (in priority order) an explicit ``file_list``,
    a scan of ``data_dirs``, or a random sample when both are None.
    ``plots`` selects the rendering style: 'grid', 'overlap' and/or
    'all_data_traces' (overlay every file's traces; may name a single
    sensor), 'threed'.  Returns the list of files actually processed.
    """
    df_type = 'wide'
    if plots is None:
        plots = dict(grid=True)
    senlistorder = None
    all_data_overlaid = ('all_data_traces' in plots and
                         (plots['all_data_traces'] is not None))
    printed_entry_info = False
    if ((data_dirs is None) and (file_list is None)):
        # No explicit source: draw a (possibly balanced) random sample.
        if isinstance(limit_mult_files, tuple):
            limit_mult, bal_list = limit_mult_files
        else:
            bal_list = None
            limit_mult = limit_mult_files
        if np.isinf(limit_mult):
            limit_mult = None
        file_list = sample_file_list(
            limitby=limitby,
            limit_mult_files=limit_mult,
            balance_types=bal_list, df_type=df_type,
            seed=42, debug=max(0, debug - 1))
    if file_list is None:
        # Fall back to scanning the given subject directories.
        file_list, unique_entries, total_files = accumulate_subject_file_list(
            data_dirs, limitby=limitby, limit_mult_files=limit_mult_files,
            df_type=df_type, debug=debug)
        if debug:
            print('unique entries in metadata from file accumulation')
            for k in unique_entries:
                print(f'    {k}: {unique_entries[k]}')
            printed_entry_info = True
    else:
        total_files = len(file_list)
    plot_sensor = None
    if all_data_overlaid:
        if transparency == 1.:
            transparency = 0.5
        plot_to_make = sum([bool(plots[k]) for k in plots if 'all_data' not in k])
        if isinstance(plots['all_data_traces'], str):
            plot_sensor = plots['all_data_traces']
        if plot_to_make == 0:
            # Pick a sensible default style for the overlay mode.
            if isinstance(plots['all_data_traces'], str):
                plots['overlap'] = True
            else:
                plots['grid'] = True
        assert sum([bool(plots[k]) for k in plots if 'all_data' not in k]) == 1, (
            "cannot display multiple plot types")
        assert isinstance(plots['all_data_traces'], str) or (
            'overlap' not in plots or not(plots['overlap'])), (
            "cannot plot single overlapping plot if sensor is not specified")
        if figsize is None and 'grid' in plots and isinstance(plots['grid'], str):
            if plots['grid'].startswith('square'):
                figsize = (16, 18)
            else:
                figsize = (15, 64 * 8)
        pyplot.figure(figsize=figsize)
        legd = []
        running_min_max = (np.inf, -np.inf)
        if debug == 1:
            progress_bar = tqdm.tqdm(total=total_files, miniters=1)
    else:
        legd = None
        if figsize is None:
            figsize = (12, 14)
    if isinstance(limit_mult_files, tuple):
        limit_mult_files = limit_mult_files[0]
    file_count = 0
    color_dict = dict()
    unique_entries = defaultdict(set)
    for file in file_list:
        # Original train/test split is inferred from the file path.
        orig_data_dir = int(('test' in str.lower(file)))
        if file in files_skip_processing:
            continue
        if all_data_overlaid:
            if debug == 1:
                progress_bar.n = file_count
                progress_bar.set_description('files processed')
            if file_count >= limit_mult_files:
                break
        else:
            clear_output()
        full_file_url = file
        if debug > 1:
            print(f'read file: {full_file_url}')
        df, info = my_read_eeg_generic(full_file_url, df_type=df_type,
                                       orig_tt_indic=orig_data_dir)
        if all_data_overlaid:
            # Label each overlaid trace by subject or the requested field.
            if labelby and labelby in info:
                id = labelby + ':' + str(info[labelby])
            else:
                id = info['subject']
        else:
            id = None
        if debug > 1:
            print(' | '.join([f'{n:>8s}:{str(v):4s}' for n, v in info.items()]))
        sen_index = df.columns.names.index('sensor')
        senlist = df.columns.levels[sen_index]
        if senlistorder is None:
            senlistorder = senlist
        elif all_data_overlaid:
            # Overlaying traces assumes a fixed sensor ordering.
            assert all([sl == chkl
                        for sl, chkl, in zip(senlist, senlistorder)]), (
                'different data set has list of sensors in a '
                'different order')
        Z = df.values
        nsamp, nsen = Z.shape
        time = np.arange(nsamp) / SAMP_FREQ
        x_data, Z, xlabel, ylabel = process_data(time, Z, 'time (s)',
                                                 'voltage (uV)', fs=SAMP_FREQ)
        if all_data_overlaid and force_axes_same_scale:
            # Track global y-limits so every overlaid file shares a scale.
            running_min_max = (min(Z.min(), running_min_max[0]),
                               max(Z.max(), running_min_max[1]))
            minv, maxv = running_min_max
        else:
            minv = maxv = None
        if ('overlap' in plots and plots['overlap']):
            plot_all_overlaid(x_data, Z, xlabel, ylabel, senlist, figsize,
                              id=id, yscale=yscale, yrange=yrange, xrange=xrange,
                              multi_trace_plot_labels=(file_count == 0),
                              color_dict=color_dict, transparency=transparency,
                              plot_sensor=plot_sensor, legend=legd)
        if ('grid' in plots and plots['grid']):
            grid_square = (not(isinstance(plots['grid'], str)) or
                           plots['grid'].startswith('square'))
            plot_grid(x_data, Z, xlabel, ylabel, senlist, minv, maxv,
                      id=id, grid_square=grid_square, figsize=figsize,
                      multi_trace_plot_labels=(file_count == 0),
                      yscale=yscale, yrange=yrange, xrange=xrange,
                      color_dict=color_dict, transparency=transparency,
                      legend=legd)
        if ('threed' in plots and plots['threed']) and not(
                all_data_overlaid):
            # NOTE(review): .labels[...].values() looks suspect for a pandas
            # MultiIndex — confirm against the pandas version in use.
            y_data = df.columns.labels[sen_index].values()
            plot_3d(x_data, y_data, Z, df, xlabel, ylabel, figsize=figsize)
        if not(all_data_overlaid):
            input('press enter to cont...')
        file_count += 1
        for k in info:
            unique_entries[k].add(info[k])
        if file_count >= limit_mult_files:
            break
    if all_data_overlaid:
        if 'overlap' in plots and plots['overlap']:
            pyplot.xlabel(xlabel, fontsize=14)
            pyplot.ylabel(ylabel, fontsize=15)
            pyplot.title(f'Sensor: {plots["all_data_traces"]}', fontsize=15)
            pyplot.legend(handles=legd, fontsize=15)
            pyplot.show()
    if debug and not(printed_entry_info):
        print('unique entries in metadata from file accumulation')
        for k in unique_entries:
            print(f'    {k}: {unique_entries[k]}')
    return file_list
def aggregate_behavior(Z):
    """Summarize per-sample behavior of a multi-sensor trace array.

    Parameters
    ----------
    Z : array of shape (nsamp, nsen), one column per sensor.

    Returns
    -------
    (nsamp, nsen, cmpr_high_variability, median_trace, dev) where
    ``median_trace`` and ``dev`` are 1-D length-``nsamp`` arrays (the
    across-sensor median and standard deviation per sample) and
    ``cmpr_high_variability`` flags, per sensor, whether more than half of
    its samples exceed ``median + 2*std``.

    Bug fix: the original built ``dev`` via ``np.matrix``, making it a
    column matrix; ``median_trace + 2 * dev`` then broadcast to an
    (nsamp, nsamp) matrix and the comparison summed n**2 entries.  Plain
    1-D broadcasting yields the intended per-sample threshold.
    """
    Z = np.asarray(Z)
    nsamp, nsen = Z.shape
    median_trace = np.median(Z, axis=1)
    dev = np.std(Z - median_trace[:, None], axis=1)
    threshold = median_trace + 2 * dev
    cmpr_high_variability = [
        (Z[:, sen_i] > threshold).sum() / nsamp > 0.5 for sen_i in range(nsen)]
    return nsamp, nsen, cmpr_high_variability, median_trace, dev
def plot_grid(x_data, Z, xlabel, ylabel, senlist,
minv=None, maxv=None, id=None, grid_square=True,
figsize=(12, 15), multi_trace_plot_labels=False,
yscale='linear', xrange=None, yrange=None,
color_dict={}, transparency=1., legend=None):
""" plot a gride of sensor traces """
nsen = len(senlist)
all_data_overlaid = (id is not None) and (legend is not None)
grid_base_sz = int(np.ceil(np.sqrt(nsen)))
if grid_square:
ncols = nrows = grid_base_sz
else:
ncols, nrows = 1, nsen
sen_i = 0
coli = rowi = 0
if all_data_overlaid:
pyplot.subplots_adjust(wspace=.2, hspace=.35)
for sen_i, sen in enumerate(senlist):
pyplot.subplot(nrows, ncols, sen_i+1)
| |
double quote escapes it in option tag
result = substitute('@',
'<@case\ttest > <@option "option ""1""" >option one<@else>no options<@/case>',
{'test': 'option "1"'})
self.assertEqual('option one', result)
	def test_quoted_option2(self):
		## Quoted option values may contain arbitrary whitespace, including newlines.
		result = substitute('@',
				'<@case\ttest > <@option "option\n 1" >option one<@else>no options<@/case>',
				{'test': 'option\n 1'})
		self.assertEqual('option one', result)
	def test_quoted_option_errors1(self):
		## Same as test_quoted_option2, but without quotes on option value
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'15(1,15)',
				substitute, '@',
				'<@case\ttest > <@option option\n 1 >option one<@else>no options<@/case>',
				{'test': 'option\n 1'})
	def test_quoted_option_errors2(self):
		## Same as test_quoted_option2, but with a stray character before the quoted value
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'15(1,15)',
				substitute, '@',
				'<@case\ttest > <@option x"option\n 1" >option one<@else>no options<@/case>',
				{'test': 'option\n 1'})
	def test_quoted_option_errors3(self):
		## Same as test_quoted_option2, but with a stray character after the quoted value
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'15(1,15)',
				substitute, '@',
				'<@case\ttest > <@option "option\n 1"x >option one<@else>no options<@/case>',
				{'test': 'option\n 1'})
	def test_quoted_option_errors4(self):
		## Template ends right after the opening quote: unterminated option value.
		self.assertRaisesAndMatchesTraceback(TagsubTemplateSyntaxError,
				'14(1,14)',
				substitute, '@',
				'<@case test> <@option "',
				{'test': 'option 1'})
	def test_quoted_option_errors5(self):
		## Unterminated quoted option value (no closing quote before EOF).
		self.assertRaisesAndMatchesTraceback(TagsubTemplateSyntaxError,
				'14(1,14)',
				substitute, '@',
				'<@case test> <@option "option 1',
				{'test': 'option 1'})
	def test_quoted_option_errors6(self):
		## Quote is closed but the option tag itself never is.
		self.assertRaisesAndMatchesTraceback(TagsubTemplateSyntaxError,
				'14(1,14)',
				substitute, '@',
				'<@case test> <@option "option 1"',
				{'test': 'option 1'})
	def test_variable_option1(self):
		# This one must match. It uses the same variable in the case and the option
		result = substitute('@', '<@case test><@option =test>True<@/case>',
				{'test': 'arbitrary value'})
		self.assertEqual('True', result)
	def test_variable_option2(self):
		## =value looks up the variable "value", which is unset here.
		result = substitute('@', '<@case test><@option =value>match<@else>no match<@/case>',
				{'test': 'value'})
		self.assertEqual('no match', result)
	def test_variable_option3(self):
		result = substitute('@', '<@case test><@option =value>match<@else>no match<@/case>',
				{'test': 'value', 'value': 'value'})
		self.assertEqual('match', result)
	def test_variable_option4(self):
		## saveeval snapshots the evaluated tag value before the case runs.
		result = substitute('@',
				'<@saveeval value><@test><@/saveeval><@case test><@option =value>match<@else>no match<@/case>',
				{'test': 'value', 'value': 'non-matching value'})
		self.assertEqual('match', result)
	def test_variable_option5(self):
		# This now behaves slightly differently from the old C code. Now, value gets evaluated when referenced. In
		# the old C code it only got evaluated recursively when being substituted into the output.
		result = substitute('@',
				'<@saveraw value><@test><@/saveraw><@case test><@option =value>match<@else>no match<@/case>',
				{'test': 'value', 'value': 'non-matching value'})
		self.assertEqual('match', result)
	def test_variable_option6(self):
		# We should properly recognize implied loop variables
		result = substitute('@',
				'<@loop list><@case value><@option =:index><@:rindex>match<@/case><@/loop>',
				{'value': '3', 'list': [{}, {}, {}, {}]})
		self.assertEqual('2match', result)
	def test_variable_option7(self):
		# We should properly recognize implied loop variables
		result = substitute('@',
				'<@loop list><@case value><@option =:index><@:rindex>match<@/case><@/loop>',
				{'value': 2, 'list': [{}, {}, {}, {}]})
		self.assertEqual('3match', result)
	def test_variable_option8(self):
		## Quoted variable names may contain spaces.
		result = substitute('@',
				'<@case test><@option ="variable with spaces">match<@else>no match<@/case>',
				{'test': 'value', 'variable with spaces': 'value'})
		self.assertEqual('match', result)
	## Tests 9-11b: None, a missing key and '' all behave as the same empty
	## value when a case subject is compared against a variable option.
	def test_variable_option9(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': None, 'var': None})
		self.assertEqual('match', result)
	def test_variable_option9a(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': None, })
		self.assertEqual('match', result)
	def test_variable_option9b(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': None, 'var': ''})
		self.assertEqual('match', result)
	def test_variable_option10(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{})
		self.assertEqual('match', result)
	def test_variable_option10a(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'var': None})
		self.assertEqual('match', result)
	def test_variable_option10b(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'var': ''})
		self.assertEqual('match', result)
	def test_variable_option11(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': '', 'var': None})
		self.assertEqual('match', result)
	def test_variable_option11a(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': '', })
		self.assertEqual('match', result)
	def test_variable_option11b(self):
		result = substitute('@',
				'<@case test><@option =var>match<@else>no match<@/case>',
				{'test': '', 'var': ''})
		self.assertEqual('match', result)
	def test_variable_option12(self):
		# It is probably good to test attribute variable options like we do here,
		# but this example helped illustrate a problem in 1.63. We were not
		# resetting the isVariableOption flag, so all subsequent options were
		# being looked up as variable options. The problem was resolved after
		# version 1.63.
		class c(object): pass
		i = c()
		i.attr1 = 'bob'
		i.attr2 = 'fred'
		result = tagsub.substitute('@',
				'<@case name><@option =i.attr1>attr1<@option =i.attr2>attr2<@/case> <@case case_val><@option val1>val1<@option val2>val2<@option val3>val3<@option val4>val4<@else>none<@/case>',
				{'i': i, 'case_val': 'val3', 'name': 'fred'})
		self.assertEqual('attr2 val3', result)
	def test_variable_option13(self):
		## A composite option may mix a variable option with a literal.
		result = tagsub.substitute(
			'@',
			'<@case test><@option =varname,val>True<@/case>',
			{'test': 'val', 'varname': 'otherval'}
		)
		self.assertEqual('True', result)
	def test_variable_option_error1(self):
		## Stray character before '=' in a variable option is rejected.
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'13(1,13)',
				substitute, '@',
				'<@case test><@option x=value>match<@else>no match<@/case>',
				{'test': 'value', 'value': 'value'})
	def test_variable_option_error2(self):
		## Quoted text fused onto the variable name is rejected.
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'13(1,13)',
				substitute, '@',
				'<@case test><@option =value"x">match<@else>no match<@/case>',
				{'test': 'value', 'value': 'value'})
	def test_variable_option_error3(self):
		## A second token after the variable name is rejected.
		self.assertRaisesAndMatchesTraceback(InvalidTagKeyName,
				'13(1,13)',
				substitute, '@',
				'<@case test><@option =value "x">match<@else>no match<@/case>',
				{'test': 'value', 'value': 'value'})
	## Composite options: a comma-separated option list matches when any
	## member equals the case value.
	def test_composite_option1(self):
		result = substitute('@',
				'<@case value><@option T,F>checked<@else>unchecked<@/case>',
				{})
		self.assertEqual('unchecked', result)
	def test_composite_option1a(self):
		result = substitute('@',
				'<@case value><@option T,F>checked<@else>unchecked<@/case>',
				{'value': ''})
		self.assertEqual('unchecked', result)
	def test_composite_option1b(self):
		result = substitute('@',
				'<@case value><@option T,F>checked<@else>unchecked<@/case>',
				{'value': None})
		self.assertEqual('unchecked', result)
	def test_composite_option1c(self):
		result = substitute('@',
				'<@case value><@option T,F>checked<@else>unchecked<@/case>',
				{'value': 'T'})
		self.assertEqual('checked', result)
	def test_composite_option1d(self):
		result = substitute('@',
				'<@case value><@option T,F>checked<@else>unchecked<@/case>',
				{'value': 'F'})
		self.assertEqual('checked', result)
	def test_composite_option2(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'a'})
		self.assertEqual('vowel', result)
	def test_composite_option2a(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'b'})
		self.assertEqual('consonant', result)
	def test_composite_option2b(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'e'})
		self.assertEqual('vowel', result)
	def test_composite_option2c(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'i'})
		self.assertEqual('vowel', result)
	def test_composite_option2d(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'o'})
		self.assertEqual('vowel', result)
	def test_composite_option2e(self):
		result = substitute('@',
				'<@case letter><@option a,e,i,o,u>vowel<@else>consonant<@/case>',
				{'letter': 'u'})
		self.assertEqual('vowel', result)
def test_composite_option3(self):
result = substitute('@',
'<@case letter><@option a,e,i,=l,o,u>match<@else>no match<@/case>',
{'letter': 'i', 'l': 'q'})
self.assertEqual('match', result)
def test_composite_option3a(self):
result = substitute('@',
'<@case letter><@option a,e,i,=l,o,u>match<@else>no match<@/case>',
{'letter': 'p', 'l': 'q'})
self.assertEqual('no match', result)
def test_composite_option3b(self):
result = substitute('@',
'<@case letter><@option a,e,i,=l,o,u>match<@else>no match<@/case>',
{'letter': 'q', 'l': 'q'})
self.assertEqual('match', result)
def test_composite_option3c(self):
result = substitute('@',
'<@case letter><@option a,"",i,=l,o,u>match<@else>no match<@/case>',
{'letter': None, 'l': 'q'})
self.assertEqual('match', result)
def test_composite_option4(self):
result = substitute('@',
'<@case test><@option 1,2,3>match<@else>no match<@/case>',
{'test': 2})
self.assertEqual('match', result)
def test_composite_option5(self):
result = substitute('@',
'<@case test><@option "1","2","3">match<@else>no match<@/case>',
{'test': 2})
self.assertEqual('match', result)
def test_composite_option6(self):
result = substitute('@',
'<@case test><@option =1,="2",=3>match<@else>no match<@/case>',
{'test': 'value', '1': 'v1', '2': 'value'})
self.assertEqual('match', result)
def test_composite_option6a(self):
self.assertRaisesAndMatchesTraceback(KeyError, '13(1,13)',
substitute, '@',
'<@case test><@option =1,="2",=3>match<@else>no match<@/case>',
{'test': 'value', '1': 'v1', '2': 'value'}, doStrictKeyLookup=True)
class tagsub_if_tag_children(tagsub_TestCase):
    """Syntax errors for misplaced branch tags inside an <@if> block."""

    def test_multiple_else_tags(self):
        """A second <@else> inside one <@if> is a template syntax error."""
        self.assertRaisesAndMatchesTraceback(
            TagsubTemplateSyntaxError, '30(1,30)', substitute, '@',
            "<@if test>Test<@else>testElse<@else>Bad Else<@/if>",
            {"test": True})

    def test_elif_after_else_tags(self):
        """An <@elif> that follows <@else> is a template syntax error."""
        self.assertRaisesAndMatchesTraceback(
            TagsubTemplateSyntaxError, '30(1,30)', substitute, '@',
            "<@if test>Test<@else>testElse<@elif test2>Bad Else<@/if>",
            {"test": True})
class tagsub_case_tag_children(tagsub_TestCase):
    """Syntax errors for misplaced child tags inside a <@case> block."""

    def test_multiple_else_tags(self):
        """A second <@else> inside one <@case> is a template syntax error."""
        self.assertRaisesAndMatchesTraceback(
            TagsubTemplateSyntaxError, '32(1,32)', substitute, '@',
            "<@case test>Test<@else>testElse<@else>Bad Else<@/case>",
            {"test": True})

    def test_option_after_else_tags(self):
        """An <@option> that follows <@else> is a template syntax error."""
        self.assertRaisesAndMatchesTraceback(
            TagsubTemplateSyntaxError, '32(1,32)', substitute, '@',
            "<@case test>Test<@else>testElse<@option test2>Bad Else<@/case>",
            {"test": True})

    def test_other_tag_after_else_tags(self):
        """A plain substitution tag as a direct <@case> child is rejected."""
        self.assertRaisesAndMatchesTraceback(
            TagsubTemplateSyntaxError, '13(1,13)', substitute, '@',
            "<@case test><@test>Test<@else>testElse<@ test2>Bad Else<@/case>",
            {"test": True})
class tagsub_blank_line_suppression(tagsub_TestCase):
def test_blank_line_suppression1(self):
    """A whitespace-only template without tags and without a newline is untouched."""
    self.assertEqual(' \t', substitute('@', ' \t', {}))
def test_blank_line_suppression2(self):
    """A whitespace-only line without tags keeps its trailing newline."""
    self.assertEqual(' \t\n', substitute('@', ' \t\n', {}))
def test_blank_line_suppression3(self):
    """An empty substitution on a newline-less line leaves the whitespace."""
    self.assertEqual(' \t', substitute('@', ' <@test>\t', {}))
def test_blank_line_suppression4(self):
    """A tag-only whitespace line that ends in a newline is fully suppressed."""
    self.assertEqual('', substitute('@', ' <@test>\t\n', {}))
def test_blank_line_suppression5(self):
    """Loop-control tag lines vanish while loop-body lines keep their content."""
    template = """line 1
<@loop test>
<@data>
<@/loop>
trailing line"""
    expected = """line 1
loop 1
loop 2
loop 3
trailing line"""
    context = {'test': [{'data': 'loop %d' % i} for i in (1, 2, 3)]}
    self.assertEqual(expected, substitute('@', template, context))
def test_blank_line_suppression6(self):
    """Substitution-tag lines that produce only whitespace are also suppressed."""
    template = """line 1
<@loop test>
<@data>
<@empty_tag>
<@/loop>
trailing line"""
    expected = """line 1
loop 1
loop 2
loop 3
trailing line"""
    context = {'test': [{'data': 'loop %d' % i} for i in (1, 2, 3)]}
    self.assertEqual(expected, substitute('@', template, context))
def test_blank_line_suppression7(self):
    """Loop tag lines and the supporting if/else/endif tag lines are suppressed."""
    template = """line 1
<@loop test>
<@data>
<@if empty_tag>
empty tag text
<@else>
no text
<@/if>
<@/loop>
trailing line"""
    expected = """line 1
loop 1
no text
loop 2
no text
loop 3
no text
trailing line"""
    context = {'test': [{'data': 'loop %d' % i} for i in (1, 2, 3)]}
    self.assertEqual(expected, substitute('@', template, context))
def test_blank_line_suppression8(self):
    """Lines carrying non-whitespace text next to a tag are NOT suppressed."""
    template = """line 1
<@loop test>
<@data>
on if line<@if empty_tag>
empty tag text
<@else>
no text
<@/if>
<@/loop>
trailing line"""
    expected = """line 1
loop 1
on if line
no text
loop 2
on if line
no text
loop 3
on if line
no text
trailing line"""
    context = {'test': [{'data': 'loop %d' % i} for i in (1, 2, 3)]}
    self.assertEqual(expected, substitute('@', template, context))
def test_blank_line_suppression9(self):
    """Plain text lines (no tags) inside control blocks are preserved as-is."""
    template = """line 1
<@loop test>
<@data>
<@if empty_tag>
empty tag text
<@else>
no text
<@/if>
<@/loop>
trailing line"""
    expected = """line 1
loop 1
no text
loop 2
no text
loop 3
no text
trailing line"""
    context = {'test': [{'data': 'loop %d' % i} for i in (1, 2, 3)]}
    self.assertEqual(expected, substitute('@', template, context))
def test_blank_line_suppression10(self):
# We had an issue where the | |
so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
model.missing_keys = missing_keys
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
logger.info('\n'.join(error_msgs))
return model
class BertModel(PreTrainedBertModel):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
                of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first character of the
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
    """

    def __init__(self, config):
        """Build the embedding layer, transformer encoder and pooler from `config`."""
        super(BertModel, self).__init__(config)
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        # Weight initialization scheme is defined on the base class.
        self.apply(self.init_bert_weights)

    def rescale_some_parameters(self):
        """Divide each layer's output projection weights by sqrt(2 * (layer_id + 1)).

        Depth-dependent rescaling of the attention-output and FFN-output dense
        weights, applied in place on the weight data.
        """
        for layer_id, layer in enumerate(self.encoder.layer):
            layer.attention.output.dense.weight.data.div_(
                math.sqrt(2.0 * (layer_id + 1)))
            layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))

    def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
        """Convert a 2D or 3D attention mask into the 4D additive form used by the encoder.

        A missing mask defaults to all-ones (attend everywhere).
        NOTE(review): the token_type_ids default computed here is never used by
        this method — only the mask is returned.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        if attention_mask.dim() == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif attention_mask.dim() == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise NotImplementedError
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
        """Embed the inputs, run the encoder, and pool the last hidden state.

        Returns (encoded_layers, pooled_output); `encoded_layers` is a list of
        per-layer hidden states, or just the last one when
        `output_all_encoded_layers` is False.
        """
        extended_attention_mask = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      mask_qkv=mask_qkv, seg_ids=token_type_ids,
                                      key_history=key_history, value_history=value_history)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertModelIncr(BertModel):
    """BertModel variant for incremental decoding: forward() accepts previously
    computed embeddings/encoder states and also returns the embedding output."""

    def __init__(self, config):
        super(BertModelIncr, self).__init__(config)

    def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
                prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None):
        """Run one (possibly incremental) encoder pass.

        Returns (embedding_output, encoded_layers, pooled_output); when
        `output_all_encoded_layers` is False, `encoded_layers` is only the
        final layer's hidden states.
        """
        mask_4d = self.get_extended_attention_mask(
            input_ids, token_type_ids, attention_mask)
        embedding_output = self.embeddings(
            input_ids, token_type_ids, position_ids, task_idx=task_idx)
        encoded_layers = self.encoder(
            embedding_output,
            mask_4d,
            output_all_encoded_layers=output_all_encoded_layers,
            prev_embedding=prev_embedding,
            prev_encoded_layers=prev_encoded_layers,
            mask_qkv=mask_qkv,
            seg_ids=token_type_ids)
        pooled_output = self.pooler(encoded_layers[-1])
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return embedding_output, encoded_layers, pooled_output
class MargeDiscriminator(nn.Module):
def __init__(self, bert_model, label, loss_idx, pool_func='ls', device='cuda', hidden_size=768, eps=1e-7, fp16=False):
    """Discriminator that scores candidate representations against cached
    summary "slot" representations produced by `bert_model`.

    :param bert_model: encoder used by init_slot_rep to embed summaries
    :param label: regression target used by get_loss (MSE)
    :param loss_idx: instance column to supervise; negative means use the pooled score
    :param pool_func: pooling mode — 'avg', 'max' or 'ls' (sum-of-squares / sum)
    :param device: torch device string for tensors created by this module
    :param hidden_size: encoder hidden size, used for scaling and reshaping
    :param eps: numerical floor for pooling/clamping
    :param fp16: when True, _adapt_var casts tensors to half precision
    """
    super(MargeDiscriminator, self).__init__()
    self.bert_model = bert_model
    self.hidden_size = hidden_size
    self.pool_func = pool_func
    self.eps = eps
    self.label = label
    self.loss_idx = loss_idx
    self.device = device
    self.fp16 = fp16
    # attributes that should be updated per batch (set by init_slot_rep)
    self.slot_rep = None
    self.slot_mask = None
def _match(self, cand_rep, slot_rep, instc_mask):
    """
    Scaled dot-product match of each candidate against every slot, squashed
    through a sigmoid and zeroed at masked slots.

    :param cand_rep: d_batch * d_embed
    :param slot_rep: d_batch * max_ns * d_embed
    :param instc_mask: d_batch * max_ns
    :return:
        score: d_batch * max_ns
    """
    cand_rep = torch.unsqueeze(cand_rep, dim=-1)  # d_batch * d_embed * 1
    # K > 1 when the candidate batch stacks K candidates per slot group
    # (candidate batch size is a multiple of the slot batch size).
    K = int(cand_rep.size(0) / slot_rep.size(0))
    if K > 1:
        d_embed = cand_rep.size(-2)
        d_batch = slot_rep.size(0)
        slot_rep = torch.unsqueeze(slot_rep, dim=1)  # d_batch * 1 * max_ns * d_embed
        cand_rep = torch.reshape(cand_rep, [d_batch, K, d_embed, 1])  # d_batch * K * d_embed * 1
        # print(f'K: {K}')
        instc_score_in = torch.matmul(slot_rep, cand_rep)  # d_batch * K * max_ns * 1
        instc_score_in = torch.squeeze(instc_score_in, dim=-1)  # d_batch * K * max_ns
        # scale logits like scaled dot-product attention
        instc_score_in = instc_score_in / np.sqrt(self.hidden_size)
        instc_score = torch.sigmoid(instc_score_in)  # d_batch * K * max_ns
        # mask: zero scores at padded slots
        instc_mask = torch.unsqueeze(instc_mask, dim=1)  # d_batch * 1 * max_ns
        instc_score = instc_score * instc_mask
        instc_score = torch.reshape(instc_score, (-1, instc_score.size(-1)))  # (d_batch * K) * max_ns
    else:
        instc_score_in = torch.matmul(slot_rep, cand_rep)  # d_batch * max_ns * 1
        instc_score_in = torch.squeeze(instc_score_in, dim=-1) / np.sqrt(self.hidden_size)  # d_batch * max_ns
        instc_score = torch.sigmoid(instc_score_in)
        instc_score = instc_score * instc_mask
    return instc_score
def _pool(self, instc_score, instc_mask=None):
"""
:param instc_score: d_batch * max_ns
:param instc_mask: d_batch * max_ns
:return:
group_score: d_batch * 1
"""
if self.pool_func == 'avg':
nom = torch.sum(instc_score, dim=-1, keepdim=True) # d_batch * 1
n_instc = torch.sum(instc_mask, dim=-1, keepdims=True) # d_batch * 1
return nom / n_instc
elif self.pool_func == 'max':
instc_score.masked_fill((1-instc_mask).type(torch.uint8), float('-inf'))
# instc_score[1-instc_mask] = float('-inf')
return torch.max(instc_score, dim=-1)[0]
elif self.pool_func == 'ls':
nom = torch.sum(instc_score ** 2, dim=-1, keepdim=True)
denom = torch.sum(instc_score, dim=-1, keepdim=True) + self.eps
return nom / denom
else:
raise ValueError(f'Invalid pool_func: {self.pool_func}')
def _adapt_var(self, var):
if self.fp16:
return var.half()
else:
return var.float()
def get_loss(self, pred):
pred = pred.view(-1)
label = torch.tensor(pred.shape[0] * [self.label], device=self.device, dtype=torch.float)
label = self._adapt_var(label)
# print(f'label: {label.size()}, pred: {pred.size()}')
loss = MSELoss()(pred, label)
return loss
def get_rand_slot_rep(self, d_batch, max_n_slot):
"""
For debug.
"""
slot_rep = torch.rand(size=(d_batch, max_n_slot, 768), device=self.device, dtype=torch.half)
return slot_rep
# def _forward(self, summ_id, summ_seg_id, summ_mask, slot_id, slot_mask, cand_rep):
# if self.new_batch or self.slot_rep is None: # only update for new data
# self.get_slot_rep(summ_id, summ_seg_id, summ_mask, slot_id, slot_mask)
def _forward_unit_test_0(self, cand_rep):
    """
    For unit test. Forward with random slot representations.

    :param cand_rep: d_batch * d_embed candidate representations
    :return: (loss, group_score, instc_score)
    """
    # Random slot bank stands in for the BERT-derived slots from init_slot_rep.
    slot_rep = self.get_rand_slot_rep(d_batch=cand_rep.size(0), max_n_slot=8)
    d_embed = cand_rep.size()[-1]  # NOTE(review): computed but unused
    cand_rep = torch.unsqueeze(cand_rep, dim=-1)  # d_batch * d_embed * 1
    instc_score_in = torch.matmul(slot_rep, cand_rep)  # d_batch * max_ns * 1
    # scaled dot-product logits, then sigmoid per instance
    instc_score_in = torch.squeeze(instc_score_in, dim=-1) / np.sqrt(self.hidden_size)  # d_batch * max_ns
    instc_score = torch.sigmoid(instc_score_in)
    # instc_score = self._match(cand_rep, slot_rep, instc_mask=slot_mask)
    group_score = self._pool(instc_score)  # d_batch * 1
    group_score = torch.clamp(group_score, min=self.eps, max=1-self.eps)  # in (0, 1)
    # print(f'group_score: {group_score[0]}\ninstc_score: {instc_score[0]}')
    if self.loss_idx >= 0:
        # supervise a single instance column instead of the pooled score
        pred = instc_score[self.loss_idx]  # NOTE(review): bound but recomputed in the call below
        loss = self.get_loss(pred=instc_score[self.loss_idx])
    else:
        loss = self.get_loss(pred=group_score)
    return loss, group_score, instc_score
def _forward_unit_test_1(self, cand_rep):
"""
For unit test. Forward with random scores.
cand_rep: d_batch * d_embed
"""
cand_rep = torch.sigmoid(cand_rep)
group_score = torch.max(cand_rep, dim=-1)[0] # d_batch * 1
group_score = torch.clamp(group_score, min=self.eps, max=1-self.eps) # in (0, 1)
loss = self.get_loss(pred=group_score)
return loss, group_score, None
def init_slot_rep(self, summ_id, summ_seg_id, summ_mask, slot_id, slot_mask):
    """Encode the summaries with BERT and cache masked slot representations.

    Sets self.slot_rep (d_batch * max_n_slot * hidden_size) and
    self.slot_mask for use by subsequent forward() calls.
    """
    max_summ_seq_len = summ_id.size(1)
    # with torch.cuda.device(0):
    summ_rep = self.bert_model(summ_id,
                               token_type_ids=summ_seg_id,
                               attention_mask=summ_mask)[0].view(-1, max_summ_seq_len, self.hidden_size)
    # gather the slot-token positions from each encoded sequence
    slot_rep = summ_rep[torch.arange(summ_rep.size(0)).unsqueeze(1), slot_id]
    self.slot_mask = self._adapt_var(slot_mask)
    self.slot_rep = slot_rep * self.slot_mask[:, :, None]
    # Fix: Tensor.detach() is out-of-place and its result was discarded, so the
    # cached slots stayed attached to the autograd graph. Rebind the detached
    # tensor so matching does not backpropagate into the summary encoder.
    self.slot_rep = self.slot_rep.detach()
def forward(self, cand_rep):
assert (self.slot_rep is not None) or (self.slot_mask | |
"""
Environment.py
"""
__author__ = "<EMAIL>"
import numpy as np
from scipy import stats
import subprocess
import networkx as nx
from helper import pretty, softmax
from Traffic import Traffic
OMTRAFFIC = 'Traffic.txt'
OMBALANCING = 'Balancing.txt'
OMROUTING = 'Routing.txt'
OMDELAY = 'Delay.txt'
TRAFFICLOG = 'TrafficLog.csv'
BALANCINGLOG = 'BalancingLog.csv'
REWARDLOG = 'rewardLog.csv'
WHOLELOG = 'Log.csv'
OMLOG = 'omnetLog.csv'
# FROM MATRIX
def matrix_to_rl(matrix):
    """Return the matrix entries that are not -1 (i.e. off-diagonal data) as a flat array."""
    keep = matrix != -1
    return matrix[keep]

# The log view uses the same flattened, diagonal-free representation.
matrix_to_log_v = matrix_to_rl
def matrix_to_omnet_v(matrix):
    """Flatten a matrix row-major into the 1-D vector format Omnet files expect."""
    return np.asarray(matrix).flatten()
def vector_to_file(vector, file_name, action):
    """Write `vector` as one comma-separated line to `file_name` (mode `action`)."""
    line = ','.join(pretty(value) for value in vector)
    with open(file_name, action) as fh:
        return fh.write(line + '\n')
# FROM FILE
def file_to_csv(file_name):
    """Read the first line of `file_name`, stripped of whitespace and trailing commas."""
    with open(file_name, 'r') as fh:
        first_line = fh.readline()
    return first_line.strip().strip(',')
def csv_to_matrix(string, nodes_num):
    """Parse a comma-separated string into a (nodes_num x nodes_num) float matrix."""
    values = [float(token) for token in string.split(',')[:nodes_num ** 2]]
    return np.asarray(values).reshape(nodes_num, nodes_num)
def csv_to_lost(string):
    """Return the final CSV field (the lost-packet count) as a float."""
    return float(string.rsplit(',', 1)[-1])
# FROM RL
def rl_to_matrix(vector, nodes_num):
    """Rebuild the square matrix from a diagonal-free RL vector, restoring -1 on the diagonal."""
    rows = np.split(np.asarray(vector), nodes_num)
    rows = [np.insert(row, i, -1) for i, row in enumerate(rows)]
    return np.vstack(rows)
# TO RL
# STATUM = 'T'  : state is the per-node-pair traffic only
# STATUM = 'RT' : state is the per-node-pair balancing followed by the per-node-pair traffic
# Returns the RL state; there are two layouts, selected by env.STATUM.
def rl_state(env):
    """Build the RL observation: traffic only ('T'), or balancing followed by traffic ('RT')."""
    if env.STATUM == 'T':
        return matrix_to_rl(env.env_T)
    if env.STATUM == 'RT':
        return np.concatenate((matrix_to_rl(env.env_B), matrix_to_rl(env.env_T)))
# Compute the reward, derived mainly from the delay matrix.
def rl_reward(env):
    """Compute the reward (negative cost) from the delay matrix env.env_D.

    Unreachable pairs (np.inf delay) are capped at len(delay) times the worst
    finite delay so the aggregate statistics stay defined. The aggregation is
    selected by env.PRAEMIUM: 'AVG', 'MAX', 'AXM' (mean of avg and max),
    'GEO' (geometric mean) or 'LOST' (negative lost-packet count).

    :raises ValueError: on an unknown PRAEMIUM setting (previously this fell
        through to an UnboundLocalError on `reward`).
    """
    # NOTE(review): np.asarray of an ndarray returns the same object, so the
    # inf-capping below mutates env.env_D in place — preserved behavior.
    delay = np.asarray(env.env_D)
    # mask the unreachable (infinite-delay) entries ...
    mask = delay == np.inf
    # ... and replace them with a large finite penalty: len(delay) times the
    # worst finite delay (the slowest real path dominates).
    delay[mask] = len(delay)*np.max(delay[~mask])
    if env.PRAEMIUM == 'AVG':
        reward = -np.mean(matrix_to_rl(delay))
    elif env.PRAEMIUM == 'MAX':
        reward = -np.max(matrix_to_rl(delay))
    elif env.PRAEMIUM == 'AXM':
        reward = -(np.mean(matrix_to_rl(delay)) + np.max(matrix_to_rl(delay)))/2
    elif env.PRAEMIUM == 'GEO':
        reward = -stats.gmean(matrix_to_rl(delay))
    elif env.PRAEMIUM == 'LOST':
        reward = -env.env_L
    else:
        raise ValueError('Unknown PRAEMIUM: %s' % env.PRAEMIUM)
    return reward
# WRAPPER ITSELF
def omnet_wrapper(env):
    """Run the Omnet simulator for `env` and append its status to the omnet log.

    On success the log line is 'ok'; when the simulator output contains
    'Error', the error lines (from the fifth onward) are comma-joined instead.
    """
    if env.ENV == 'label':
        sim = 'router'
    elif env.ENV == 'balancing':
        sim = 'balancer'
    prefix = ''
    if env.CLUSTER == 'arvei':
        prefix = '/scratch/nas/1/giorgio/rlnet/'
    simexe = prefix + 'omnet/' + sim + '/networkRL'
    simfolder = prefix + 'omnet/' + sim + '/'
    simini = simfolder + 'omnetpp.ini'
    try:
        omnet_output = subprocess.check_output(
            [simexe, '-n', simfolder, simini, env.folder + 'folder.ini']).decode()
    except subprocess.CalledProcessError as e:
        # Fix: the original bare `except Exception` assumed `.stdout`, which
        # only CalledProcessError carries — anything else (e.g. a missing
        # executable) raised a confusing AttributeError instead.
        omnet_output = e.stdout.decode()
    if 'Error' in omnet_output:
        omnet_output = omnet_output.replace(',', '')
        # Fix: the original used `_ is not ''` — identity comparison against a
        # str literal (a SyntaxWarning on modern Python); compare by value.
        error_lines = [line.strip() for line in omnet_output.split('\n') if line != '']
        omnet_output = ','.join(error_lines[4:])
    else:
        omnet_output = 'ok'
    vector_to_file([omnet_output], env.folder + OMLOG, 'a')
def ned_to_capacity(env):
    """Sum the link capacities declared in the simulator's NED topology file.

    SlowChannel counts 3, MediumChannel 5, FastChannel 10, and any other
    '<-->' Channel line 10. Returns None when no capacity was found.
    """
    if env.ENV == 'label':
        sim = 'router'
    elif env.ENV == 'balancing':
        sim = 'balancer'
    ned_path = 'omnet/' + sim + '/NetworkAll.ned'
    # Order matters: the specific channel kinds must be tested before the
    # generic 'Channel' fallback.
    channel_weights = (
        ('SlowChannel', 3),
        ('MediumChannel', 5),
        ('FastChannel', 10),
        ('Channel', 10),
    )
    capacity = 0
    with open(ned_path) as nedfile:
        for line in nedfile:
            if '<-->' not in line:
                continue
            for token, weight in channel_weights:
                if token in line:
                    capacity += weight
                    break
    return capacity or None
# balancing environment
class OmnetBalancerEnv():
    """Gym-like environment wrapping the Omnet 'balancer' simulation.

    All exchange with the simulator happens through files inside `folder`
    (Traffic.txt, Balancing.txt, Delay.txt, ...): `step` writes the action,
    runs one simulation via omnet_wrapper, then reads back delay and loss.
    """

    def __init__(self, DDPG_config, folder):
        """Configure dimensions, traffic generator and state matrices from DDPG_config."""
        self.ENV = 'balancing'
        self.ROUTING = 'Balancer'
        self.folder = folder
        self.ACTIVE_NODES = DDPG_config['ACTIVE_NODES']
        self.ACTUM = DDPG_config['ACTUM']  # 'NEW' (absolute) or 'DELTA' (incremental) actions
        self.a_dim = self.ACTIVE_NODES**2 - self.ACTIVE_NODES  # routing table minus diagonal
        self.s_dim = self.ACTIVE_NODES**2 - self.ACTIVE_NODES  # traffic minus diagonal
        self.STATUM = DDPG_config['STATUM']
        if self.STATUM == 'RT':
            self.s_dim *= 2  # traffic + routing table minus diagonals
        if 'MAX_DELTA' in DDPG_config.keys():
            self.MAX_DELTA = DDPG_config['MAX_DELTA']  # per-step bound used when ACTUM == 'DELTA'
        self.PRAEMIUM = DDPG_config['PRAEMIUM']  # reward aggregation mode (see rl_reward)
        capacity = self.ACTIVE_NODES * (self.ACTIVE_NODES -1)
        self.TRAFFIC = DDPG_config['TRAFFIC']
        self.tgen = Traffic(self.ACTIVE_NODES, self.TRAFFIC, capacity)
        self.CLUSTER = DDPG_config['CLUSTER'] if 'CLUSTER' in DDPG_config.keys() else False
        # -1 marks the (unused) diagonal in every matrix below.
        self.env_T = np.full([self.ACTIVE_NODES]*2, -1.0, dtype=float)  # traffic
        self.env_B = np.full([self.ACTIVE_NODES]*2, -1.0, dtype=float)  # balancing
        self.env_D = np.full([self.ACTIVE_NODES]*2, -1.0, dtype=float)  # delay
        self.env_L = -1.0  # lost packets
        self.counter = 0  # number of step() calls so far

    def upd_env_T(self, matrix):
        """Replace the traffic matrix, forcing -1 on the diagonal."""
        self.env_T = np.asarray(matrix)
        np.fill_diagonal(self.env_T, -1)

    def upd_env_B(self, matrix):
        """Replace the balancing matrix, forcing -1 on the diagonal."""
        self.env_B = np.asarray(matrix)
        np.fill_diagonal(self.env_B, -1)

    def upd_env_D(self, matrix):
        """Replace the delay matrix, forcing -1 on the diagonal."""
        self.env_D = np.asarray(matrix)
        np.fill_diagonal(self.env_D, -1)

    def upd_env_L(self, number):
        """Record the lost-packet count from the last simulation."""
        self.env_L = number

    def logheader(self):
        """Write the CSV header ('t<i>-<j>', 'r...', 'd...', state/action columns) to the main log."""
        nice_matrix = np.chararray([self.ACTIVE_NODES]*2, itemsize=20)
        for i in range(self.ACTIVE_NODES):
            for j in range(self.ACTIVE_NODES):
                nice_matrix[i][j] = str(i) + '-' + str(j)
        np.fill_diagonal(nice_matrix, '_')
        # keep only off-diagonal labels, matching matrix_to_log_v's layout
        nice_list = list(nice_matrix[(nice_matrix!=b'_')])
        th = ['t' + _.decode('ascii') for _ in nice_list]
        rh = ['r' + _.decode('ascii') for _ in nice_list]
        dh = ['d' + _.decode('ascii') for _ in nice_list]
        if self.STATUM == 'T':
            sh = ['s' + _.decode('ascii') for _ in nice_list]
        elif self.STATUM == 'RT':
            sh = ['sr' + _.decode('ascii') for _ in nice_list] + ['st' + _.decode('ascii') for _ in nice_list]
        ah = ['a' + _.decode('ascii') for _ in nice_list]
        header = ['counter'] + th + rh + dh + ['lost'] + sh + ah + ['reward']
        vector_to_file(header, self.folder + WHOLELOG, 'w')

    def render(self):
        """No-op; rendering is not supported for this environment."""
        return True

    def reset(self):
        """Initialize balancing/traffic files and return the first state.

        Only valid once per instance: returns None after the first step.
        """
        if self.counter != 0:
            return None
        self.logheader()
        # balancing: start from a uniform 50/50 split
        self.upd_env_B(np.full([self.ACTIVE_NODES]*2, 0.50, dtype=float))
        if self.ACTUM == 'DELTA':
            # DELTA actions are applied on top of the file contents, so seed it
            vector_to_file(matrix_to_omnet_v(self.env_B), self.folder + OMBALANCING, 'w')
        # traffic
        self.upd_env_T(self.tgen.generate())
        vector_to_file(matrix_to_omnet_v(self.env_T), self.folder + OMTRAFFIC, 'w')
        return rl_state(self)

    def step(self, action):
        """Apply `action`, run one simulation, and return (new_state, reward, 0)."""
        self.counter += 1
        # define action: NEW replaces the balancing outright ...
        if self.ACTUM == 'NEW':
            # bound the action to [0, 1]
            self.upd_env_B(rl_to_matrix(np.clip(action, 0, 1), self.ACTIVE_NODES))
        # ... DELTA nudges the current balancing by at most MAX_DELTA
        if self.ACTUM == 'DELTA':
            # bound the action
            self.upd_env_B(rl_to_matrix(np.clip(action * self.MAX_DELTA + matrix_to_rl(self.env_B), 0, 1), self.ACTIVE_NODES))
        # write to file input for Omnet: Balancing
        vector_to_file(matrix_to_omnet_v(self.env_B), self.folder + OMBALANCING, 'w')
        # execute omnet
        omnet_wrapper(self)
        # read Omnet's output: Delay and Lost
        om_output = file_to_csv(self.folder + OMDELAY)
        self.upd_env_D(csv_to_matrix(om_output, self.ACTIVE_NODES))
        self.upd_env_L(csv_to_lost(om_output))
        reward = rl_reward(self)
        # log everything to file (rewards are logged as positive costs)
        vector_to_file([-reward], self.folder + REWARDLOG, 'a')
        cur_state = rl_state(self)
        log = np.concatenate(([self.counter], matrix_to_log_v(self.env_T), matrix_to_log_v(self.env_B), matrix_to_log_v(self.env_D), [self.env_L], cur_state, action, [-reward]))
        vector_to_file(log, self.folder + WHOLELOG, 'a')
        # generate traffic for next iteration
        self.upd_env_T(self.tgen.generate())
        # write to file input for Omnet: Traffic, or do nothing if static
        if self.TRAFFIC.split(':')[0] not in ('STAT', 'STATEQ', 'FILE'):
            vector_to_file(matrix_to_omnet_v(self.env_T), self.folder + OMTRAFFIC, 'w')
        new_state = rl_state(self)
        # return new status and reward (third element: done flag, always 0)
        return new_state, reward, 0

    def end(self):
        """No-op; no cleanup is required."""
        return
# label environment
class OmnetLinkweightEnv():
def __init__(self, DDPG_config, folder):
self.ENV = 'label'
self.ROUTING = 'Linkweight'
self.folder = folder
# nodes = 14
self.ACTIVE_NODES = DDPG_config['ACTIVE_NODES']
self.ACTUM = DDPG_config['ACTUM']
# 利用 networkX 创建 网络拓扑图 graph
topology = 'omnet/router/NetworkAll.matrix'
self.graph = nx.Graph(np.loadtxt(topology, dtype=int))
# 这里可以画出来 graph 的拓扑
import matplotlib.pyplot as plt
nx.draw(self.graph)
plt.show()
if self.ACTIVE_NODES != self.graph.number_of_nodes():
return False
ports = 'omnet/router/NetworkAll.ports'
# self.ports
# [[-1 0 1 2 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1]
# [0 - 1 1 - 1 - 1 - 1 - 1 2 - 1 - 1 - 1 - 1 - 1 - 1]
# [0 1 - 1 - 1 - 1 2 - 1 - 1 - 1 - 1 - 1 - 1 - 1 - 1]
# [0 - 1 - 1 - 1 1 - 1 - 1 - 1 2 - 1 - 1 - 1 - 1 - 1]
# [-1 - 1 - 1 0 - 1 1 2 - 1 - 1 - 1 - 1 - 1 - 1 - 1]
# [-1 - 1 0 - 1 1 - 1 - 1 - 1 - 1 - 1 2 - 1 3 - 1]
# [-1 - 1 - 1 - 1 0 - 1 - 1 1 - 1 - 1 - 1 - 1 - 1 - 1]
# [-1 0 - 1 - 1 - 1 - 1 1 - 1 - 1 2 - 1 - 1 - 1 - 1]
# [-1 - 1 - 1 0 - 1 - 1 - 1 - 1 - 1 - 1 - 1 1 - 1 2]
# [-1 - 1 - 1 - 1 - 1 - 1 - 1 0 - 1 - 1 1 2 - 1 3]
# [-1 - 1 - 1 - 1 - 1 0 - 1 - 1 - 1 1 - 1 - 1 - 1 - 1]
# [-1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 0 1 - 1 - 1 2 - 1]
# [-1 - 1 - 1 - 1 - 1 0 - 1 - 1 - 1 - 1 - 1 1 - 1 2]
# [-1 - 1 - 1 - 1 - 1 - 1 - 1 - 1 0 1 | |
<filename>src/models/xception_padding_do.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import h5py
import numpy as np
import tensorflow as tf
from keras_applications.imagenet_utils import _obtain_input_shape
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import SeparableConv2D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import get_file
from tensorflow.keras.utils import get_source_inputs
from . import NetType
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception_do(net_type, include_top=True, do_p=0.3, weights='imagenet',
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format='channels_last'` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`.
It should have exactly 3 inputs channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.backend() != 'tensorflow':
raise RuntimeError('The Xception model is only available with '
'the TensorFlow backend.')
if K.image_data_format() != 'channels_last':
warnings.warn('The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
require_flatten=False,
weights=None) # weights=None to prevent input channels equality check
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1', padding="same")(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2', padding='same')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(128, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
if net_type == NetType.mc:
residual = Dropout(do_p)(residual, training=True)
elif net_type == NetType.mc_df:
residual = Dropout(do_p, noise_shape=(residual.shape[0], 1, 1, residual.shape[-1]))(residual, training=True)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
if net_type == NetType.mc:
residual = Dropout(do_p)(residual, training=True)
elif net_type == NetType.mc_df:
residual = Dropout(do_p, noise_shape=(residual.shape[0], 1, 1, residual.shape[-1]))(residual, training=True)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(728, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
if net_type == NetType.mc:
residual = Dropout(do_p)(residual, training=True)
elif net_type == NetType.mc_df:
residual = Dropout(do_p, noise_shape=(residual.shape[0], 1, 1, residual.shape[-1]))(residual, training=True)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = layers.add([x, residual])
residual = Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
if net_type == NetType.mc:
residual = Dropout(do_p)(residual, training=True)
elif net_type == NetType.mc_df:
residual = Dropout(do_p, noise_shape=(residual.shape[0], 1, 1, residual.shape[-1]))(residual, training=True)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
if net_type == NetType.mc:
x = Dropout(do_p)(x, training=True)
elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
if net_type == NetType.mc:
x | |
res.shift = fit_res.shift
res.ro_amp = 10**(fit_res.power/20)
return True
def find_test_resonators_test(self, with_VNA=None, resonators=None):
    """
    Does a power sweep over the resonators to see if they have a qubit
    attached or not, and changes the state in the resonator object.

    Args:
        with_VNA (bool):
            use the VNA for the sweep. If None, autodetect from whether
            ``instr_VNA`` points to an instrument.
        resonators (list):
            resonator objects to classify. If None, all resonators of
            the device object are used.

    Returns:
        bool: True on completion, False if no device object was found.
    """
    if with_VNA is None:
        try:
            # An empty instrument reference means no VNA is connected.
            if self.instr_VNA.get_instr() == '':
                with_VNA = False
            else:
                with_VNA = True
        except Exception:
            with_VNA = False

    if resonators is None:
        try:
            device = self.instr_device.get_instr()
        except AttributeError:
            logging.warning('Could not find device resonators: '
                            'No device found for {}'.format(self.name))
            return False
        resonators = device.resonators

    # BUG FIX: this loop previously iterated over ``device.resonators``,
    # which raised a NameError whenever ``resonators`` was passed in
    # explicitly (the ``device`` local was only bound in the branch above).
    for res in resonators:
        freq = res.freq
        label = '_resonator_{}'.format(res.identifier)
        if res.type == 'test_resonator':
            powers = np.linspace(-20, 0.1, 3)
            f_step = 25e3
        else:
            powers = np.arange(-40, 0.1, 10)
            f_step = 25e3

        if with_VNA:
            VNA = self.instr_VNA.get_instr()
            VNA.start_frequency(freq - 20e6)
            VNA.stop_frequency(freq + 20e6)
            self.measure_VNA_power_sweep()  # not implemented yet
        else:
            if res.type == 'test_resonator':
                logging.warning('Heterodyne spectroscopy insufficient for '
                                'test resonators. Skipping')
                res.freq_low = res.freq
                continue
            freqs = np.arange(freq - 6e6, freq + 3e6, f_step)
            self.measure_resonator_power(freqs=freqs, powers=powers,
                                         analyze=False, label=label)

        fit_res = ma.Resonator_Powerscan_Analysis_test(
            label='Resonator_power_scan', close_fig=True, use_min=True)

        # Classify from the size of the power-dependent shift: a shift
        # larger than 200 kHz indicates a coupled qubit.
        # NOTE(review): the two warning messages in the else-branches below
        # look swapped (a found shift on a test resonator logs "No resonator
        # power shift found" and vice versa) — confirm against the
        # non-test variant of this method before changing them.
        shift = np.max(np.array([fit_res.shift1, fit_res.shift2]))
        if np.abs(shift) > 200e3:
            if res.type == 'unknown':
                res.type = 'qubit_resonator'
            elif res.type == 'qubit_resonator':
                print('Resonator {}: confirmed resonator shift.'
                      .format(res.identifier))
            else:
                logging.warning('No resonator power shift found for '
                                'resonator {}. Consider adding/removing '
                                'attenuation.'.format(res.identifier))
        else:
            if res.type == 'unknown':
                res.type = 'test_resonator'
            elif res.type == 'test_resonator':
                print('Resonator {}: confirmed test resonator'
                      .format(res.identifier))
                res.freq_low = res.freq
            else:
                logging.warning('Resonator shift found for test resonator '
                                '{}. Apperently not a test resonator.'
                                .format(res.identifier))

        # Store the low/high branch frequencies of whichever fit branch
        # showed the larger shift.
        index_f = np.argmax(np.array([fit_res.shift1, fit_res.shift2]))
        fit_res.f_low = np.array([fit_res.f_low1, fit_res.f_low2])
        res.freq_low = fit_res.f_low[index_f]
        fit_res.f_high = np.array([fit_res.f_high1, fit_res.f_high2])
        res.freq_high = fit_res.f_high[index_f]
        res.shift = shift
        # Convert the fitted power (dBm-like) to a pulse amplitude.
        res.ro_amp = 10**(fit_res.power/20)
    return True
def find_qubit_resonator_fluxline(self, with_VNA=None, dac_values=None,
                                  verbose=True, resonators=None):
    """
    --- WARNING: UPDATING PARAMETERS ONLY WORKS WITH DEVICE OBJECT! ---
    Does a resonator DAC scan with all qubit resonators and all fluxlines.

    For every qubit resonator, each flux bias line (FBL) is swept and the
    resonator arc is fitted; the fluxline with the largest arc amplitude
    is taken as the coupled one and its sweetspot current is stored on
    the resonator. Finally the results are copied onto the matching
    qubit objects of the device.

    Args:
        with_VNA (bool):
            use the VNA for the scans. If None, autodetect from whether
            ``instr_VNA`` points to an instrument.
        dac_values (array):
            bias currents to sweep. Defaults to -10 mA .. 10 mA in
            1 mA steps.
        verbose (bool):
            print a summary of all resonators when done.
        resonators (list):
            resonator objects to use. Defaults to the device's.

    Returns:
        bool: True on success, False if no device was found or a qubit
        matched more than one resonator.
    """
    if with_VNA is None:
        try:
            if self.instr_VNA.get_instr() == '':
                with_VNA = False
            else:
                with_VNA = True
        except Exception:
            with_VNA = False

    if resonators is None:
        try:
            device = self.instr_device.get_instr()
        # BUG FIX: this previously caught ``AttributeuxError`` (a typo),
        # which itself raised a NameError whenever get_instr() failed.
        except AttributeError:
            logging.warning('Could not find device resonators: '
                            'No device found for {}.'.format(self.name))
            return False
        resonators = device.resonators

    if dac_values is None:
        dac_values = np.arange(-10e-3, 10e-3, 1e-3)

    # Zero all flux bias lines before starting the scans.
    fluxcurrent = self.instr_FluxCtrl.get_instr()
    for FBL in fluxcurrent.channel_map:
        fluxcurrent[FBL](0)

    for res in resonators:
        if res.type == 'qubit_resonator':
            self.ro_pulse_amp(res.ro_amp)
            self.ro_pulse_amp_CW(res.ro_amp)
            best_amplitude = 0  # For comparing which one is coupled closest
            if with_VNA:
                VNA = self.instr_VNA.get_instr()
                VNA.start_frequency(res.freq_low - 10e6)
                VNA.stop_frequency(res.freq_low + 10e6)
            freqs = np.arange(res.freq_low - np.abs(res.shift) - 15e6,
                              res.freq_low + 4e6,
                              0.2e6)
            for fluxline in fluxcurrent.channel_map:
                label = '_resonator_{}_{}'.format(res.identifier, fluxline)
                t_start = time.strftime('%Y%m%d_%H%M%S')
                self.measure_resonator_frequency_dac_scan(
                    freqs=freqs,
                    dac_values=dac_values,
                    fluxChan=fluxline,
                    analyze=False,
                    label=label)
                # Reset this fluxline before moving to the next one.
                fluxcurrent[fluxline](0)

                str_freq, unit = plt_tools.SI_val_to_msg_str(res.freq, 'Hz',
                                                             float)
                print('Finished flux sweep resonator {} ({:.3f} {}) with {}'
                      .format(res.identifier, str_freq, unit, fluxline))

                timestamp = a_tools.get_timestamps_in_range(
                    t_start, label=self.msmt_suffix)[0]
                fit_res = ma2.VNA_DAC_Analysis(timestamp)
                amplitude = fit_res.dac_fit_res.params['amplitude'].value
                # Keep the fluxline with the largest arc amplitude: it is
                # the one most strongly coupled to this resonator.
                if amplitude > best_amplitude:
                    best_amplitude = amplitude
                    res.qubit = fluxline.split('_', 1)[-1]
                    res.sweetspot = fit_res.sweet_spot_value
                    res.fl_dc_I_per_phi0 = fit_res.current_to_flux

    if verbose:
        for res in self.instr_device.get_instr().resonators:
            if res.type == 'qubit_resonator':
                freq, unit = plt_tools.SI_val_to_msg_str(res.freq_low,
                                                         'Hz',
                                                         float)
                print('{}, f = {:.3f} {}, linked to {},'
                      ' sweetspot current = {:.3f} mA'
                      .format(res.type, freq, unit, res.qubit,
                              res.sweetspot*1e3))
            else:
                freq, unit = plt_tools.SI_val_to_msg_str(res.freq,
                                                         'Hz',
                                                         float)
                print('{}, f = {:.3f} {}'.format(res.type, freq, unit))

    # Set properties for all qubits in device if device exists
    device = self.instr_device.get_instr()
    assigned_qubits = []
    for q in device.qubits():
        # BUG FIX: this guard previously used ``pass``, which is a no-op,
        # so the fake qubit was processed like any other qubit.
        # ``continue`` actually skips it.
        if q == 'fakequbit':
            continue
        qubit = device.find_instrument(q)
        for res in device.resonators:
            if qubit.name == res.qubit:
                if qubit.name in assigned_qubits:
                    logging.warning('Multiple resonators found for {}. '
                                    'Aborting'.format(qubit.name))
                    return False
                assigned_qubits.append(qubit.name)

                qubit.freq_res(res.freq_low)
                qubit.ro_freq(res.freq_low)
                qubit.fl_dc_I0(res.sweetspot)
                qubit.fl_dc_I_per_phi0(res.fl_dc_I_per_phi0)
                qubit.fl_dc_ch('FBL_' + res.qubit)
                if qubit.freq_qubit() is None:
                    # Rough estimate from the dispersive shift, assuming
                    # a coupling of ~70 MHz.
                    qubit.freq_qubit(res.freq_low -
                                     np.abs((70e6)**2/(res.shift)))
    return True
def find_resonator_sweetspot(self, freqs=None, dac_values=None,
                             fluxChan=None, update=True):
    """
    Finds the resonator sweetspot current by sweeping the flux bias
    current while tracking the resonator frequency.

    Args:
        freqs (array): frequencies to sweep; defaults to a 20 MHz span
            around the current resonator frequency in 0.5 MHz steps.
        dac_values (array): bias currents to sweep; defaults to
            -10 mA .. 10 mA in 101 points.
        fluxChan (str): flux channel to sweep; defaults to the qubit's
            own flux channel ('FBL_1' if still at its initial value).
        update (bool): fit the arc, store the sweetspot current and park
            the flux source there.

    TODO: - measure all FBL-resonator combinations
    TODO: - implement way of distinguishing which fluxline is most coupled
    TODO: - create method that moves qubits away from sweetspot when they
            are not being measured (should not move them to some other
            qubit frequency of course)
    """
    if freqs is None:
        center = self.freq_res()
        span = 20e6
        freqs = np.arange(center - span/2, center + span/2, 0.5e6)

    if dac_values is None:
        dac_values = np.linspace(-10e-3, 10e-3, 101)

    if fluxChan is None:
        # fl_dc_ch() == 1 is the parameter's initial (unset) value.
        fluxChan = 'FBL_1' if self.fl_dc_ch() == 1 else self.fl_dc_ch()

    t_start = time.strftime('%Y%m%d_%H%M%S')
    self.measure_resonator_frequency_dac_scan(freqs=freqs,
                                              dac_values=dac_values,
                                              fluxChan=fluxChan,
                                              analyze=False)

    if update:
        import pycqed.analysis_v2.spectroscopy_analysis as sa
        timestamp = ma.a_tools.get_timestamps_in_range(
            t_start, label='Resonator')[0]
        fit_res = sa.VNA_DAC_Analysis(timestamp=timestamp)
        sweetspot_current = fit_res.sweet_spot_value
        self.fl_dc_I0(sweetspot_current)
        # Park the flux source at the freshly found sweetspot.
        fluxcurrent = self.instr_FluxCtrl.get_instr()
        fluxcurrent[self.fl_dc_ch()](sweetspot_current)
    return True
def find_resonator_frequency(self, use_min=True,
                             update=True,
                             freqs=None,
                             MC=None, close_fig=True):
    """
    Performs heterodyne spectroscopy to identify the frequency of the
    (readout) resonator.

    Args:
        use_min (bool):
            'True' uses the frequency at minimum amplitude. 'False' uses
            the fit result.
        update (bool):
            update the internal parameters with this fit (only done when
            the extracted frequency lies inside the scanned range).
        freqs (array):
            list of frequencies to sweep. By default set to +-5 MHz
            around the last recorded frequency, with 100 kHz step.
        MC: measurement control instance to use.
        close_fig (bool): close the analysis figure when done.

    Returns:
        float: the extracted resonator frequency in Hz.

    Raises:
        ValueError: if ``freqs`` is None and no resonator frequency is
            known to build a default span from.
    """
    # This snippet exists to be backwards compatible 9/2017.
    try:
        freq_res_par = self.freq_res
        freq_RO_par = self.ro_freq
    # BUG FIX (idiom): was a bare ``except:``, which also swallowed
    # KeyboardInterrupt/SystemExit; only a missing attribute should
    # trigger the deprecated fallback.
    except AttributeError:
        warnings.warn("Deprecation warning: rename f_res to freq_res")
        freq_res_par = self.f_res
        freq_RO_par = self.f_RO

    if freqs is None:
        f_center = freq_res_par()
        if f_center is None:
            raise ValueError('Specify "freq_res" to generate a freq span')
        f_span = 10e6
        f_step = 100e3
        freqs = np.arange(f_center - f_span/2, f_center + f_span/2, f_step)

    # ROBUSTNESS: raise the averaging only around the measurement and
    # restore it even if measurement or analysis raises (previously the
    # instrument could be left at 2**14 averages on failure).
    old_avg = self.ro_acq_averages()
    self.ro_acq_averages(2**14)
    try:
        self.measure_heterodyne_spectroscopy(freqs, MC, analyze=False)
        a = ma.Homodyne_Analysis(label=self.msmt_suffix,
                                 close_fig=close_fig)
    finally:
        self.ro_acq_averages(old_avg)

    if use_min:
        f_res = a.min_frequency
    else:
        f_res = a.fit_results.params['f0'].value*1e9  # fit converts to Hz

    if f_res > max(freqs) or f_res < min(freqs):
        logging.warning('extracted frequency outside of range of scan')
    elif update:  # don't update if the value is out of the scan range
        freq_res_par(f_res)
        freq_RO_par(f_res)
    return f_res
def find_frequency(self, method='spectroscopy', spec_mode='pulsed_marked',
steps=[1, 3, 10, 30, 100, 300, 1000],
artificial_periods=4,
freqs=None,
f_span=100e6,
use_max=False,
f_step=1e6,
verbose=True,
update=True,
close_fig=True,
MC=None,
label = ''):
"""
Finds the qubit frequency using either the spectroscopy or the Ramsey
method.
In case method=='spectroscopy' this routine runs measure_spectroscopy and performs
analysis looking for peaks in the spectrum.
In case metgod=='ramsey' this routine performs series ofamsey ramsey measurements
for increasing range of the delay times. Using short ramsey sequence with relatively
large artificial detuning yields robust measurement of the qubit frequency, and increasing
the relay times allows for more precise frequency measurement.
Args:
method (str {'spectroscopy', 'ramsey'}):
specifies whether to perform spectroscopy ('spectroscopy') or series of
ramsey measurements ('ramsey') to find the qubit frequency.
spec_mode (str {'CW', 'pulsed_marked', 'pulsed_mixer'}):
specifies the mode of the spectroscopy measurements (currently only implemented
by Timo for CCL_Transmon). Possivle values: 'CW', 'pulsed_marked', 'pulsed_mixer'
steps (array):
maximum delay between pi/2 pulses (in microseconds) in a subsequent ramsey measurements.
The find_frequency routine is terminated when all steps are performed or if
the fitted T2* significantly exceeds the maximum delay
artificial_periods (float):
specifies the automatic choice of the artificial detuning in the ramsey
measurements, in such a way that ramsey measurement should show 4 full oscillations.
freqs (array):
list of sweeped frequencies in case of spectroscopy measurement
f_span (float):
span of sweeped frequencies around the currently recorded qubit frequency in
the spectroscopy measurement
f_step (flaot):
increment of frequency between data points in spectroscopy measurement
update (bool):
boolean indicating whether to update the qubit frequency in the qubit object
according to the result of the measurement
"""
if method.lower() == 'spectroscopy':
if freqs is None:
f_qubit_estimate = self.calculate_frequency()
freqs = np.arange(f_qubit_estimate - f_span/2,
f_qubit_estimate + f_span/2,
f_step)
# args here should be handed down from the top.
self.measure_spectroscopy(freqs, mode=spec_mode, MC=MC,
analyze=False, label = label,
close_fig=close_fig)
label = 'spec'
analysis_spec = ma.Qubit_Spectroscopy_Analysis(
label=label, close_fig=True, qb_name=self.name)
# Checks to see if there is a peak:
freq_peak = analysis_spec.peaks['peak']
offset = analysis_spec.fit_res.params['offset'].value
peak_height = np.amax(analysis_spec.data_dist)
if freq_peak is None:
success = False
| |
from contextlib import contextmanager
from queue import Queue
from threading import Event, Timer
from time import sleep
from unittest import TestCase
from unittest.mock import MagicMock
from dakara_base.safe_workers import (
BaseSafeThread,
BaseWorker,
Runner,
SafeThread,
SafeTimer,
UnredefinedThreadError,
UnredefinedTimerError,
Worker,
WorkerSafeThread,
WorkerSafeTimer,
safe,
)
class MyError(Exception):
    """Dummy exception raised on purpose to exercise error handling."""
class BaseTestCase(TestCase):
    """Generic test case.

    Provides a fresh stop event and error queue per test, two dummy
    target functions, and an extra negative assertion helper.
    """

    def setUp(self):
        """Create the stop event and the errors queue."""
        self.stop = Event()
        self.errors = Queue()

    def function_safe(self):
        """Dummy function that always succeeds."""

    def function_error(self):
        """Dummy function that always raises a MyError."""
        raise MyError("test error")

    @contextmanager
    def assertNotRaises(self, ExceptionClass):
        """Assert that the enclosed block does not raise.

        Args:
            ExceptionClass (class): Class of the exception that must not
                be raised.
        """
        try:
            yield None
        except ExceptionClass:
            self.fail("{} raised".format(ExceptionClass.__name__))
class SafeTestCase(BaseTestCase):
    """Test the `safe` decorator."""

    def create_classes(self):
        """Create dummy classes.

        Returns:
            tuple: a BaseSafeThread subclass, a BaseWorker subclass and a
            plain class, all sharing the same pair of `safe`-decorated
            methods.
        """
        # NOTE: the decorated methods take ``self2`` (the dummy instance)
        # and close over ``self`` (the test case) so they can reuse the
        # helper functions defined on BaseTestCase.
        class Base:
            @safe
            def function_safe(self2):
                self.function_safe()

            @safe
            def function_error(self2):
                self.function_error()

        class Thread(BaseSafeThread, Base):
            pass

        class Worker(BaseWorker, Base):
            pass

        # deliberately NOT a BaseSafeThread/BaseWorker subclass, so the
        # decorator should refuse to run on it
        class Other(Base):
            pass

        return Thread, Worker, Other

    def test_worker_function_safe(self):
        """Test a safe function of a worker.

        Test that a non-error function does not trigger any error, does not set
        the stop event and does not put an error in the error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create instance
        _, Worker, _ = self.create_classes()
        worker = Worker(self.stop, self.errors)

        # call the method
        worker.function_safe()

        # post assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_worker_function_error(self):
        """Test an error function of a worker.

        Test that an error function does not trigger any error, sets the stop
        event and puts a MyError in the error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create instance
        _, Worker, _ = self.create_classes()
        worker = Worker(self.stop, self.errors)

        # call the method; the decorator must swallow the MyError
        with self.assertNotRaises(MyError):
            worker.function_error()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertFalse(self.errors.empty())
        # unpack the queued 3-tuple; the middle element is the exception
        _, error, _ = self.errors.get()
        self.assertIsInstance(error, MyError)

    def test_thread(self):
        """Test a thread.

        Test that a non-error function does not trigger any error, does not set
        the stop event and does not put an error in the error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create instance
        Thread, _, _ = self.create_classes()
        thread = Thread(self.stop, self.errors)

        # call the method
        thread.function_safe()

        # post assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_other(self):
        """Test an other class.

        Test that the decorator raises an error, as the class is not supported.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create instance
        _, _, Other = self.create_classes()
        other = Other()

        # call the method: `safe` only works on supported base classes
        with self.assertRaises(AssertionError):
            other.function_safe()

        # post assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())
class SafeThreadTestCase(BaseTestCase):
    """Test the SafeThread class."""

    def create_controlled_thread(self, target):
        """Build a SafeThread wired to this test's stop event and queue."""
        return SafeThread(self.stop, self.errors, target=target)

    def test_function_safe(self):
        """Test a safe function.

        A non-error function run as a thread must not raise anything,
        must leave the stop event unset and the error queue empty.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run the thread to completion
        thread = self.create_controlled_thread(self.function_safe)
        thread.start()
        thread.join()

        # post assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_function_error(self):
        """Test an error function.

        An error function run as a thread must not propagate the error,
        must set the stop event and put a MyError in the error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run the thread to completion; the error must stay contained
        thread = self.create_controlled_thread(self.function_error)
        with self.assertNotRaises(MyError):
            thread.start()
            thread.join()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertFalse(self.errors.empty())
        # unpack the queued 3-tuple; the middle element is the exception
        _, error, _ = self.errors.get()
        self.assertIsInstance(error, MyError)
class SafeTimerTestCase(SafeThreadTestCase):
    """Run the SafeThread test suite against the SafeTimer class."""

    def create_controlled_thread(self, target):
        """Build a SafeTimer for `target` with a non-null delay (0.5 s)."""
        delay = 0.5
        return SafeTimer(self.stop, self.errors, delay, target)
class WorkerTestCase(BaseTestCase):
    """Test the Worker class."""

    def test_run_safe(self):
        """Test a safe run.

        A worker used without errors must not raise, and must end with
        the stop event set and an empty error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run a worker around a harmless body
        with Worker(self.stop, self.errors):
            self.function_safe()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_run_error(self):
        """Test a run with error.

        A worker whose body raises must let the error propagate, and must
        end with the stop event set and an empty error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run a worker around a failing body; the error must escape and
        # there is no point continuing the body past the raise
        with self.assertRaises(MyError):
            with Worker(self.stop, self.errors):
                self.function_error()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_run_thread_safe(self):
        """Test a run with a safe thread.

        A worker running a non-error thread must not raise, and must end
        with the stop event set and an empty error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run a worker whose thread finishes cleanly
        with Worker(self.stop, self.errors) as worker:
            worker.thread = worker.create_thread(target=self.function_safe)
            worker.thread.start()
            worker.thread.join()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())

    def test_run_thread_error(self):
        """Test a run with a thread with error.

        A worker running a failing thread must keep the error contained,
        and must end with the stop event set and the error queued.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # run a worker whose thread raises; the error must stay contained
        with self.assertNotRaises(MyError):
            with Worker(self.stop, self.errors) as worker:
                worker.thread = worker.create_thread(target=self.function_error)
                worker.thread.start()
                worker.thread.join()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertFalse(self.errors.empty())
        # unpack the queued 3-tuple; the middle element is the exception
        _, error, _ = self.errors.get()
        self.assertIsInstance(error, MyError)
class WorkerSafeTimerTestCase(BaseTestCase):
    """Test the WorkerSafeTimer class."""

    class WorkerSafeTimerToTest(WorkerSafeTimer):
        """Dummy worker class."""

        def function_already_dead(self):
            """Function that ends immediately."""
            return

        def function_to_cancel(self):
            """Function that calls itself in loop every second."""
            # reschedules itself endlessly, so the worker must cancel the
            # timer when the context exits
            self.timer = Timer(1, self.function_to_cancel)
            self.timer.start()

        def function_to_join(self):
            """Function that waits one second."""
            sleep(1)

    def test_run_timer_dead(self):
        """Test to end a worker when its timer is dead.

        Test that a worker stopped with a dead timer finishes with a
        triggered stop event, an empty error queue and a still dead timer.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create and run worker; the timer target returns immediately
        with self.WorkerSafeTimerToTest(self.stop, self.errors) as worker:
            worker.timer = worker.create_timer(0, worker.function_already_dead)
            worker.timer.start()
            worker.timer.join()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())
        self.assertFalse(worker.timer.is_alive())

    def test_run_timer_cancelled(self):
        """Test to end a daemon when its timer is waiting.

        Test that a worker stopped with a waiting timer finishes with a
        triggered stop event, an empty error queue and a dead timer.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create and run worker; leaving the `with` block while the timer
        # is still waiting forces the worker to cancel it
        with self.WorkerSafeTimerToTest(self.stop, self.errors) as worker:
            worker.timer = worker.create_timer(0, worker.function_to_cancel)
            worker.timer.start()
            sleep(0.5)

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())
        self.assertFalse(worker.timer.is_alive())
        # `finished` set means the timer was cancelled, not run to term
        self.assertTrue(worker.timer.finished.is_set())

    def test_run_timer_joined(self):
        """Test to end a daemon when its timer is running.

        Test that a worker stopped with a running timer finishes with a
        triggered stop event, an empty error queue and a dead timer.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create and run worker; the timer target is mid-sleep when the
        # context exits, so the worker has to join it
        with self.WorkerSafeTimerToTest(self.stop, self.errors) as worker:
            worker.timer = worker.create_timer(0, worker.function_to_join)
            worker.timer.start()
            sleep(0.5)

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertTrue(self.errors.empty())
        self.assertFalse(worker.timer.is_alive())

    def test_unredifined_timer(self):
        """Test the timer must be redefined.

        Test that a worker with its default (placeholder) timer does not
        generate an error, but finishes with a triggered stop event and a
        non-empty error queue.
        """
        # pre assertions
        self.assertFalse(self.stop.is_set())
        self.assertTrue(self.errors.empty())

        # create and run worker; starting the placeholder timer must put
        # an UnredefinedTimerError in the queue instead of raising
        with self.assertNotRaises(UnredefinedTimerError):
            with self.WorkerSafeTimerToTest(self.stop, self.errors) as worker:
                worker.timer.start()

        # post assertions
        self.assertTrue(self.stop.is_set())
        self.assertFalse(self.errors.empty())
        # unpack the queued 3-tuple; the middle element is the exception
        _, error, _ = self.errors.get()
        self.assertIsInstance(error, UnredefinedTimerError)
class WorkerSafeThreadTestCase(BaseTestCase):
"""Test the WorkerSafeThread class."""
class WorkerSafeThreadToTest(WorkerSafeThread):
"""Dummy worker class."""
def function_already_dead(self):
"""Function that ends immediately."""
return
def function_to_join(self):
"""Function that waits one second."""
sleep(1)
def test_run_thread_dead(self):
"""Test to end a worker when its thread is dead.
Test that a worker worker stopped with a dead thread finishes with a
triggered stop event, an empty error queue and a still dead thread.
"""
# pre assertions
self.assertFalse(self.stop.is_set())
self.assertTrue(self.errors.empty())
# create and run worker
with self.WorkerSafeThreadToTest(self.stop, self.errors) as worker:
worker.thread = worker.create_thread(target=worker.function_already_dead)
worker.thread.start()
worker.thread.join()
# post assertions
self.assertTrue(self.stop.is_set())
self.assertTrue(self.errors.empty())
self.assertFalse(worker.thread.is_alive())
def test_run_thread_joined(self):
"""Test to end a deamon when its thread is running.
Test that a worker worker stopped with a running thread finishes with a
triggered stop event, an empty error queue and a dead thread.
"""
# pre assertions
self.assertFalse(self.stop.is_set())
self.assertTrue(self.errors.empty())
# create and run worker
with self.WorkerSafeThreadToTest(self.stop, self.errors) as worker:
worker.thread = worker.create_thread(target=worker.function_to_join)
worker.thread.start()
sleep(0.5)
# post assertions
self.assertTrue(self.stop.is_set())
self.assertTrue(self.errors.empty())
self.assertFalse(worker.thread.is_alive())
def test_unredifined_thread(self):
"""Test the thread must be redefined.
Test that a worker worker | |
import ctypes
import os
import matplotlib.pyplot as plt
import nlopt
import numpy as np
from numba import njit
from numba import vectorize
from numba.extending import get_cython_function_address
from scipy import special
from tables import ComplexCol
from tables import Float64Col
from tables import IsDescription
from tables import open_file
from tqdm import tqdm
from SiPANN import scee
# Wrap scipy's Cython implementation of the binomial coefficient in a raw
# ctypes function pointer: (double, double) -> double. This gives a plain
# C callable usable from @njit-compiled code below, where the regular
# scipy.special.binom Python wrapper cannot be called.
addr = get_cython_function_address("scipy.special.cython_special", "binom")
functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double, ctypes.c_double)
binom_fn = functype(addr)
##################################################
### HELPER FUNCTIONS ###
### used to help quickly define gap functions ###
##################################################
@njit
def bernstein_quick(n, j, t):
    """Quickly computes the jth Bernstein polynomial for the basis of n+1
    polynomials.

    Parameters
    -----------
    n : int
        The number of elements minus one in the basis of Bernstein polynomials
    j : int
        The index of the Bernstein polynomial that needs to be computed
    t : float
        [0-1] the value at which to compute the polynomial

    Returns
    ----------
    test : float
        Result of computing the jth Bernstein polynomial at t
    """
    # b_{n,j}(t) = C(n, j) * t^j * (1 - t)^(n - j)
    coeff = binom_fn(n, j)
    return coeff * t ** j * (1 - t) ** (n - j)
def bezier_quick(g, length):
    """Computes the bezier curve for the evenly spaced control points with gaps
    g.

    Parameters
    ----------
    g : ndarray
        Numpy array of size (n,) of gap values at each of the control points
    length : float
        length of the coupler
    Returns
    ----------
    result : dict
        {'g': original control points, 'w': length of coupler, 'f': bezier curve function defining gap function, 'df': derivative of gap function, 'd2f': 2nd derivative of gap functions}
    """
    # Degree of the Bezier curve: number of control points minus one.
    n = len(g) - 1
    return {
        "g": g,
        "w": length,
        # f(t) = sum_j g_j * b_{n,j}(t / length): straight Bezier
        # evaluation with t rescaled from [0, length] to [0, 1].
        "f": lambda t: np.sum(
            np.array(
                [(g[j]) * bernstein_quick(n, j, t / length) for j in range(len(g))]
            ),
            axis=0,
        ),
        # First derivative via B'_{n,j} = n (B_{n-1,j-1} - B_{n-1,j});
        # the trailing "/ length" is the chain-rule factor of t / length.
        "df": lambda t: np.sum(
            np.array(
                [
                    n
                    * (g[j])
                    * (
                        bernstein_quick(n - 1, j - 1, t / length)
                        - bernstein_quick(n - 1, j, t / length)
                    )
                    for j in range(len(g))
                ]
            ),
            axis=0,
        )
        / length,
        # Second derivative; "/ length ** 2" is the squared chain-rule
        # factor.
        # NOTE(review): the (g[j] / 2) term below does not match the
        # textbook identity f'' = n (n-1) sum_j g_j (B_{n-2,j-2}
        # - 2 B_{n-2,j-1} + B_{n-2,j}) / length**2, which has no 1/2 —
        # confirm whether the extra factor is intentional before relying
        # on 'd2f' (e.g. for curvature constraints).
        "d2f": lambda t: np.sum(
            np.array(
                [
                    n
                    * (n - 1)
                    * (g[j] / 2)
                    * (
                        bernstein_quick(n - 2, j - 2, t / length)
                        - 2 * bernstein_quick(n - 2, j - 1, t / length)
                        + bernstein_quick(n - 2, j, t / length)
                    )
                    for j in range(len(g))
                ]
            ),
            axis=0,
        )
        / length ** 2,
    }
class Optimization(IsDescription):
    """Class to save the data in h5 files if dataCollect=True.

    PyTables row description: one row per coupler evaluated during the
    optimization, storing its geometry, spectral response and gap
    control points.
    """

    # waveguide geometry in nm (same units as make_coupler's arguments)
    width = Float64Col()
    thickness = Float64Col()
    # wavelength — presumably nm, matching waveSweep; confirm with writer
    wavelength = Float64Col()
    # complex coupling responses (cross `k` and through `t`).
    # NOTE(review): PyTables Col classes take `itemsize` as first
    # positional argument, not a shape — confirm ComplexCol(32) is
    # intended here.
    k = ComplexCol(32)
    t = ComplexCol(32)
    # coupler length (nm)
    length = Float64Col()
    # gap values g1..g8 — presumably the free Bezier control points of
    # the gap function (nm); confirm against make_coupler's gapN/edgeN
    g1 = Float64Col()
    g2 = Float64Col()
    g3 = Float64Col()
    g4 = Float64Col()
    g5 = Float64Col()
    g6 = Float64Col()
    g7 = Float64Col()
    g8 = Float64Col()
#########################################################
### ACTUAL OPTIMIZING ###
### this function does all of the heavy lifting here ###
#########################################################
def make_coupler(
goalK=0.4,
arrayK=None,
waveSweep=np.linspace(1500, 1600, 4),
gapN=16,
algo=35,
edgeN=8,
plot=False,
collectData=False,
width=500,
thickness=220,
radius=5000,
maxiter=None,
verbose=0,
):
"""Optimizes output from a directional coupler defined by a bezier curve to
a specified output magnitude.
Parameters
----------
goalK : float
[0-1] mandatory, unless using arrayK. Desired \|kappa\|^2 magnitude
arrayK : ndarray, optional
Has to have size (2,). [0-1] can specify a \|kappa\|^2 magnitude at start and end of wavelength sweep. Defaults to None
waveSweep : ndarray, optional
Sweep of wavelengths to evaluate objective function at. Defaults to ``np.linspace(1500,1600,4)``
gapN : int, optional
Number of control points that can vary. Defaults to 16.
algo : int, optional
Optimization algorithm that nlopt uses. Defaults to 35
edgeN : int, optional
Number of control points on each edge that are fixed at gap of 1500 nm. Defaults to 8.
plot : bool, optional
If True then optimization will plot the current coupler at each iteration with the control points. Defaults to False.
collectData : bool, optional
Whether to collect data for couplers of each iteration (could be useful for machine learning and even faster design). Defaults to False.
width : float, optional
Width of waveguides in nm. Defaults to 500.
thickness : float, optional
Thickness of waveguides in nm. Defaults to 220.
radius : float, optional
Radius of allowable curve in directional coupler in nm. Defaults to 5000.
maxiter : int, optional
The number of max iterations to run each of the global and local optimization for. If None, doesn't apply. Defaults to None.
verbose : int, optional
Amount of logging to output. If 0, none. If 1, tqdm bar. If 2, prints all information (can be cumbersome). Defaults to 0.
Returns
----------
coupler : GapFuncSymmetric
The final coupler object from SCEE
control_pts : ndarray
The control points defining bezier curve for gap function (nm)
length : ndarray
The length of the coupler (nm)
"""
# initial values for the optimizer to use and bounds for values
couplingWidth = 20000
couplingMin = 5000
couplingMax = 100000
gapMin = 0
gapMax = 1500
iter = [0]
mseVals = []
coupler = scee.GapFuncSymmetric(
width, thickness, lambda x: gapMax, lambda x: 0, 0, couplingWidth
)
curvatureFunc = lambda x: 0
localOpt = False
waveN = len(waveSweep)
waveStart = waveSweep[0]
waveStop = waveSweep[-1]
# set up progress bars
if verbose == 1:
if maxiter is not None:
loop = tqdm(total=maxiter * 2, position=0, leave=True)
else:
loop = tqdm(total=float("inf"), position=0, leave=True)
# varying values of goal coupling if desired
if arrayK is not None:
goalK = np.array(
[arrayK[0] if k < waveN / 2 else arrayK[1] for k in range(waveN)]
)
# sweep of wavelength to optimize over
# waveSweep = np.linspace(waveStart, waveStop, waveN)
# dataPoints = {str(wave):{'g': [], 'k': [], 't': []} for wave in range(waveStart,waveStop+1)}
# define plot for debugging and final result
if plot:
plt.ion()
fig, axes = plt.subplots(2, 1)
axes[0].set_ylim(-gapMax, gapMax + 100)
scatter1 = axes[0].scatter(
np.linspace(0, couplingWidth, gapN), np.zeros(gapN), label="control points"
)
(line1,) = axes[0].plot(
np.linspace(0, couplingWidth, 500), np.zeros(500), label="gap function"
)
(line2,) = axes[1].plot(
waveSweep,
np.zeros_like(waveSweep),
"o-",
label="cross-port goal = " + str(goalK),
)
axes[1].set_ylim(-5, 0)
axes[0].set_xlabel("coupling length (nm)")
axes[0].set_ylabel("coupling gap (nm)")
axes[1].set_xlabel("wavelength (nm)")
axes[1].set_ylabel("error (dB)")
plt.tight_layout()
axes[0].legend(loc=8)
axes[1].legend(loc=8)
# file to collect data in
if collectData:
h5file = open_file(
"./data/" + ("%.2f" % goalK).split(".")[1] + "/" + "data.h5",
mode="w",
title="Simulation Data",
)
group = h5file.create_group("/", "coupler", "Optimization")
table = h5file.create_table(group, "optdata", Optimization, "Data")
point = table.row
def f(g, grad):
    """Objective function minimized by nlopt.

    Parameters
    ----------
    g : ndarray
        Numpy array of size (gapN + 1,): the first element is the coupler
        length, the remaining elements are the variable gap control points.
    grad : ndarray
        Gradient of the optimization (not used; gradient-free algorithms).

    Returns
    -------
    result : float
        MSE of the power equation (-10 log(|kappa|^2 / goalK)), plus a
        penalty for gaps closer than 100 nm.
    """
    iter[0] += 1
    # define optimization points and distance between them
    x = np.linspace(0, g[0], 2 * gapN + 2 * edgeN)
    # mirror gap control points for symmetry, padding both edges with
    # edgeN points pinned at the maximum gap (decoupled waveguides)
    g_total = np.append(
        np.append(np.append(np.full(edgeN, gapMax), g[1:]), g[-1:0:-1]),
        np.full(edgeN, gapMax),
    )
    # get Bezier interpolation of the gap function; g[0] is the length
    gap = bezier_quick(g_total, g[0])
    coupler.update(gap=gap["f"], dgap=gap["df"], zmax=gap["w"])
    # get the cross coupling over the wavelength sweep
    currK = coupler.predict((1, 4), waveSweep)
    # draw current waveguide and power if debug mode on
    if plot and iter[0] % 10000 != 0:
        scatter1.set_offsets(np.array([x, g_total]).T)
        dom = np.linspace(0, g[0], 500)
        axes[0].set_xlim(0, g[0])
        line1.set_xdata(dom)
        line1.set_ydata(gap["f"](dom))
        line2.set_ydata(-np.abs(-10 * np.log10((np.abs(currK) ** 2) / goalK)))
        fig.canvas.draw()
        fig.canvas.flush_events()
    elif plot:
        # every 10000th iteration: draw the filled waveguide outline and
        # pause until the user presses a key
        scatter1.set_visible(False)
        line1.set_visible(False)
        x_plot = np.linspace(0, g[0], 500)
        axes[0].set_xlim(0, g[0])
        axes[0].get_legend().remove()
        y_plot = gap["f"](x_plot) / 2
        fill1 = axes[0].fill_between(x_plot, -y_plot - width, -y_plot, color="k")
        fill2 = axes[0].fill_between(x_plot, y_plot, y_plot + width, color="k")
        line2.set_ydata(-np.abs(-10 * np.log10((np.abs(currK) ** 2) / goalK)))
        fig.canvas.draw()
        fig.canvas.flush_events()
        plt.waitforbuttonpress()
        line1.set_visible(True)
        fill1.set_visible(False)
        fill2.set_visible(False)
        scatter1.set_visible(True)
        axes[0].legend(loc=8)
    # get the mean squared error between current coupling and goal
    mse = np.sum((np.log10((np.abs(currK) ** 2) / goalK)) ** 2) / waveN
    mseVals.append(mse)
    # add penalty if gap function has value < 100 nm (SCEE doesn't perform well for coupling closer than this)
    mse += np.sum(gap["f"](np.linspace(0, g[0], 100)) < 100)
    # print iteration progress
    if verbose == 2:
        print("MSE: {}".format(mse))
        print("currK: {}".format(np.abs(currK) ** 2))
        print("g: {}".format(g))
        # if the optimization changed from global constrained to local optimization
        print("local optimization:", localOpt, "\n")
    elif verbose == 1:
        if localOpt:
            o = "LOCAL"
        else:
            o = "GLOBAL"
        loop.update(1)
        loop.set_description(
            f"{o}, MSE: {np.round(mse,4)}, Mean currK: {np.round((np.abs(currK)**2).mean(),4)}"
        )
    return mse
def constraint(x, grad, radius=5000):
"""Constraint | |
# <reponame>svenbuder/SummerSchool
"""
Definition of :class:`EventList`.
:class:`EventList` is used to handle photon arrival times.
"""
from __future__ import absolute_import, division, print_function
from .io import read, write
from .utils import simon, assign_value_if_none
from .gti import cross_gtis, append_gtis, check_separate
from .lightcurve import Lightcurve
import numpy as np
import numpy.random as ra
import scipy.interpolate as sci
class EventList(object):
def __init__(self, time=None, pha=None, ncounts=None, mjdref=0, dt=0, notes="",
gti=None, pi=None):
"""
Make an event list object from an array of time stamps
Parameters
----------
time: iterable
A list or array of time stamps
Other Parameters
----------------
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
pha: iterable
A list of array of photon energy values
mjdref : float
The MJD used as a reference for the time array.
ncounts: int
Number of desired data points in event list.
gtis: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
Attributes
----------
time: numpy.ndarray
The array of event arrival times, in seconds from the reference
MJD (self.mjdref)
pha: numpy.ndarray
The array of photon energy values
ncounts: int
The number of data points in the event list
dt: float
The time resolution of the events. Only relevant when using events
to produce light curves with similar bin time.
mjdref : float
The MJD used as a reference for the time array.
gtis: [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
Good Time Intervals
pi : integer, numpy.ndarray
PI channels
"""
self.pha = None if pha is None else np.array(pha)
self.notes = notes
self.dt = dt
self.mjdref = mjdref
self.gti = gti
self.pi = pi
self.ncounts = ncounts
if time is not None:
self.time = np.array(time, dtype=np.longdouble)
self.ncounts = len(time)
else:
self.time = None
if (time is not None) and (pha is not None):
if len(time) != len(pha):
raise ValueError('Lengths of time and pha must be equal.')
def to_lc(self, dt, tstart=None, tseg=None):
    """Convert this event list to a light curve object.

    Parameters
    ----------
    dt: float
        Binning time of the light curve.

    Other Parameters
    ----------------
    tstart : float
        Initial time of the light curve; defaults to the start of the
        first GTI when GTIs are set.
    tseg: float
        Total duration of the light curve.

    Returns
    -------
    lc: `Lightcurve` object
    """
    gti = self.gti
    if tstart is None and gti is not None:
        # Default to covering the full span of the GTIs.
        tstart = gti[0][0]
        tseg = gti[-1][1] - tstart
    return Lightcurve.make_lightcurve(
        self.time, dt, tstart=tstart, gti=gti, tseg=tseg)
@staticmethod
def from_lc(lc):
"""
Loads eventlist from light curve.
Parameters
----------
lc: lightcurve.Lightcurve object
Light curve data to load from
Returns
-------
ev: events.EventList object
Event List
"""
# Multiply times by number of counts
times = [[i] * j for i,j in zip(lc.time, lc.counts)]
# Concatenate all lists
times = [i for j in times for i in j]
return EventList(time=times)
def simulate_times(self, lc, use_spline=False, bin_time=None):
    """Assign (simulate) photon arrival times to this event list, using the
    acceptance-rejection method on a binned light curve.

    Parameters
    ----------
    lc: `Lightcurve` object
        Light curve whose ``time``/``counts`` define the rate profile.

    Other Parameters
    ----------------
    use_spline : bool
        If True, evaluate the rate between bins with a linear spline
        instead of a nearest-bin lookup.
    bin_time : float
        Bin time of the light curve; if None, inferred from the first two
        time stamps.
    """
    times = lc.time
    counts = lc.counts

    bin_time = assign_value_if_none(bin_time, times[1] - times[0])
    n_bin = len(counts)
    bin_start = 0
    maxlc = np.max(counts)
    intlc = maxlc * n_bin
    n_events_predict = int(intlc + 10 * np.sqrt(intlc))

    # Max number of events per chunk must be < 100000.
    # BUGFIX: np.max([...]) returns a float here; a float max_bin (and hence
    # a float bin_start/bin_stop) raises TypeError when used for slicing and
    # indexing below, so cast to int.
    events_per_bin_predict = n_events_predict / n_bin
    if use_spline:
        max_bin = int(np.max([4, 1000000 / events_per_bin_predict]))
    else:
        max_bin = int(np.max([4, 5000000 / events_per_bin_predict]))

    ev_list = np.zeros(n_events_predict)
    nev = 0

    # Process the light curve chunk by chunk to bound memory usage.
    while bin_start < n_bin:
        t0 = times[bin_start]
        bin_stop = min([bin_start + max_bin, n_bin + 1])
        lc_filt = counts[bin_start:bin_stop]
        t_filt = times[bin_start:bin_stop]
        n_bin_filt = len(lc_filt)

        # Oversample candidates so that, after rejection, enough events
        # survive to cover the Poisson draw below.
        n_to_simulate = n_bin_filt * max(lc_filt)
        safety_factor = 10
        if n_to_simulate > 10000:
            safety_factor = 4.
        n_to_simulate += safety_factor * np.sqrt(n_to_simulate)
        n_to_simulate = int(np.ceil(n_to_simulate))
        n_predict = ra.poisson(np.sum(lc_filt))
        random_ts = ra.uniform(t_filt[0] - bin_time / 2,
                               t_filt[-1] + bin_time / 2, n_to_simulate)
        random_amps = ra.uniform(0, max(lc_filt), n_to_simulate)

        # Rate at each candidate time: linear spline or nearest bin.
        if use_spline:
            lc_spl = sci.splrep(t_filt, lc_filt, s=np.longdouble(0), k=1)
            pts = sci.splev(random_ts, lc_spl)
        else:
            rough_bins = np.rint((random_ts - t0) / bin_time)
            rough_bins = rough_bins.astype(int)
            pts = lc_filt[rough_bins]

        # Accept the candidates that fall under the rate curve.
        good = random_amps < pts
        random_ts = random_ts[good]
        random_ts = random_ts[:n_predict]
        random_ts.sort()
        new_nev = len(random_ts)
        ev_list[nev:nev + new_nev] = random_ts[:]
        nev += new_nev
        bin_start += max_bin

    # Discard all unused (zero) entries at the end.
    time = ev_list[:nev]
    time.sort()
    self.time = EventList(time).time
    self.ncounts = len(self.time)
def simulate_energies(self, spectrum):
    """
    Assign (simulate) energies to the event list by sampling from a spectrum.

    Parameters
    ----------
    spectrum: 2-d array or list
        Energies versus corresponding fluxes. The 2-d array or list must
        have energies across the first dimension and fluxes across the
        second one.
    """
    # Nothing to simulate without a known number of events.
    if self.ncounts is None:
        simon("Either set time values or explicity provide counts.")
        return
    if isinstance(spectrum, list) or isinstance(spectrum, np.ndarray):
        pha = np.array(spectrum)[0]
        fluxes = np.array(spectrum)[1]
        if not isinstance(pha, np.ndarray):
            raise IndexError("Spectrum must be a 2-d array or list")
    else:
        raise TypeError("Spectrum must be a 2-d array or list")
    # Create a set of probability values (normalized fluxes)
    prob = fluxes / float(sum(fluxes))
    # Calculate cumulative probability
    cum_prob = np.cumsum(prob)
    # Draw N random numbers between 0 and 1, where N is the size of event list
    R = ra.uniform(0, 1, self.ncounts)
    # Inverse-CDF style sampling: for each random draw r, pick the energy of
    # the first bin whose cumulative probability exceeds r.
    # NOTE(review): pha[np.argwhere(...)] yields small index arrays rather
    # than scalars, so self.pha becomes an array of nested arrays — confirm
    # downstream consumers expect this shape.
    self.pha = np.array([pha[np.argwhere(cum_prob ==
                         min(cum_prob[(cum_prob - r) > 0]))] for r in R])
def join(self, other):
    """
    Join two ``EventList`` objects into one.

    If both are empty, an empty ``EventList`` is returned.
    GTIs are crossed if the event lists are over a common time interval,
    and appended otherwise.
    PI and PHA remain None if they are None in both. Otherwise, 0 is used
    as a default value for the ``EventList``s where they were None.

    .. note:: ``self`` and ``other`` may be modified in place (empty time
       arrays, default pi/pha values, default GTIs) so that their
       attributes can be concatenated uniformly.

    Parameters
    ----------
    other : `EventList` object
        The other `EventList` object which is supposed to be joined with.

    Returns
    -------
    ev_new : EventList object
        The resulting EventList object.
    """
    ev_new = EventList()
    # Use the coarser of the two time resolutions.
    if self.dt != other.dt:
        simon("The time resolution is different."
              " Using the rougher by default")
    ev_new.dt = np.max([self.dt, other.dt])
    if self.time is None and other.time is None:
        return ev_new
    # Replace a missing time array with an empty one so concatenation works.
    if (self.time is None):
        simon("One of the event lists you are concatenating is empty.")
        self.time = np.asarray([])
    elif (other.time is None):
        simon("One of the event lists you are concatenating is empty.")
        other.time = np.asarray([])
    # Merge and time-sort the events; the same ordering is reused below for
    # pi and pha so they stay aligned with the time stamps.
    ev_new.time = np.concatenate([self.time, other.time])
    order = np.argsort(ev_new.time)
    ev_new.time = ev_new.time[order]
    # PI channels: None if absent in both, otherwise default missing to 0.
    if (self.pi is None) and (other.pi is None):
        ev_new.pi = None
    elif (self.pi is None) or (other.pi is None):
        self.pi = assign_value_if_none(self.pi, np.zeros_like(self.time))
        other.pi = assign_value_if_none(other.pi,
                                        np.zeros_like(other.time))
    if (self.pi is not None) and (other.pi is not None):
        ev_new.pi = np.concatenate([self.pi, other.pi])
        ev_new.pi = ev_new.pi[order]
    # Energies: same policy as PI channels.
    if (self.pha is None) and (other.pha is None):
        ev_new.pha = None
    elif (self.pha is None) or (other.pha is None):
        self.pha = assign_value_if_none(self.pha, np.zeros_like(self.time))
        other.pha = assign_value_if_none(other.pha,
                                         np.zeros_like(other.time))
    if (self.pha is not None) and (other.pha is not None):
        ev_new.pha = np.concatenate([self.pha, other.pha])
        ev_new.pha = ev_new.pha[order]
    # If only one list has GTIs, give the other a default GTI spanning its
    # events (padded by half a time resolution on each side).
    if self.gti is None and other.gti is not None and len(self.time) > 0:
        self.gti = \
            assign_value_if_none(self.gti,
                                 np.asarray([[self.time[0] - self.dt / 2,
                                              self.time[-1] + self.dt / 2]]))
    if other.gti is None and self.gti is not None and len(other.time) > 0:
        other.gti = \
            assign_value_if_none(other.gti,
                                 np.asarray([[other.time[0] - other.dt / 2,
                                              other.time[-1] + other.dt / 2]]))
    # Cross overlapping GTIs; append them when the lists do not overlap.
    if (self.gti is None) and (other.gti is None):
        ev_new.gti = None
    elif (self.gti is not None) and (other.gti is not None):
        if check_separate(self.gti, other.gti):
            ev_new.gti = append_gtis(self.gti, other.gti)
            simon('GTIs in these two event lists do not overlap at all.'
                  'Merging instead of returning an overlap.')
        else:
            ev_new.gti = cross_gtis([self.gti, other.gti])
    return ev_new
@staticmethod
def read(filename, format_='pickle'):
"""
Imports EventList object.
Parameters
----------
filename: str
Name of the EventList object to be read.
format_: str
Available options are 'pickle', 'hdf5', 'ascii' and 'fits'.
Returns
-------
ev: `EventList` object
"""
attributes = ['time', 'pha', 'ncounts', 'mjdref', 'dt',
'notes', 'gti', 'pi']
data = read(filename, format_, cols=attributes)
if format_ == 'ascii':
time = np.array(data.columns[0])
return EventList(time=time)
elif format_ == 'hdf5' or format_ == 'fits':
keys = data.keys()
values | |
of the identity.
- **IdentityId** *(string) --*
A unique identifier in the format REGION:GUID.
- **Logins** *(list) --*
The provider names.
- *(string) --*
- **CreationDate** *(datetime) --*
Date on which the identity was created.
- **LastModifiedDate** *(datetime) --*
Date on which the identity was last modified.
:type IdentityId: string
:param IdentityId: **[REQUIRED]**
A unique identifier in the format REGION:GUID.
:rtype: dict
:returns:
"""
pass
def describe_identity_pool(self, IdentityPoolId: str) -> Dict:
    """Get details about a particular identity pool.

    The response includes the pool name, ID description, creation date, and
    current number of users. You must use AWS Developer credentials to call
    this API.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/DescribeIdentityPool>`_

    **Request Syntax**
    ::
        response = client.describe_identity_pool(
            IdentityPoolId='string'
        )

    :type IdentityPoolId: string
    :param IdentityPoolId: **[REQUIRED]**
        An identity pool ID in the format REGION:GUID.
    :rtype: dict
    :returns: A dict describing the identity pool, with keys such as
        ``IdentityPoolId`` (REGION:GUID), ``IdentityPoolName``,
        ``AllowUnauthenticatedIdentities`` (bool),
        ``SupportedLoginProviders`` (provider name -> app ID map),
        ``DeveloperProviderName``, ``OpenIdConnectProviderARNs`` (list),
        ``CognitoIdentityProviders`` (list of dicts with ``ProviderName``,
        ``ClientId``, ``ServerSideTokenCheck``), ``SamlProviderARNs`` (list)
        and ``IdentityPoolTags`` (tag map).
    """
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """Generate a presigned url given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned url is valid for;
        by default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url; by
        default, whatever http method is used in the method's model.
    :returns: The presigned url
    """
    pass
def get_credentials_for_identity(self, IdentityId: str, Logins: Dict = None, CustomRoleArn: str = None) -> Dict:
    """Return credentials for the provided identity ID.

    Any provided logins will be validated against supported login providers.
    If the token is for cognito-identity.amazonaws.com, it will be passed
    through to AWS Security Token Service with the appropriate role for the
    token. This is a public API: no credentials are needed to call it.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetCredentialsForIdentity>`_

    **Request Syntax**
    ::
        response = client.get_credentials_for_identity(
            IdentityId='string',
            Logins={'string': 'string'},
            CustomRoleArn='string'
        )

    :type IdentityId: string
    :param IdentityId: **[REQUIRED]**
        A unique identifier in the format REGION:GUID.
    :type Logins: dict
    :param Logins: Optional name-value pairs mapping provider names to
        provider tokens ("provider_name": "provider_user_identifier").
        Omit when requesting credentials for an unauthenticated identity;
        required when using identities associated with external identity
        providers such as Facebook.
    :type CustomRoleArn: string
    :param CustomRoleArn: The ARN of the role to be assumed when multiple
        roles were received in the token from the identity provider.
        Optional for providers that do not support role customization.
    :rtype: dict
    :returns: A dict with ``IdentityId`` (REGION:GUID) and ``Credentials``
        (dict with ``AccessKeyId``, ``SecretKey``, ``SessionToken`` and
        ``Expiration`` datetime).
    """
    pass
def get_id(self, IdentityPoolId: str, AccountId: str = None, Logins: Dict = None) -> Dict:
    """Generate (or retrieve) a Cognito ID.

    Supplying multiple logins creates an implicit linked account. This is a
    public API: no credentials are needed to call it.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetId>`_

    **Request Syntax**
    ::
        response = client.get_id(
            AccountId='string',
            IdentityPoolId='string',
            Logins={'string': 'string'}
        )

    :type AccountId: string
    :param AccountId: A standard AWS account ID (9+ digits).
    :type IdentityPoolId: string
    :param IdentityPoolId: **[REQUIRED]**
        An identity pool ID in the format REGION:GUID.
    :type Logins: dict
    :param Logins: Optional name-value pairs mapping provider names to
        provider tokens. Available provider names:

        * Facebook: ``graph.facebook.com``
        * Amazon Cognito user pool:
          ``cognito-idp.<region>.amazonaws.com/<YOUR_USER_POOL_ID>``,
          for example ``cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789``.
        * Google: ``accounts.google.com``
        * Amazon: ``www.amazon.com``
        * Twitter: ``api.twitter.com``
        * Digits: ``www.digits.com``
    :rtype: dict
    :returns: A dict with ``IdentityId``, a unique identifier in the format
        REGION:GUID.
    """
    pass
def get_identity_pool_roles(self, IdentityPoolId: str) -> Dict:
"""
Gets the roles for an identity pool.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetIdentityPoolRoles>`_
**Request Syntax**
::
response = client.get_identity_pool_roles(
IdentityPoolId='string'
)
**Response Syntax**
::
{
'IdentityPoolId': 'string',
'Roles': {
'string': 'string'
},
'RoleMappings': {
'string': {
'Type': 'Token'|'Rules',
'AmbiguousRoleResolution': 'AuthenticatedRole'|'Deny',
'RulesConfiguration': {
'Rules': [
{
'Claim': 'string',
'MatchType': 'Equals'|'Contains'|'StartsWith'|'NotEqual',
'Value': 'string',
'RoleARN': 'string'
},
]
}
}
}
}
**Response Structure**
- *(dict) --*
Returned in response to a successful ``GetIdentityPoolRoles`` operation.
- **IdentityPoolId** *(string) --*
An identity pool ID in the format REGION:GUID.
- **Roles** *(dict) --*
The map of roles associated with this pool. Currently only | |
logger.critical("Output of InitialCollinearVariables.get():")
logger.critical(str(kinematic_variables))
return
@staticmethod
def set(
    PS_point, is_child, fs_children, na, nb, kinematic_variables,
    precision=1e-6 ):
    """Given the lower multiplicity momentum of the incoming parton
    as PS_point[is_child] and collinear variables,
    compute and set the final-state children momenta.

    Children indices should already refer to the position
    of momenta within the PS_point (no momentum dictionary used).
    Sum rules are checked to assess numerical accuracy: the momentum of the
    parent reconstructed from its variables must match the incoming momentum
    minus the sum of the children, up to ``precision`` (relative).
    """
    # Nothing to do without final-state children.
    if not fs_children: return
    pa = PS_point[is_child]
    # Get A data: longitudinal fraction, transverse momentum and virtuality
    # of the parent (labelled by the initial-state child index).
    zA = kinematic_variables['z' + str(is_child)]
    ktA = kinematic_variables['kt' + str(is_child)]
    pA2 = kinematic_variables['s' + str(is_child)]
    # Pre-compute light-cone products with the reference vectors na, nb.
    nanb = na.dot(nb)
    nbpa = nb.dot(pa)
    ptA = ktA
    # ptA = zA * ktA
    nbpA = zA * nbpa
    # Fix the na-component from the on-shell/virtuality condition.
    napA = (pA2 - ptA.square()) * nanb / (2*nbpA)
    # Parent momentum in the (na, nb, transverse) decomposition.
    pA = (nbpA*na + napA*nb) / nanb + ptA
    # Variables for sums (used in the accuracy check below).
    p_sum = LorentzVector(pa)
    # Set momenta for all children via the same decomposition.
    for i in fs_children:
        zi = kinematic_variables['z' + str(i)]
        kti = kinematic_variables['kt' + str(i)]
        pi2 = kinematic_variables['m2' + str(i)]
        pti = kti
        # pti = kti + zi * ktA
        nbpi = zi * nbpa
        napi = (pi2 - pti.square()) * nanb / (2 * nbpi)
        PS_point[i] = (nbpi*na + napi*nb) / nanb + pti
        p_sum -= PS_point[i]
    # Check how well the parent's momentum is reproduced
    # TODO Ideally switch to quadruple precision if the check fails
    deviation = abs(pA - p_sum)
    benchmark = abs(pA)
    if deviation / benchmark > precision:
        # Dump full inputs/outputs to help debug the precision loss.
        logger.critical(FinalCollinearVariables.precision_loss_message)
        logger.critical("The sum of children momenta is %s" % str(p_sum))
        logger.critical("vs the total: %s" % str(pA))
        logger.critical("Inputs for InitialCollinearVariables.set():")
        logger.critical("pa = %s" % str(pa))
        logger.critical("na = %s, nb = %s" % (str(na), str(nb)))
        logger.critical("kinematic variables:")
        logger.critical(str(kinematic_variables))
        logger.critical("Output of InitialCollinearVariables.set():")
        for i in fs_children:
            logger.critical("fs_child %d: %s" % (i, str(PS_point[i])))
    return
#=========================================================================================
# Soft variables
#=========================================================================================
class SoftVariables(object):
    """Helpers to extract and restore the momenta of soft (unresolved)
    particles. Soft particles are described simply by their full momentum."""

    @staticmethod
    def names(children):
        """Return the names of the variables describing particles going
        unresolved, one ``'p<index>'`` entry per child."""
        return ['p' + str(child) for child in children]

    @staticmethod
    def get(PS_point, children, kinematic_variables):
        """Store, in kinematic_variables, the variables describing the
        internal structure of particles going unresolved.

        Children indices should already refer to the position of momenta
        within the PS_point (no momentum dictionary used).
        """
        # For soft particles, just pass (a copy of) the whole momentum.
        for child in children:
            key = 'p' + str(child)
            kinematic_variables[key] = PS_point[child].get_copy()
        return

    @staticmethod
    def set(PS_point, children, kinematic_variables):
        """Set the children momenta in PS_point from a dictionary of
        variables that describe the unresolved partons.

        Children indices should already refer to the position of momenta
        within the PS_point (no momentum dictionary used).
        """
        for child in children:
            PS_point[child] = kinematic_variables['p' + str(child)]
        return
#=========================================================================================
# VirtualMapping
#=========================================================================================
class VirtualMapping(object):
    """Base class for elementary mapping implementations.

    Subclasses must override every method below. The base implementations
    raise NotImplementedError.
    """
    # TODO Add a method is_valid_recoiler?

    # BUGFIX: the original code used `raise NotImplemented`, which raises the
    # built-in NotImplemented *constant* — not an exception — and therefore
    # fails with "TypeError: exceptions must derive from BaseException" in
    # Python 3. The abstract-method idiom is `raise NotImplementedError`.

    @classmethod
    def is_valid_structure(cls, singular_structure):
        """Check if the mapping can be applied to a given singular structure."""
        raise NotImplementedError

    @classmethod
    def get_kinematic_variables_names(cls, singular_structure, momenta_dict):
        """Get the names of the variables that describe unresolved particles."""
        raise NotImplementedError

    @classmethod
    def map_to_lower_multiplicity(
        cls, PS_point, singular_structure, momenta_dict, squared_masses=None,
        kinematic_variables=None, compute_jacobian=False ):
        """Map a phase-space point to lower multiplicity,
        by clustering the substructures and recoiling against the legs
        specified in singular_structure.

        :param PS_point: higher-multiplicity phase-space point,
            as a dictionary that associates integers to Lorentz vectors;
            this will not be modified
        :type PS_point: LorentzVectorDict
        :param singular_structure: SingularStructure object that specifies
            sets of unresolved particles and recoilers recursively
        :type singular_structure: SingularStructure
        :param momenta_dict: two-way dictionary that associates a unique label
            to each set of one or more unresolved particles identified by their number
        :type momenta_dict: sub.bidict
        :param squared_masses: squared masses of parents of particle sets,
            as a dictionary {'m2i': $m_i^2$} where i is the parent number
        :param kinematic_variables: if a non-empty dictionary is passed,
            the kinematic variables that are necessary to reproduce the higher-multiplicity
            phase-space point from the lower-multiplicity one will be set
        :param compute_jacobian: if False, the jacobian of the mapping will be set to 1
        :type compute_jacobian: bool
        :return: lower-multiplicity phase-space point and mapping variables,
            including the jacobian weight of the mapping and the total momentum involved
        """
        raise NotImplementedError

    @classmethod
    def map_to_higher_multiplicity(
        cls, PS_point, singular_structure, momenta_dict, kinematic_variables,
        compute_jacobian=False ):
        """Map a phase-space point to higher multiplicity,
        by splitting the (pseudo)particles and recoiling against the legs
        specified in singular_structure.

        :param PS_point: lower-multiplicity phase-space point
            which will not be modified
        :type PS_point: LorentzVectorDict
        :param singular_structure: SingularStructure object that specifies
            sets of unresolved particles and recoilers recursively
        :type singular_structure: SingularStructure
        :param momenta_dict: two-way dictionary that associates a unique label
            to each set of one or more unresolved particles identified by their number
        :type momenta_dict: sub.bidict
        :param kinematic_variables: variables describing the splitting,
            as a dictionary that associates variable names to values
        :param compute_jacobian: if False, the jacobian of the mapping will be set to 1
        :type compute_jacobian: bool
        :return: higher-multiplicity phase-space point and jacobian of the mapping
        """
        raise NotImplementedError

    @classmethod
    def can_map_to_higher_multiplicity(
        cls, PS_point, singular_structure, momenta_dict, kinematic_variables ):
        """Check whether a given phase-space point can be mapped to higher
        multiplicity, by splitting the (pseudo)particles and recoiling against
        the legs specified in singular_structure.

        :param PS_point: lower-multiplicity phase-space point
        :type PS_point: LorentzVectorDict
        :param singular_structure: SingularStructure object that specifies
            sets of unresolved particles and recoilers recursively
        :type singular_structure: SingularStructure
        :param momenta_dict: two-way dictionary that associates a unique label
            to each set of one or more unresolved particles identified by their number
        :type momenta_dict: sub.bidict
        :param kinematic_variables: variables describing the splitting,
            as a dictionary that associates variable names to values
        :return: boolean that specifies if the lower-multiplicity phase-space point
            has a corresponding higher-multiplicity point with the given kinematic variables
        """
        raise NotImplementedError

    @classmethod
    def rescale_kinematic_variables(
        cls, singular_structure, momenta_dict, kinematic_variables, scaling_parameter ):
        """Rescale in-place the given kinematic variables so as to approach the limit.

        :param singular_structure: SingularStructure object that specifies
            sets of unresolved particles and recoilers recursively
        :param momenta_dict: two-way dictionary that associates a unique label
            to each set of one or more unresolved particles identified by their number
        :param kinematic_variables: variables describing the splitting,
            as a dictionary that associates variable names to values
        :param scaling_parameter: a parameter from 0 to 1 that indicates the distance
            from the singular limit
        :return: the modified kinematic variables
        """
        raise NotImplementedError
@classmethod
def rescale_kinematic_variables(
cls, singular_structure, momenta_dict, kinematic_variables, scaling_parameter ):
"""Rescale in-place the given kinematic variables so as to approach the limit.
:param singular_structure: SingularStructure object that specifies
sets of unresolved particles and recoilers recursively
:param momenta_dict: two-way dictionary that associates a unique label
to each set of one or more unresolved particles identified by their number
:param kinematic_variables: variables describing the splitting,
as a dictionary that associates variable names to values
:param scaling_parameter: a parameter from 0 to 1 that indicates the distance
from the singular limit
:return: the modified kinematic variables
"""
raise NotImplemented
class FailedMapping(MadGraph5Error):
    """Raised when a mapping cannot be applied to a phase-space point."""
#=========================================================================================
# Final mappings changing invariant masses
#=========================================================================================
# Final mapping to/from zero masses
#=========================================================================================
class FinalZeroMassesMapping(VirtualMapping):
"""Mapping that sends massive particles into massless particles."""
plot = False
@classmethod
def is_valid_structure(cls, singular_structure):
assert isinstance(singular_structure, sub.SingularStructure)
# Valid only for final-state particles with no recursive substructure
for substructure in singular_structure.substructures:
if substructure.substructures:
return False
if len(substructure.legs) != 1:
return False
if substructure.get_all_legs().has_initial_state_leg():
return False
# At least two particles
assert len(singular_structure.substructures) > 1
# No recoilers
if singular_structure.legs:
return False
return True
@classmethod
def get_kinematic_variables_names(cls, singular_structure, momenta_dict):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
names = []
for substructure in singular_structure.substructures:
names.append('s' + str(tuple(substructure.legs)[0].n))
return names
@classmethod
def map_to_lower_multiplicity(
cls, PS_point, singular_structure, momenta_dict, squared_masses=None,
kinematic_variables=None, compute_jacobian=False ):
# Consistency checks
assert isinstance(momenta_dict, sub.bidict)
if not cls.is_valid_structure(singular_structure):
raise MappingError("Singular structure '%s' is not supported by mapping '%s'"%(
str(singular_structure), cls.__name__))
# Determine leg numbers and check that target masses are zero
js = []
for substructure in singular_structure.substructures:
j = tuple(substructure.legs)[0].n
js.append(j)
assert (squared_masses is None) or (squared_masses['m2' + str(j)] == 0)
# Build total momentum
Q = LorentzVector()
for j in js:
Q += PS_point[j]
Q2 = Q.square()
# Compute the parameters gamma, alpha, beta
gamma = {j: (Q.dot(PS_point[j]) / Q2) for j in js}
# assert abs(sum(gamma.values()) - 1.) < 1.e-6
mu2 = {j: (PS_point[j].square() / Q2) for j in js}
alpha = 0.
for j in js:
alpha += (gamma[j] ** 2 - mu2[j]) ** 0.5
beta = {j: ((gamma[j] ** 2 - mu2[j]) ** 0.5 / alpha) for j in js}
# assert abs(sum(beta.values()) - 1.) < 1.e-6
# Map all momenta
new_PS_point | |
0 indicating
positions that should be assigned 0 probability in the output.
dim (int): Dimension over which to take softmax.
log_softmax (bool): Take log-softmax rather than regular softmax.
E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.
Returns:
probs (torch.Tensor): Result of taking masked softmax over the logits.
"""
mask = mask.type(torch.float32)
masked_logits = mask * logits + (1 - mask) * -1e30
softmax_fn = F.log_softmax if log_softmax else F.softmax
probs = softmax_fn(masked_logits, dim)
return probs
def visualize(tbx, pred_dict, eval_path, step, split, num_visuals):
    """Write a random sample of predictions to TensorBoard as markdown text.

    Args:
        tbx (tensorboardX.SummaryWriter): Summary writer.
        pred_dict (dict): dict of predictions of the form id -> pred.
        eval_path (str): Path to eval JSON file.
        step (int): Number of examples seen so far during training.
        split (str): Name of data split being visualized.
        num_visuals (int): Number of visuals to select at random from preds.
    """
    if num_visuals <= 0:
        return
    # Never sample more examples than we have predictions for
    num_visuals = min(num_visuals, len(pred_dict))
    chosen_ids = np.random.choice(list(pred_dict), size=num_visuals, replace=False)

    with open(eval_path, 'r') as eval_file:
        eval_dict = json.load(eval_file)

    for i, id_ in enumerate(chosen_ids):
        example = eval_dict[str(id_)]
        gold_answers = example['answers']
        entry = (f'- **Question:** {example["question"]}\n'
                 + f'- **Context:** {example["context"]}\n'
                 + f'- **Answer:** {gold_answers[0] if gold_answers else "N/A"}\n'
                 + f'- **Prediction:** {pred_dict[id_] or "N/A"}')
        tbx.add_text(tag=f'{split}/{i+1}_of_{num_visuals}',
                     text_string=entry,
                     global_step=step)
def save_preds(preds, save_dir, file_name='predictions.csv'):
    """Write predictions to `<save_dir>/<file_name>` as CSV rows `id,start,end`.

    Args:
        preds (list): List of predictions each of the form (id, start, end),
            where id is an example ID, and start/end are indices in the context.
        save_dir (str): Directory in which to save the predictions file.
        file_name (str): File name for the CSV file.

    Returns:
        save_path (str): Path where CSV file was saved.

    Raises:
        ValueError: If `preds` is not a list of 3-tuples.
    """
    bad_shape = (not isinstance(preds, list)
                 or any(not isinstance(p, tuple) or len(p) != 3 for p in preds))
    if bad_shape:
        raise ValueError('preds must be a list of tuples (id, start, end)')

    # Rows are written in ascending example-ID order
    ordered = sorted(preds, key=lambda row: row[0])

    save_path = os.path.join(save_dir, file_name)
    np.savetxt(save_path, np.array(ordered), delimiter=',', fmt='%d')
    return save_path
def get_save_dir(base_dir, name, training, id_max=100):
    """Create and return a unique save directory `<base>/<split>/<name>-<uid>`.

    The smallest positive `uid < id_max` whose directory does not yet exist
    is used.

    Args:
        base_dir (str): Base directory in which to make save directories.
        name (str): Name to identify this training run. Need not be unique.
        training (bool): Save dir. is for training (determines subdirectory).
        id_max (int): Maximum ID number before raising an exception.

    Returns:
        save_dir (str): Path to a new directory with a unique name.
    """
    subdir = 'train' if training else 'test'
    for uid in range(1, id_max):
        candidate = os.path.join(base_dir, subdir, f'{name}-{uid:02d}')
        if os.path.exists(candidate):
            continue
        os.makedirs(candidate)
        return candidate

    raise RuntimeError('Too many save directories created with the same name. \
                       Delete old save directories or use another name.')
def get_logger(log_dir, name):
    """Build a `logging.Logger` that writes to both console and `log.txt`.

    Args:
        log_dir (str): Directory in which to create the log file.
        name (str): Name to identify the logs.

    Returns:
        logger (logging.Logger): Logger instance for logging events.
    """
    class StreamHandlerWithTQDM(logging.Handler):
        """Let `logging` print without breaking `tqdm` progress bars.

        See Also:
            > https://stackoverflow.com/questions/38543506
        """
        def emit(self, record):
            try:
                # tqdm.write keeps any active progress bar intact
                tqdm.tqdm.write(self.format(record))
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # Both sinks share the same timestamped format
    fmt = '[%(asctime)s] %(message)s'
    date_fmt = '%m.%d.%y %H:%M:%S'

    # The file receives everything (DEBUG and above)
    file_handler = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(fmt, datefmt=date_fmt))
    logger.addHandler(file_handler)

    # The console receives INFO and above only
    console_handler = StreamHandlerWithTQDM()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter(fmt, datefmt=date_fmt))
    logger.addHandler(console_handler)

    return logger
def torch_from_json(path, dtype=torch.float32):
    """Load a PyTorch tensor stored as a JSON array.

    Args:
        path (str): Path to the JSON file to load.
        dtype (torch.dtype): Data type of the returned tensor.

    Returns:
        tensor (torch.Tensor): Tensor loaded from JSON file.
    """
    with open(path, 'r') as fh:
        raw = json.load(fh)
    # Go through numpy, then cast to the requested dtype
    return torch.from_numpy(np.array(raw)).type(dtype)
def discretize(p_start, p_end, max_len=15, no_answer=False):
    """Convert soft span predictions into hard (start, end) indices.

    Selects the pair `(i, j)` maximizing `p_start[i] * p_end[j]` subject to
    `i <= j` and `j - i + 1 <= max_len`.

    Args:
        p_start (torch.Tensor): Soft predictions for start index.
            Shape (batch_size, context_len).
        p_end (torch.Tensor): Soft predictions for end index.
            Shape (batch_size, context_len).
        max_len (int): Maximum length of the discretized prediction.
            I.e., enforce that `preds[i, 1] - preds[i, 0] + 1 <= max_len`.
        no_answer (bool): Treat 0-index as the no-answer prediction. Consider
            a prediction no-answer if `preds[0, 0] * preds[0, 1]` is greater
            than the probability assigned to the max-probability span.

    Returns:
        start_idxs (torch.Tensor): Hard predictions for start index.
            Shape (batch_size,)
        end_idxs (torch.Tensor): Hard predictions for end index.
            Shape (batch_size,)
    """
    in_unit_interval = (p_start.min() >= 0 and p_start.max() <= 1
                        and p_end.min() >= 0 and p_end.max() <= 1)
    if not in_unit_interval:
        raise ValueError('Expected p_start and p_end to have values in [0, 1]')

    # Outer product of start/end probabilities: (batch_size, c_len, c_len)
    p_joint = torch.matmul(p_start.unsqueeze(dim=2), p_end.unsqueeze(dim=1))

    # Band of the upper triangle: pairs with i <= j <= i + max_len - 1
    c_len = p_joint.size(1)
    device = p_joint.device
    band = torch.triu(torch.ones((c_len, c_len), device=device))
    band = band - torch.triu(torch.ones((c_len, c_len), device=device),
                             diagonal=max_len)

    if no_answer:
        # Remember the (0, 0) probability, then exclude index 0 from spans
        p_no_answer = p_joint[:, 0, 0].clone()
        band[0, :] = 0
        band[:, 0] = 0
    else:
        p_no_answer = None
    p_joint = p_joint * band

    # Row-wise / column-wise maxima both peak at the argmax pair
    best_per_start, _ = torch.max(p_joint, dim=2)
    best_per_end, _ = torch.max(p_joint, dim=1)
    start_idxs = torch.argmax(best_per_start, dim=-1)
    end_idxs = torch.argmax(best_per_end, dim=-1)

    if no_answer:
        # Force index 0 wherever the no-answer probability dominates
        best_overall, _ = torch.max(best_per_end, dim=-1)
        start_idxs[p_no_answer > best_overall] = 0
        end_idxs[p_no_answer > best_overall] = 0

    return start_idxs, end_idxs
def convert_tokens(eval_dict, qa_id, y_start_list, y_end_list, no_answer):
    """Map predicted token indices back to answer text from the context.

    Args:
        eval_dict (dict): Dictionary with eval info for the dataset. This is
            used to perform the mapping from IDs and indices to actual text.
        qa_id (int): List of QA example IDs.
        y_start_list (list): List of start predictions.
        y_end_list (list): List of end predictions.
        no_answer (bool): Questions can have no answer. E.g., SQuAD 2.0.

    Returns:
        pred_dict (dict): Dictionary index IDs -> predicted answer text.
        sub_dict (dict): Dictionary UUIDs -> predicted answer text (submission).
    """
    pred_dict, sub_dict = {}, {}
    for qid, start, end in zip(qa_id, y_start_list, y_end_list):
        entry = eval_dict[str(qid)]
        if no_answer and (start == 0 or end == 0):
            # Index 0 encodes "no answer" in SQuAD 2.0-style outputs
            answer = ''
        else:
            if no_answer:
                # Shift past the reserved no-answer position
                start, end = start - 1, end - 1
            spans = entry["spans"]
            answer = entry["context"][spans[start][0]: spans[end][1]]
        pred_dict[str(qid)] = answer
        sub_dict[entry["uuid"]] = answer
    return pred_dict, sub_dict
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best `metric_fn` score of `prediction` over all gold answers.

    When no gold answers exist, the prediction is scored against the empty
    string.
    """
    if not ground_truths:
        return metric_fn(prediction, '')
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def eval_dicts(gold_dict, pred_dict, no_answer):
    """Aggregate EM/F1 (and AvNA when `no_answer`) percentages over all preds."""
    em_sum = f1_sum = avna_sum = 0.
    count = 0
    for key, prediction in pred_dict.items():
        count += 1
        gold_answers = gold_dict[key]['answers']
        # Each metric takes the best score over all acceptable gold answers
        em_sum += metric_max_over_ground_truths(compute_em, prediction, gold_answers)
        f1_sum += metric_max_over_ground_truths(compute_f1, prediction, gold_answers)
        if no_answer:
            avna_sum += compute_avna(prediction, gold_answers)
    results = {'EM': 100. * em_sum / count,
               'F1': 100. * f1_sum / count}
    if no_answer:
        results['AvNA'] = 100. * avna_sum / count
    return results
def compute_avna(prediction, ground_truths):
    """Answer-vs-no-answer accuracy: 1.0 when prediction emptiness matches gold."""
    has_prediction = bool(prediction)
    has_gold = bool(ground_truths)
    return float(has_prediction == has_gold)
# All methods below this line are from the official SQuAD 2.0 eval script
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
    """Lowercase, strip punctuation and articles, and collapse whitespace."""
    lowered = s.lower()
    # Drop every punctuation character
    punct = set(string.punctuation)
    no_punct = ''.join(ch for ch in lowered if ch not in punct)
    # Remove standalone English articles
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punct, flags=re.UNICODE)
    # Collapse runs of whitespace into single spaces
    return ' '.join(no_articles.split())
def get_tokens(s):
    """Whitespace-tokenize `s` after normalization; empty input yields []."""
    return normalize_answer(s).split() if s else []
def compute_em(a_gold, a_pred):
    """Exact-match score: 1 if normalized answers are identical, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def | |
"""
Functions to perform normal, weighted and robust fitting.
"""
from __future__ import annotations
import inspect
from typing import Callable, Union, Sized, Optional
import warnings
import numpy as np
import pandas as pd
import scipy.optimize
from xdem.spatialstats import nd_binning
from geoutils.spatial_tools import subsample_raster
try:
from sklearn.metrics import mean_squared_error, median_absolute_error
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures, RobustScaler
_has_sklearn = True
except ImportError:
_has_sklearn = False
def rmse(z: np.ndarray) -> float:
    """
    Return root mean square error; NaN entries are excluded from the mean.

    :param z: Residuals between predicted and true value

    :return: Root Mean Square Error
    """
    mean_sq = np.nanmean(z ** 2)
    return np.sqrt(mean_sq)
def huber_loss(z: np.ndarray) -> float:
    """
    Huber loss cost (reduces the weight of outliers).

    Entries with z <= 1 contribute quadratically (z**2); entries with z > 1
    contribute 2*sqrt(z) - 1.

    :param z: Residuals between predicted and true values

    :return: Huber cost
    """
    z = np.asarray(z, dtype=float)
    out = np.square(z)
    # Fix: the original np.where(z > 1, 2*np.sqrt(z[np.where(z > 1)]) - 1, ...)
    # indexed only the z > 1 subset inside the branch, which raised a broadcast
    # ValueError whenever that subset's size was neither 1 nor len(z).
    # Masked assignment handles any number of outliers (including none).
    large = z > 1
    out[large] = 2 * np.sqrt(z[large]) - 1
    return out.sum()
def soft_loss(z: np.ndarray, scale = 0.5) -> float:
    """
    Soft loss cost (reduces the weight of outliers).

    :param z: Residuals between predicted and true values
    :param scale: Scale factor

    :return: Soft loss cost
    """
    scaled_sq = np.square(z / scale)
    per_point = np.square(scale) * 2 * (np.sqrt(1 + scaled_sq) - 1)
    return np.sum(per_point)
def _costfun_sumofsin(p, x, y, cost_func):
    """
    Robust cost of a sum-of-sinusoids model: apply cost_func to the residuals
    between the observations y and the model evaluated at x with parameters p.
    """
    residuals = y - _sumofsinval(x, p)
    return cost_func(residuals)
def _choice_best_order(cost: np.ndarray, margin_improvement : float = 20., verbose: bool = False) -> int:
"""
Choice of the best order (polynomial, sum of sinusoids) with a margin of improvement. The best cost value does
not necessarily mean the best predictive fit because high-degree polynomials tend to overfit, and sum of sinusoids
as well. To mitigate this issue, we should choose the lesser order from which improvement becomes negligible.
:param cost: cost function residuals to the polynomial
:param margin_improvement: improvement margin (percentage) below which the lesser degree polynomial is kept
:param verbose: if text should be printed
:return: degree: degree for the best-fit polynomial
"""
# get percentage of spread from the minimal cost
ind_min = cost.argmin()
min_cost = cost[ind_min]
perc_cost_improv = (cost - min_cost) / min_cost
# costs below threshold and lesser degrees
below_margin = np.logical_and(perc_cost_improv < margin_improvement / 100., np.arange(len(cost))<=ind_min)
costs_below_thresh = cost[below_margin]
# minimal costs
subindex = costs_below_thresh.argmin()
# corresponding index (degree)
ind = np.arange(len(cost))[below_margin][subindex]
if verbose:
print('Order '+str(ind_min+1)+ ' has the minimum cost value of '+str(min_cost))
print('Order '+str(ind+1)+ ' is selected within a '+str(margin_improvement)+' % margin of'
' the minimum cost, with a cost value of '+str(min_cost))
return ind
def _wrapper_scipy_leastsquares(residual_func, p0, x, y, verbose, **kwargs):
"""
Wrapper function for scipy.optimize.least_squares: passes down keyword, extracts cost and final parameters, print
statements in the console
:param residual_func: Residual function to fit
:param p0: Initial guess
:param x: X vector
:param y: Y vector
:param verbose: Whether to print out statements
:return:
"""
# Get arguments of scipy.optimize
fun_args = scipy.optimize.least_squares.__code__.co_varnames[:scipy.optimize.least_squares.__code__.co_argcount]
# Check no other argument is left to be passed
remaining_kwargs = kwargs.copy()
for arg in fun_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: ' + ','.join(list(remaining_kwargs.keys())) + ' were not used.')
# Filter corresponding arguments before passing
filtered_kwargs = {k: kwargs[k] for k in fun_args if k in kwargs}
# Run function with associated keyword arguments
myresults = scipy.optimize.least_squares(residual_func, p0, args=(x, y), **filtered_kwargs)
if verbose:
print("Initial Parameters: ", p0)
print("Status: ", myresults.success, " - ", myresults.status)
print(myresults.message)
print("Lowest cost:", myresults.cost)
print("Parameters:", myresults.x)
cost = myresults.cost
coefs = myresults.x
return cost, coefs
def _wrapper_sklearn_robustlinear(model, estimator_name, cost_func, x, y, **kwargs):
    """
    Wrapper for sklearn robust linear estimators: forwards only recognized
    keyword arguments, fits a pipeline of `model` followed by the chosen
    estimator, and returns the fit cost along with the estimated coefficients.

    :param model: Function model to fit (e.g., Polynomial features)
    :param estimator_name: Linear estimator to use (one of "Linear", "Theil-Sen", "RANSAC" and "Huber")
    :param cost_func: Cost function to use for optimization
    :param x: X vector
    :param y: Y vector

    :return: cost and coefficients of the fit
    """
    # Map the estimator name to its sklearn class
    dict_estimators = {'Linear': LinearRegression, 'Theil-Sen': TheilSenRegressor,
                       'RANSAC': RANSACRegressor, 'Huber': HuberRegressor}
    est = dict_estimators[estimator_name]

    # Keywords recognized by the estimator's constructor
    estimator_args = list(inspect.signature(est.__init__).parameters.keys())

    # Warn about keyword arguments that will be dropped
    unused = [key for key in kwargs if key not in estimator_args]
    if len(unused) != 0:
        warnings.warn('Keyword arguments: ' + ','.join(unused) + ' were not used.')

    # Keep only the recognized keywords
    filtered_kwargs = {k: v for k, v in kwargs.items() if k in estimator_args}

    # TODO: Find out how to re-scale polynomial coefficient + doc on what is the best scaling for polynomials
    # (input scaling with RobustScaler was prototyped here but is disabled
    # until the inverse transform of the coefficients is worked out)

    # Fit a pipeline of the model followed by the estimator
    init_estimator = est(**filtered_kwargs)
    pipeline = make_pipeline(model, init_estimator)
    pipeline.fit(x.reshape(-1, 1), y)

    # Cost of the prediction on the training data
    cost = cost_func(pipeline.predict(x.reshape(-1, 1)), y)

    # RANSAC stores the fitted coefficients on its inner estimator;
    # Linear, Theil-Sen and Huber expose them directly
    if estimator_name == 'RANSAC':
        coefs = init_estimator.estimator_.coef_
    else:
        coefs = init_estimator.coef_
    return cost, coefs
def robust_polynomial_fit(x: np.ndarray, y: np.ndarray, max_order: int = 6, estimator_name: str = 'Theil-Sen',
cost_func: Callable = median_absolute_error, margin_improvement : float = 20.,
subsample: Union[float,int] = 25000, linear_pkg = 'sklearn', verbose: bool = False,
random_state: None | np.random.RandomState | np.random.Generator | int = None, **kwargs) -> tuple[np.ndarray,int]:
"""
Given 1D vectors x and y, compute a robust polynomial fit to the data. Order is chosen automatically by comparing
residuals for multiple fit orders of a given estimator.
Any keyword argument will be passed down to scipy.optimize.least_squares and sklearn linear estimators.
:param x: input x data (N,)
:param y: input y data (N,)
:param max_order: maximum polynomial order tried for the fit
:param estimator_name: robust estimator to use, one of 'Linear', 'Theil-Sen', 'RANSAC' or 'Huber'
:param cost_func: cost function taking as input two vectors y (true y), y' (predicted y) of same length
:param margin_improvement: improvement margin (percentage) below which the lesser degree polynomial is kept
:param subsample: If <= 1, will be considered a fraction of valid pixels to extract.
If > 1 will be considered the number of pixels to extract.
:param linear_pkg: package to use for Linear estimator, one of 'scipy' and 'sklearn'
:param random_state: random seed for testing purposes
:param verbose: if text should be printed
:returns coefs, degree: polynomial coefficients and degree for the best-fit polynomial
"""
if not isinstance(estimator_name, str) or estimator_name not in ['Linear','Theil-Sen','RANSAC','Huber']:
raise ValueError('Attribute estimator must be one of "Linear", "Theil-Sen", "RANSAC" or "Huber".')
if not isinstance(linear_pkg, str) or linear_pkg not in ['sklearn','scipy']:
raise ValueError('Attribute linear_pkg must be one of "scipy" or "sklearn".')
# Remove NaNs
valid_data = np.logical_and(np.isfinite(y), np.isfinite(x))
x = x[valid_data]
y = y[valid_data]
# Subsample data
subsamp = subsample_raster(x, subsample=subsample, return_indices=True, random_state=random_state)
x = x[subsamp]
y = y[subsamp]
# Initialize cost function and output coefficients
list_costs = np.empty(max_order)
list_coeffs = np.zeros((max_order, max_order + 1))
# Loop on polynomial degrees
for deg in np.arange(1, max_order + 1):
# If method is linear and package scipy
if estimator_name == 'Linear' and linear_pkg == 'scipy':
# Define the residual function to optimize with scipy
def fitfun_polynomial(xx, params):
return sum([p * (xx ** i) for i, p in enumerate(params)])
def residual_func(p, xx, yy):
return fitfun_polynomial(xx, p) - yy
# Define the initial guess
p0 = np.polyfit(x, y, deg)
# Run the linear method with scipy
cost, coef = _wrapper_scipy_leastsquares(residual_func, p0, x, y, verbose=verbose, **kwargs)
else:
# Otherwise, we use sklearn
if not _has_sklearn:
raise ValueError("Optional dependency needed. Install 'scikit-learn'")
# Define the polynomial model to insert in the pipeline
model = PolynomialFeatures(degree=deg)
# Run the linear method with sklearn
cost, coef = _wrapper_sklearn_robustlinear(model, estimator_name=estimator_name, cost_func=cost_func,
x=x, y=y, **kwargs)
list_costs[deg - 1] = cost
list_coeffs[deg - 1, 0:coef.size] = coef
# Choose the best polynomial with a margin of improvement on the cost
final_index = _choice_best_order(cost=list_costs, margin_improvement=margin_improvement, verbose=verbose)
# The degree of the best polynomial corresponds to the index plus | |
x = self.window.winfo_rootx() + self.make_canvas.winfo_x()+10
y = self.window.winfo_rooty() + self.make_canvas.winfo_y()+10
x1 = x + self.make_canvas.winfo_width()-20
y1 = y + self.make_canvas.winfo_height()-20
ImageGrab.grab().crop((x,y,x1,y1)).save(file+'.png')
self.window.title("Sketch With Sam" + "-----" + file + ".png")
def undo(self,e):#For undo
self.status['text'] = "Undo"
self.status.place(x=1200, y=685)
if self.notation_box:
if self.notation_box['state'] == DISABLED:
self.notation_box['state'] = NORMAL
self.notation_box.delete(END)
if self.undo_container:
take = self.undo_container.pop()
if type(take) == list:
for x in take:
self.make_canvas.delete(x)
else:
self.make_canvas.delete(take)
if len(self.undo_container) == 0:
self.clear()
def clear(self):#For clear the canvas
self.undo_container.clear()
self.notation_box.delete(0, END)
self.file_menu.entryconfig("Save", state=DISABLED)
self.edit_menu.entryconfig("Undo", state=DISABLED)
self.edit_menu.entryconfig("Clear", state=DISABLED)
self.edit_menu.entryconfig("Cut", state=DISABLED)
self.edit_menu.entryconfig("Copy", state=DISABLED)
self.edit_menu.entryconfig("Paste", state=DISABLED)
self.edit_menu.entryconfig("Screen Shot", state=DISABLED)
self.option_menu.entryconfig("Movement", state=DISABLED)
self.temp.clear()
self.img_container.clear()
self.cut_copy_img.clear()
self.img_counter = -1
self.counter = -1
def cut(self,e):#Cut the selected region
self.copy(1)
self.delete_selected_region(False)
self.status['text'] = "Selected region cut successfully"
self.status.place(x=1120, y=685)
    def copy(self,e):#Copy the selected region
        """Save the selected rectangle to "cutting.png" for later pasting.

        Called with e != 1 from the Copy action (removes the selection
        rectangle before capturing) and with e == 1 from cut() (whitens the
        outline instead so it does not show up in the capture).
        """
        try:
            if e!=1:
                # Plain copy: drop the selection rectangle before capturing
                self.make_canvas.delete(self.temp.pop())
                self.status['text'] = "Selected region copied"
                self.status.place(x=1140, y=685)
            else:
                # Cut path: hide the selection outline by painting it white
                self.make_canvas.itemconfig(self.temp[len(self.temp)-1],outline="white")
            # Give Tk a moment to redraw before grabbing the screen
            time.sleep(0.0001)
            self.make_canvas.update()
            # Canvas origin in absolute screen coordinates
            x1 = self.window.winfo_rootx() + self.make_canvas.winfo_x()
            y1 = self.window.winfo_rooty() + self.make_canvas.winfo_y()
            # Capture the selected region (old_x/old_y .. new_x/new_y) to disk
            ImageGrab.grab().crop((x1 + self.old_x, y1 + self.old_y, x1 + self.new_x, y1 + self.new_y)).save("cutting.png")
            self.counter += 1
            self.reset()
        except:
            # Most likely no selection exists yet (temp empty or coords None)
            if e == 1:
                messagebox.showerror("Cut Error","Select a region by selector tool under 'Tools Collection', then cut the selected region")
                print("Cut error")
            else:
                messagebox.showerror("Copy Error","Select a region by selector tool under 'Tools Collection', then copy the selected region")
                print("Copy error")
def paste(self,e):#Paste the region keep in clipboard
try:
if self.notation_box['state'] == DISABLED:
self.notation_box['state'] = NORMAL
self.cut_copy_img.append(ImageTk.PhotoImage(Image.open("cutting.png")))
take = self.make_canvas.create_image(100, 200, image=self.cut_copy_img[self.counter])
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.status['text'] = "Paste on the screen"
except:
print("Paste Error")
    def select_region(self,e):#For select a region
        """Rubber-band selection: drag to preview, release to fix the region.

        The first motion event records the anchor corner (old_x, old_y);
        later events draw preview rectangles, and the release handler keeps
        only the final one as self.delete_seg with its far corner stored in
        (new_x, new_y).
        """
        try:
            self.status['text'] = "Select a particular region"
            if self.old_x and self.old_y:
                # Preview rectangle for the current drag position
                take = self.make_canvas.create_rectangle(self.old_x,self.old_y,e.x,e.y)
                self.temp.append(take)
                def select_region_final(e):
                    # Remove all preview rectangles, keep only the final one
                    for x in self.temp:
                        self.make_canvas.delete(x)
                    self.new_x = e.x
                    self.new_y = e.y
                    self.delete_seg = self.make_canvas.create_rectangle(self.old_x, self.old_y, self.new_x, self.new_y)
                    self.temp.append(self.delete_seg)
                self.make_canvas.bind('<ButtonRelease-1>',select_region_final)
            else:
                # First event of the drag: remember the anchor corner
                self.old_x = e.x
                self.old_y = e.y
        except:
            print("Select region error")
def delete_selected_region(self,e):#For delete selected region
self.make_canvas.itemconfig(self.delete_seg,fill="white",width=0.00001,outline="white")
self.reset()
    def screen_shot_taking(self, e):#For take screen shot
        """Capture the selected region of the canvas and save it as a PNG.

        Requires a prior selection made with the selector tool (old_x/old_y
        and new_x/new_y set); otherwise the bare except reports the error.
        """
        try:
            # Remove the selection rectangle so it is not in the capture
            self.make_canvas.delete(self.temp.pop())
            # Give Tk a moment to redraw before grabbing the screen
            time.sleep(0.0000001)
            self.window.update()
            # Canvas origin in absolute screen coordinates
            x1 = self.window.winfo_rootx() + self.make_canvas.winfo_x()
            y1 = self.window.winfo_rooty() + self.make_canvas.winfo_y()
            file = filedialog.asksaveasfilename(initialdir="Screen_shots",title="Screen shot save",filetypes=[("PNG File", "*.png")])
            if file:
                # Capture the selected region and save under the chosen name
                ImageGrab.grab().crop((x1 + self.old_x, y1 + self.old_y, x1 + self.new_x, y1 + self.new_y)).save(file+".png")
                self.reset()
                self.status['text'] = "Screen Shot Taken and Saved"
                self.status.place(x=1100,y=685)
        except:
            print("Screen shot Error")
            messagebox.showerror("Selection Error","At first select a region by selector under 'Tools Collection', then take screen shot")
def zoom_controller(self,e):#For Zoom in and Zoom out
self.status['text'] = "Zoom Controller"
self.status.place(x=1160, y=685)
try:
if e.delta > 0:
self.make_canvas.scale("all",e.x,e.y,1.1,1.1)
elif e.delta<0:
self.make_canvas.scale("all", e.x, e.y, 0.9, 0.9)
except:
if e == 1:
self.make_canvas.scale("all", 550, 350, 1.1, 1.1)
else:
self.make_canvas.scale("all", 550, 350, 0.9, 0.9)
    def color_boxer(self,e):#Colorbox under 'Tools Collection for pen color'
        """Freehand drawing with the currently chosen color pen.

        Each motion event draws a line segment from the previous pointer
        position; on button release the whole stroke is pushed onto the
        undo stack.
        """
        self.status['text'] = "Draw with the color pen"
        self.status.place(x=1130,y=685)
        if self.old_x and self.old_y:
            # Connect the previous pointer position to the current one
            take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, e.y, fill=self.color_container_box,
                                                width=self.color_circle_width_maintainer, smooth=True, capstyle=ROUND)
            self.temp.append(take)
        self.old_x = e.x
        self.old_y = e.y
        def color_input(e):
            # Stroke finished: record it for undo and clear the drag state
            self.undo_container.append(self.temp)
            self.notation_box.insert(END, len(self.undo_container) - 1)
            self.reset()
        self.make_canvas.bind("<ButtonRelease-1>", color_input)
def color_box_width_controller(self,e):#Color box width maintain by keyboard event or mouse event
try:
print(e)
if e.delta>0:
self.color_circle_width_maintainer += 3
else:
self.color_circle_width_maintainer -= 3
except:
if e == 1:
self.color_circle_width_maintainer += 3
else:
self.color_circle_width_maintainer -= 3
def reset(self):#Reset
self.status['text'] = "Sketch With Passion"
self.status.place(x=1140, y=685)
if self.notation_box:
self.file_menu.entryconfig("Save", state=NORMAL)
self.edit_menu.entryconfig("Undo",state=NORMAL)
self.edit_menu.entryconfig("Clear", state=NORMAL)
self.edit_menu.entryconfig("Cut", state=NORMAL)
self.edit_menu.entryconfig("Copy", state=NORMAL)
self.edit_menu.entryconfig("Paste", state=NORMAL)
self.edit_menu.entryconfig("Screen Shot", state=NORMAL)
self.option_menu.entryconfig("Movement", state=NORMAL)
if self.notation_box['state'] == DISABLED:
self.notation_box['state'] = NORMAL
self.new_x = None
self.new_y = None
self.old_x = None
self.old_y = None
self.temp=[]
    def draw_with_pencil(self,e):#Draw with pencil
        """Freehand drawing with the plain pencil tool.

        Each motion event draws a line segment from the previous pointer
        position; on button release the whole stroke is pushed onto the
        undo stack.
        """
        self.status['text'] = "Draw with the Pencil"
        self.status.place(x=1130, y=685)
        if self.old_x and self.old_y:
            # Connect the previous pointer position to the current one
            take =self.make_canvas.create_line(self.old_x,self.old_y,e.x,e.y,fill=self.fill_color_line,
                                               width=self.width_maintainer,smooth=True,capstyle=ROUND)
            self.temp.append(take)
        self.old_x = e.x
        self.old_y = e.y
        def push_value(e):
            # Stroke finished: record it for undo and clear the drag state
            self.undo_container.append(self.temp)
            self.notation_box.insert(END, len(self.undo_container) - 1)
            self.reset()
        self.make_canvas.bind("<ButtonRelease-1>", push_value)
def erasing_setup(self,e):#For eraser
self.status['text'] = "Erasing"
self.status.place(x=1180, y=685)
if self.old_x and self.old_y:
take =self.make_canvas.create_rectangle(self.old_x,self.old_y,e.x,e.y,width=self.erase_width_maintainer,fill="white",outline="white")
self.temp.append(take)
self.old_x = e.x
self.old_y = e.y
def real_erasing(e):
self.undo_container.append(self.temp)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
self.make_canvas.bind("<ButtonRelease-1>", real_erasing)
    def text_creation_input_take(self):#Text Creation
        """Open a popup window that collects the text, font family, font size
        and color, then place the text on the canvas at the next left-click.
        """
        def message_show():
            # Shown after pressing OK; the actual insertion happens on the
            # next canvas click (see text_creation below).
            messagebox.showinfo("Done","Click on targeting position on the main window to input text")
        self.status['text'] = "Make your own Text"
        self.status.place(x=1130, y=685)
        # Popup window holding all text-input widgets
        self.top = Toplevel()
        self.top.title("Text here")
        self.top.geometry("400x500")
        self.top.wm_iconbitmap("Icons/main_logo.ico")
        self.top.config(bg="brown")
        label_1 = Label(self.top,text="Enter the text",font=("Arial",25,"bold"),fg="#00FFFF",bg="brown")
        label_1.pack(pady=20)
        entry_take = Entry(self.top,width=20,font=("Arial",20,"bold","italic"),bg="chocolate",fg="green",textvar=self.input_take,relief=SUNKEN,bd=10)
        entry_take.pack(pady=10)
        entry_take.focus()
        ok_btn = Button(self.top, text="OK", fg="red", bg="black",width=10,
                        font=("Arial", 15, "bold"), relief=RAISED, bd=5, command=message_show)
        ok_btn.pack(pady=20)
        # Font-family chooser (first entry pre-selected)
        self.text_collection = Listbox(self.top,width=17,height=9,font=("Arial",13,"bold"),bg="chocolate",fg="yellow",relief=SUNKEN,bd=8)
        self.text_collection.place(x=10,y=280)
        text_list = ["Arial","Courier New","Cosmic Sans MS","Fixedsys","MS Sans Serif","System","Verdana","Times New Roman","Symbol"]
        for x in text_list:
            self.text_collection.insert(END,x)
        self.text_collection.activate(0)
        self.text_collection.selection_set(0)
        def color_choose():#For text color set
            # askcolor() returns (rgb_tuple, hex_string); keep the hex string
            self.text_fg = colorchooser.askcolor()[1]
        color_chooser = Button(self.top, text="Text Color", fg="yellow", bg="chocolate",
                               font=("Arial", 15, "bold"), relief=RAISED, bd=5, command=color_choose)
        color_chooser.place(x=200,y=280)
        # Font size selector (1..100)
        self.font_size = Scale(self.top,from_=1,to=100,orient=HORIZONTAL,bg="green",fg="yellow",font=("Arial",10,"bold"),activebackground="red")
        self.font_size.place(x=200,y=433)
        def text_creation(e):#For make text on the screen by click
            # Draw the text at the clicked point, record it for undo, reset
            # the entry content and close the popup.
            take = self.make_canvas.create_text(e.x, e.y, text=self.input_take.get(), font=(self.text_collection.get(ACTIVE), self.font_size.get(), "bold", "italic"), fill=self.text_fg)
            self.undo_container.append(take)
            self.notation_box.insert(END, len(self.undo_container) - 1)
            self.input_take.set(" ")
            self.top.destroy()
        self.make_canvas.bind("<Button-1>", text_creation)
def circle_ranging(self,e):#Make Circle
self.status['text'] = "Draw Circle"
self.status.place(x=1200, y=685)
if self.old_x and self.old_y:
take = self.make_canvas.create_oval(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,outline=self.outline_color_line,fill=self.fill_color)
self.temp.append(take)
else:
self.old_x = e.x
self.old_y = e.y
def circle_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
take = self.make_canvas.create_oval(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,
fill=self.fill_color, outline=self.outline_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>',circle_make)
def rectangle_ranging(self,e):#Rectangle Make
self.status['text'] = "Draw Rectangle"
self.status.place(x=1200, y=685)
if self.old_x and self.old_y:
take = self.make_canvas.create_rectangle(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,fill=self.fill_color,outline=self.outline_color_line)
self.temp.append(take)
else:
self.old_x = e.x
self.old_y = e.y
def rectangle_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
take = self.make_canvas.create_rectangle(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,
fill=self.fill_color, outline=self.outline_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>',rectangle_make)
def straight_line_ranging(self,e):#Straight line make
self.status['text'] = "Draw Straight line"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
if e.x-self.old_x>e.y-self.old_y:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, self.old_y, width=self.width_maintainer,fill=self.fill_color_line)
self.temp.append(take)
else:
take = self.make_canvas.create_line(self.old_x, self.old_y, self.old_x, e.y, width=self.width_maintainer,fill=self.fill_color_line)
self.temp.append(take)
else:
self.old_x=e.x
self.old_y=e.y
def straight_line_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
if e.x - self.old_x > e.y - self.old_y:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, self.old_y,
width=self.width_maintainer, fill=self.fill_color_line)
else:
take = self.make_canvas.create_line(self.old_x, self.old_y, self.old_x, e.y,
width=self.width_maintainer, fill=self.fill_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<Shift-ButtonRelease-1>',straight_line_make)
def bent_line_ranging(self,e):#Bent line make
self.status['text'] = "Draw bent line"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer, fill=self.fill_color_line)
self.temp.append(take)
else:
self.old_y=e.y
self.old_x=e.x
def bent_line_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,
fill=self.fill_color_line, capstyle=ROUND)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>',bent_line_make)
def dashed_line_ranging(self,e):#Dash line make
self.status['text'] = "Draw Dash line"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer, fill=self.fill_color_line, dash=(10,1))
self.temp.append(take)
else:
self.old_y=e.y
self.old_x=e.x
def dashed_line_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
take = self.make_canvas.create_line(self.old_x, self.old_y, e.x, e.y, width=self.width_maintainer,
fill=self.fill_color_line, capstyle=ROUND, dash=(10, 1))
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>',dashed_line_make)
def traingle_ranging(self, e):#Traingle make
self.status['text'] = "Draw Traingle"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
take = self.make_canvas.create_polygon(self.old_x, self.old_y, self.old_x-(e.x-self.old_x), e.y, e.x, e.y,width=self.width_maintainer, fill=self.fill_color,outline=self.outline_color_line)
self.temp.append(take)
else:
self.old_x=e.x
self.old_y=e.y
def traingle_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
take = self.make_canvas.create_polygon(self.old_x, self.old_y, self.old_x - (e.x - self.old_x), e.y, e.x,
e.y, width=self.width_maintainer, fill=self.fill_color,
outline=self.outline_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>', traingle_make)
def parallelogram_ranging(self, e):#Parallelogram make
self.status['text'] = "Draw a Parallelogram"
self.status.place(x=1130, y=685)
if self.old_x and self.old_y:
points = [self.old_x,self.old_y,int(self.old_x)+30,e.y,e.x,e.y,int(e.x)-30,self.old_y]
take = self.make_canvas.create_polygon(points,width=1, fill=self.fill_color,outline=self.outline_color_line)
self.temp.append(take)
else:
self.old_x=e.x
self.old_y=e.y
def parallelogram_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
points = [self.old_x, self.old_y, int(self.old_x) + 30, e.y, e.x, e.y, int(e.x) - 30, self.old_y]
take = self.make_canvas.create_polygon(points, width=self.width_maintainer, fill=self.fill_color,
outline=self.outline_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>', parallelogram_make)
def pentagon_ranging(self, e):#pentagon make
self.status['text'] = "Draw Pentagon"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
points = [self.old_x, self.old_y, int(self.old_x), e.y, e.x, e.y, int(e.x), self.old_y, (self.old_x+e.x)/2,self.old_y-20]
take = self.make_canvas.create_polygon(points,width=self.width_maintainer, fill=self.fill_color,outline=self.outline_color_line)
self.temp.append(take)
else:
self.old_x=e.x
self.old_y=e.y
def pentagon_make(e):
for x in self.temp:
self.make_canvas.delete(x)
try:
points = [self.old_x, self.old_y, int(self.old_x), e.y, e.x, e.y, int(e.x), self.old_y,
(self.old_x + e.x) / 2, self.old_y - 20]
take = self.make_canvas.create_polygon(points, width=self.width_maintainer, fill=self.fill_color,
outline=self.outline_color_line)
self.undo_container.append(take)
self.notation_box.insert(END, len(self.undo_container) - 1)
self.reset()
except:
print("Error: click only not motion")
self.make_canvas.bind('<ButtonRelease-1>', pentagon_make)
def hexagon_ranging(self, e):#Hexagon make
self.status['text'] = "Draw Hexagon"
self.status.place(x=1160, y=685)
if self.old_x and self.old_y:
points = [self.old_x, | |
interval variables in the sequence variable
of the no_overlap constraint.
* In a state function, the transition matrix represents the minimal distance between two integer
states of the function.
A transition matrix can be created:
* Deprecated.
Giving only its size. In this case, a transition matrix is created by this constructor with all
values initialized to zero. Matrix values can then be set using :meth:`set_value` method.
* Giving the matrix values as a list of rows, each row being a list of integers.
Matrix values can not be changed after it has been created.
"""
__slots__ = () # Matrix stored in value field
    def __init__(self, size=None, values=None, name=None):
        """ **Constructor**

        Builds either a deprecated, mutable size x size zero matrix, or an
        immutable matrix from an explicit list of rows.

        Args:
            size (optional): Matrix size (width and height),
            name (optional): Name of the matrix. None by default.
            values (optional): Matrix values expressed as a list of rows.
        """
        super(CpoTransitionMatrix, self).__init__(None, Type_TransitionMatrix)
        if name:
            self.set_name(name)
        # Check type of argument
        if size:
            # Deprecated path: mutable matrix of zeros, edited via set_value().
            # NOTE(review): size == 0 falls into the 'values' branch below
            # because 'if size:' tests truthiness -- confirm this is intended.
            assert is_int(size) and size >= 0, "Argument 'size' should be a positive integer."
            assert values is None, "Arguments 'size' and 'values' should not be given together."
            warnings.warn("Creating editable transition matrix by size is deprecated since release 2.3.",
                          DeprecationWarning)
            self.value = [[0 for i in range(size)] for j in range(size)]
        else:
            # Immutable path: freeze rows into tuples so values cannot change
            try:
                self.value = tuple(tuple(x) for x in values)
            except TypeError:
                # NOTE: assert used for argument validation (stripped under -O)
                assert False, "Argument 'values' should be an iterable of iterables of integers."
            # The matrix must be square with non-negative integer entries
            size = len(self.value)
            assert all (len(x) == size for x in self.value), \
                "Matrix value should be squared (list of rows of the same size)"
            assert all(all(is_int(v) and v >= 0 for v in r) for r in self.value), \
                "All matrix values should be positive integers"
def get_size(self):
""" Returns the size of the matrix.
Returns:
Matrix size.
"""
return len(self.value)
def get_value(self, from_state, to_state):
""" Returns a value in the transition matrix.
Args:
from_state: Index of the from state.
to_state: Index of the to state.
Returns:
Transition value.
"""
return self.value[from_state][to_state]
def get_all_values(self):
""" Returns an iterator on all matrix values, in row/column order
Returns:
Iterator on all values
"""
sizerg = range(len(self.value))
return (self.value[f][t] for f in sizerg for t in sizerg)
    def get_matrix(self):
        """ Returns the complete transition matrix.

        Returns:
            Matrix values as a sequence of rows (tuple of row tuples when
            built from 'values'; list of row lists for the deprecated
            size-based constructor), not a flat concatenation.
        """
        return self.value
    def set_value(self, from_state, to_state, value):
        """ Sets a value in the transition matrix.

        Only matrices created through the deprecated 'size' constructor
        argument are mutable; matrices built from 'values' store tuples,
        so this call then raises a TypeError.

        Args:
            from_state: Index of the from state.
            to_state: Index of the to state.
            value: Transition value.
        """
        assert is_int(value) and value >= 0, "Value should be a positive integer"
        self.value[from_state][to_state] = value
class CpoStateFunction(CpoVariable):
    """ This class represents a *state function* expression node.
    State functions are used by *interval variables* to represent the evolution of a state variable over time.
    """
    __slots__ = ('trmtx',  # Transition matrix (CpoTransitionMatrix or None)
                 )
    def __init__(self, trmtx=None, name=None):
        """ **Constructor**
        Args:
            trmtx (optional): An optional transition matrix defining the transition distance between consecutive states
                of the state function.
                Transition matrix is given as a list of rows (iterable of iterables of positive integers),
                or as the result of a call to the method :meth:`~docplex.cp.expression.transition_matrix`.
            name (optional): Name of the state function.
        """
        # Force name for state functions
        super(CpoStateFunction, self).__init__(Type_StateFunction, name)
        self.set_transition_matrix(trmtx)
    def set_transition_matrix(self, trmtx):
        """ Sets the transition matrix.
        Args:
            trmtx : A transition matrix defining the transition distance between consecutive states of the state function.
                Transition matrix is given as a list of rows (iterable of iterables of positive integers),
                or as the result of a call to the method :meth:`~docplex.cp.expression.transition_matrix`.
        """
        if trmtx is None:
            # No transition distances: the node has no child expression
            self.trmtx = None
            self.children = ()
        else:
            # Normalize the input (list of rows or matrix object) into a
            # CpoTransitionMatrix instance
            trmtx = build_cpo_transition_matrix(trmtx)
            self.trmtx = trmtx
            # NOTE(review): this assert fires only after self.trmtx was
            # already assigned; build_cpo_transition_matrix presumably
            # guarantees the type -- confirm.
            assert isinstance(trmtx, CpoTransitionMatrix), "Argument 'trmtx' should be a CpoTransitionMatrix"
            # Expose the matrix as a child expression node
            self.children = (trmtx,)
    def get_transition_matrix(self):
        """ Returns the transition matrix.
        Returns:
            Transition matrix, None if none.
        """
        return self.trmtx
    def _equals(self, other):
        """ Checks the equality of this expression with another object.
        This particular method just checks local attributes, but does not check recursively children if any.
        Recursion is implemented by method equals() that uses a self-managed stack to avoid too many
        recursive calls that may lead to an exception 'RuntimeError: maximum recursion depth exceeded'.
        Args:
            other: Other object to compare with.
        Returns:
            True if 'other' is semantically identical to this object, False otherwise.
        """
        return super(CpoStateFunction, self)._equals(other)
        # Transition matrix is checked as children
#==============================================================================
# Factory Functions
#==============================================================================
def integer_var(min=None, max=None, name=None, domain=None):
    """ Creates a single integer decision variable.
    The domain of the variable can be given either as a [min, max] interval,
    both bounds included, or extensively through the 'domain' argument as a
    collection of values and/or (min, max) tuples.
    For example, the following declarations are all equivalent:
    * v = integer_var(0, 9, "X")
    * v = integer_var(domain=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), name="X")
    * v = integer_var(domain=(0, (1, 5), (6, 7), 8, 9), name="X")
    * v = integer_var(domain=((0, 9)), name="X")
    Args:
        min: Domain min value. Optional if domain is given extensively.
        max: Domain max value. Optional if domain is given extensively.
        name: Optional variable name. If not given, a name is automatically generated.
        domain: Variable domain expressed as extensive list of values and/or intervals expressed as tuples of integers.
            Unused if min and max are provided.
    Returns:
        CpoIntVar expression
    """
    # Normalize the (min, max) / extensive-domain arguments first
    dom = _build_int_var_domain(min, max, domain)
    return CpoIntVar(dom, name)
def integer_var_list(size, min=None, max=None, name=None, domain=None):
    """ Creates a list of integer variables sharing the same domain.
    See the documentation of :meth:`integer_var` for the domain arguments.
    When 'name' is given, variable number i is named "<name>_<i>", starting
    at zero; otherwise names are generated automatically.
    Args:
        size: Size of the list of variables
        min: Domain min value. Optional if domain is given extensively.
        max: Domain max value. Optional if domain is given extensively.
        name: Optional variable name prefix.
        domain: Variable domain expressed as extensive list of values and/or intervals expressed as tuples of integers.
            Unused if min and max are provided.
    Returns:
        List of integer variables.
    """
    dom = _build_int_var_domain(min, max, domain)
    if name is None:
        return [CpoIntVar(dom) for _ in range(size)]
    prefix = name + "_"
    return [CpoIntVar(dom, prefix + str(i)) for i in range(size)]
def integer_var_dict(keys, min=None, max=None, name=None, domain=None):
""" Creates a dictionary of integer variables.
This methods creates a dictionary of integer variables associated to a list of keys given as first parameter.
All other parameters are identical to those requested by the method integer_var()
that allows to create a single integer variable.
See the documentation of :meth:`integer_var` for details.
If a name is given, each variable of the list is created with this
name concatenated with the string representation of the corresponding key.
The parameter 'name' can also be a function that is called to build the variable name
with the variable key as parameter.
Args:
keys: Iterable of variable keys.
min: Domain min value. Optional if domain is given extensively.
max: Domain max value. Optional if domain is given extensively.
name: Optional variable name. If not given, a name is automatically generated.
domain: Variable domain expressed as extensive list of values and/or intervals expressed as tuples of integers.
Unused if min and max are provided.
Returns:
Dictionary of CpoIntVar objects.
"""
dom = _build_int_var_domain(min, max, domain)
res = {}
if name is None:
for k in | |
and line plot that visualizes the
operation of a component on the basis of a selected commodity.
:param component_name: Name of the component that holds the commodity
of interest.
:type component_name: str
:param commodity: Name of the commodity that should be plotted.
:type commodity: str
:param level_of_detail: Specifies the level of plotting detail. Only the
commodity in the component itself is plotted if 1 is selected.
The composition of the commodity (from which sources formed and to
which destinations sent) is visualized if 2 is selected.
|br| *Default: 2*
:type level_of_detail: int (1 or 2)
:param scale_to_hourly_resolution: States if the data should be scaled
to hourly resolution before plotting. This might be useful, if the
optimization was performed with a value for the EnergySystem keyword
argument 'hours_per_time_step' larger than 1.
|br| *Default: False*
:type scale_to_hourly_resolution: bool
:param plot_single_period_with_index: States if only one period with the
given index number should be plotted. This is only possible if the
optimization was performed with aggregated time series data.
|br| *Default: None*
:type plot_single_period_with_index: int or None
:param show_plot: State whether the plot should be shown once finalized
|br| *Default: False*
:type show_plot: bool
:param save_plot: State whether the plot should be saved once finalized
|br| *Default: True*
:type save_plot: bool
:param file_name: Name of the file (if saved); no file-ending required
|br| *Default: 'operation_plot'*
:type file_name: str
:param kwargs: Additional keyword arguments to manipulate the plot
(e.g., labels, figure size, legend position, ...).
See dict 'props' of the Plotter class.
"""
if self.data is None:
return
# Check the user input:
utils.check_plot_operation_input(
self.data, component_name, commodity, scale_to_hourly_resolution,
plot_single_period_with_index, level_of_detail,
show_plot, save_plot, file_name)
self.single_period = plot_single_period_with_index
self.level_of_detail = level_of_detail
self.comp = component_name
self.model_class = self.data['components'][self.comp]['model_class']
# Get the global plotting properties of the Plotter class (defaults)
props = copy.copy(self.props)
# Set 'dt_plot' and 'dt_scale' according to 'scale_to_hourly_resolution'
# If scaling is requested: Adjust the index and scale the plotted
# variable values (except of the SOC).
if scale_to_hourly_resolution:
self.dt_plot, self.scale_plot = self.dt, 1 / self.dt
props['bar_width'] = props['bar_width'] * self.dt_plot # default
props['xlabel'] = 'Hours of the year [h]' # default
else:
self.dt_plot, self.scale_plot = 1, 1
# Overwrite props with local kwargs if specified and found.
for key, val in kwargs.items():
if key in props.keys():
props[key] = val
else:
warn('Keyword argument "{}" is unknown and ignored'.format(key))
# **********************************************************************
# Plotting
# **********************************************************************
fig, ax = plt.subplots(figsize=(props['fig_width'],
props['fig_height']))
try:
# 1. Find the required commodity in the inlets and / or outlets of
# the component and get the associated port variables.
var_in_name, var_out_name = None, None # init
if commodity in self.data['components'][self.comp][
'inlet_commod_and_var_names'].keys():
var_in_name = self.data['components'][self.comp][
'inlet_commod_and_var_names'][commodity]
if commodity in self.data['components'][self.comp][
'outlet_commod_and_var_names'].keys():
var_out_name = self.data['components'][self.comp][
'outlet_commod_and_var_names'][commodity]
if level_of_detail == 1:
# --------------------------------------------------------------
# Only plot the commodity in the component itself
# --------------------------------------------------------------
# Get the commodity data for inlets ad outlets
_, var_in_data = self._get_and_convert_variable(var_in_name)
_, var_out_data = self._get_and_convert_variable(var_out_name)
# Plot commodity data on inlet port:
if var_in_data is not None:
idx = self._get_index(additional_time_step=False).flatten()
ax.bar(idx, var_in_data.flatten() * self.scale_plot,
props['bar_width'],
align='edge', label=var_in_name, zorder=5,
color=self.bar_colors[0], edgecolor='black',
linewidth=props['bar_lw'])
# Plot commodity data on outlet port:
if var_out_data is not None:
# If commodity also on inlet port -> multiply outlet with -1
if var_in_data is not None:
var_out_data *= -1
idx = self._get_index(additional_time_step=False).flatten()
ax.bar(idx, var_out_data.flatten() * self.scale_plot,
props['bar_width'],
align='edge', label=var_out_name, zorder=5,
color=self.bar_colors[1], edgecolor='black',
linewidth=props['bar_lw'])
else: # level_of_detail == 2
# --------------------------------------------------------------
# Plot the composition of the commodity
# (from which sources formed / to which destinations sent)
# --------------------------------------------------------------
# Get the connected arc names
arc_in_names, arc_out_names = [], [] # init
if var_in_name is not None:
arc_in_names = self.data['components'][self.comp][
'var_connections'][var_in_name]
if var_out_name is not None:
arc_out_names = self.data['components'][self.comp][
'var_connections'][var_out_name]
# Get the data for the connected arcs at inlets and outlets
arc_in_data, arc_out_data = [], [] # init
for arc_name in arc_in_names:
_, data = self._get_and_convert_variable(arc_name)
arc_in_data.append(data.flatten())
for arc_name in arc_out_names:
_, data = self._get_and_convert_variable(arc_name)
arc_out_data.append(data.flatten())
# Order the data (on each side) according to their sum
# => easier to read if series with high occurrence is at bottom.
# Rearrange inlet data (order from large to small and stacked):
if len(arc_in_data) > 0:
order = np.array([sum(v) for v in arc_in_data]).argsort()
order = order[::-1] # reverse order --> from large to small
# Set new order for 'arc_in_data' and 'arc_in_names'
arc_in_data = np.vstack(
[arc_in_data[i] for i in order]) * self.scale_plot
arc_in_names = [arc_in_names[i] for i in order]
# Rearrange outlet data (order from large to small and stacked):
if len(arc_out_data) > 0:
order = np.array([sum(v) for v in arc_out_data]).argsort()
order = order[::-1] # reverse order --> from large to small
# Set new order for 'arc_out_data' and 'arc_out_names'
arc_out_data = np.vstack(
[arc_out_data[i] for i in order]) * self.scale_plot
arc_out_names = [arc_out_names[i] for i in order]
# Create index
idx = self._get_index(additional_time_step=False).flatten()
# If commodity also on inlet port -> multiply outlet with -1
if len(arc_out_data) > 0 and len(arc_in_data) > 0:
arc_out_data *= -1
# Plot stacked bars on inlet port:
for i, val in enumerate(arc_in_data):
if abs(np.sum(val)) <= 0.01: # skip components with zeros
continue
ax.bar(idx, val, props['bar_width'],
bottom=arc_in_data[:i].sum(axis=0), align='edge',
label=arc_in_names[i], zorder=5,
color=self.bar_colors[i],
edgecolor='black', linewidth=props['bar_lw'])
# Plot stacked bars on outlet port:
for i, val in enumerate(arc_out_data):
if abs(np.sum(val)) <= 0.01: # skip components with zeros
continue
ax.bar(idx, val, props['bar_width'],
bottom=arc_out_data[:i].sum(axis=0), align='edge',
label=arc_out_names[i], zorder=5,
color=self.bar_colors[len(arc_in_names)+i],
edgecolor='black', linewidth=props['bar_lw'])
# ------------------------------------------------------------------
# Storage: Add the SOC line and a horizontal line at y=0
# ------------------------------------------------------------------
if self.model_class == 'Storage':
# Get the data for the state of charge variables:
name, var_soc = self._get_and_convert_variable('soc_variable')
_, var_soc_inter = self._get_and_convert_variable(
'soc_inter_variable')
# Only in case the data is clustered and should be scaled to the
# full series (not only one single period is plotted) and the
# inter-period formulation has been applied --> Recalculate SOC!
if self.is_clustered and self.single_period is None \
and var_soc_inter is not None:
soc = np.array([p_soc + var_soc_inter[i]
for i, p_soc in enumerate(var_soc)])
else:
soc = var_soc # just use the original SOC results
# Plot the state if charge variable (SOC):
idx = self._get_index(additional_time_step=True)
for i, p_var in enumerate(soc):
ax.plot(idx[i], p_var, label=(name if i == 0 else None),
zorder=10, color=self.line_colors[0],
linewidth=props['line_lw'])
# Add horizontal line at y=0
ax.axhline(0, color='black', lw=0.8)
# ------------------------------------------------------------------
# Source / Sink: Add commodity rates as step plots (if applied)
# ------------------------------------------------------------------
if self.model_class in ['Source', 'Sink']:
count = 0 # init counter (to have different colors for lines)
for rate in ['commodity_rate_min', 'commodity_rate_max',
'commodity_rate_fix', 'commodity_cost_time_series',
'commodity_revenues_time_series']:
name, para = self._get_and_convert_variable(rate)
if para is not None:
idx = self._get_index(
additional_time_step=False).flatten()
# Extend the data by appending last value at the end
# again --> better representation in the step function!
idx_ext = np.append(idx, idx[-1] + self.dt_plot)
para_ext = np.append(para.flatten(), para.flatten()[-1])
# Plot step function
ax.step(idx_ext, para_ext * self.scale_plot,
where='post', label=name,
zorder=10, color=self.line_colors[count],
linewidth=props['line_lw'])
count += 1 # increase counter by 1
# ***********************************************************
# General Layouts and Finishing
# ***********************************************************
# Plot vertical lines to separate individual typical periods
# that are connected to represent the full scale time series.
if self.is_clustered and self.single_period is None:
for p in range(1, self.nbr_of_periods):
x = p * self.nbr_of_ts_per_period * self.dt_plot
ax.axvline(x, color='black', lw=props['period_lw'],
linestyle='--', zorder=100)
# Catch Exception if problem occurs and print a message in the title
except Exception as e:
ax.set_title('PLOTTING FAILED!', size=40, color='red', ha='center')
print('*** Exception detected while trying to plot:', e)
ax.tick_params(axis='x', labelrotation=props['xticks_rotation'])
ax.set_xlabel(props['xlabel'])
ax.set_ylabel(props['ylabel'])
ax.legend(ncol=props['lgd_ncol'], loc=props['lgd_pos'],
framealpha=0.8, edgecolor='black').set_zorder(100)
if props['grid']:
ax.grid(which='major', linestyle='--', zorder=0)
fig.tight_layout(pad=0.0, w_pad=0.2)
if show_plot:
plt.show()
if save_plot:
if props['save_png']:
fig.savefig(file_name+'.png', bbox_inches="tight",
pad_inches=props['pad_inches'], dpi=props['dpi'])
if props['save_pdf']:
fig.savefig(file_name+'.pdf', bbox_inches="tight",
pad_inches=props['pad_inches'])
if props['save_pgf']:
fig.savefig(file_name+'.pgf', bbox_inches="tight",
pad_inches=props['pad_inches'])
plt.close()
# --------------------------------------------------------------------------
def _get_and_convert_variable(self, var_name):
# if the variable is not in the component dict keys (e.g.
# 'basic_variable', ...) , just use the provided 'var_name' and
# check if it is in the variables or | |
of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
# a datetimelike
elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:
try:
result = result.astype(dtype)
except:
pass
except:
pass
return result
def _lcd_dtypes(a_dtype, b_dtype):
    """ return the lcd (lowest common denominator) dtype to hold these types """
    if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
        return _NS_DTYPE
    elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
        return _TD_DTYPE
    elif is_complex_dtype(a_dtype):
        # NOTE(review): complex combined with a non-complex collapses to
        # float64 here (dropping the imaginary part's type) -- confirm
        # this is the intended behavior.
        if is_complex_dtype(b_dtype):
            return a_dtype
        return np.float64
    elif is_integer_dtype(a_dtype):
        if is_integer_dtype(b_dtype):
            if a_dtype.itemsize == b_dtype.itemsize:
                return a_dtype
            return np.int64
        return np.float64
    elif is_float_dtype(a_dtype):
        if is_float_dtype(b_dtype):
            if a_dtype.itemsize == b_dtype.itemsize:
                return a_dtype
            else:
                return np.float64
        # BUG FIX: this branch previously used is_integer(b_dtype), which
        # tests for an integer *scalar* and is always False for a dtype
        # object -- so float combined with int fell through to np.object.
        # Use the dtype check so float + int -> float64.
        elif is_integer_dtype(b_dtype):
            return np.float64
    return np.object
def _fill_zeros(result, x, y, name, fill):
    """
    Fill division-by-zero positions of `result` with `fill`.

    if this is a reversed op, then flip x,y
    if we have an integer value (or array in y)
    and we have 0's, fill them with the fill,
    return the result
    mask the nan's from x
    """
    if fill is None:
        return result
    if name.startswith('r'):
        # reversed op (e.g. 'rdiv'): swap so that y is the divisor
        x, y = y, x
    if not isinstance(y, np.ndarray):
        # broadcast a scalar divisor to the result shape
        dtype, value = _infer_dtype_from_scalar(y)
        y = pa.empty(result.shape, dtype=dtype)
        y.fill(value)
    if is_integer_dtype(y) and (y.ravel() == 0).any():
        shape = result.shape
        result = result.ravel().astype('float64')
        # GH 7325, mask and nans must be broadcastable
        signs = np.sign(result)
        mask = ((y == 0) & ~np.isnan(x)).ravel()
        np.putmask(result, mask, fill)
        # if we have a fill of inf, then sign it correctly (GH 6178)
        if np.isinf(fill):
            # BUG FIX: '&' binds tighter than '<', so the previous
            # expression 'signs<0 & mask' evaluated as 'signs < (0 & mask)'
            # == 'signs < 0', which overwrote EVERY negative result with
            # -fill instead of only the masked zero-division positions.
            np.putmask(result, (signs < 0) & mask, -fill)
        result = result.reshape(shape)
    return result
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
# datetime64/timedelta64 fillers: the data is viewed as int64 so the
# int64 in-place pad/backfill routines can be reused unchanged
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
                                        np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
                                        np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
    """Forward-fill missing entries of a 1-d array in place; return it.

    Raises ValueError when no in-place filler exists for the dtype.
    """
    if dtype is None:
        dtype = values.dtype

    # select the dtype-specific in-place filler
    if is_float_dtype(values):
        filler = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
    elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
        filler = _pad_1d_datetime
    elif is_integer_dtype(values):
        # integer arrays cannot hold NaN, so fill as float64
        values = _ensure_float64(values)
        filler = algos.pad_inplace_float64
    elif values.dtype == np.object_:
        filler = algos.pad_inplace_object
    else:
        filler = None

    if filler is None:
        raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)

    if mask is None:
        mask = isnull(values)
    filler(values, mask.view(np.uint8), limit=limit)
    return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
    """Backward-fill missing entries of a 1-d array in place; return it.

    Raises ValueError when no in-place filler exists for the dtype.
    """
    if dtype is None:
        dtype = values.dtype

    # select the dtype-specific in-place filler
    if is_float_dtype(values):
        filler = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
    elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
        filler = _backfill_1d_datetime
    elif is_integer_dtype(values):
        # integer arrays cannot hold NaN, so fill as float64
        values = _ensure_float64(values)
        filler = algos.backfill_inplace_float64
    elif values.dtype == np.object_:
        filler = algos.backfill_inplace_object
    else:
        filler = None

    if filler is None:
        raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)

    if mask is None:
        mask = isnull(values)
    filler(values, mask.view(np.uint8), limit=limit)
    return values
def pad_2d(values, limit=None, mask=None, dtype=None):
    """Forward-fill missing entries of a 2-d array in place; return it.

    Raises ValueError when no in-place filler exists for the dtype.
    """
    if dtype is None:
        dtype = values.dtype

    # select the dtype-specific in-place filler
    if is_float_dtype(values):
        filler = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
    elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
        filler = _pad_2d_datetime
    elif is_integer_dtype(values):
        # integer arrays cannot hold NaN, so fill as float64
        values = _ensure_float64(values)
        filler = algos.pad_2d_inplace_float64
    elif values.dtype == np.object_:
        filler = algos.pad_2d_inplace_object
    else:
        filler = None

    if filler is None:
        raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)

    if mask is None:
        mask = isnull(values)
    mask = mask.view(np.uint8)
    # a zero-length dimension means there is nothing to fill
    if np.all(values.shape):
        filler(values, mask, limit=limit)
    return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
    """Backward-fill missing entries of a 2-d array in place; return it.

    Raises ValueError when no in-place filler exists for the dtype.
    """
    if dtype is None:
        dtype = values.dtype

    # select the dtype-specific in-place filler
    if is_float_dtype(values):
        filler = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
    elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
        filler = _backfill_2d_datetime
    elif is_integer_dtype(values):
        # integer arrays cannot hold NaN, so fill as float64
        values = _ensure_float64(values)
        filler = algos.backfill_2d_inplace_float64
    elif values.dtype == np.object_:
        filler = algos.backfill_2d_inplace_object
    else:
        filler = None

    if filler is None:
        raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)

    if mask is None:
        mask = isnull(values)
    mask = mask.view(np.uint8)
    # a zero-length dimension means there is nothing to fill
    if np.all(values.shape):
        filler(values, mask, limit=limit)
    return values
def _clean_interp_method(method, order=None, **kwargs):
valid = ['linear', 'time', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
'pchip', 'spline']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
return method
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
                   fill_value=None, bounds_error=False, **kwargs):
    """
    Logic for the 1-d interpolation. The result should be 1-d, inputs
    xvalues and yvalues will each be 1-d arrays of the same length.

    bounds_error is currently hardcoded to False since non-scipy methods
    don't take it as an argument.
    """
    # Treat the original, non-scipy methods first.
    invalid = isnull(yvalues)
    valid = ~invalid

    valid_y = yvalues[valid]
    valid_x = xvalues[valid]
    new_x = xvalues[invalid]

    if method == 'time':
        # time-weighting needs a datetime-like index to weight against
        if not getattr(xvalues, 'is_all_dates', None):
            # if not issubclass(xvalues.dtype.type, np.datetime64):
            raise ValueError('time-weighted interpolation only works '
                             'on Series or DataFrames with a '
                             'DatetimeIndex')
        method = 'values'

    def _interp_limit(invalid, limit):
        """mask off values that won't be filled since they exceed the limit"""
        all_nans = np.where(invalid)[0]
        violate = [invalid[x:x + limit + 1] for x in all_nans]
        violate = np.array([x.all() & (x.size > limit) for x in violate])
        return all_nans[violate] + limit

    # unwrap Series/Index into plain ndarrays where possible
    xvalues = getattr(xvalues, 'values', xvalues)
    yvalues = getattr(yvalues, 'values', yvalues)

    if limit:
        # positions whose fills would exceed the limit; zapped to NaN below
        violate_limit = _interp_limit(invalid, limit)
    if valid.any():
        # skip any leading run of NaNs -- they are never filled
        firstIndex = valid.argmax()
        valid = valid[firstIndex:]
        invalid = invalid[firstIndex:]
        result = yvalues.copy()
        if valid.all():
            # nothing missing: return the input unchanged
            return yvalues
    else:
        # have to call np.array(xvalues) since xvalues could be an Index
        # which cant be mutated
        result = np.empty_like(np.array(xvalues), dtype=np.float64)
        result.fill(np.nan)
        return result

    if method in ['linear', 'time', 'values']:
        if method in ('values', 'index'):
            inds = np.asarray(xvalues)
            # hack for DatetimeIndex, #1646
            if issubclass(inds.dtype.type, np.datetime64):
                inds = inds.view(pa.int64)
            if inds.dtype == np.object_:
                inds = lib.maybe_convert_objects(inds)
        else:
            # plain 'linear': interpolate against positions as-is
            inds = xvalues
        inds = inds[firstIndex:]
        result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
                                                 yvalues[firstIndex:][valid])
        if limit:
            result[violate_limit] = np.nan
        return result

    sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
                  'barycentric', 'krogh', 'spline', 'polynomial',
                  'piecewise_polynomial', 'pchip']
    if method in sp_methods:
        new_x = new_x[firstIndex:]
        xvalues = xvalues[firstIndex:]
        result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
            valid_x, valid_y, new_x, method=method, fill_value=fill_value,
            bounds_error=bounds_error, **kwargs)
        if limit:
            result[violate_limit] = np.nan
        return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
                               bounds_error=False, order=None, **kwargs):
    """
    passed off to scipy.interpolate.interp1d. method is scipy's kind.
    Returns an array interpolated at new_x. Add any new methods to
    the list in _clean_interp_method
    """
    try:
        from scipy import interpolate
        # NOTE(review): DatetimeIndex appears unused in this function
        from pandas import DatetimeIndex
    except ImportError:
        raise ImportError('{0} interpolation requires Scipy'.format(method))

    new_x = np.asarray(new_x)

    # ignores some kwargs that could be passed along.
    alt_methods = {
        'barycentric': interpolate.barycentric_interpolate,
        'krogh': interpolate.krogh_interpolate,
        'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
    }

    if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't hande datetime64s
        x, new_x = x.values.astype('i8'), new_x.astype('i8')

    try:
        # pchip_interpolate only exists in newer scipy versions
        alt_methods['pchip'] = interpolate.pchip_interpolate
    except AttributeError:
        if method == 'pchip':
            raise ImportError("Your version of scipy does not support "
                              "PCHIP interpolation.")

    interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
                        'polynomial']
    if method in interp1d_methods:
        if method == 'polynomial':
            # interp1d accepts an integer order as its 'kind' for polynomials
            method = order
        terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
                                    bounds_error=bounds_error)
        new_y = terp(new_x)
    elif method == 'spline':
        terp = interpolate.UnivariateSpline(x, y, k=order)
        new_y = terp(new_x)
    else:
        # remaining methods dispatch through the alt_methods table
        method = alt_methods[method]
        new_y = method(x, y, new_x)
    return new_y
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
    """Fill missing values along ``axis`` via 'pad' or 'backfill', in place.

    A 1-d input is temporarily promoted to 2-d; the returned array always
    has the input's original dimensionality.
    """
    ndim = values.ndim
    if ndim == 1:
        if axis != 0:  # pragma: no cover
            raise AssertionError("cannot interpolate on a ndim == 1 with "
                                 "axis != 0")
        values = values.reshape(tuple((1,) + values.shape))

    # orient so the fill always runs along the first axis
    transf = (lambda x: x) if axis == 0 else (lambda x: x.T)

    if fill_value is None:
        mask = None
    else:  # todo create faster fill func without masking
        mask = mask_missing(transf(values), fill_value)

    method = _clean_fill_method(method)
    filler = pad_2d if method == 'pad' else backfill_2d
    values = transf(filler(transf(values), limit=limit, mask=mask, dtype=dtype))

    # restore the input's original dimensionality
    if ndim == 1:
        values = values[0]
    return values
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
# map canonical fill-method names to their 1-d implementations
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def _get_fill_func(method):
    # normalize method aliases before dispatching
    method = _clean_fill_method(method)
    return _fill_methods[method]
#----------------------------------------------------------------------
# Lots of little utilities
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, | |
# -*- coding: utf-8 -*-
"""
The view module hosts the View class, the central object of cv2d.
"""
import logging
import os
import sys
import six
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.widgets import Slider, Button
import yaml
from colorview2d import Data
import colorview2d.utils as utils
# setup logging
# NOTE: handlers are attached at import time, so importing this module
# creates colorview2d.log in the current working directory
LOGGER = logging.getLogger('colorview2d')
LOGGER.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
FHAND = logging.FileHandler('colorview2d.log')
FHAND.setLevel(logging.DEBUG)
# create console handler with a higher log level
CHAND = logging.StreamHandler()
CHAND.setLevel(logging.WARN)
# create formatter and add it to the handlers
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
FHAND.setFormatter(FORMATTER)
CHAND.setFormatter(FORMATTER)
# add the handlers to the logger
LOGGER.addHandler(FHAND)
LOGGER.addHandler(CHAND)
class View(object):
"""
A class to handle a 2d :class:`numpy.ndarray` with (linearly scaled) _axes, apply a (extendable)
range of filters (mods) to the data while keeping track of the
modifications.
Hosts a :class:`matplotlib.pyplot.Figure` of the data. Customization of this figure
is simplified with respect to the matplotlib library.
Provides interactive colorbar controls.
:Undocumented methods:
The class provides methods that are not documented
because they are generated on-init.
- ``add_<Modname>(arg1, ...)`` and ``rm_<Modname>()``.
There is one such method for each mod in ``modlist``.
This simplfies calls ``add_mod(<Modname>, (arg1, ...))``.
- ``set_<Parametername>(Parameter)`` as shortcut to
``View.config[<Parametername>] = Parameter``.
:Example:
::
data = colroview2d.Data(np.random.random((100, 100)))
fig = colorview2d.View(data)
fig.add_Smooth(2, 2)
fig.plot_pdf('Test.pdf')
"""
def __init__(self, data=None,
cfgfile=None,
config=None,
pipeline=None):
self._modlist = {}
self._create_modlist()
self._data = None
if isinstance(data, np.ndarray):
self._data = Data(data)
elif isinstance(data, Data):
self._data = data
else:
raise ValueError("Provide a 2d numpy.ndarray or a colorview2d.Data"
"instance to create a View object.")
self._original_data = self._data.deep_copy()
self._config = utils.Config()
# overwrite the on_change hook of the Config class.
# this way we can react to changes in the config appropriately.
self._config.on_change = self._on_config_change
# The pipeline contains a dict of numbers and tuples with
# strings that are unique to IMod objects
# and their arguments
self._pipeline = []
if cfgfile:
# If a config file is provided, we load that one.
# All other parameters are ignored.
# Note: The filename must be removed from the config file
self.load_config(os.path.join(os.getcwd(), cfgfile))
# if the config argument is not empty we replace the values
if config:
self._config.update_raw(config)
# Matplotlib figure object, contains the actual plot
# Generated upon retrieval by property accessor
# Readonly, Initialized with one pixel
plt.ioff()
self._fig = plt.figure(1, dpi=self._config['Dpi'])
self._colorcontrolfigure = plt.figure(figsize=(9, 1))
# We use the property setter to add the given pipeline.
if pipeline is not None:
self.pipeline = pipeline
self._apply_pipeline()
# generate the config setters
self._generate_config_setter()
    @property
    def modlist(self):
        """Dict of available mods, autogenerated from the mods/ subfolder.

        Maps mod class names to ready-to-use mod instances. Read-only.
        """
        return self._modlist
    @property
    def data(self):
        """The :class:`colorview2d.Data` that encapsulates the 2d data."""
        return self._data
    @data.setter
    def data(self, data):
        """Set the :class:`colorview2d.Data` of the View and refresh any plot."""
        self._data = data
        self._data_changed()
    def _data_changed(self):
        """Called when the data is modified.

        Takes care to update any existing plotting facilities.
        Is called internally after mod application.
        """
        if self.plotting:
            # push the new array and its extents into the live plot
            self._plot.set_data(self._data.zdata)
            self._plot.set_extent([self._data.xleft, self._data.xright,
                                   self._data.ybottom, self._data.ytop])
            self._axes.set_xlim(self._data.xleft, self._data.xright)
            self._axes.set_ylim(self._data.ybottom, self._data.ytop)
            # we redraw the colorbar sliders to set the slider range correctly
            if self._colorcontrolfigure.axes:
                self._show_cbsliders()
            # re-setting the value triggers update of the plot
            self._config['Cbmin'] = 'auto'
            self._config['Cbmax'] = 'auto'
        return
    @property
    def config(self):
        """Holds information on the plot layout, ticks, fonts etc.

        Can be accessed via ``myview.set_<Parametername>(<Parameter>)`` and
        in a dict-like fashion ``myview.config['Parametername'] = <Parameter>``.
        A ``config.update(dict)`` function is also available.

        The attribute is initialized with a fixed set of parameters read from
        the ``default.cv2d`` config file in the package directory.

        *Important:* Does not fully implement a dictionary interface.

        Methods:
            update (dict): update the configuration with a dictionary containing
                           valid parameters. Note that the plot, if there is any,
                           is updated when the config is changed via ``update``
            update_raw (dict): update the configuration without updating any
                           existing plot.
        """
        return self._config
    @config.setter
    def config(self, config_dict):
        """Change the config. Note that it is a custom :class:`colorview2d.utils.ConfigDict`.

        We use the update routine to prevent overwriting the private attribute
        with an ordinary :class:`dict`.

        Be careful when overwriting the config because there is no error
        checking on the values given!

        Args:
            config_dict (dict): dictionary with configuration items.
        """
        self._config.update(config_dict)
    @property
    def fig(self):
        """The :class:`matplotlib.pyplot.Figure`.

        Lazily draws the plot on first access.
        """
        if not hasattr(self, '_plot'):
            self.draw_plot()
        return self._fig
    @property
    def pipeline(self):
        """The mod pipeline: a list of (mod identifier, arguments) tuples."""
        return self._pipeline
    @pipeline.setter
    def pipeline(self, pipeline):
        """Overwrite the pipeline. Used during initialization.

        NOTE(review): the original docstring claimed this "does not trigger
        any modifications to the data", but add_mod's default behavior is
        not visible here -- confirm before relying on that.

        Args:
            pipeline (list): list of (mod identifier, arguments) tuples.
        """
        self._pipeline = []
        for modstring in pipeline:
            self.add_mod(modstring[0], modstring[1])
    @property
    def plotting(self):
        """Boolean. Are we showing any plot at the moment?"""
        # We use the existence of the private _plot attribute
        # as a primer.
        return hasattr(self, '_plot')
# def show(self):
# """Show the figure in the GUI.
# Can be used only if wxpython is installed.
# The GUI is not yet functional.
# """
# try:
# import colorview2d.mainapp as mainapp
# except ImportError:
# logging.error('Cannot start the GUI. Is wxpython installed?')
# return
# logging.info("Initializing the GUI.")
# self.mainapp = mainapp.MainApp(self)
# self.mainapp.MainLoop()
def _generate_config_setter(self):
"""Add a simplified interface to changing config values.
This interface makes it possible to change config values using
``view.set_<config-parameter>(parameter)``.
"""
def add_method_signature(parameter):
"""Add set_<parameter> as a method signature to View class."""
def setme(args):
self.config[parameter] = args
setme.__name__ = "set_%s" % parameter
setme.__doc__ = "Set the parameter %s in the configuration." % parameter
setattr(self, setme.__name__, setme)
# def getme(*args):
# return self.config[parameter]
# getme.__name__ = "get_%s" % parameter
# getme.__doc__ = "Get the parameter %s from the configuration." % parameter
# setattr(self, getme.__name__, getme)
for parameter in self._config.dict:
add_method_signature(parameter)
    def show_plt_fig(self):
        """Show two interactive :class:`matplotlib.pyplot.Figure` plots.

        The first displays the data with config and pipeline applied.
        The second provides two matplotlib slider widgets to control
        the limits of the colorbar interactively and a *Reset* button
        to apply the default (full-range) colorbar limits.
        """
        # in order to successively open and close the
        # interactive figure, we have to
        # create a dummy figure and use its
        # manager to display "fig"
        if not self._plt_fig_is_active():
            dummy_fig = plt.figure()
            self._fig_manager = dummy_fig.canvas.manager
            # point the borrowed manager at our persistent figure
            self._fig_manager.canvas.figure = self._fig
            self._fig.set_canvas(self._fig_manager.canvas)
            self._fig.canvas.set_window_title('colorview2d plot')
            # same trick for the colorbar-control window
            dummy_fig_colorctrls = plt.figure(figsize=(9, 1))
            self._fig_manager_colorctrls = dummy_fig_colorctrls.canvas.manager
            self._fig_manager_colorctrls.canvas.figure = self._colorcontrolfigure
            self._colorcontrolfigure.set_canvas(self._fig_manager_colorctrls.canvas)
            self._colorcontrolfigure.canvas.set_window_title('colorview2d colorbar control')
        self.draw_plot()
        self._fig.show()
        self._show_cbsliders()
        plt.ion()
    def _plt_fig_is_active(self):
        """Check if there is an active canvas manager.

        If there is, we are (hopefully) running an active matplotlib.pyplot
        window with an interactive plot.

        Returns:
            boolean
        """
        return hasattr(self, '_fig_manager')
    def hide_plt_fig(self):
        """Hide the interactive :class:`matplotlib.pyplot.Figure`."""
        # To this end we have to destroy the figure manager.
        # See maptlotlib.pyplot.close().
        if self._plt_fig_is_active():
            plt._pylab_helpers.Gcf.destroy(self._fig_manager.num)
            delattr(self, '_fig_manager')
            plt._pylab_helpers.Gcf.destroy(self._fig_manager_colorctrls.num)
            delattr(self, '_fig_manager_colorctrls')
        # we delete _plot which indicates that we are not plotting
        if hasattr(self, '_plot'):
            delattr(self, '_plot')
def _create_modlist(self):
"""
Creates the list of mods from the mods/ folder and adds them
to the private modlist attribute.
We check if the module (with arbitrary name) contains a class
which inherits from colorview2d.IMod
"""
import pkgutil
import inspect
import colorview2d.mods
package = colorview2d.mods
for importer, modname, ispckg in pkgutil.iter_modules(package.__path__):
try:
mod = importer.find_module(modname).load_module(modname)
for name, obj in inspect.getmembers(mod):
if inspect.isclass(obj):
if issubclass(obj, colorview2d.IMod):
self._modlist[name] = obj()
except:
error = sys.exc_info()[0]
logging.error('Can not import mod %s.', modname)
logging.error('Error: %s.', error)
# Now let us export functions of the form add_Modname and rm_Modname
# to the namespace of the View class
def add_func_signatures(modtitle):
def addme(*args):
self.add_mod(modtitle, args)
addme.__name__ = "add_%s" % modtitle
addme.__doc__ = self._modlist[modtitle].do_apply.__doc__
setattr(self, addme.__name__, addme)
def removeme():
self.remove_mod(modtitle)
removeme.__name__ = "rm_%s" % modtitle
removeme.__doc__ = "Remove mod %s from pipeline." % modtitle
setattr(self, removeme.__name__, removeme)
for modtitle in self._modlist:
add_func_signatures(modtitle)
def add_mod(self, modname, modargs=(), pos=-1, do_apply=True):
"""Add a mod to the pipeline by its title string and its arguments either
to the end of the pipeline or at a specified postion.
Args:
modname (string): The type of the mod.
modargs (tuple): | |
# src/ai_training/http_server.py
"""
Module to show/control status of AIs via HTTP (asyncio/aiohttp)
"""
import asyncio
import datetime
import http
import json
import logging
import multiprocessing
import socket
import random
import traceback
import sys
import aiohttp
from aiohttp import web
import ai_training as ait
import ai_training.http_common as ait_http
import ai_training.http_item as http_item
import ai_training.save_controller as save_controller
import ai_training.common as aitc
import api_register
import async_process_pool.watchdog as a_watchdog
import async_process_pool.process_pool as a_pool
def _get_logger():
logger = logging.getLogger('hu.ai_training.http')
return logger
class ReinitializeError(aitc.Error):
    """Raised when initialization is attempted more than once."""
    pass
class ShutdownException(aitc.Error):
    """Raised when the shutdown watchdog timer expires."""
    pass
class HttpAiCollection(api_register.AiStatusProviderABC,
ait.AiTrainingControllerABC):
"""Collection of AI training object for aiohttp
Pass in a lookup to the object to the actual training data"""
    def __init__(self, training_lookup: ait.AiTrainingProviderABC):
        """Store the lookup and create cross-request state.

        Heavy initialization (HTTP session, watchdogs, API registration)
        happens later in :meth:`on_startup`.
        """
        self.training_lookup = training_lookup
        self.__http_client_session = None  # aiohttp session, created in on_startup
        self.logger = _get_logger()
        self.api_register = None  # api_register.Register, created in on_startup
        self.reregister_watchdog = None
        self.shutdown_watchdog = None
        self.chat_lock = asyncio.Lock()
        self.__registration_task = None
        self.__save_controller = None
        self.__multiprocessing_manager = multiprocessing.Manager()
        self.__chat_startup_lock = asyncio.Lock()
        self.__aiohttp_app = None
        # guards against on_startup being called twice
        self.__initialized = False
    async def training_callback(self, item):
        """Callback from an AI that there is status information to send
        to API"""
        config = self.training_lookup.config
        try:
            self.logger.debug("In training_callback")
            status_with_progress = item.status
            state_name = status_with_progress.state.name
            data = {
                'dev_id': item.dev_id,
                'ai_id': item.ai_id,
                'ai_engine': self.training_lookup.ai_engine_name,
                'training_status': state_name,
                'version': config.version,
                'language': config.language
            }
            # optional float fields are only included when present
            if status_with_progress.training_progress is not None:
                data['training_progress'] = float(
                    status_with_progress.training_progress)
            if status_with_progress.training_error is not None:
                data['training_error'] = float(
                    status_with_progress.training_error)
            if status_with_progress.training_hash is None:
                data['ai_hash'] = None
            else:
                data['ai_hash'] = str(status_with_progress.training_hash)
            if self.api_register.session_id is not None:
                data['server_session_id'] = self.api_register.session_id
            url = 'aiservices/{}/status'.format(item.ai_id)
            self.logger.debug("Status update '{}' to {}".format(
                state_name, url))
            status = await self._send_update_to_api(url, data)
            # if we get a CONFLICT response, stop training
            if status is http.HTTPStatus.CONFLICT:
                item.training_rejected()
            elif status is None or status is http.HTTPStatus.OK:
                # NOTE(review): a None status is treated like OK here --
                # presumably "no API to report to"; confirm
                self.__save_controller.set_save_state(item.ai_id, True)
            else:
                self.__save_controller.set_save_state(item.ai_id, False)
        except Exception:
            # We want to log and swallow exception and not kill the
            # calling watch loop
            self.logger.error(
                "Error caught in training callback", exc_info=True)
    def start_registration_with_api(self):
        """(Re)start the background registration loop against the API server."""
        # refresh API server
        api_server = self.training_lookup.config.api_server
        self.api_register.api_endpoint = api_server
        if api_server:
            self.__registration_task = asyncio.create_task(
                self.__register_with_api())
        else:
            self.logger.warning(
                "No api_endpoint specified, skipping registration")
    def __shutdown_watchdog_fired(self):
        """Watchdog callback: start an orderly shutdown of the server."""
        self.logger.warning("Shutdown watchdog fired!")
        self.reregister_watchdog.cancel()
        # this must be a non-async function but we want this to cleanup
        coro = self.__shutdown_watchdog_fired_async()
        asyncio.create_task(coro)
    async def __shutdown_watchdog_fired_async(self):
        """Shut down and clean up the aiohttp app, then kill the process."""
        try:
            print("Shutdown actions: shutdown")
            await self.__aiohttp_app.shutdown()
            print("Shutdown actions: cleanup")
            await self.__aiohttp_app.cleanup()
            print("Shutdown actions: exit")
        finally:
            # always take the process down, even if app cleanup failed
            self.training_lookup.kill_running_process()
    async def create_training_process_pool(self, training_processes: int,
                                           training_queue_size: int,
                                           worker_type: type):
        """Create a training pool of ``training_processes`` workers.

        Workers share this controller's save-controller.
        """
        training_pool = a_pool.AsyncProcessPool(
            self.__multiprocessing_manager, 'Training_pool',
            training_processes, training_queue_size, training_queue_size)
        await training_pool.initialize_processes(
            worker_type, save_controller=self.__save_controller)
        return training_pool
    @property
    def multiprocessing_manager(self):
        """The shared :class:`multiprocessing.Manager` used for worker pools."""
        return self.__multiprocessing_manager
    @property
    def save_controller(self):
        """The save controller created in :meth:`on_startup` (None before)."""
        return self.__save_controller
    def get_item(self, dev_id, ai_id) -> ait.AiTrainingItemABC:
        """Get item from lookup (may return None for an unknown AI)."""
        item = self.training_lookup.lookup_item(dev_id, ai_id)
        return item
    async def load_training_data_async(self, dev_id,
                                       ai_id) -> ait.AiTrainingItemABC:
        """Load the item via the lookup and attach this controller to it."""
        item = await self.training_lookup.load_training_data_async(
            dev_id, ai_id)
        if item is not None:
            item.controller = self
        return item
    async def delete_item(self, dev_id, ai_id) -> ait.AiTrainingItemABC:
        """Delete the AI via the lookup and return the removed item.

        (The previous docstring, "Get item from lookup", was a copy-paste
        error -- this method deletes.)
        """
        item = await self.training_lookup.delete_ai(dev_id, ai_id)
        return item
    async def on_post_ai(self, req: aiohttp.web.Request):
        """POST handler - upload training data.

        Creates the AI if unknown, stops an in-flight training run first,
        writes the data to disk, and resets status to ai_ready_to_train.
        """
        dev_id, ai_id, training_data = await self._get_training_data_from_request(req)
        self.logger.info("Add training data for AI {}/{}".format(
            dev_id, ai_id))
        item = self.training_lookup.lookup_item(dev_id, ai_id)
        if item is None:
            self.logger.info(
                "AI {}/{} is not found - creating "
                "training data".format(dev_id, ai_id))
            item = self.training_lookup.create(dev_id, ai_id)
        else:
            if item.status.is_training:
                self.logger.info(
                    "AI {}/{} is training - stopping before changing "
                    "training data".format(dev_id, ai_id))
                await item.stop_training()
        item.controller = self
        hash_value = ait.training_file.write_training_data_to_disk_v1(
            item.ai_data_directory, training_data)
        status = ait.AiTrainingStatusWithProgress(
            ait.AiTrainingState.ai_ready_to_train,
            training_file_hash=hash_value)
        # we have an explicit request to change state, so we will
        # always save this
        item.reset_status(status, always_save=True)
        await item.notify_status_update()
        url = "{}/{}/{}".format(req.url, dev_id, ai_id)
        data = {'status': item.status.state.name, 'url': url}
        resp = web.json_response(data)
        return resp
    async def on_get_statuses(self, req: aiohttp.web.Request):
        """Endpoint that reads the statuses of all AIs.

        This allows API to query a single master instance rather than
        wait for all of them.
        """
        if self.training_lookup.config.training_enabled:
            ai_statuses = await self.get_ai_statuses_for_api()
            ai_data = [{
                'ai_id': ai.ai_id,
                'training_status': ai.training_status,
                'ai_hash': ai.ai_hash
            } for ai in ai_statuses]
        else:
            # If no training capacity don't report AIs
            ai_data = []
        resp = web.json_response(ai_data)
        return resp
    async def on_delete_dev(self, req):
        """Request to delete a dev.

        Responds 404 when the dev is unknown, empty 200 otherwise.
        """
        dev_id = req.match_info['dev_id']
        try:
            await self.training_lookup.delete_dev(dev_id)
        except ait.TrainingNotFoundError:
            # translate the domain error into an HTTP 404
            raise aiohttp.web.HTTPNotFound()
        resp = web.Response()
        return resp
    async def on_startup(self, app):
        """Initialise server: session, watchdogs and API registration.

        Raises:
            ReinitializeError: if called more than once.
        """
        if self.__initialized:
            raise ReinitializeError("Initialized more than once")
        self.__initialized = True
        self.__aiohttp_app = app
        self.__save_controller = save_controller.SaveController(
            self.__multiprocessing_manager)
        self.__http_client_session = aiohttp.ClientSession()
        self.training_lookup.controller = self
        await self.training_lookup.on_startup()
        # every AI already known to the lookup gets a back-reference
        for _, value in self.training_lookup.items():
            value.controller = self
        engine = self.training_lookup.ai_engine_name
        config = self.training_lookup.config
        # jitter the watchdog timeouts so many instances don't fire together
        heartbeat_timeout = (
            config.api_heartbeat_timeout_seconds * random.uniform(0.8, 1.2))
        self.reregister_watchdog = a_watchdog.Watchdog(
            heartbeat_timeout, self.start_registration_with_api,
            "API re-register")
        shutdown_timeout = (
            config.api_shutdown_timeout_seconds * random.uniform(0.8, 1.2))
        self.shutdown_watchdog = a_watchdog.Watchdog(
            shutdown_timeout, self.__shutdown_watchdog_fired,
            "Shutdown")
        training_enabled = config.training_enabled
        chat_enabled = config.chat_enabled
        this_server_url = config.this_server_url
        self.logger.info(
            "*** Started backend server type={}, version={}, language={}".
            format(engine, config.version, config.language))
        self.logger.info(
            "*** AI root directory is at {}".
            format(config.training_data_root))
        if not this_server_url:
            # Kubernetes doesn't deal with DNS entries, so we'll need to use
            # the pod's IP which
            # will be accessible from other pods in the cluster.
            # Docker swarm should work either way.
            hostname = socket.getfqdn(
            )  # Using FQDN makes it more likely to work on dev PCs too
            primary_ip = socket.gethostbyname(hostname)
            this_server_url = "http://{}:9090/ai".format(primary_ip)
            self.logger.info(
                "*** Server URL not specified, using {}".format(
                    this_server_url))
        else:
            self.logger.info(
                "*** Service URL={}".
                format(this_server_url))
        self.logger.info("*** Enabled: train={}, chat={}".format(
            training_enabled, chat_enabled))
        self.api_register = api_register.Register(
            training_enabled,
            chat_enabled,
            ai_engine_type=engine,
            this_service_url=this_server_url,
            language=config.language,
            version=config.version,
            provider=self)
        self.start_registration_with_api()
    async def on_heartbeat(self, req):
        """Incoming heartbeat from API.

        Validates the session id and resets both watchdogs.
        """
        try:
            json_data = await req.json()
        except json.JSONDecodeError:
            # NOTE: raise_bad_request is assumed to raise; otherwise
            # json_data would be unbound below
            ait_http.raise_bad_request('missing session ID as Json in request')
        try:
            session_id = json_data['server_session_id']
        except KeyError:
            ait_http.raise_bad_request('missing session ID in request')
        expected_session_id = self.api_register.session_id
        if session_id != expected_session_id:
            ait_http.raise_bad_request(
                'Invalid session ID in request, was expecting {}, got {}'.
                format(expected_session_id, session_id))
        # valid heartbeat: push back both timeout timers
        self.reregister_watchdog.reset_watchdog()
        self.shutdown_watchdog.reset_watchdog()
        resp = web.Response()
        return resp
def is_training_slot_available(self):
"""Returns true if there is an available training slot"""
if not self.training_lookup.config.training_enabled:
return False
for _, value in self.training_lookup.items():
if value.is_training_active:
return False
return True
    async def start_chat_for_ai(self, item: ait.AiTrainingItemABC):
        """Starts chat for a given AI.

        Evicts the oldest other chat when more than one is active, then
        reports the resulting chat-affinity set to the API.
        """
        config = self.training_lookup.config
        # Make sure that we can only process on chat start at a time
        async with self.__chat_startup_lock:
            chat_enabled = config.chat_enabled
            if not chat_enabled:
                ait_http.raise_bad_request("Chat not enabled on this server")
            active_chat_count = 0
            oldest_chat = None
            # create a set of AIs that are active, this is the chat affinity
            # This includes the AI we are about to start chatting to
            chat_affinity = {item.ai_id}
            item.last_chat_time = datetime.datetime.utcnow()
            # make sure this item knows we are its controller
            item.controller = self
            for _, value in self.training_lookup.items():
                if value.last_chat_time is not None:
                    active_chat_count += 1
                    chat_affinity.add(value.ai_id)
                    # track the least-recently-used chat other than ours
                    if (value.ai_id != item.ai_id and
                            (oldest_chat is None or
                             value.last_chat_time < oldest_chat.last_chat_time)):
                        oldest_chat = value
            if active_chat_count > 1:
                # evict the oldest chat to make room for the new one
                oldest_chat.last_chat_time = None
                await oldest_chat.shutdown_chat()
                chat_affinity.remove(oldest_chat.ai_id)
            # send chat affinity
            session_id = self.api_register.session_id
            if session_id is None:
                self.logger.info(
                    "Chat affinity not sent - no session ID available")
                return
            chat_affinity_list = list(chat_affinity)
            data = {
                'server_session_id': session_id,
                'ai_list': chat_affinity_list,
                'version': config.version,
                'language': config.language
            }
            self.logger.info(
                "Chat affinity update {}".format(chat_affinity_list))
            # fire-and-forget: don't block chat startup on the API call
            coro = self._send_update_to_api('aiservices/affinity', data)
            asyncio.create_task(coro)
async def on_shutdown(self):
"""Shutdown client connection"""
await self.training_lookup.on_shutdown()
await self.__http_client_session.close()
if (self.__registration_task is not None
and not self.__registration_task.done()):
self.__registration_task.cancel()
async def get_ai_statuses_for_api(self):
ais = []
# force read the current AIs to a list to avoid a race condition
# where the dictionary changes
training_items = list(self.training_lookup.items())
# make sure that we reload training status from disk EVERY time
# This will add load to the disk, but will avoid Bug 3491
for _, item in training_items:
item_from_disk = await self.load_training_data_async(
item.dev_id, item.ai_id)
status = api_register.ApiAiStatus(
item_from_disk.status.state.name, item_from_disk.ai_id,
item_from_disk.status.training_hash)
ais.append(status)
return ais
    async def __register_with_api(self):
        # Run the registration loop against the API using the shared HTTP
        # session; when it returns, reset the re-registration watchdog so
        # the server is not considered stale.
        await self.api_register.registration_loop(self.__http_client_session)
        self.reregister_watchdog.reset_watchdog()
async def _get_training_data_from_request(self, req: aiohttp.web.Request):
if 'multipart' not in req.content_type:
ait_http.raise_bad_request(
'bad request: ai creation needs multipart/form-data '
'content type')
try:
reader = await req.multipart()
except ValueError:
self.logger.warning('Failed to read multipart', exc_info=True)
ait_http.raise_bad_request(
'bad request: ai creation multipart is invalid')
dev_id = None
ai_id = None
training_data = None
while True:
part = await reader.next()
if part is None:
break # all read
if part.filename == 'training.txt':
training_data = | |
import copy
import datetime
from decimal import Decimal
import logging
import uuid
import json
import cStringIO
from couchdbkit import ResourceNotFound
import dateutil
from django.core.paginator import Paginator
from django.views.generic import View
from django.db.models import Sum
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import redirect, render
from django.contrib import messages
from django.views.decorators.http import require_POST
from PIL import Image
from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy
from corehq.const import USER_DATE_FORMAT
from custom.dhis2.forms import Dhis2SettingsForm
from custom.dhis2.models import Dhis2Settings
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from corehq.apps.accounting.async_handlers import Select2BillingInfoHandler
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.decorators import (
requires_privilege_with_fallback,
)
from corehq.apps.hqwebapp.tasks import send_mail_async
from corehq.apps.accounting.exceptions import (
NewSubscriptionError,
PaymentRequestError,
)
from corehq.apps.accounting.payment_handlers import (
BulkStripePaymentHandler,
CreditStripePaymentHandler,
InvoiceStripePaymentHandler,
)
from corehq.apps.accounting.subscription_changes import DomainDowngradeStatusHandler
from corehq.apps.accounting.forms import EnterprisePlanContactForm
from corehq.apps.accounting.utils import (
get_change_status, get_privileges, fmt_dollar_amount,
quantize_accounting_decimal, get_customer_cards,
)
from corehq.apps.hqwebapp.async_handler import AsyncHandlerMixin
from corehq.apps.smsbillables.async_handlers import SMSRatesAsyncHandler, SMSRatesSelect2AsyncHandler
from corehq.apps.smsbillables.forms import SMSRateCalculatorForm
from corehq.apps.users.models import DomainInvitation
from corehq.apps.fixtures.models import FixtureDataType
from corehq.toggles import NAMESPACE_DOMAIN, all_toggles, CAN_EDIT_EULA, TRANSFER_DOMAIN
from corehq.util.context_processors import get_domain_type
from dimagi.utils.couch.resource_conflict import retry_resource
from corehq import privileges, feature_previews
from django_prbac.utils import has_privilege
from corehq.apps.accounting.models import (
Subscription, CreditLine, SoftwareProductType, SubscriptionType,
DefaultProductPlan, SoftwarePlanEdition, BillingAccount,
BillingAccountType,
Invoice, BillingRecord, InvoicePdf, PaymentMethodType,
PaymentMethod, EntryPoint, WireInvoice, SoftwarePlanVisibility, FeatureType,
StripePaymentMethod,
)
from corehq.apps.accounting.usage import FeatureUsageCalculator
from corehq.apps.accounting.user_text import (
get_feature_name,
PricingTable,
DESC_BY_EDITION,
get_feature_recurring_interval,
)
from corehq.apps.hqwebapp.models import ProjectSettingsTab
from corehq.apps import receiverwrapper
from corehq.apps.domain.calculations import CALCS, CALC_FNS, CALC_ORDER, dom_calc
from corehq.apps.domain.decorators import (
domain_admin_required, login_required, require_superuser, login_and_domain_required
)
from corehq.apps.domain.forms import (
DomainGlobalSettingsForm, DomainMetadataForm, SnapshotSettingsForm,
SnapshotApplicationForm, DomainInternalForm, PrivacySecurityForm,
ConfirmNewSubscriptionForm, ProBonoForm, EditBillingAccountInfoForm,
ConfirmSubscriptionRenewalForm, SnapshotFixtureForm, TransferDomainForm,
SelectSubscriptionTypeForm, INTERNAL_SUBSCRIPTION_MANAGEMENT_FORMS)
from corehq.apps.domain.models import Domain, LICENSES, TransferDomainRequest
from corehq.apps.domain.utils import normalize_domain_name
from corehq.apps.hqwebapp.views import BaseSectionPageView, BasePageView, CRUDPaginatedViewMixin
from corehq.apps.orgs.models import Organization, OrgRequest, Team
from corehq.apps.domain.forms import ProjectSettingsForm
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_ip, json_response, get_site_domain
from corehq.apps.users.decorators import require_can_edit_web_users
from corehq.apps.receiverwrapper.forms import GenericRepeaterForm, FormRepeaterForm
from corehq.apps.receiverwrapper.models import FormRepeater, CaseRepeater, ShortFormRepeater, AppStructureRepeater, \
RepeatRecord
from dimagi.utils.post import simple_post
from toggle.models import Toggle
from corehq.apps.hqwebapp.tasks import send_html_email_async
# Logger dedicated to billing/accounting events raised from these views.
accounting_logger = logging.getLogger('accounting')
# User-facing messages shown when a payment request fails, keyed by the
# HTTP status code of the failure. Lazily translated at render time.
PAYMENT_ERROR_MESSAGES = {
    400: ugettext_lazy('Your request was not formatted properly.'),
    403: ugettext_lazy('Forbidden.'),
    404: ugettext_lazy('Page not found.'),
    500: ugettext_lazy("There was an error processing your request."
                       " We're working quickly to fix the issue. Please try again shortly."),
}
# Domain not required here - we could be selecting it for the first time. See notes domain.decorators
# about why we need this custom login_required decorator
@login_required
def select(request, domain_select_template='domain/select.html', do_not_redirect=False):
    """Show the domain-selection page, or jump straight to the last domain.

    Flow: no domains -> registration; open invitations, explicit
    do_not_redirect, or no remembered domain -> selection page; otherwise
    try to land on the last visited domain's dashboard, falling back to the
    selection page (and forgetting the stale session entry) when that fails.
    """
    domains_for_user = Domain.active_for_user(request.user)
    if not domains_for_user:
        return redirect('registration_domain', domain_type=get_domain_type(None, request))
    email = request.couch_user.get_email()
    open_invitations = [e for e in DomainInvitation.by_email(email) if not e.is_expired]
    additional_context = {
        'domains_for_user': domains_for_user,
        'open_invitations': open_invitations,
    }
    last_visited_domain = request.session.get('last_visited_domain')
    if open_invitations \
            or do_not_redirect \
            or not last_visited_domain:
        return render(request, domain_select_template, additional_context)
    else:
        domain = Domain.get_by_name(last_visited_domain)
        if domain and domain.is_active:
            # mirrors logic in login_and_domain_required
            if (
                request.couch_user.is_member_of(domain) or domain.is_public
                or (request.user.is_superuser and not domain.restrict_superusers)
                or domain.is_snapshot
            ):
                try:
                    # Imported lazily to avoid a circular import at module load.
                    from corehq.apps.dashboard.views import dashboard_default
                    return dashboard_default(request, last_visited_domain)
                except Http404:
                    pass
        # The remembered domain is gone/inaccessible: forget it and fall
        # back to the selection page.
        del request.session['last_visited_domain']
        return render(request, domain_select_template, additional_context)
@require_superuser
def incomplete_email(request,
                     incomplete_email_template='domain/incomplete_email.html'):
    """Render the superuser report of domains flagged as incomplete."""
    from corehq.apps.domain.tasks import (
        incomplete_self_started_domains,
        incomplete_domains_to_email
    )
    return render(request, incomplete_email_template, {
        'self_started': incomplete_self_started_domains,
        'dimagi_owned': incomplete_domains_to_email,
    })
class DomainViewMixin(object):
    """
    Mixin giving class-based views access to the current domain.

    Set strict_domain_fetching to True in subclasses to bypass the cache.
    """
    strict_domain_fetching = False

    @property
    @memoized
    def domain(self):
        """The normalized domain name from args[0] or kwargs['domain']."""
        if self.args:
            raw_name = self.args[0]
        else:
            raw_name = self.kwargs.get('domain', "")
        return normalize_domain_name(raw_name)

    @property
    @memoized
    def domain_object(self):
        """The Domain document for self.domain; raises Http404 if absent."""
        domain_doc = Domain.get_by_name(self.domain,
                                        strict=self.strict_domain_fetching)
        if not domain_doc:
            raise Http404()
        return domain_doc
class LoginAndDomainMixin(object):
    """Mixin applying ``login_and_domain_required`` to every request."""
    @method_decorator(login_and_domain_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginAndDomainMixin, self).dispatch(*args, **kwargs)
class SubscriptionUpgradeRequiredView(LoginAndDomainMixin, BasePageView,
                                      DomainViewMixin):
    """Page shown when the domain's plan lacks a privilege the user needs."""
    page_title = ugettext_lazy("Upgrade Required")
    template_name = "domain/insufficient_privilege_notification.html"

    @property
    def page_url(self):
        return self.request.get_full_path

    @property
    def page_name(self):
        return _("Sorry, you do not have access to %(feature_name)s") % {
            'feature_name': self.feature_name,
        }

    @property
    def is_domain_admin(self):
        # couch_user may be missing from the request entirely; treat that
        # as "not an admin" rather than erroring.
        if not hasattr(self.request, 'couch_user'):
            return False
        return self.request.couch_user.is_domain_admin(self.domain)

    @property
    def page_context(self):
        return {
            'domain': self.domain,
            'feature_name': self.feature_name,
            'plan_name': self.required_plan_name,
            'change_subscription_url': reverse(SelectPlanView.urlname,
                                               args=[self.domain]),
            'is_domain_admin': self.is_domain_admin,
        }

    @property
    def missing_privilege(self):
        # The privilege slug is the second positional URL argument.
        return self.args[1]

    @property
    def feature_name(self):
        return privileges.Titles.get_name_from_privilege(self.missing_privilege)

    @property
    def required_plan_name(self):
        # Cheapest plan edition that grants the missing privilege.
        return DefaultProductPlan.get_lowest_edition_by_domain(
            self.domain_object, [self.missing_privilege]
        )

    def get(self, request, *args, **kwargs):
        self.request = request
        self.args = args
        return super(SubscriptionUpgradeRequiredView, self).get(
            request, *args, **kwargs
        )
class BaseDomainView(LoginAndDomainMixin, BaseSectionPageView, DomainViewMixin):
    """Base for domain-scoped section pages; adds 'domain' to the context."""
    @property
    def main_context(self):
        context = super(BaseDomainView, self).main_context
        context.update({'domain': self.domain})
        return context

    @property
    @memoized
    def page_url(self):
        # None when the view declares no urlname.
        if not self.urlname:
            return None
        return reverse(self.urlname, args=[self.domain])
class BaseProjectSettingsView(BaseDomainView):
    """Base for pages that live under the "Project Settings" section."""
    section_name = ugettext_lazy("Project Settings")
    template_name = "settings/base_template.html"

    @property
    def main_context(self):
        context = super(BaseProjectSettingsView, self).main_context
        active_tab = ProjectSettingsTab(
            self.request,
            self.urlname,
            domain=self.domain,
            couch_user=self.request.couch_user,
            project=self.request.project
        )
        context.update({
            'active_tab': active_tab,
            'is_project_settings': True,
        })
        return context

    @property
    @memoized
    def section_url(self):
        return reverse(EditMyProjectSettingsView.urlname, args=[self.domain])
class DefaultProjectSettingsView(BaseDomainView):
    """Redirect to the right settings page for the user's access level."""
    urlname = 'domain_settings_default'

    def get(self, request, *args, **kwargs):
        # Admins land on the full project-info editor; everyone else gets
        # their personal settings page.
        if request.couch_user.is_domain_admin(self.domain):
            target_urlname = EditBasicProjectInfoView.urlname
        else:
            target_urlname = EditMyProjectSettingsView.urlname
        return HttpResponseRedirect(reverse(target_urlname, args=[self.domain]))
class BaseAdminProjectSettingsView(BaseProjectSettingsView):
    """
    The base class for all project settings views that require administrative
    access.
    """
    @method_decorator(domain_admin_required)
    def dispatch(self, request, *args, **kwargs):
        # BUGFIX: this previously called super(BaseProjectSettingsView, ...),
        # which starts the MRO lookup *after* the parent class and would
        # silently skip any dispatch() defined on BaseProjectSettingsView
        # (harmless today only because no intermediate class defines one).
        # Name this class, per the standard super() convention.
        return super(BaseAdminProjectSettingsView, self).dispatch(
            request, *args, **kwargs)
class BaseEditProjectInfoView(BaseAdminProjectSettingsView):
    """
    The base class for all the edit project information views.
    """
    # Always re-fetch the domain doc (bypass the cache) on edit pages so
    # saves operate on fresh data.
    strict_domain_fetching = True

    @property
    def autocomplete_fields(self):
        # Field names the template should autocomplete; none by default.
        return []

    @property
    def main_context(self):
        context = super(BaseEditProjectInfoView, self).main_context
        context.update({
            'autocomplete_fields': self.autocomplete_fields,
            'commtrack_enabled': self.domain_object.commtrack_enabled,
            # ideally the template gets access to the domain doc through
            # some other means. otherwise it has to be supplied to every view reachable in that sidebar (every
            # view whose template extends users_base.html); mike says he's refactoring all of this imminently, so
            # i will not worry about it until he is done
            'call_center_enabled': self.domain_object.call_center_config.enabled,
            'cloudcare_releases': self.domain_object.cloudcare_releases,
        })
        return context
class EditBasicProjectInfoView(BaseEditProjectInfoView):
    """Edit page for the basic project information settings."""
    template_name = 'domain/admin/info_basic.html'
    urlname = 'domain_basic_info'
    page_title = ugettext_lazy("Basic")

    @property
    def can_user_see_meta(self):
        # Previewers get the extended metadata form instead of the
        # global-settings form.
        return self.request.couch_user.is_previewer()

    @property
    def can_use_custom_logo(self):
        return has_privilege(self.request, privileges.CUSTOM_BRANDING)

    @property
    @memoized
    def basic_info_form(self):
        # Four cases below: bound (POST) vs. unbound form, crossed with
        # metadata (previewer) vs. global-settings form.
        initial = {
            'hr_name': self.domain_object.hr_name or self.domain_object.name,
            'default_timezone': self.domain_object.default_timezone,
            'case_sharing': json.dumps(self.domain_object.case_sharing),
            'call_center_enabled': self.domain_object.call_center_config.enabled,
            'call_center_type': self.initial_call_center_type,
            'call_center_case_owner': self.initial_call_center_case_owner,
            'call_center_case_type': self.domain_object.call_center_config.case_type,
            'commtrack_enabled': self.domain_object.commtrack_enabled,
        }
        if self.request.method == 'POST':
            if self.can_user_see_meta:
                return DomainMetadataForm(
                    self.request.POST,
                    self.request.FILES,
                    user=self.request.couch_user,
                    domain=self.domain_object.name,
                    can_use_custom_logo=self.can_use_custom_logo,
                )
            return DomainGlobalSettingsForm(
                self.request.POST,
                self.request.FILES,
                domain=self.domain_object.name,
                can_use_custom_logo=self.can_use_custom_logo
            )
        if self.can_user_see_meta:
            # Previewer-only initial fields.
            initial.update({
                'is_test': self.domain_object.is_test,
                'cloudcare_releases': self.domain_object.cloudcare_releases,
            })
            return DomainMetadataForm(
                can_use_custom_logo=self.can_use_custom_logo,
                user=self.request.couch_user,
                domain=self.domain_object.name,
                initial=initial
            )
        return DomainGlobalSettingsForm(
            initial=initial,
            domain=self.domain_object.name,
            can_use_custom_logo=self.can_use_custom_logo
        )

    @property
    @memoized
    def initial_call_center_case_owner(self):
        config = self.domain_object.call_center_config
        if config.use_user_location_as_owner:
            return DomainGlobalSettingsForm.USE_LOCATIONS_CHOICE
        return self.domain_object.call_center_config.case_owner_id

    @property
    @memoized
    def initial_call_center_type(self):
        if self.domain_object.call_center_config.use_fixtures:
            return DomainGlobalSettingsForm.CASES_AND_FIXTURES_CHOICE
        return DomainGlobalSettingsForm.CASES_ONLY_CHOICE

    @property
    def page_context(self):
        return {
            'basic_info_form': self.basic_info_form,
        }

    def post(self, request, *args, **kwargs):
        if self.basic_info_form.is_valid():
            if self.basic_info_form.save(request, self.domain_object):
                messages.success(request, _("Project settings saved!"))
            else:
                messages.error(request, _("There seems to have been an error saving your settings. Please try again!"))
        return self.get(request, *args, **kwargs)
class EditMyProjectSettingsView(BaseProjectSettingsView):
    """Let a web user view/override the project timezone for themselves."""
    template_name = 'domain/admin/my_project_settings.html'
    urlname = 'my_project_settings'
    page_title = ugettext_lazy("My Timezone")

    @property
    @memoized
    def my_project_settings_form(self):
        global_tz = self.domain_object.default_timezone
        membership = self.domain_membership
        if membership:
            override = membership.override_global_tz
            user_tz = membership.timezone if override else global_tz
        else:
            override = False
            user_tz = global_tz
        initial = {
            'global_timezone': global_tz,
            'override_global_tz': override,
            'user_timezone': user_tz,
        }
        if self.request.method == 'POST':
            return ProjectSettingsForm(self.request.POST, initial=initial)
        return ProjectSettingsForm(initial=initial)

    @property
    @memoized
    def domain_membership(self):
        return self.request.couch_user.get_domain_membership(self.domain)

    @property
    def page_context(self):
        membership = self.domain_membership
        return {
            'my_project_settings_form': self.my_project_settings_form,
            'override_global_tz': membership.override_global_tz if membership else False,
            'no_domain_membership': not membership,
        }

    def post(self, request, *args, **kwargs):
        form = self.my_project_settings_form
        if form.is_valid():
            form.save(self.request.couch_user, self.domain)
            messages.success(request, _("Your project settings have been saved!"))
        return self.get(request, *args, **kwargs)
class EditDhis2SettingsView(BaseProjectSettingsView):
    """Configure the DHIS2 API integration settings for the domain."""
    template_name = 'domain/admin/dhis2_settings.html'
    urlname = 'dhis2_settings'
    page_title = ugettext_lazy("DHIS2 API settings")

    @property
    @memoized
    def dhis2_settings_form(self):
        saved = Dhis2Settings.for_domain(self.domain_object.name)
        initial = saved.dhis2 if saved else {'enabled': False}
        if self.request.method == 'POST':
            return Dhis2SettingsForm(self.request.POST, initial=initial)
        return Dhis2SettingsForm(initial=initial)

    @property
    def page_context(self):
        return {'dhis2_settings_form': self.dhis2_settings_form}

    def post(self, request, *args, **kwargs):
        form = self.dhis2_settings_form
        if form.is_valid():
            if form.save(self.domain_object):
                messages.success(request, _('DHIS2 API settings successfully updated'))
            else:
                messages.error(request, _('There seems to have been an error. Please try again.'))
        return self.get(request, *args, **kwargs)
@require_POST
@require_can_edit_web_users
def drop_repeater(request, domain, repeater_id):
    """Retire (stop) a form repeater and return to the forwarding page.

    POST-only; requires web-user edit permission on the domain.
    """
    rep = FormRepeater.get(repeater_id)
    rep.retire()
    # FIX: wrap the user-facing message in ugettext for consistency with
    # every other message in this module (it was previously untranslated).
    messages.success(request, _("Form forwarding stopped!"))
    return HttpResponseRedirect(reverse(DomainForwardingOptionsView.urlname, args=[domain]))
@require_POST
@require_can_edit_web_users
def test_repeater(request, domain):
    """Validate a repeater URL by POSTing a stub payload to it.

    Returns a JSON body with 'success', the remote 'response', and (when a
    request was actually made) the HTTP 'status'.
    """
    url = request.POST["url"]
    repeater_type = request.POST['repeater_type']
    # Renamed local from `format` to avoid shadowing the builtin.
    payload_format = request.POST['format']
    form = GenericRepeaterForm(
        {"url": url, "format": payload_format},
        domain=domain,
        repeater_class=receiverwrapper.models.repeater_types[repeater_type]
    )
    if not form.is_valid():
        return HttpResponse(json.dumps({"success": False, "response": "Please enter a valid url."}))
    url = form.cleaned_data["url"]

    def _make_stub_payload(repeater_type):
        # Case repeaters get a minimal fake case block; everything else
        # gets a trivial XML document.
        if 'case' in repeater_type.lower():
            return CaseBlock(
                case_id='test-case-%s' % uuid.uuid4().hex,
                create=True,
                case_type='test',
                case_name='test case',
            ).as_string()
        return "<?xml version='1.0' ?><data id='test'><TestString>Test post from CommCareHQ on %s</TestString></data>" % \
            (datetime.datetime.utcnow())

    fake_post = _make_stub_payload(repeater_type)
    try:
        resp = simple_post(fake_post, url)
        # The two original branches differed only in the 'success' flag;
        # collapse them.
        return HttpResponse(json.dumps({"success": 200 <= resp.status < 300,
                                        "response": resp.read(),
                                        "status": resp.status}))
    except Exception as e:
        # FIX: was the Python-2-only `except Exception, e` syntax; the
        # `as` form is valid on Python 2.6+ and Python 3.
        return HttpResponse(json.dumps({"success": False, "response": str(e)}))
def autocomplete_fields(request, field):
    """Return a JSON list of known values of `field` matching ?prefix=."""
    prefix = request.GET.get('prefix', '')
    matches = Domain.field_by_prefix(field, prefix)
    return HttpResponse(json.dumps(matches))
def logo(request, domain):
logo = Domain.get_by_name(domain).get_custom_logo()
if logo is | |
# Repository: RandalJBarnes/NagadanPy -- file: nagadanpy/nagadan.py
"""
The entry point for the NagadanPy project.
Classes
-------
None
Exceptions
----------
None.
Functions
---------
nagadan(target, npaths, duration,
base, conductivity, porosity, thickness,
wells, observations,
buffer=100, spacing=10, umbra=10,
confined=True, tol=1, maxstep=10):
The entry-point for the NagadanPy project.
log_the_run(
target, npaths, duration,
base, conductivity, porosity, thickness,
wells, observations,
buffer, spacing, umbra,
confined, tol, maxstep):
Print the banner and run information to the log file.
plot_locations(target, wells, obs):
Plot the locations of the wells, with an emphasis on the target
well, and the observations.
Notes
-----
o This package is a work in progress.
o This module currently generates plots using python's matplotlib
facility. We will remove these plots when we integrate into
ArcGIS Pro.
References
----------
o Belsley, D. A., Kuh, E., and Welsch, R. E., 2004, Regression
  Diagnostics - Identifying Influential Data and Sources of
  Collinearity, Wiley-Interscience, ISBN: 9780471691174.
Authors
-------
Dr. <NAME>
Department of Civil, Environmental, and Geo- Engineering
University of Minnesota
<NAME>
Source Water Protection
Minnesota Department of Health
Version
-------
07 May 2020
"""
import io
import logging
import matplotlib.pyplot as plt
import numpy as np
import scipy
import statsmodels.stats.outliers_influence as smso
import statsmodels.api as sm
import time
from nagadanpy.boomerang import compute_boomerang
from nagadanpy.capturezone import compute_capturezone
from nagadanpy.model import Model
from nagadanpy.probabilityfield import ProbabilityField
from nagadanpy.utilities import contour_head, filter_obs, summary_statistics
# Package-wide logger; handlers/levels are configured by the caller.
log = logging.getLogger('NagadanPy')
# Version stamp echoed in the run-log banner.
VERSION = '07 May 2020'
# -----------------------------------------------
def nagadan(
target, npaths, duration,
base, conductivity, porosity, thickness,
wells, observations,
xmin=np.nan, xmax=np.nan, ymin=np.nan, ymax=np.nan,
buffer=100, spacing=10, umbra=10,
confined=True, tol=1, maxstep=10):
"""
The entry-point for the NagadanPy project.
Arguments
---------
target : int
The index identifying the target well in the wells.
That is, the well for which we will compute a stochastic
capture zone. This uses python's 0-based indexing.
npaths : int
The number of paths (starting points for the backtraces)
to generate uniformly around the target well. 0 < npaths.
duration : float
The duration of the capture zone [d]. For example, a 10-year
capture zone would have a duration = 10*365.25. 0 < duration.
base : float
The base elevation of the aquifer [m].
conductivity : float
The hydraulic conductivity of the aquifer [m/d]. 0 < conductivity.
porosity : float
The porosity of the aquifer []. 0 < porosity < 1.
thickness : float
The thickness of the aquifer [m]. 0 < thickness.
wells : list
The list of well tuples. Each well tuple has four components.
xw : float
The x-coordinate of the well [m].
yw : float
The y-coordinate of the well [m].
rw : float
The radius of the well [m]. 0 < rw.
qw : float
The discharge of the well [m^3/d].
observations : list of observation tuples.
An observation tuple contains four values: (x, y, z_ev, z_std), where
x : float
The x-coordinate of the observation [m].
y : float
The y-coordinate of the observation [m].
z_ev : float
The expected value of the observed static water level elevation [m].
z_std : float
The standard deviation of the observed static water level elevation [m].
buffer : float, optional
The buffer distance [m] around each well. If an obs falls
within buffer of any well, it is removed. Default is 100 [m].
spacing : float, optional
The spacing of the rows and the columns [m] in the square
ProbabilityField grids. Default is 10 [m].
umbra : float, optional
The vector-to-raster range [m] when mapping a particle path
onto the ProbabilityField grids. If a grid node is within
umbra of a particle path, it is marked as visited. Default is 10 [m].
confined : boolean, optional
True if it is safe to assume that the aquifer is confined
throughout the domain of interest, False otherwise. This is a
speed kludge. Default is True.
tol : float, optional
The tolerance [m] for the local error when solving the
backtrace differential equation. This is an inherent
parameter for an adaptive Runge-Kutta method. Default is 1.
maxstep : float, optional
The maximum allowed step in space [m] when solving the
backtrace differential equation. This is a maximum space
step and NOT a maximum time step. Default is 10.
Returns
-------
None.
Notes
-----
o Most of the time-consuming work is orchestrated by the
create_capturezone function.
"""
# Validate the arguments.
assert(isinstance(target, int) and 0 <= target < len(wells))
assert(isinstance(npaths, int) and 0 < npaths)
assert((isinstance(duration, int) or isinstance(duration, float)) and 0 < duration)
assert(isinstance(base, int) or isinstance(base, float))
assert((isinstance(conductivity, int) or isinstance(conductivity, float)) and 0 < conductivity)
assert(isinstance(porosity, float) and 0 < porosity < 1)
assert((isinstance(thickness, int) or isinstance(thickness, float)) and 0 < thickness)
assert(isinstance(wells, list) and len(wells) >= 1)
for we in wells:
assert(len(we) == 4 and
(isinstance(we[0], int) or isinstance(we[0], float)) and
(isinstance(we[1], int) or isinstance(we[1], float)) and
(isinstance(we[2], int) or isinstance(we[2], float)) and 0 < we[2] and
(isinstance(we[3], int) or isinstance(we[3], float)))
assert(isinstance(observations, list) and len(observations) > 6)
for ob in observations:
assert(len(ob) == 4 and
(isinstance(ob[0], int) or isinstance(ob[0], float)) and
(isinstance(ob[1], int) or isinstance(ob[1], float)) and
(isinstance(ob[2], int) or isinstance(ob[2], float)) and
(isinstance(ob[3], int) or isinstance(ob[3], float)) and 0 <= ob[3])
assert((isinstance(buffer, int) or isinstance(buffer, float)) and 0 < buffer)
assert((isinstance(spacing, int) or isinstance(spacing, float)) and 0 < spacing)
assert((isinstance(umbra, int) or isinstance(umbra, float)) and 0 < umbra)
assert(isinstance(confined, bool))
assert((isinstance(tol, int) or isinstance(tol, float)) and 0 < tol)
assert((isinstance(maxstep, int) or isinstance(maxstep, float)) and 0 < maxstep)
# Initialize the stopwatch.
start_time = time.time()
# Log the run information.
log_the_run(
target, npaths, duration,
base, conductivity, porosity, thickness,
wells, observations,
buffer, spacing, umbra,
confined, tol, maxstep)
# Filter out all of the observations that are too close to any
# pumping well, and average the duplicate observations.
obs = filter_obs(observations, wells, buffer)
nobs = len(obs)
assert(nobs > 6)
# Log summary statistics on the wells and the active observations.
buf = summary_statistics(wells, ['Easting', 'Northing', 'Radius', 'Discharge'],
['12.2f', '12.2f', '12.3f', '12.2f'], 'Wells')
log.info('\n')
log.info(buf.getvalue())
buf = summary_statistics(obs, ['Easting', 'Northing', 'Head', 'Std'],
['12.2f', '12.2f', '10.2f', '10.2f'], 'Active Observations')
log.info('\n')
log.info(buf.getvalue())
# Set the target.
xtarget, ytarget, rtarget = wells[target][0:3]
# Create the model
mo = Model(base, conductivity, porosity, thickness, wells)
# General influence statistics
WA, Wb = mo.construct_fit(obs, xtarget, ytarget)
ols_model = sm.OLS(Wb, WA, hasconst=True)
ols_results = ols_model.fit()
ols_influence = smso.OLSInfluence(ols_results)
log.info('\n')
log.info(ols_results.summary(
xname = ['A', 'B', 'C', 'D', 'E', 'F'], yname = 'scaled potential'))
log.info('\n')
log.info(ols_influence.summary_frame())
# Compute the exhaustive leave-one-out and leave-two-out boomerang analyses.
kldiv_one, kldiv_two, kldiv_three = compute_boomerang(WA, Wb)
kldiv_one.sort(reverse=True)
kldiv_two.sort(reverse=True)
kldiv_three.sort(reverse=True)
most_influential_singleton = kldiv_one[0][1]
most_influential_pair = [kldiv_two[0][1], kldiv_two[0][2]]
most_influential_triple = [kldiv_three[0][1], kldiv_three[0][2], kldiv_three[0][3]]
log.info('\n')
log.info('Top 5 of the Leave-one-out analysis:')
for i in range(min(len(kldiv_one), 5)):
log.info(' {0}'.format(kldiv_one[i]))
log.info('\n')
log.info('Top 5 of the Leave-two-out analysis:')
for i in range(min(len(kldiv_two), 5)):
log.info(' {0}'.format(kldiv_two[i]))
log.info('\n')
log.info('Top 5 of the Leave-three-out analysis:')
for i in range(min(len(kldiv_three), 5)):
log.info(' {0}'.format(kldiv_three[i]))
# Define the local backtracing velocity function.
if confined:
def feval(xy):
Vx, Vy = mo.compute_velocity_confined(xy[0], xy[1])
return np.array([-Vx, -Vy])
else:
def feval(xy):
Vx, Vy = mo.compute_velocity(xy[0], xy[1])
return np.array([-Vx, -Vy])
# Compute the four capture zones around the target well ---
# Using all of the obs.
mo.fit_regional_flow(obs, xtarget, ytarget)
pf0 = ProbabilityField(spacing, spacing, xtarget, ytarget)
compute_capturezone(
xtarget, ytarget, rtarget, npaths, duration,
pf0, umbra, 1.0, tol, maxstep, feval)
# Using all of the obs except the most influential singleton.
obs1 = np.delete(obs, most_influential_singleton, 0)
mo.fit_regional_flow(obs1, xtarget, ytarget)
pf1 = ProbabilityField(spacing, spacing, xtarget, ytarget)
compute_capturezone(
xtarget, ytarget, rtarget, npaths, duration,
pf1, umbra, 1.0, tol, maxstep, feval)
# Using all of the obs except the most influential pair.
obs2 = np.delete(obs, most_influential_pair, 0)
mo.fit_regional_flow(obs2, xtarget, ytarget)
pf2 = ProbabilityField(spacing, spacing, xtarget, ytarget)
compute_capturezone(
xtarget, ytarget, rtarget, npaths, duration,
pf2, umbra, 1.0, tol, maxstep, feval)
# Using all of the obs except the most influential triple.
obs3 = np.delete(obs, most_influential_triple, 0)
mo.fit_regional_flow(obs3, xtarget, ytarget)
pf3 = ProbabilityField(spacing, spacing, xtarget, ytarget)
compute_capturezone(
xtarget, ytarget, rtarget, npaths, duration,
pf3, umbra, 1.0, tol, maxstep, feval)
# Compute the capture zone statistics.
Xmin = min([pf0.xmin, pf1.xmin, pf2.xmin, pf3.xmin])
Xmax = max([pf0.xmax, pf1.xmax, pf2.xmax, pf3.xmax])
Ymin = min([pf0.ymin, pf1.ymin, pf2.ymin, pf3.ymin])
Ymax = max([pf0.ymax, | |
json.loads(f.read())
allID = allPlayers[ID]
string = f'Login ID of {name} is:'
for i in allID:
#_ba.chatmessage(i)
if (i != ID): string += '\n' + i
ba.screenmessage(string, transient=True, color=(1, 1, 1))
commandSuccess = True
except:
ba.screenmessage(f"Using: /whois [ClientID or Name]", clients=[clientID], transient=True)
#MUTE
elif m == '/mute':
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
if (ID not in [None, 'null']):
try:
chatFilter.chatCoolDown[ID] = a[1] * 60
sendError(f'{name} muted for {str(a[1])} minutes.')
commandSuccess = True
except:
chatFilter.chatCoolDown[ID] = 99999 * 60
sendError(f'{name} muted until server restarts.')
commandSuccess = True
else:
sendError(f"{name} is already muted", clientID)
except:
ba.screenmessage(f"Usage: /mute <ClientId/Name> <Minutes>", clients=[clientID], transient=True)
#UN MUTE
elif m == '/unmute':
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
if (ID not in [None, 'null']) and (ID in chatFilter.chatCoolDown) and (chatFilter.chatCoolDown[ID] > 3):
chatFilter.chatCoolDown.pop(ID)
_ba.chatmessage(f'Unmuted {name}')
commandSuccess = True
else:
sendError(f"{name} is not muted yet", clientID)
except:
ba.screenmessage(f"Usage: /unmute <ClientId/Name>", clients=[clientID], transient=True)
#KICK
elif m == '/kick':
if a == []:
ba.screenmessage(f"Using: /kick [name/ClientID]", clients=[clientID], transient=True)
else:
if len(a[0]) > 3:
self.kickByNick(a[0])
else:
try:
s = int(a[0])
_ba.disconnect_client(int(a[0]))
except:
self.kickByNick(a[0])
commandSuccess=True
#KICK
elif m == '/kickall':
try:
for i in ros:
if i['client_id'] != clientID:
_ba.disconnect_client(i['client_id'])
commandSuccess=True
except:
pass
#REMOVE
elif m == '/remove':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /removeall [or] /remove [PlayerID]", transient=True, clients=[clientID])
else:
ba.getactivity().remove_player(splayers[int(a[0])])
commandSuccess=True
#REMOVEALL
elif m == '/removeall':
for i in splayers:
try:
ba.getactivity().remove_player(i)
except:
pass
commandSuccess=True
#SHATTER
elif m == '/shatter':
if a == [] or a[0] == 'all':
ba.screenmessage(f"Using: /shatterall [or] /shatter [PlayerID]", transient=True, clients=[clientID])
else:
players[int(a[0])].actor.node.shattered = int(a[1])
commandSuccess=True
#SHATTERALL
elif m == '/shatterall':
for i in players:
i.actor.node.shattered = int(a[1])
commandSuccess=True
#QUIT
elif m in ('/quit', '/restart', '/restartserver'):
_ba.chatmessage("Server Restarting, Please Join in a moment !")
commandSuccess=True
_ba.quit()
#AC
elif m == '/ac':
try:
if a[0] == 'r':
m = 1.3 if a[1] is None else float(a[1])
s = 1000 if a[2] is None else float(a[2])
ba.animate_array(activity.globalsnode, 'ambient_color',3, {0: (1*m,0,0), s: (0,1*m,0),s*2:(0,0,1*m),s*3:(1*m,0,0)},True)
commandSuccess=True
else:
try:
if a[1] is not None:
activity.globalsnode.ambient_color = (float(a[0]),float(a[1]),float(a[2]))
commandSuccess=True
except:
pass
except:
ba.screenmessage(f"Using: '/ac [Red] [Green] [Blue]' or '/ac r [brightness] [speed]'", transient=True, clients=[clientID])
#TINT
elif m == '/tint':
try:
if a[0] == 'r':
m = 1.3 if a[1] is None else float(a[1])
s = 1000 if a[2] is None else float(a[2])
ba.animate_array(activity.globalsnode, 'tint',3, {0: (1*m,0,0), s: (0,1*m,0),s*2:(0,0,1*m),s*3:(1*m,0,0)},True)
commandSuccess=True
else:
if a[1] is not None:
activity.globalsnode.tint = (float(a[0]),float(a[1]),float(a[2]))
commandSuccess=True
else:
pass
except:
ba.screenmessage(f"Using: '/tint [Red] [Green] [Blue]' or '/tint r [brightness] [speed]'", transient=True, clients=[clientID])
#REFLECTIONS
elif m.startswith('/reflectionNotAvail'):
if a == [] or len(a) < 2:
ba.screenmessage(f"Using: /reflections [type(1/0)] [scale]", transient=True, clients=[clientID])
rs = [int(a[1])]
type = 'soft' if int(a[0]) == 0 else 'powerup'
try:
_ba.get_foreground_host_activity().getMap().node.reflection = type
_ba.get_foreground_host_activity().getMap().node.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().bg.reflection = type
_ba.get_foreground_host_activity().getMap().bg.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().floor.reflection = type
_ba.get_foreground_host_activity().getMap().floor.reflectionScale = rs
except:
pass
try:
_ba.get_foreground_host_activity().getMap().center.reflection = type
_ba.get_foreground_host_activity().getMap().center.reflectionScale = rs
except:
pass
commandSuccess=True
#FLOOR REFLECTION
elif m.startswith('/floorreflectionNotAvail'):
bs.getSharedObject('globals').floorReflection = bs.getSharedObject('globals').floorReflection == False
commandSuccess=True
#ICY or EXCHANGE
elif m in ('/exchange','/icy'):
try:
if True:
try:
player1 = int(a[0])
except:
player1 = playerIdFromNick(a[0])
try:
player2 = int(a[1])
except:
player2 = playerIdFromNick(a[1])
node1 = players[player1].actor.node
node2 = players[player2].actor.node
players[player1].actor.node = node2
players[player2].actor.node = node1
commandSuccess = True
except:
ba.screenmessage(f"Using: /exchange [PlayerID1] [PlayerID2]", transient=True, clients=[clientID])
#ICEOFF or HOCKEY
elif m in ('/hockey','/iceoff'):
try:
activity.getMap().isHockey = activity.getMap().isHockey == False
except:
pass
for i in players:
i.actor.node.hockey = i.actor.node.hockey == False
commandSuccess = True
#VIP
elif m == '/vip':
try:
clID = int(a[0])
updated = roles.vips
ros = _ba.get_game_roster()
for i in ros:
if (i is not None) and (i != {}) and (i['client_id'] == clID):
name = i['players'][0]['name']
newID = i['account_id']
if a[1] == 'add':
if newID not in updated:
roles.vips.append(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already a vip !",clientID)
elif a[1] == 'remove':
if newID in updated:
roles.vips.remove(newID)
commandSuccess=True
else: sendError(f"{str(name)}, is already not a vip !",clientID)
updated = roles.vips
if (len(a) > 2) and (uniqueID in roles.owners) and commandSuccess:
if str(a[2]).startswith('perm'):
#Add them to members.json (log)
m = open(membersFile, 'r')
d = json.loads(m)
if (newID not in d['vips']): d['vips'][newID] = []
if (name not in d['vips'][newID]): d['vips'][newID].append(name)
m2 = open(membersFile, 'w')
m2.write(json.dumps(d, indent=4))
m2.close()
#Add them to roles.py
with open(python_path + '/roles.py') as (file):
s = [ row for row in file ]
s[8] = 'vips = ' + str(updated) + '\n'
f = open(python_path + '/roles.py', 'w')
for i in s:
f.write(i)
f.close()
except:
ba.screenmessage(f"Using: /vip [ClientID] add/remove perm/None", clients=[clientID], transient=True)
#MAXPLAYERS
elif m.startswith('/maxplayer'):
if a == []:
ba.screenmessage(f"Using: /maxplayers [count]", clients=[clientID], transient=True)
else:
try:
_ba.get_foreground_host_().max_players = int(a[0])
_ba.set_public_party_max_size(int(a[0]))
_ba.chatmessage(f"MaxPlayers limit set to {str(int(a[0]))}")
commandSuccess=True
except:
pass
#SAY (Send Chat Message in Server's name)
elif m == "/say":
if a == []:
ba.screenmessage('Usage: /say <text to send>', transient=True, clients=[clientID])
else:
message = " ".join(a)
_ba.chatmessage(message)
#################### OWNER COMMANDS ########################
#KICK VOTE
elif m == '/kickvote':
try:
if a[0] in ('enable','yes','true'): _ba.set_enable_default_kick_voting(True)
if a[0] in ('disable','no','false'): _ba.set_enable_default_kick_voting(False)
commandSuccess = True
except:
ba.screenmessage(f"Using: /kickvote [enable/yes/true or disable/no/false]", clients=[clientID], transient=True)
#TOP
elif m == '/top':
try:
temp_limit = int(a[0])
temp_toppers = []
f = open(statsFile, 'r')
temp_stats = json.loads(f.read())
for i in range(1,limit+1):
for id in temp_stats:
if int(temp_stats[id]['rank'])==i: temp_toppers.append(id)
if temp_toppers != []:
for account_id in temp_toppers:
temp_name = temp_stats[account_id]['name_html']
#print(temp_toppers.index(account_id)+1,temp_name[temp_name.find('>')+1:].encode('utf8'),temp_stats[account_id]['scores'])
_ba.chatmessage("{0}. {1} -----> {2}".format(temp_toppers.index(account_id)+1,temp_name[temp_name.find('>')+1:].encode('utf8'),temp_stats[account_id]['scores']))
commandSuccess=True
f.close()
except:
sendError('Usage: /top <range>',clientID)
#SETSCORE
elif m in ['/setscore','/reset']:
try:
temp_rank = int(a[0])
temp_stats = getStats()
for id in temp_stats:
if int(temp_stats[id]['rank']) == temp_rank: ID = id
f.close()
temp_name = temp_stats[ID]['name_html']
temp_name = temp_name[temp_name.find('>')+1:].encode('utf-8')
try:
temp_score = int(a[1])
except:
temp_score = 0
stats[ID]['score'] = temp_score
_ba.chatmessage("{}'s score set to {}".format(temp_name,temp_score))
#backup
from shutil import copyfile
src = statsFile
from datetime import datetime
now = datetime.now().strftime('%d-%m %H:%M:%S')
dst = 'stats.bak---' + now
copyfile(src,dst)
#write new stats
f = open(statsFile, 'w')
f.write(json.dumps(temp_stats))
f.close()
'''
import mystats
mystats.refreshStats()
'''
commandSuccess=True
except:
sendError('Usage: /reset <rank of player> (optional:newScore)',clientID)
#WARN
elif m == "/warn":
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['displayString']
name = ID
try:
name = i['players'][0]['name']
except:
pass
import chatFilter
warnCount = chatFilter.warn(ID)
if warnCount < 3:
bsInternal._chatMessage("Warning {str(name)}.")
for i in range(3):
sendError('Warning!!!!',clID)
sendError("Warn count: % 1d/3"%(warnCount),clID)
else:
chatFilter.abusers.pop(ID)
_ba.chatmessage(f"Warn limit exceeded. Kicking {str(name)}.")
_ba.chatmessage("Warn system Made By Aleena")
_ba.chatmessage(clID)
commandSuccess = True
except:
ba.screenmessage('Usage: /warn <client_id or name>', transient=True, clients=[clientID])
#CLEAR WARN
elif m.startswith("/clearwarn"):
import chatFilter
try:
try:
clID = int(a[0])
except:
clID = clientIdFromNick(str(a[0]))
ID = None
for i in _ba.get_game_roster():
if i['clientID'] == clID:
ID = i['account_id']
name = i['display_string']
chatFilter.abusers.pop(ID)
_ba.chatmessage(f"{name} has been removed from Abuse/Warn List")
commandSuccess = True
except:
ba.screenmessage('Usage: /clearwarn <client_id or name>', transient=True, clients=[clientID])
#WHOINQUEUE
elif m == '/whoinqueue':
def _onQueueQueryResult(result):
    # Chat-command callback: announce who is waiting in the party queue.
    # `result` is the PARTY_QUEUE_QUERY transaction response; result['e']
    # presumably holds one entry per queued client with the display name at
    # index 3 — NOTE(review): confirm against the server transaction API.
    from queueChecker import queueID
    #print result, ' is result'
    inQueue = result['e']
    #print inQueue, ' is inQueue'
    string = 'No one '
    if inQueue != []:
        string = ''
        for queue in inQueue:
            #print queue[3]
            string += queue[3] + ' '
    _ba.chatmessage(f"{string} is in the queue")
_ba.add_transaction(
{'type': 'PARTY_QUEUE_QUERY', 'q': queueID},callback=ba.Call(_onQueueQueryResult))
_ba.run_transactions()
commandSuccess=True
#TEXT
elif m in ('/text', '/texts'):
from BsTextOnMap import texts
if a == []:
ba.screenmessage(f"Usage: /text showall or /text add [text] or /text del [textnumber]", clients=[clientID], transient=True)
elif a[0] == 'add' and len(a)>1:
#get whole sentence from argument list
newText = u''
for i in range(1,len(a)):
newText += a[i] + ' '
#print newText
texts.append(newText)
#write to file
with open(python_path + '/BsTextOnMap.py') as (file):
s = [ row for row in file ]
s[0] = 'texts = ' + str(texts) + '\n'
f = open(python_path + '/BsTextOnMap.py', 'w')
for i in s:
f.write(i)
# ---------------------------------------------------------------------------
# graphite_influxdb.py — InfluxDB-backed finder for graphite-api/graphite-web
# (source repo: ConnectionMaster/graphite-influxdb)
# ---------------------------------------------------------------------------
import re
import time
import logging
from logging.handlers import TimedRotatingFileHandler
import datetime
from influxdb import InfluxDBClient
try:
import statsd
except ImportError:
pass
logger = logging.getLogger('graphite_influxdb')
try:
from graphite_api.intervals import Interval, IntervalSet
from graphite_api.node import LeafNode, BranchNode
except ImportError:
try:
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
except ImportError:
raise SystemExit(1, "You have neither graphite_api nor \
the graphite webapp in your pythonpath")
# Tell influxdb to return time as seconds from epoch
_INFLUXDB_CLIENT_PARAMS = {'epoch' : 's'}
class NullStatsd():
    """No-op stand-in for a statsd client.

    Used when no statsd configuration is present or the 'statsd' module is
    not installed. Implements the subset of the statsd API this module uses:
    timer() (usable both as a context manager and via start()/stop()) and
    timing() — all as no-ops.
    """
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # 'type' shadows the builtin, but renaming would change the interface.
        pass
    def timer(self, key, val=None):
        # Returning self lets callers use the result as a context manager
        # or call start()/stop() on it, mirroring statsd's Timer object.
        return self
    def timing(self, key, val):
        pass
    def start(self):
        pass
    def stop(self):
        pass
def normalize_config(config=None):
    """Flatten configuration into a plain settings dict.

    When *config* (a graphite-api style mapping) is given, read from its
    'influxdb', 'es' and 'statsd' sections; otherwise fall back to Django
    settings (graphite-web). Returns influxdb connection info, elasticsearch
    options, logging options and, when present, the statsd configuration.
    """
    ret = {}
    if config is not None:
        cfg = config.get('influxdb', {})
        ret['host'] = cfg.get('host', 'localhost')
        ret['port'] = cfg.get('port', 8086)
        ret['user'] = cfg.get('user', 'graphite')
        # Default matches the django branch below (was a corrupted placeholder).
        ret['passw'] = cfg.get('pass', 'graphite')
        ret['db'] = cfg.get('db', 'graphite')
        ssl = cfg.get('ssl', False)
        # Accept a real YAML boolean as well as the legacy string 'true'
        # (the old `ssl == 'true'` silently disabled SSL for `ssl: true`).
        ret['ssl'] = ssl in (True, 'true')
        ret['schema'] = cfg.get('schema', [])
        ret['log_file'] = cfg.get('log_file', None)
        ret['log_level'] = cfg.get('log_level', 'info')
        cfg = config.get('es', {})
        ret['es_enabled'] = cfg.get('enabled', False)
        ret['es_index'] = cfg.get('index', 'graphite_metrics2')
        ret['es_hosts'] = cfg.get('hosts', ['localhost:9200'])
        ret['es_field'] = cfg.get('field', '_id')
        if config.get('statsd', None):
            ret['statsd'] = config.get('statsd')
    else:
        # graphite-web: no config mapping, read Django settings instead.
        from django.conf import settings
        ret['host'] = getattr(settings, 'INFLUXDB_HOST', 'localhost')
        ret['port'] = getattr(settings, 'INFLUXDB_PORT', 8086)
        ret['user'] = getattr(settings, 'INFLUXDB_USER', 'graphite')
        ret['passw'] = getattr(settings, 'INFLUXDB_PASS', 'graphite')
        ret['db'] = getattr(settings, 'INFLUXDB_DB', 'graphite')
        ssl = getattr(settings, 'INFLUXDB_SSL', False)
        ret['ssl'] = ssl in (True, 'true')
        ret['schema'] = getattr(settings, 'INFLUXDB_SCHEMA', [])
        ret['log_file'] = getattr(
            settings, 'INFLUXDB_LOG_FILE', None)
        # Default log level is 'info'
        ret['log_level'] = getattr(
            settings, 'INFLUXDB_LOG_LEVEL', 'info')
        ret['es_enabled'] = getattr(settings, 'ES_ENABLED', False)
        ret['es_index'] = getattr(settings, 'ES_INDEX', 'graphite_metrics2')
        ret['es_hosts'] = getattr(settings, 'ES_HOSTS', ['localhost:9200'])
        ret['es_field'] = getattr(settings, 'ES_FIELD', '_id')
    return ret
def _make_graphite_api_points_list(influxdb_data):
    """Make graphite-api data points dictionary from Influxdb ResultSet data"""
    points = {}
    for key in influxdb_data.keys():
        # ResultSet keys are tuples; the first element is the series name.
        series_name = key[0]
        points[series_name] = [
            (datetime.datetime.fromtimestamp(float(point['time'])), point['value'])
            for point in influxdb_data.get_points(series_name)
        ]
    return points
class InfluxdbReader(object):
    """graphite-api Reader: fetches datapoints for one series from InfluxDB."""
    __slots__ = ('client', 'path', 'step', 'statsd_client')
    def __init__(self, client, path, step, statsd_client):
        self.client = client                # InfluxDBClient instance
        self.path = path                    # graphite metric path == influx series name
        self.step = step                    # resolution in seconds (from schema match)
        self.statsd_client = statsd_client  # real statsd client or NullStatsd
    def fetch(self, start_time, end_time):
        """Return ((start, end, step), [values]) for the given time window.

        On unreadable or missing data this returns an empty value list
        instead of raising.
        """
        # in graphite,
        # from is exclusive (from=foo returns data at ts=foo+1 and higher)
        # until is inclusive (until=bar returns data at ts=bar and lower)
        # influx doesn't support <= and >= yet, hence the add.
        logger.debug("fetch() path=%s start_time=%s, end_time=%s, step=%d", self.path, start_time, end_time, self.step)
        with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.what_is_query_individual_duration'):
            _query = 'select mean(value) as value from "%s" where (time > %ds and time <= %ds) GROUP BY time(%ss)' % (
                self.path, start_time, end_time, self.step)
            logger.debug("fetch() path=%s querying influxdb query: '%s'", self.path, _query)
            data = self.client.query(_query, params=_INFLUXDB_CLIENT_PARAMS)
            logger.debug("fetch() path=%s returned data: %s", self.path, data)
        try:
            data = _make_graphite_api_points_list(data)
        except Exception:
            logger.debug("fetch() path=%s COULDN'T READ POINTS. SETTING TO EMPTY LIST", self.path)
            # BUG FIX: this used to be `data = []`, which made the list
            # comprehension below raise TypeError (indexing a list with a str).
            data = {}
        time_info = start_time, end_time, self.step
        # .get(): influx omits series that had no points in the window,
        # so an absent key means "no values", not an error.
        return time_info, [v[1] for v in data.get(self.path, [])]
    def get_intervals(self):
        """Advertise availability as 'everything up to now' (no retention info)."""
        now = int(time.time())
        return IntervalSet([Interval(1, now)])
class InfluxLeafNode(LeafNode):
    # Marker attribute: tells graphite-api that nodes of this type support
    # batched fetching through the finder's fetch_multi() ('influxdb' group).
    __fetch_multi__ = 'influxdb'
class InfluxdbFinder(object):
__fetch_multi__ = 'influxdb'
__slots__ = ('client', 'es', 'schemas', 'config', 'statsd_client')
def __init__(self, config=None):
# Shouldn't be trying imports in __init__.
# It turns what should be a load error into a runtime error
config = normalize_config(config)
self.config = config
self.client = InfluxDBClient(config['host'], config['port'], config['user'], config['passw'], config['db'], config['ssl'])
self.schemas = [(re.compile(patt), step) for (patt, step) in config['schema']]
try:
self.statsd_client = statsd.StatsClient(config['statsd'].get('host'),
config['statsd'].get('port', 8125)) \
if 'statsd' in config and config['statsd'].get('host') else NullStatsd()
except NameError:
logger.warning("Statsd client configuration present but 'statsd' module"
"not installed - ignoring statsd configuration..")
self.statsd_client = NullStatsd()
self._setup_logger(config['log_level'], config['log_file'])
self.es = None
if config['es_enabled']:
try:
from elasticsearch import Elasticsearch
except ImportError:
logger.warning("Elasticsearch configuration present but 'elasticsearch'"
"module not installed - ignoring elasticsearch configuration..")
else:
self.es = Elasticsearch(config['es_hosts'])
def _setup_logger(self, level, log_file):
"""Setup log level and log file if set"""
if logger.handlers:
return
level = getattr(logging, level.upper())
logger.setLevel(level)
formatter = logging.Formatter(
'[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')
handler = logging.StreamHandler()
logger.addHandler(handler)
handler.setFormatter(formatter)
if not log_file:
return
try:
handler = TimedRotatingFileHandler(log_file)
except IOError:
logger.error("Could not write to %s, falling back to stdout",
log_file)
else:
logger.addHandler(handler)
handler.setFormatter(formatter)
def assure_series(self, query):
key_series = "%s_series" % query.pattern
done = False
if self.es:
# note: ES always treats a regex as anchored at start and end
regex = self.compile_regex('{0}.*', query)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_elasticsearch.target_type_is_gauge.unit_is_ms.action_is_get_series'):
logger.debug("assure_series() Calling ES with regexp - %s", regex.pattern)
try:
res = self.es.search(index=self.config['es_index'],
size=10000,
body={
"query": {
"regexp": {
self.config['es_field']: regex.pattern,
},
},
"fields": [self.config['es_field']]
}
)
if res['_shards']['successful'] > 0:
# pprint(res['hits']['total'])
series = [hit['fields'][self.config['es_field']] for hit in res['hits']['hits']]
done = True
else:
logger.error("assure_series() Calling ES failed for %s: no successful shards", regex.pattern)
except Exception as e:
logger.error("assure_series() Calling ES failed for %s: %s", regex.pattern, e)
# if no ES configured, or ES failed, try influxdb.
if not done:
# regexes in influxdb are not assumed to be anchored, so anchor them explicitly
regex = self.compile_regex('^{0}', query)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.action_is_get_series'):
_query = "show series from /%s/" % regex.pattern
logger.debug("assure_series() Calling influxdb with query - %s", _query)
ret = self.client.query(_query, params=_INFLUXDB_CLIENT_PARAMS)
# as long as influxdb doesn't have good safeguards against
# series with bad data in the metric names, we must filter out
# like so:
series = [key_name for [key_name] in ret.raw['series'][0]['values']]
return series
def compile_regex(self, fmt, query):
"""Turn glob (graphite) queries into compiled regex
* becomes .*
. becomes \.
fmt argument is so that caller can control anchoring (must contain exactly 1 {0} !"""
return re.compile(fmt.format(
query.pattern.replace('.', '\.').replace('*', '[^\.]*').replace(
'{', '(').replace(',', '|').replace('}', ')')
))
def get_leaves(self, query):
key_leaves = "%s_leaves" % query.pattern
series = self.assure_series(query)
regex = self.compile_regex('^{0}$', query)
logger.debug("get_leaves() key %s", key_leaves)
timer = self.statsd_client.timer('service_is_graphite-api.action_is_find_leaves.target_type_is_gauge.unit_is_ms')
now = datetime.datetime.now()
timer.start()
# return every matching series and its
# resolution (based on first pattern match in schema, fallback to 60s)
leaves = [(name, next((res for (patt, res) in self.schemas if patt.match(name)), 60))
for name in series if regex.match(name)
]
timer.stop()
end = datetime.datetime.now()
dt = end - now
logger.debug("get_leaves() key %s Finished find_leaves in %s.%ss",
key_leaves,
dt.seconds,
dt.microseconds)
return leaves
def get_branches(self, query):
seen_branches = set()
key_branches = "%s_branches" % query.pattern
# Very inefficient call to list
series = self.assure_series(query)
regex = self.compile_regex('^{0}$', query)
logger.debug("get_branches() %s", key_branches)
timer = self.statsd_client.timer('service_is_graphite-api.action_is_find_branches.target_type_is_gauge.unit_is_ms')
start_time = datetime.datetime.now()
timer.start()
branches = []
for name in series:
while '.' in name:
name = name.rsplit('.', 1)[0]
if name not in seen_branches:
seen_branches.add(name)
if regex.match(name) is not None:
logger.debug("get_branches() %s found branch name: %s", key_branches, name)
branches.append(name)
timer.stop()
end_time = datetime.datetime.now()
dt = end_time - start_time
logger.debug("get_branches() %s Finished find_branches in %s.%ss",
key_branches,
dt.seconds, dt.microseconds)
return branches
def find_nodes(self, query):
logger.debug("find_nodes() query %s", query)
# TODO: once we can query influx better for retention periods, honor the start/end time in the FindQuery object
with self.statsd_client.timer('service_is_graphite-api.action_is_yield_nodes.target_type_is_gauge.unit_is_ms.what_is_query_duration'):
for (name, res) in self.get_leaves(query):
yield InfluxLeafNode(name, InfluxdbReader(
self.client, name, res, self.statsd_client))
for name in self.get_branches(query):
logger.debug("Yielding branch %s" % (name,))
yield BranchNode(name)
def fetch_multi(self, nodes, start_time, end_time):
series = ', '.join(['"%s"' % node.path for node in nodes])
# use the step of the node that is the most coarse
# not sure if there's a better way? can we combine series
# with different steps (and use the optimal step for each?)
# probably not
step = max([node.reader.step for node in nodes])
query = 'select mean(value) as value from %s where (time > %ds and time <= %ds) GROUP BY time(%ss)' % (
series, start_time, end_time, step)
logger.debug('fetch_multi() query: %s', query)
logger.debug('fetch_multi() - start_time: %s - end_time: %s, step %s',
datetime.datetime.fromtimestamp(float(start_time)), datetime.datetime.fromtimestamp(float(end_time)), step)
with self.statsd_client.timer('service_is_graphite-api.ext_service_is_influxdb.target_type_is_gauge.unit_is_ms.action_is_select_datapoints'):
logger.debug("Calling influxdb multi fetch with query - %s", query)
data = self.client.query(query, params=_INFLUXDB_CLIENT_PARAMS)
logger.debug('fetch_multi() - Retrieved %d result set(s)', len(data))
data = _make_graphite_api_points_list(data)
# some series we requested might not be in the resultset.
# this is because influx doesn't include series that had no values
# this is a behavior that some people actually appreciate when graphing, but graphite doesn't do this (yet),
# and we want to look the same, so we must add those back in.
# a better reason though, is because for advanced alerting cases like bosun, you want all | |
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.update_buildpack_ex_async(request, headers, runtime)
def update_buildpack_ex(
    self,
    request: deps_models.UpdateBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.UpdateBuildpackResponse:
    """
    Description: Update a buildpack (tech stack).
    Summary: Update a buildpack.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.UpdateBuildpackResponse().from_map(raw)
async def update_buildpack_ex_async(
    self,
    request: deps_models.UpdateBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.UpdateBuildpackResponse:
    """
    Description: Update a buildpack (tech stack).
    Summary: Update a buildpack.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.UpdateBuildpackResponse().from_map(raw)
def query_buildpack_findbynames(
    self,
    request: deps_models.QueryBuildpackFindbynamesRequest,
) -> deps_models.QueryBuildpackFindbynamesResponse:
    """
    Description: Query buildpacks by their names.
    Summary: Query buildpacks by names.
    """
    # Delegate with empty headers and default runtime options.
    return self.query_buildpack_findbynames_ex(request, {}, util_models.RuntimeOptions())
async def query_buildpack_findbynames_async(
    self,
    request: deps_models.QueryBuildpackFindbynamesRequest,
) -> deps_models.QueryBuildpackFindbynamesResponse:
    """
    Description: Query buildpacks by their names.
    Summary: Query buildpacks by names.
    """
    # Delegate with empty headers and default runtime options.
    return await self.query_buildpack_findbynames_ex_async(request, {}, util_models.RuntimeOptions())
def query_buildpack_findbynames_ex(
    self,
    request: deps_models.QueryBuildpackFindbynamesRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbynamesResponse:
    """
    Description: Query buildpacks by their names.
    Summary: Query buildpacks by names.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.findbynames.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.QueryBuildpackFindbynamesResponse().from_map(raw)
async def query_buildpack_findbynames_ex_async(
    self,
    request: deps_models.QueryBuildpackFindbynamesRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbynamesResponse:
    """
    Description: Query buildpacks by their names.
    Summary: Query buildpacks by names.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbynames.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.QueryBuildpackFindbynamesResponse().from_map(raw)
def count_buildpack(
    self,
    request: deps_models.CountBuildpackRequest,
) -> deps_models.CountBuildpackResponse:
    """
    Description: Count buildpacks matching the request.
    Summary: Count buildpacks.
    """
    # Delegate with empty headers and default runtime options.
    return self.count_buildpack_ex(request, {}, util_models.RuntimeOptions())
async def count_buildpack_async(
    self,
    request: deps_models.CountBuildpackRequest,
) -> deps_models.CountBuildpackResponse:
    """
    Description: Count buildpacks matching the request.
    Summary: Count buildpacks.
    """
    # Delegate with empty headers and default runtime options.
    return await self.count_buildpack_ex_async(request, {}, util_models.RuntimeOptions())
def count_buildpack_ex(
    self,
    request: deps_models.CountBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.CountBuildpackResponse:
    """
    Description: Count buildpacks matching the request.
    Summary: Count buildpacks.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.count', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.CountBuildpackResponse().from_map(raw)
async def count_buildpack_ex_async(
    self,
    request: deps_models.CountBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.CountBuildpackResponse:
    """
    Description: Count buildpacks matching the request.
    Summary: Count buildpacks.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.count', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.CountBuildpackResponse().from_map(raw)
def deprecate_buildpack(
    self,
    request: deps_models.DeprecateBuildpackRequest,
) -> deps_models.DeprecateBuildpackResponse:
    """
    Description: Deprecate a buildpack.
    Summary: Deprecate a buildpack.
    """
    # Delegate with empty headers and default runtime options.
    return self.deprecate_buildpack_ex(request, {}, util_models.RuntimeOptions())
async def deprecate_buildpack_async(
    self,
    request: deps_models.DeprecateBuildpackRequest,
) -> deps_models.DeprecateBuildpackResponse:
    """
    Description: Deprecate a buildpack.
    Summary: Deprecate a buildpack.
    """
    # Delegate with empty headers and default runtime options.
    return await self.deprecate_buildpack_ex_async(request, {}, util_models.RuntimeOptions())
def deprecate_buildpack_ex(
    self,
    request: deps_models.DeprecateBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.DeprecateBuildpackResponse:
    """
    Description: Deprecate a buildpack.
    Summary: Deprecate a buildpack.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.deprecate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.DeprecateBuildpackResponse().from_map(raw)
async def deprecate_buildpack_ex_async(
    self,
    request: deps_models.DeprecateBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.DeprecateBuildpackResponse:
    """
    Description: Deprecate a buildpack.
    Summary: Deprecate a buildpack.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.deprecate', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.DeprecateBuildpackResponse().from_map(raw)
def batchdelete_buildpack(
    self,
    request: deps_models.BatchdeleteBuildpackRequest,
) -> deps_models.BatchdeleteBuildpackResponse:
    """
    Description: Delete buildpacks in batch.
    Summary: Batch-delete buildpacks.
    """
    # Delegate with empty headers and default runtime options.
    return self.batchdelete_buildpack_ex(request, {}, util_models.RuntimeOptions())
async def batchdelete_buildpack_async(
    self,
    request: deps_models.BatchdeleteBuildpackRequest,
) -> deps_models.BatchdeleteBuildpackResponse:
    """
    Description: Delete buildpacks in batch.
    Summary: Batch-delete buildpacks.
    """
    # Delegate with empty headers and default runtime options.
    return await self.batchdelete_buildpack_ex_async(request, {}, util_models.RuntimeOptions())
def batchdelete_buildpack_ex(
    self,
    request: deps_models.BatchdeleteBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.BatchdeleteBuildpackResponse:
    """
    Description: Delete buildpacks in batch.
    Summary: Batch-delete buildpacks.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.batchdelete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.BatchdeleteBuildpackResponse().from_map(raw)
async def batchdelete_buildpack_ex_async(
    self,
    request: deps_models.BatchdeleteBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.BatchdeleteBuildpackResponse:
    """
    Description: Delete buildpacks in batch.
    Summary: Batch-delete buildpacks.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.batchdelete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.BatchdeleteBuildpackResponse().from_map(raw)
def exist_buildpack(
    self,
    request: deps_models.ExistBuildpackRequest,
) -> deps_models.ExistBuildpackResponse:
    """
    Description: Check whether a buildpack exists.
    Summary: Check buildpack existence.
    """
    # Delegate with empty headers and default runtime options.
    return self.exist_buildpack_ex(request, {}, util_models.RuntimeOptions())
async def exist_buildpack_async(
    self,
    request: deps_models.ExistBuildpackRequest,
) -> deps_models.ExistBuildpackResponse:
    """
    Description: Check whether a buildpack exists.
    Summary: Check buildpack existence.
    """
    # Delegate with empty headers and default runtime options.
    return await self.exist_buildpack_ex_async(request, {}, util_models.RuntimeOptions())
def exist_buildpack_ex(
    self,
    request: deps_models.ExistBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.ExistBuildpackResponse:
    """
    Description: Check whether a buildpack exists.
    Summary: Check buildpack existence.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.ExistBuildpackResponse().from_map(raw)
async def exist_buildpack_ex_async(
    self,
    request: deps_models.ExistBuildpackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.ExistBuildpackResponse:
    """
    Description: Check whether a buildpack exists.
    Summary: Check buildpack existence.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.exist', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.ExistBuildpackResponse().from_map(raw)
def update_buildpack_updatepackagesstatus(
    self,
    request: deps_models.UpdateBuildpackUpdatepackagesstatusRequest,
) -> deps_models.UpdateBuildpackUpdatepackagesstatusResponse:
    """
    Description: Update the packaging status of a buildpack.
    Summary: Update packaging status.
    """
    # Delegate with empty headers and default runtime options.
    return self.update_buildpack_updatepackagesstatus_ex(request, {}, util_models.RuntimeOptions())
async def update_buildpack_updatepackagesstatus_async(
    self,
    request: deps_models.UpdateBuildpackUpdatepackagesstatusRequest,
) -> deps_models.UpdateBuildpackUpdatepackagesstatusResponse:
    """
    Description: Update the packaging status of a buildpack.
    Summary: Update packaging status.
    """
    # Delegate with empty headers and default runtime options.
    return await self.update_buildpack_updatepackagesstatus_ex_async(request, {}, util_models.RuntimeOptions())
def update_buildpack_updatepackagesstatus_ex(
    self,
    request: deps_models.UpdateBuildpackUpdatepackagesstatusRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.UpdateBuildpackUpdatepackagesstatusResponse:
    """
    Description: Update the packaging status of a buildpack.
    Summary: Update packaging status.
    """
    UtilClient.validate_model(request)
    raw = self.do_request('1.0', 'antcloud.deps.buildpack.updatepackagesstatus.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.UpdateBuildpackUpdatepackagesstatusResponse().from_map(raw)
async def update_buildpack_updatepackagesstatus_ex_async(
    self,
    request: deps_models.UpdateBuildpackUpdatepackagesstatusRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.UpdateBuildpackUpdatepackagesstatusResponse:
    """
    Description: Update the packaging status of a buildpack.
    Summary: Update packaging status.
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async('1.0', 'antcloud.deps.buildpack.updatepackagesstatus.update', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
    return deps_models.UpdateBuildpackUpdatepackagesstatusResponse().from_map(raw)
def query_buildpack_findidversionpairsbyversion(
    self,
    request: deps_models.QueryBuildpackFindidversionpairsbyversionRequest,
) -> deps_models.QueryBuildpackFindidversionpairsbyversionResponse:
    """Query id/version pairs by version (synchronous).

    Description: query versions
    Summary: query versions
    """
    # Delegate to the _ex variant with default runtime options and no extra headers.
    return self.query_buildpack_findidversionpairsbyversion_ex(
        request, {}, util_models.RuntimeOptions()
    )

async def query_buildpack_findidversionpairsbyversion_async(
    self,
    request: deps_models.QueryBuildpackFindidversionpairsbyversionRequest,
) -> deps_models.QueryBuildpackFindidversionpairsbyversionResponse:
    """Query id/version pairs by version (asynchronous).

    Description: query versions
    Summary: query versions
    """
    return await self.query_buildpack_findidversionpairsbyversion_ex_async(
        request, {}, util_models.RuntimeOptions()
    )

def query_buildpack_findidversionpairsbyversion_ex(
    self,
    request: deps_models.QueryBuildpackFindidversionpairsbyversionRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindidversionpairsbyversionResponse:
    """Query id/version pairs by version, with explicit headers/runtime (synchronous).

    Description: query versions
    Summary: query versions
    """
    UtilClient.validate_model(request)
    raw = self.do_request(
        '1.0',
        'antcloud.deps.buildpack.findidversionpairsbyversion.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindidversionpairsbyversionResponse().from_map(raw)

async def query_buildpack_findidversionpairsbyversion_ex_async(
    self,
    request: deps_models.QueryBuildpackFindidversionpairsbyversionRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindidversionpairsbyversionResponse:
    """Query id/version pairs by version, with explicit headers/runtime (asynchronous).

    Description: query versions
    Summary: query versions
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async(
        '1.0',
        'antcloud.deps.buildpack.findidversionpairsbyversion.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindidversionpairsbyversionResponse().from_map(raw)
def query_buildpack_groupbytechstack(
    self,
    request: deps_models.QueryBuildpackGroupbytechstackRequest,
) -> deps_models.QueryBuildpackGroupbytechstackResponse:
    """Query build packs grouped by tech stack (synchronous).

    Description: query grouped by tech stack
    Summary: query grouped by tech stack
    """
    # Delegate to the _ex variant with default runtime options and no extra headers.
    return self.query_buildpack_groupbytechstack_ex(
        request, {}, util_models.RuntimeOptions()
    )

async def query_buildpack_groupbytechstack_async(
    self,
    request: deps_models.QueryBuildpackGroupbytechstackRequest,
) -> deps_models.QueryBuildpackGroupbytechstackResponse:
    """Query build packs grouped by tech stack (asynchronous).

    Description: query grouped by tech stack
    Summary: query grouped by tech stack
    """
    return await self.query_buildpack_groupbytechstack_ex_async(
        request, {}, util_models.RuntimeOptions()
    )

def query_buildpack_groupbytechstack_ex(
    self,
    request: deps_models.QueryBuildpackGroupbytechstackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackGroupbytechstackResponse:
    """Query build packs grouped by tech stack, with explicit headers/runtime (synchronous).

    Description: query grouped by tech stack
    Summary: query grouped by tech stack
    """
    UtilClient.validate_model(request)
    raw = self.do_request(
        '1.0',
        'antcloud.deps.buildpack.groupbytechstack.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackGroupbytechstackResponse().from_map(raw)

async def query_buildpack_groupbytechstack_ex_async(
    self,
    request: deps_models.QueryBuildpackGroupbytechstackRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackGroupbytechstackResponse:
    """Query build packs grouped by tech stack, with explicit headers/runtime (asynchronous).

    Description: query grouped by tech stack
    Summary: query grouped by tech stack
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async(
        '1.0',
        'antcloud.deps.buildpack.groupbytechstack.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackGroupbytechstackResponse().from_map(raw)
def query_buildpack_findvisibletechstacks(
    self,
    request: deps_models.QueryBuildpackFindvisibletechstacksRequest,
) -> deps_models.QueryBuildpackFindvisibletechstacksResponse:
    """Query the tech stacks visible to the caller (synchronous).

    Description: query visible tech stacks
    Summary: query visible tech stacks
    """
    # Delegate to the _ex variant with default runtime options and no extra headers.
    return self.query_buildpack_findvisibletechstacks_ex(
        request, {}, util_models.RuntimeOptions()
    )

async def query_buildpack_findvisibletechstacks_async(
    self,
    request: deps_models.QueryBuildpackFindvisibletechstacksRequest,
) -> deps_models.QueryBuildpackFindvisibletechstacksResponse:
    """Query the tech stacks visible to the caller (asynchronous).

    Description: query visible tech stacks
    Summary: query visible tech stacks
    """
    return await self.query_buildpack_findvisibletechstacks_ex_async(
        request, {}, util_models.RuntimeOptions()
    )

def query_buildpack_findvisibletechstacks_ex(
    self,
    request: deps_models.QueryBuildpackFindvisibletechstacksRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindvisibletechstacksResponse:
    """Query the visible tech stacks, with explicit headers/runtime (synchronous).

    Description: query visible tech stacks
    Summary: query visible tech stacks
    """
    UtilClient.validate_model(request)
    raw = self.do_request(
        '1.0',
        'antcloud.deps.buildpack.findvisibletechstacks.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindvisibletechstacksResponse().from_map(raw)

async def query_buildpack_findvisibletechstacks_ex_async(
    self,
    request: deps_models.QueryBuildpackFindvisibletechstacksRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindvisibletechstacksResponse:
    """Query the visible tech stacks, with explicit headers/runtime (asynchronous).

    Description: query visible tech stacks
    Summary: query visible tech stacks
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async(
        '1.0',
        'antcloud.deps.buildpack.findvisibletechstacks.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindvisibletechstacksResponse().from_map(raw)
def query_buildpack_findwritabletechstacks(
    self,
    request: deps_models.QueryBuildpackFindwritabletechstacksRequest,
) -> deps_models.QueryBuildpackFindwritabletechstacksResponse:
    """Query the tech stacks writable by the caller (synchronous).

    Description: query writable tech stacks
    Summary: query writable tech stacks
    """
    # Delegate to the _ex variant with default runtime options and no extra headers.
    return self.query_buildpack_findwritabletechstacks_ex(
        request, {}, util_models.RuntimeOptions()
    )

async def query_buildpack_findwritabletechstacks_async(
    self,
    request: deps_models.QueryBuildpackFindwritabletechstacksRequest,
) -> deps_models.QueryBuildpackFindwritabletechstacksResponse:
    """Query the tech stacks writable by the caller (asynchronous).

    Description: query writable tech stacks
    Summary: query writable tech stacks
    """
    return await self.query_buildpack_findwritabletechstacks_ex_async(
        request, {}, util_models.RuntimeOptions()
    )

def query_buildpack_findwritabletechstacks_ex(
    self,
    request: deps_models.QueryBuildpackFindwritabletechstacksRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindwritabletechstacksResponse:
    """Query the writable tech stacks, with explicit headers/runtime (synchronous).

    Description: query writable tech stacks
    Summary: query writable tech stacks
    """
    UtilClient.validate_model(request)
    raw = self.do_request(
        '1.0',
        'antcloud.deps.buildpack.findwritabletechstacks.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindwritabletechstacksResponse().from_map(raw)

async def query_buildpack_findwritabletechstacks_ex_async(
    self,
    request: deps_models.QueryBuildpackFindwritabletechstacksRequest,
    headers: Dict[str, str],
    runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindwritabletechstacksResponse:
    """Query the writable tech stacks, with explicit headers/runtime (asynchronous).

    Description: query writable tech stacks
    Summary: query writable tech stacks
    """
    UtilClient.validate_model(request)
    raw = await self.do_request_async(
        '1.0',
        'antcloud.deps.buildpack.findwritabletechstacks.query',
        'HTTPS',
        'POST',
        '/gateway.do',
        TeaCore.to_map(request),
        headers,
        runtime,
    )
    return deps_models.QueryBuildpackFindwritabletechstacksResponse().from_map(raw)
def query_buildpack_findosbycurrentcloud(
    self,
    request: deps_models.QueryBuildpackFindosbycurrentcloudRequest,
) -> deps_models.QueryBuildpackFindosbycurrentcloudResponse:
    """Query OS by the current cloud (synchronous).

    Description: query OS by current cloud
    Summary: query OS by current cloud
    """
    # Delegate to the _ex variant with default runtime options and no extra headers.
    return self.query_buildpack_findosbycurrentcloud_ex(
        request, {}, util_models.RuntimeOptions()
    )

async def query_buildpack_findosbycurrentcloud_async(
    self,
    request: deps_models.QueryBuildpackFindosbycurrentcloudRequest,
) -> deps_models.QueryBuildpackFindosbycurrentcloudResponse:
    """Query OS by the current cloud (asynchronous).

    Description: query OS by current cloud
    Summary: query OS by current cloud
    """
    return await self.query_buildpack_findosbycurrentcloud_ex_async(
        request, {}, util_models.RuntimeOptions()
    )
def query_buildpack_findosbycurrentcloud_ex(
self,
request: deps_models.QueryBuildpackFindosbycurrentcloudRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindosbycurrentcloudResponse:
"""
| |
<gh_stars>10-100
#!/usr/bin/env python
"""
CLI tool to runs various tasks related to QA.
"""
import os
import time
from pathlib import Path
import sys
import traceback
import json
import yaml
import uuid
import datetime
import click
from .run import RunContext
from .runners import runners, Job, JobGroup
from .runners.lsf import LsfPriority
from .conventions import batch_dir, batch_dir, make_batch_dir, make_batch_conf_dir, make_hash
from .conventions import serialize_config, deserialize_config, get_settings
from .utils import PathType, entrypoint_module, load_tuning_search
from .utils import save_outputs_manifest, total_storage
from .utils import redirect_std_streams
from .utils import getenvs
from .api import url_to_dir, print_url
from .api import get_outputs, notify_qa_database, serialize_paths
from .iterators import iter_inputs, iter_parameters
from .config import config_has_error, ignore_config_errors
from .config import project, project_root, subproject, config
from .config import default_batches_files, get_default_database, default_batch_label, default_platform
from .config import get_default_configuration, default_input_type
from .config import commit_id, outputs_commit, artifacts_commit, root_qatools, artifacts_commit_root, outputs_commit_root
from .config import user, is_ci, on_windows
@click.group()
@click.pass_context
@click.option('--platform', default=default_platform)
@click.option('--configuration', '--config', '-c', 'configurations', multiple=True, help="Will be passed to the run function")
@click.option('--label', '-l', default=default_batch_label, help="Gives tuning experiments a name.")
@click.option('--tuning', default=None, help="Extra parameters for tuning (JSON)")
@click.option('--tuning-filepath', type=PathType(), default=None, help="File with extra parameters for tuning")
@click.option('--dryrun', is_flag=True, help="Only show the commands that would be executed")
@click.option('--share', is_flag=True, help="Show outputs in QA-Board, doesn't just save them locally.")
@click.option('--database', type=PathType(), help="Input database location")
@click.option('--type', 'input_type', default=default_input_type, help="How we define inputs")
@click.option('--offline', is_flag=True, help="Do not notify QA-Board about run statuses.")
def qa(ctx, platform, configurations, label, tuning, tuning_filepath, dryrun, share, database, input_type, offline):
    """Entrypoint to running your algo, launching batchs...

    Root click group: validates the project configuration, normalizes the
    working directory to the repository root, and populates ``ctx.obj`` with
    everything the subcommands (run, get, ...) read.

    Side effects: may ``os.chdir`` to the repo root, sets ``os.umask(0)``,
    and updates ``os.environ`` from ENV entries found in configurations or
    tuning parameters.
    """
    # We want all paths to be relative to top-most qaboard.yaml
    # it should be located at the root of the git repository
    if config_has_error and not ignore_config_errors:
        click.secho('Please fix the error(s) above in qaboard.yaml', fg='red', err=True, bold=True)
        exit(1)
    # Click passes `ctx.obj` to downstream commands, we can use it as a scratchpad
    # http://click.pocoo.org/6/complex/
    ctx.obj = {}
    # Avoid the chdir (and its noisy message) for commands that don't need it.
    will_show_help = '-h' in sys.argv or '--help' in sys.argv
    noop_command = 'get' in sys.argv or 'init' in sys.argv
    if root_qatools and root_qatools != Path().resolve() and not will_show_help and not noop_command:
        ctx.obj['previous_cwd'] = os.getcwd()
        click.echo(click.style("Working directory changed to: ", fg='blue') + click.style(str(root_qatools), fg='blue', bold=True), err=True)
        os.chdir(root_qatools)
    # We want open permissions on outputs and artifacts
    # it makes collaboration among mutliple users / automated tools so much easier...
    os.umask(0)
    ctx.obj['project'] = project
    ctx.obj['project_root'] = project_root
    ctx.obj['subproject'] = subproject
    ctx.obj['HOST'] = os.environ.get('HOST', os.environ.get('HOSTNAME'))
    ctx.obj['user'] = user
    ctx.obj['dryrun'] = dryrun
    ctx.obj['share'] = share
    ctx.obj['offline'] = offline
    ctx.obj['outputs_commit'] = outputs_commit
    ctx.obj['artifacts_commit'] = artifacts_commit
    # Note: to support multiple databases per project,
    # either use / as database, or somehow we need to hash the db in the output path.
    ctx.obj['raw_batch_label'] = label
    # Shared runs are namespaced per-user so different users' labels don't collide.
    ctx.obj['batch_label'] = label if not share else f"@{user}| {label}"
    ctx.obj['platform'] = platform
    ctx.obj['input_type'] = input_type
    ctx.obj['inputs_settings'] = get_settings(input_type, config)
    ctx.obj['database'] = database if database else get_default_database(ctx.obj['inputs_settings'])
    # configuration singular is for backward compatibility to a time where there was a single str config
    ctx.obj['configuration'] = ':'.join(configurations) if configurations else get_default_configuration(ctx.obj['inputs_settings'])
    # we should refactor the str configuration away completly, and do a much simpler parsing, like
    # deserialize_config = lambda configurations: return [maybe_json_loads(c) for c in configurations]
    ctx.obj['configurations'] = deserialize_config(ctx.obj['configuration'])
    # Tuning parameters: --tuning (inline JSON) wins over --tuning-filepath.
    ctx.obj['extra_parameters'] = {}
    if tuning:
        ctx.obj['extra_parameters'] = json.loads(tuning)
    elif tuning_filepath:
        ctx.obj['tuning_filepath'] = tuning_filepath
        with tuning_filepath.open('r') as f:
            # File format is picked by extension: .yaml, .cde, anything else is JSON.
            if tuning_filepath.suffix == '.yaml':
                ctx.obj['extra_parameters'] = yaml.load(f, Loader=yaml.SafeLoader)
            elif tuning_filepath.suffix == '.cde':
                from cde import Config
                ctx.obj['extra_parameters'] = Config.loads(f.read()).asdict()
            else:
                ctx.obj['extra_parameters'] = json.load(f)
    # batch runs will override this since batches may have different configurations
    ctx.obj['batch_conf_dir'] = make_batch_conf_dir(outputs_commit, ctx.obj['batch_label'], platform, ctx.obj['configurations'], ctx.obj['extra_parameters'], share)
    ctx.obj['batch_dir'] = make_batch_dir(outputs_commit, ctx.obj['batch_label'], platform, ctx.obj['configurations'], ctx.obj['extra_parameters'], share)
    # For convenience, we allow users to change environment variables using {ENV: {VAR: value}}
    # in configurations or tuning parameters
    environment_variables = {}
    for c in ctx.obj['configurations']:
        if not isinstance(c, dict): continue
        if 'ENV' in c: environment_variables.update(c['ENV'])
    # Tuning-level ENV overrides configuration-level ENV (applied last).
    if 'ENV' in ctx.obj['extra_parameters']:
        environment_variables.update(ctx.obj['extra_parameters']['ENV'])
    os.environ.update(environment_variables)
    # we manage stripping ansi color codes ourselfs since we redirect std streams
    # to both the original stream and a log file
    ctx.color = True
    # colors in log files colors will be interpreted in the UIs
    ctx.obj['color'] = is_ci or share
@qa.command()
@click.option('-i', '--input', 'input_path', type=PathType(), help='Path of the input/recording/test we should work on, relative to the database directory.')
@click.option('-o', '--output', 'output_path', type=PathType(), default=None, help='Custom output directory path. If not provided, defaults to ctx.obj["batch_conf_dir"] / input_path.with_suffix('')')
@click.argument('variable')
@click.pass_context
def get(ctx, input_path, output_path, variable):
    """Prints the value of the requested variable. Mostly useful for debug.

    Looks `variable` up in (lowest to highest precedence): the command's
    local values, the module globals, then `ctx.obj`. Exits with status 1
    when the name is unknown.
    """
    from .config import outputs_commit, commit_branch, artifacts_branch_root
    # backward compatibility
    if variable == "branch_ci_dir":
        variable = "artifacts_branch_root"
    if variable == "commit_ci_dir":
        variable = "outputs_commit"
    # Build an explicit lookup namespace instead of mutating locals():
    # writes to locals() are not guaranteed to be reflected as real locals,
    # and the dict-based lookup is what the previous implementation relied on.
    namespace = {
        'ctx': ctx,
        'input_path': input_path,
        'output_path': output_path,
        'variable': variable,
        'outputs_commit': outputs_commit,
        'commit_branch': commit_branch,
        'artifacts_branch_root': artifacts_branch_root,
    }
    try:
        namespace['output_directory'] = ctx.obj['batch_conf_dir'] / input_path.with_suffix('') if not output_path else output_path
    except Exception:
        # input_path may be None (option not given); output_directory is then simply unavailable.
        pass
    # Same precedence as before: globals override locals, ctx.obj overrides both.
    namespace.update(globals())
    namespace.update(ctx.obj)
    if variable in namespace:
        print(namespace.get(variable))
    else:
        click.secho(f"Could not find {variable}", err=True, fg='red')
        exit(1)
@qa.command(context_settings=dict(
    ignore_unknown_options=True,
    allow_interspersed_args=False,
))
@click.pass_context
@click.option('-i', '--input', 'input_path', required=True, type=PathType(), help='Path of the input/recording/test we should work on, relative to the database directory.')
@click.option('-o', '--output', 'output_path', type=PathType(), default=None, help='Custom output directory path. If not provided, defaults to ctx.obj["batch_conf_dir"] / input_path.with_suffix('')')
@click.option('--keep-previous', is_flag=True, help="Don't clean previous outputs before the run.")
@click.option('--no-postprocess', is_flag=True, help="Don't do the postprocessing.")
@click.option('--save-manifests-in-database', is_flag=True, help="Save the input and outputs manifests in the database.")
@click.argument('forwarded_args', nargs=-1, type=click.UNPROCESSED)
def run(ctx, input_path, output_path, keep_previous, no_postprocess, forwarded_args, save_manifests_in_database):
    """
    Runs over a given input/recording/test and computes various success metrics and outputs.

    Flow: prepare the output directory, write run.json, redirect std streams
    to a log file, call the project's entrypoint `run()`, then `postprocess_`.
    Exits with status 1 when the resulting metrics say the run failed.
    """
    run_context = RunContext.from_click_run_context(ctx, config)
    # Usually we want to remove any files already present in the output directory.
    # It avoids issues with remaining state... This said,
    # In some cases users want to debug long, multi-stepped runs, for which they have their own caching
    if not keep_previous:
        import shutil
        shutil.rmtree(run_context.output_dir, ignore_errors=True)
    run_context.output_dir.mkdir(parents=True, exist_ok=True)
    # Persist the run parameters so the run can be identified/reproduced later.
    with (run_context.output_dir / 'run.json').open('w') as f:
        json.dump({
            # run_context.database is always made absolute, we keep it relative if given so
            "database": str(ctx.obj["database"]),
            "input_path": str(run_context.rel_input_path),
            "input_type": run_context.type,
            "configurations": run_context.configurations,
            "extra_parameters": run_context.extra_parameters,
            "platform": run_context.platform,
        }, f, sort_keys=True, indent=2, separators=(',', ': '))
    # Without this, we can only log runs from `qa batch`, on linux, via LSF
    # this redirect is not 100% perfect, we don't get stdout from C calls
    # if not 'LSB_JOBID' in os.environ: # When using LSF, we usally already have incremental logs
    with redirect_std_streams(run_context.output_dir / 'log.txt', color=ctx.obj['color']):
        # Help reproduce qa runs with something copy-pastable in the logs
        if is_ci:
            from shlex import quote
            click.secho(' '.join(['qa', *map(quote, sys.argv[1:])]), fg='cyan', bold=True)
        click.echo(click.style("Outputs: ", fg='cyan') + click.style(str(run_context.output_dir), fg='cyan', bold=True), err=True)
        print_url(ctx)
        if not ctx.obj['offline']:
            notify_qa_database(**ctx.obj, is_pending=True, is_running=True)
        start = time.time()
        cwd = os.getcwd()
        try:
            # The project's entrypoint is expected to return a dict with at least {"is_failed": bool}.
            runtime_metrics = entrypoint_module(config).run(run_context)
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            click.secho(f'[ERROR] Your `run` function raised an exception: {e}', fg='red', bold=True)
            try:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                click.secho(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)), fg='red')
            except Exception as e: # debug strange stale file errors, ideally remove this...
                print(f"ERROR: {e}")
            runtime_metrics = {'is_failed': True}
        # Tolerate entrypoints that return nothing or a non-dict value.
        if not runtime_metrics:
            click.secho('[WARNING] Your `run` function should return a dict with a least {"is_failed": False}', fg='yellow')
            runtime_metrics = {"is_failed": False}
        if not isinstance(runtime_metrics, dict):
            click.secho(f'[ERROR] Your `run` function did not return a dict, but {runtime_metrics}', fg='red', bold=True)
            runtime_metrics = {'is_failed': True}
        runtime_metrics['compute_time'] = time.time() - start
        # avoid issues if code in run() changes cwd
        if os.getcwd() != cwd:
            os.chdir(cwd)
        metrics = postprocess_(runtime_metrics, run_context, skip=no_postprocess or runtime_metrics['is_failed'], save_manifests_in_database=save_manifests_in_database)
        if not metrics:
            metrics = runtime_metrics
        if metrics['is_failed']:
            click.secho('[ERROR] The run has failed.', fg='red', err=True)
            click.secho(str(metrics), fg='red', bold=True)
            exit(1)
        else:
            click.secho(str(metrics), fg='green')
def postprocess_(runtime_metrics, run_context, skip=False, save_manifests_in_database=False):
"""Computes computes various success metrics and outputs."""
from .utils import file_info
try:
if not skip:
try:
entrypoint_postprocess = entrypoint_module(config).postprocess
except:
metrics = runtime_metrics
else:
metrics = entrypoint_postprocess(runtime_metrics, run_context)
else:
metrics = runtime_metrics
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
# TODO: in case of import error because postprocess was not defined, just ignore it...?
# TODO: we should provide a default postprocess function, that reads metrics.json and returns {**previous, **runtime_metrics}
exc_type, exc_value, exc_traceback = sys.exc_info()
click.secho(f'[ERROR] Your `postprocess` function raised an exception:', fg='red', bold=True)
click.secho(''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)), fg='red')
metrics = {**runtime_metrics, 'is_failed': True}
if 'is_failed' not in metrics:
click.secho("[Warning] The result of the `postprocess` function misses a key `is_failed` (bool)", fg='yellow')
metrics['is_failed'] = False
if (run_context.output_dir / 'metrics.json').exists():
with (run_context.output_dir / 'metrics.json').open('r') as f:
previous_metrics = json.load(f)
metrics = {
**previous_metrics,
**metrics,
}
with (run_context.output_dir / 'metrics.json').open('w') as f:
json.dump(metrics, f, sort_keys=True, indent=2, separators=(',', ': '))
# To help identify | |
'NOT_COMPLIANT'
standards_severity = 'requirements'
audit_name_severity = 'insufficient'
file_names_links = [audit_link(path_to_text(file), file) for file in files]
detail = ('Files {} have {} of {}, which is below ENCODE {}. According to '
'ENCODE data standards, a number for this property in a replicate of > {:,} '
'is required, and > {:,} is recommended.'.format(
', '.join(file_names_links),
metric_description,
metric_value,
standards_severity,
lower_limit,
upper_limit,
)
)
yield AuditFailure('{} {}'.format(audit_name_severity, audit_name), detail, level=level)
return
def check_idr(metrics, rescue, self_consistency):
    """Audit IDR replicate concordance for ChIP-seq quality metrics.

    For each metric carrying both ratios, yields:
      - 'insufficient replicate concordance' (NOT_COMPLIANT) when BOTH the
        rescue ratio and the self consistency ratio exceed their thresholds;
      - 'borderline replicate concordance' (WARNING) when exactly one does.
    Metrics where both ratios are within thresholds yield nothing.
    """
    for m in metrics:
        if 'rescue_ratio' not in m or 'self_consistency_ratio' not in m:
            continue
        rescue_r = m['rescue_ratio']
        self_r = m['self_consistency_ratio']
        rescue_fails = rescue_r > rescue
        self_fails = self_r > self_consistency
        if not rescue_fails and not self_fails:
            continue
        # The detail text is identical for both failure levels — build it once
        # (the previous implementation duplicated this block verbatim).
        file_names_links = [
            audit_link(path_to_text(file), file) for file in m['quality_metric_of']
        ]
        detail = ('Replicate concordance in ChIP-seq expriments is measured by '
                  'calculating IDR values (Irreproducible Discovery Rate). '
                  'ENCODE processed IDR thresholded peaks files {} '
                  'have a rescue ratio of {:.2f} and a '
                  'self consistency ratio of {:.2f}. '
                  'According to ENCODE standards, having both rescue ratio '
                  'and self consistency ratio values < 2 is recommended, but '
                  'having only one of the ratio values < 2 is acceptable.'.format(
                      ', '.join(file_names_links),
                      rescue_r,
                      self_r
                  )
                  )
        if rescue_fails and self_fails:
            yield AuditFailure('insufficient replicate concordance', detail,
                               level='NOT_COMPLIANT')
        else:
            yield AuditFailure('borderline replicate concordance', detail,
                               level='WARNING')
    return
def check_mad(metrics, replication_type, mad_threshold, pipeline):
    """Audit Median-Absolute-Deviation of replicate log ratios for gene quantifications.

    Yields 'low replicate concordance' (WARNING) or 'insufficient replicate
    concordance' (NOT_COMPLIANT) audits depending on the replication type and
    the MAD value found in the metrics.

    NOTE(review): the `mad_threshold` parameter is never used — the thresholds
    0.2 and 0.5 are hard-coded below. Confirm whether it should replace them.
    """
    if replication_type == 'anisogenic':
        experiment_replication_type = 'anisogenic'
    elif replication_type == 'isogenic':
        experiment_replication_type = 'isogenic'
    else:
        # Any other replication type is out of scope for this audit.
        return
    mad_value = None
    for m in metrics:
        if 'MAD of log ratios' in m:
            mad_value = m['MAD of log ratios']
            if mad_value > 0.2:
                file_list = []
                for f in m['quality_metric_of']:
                    # NOTE(review): sibling audits (check_idr, check_spearman) append
                    # `f` directly — here `f['@id']` is used; presumably the items are
                    # embedded objects in this context. Verify against the callers.
                    file_list.append(f['@id'])
                # `file` shadows the (Python 2) builtin name; kept as-is.
                file_names_links = [audit_link(path_to_text(file), file) for file in file_list]
                detail = ('ENCODE processed gene quantification files {} '
                          'has Median-Average-Deviation (MAD) '
                          'of replicate log ratios from quantification '
                          'value of {}.'
                          ' For gene quantification files from an {}'
                          ' assay in the {} '
                          'pipeline, a value <0.2 is recommended, but a value between '
                          '0.2 and 0.5 is acceptable.'.format(
                              ', '.join(file_names_links),
                              mad_value,
                              experiment_replication_type,
                              pipeline
                          )
                          )
                if experiment_replication_type == 'isogenic':
                    # Isogenic: 0.2 < MAD < 0.5 is borderline, >= 0.5 is non-compliant.
                    if mad_value < 0.5:
                        yield AuditFailure('low replicate concordance', detail,
                                           level='WARNING')
                    else:
                        yield AuditFailure('insufficient replicate concordance', detail,
                                           level='NOT_COMPLIANT')
                elif experiment_replication_type == 'anisogenic' and mad_value > 0.5:
                    # Anisogenic: only MAD > 0.5 is flagged, and only as a warning.
                    # Reuses `file_list` built above (mad_value > 0.5 implies > 0.2).
                    file_names_links = [audit_link(path_to_text(file), file) for file in file_list]
                    detail = ('ENCODE processed gene quantification files {} '
                              'has Median-Average-Deviation (MAD) '
                              'of replicate log ratios from quantification '
                              'value of {}.'
                              ' For gene quantification files from an {}'
                              ' assay in the {} '
                              'pipeline, a value <0.5 is recommended.'.format(
                                  ', '.join(file_names_links),
                                  mad_value,
                                  experiment_replication_type,
                                  pipeline
                              )
                              )
                    yield AuditFailure('low replicate concordance', detail,
                                       level='WARNING')
    return
def check_experiment_ERCC_spikeins(experiment, pipeline):
    '''
    Audit for the presence of the ERCC spike-in in long-RNA pipeline experiments.

    The assumption is that the regular audit catches anything without spike-ins
    at all; this one specifically checks that libraries with size_range '>200'
    reference the ERCC spike-in file. Yields a WARNING when some spike-in is
    present but not the ERCC one, NOT_COMPLIANT when no spike-in is present.
    '''
    single_cell_assay = 'single cell isolation followed by RNA-seq'
    for replicate in experiment['replicates']:
        library = replicate.get('library')
        # Guard clauses: skip replicates without a library or with the wrong size range.
        if library is None:
            continue
        if library.get('size_range') != '>200':
            continue
        spikeins = library.get('spikeins_used') or []
        has_any_spikein = len(spikeins) > 0
        has_ercc = False
        # Scan every file of every spike-in for the known ERCC references.
        for spikein in spikeins:
            for spikein_file in spikein.get('files') or []:
                if spikein_file == '/files/ENCFF001RTP/':
                    has_ercc = True
                elif (spikein_file == '/files/ENCFF001RTO/'
                        and experiment['assay_term_name'] == single_cell_assay):
                    has_ercc = True
        if has_ercc:
            continue
        lib_link = audit_link(path_to_text(library['@id']), library['@id'])
        exp_link = audit_link(path_to_text(experiment['@id']), experiment['@id'])
        if has_any_spikein:
            detail = ('Library {} in experiment {} '
                      'that was processed by {} pipeline '
                      'requires standard ERCC spike-in to be used in its preparation.'.format(
                          lib_link,
                          exp_link,
                          pipeline
                      )
                      )
            yield AuditFailure('missing spikeins',
                               detail, level='WARNING')
        else:
            detail = ('Library {} in experiment {} '
                      'that was processed by {} pipeline '
                      'requires ERCC spike-in to be used in its preparation.'.format(
                          lib_link,
                          exp_link,
                          pipeline
                      )
                      )
            yield AuditFailure('missing spikeins',
                               detail, level='NOT_COMPLIANT')
    return
def check_spearman(metrics, replication_type, isogenic_threshold,
                   anisogenic_threshold, pipeline):
    """Audit Spearman correlation between replicate gene quantifications.

    Picks the threshold matching `replication_type` ('isogenic' or
    'anisogenic'; anything else is skipped) and yields a WARNING
    'low replicate concordance' audit for each metric whose
    'Spearman correlation' falls below it.
    """
    # Select the threshold for this replication type; bail out for others.
    if replication_type == 'isogenic':
        threshold = isogenic_threshold
    elif replication_type == 'anisogenic':
        threshold = anisogenic_threshold
    else:
        return
    for metric in metrics:
        if 'Spearman correlation' not in metric:
            continue
        spearman_correlation = metric['Spearman correlation']
        if not spearman_correlation < threshold:
            continue
        file_names_links = [
            audit_link(path_to_text(f), f) for f in metric['quality_metric_of']
        ]
        detail = ('Replicate concordance in RNA-seq expriments is measured by '
                  'calculating the Spearman correlation between gene quantifications '
                  'of the replicates. '
                  'ENCODE processed gene quantification files {} '
                  'have a Spearman correlation of {:.2f}. '
                  'According to ENCODE standards, in an {} '
                  'assay analyzed using the {} pipeline, '
                  'a Spearman correlation value > {} '
                  'is recommended.'.format(
                      ', '.join(file_names_links),
                      spearman_correlation,
                      replication_type,
                      pipeline,
                      threshold
                  )
                  )
        yield AuditFailure('low replicate concordance', detail,
                           level='WARNING')
    return
def check_file_chip_seq_library_complexity(alignment_file):
'''
An alignment file from the ENCODE ChIP-seq processing pipeline
should have minimal library complexity in accordance with the criteria
'''
if alignment_file['output_type'] == 'transcriptome alignments':
return
if alignment_file['lab'] not in ['/labs/encode-processing-pipeline/', '/labs/kevin-white/']:
return
if ('quality_metrics' not in alignment_file) or (alignment_file.get('quality_metrics') == []):
return
nrf_detail = ('NRF (Non Redundant Fraction) is equal to the result of the '
'division of the number of reads after duplicates removal by '
'the total number of reads. '
'An NRF value in the range 0 - 0.5 is poor complexity, '
'0.5 - 0.8 is moderate complexity, '
'and > 0.8 high complexity. NRF value > 0.8 is recommended, '
'but > 0.5 is acceptable. ')
pbc1_detail = ('PBC1 (PCR Bottlenecking Coefficient 1, M1/M_distinct) '
'is the ratio of the number of genomic '
'locations where exactly one read maps uniquely (M1) to the number of '
'genomic locations where some reads map (M_distinct). '
'A PBC1 value in the range 0 - 0.5 is severe bottlenecking, 0.5 - 0.8 '
'is moderate bottlenecking, 0.8 - 0.9 is mild bottlenecking, and > 0.9 '
'is no bottlenecking. PBC1 value > 0.9 is recommended, but > 0.8 is '
'acceptable. ')
pbc2_detail = ('PBC2 (PCR Bottlenecking Coefficient 2, M1/M2) is the ratio of the number of '
'genomic locations where exactly one read maps uniquely (M1) to the number of genomic '
'locations where two reads map uniquely (M2). '
'A PBC2 value in the range 0 - 1 is severe bottlenecking, 1 - 3 '
'is moderate bottlenecking, 3 - 10 is mild bottlenecking, > 10 is '
'no bottlenecking. PBC2 value > 10 is recommended, but > 3 is acceptable. ')
quality_metrics = alignment_file.get('quality_metrics')
for metric in quality_metrics:
if 'NRF' in metric:
NRF_value = float(metric['NRF'])
detail = ('{} ENCODE processed {} file {} '
'was generated from a library with '
'NRF value of {:.2f}.'.format(
nrf_detail,
alignment_file['output_type'],
audit_link(path_to_text(alignment_file['@id']), alignment_file['@id']),
NRF_value
)
)
if NRF_value < 0.5:
yield AuditFailure('poor library complexity', detail,
level='NOT_COMPLIANT')
elif NRF_value >= 0.5 and NRF_value < 0.8:
yield AuditFailure('moderate library complexity', detail,
level='WARNING')
if 'PBC1' in metric:
PBC1_value = float(metric['PBC1'])
detail = ('{} ENCODE processed {} file {} '
'was generated from a library with PBC1 value of {:.2f}.'.format(
pbc1_detail,
alignment_file['output_type'],
audit_link(path_to_text(alignment_file['@id']), alignment_file['@id']),
PBC1_value
)
)
if PBC1_value < 0.5:
yield AuditFailure('severe bottlenecking', detail,
level='NOT_COMPLIANT')
elif PBC1_value >= 0.5 and PBC1_value < 0.9:
yield AuditFailure('mild to moderate bottlenecking', detail,
level='WARNING')
if 'PBC2' in metric:
PBC2_raw_value = metric['PBC2']
if PBC2_raw_value == 'Infinity':
PBC2_value = float('inf')
else:
PBC2_value = float(metric['PBC2'])
detail = ('{} ENCODE processed {} file {} '
'was generated from | |
<reponame>felixmiller/ot-dsim<filename>sim_ecc_tests.py<gh_stars>0
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Runs ECC operations based on p256 firmware.
Runs ECC operations based on the primitives contained in the binary blob of
the p256 lib.
"""
from bignum_lib.machine import Machine
from bignum_lib.sim_helpers import *
from sim import ins_objects_from_hex_file
from sim import ins_objects_from_asm_file
from Crypto.Math.Numbers import Integer
from Crypto.PublicKey import ECC
from Crypto.Hash import SHA256
from Crypto.Signature import DSS
import sys
# Switch to True to get a full instruction trace on stdout
ENABLE_TRACE_DUMP = True
# Configuration for the statistics prints
STATS_CONFIG = {
    'instruction_histo_sort_by': 'key',
}
# Bignum machine geometry: 256-bit words made of eight 32-bit limbs.
DMEM_BYTE_ADDRESSING=True
BN_WORD_LEN = 256
BN_LIMB_LEN = 32
BN_MASK = 2**BN_WORD_LEN-1
BN_LIMB_MASK = 2**BN_LIMB_LEN-1
#BN_MAX_WORDS = 16 # Max number of bn words per val (for 4096 bit words)
DMEM_DEPTH = 1024
# Program images of the p256 firmware in three formats (hex dump, annotated
# assembly, OTBN assembly). Which one is used depends on the loader called.
PROGRAM_HEX_FILE = 'hex/dcrypto_p256.hex'
PROGRAM_ASM_FILE = 'asm/dcrypto_p256.asm_anno'
PROGRAM_OTBN_ASM_FILE = 'asm/p256.s'
#PROGRAM_OTBN_ASM_FILE = 'asm/dcrypto_p256.otbn_asm'
# pointers to dmem areas according to calling conventions of the p256 lib
dmem_mult = 32 if DMEM_BYTE_ADDRESSING else 1
pLoc = 0  # Location of the pointer word itself in dmem
pK = 1    # ECDSA nonce
pRnd = 2  # random seed
pMsg = 3  # message digest
pR = 4    # signature r
pS = 5    # signature s
pX = 6    # public key x coordinate
pY = 7    # public key y coordinate
pD = 8    # private key
# Hard-coded entry/stop addresses of the p256 primitives inside the hex image
# (only used by load_program_hex; the asm loaders derive them from labels).
P256INIT_START_ADDR = 22
P256INIT_STOP_ADDR = 43
P256ISONCURVE_START_ADDR = 82
P256ISONCURVE_STOP_ADDR = 105
P256SCALARMULT_START_ADDR = 618
P256SCALARMULT_STOP_ADDR = 629
P256SIGN_START_ADDR = 446
P256SIGN_STOP_ADDR = 479
P256VERIFY_START_ADDR = 538
P256VERIFY_STOP_ADDR = 617
# Group order n of the NIST P-256 curve
P256_CURVE_ORDER = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
# Example key
# public
x = 0xb5511a6afacdc5461628ce58db6c8bf36ec0c0b2f36b06899773b7b3bfa8c334
y = 0x42a1c6971f31c14343dd09eab53a17fa7f7a11d0ab9c6924a87070589e008c2e
# private
d = 0xc0fbe2569144233de5f2cbee543b963b2d869bf4aa847f52fbd94efec7df1a56
# Example point on curve
xexp = 0xb5511a6afacdc5461628ce58db6c8bf36ec0c0b2f36b06899773b7b3bfa8c334
yexp = 0x42a1c6971f31c14343dd09eab53a17fa7f7a11d0ab9c6924a87070589e008c2e
# Example scalar
kexp = 0x1420fc41742102631b76ebe83fdfa3799590ef5db0b2c78121d0a016fe6d1071
# Example signature (for msg_digest and kexp)
rexp = 0x815215ad7dd27f336b35843cbe064de299504edd0c7d87dd1147ea5680a9674a
sexp = 0xa3991e01c444042086e30cd999e589ad4dad9404e90a6d17d0b1051ec93fd605
# Fixed test message and its SHA-256 digest (used by sign/verify tests)
msg_str = b'Hello bignum, can you sign this for me?'
msg_digest = SHA256.new(msg_str)
msg_digest_int = int(msg_digest.hexdigest(), 16)
# Module-level simulator state, mutated by the run_* helpers below.
ins_objects = []
dmem = []
inst_cnt = 0
cycle_cnt = 0
stats = init_stats()
# Helper functions
def bit_len(int_type):
    """Return the number of bits required to binary encode an integer.

    Uses int.bit_length(), which matches the original shift-and-count loop
    for non-negative inputs (bit_len(0) == 0) and, unlike the old loop,
    also terminates for negative inputs (>> on a negative int never
    reaches 0, so the original would hang).
    """
    return int_type.bit_length()
def test_bit(int_type, offset):
"""Helper function indicationg if a specific bit in the bin representation of an int is set."""
mask = 1 << offset
return bool(int_type & mask)
def egcd(a, b):
    """Extended Euclidean algorithm (iterative form).

    Returns (g, x, y) with g == gcd(a, b) and a*x + b*y == g.
    """
    old_r, r = a, b
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return old_r, old_x, old_y
def mod_inv(val, mod):
    """Return the multiplicative inverse of *val* modulo *mod*.

    Raises Exception when gcd(val, mod) != 1, i.e. when no inverse exists.
    """
    g, inv, _ = egcd(val, mod)
    if g == 1:
        return inv % mod
    raise Exception('modular inverse does not exist')
def get_msg_val(msg):
    """Helper function returning an ASCII-encoded bignum value for a string.

    Each character contributes two hex digits (big-endian); the concatenation
    is parsed as one integer. An empty string raises ValueError (int('', 16)).
    """
    hex_digits = ''.join('{:02x}'.format(ord(ch)) for ch in msg)
    return int(hex_digits, 16)
def get_msg_str(val):
    """Helper function to return a string for an ASCII bignum value.

    Inverse of get_msg_val(). The hex representation is left-padded to an
    even number of digits before being split into byte pairs: the original
    sliced hex(val) from index 2 in steps of 2, which mis-aligned every pair
    whenever the leading byte was < 0x10 (odd-length hex string), e.g.
    0x141 decoded to garbage instead of '\\x01A'.
    """
    hex_str = format(val, 'x')
    if len(hex_str) % 2:
        hex_str = '0' + hex_str  # re-align byte pairs for odd-length values
    # join is linear; the old ret += chr(...) loop was quadratic
    return ''.join(chr(int(hex_str[i:i + 2], 16))
                   for i in range(0, len(hex_str), 2))
# DMEM manipulation
def init_dmem():
    """Create the simulator side of dmem and init with zeros.

    The docstring used to sit after the ``global`` statement, where it is an
    ordinary expression statement rather than the function's docstring; it is
    now first so help()/tools can see it.
    """
    global dmem
    dmem = [0]*DMEM_DEPTH
def load_pointer():
    """Pack all area pointers into dmem word 0 per the p256 lib calling conventions.

    Limb *slot* of the pointer word holds pointer number *slot* (scaled by
    dmem_mult for byte addressing), in the fixed order k, rnd, msg, r, s, x, y, d.
    """
    pointer_order = (pK, pRnd, pMsg, pR, pS, pX, pY, pD)
    pval = 0
    for slot, ptr in enumerate(pointer_order):
        pval |= (ptr * dmem_mult) << (BN_LIMB_LEN * slot)
    dmem[pLoc] = pval
def load_k(k):
    """Load the ECDSA nonce into dmem at its calling-convention slot."""
    dmem[pK] = k
def load_rnd(rnd):
    """Load the random seed into dmem at its calling-convention slot."""
    dmem[pRnd] = rnd
def load_msg(msg):
    """Load the message digest into dmem at its calling-convention slot."""
    dmem[pMsg] = msg
def load_r(r):
    """Load the signature's r value into dmem at its calling-convention slot."""
    dmem[pR] = r
def load_s(s):
    """Load the signature's s value into dmem at its calling-convention slot."""
    dmem[pS] = s
def load_x(x):
    """Load the public key's x coordinate into dmem at its calling-convention slot."""
    dmem[pX] = x
def load_y(y):
    """Load the public key's y coordinate into dmem at its calling-convention slot."""
    dmem[pY] = y
def load_d(d):
    """Load the private key into dmem at its calling-convention slot."""
    dmem[pD] = d
def load_mod1():
    """Zero dmem word 16."""
    # NOTE(review): presumably clears a modulus slot in the firmware's memory
    # map — confirm against the p256 lib layout; no caller is visible here.
    dmem[16] = 0x0000_0000
# Program loading
def load_program_hex():
    """Load the p256 binary executable from its hex dump file.

    Side effects (module globals): sets ins_objects and ctx from the parsed
    hex file, clears breakpoints, and fills start_addr_dict/stop_addr_dict
    from the hard-coded per-primitive address constants.
    """
    global ins_objects
    global ctx
    global start_addr_dict
    global stop_addr_dict
    global breakpoints
    breakpoints = {}
    # with-statement closes the file even if the parser raises
    with open(PROGRAM_HEX_FILE) as insfile:
        ins_objects, ctx = ins_objects_from_hex_file(insfile)
    start_addr_dict = {'p256init': P256INIT_START_ADDR, 'p256isoncurve': P256ISONCURVE_START_ADDR,
                       'p256scalarmult': P256SCALARMULT_START_ADDR, 'p256sign': P256SIGN_START_ADDR,
                       'p256verify': P256VERIFY_START_ADDR}
    stop_addr_dict = {'p256init': P256INIT_STOP_ADDR, 'p256isoncurve': P256ISONCURVE_STOP_ADDR,
                      'p256scalarmult': P256SCALARMULT_STOP_ADDR, 'p256sign': P256SIGN_STOP_ADDR,
                      'p256verify': P256VERIFY_STOP_ADDR}
def load_program_asm():
    """Load the p256 program from the annotated assembly file.

    Side effects (module globals): sets ins_objects, ctx and breakpoints from
    the parser, then derives per-primitive start/stop addresses from the
    function label table (a primitive stops one instruction before the next
    function's entry; the last one runs to the end of the program).
    """
    global ins_objects
    global ctx
    global start_addr_dict
    global stop_addr_dict
    global breakpoints
    # with-statement closes the file even if the parser raises
    with open(PROGRAM_ASM_FILE) as insfile:
        ins_objects, ctx, breakpoints = ins_objects_from_asm_file(insfile)
    # invert ctx.functions so addresses can be looked up by function name
    function_addr = {v: k for k, v in ctx.functions.items()}
    start_addr_dict = {'p256init': function_addr['p256init'], 'p256isoncurve': function_addr['p256isoncurve'],
                       'p256scalarmult': function_addr['p256scalarmult'], 'p256sign': function_addr['p256sign'],
                       'p256verify': function_addr['p256verify']}
    stop_addr_dict = {'p256init': function_addr['MulMod']-1, 'p256isoncurve': function_addr['ProjAdd']-1,
                      'p256scalarmult': len(ins_objects)-1, 'p256sign': function_addr['p256scalarbasemult']-1,
                      'p256verify': function_addr['p256scalarmult']-1}
def load_program_otbn_asm():
    """Load the p256 program from the OTBN assembly file.

    Same contract as load_program_asm(), but parses with byte addressing and
    resolves addresses through ctx.labels, since OTBN assembly does not
    differentiate between generic and function labels.
    """
    global ins_objects
    global ctx
    global start_addr_dict
    global stop_addr_dict
    global breakpoints
    # with-statement closes the file even if the parser raises
    with open(PROGRAM_OTBN_ASM_FILE) as insfile:
        ins_objects, ctx, breakpoints = ins_objects_from_asm_file(insfile, dmem_byte_addressing=DMEM_BYTE_ADDRESSING)
    # invert ctx.labels so addresses can be looked up by label name
    function_addr = {v: k for k, v in ctx.labels.items()}
    start_addr_dict = {'p256init': function_addr['p256init'], 'p256isoncurve': function_addr['p256isoncurve'],
                       'p256scalarmult': function_addr['p256scalarmult'], 'p256sign': function_addr['p256sign'],
                       'p256verify': function_addr['p256verify']}
    stop_addr_dict = {'p256init': function_addr['MulMod']-1, 'p256isoncurve': function_addr['ProjAdd']-1,
                      'p256scalarmult': len(ins_objects)-1, 'p256sign': function_addr['p256scalarbasemult']-1,
                      'p256verify': function_addr['p256scalarmult']-1}
def dump_trace_str(trace_string):
    """Print one instruction-trace line when ENABLE_TRACE_DUMP is set."""
    if not ENABLE_TRACE_DUMP:
        return
    print(trace_string)
def run_isoncurve(x, y):
    """Run the p256isoncurve primitive to check whether (x, y) is a curve point.

    Phase 1 runs p256init to set up the firmware's constants in dmem; phase 2
    loads the point and runs p256isoncurve. Returns True when the firmware
    left equal values in the r and s slots (its "on curve" convention).
    Mutates module globals dmem, inst_cnt and cycle_cnt.
    """
    global dmem
    global inst_cnt
    global cycle_cnt
    global ctx
    global stats
    load_pointer()
    # Phase 1: execute p256init on a fresh machine seeded with current dmem.
    machine = Machine(dmem.copy(), ins_objects, start_addr_dict['p256init'], stop_addr_dict['p256init'], ctx=ctx, breakpoints=breakpoints)
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # Phase 2: load the candidate point and re-target the machine.
    load_x(x)
    load_y(y)
    machine.dmem = dmem.copy()
    machine.pc = start_addr_dict['p256isoncurve']
    machine.stop_addr = stop_addr_dict['p256isoncurve']
    cont = True
    # NOTE(review): stats is only attached for phase 2 here, while
    # run_scalarmult/run_verify attach it before the init run too — confirm
    # whether the init instructions should be counted in the statistics.
    machine.stats = stats
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # point is on curve if r and s are equal
    on_curve = (dmem[pS] == dmem[pR])
    return on_curve
def run_scalarmult(x, y, k):
    """Run the p256scalarmult primitive to multiply curve point (x, y) by scalar k.

    Phase 1 runs p256init; phase 2 loads the point and scalar and runs
    p256scalarmult. Returns the resulting point as (x, y) read from the
    firmware's x/y dmem slots. Mutates module globals dmem, inst_cnt,
    cycle_cnt and the shared stats object.
    """
    global dmem
    global inst_cnt
    global cycle_cnt
    global ctx
    global stats
    global breakpoints
    load_pointer()
    # Phase 1: execute p256init on a fresh machine seeded with current dmem.
    machine = Machine(dmem.copy(), ins_objects, start_addr_dict['p256init'], stop_addr_dict['p256init'], ctx=ctx, breakpoints=breakpoints)
    machine.stats = stats
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # Phase 2: load operands and re-target the machine at p256scalarmult.
    load_x(x)
    load_y(y)
    load_k(k)
    machine.dmem = dmem.copy()
    machine.pc = start_addr_dict['p256scalarmult']
    machine.stop_addr = stop_addr_dict['p256scalarmult']
    machine.stats = stats
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    return dmem[pX], dmem[pY]
def run_sign(d, k, msg):
    """Run the p256sign primitive to ECDSA-sign digest *msg* with key *d* and nonce *k*.

    Phase 1 runs p256init; phase 2 loads digest, private key and nonce and
    runs p256sign. Returns the signature as (r, s) read from the firmware's
    dmem slots. Mutates module globals dmem, inst_cnt and cycle_cnt.
    """
    global dmem
    global inst_cnt
    global cycle_cnt
    global ctx
    global stats
    global breakpoints
    load_pointer()
    # Phase 1: execute p256init on a fresh machine seeded with current dmem.
    machine = Machine(dmem.copy(), ins_objects, start_addr_dict['p256init'], stop_addr_dict['p256init'], ctx=ctx, breakpoints=breakpoints)
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # Phase 2: load operands and re-target the machine at p256sign.
    load_msg(msg)
    load_d(d)
    load_k(k)
    machine.dmem = dmem.copy()
    machine.pc = start_addr_dict['p256sign']
    machine.stop_addr = stop_addr_dict['p256sign']
    # NOTE(review): as in run_isoncurve, stats is attached only for phase 2.
    machine.stats = stats
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    return dmem[pR], dmem[pS]
def run_verify(x, y, r, s, msg):
    """Run the p256verify primitive to verify ECDSA signature (r, s) on digest *msg*.

    (Docstring previously copy-pasted from run_sign.) Phase 1 runs p256init;
    phase 2 loads the public key, signature and digest and runs p256verify.
    Returns True when the firmware's recomputed value (left in the rnd slot)
    matches r. Mutates module globals dmem, inst_cnt, cycle_cnt and stats.
    """
    global dmem
    global inst_cnt
    global cycle_cnt
    global ctx
    global stats
    global breakpoints
    load_pointer()
    # Phase 1: execute p256init on a fresh machine seeded with current dmem.
    machine = Machine(dmem.copy(), ins_objects, start_addr_dict['p256init'], stop_addr_dict['p256init'], ctx=ctx, breakpoints=breakpoints)
    machine.stats = stats
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # Phase 2: load operands and re-target the machine at p256verify.
    load_x(x)
    load_y(y)
    load_r(r)
    load_s(s)
    load_msg(msg)
    machine.dmem = dmem.copy()
    machine.pc = start_addr_dict['p256verify']
    machine.stop_addr = stop_addr_dict['p256verify']
    machine.stats = stats
    cont = True
    while cont:
        cont, trace_str, cycles = machine.step()
        dump_trace_str(trace_str)
        inst_cnt += 1
        cycle_cnt += cycles
    dmem = machine.dmem.copy()
    # Verification successful if r == rnd
    return dmem[pR] == dmem[pRnd]
def run_test_curvepoint_deterministic():
    """Check that the fixed example point (xexp, yexp) is accepted as on-curve."""
    if not run_isoncurve(xexp, yexp):
        raise Exception('Test point (deterministic) should be on curve')
def run_test_curvepoint_random():
#rand = Integer.random_range(min_inclusive=1, max_exclusive=P256_CURVE_ORDER)
randkey = ECC.generate(curve='P-256')
randx = int(randkey.public_key().pointQ.x.to_bytes(32).hex(), 16)
randy = int(randkey.public_key().pointQ.y.to_bytes(32).hex(), 16)
res = run_isoncurve(randx, randy)
if not res:
raise Exception('Test point (random) should be on | |
import math
import random
import pytorch_lightning as pl
import torch
import os
import pickle
import cv2
import numpy as np
from torch.utils import data
from datasets.init_dataset import get_dataset
from ACT_utils.ACT_utils import tubelet_in_out_tubes, tubelet_has_gt
from MOC_utils.gaussian_hm import gaussian_radius, draw_umich_gaussian
from ACT_utils.ACT_aug import apply_distort, apply_expand, crop_image
from pprint import pprint
class UCFDataset(data.Dataset):
    """Tubelet dataset over UCF101-style ground truth for action detection.

    Each sample is a clip of K consecutive frames plus CenterNet/MOC-style
    training targets (center heatmap, per-frame box sizes, movement offsets
    and flattened center indices) built from pickled ground-truth tubes.
    """

    def __init__(self,
                 root_dir,
                 mode,  # 'train' or anything else for validation/test split
                 pkl_filename = 'UCF101v2-GT.pkl',
                 K=7,  # tubelet length in frames
                 skip=1,
                 downratio=4,  # input-to-feature-map downsampling ratio
                 mean=[0.40789654, 0.44719302, 0.47026115],
                 std=[0.28863828, 0.27408164, 0.27809835],
                 resize=(288, 288),  # (h, w) network input size
                 max_objs=128):
        # NOTE(review): mean/std are mutable list defaults shared across
        # instances; they are not mutated here, but tuples would be safer.
        super().__init__()
        self.root_dir = root_dir
        self.mode = mode
        self.K = K
        self.skip = skip  # TODO implement skipping frames in getitem
        self._resize_height = resize[0]
        self._resize_width = resize[1]
        self.down_ratio = downratio
        self.mean = mean
        self.std = std
        self.max_objs = max_objs
        # Load the ground-truth pickle and mirror its entries onto the
        # instance, prefixing everything but 'labels' with an underscore:
        pkl_file = os.path.join(root_dir, pkl_filename)
        with open(pkl_file, 'rb') as fid:
            pkl = pickle.load(fid, encoding='iso-8859-1')
        for k in pkl:
            setattr(self, ('_' if k != 'labels' else '') + k, pkl[k])
        # labels, _nframes, _train_videos, _test_videos
        # _gttubes, _resolution
        self.num_classes = len(self.labels)
        # Enumerate (video, start_frame) pairs: non-overlapping windows of K
        # frames that lie fully inside at least one tube and contain GT.
        self._indices = []
        video_list = self._train_videos if mode == 'train' else self._test_videos
        for v in video_list:
            vtubes = sum(self._gttubes[v].values(), [])
            self._indices += [(v, i) for i in range(1, self._nframes[v] + 2 - self.K, self.K)
                              if tubelet_in_out_tubes(vtubes, i, self.K) and tubelet_has_gt(vtubes, i, self.K)]
        self.init_aug_params()

    def init_aug_params(self):
        """Set up SSD-style photometric distortion, expand and crop parameters."""
        # BGR pixel means used to fill borders when expanding the canvas
        self._mean_values = [104.0136177, 114.0342201, 119.91659325]
        self.distort_param = {
            'brightness_prob': 0.5,
            'brightness_delta': 32,
            'contrast_prob': 0.5,
            'contrast_lower': 0.5,
            'contrast_upper': 1.5,
            'hue_prob': 0.5,
            'hue_delta': 18,
            'saturation_prob': 0.5,
            'saturation_lower': 0.5,
            'saturation_upper': 1.5,
            'random_order_prob': 0.0,
        }
        self.expand_param = {
            'expand_prob': 0.5,
            'max_expand_ratio': 4.0,
        }
        # Crop samplers with increasing minimum jaccard overlap, plus one
        # unconstrained sampler and one bounded by max overlap (SSD scheme).
        self.batch_samplers = [{
            'sampler': {},
            'max_trials': 1,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'min_jaccard_overlap': 0.1, },
            'max_trials': 50,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'min_jaccard_overlap': 0.3, },
            'max_trials': 50,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'min_jaccard_overlap': 0.5, },
            'max_trials': 50,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'min_jaccard_overlap': 0.7, },
            'max_trials': 50,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'min_jaccard_overlap': 0.9, },
            'max_trials': 50,
            'max_sample': 1,
        }, {
            'sampler': {'min_scale': 0.3, 'max_scale': 1.0, 'min_aspect_ratio': 0.5, 'max_aspect_ratio': 2.0, },
            'sample_constraint': {'max_jaccard_overlap': 1.0, },
            'max_trials': 50,
            'max_sample': 1,
        }, ]

    def __len__(self):
        """Number of (video, start_frame) tubelet samples."""
        return len(self._indices)

    def imagefile(self, v, i):
        """Path of the i-th (1-based, zero-padded) RGB frame of video *v*."""
        return os.path.join(self.root_dir, 'rgb-images', v, '{:0>5}.jpg'.format(i))

    def flip_video(self, images, frame, v):
        """Randomly mirror the clip horizontally and collect (flipped) GT boxes.

        Returns (images, gt_bbox) where gt_bbox maps label -> list of (K, 4)
        arrays of [x1, y1, x2, y2] rows, one per frame of the tubelet.
        """
        do_mirror = random.getrandbits(1) == 1
        # flip the images
        if do_mirror:
            images = [im[:, ::-1, :] for im in images]
        h, w = self._resolution[v]
        gt_bbox = {}
        for ilabel, tubes in self._gttubes[v].items():
            for t in tubes:
                # tube rows are [frame, x1, y1, x2, y2]; keep only tubes that
                # span the whole tubelet [frame, frame + K - 1]
                if frame not in t[:, 0]:
                    continue
                assert frame + self.K - 1 in t[:, 0]
                # copy otherwise it will change the gt of the dataset also
                t = t.copy()
                if do_mirror:
                    # flip the gt bbox: mirror x1/x2 around the image width
                    xmin = w - t[:, 3]
                    t[:, 3] = w - t[:, 1]
                    t[:, 1] = xmin
                boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
                assert boxes.shape[0] == self.K
                if ilabel not in gt_bbox:
                    gt_bbox[ilabel] = []
                # gt_bbox[ilabel] ---> list of (K, 4) arrays of [x1, y1, x2, y2]
                gt_bbox[ilabel].append(boxes)
        return images, gt_bbox

    def make_gttbox(self, frame, v):
        """Collect GT boxes for the tubelet starting at *frame* (no augmentation).

        Same output format as flip_video(), used for the validation split.
        """
        gt_bbox = {}
        for ilabel, tubes in self._gttubes[v].items():
            for t in tubes:
                if frame not in t[:, 0]:
                    continue
                assert frame + self.K - 1 in t[:, 0]
                t = t.copy()
                boxes = t[(t[:, 0] >= frame) * (t[:, 0] < frame + self.K), 1:5]
                assert boxes.shape[0] == self.K
                if ilabel not in gt_bbox:
                    gt_bbox[ilabel] = []
                gt_bbox[ilabel].append(boxes)
        return gt_bbox

    def resize_video(self, images, gt_bbox):
        """Resize frames to network input size and boxes to feature-map scale.

        Note the asymmetry: images are scaled to (resize_h, resize_w) while
        boxes are scaled to the down_ratio-reduced output resolution, since
        the targets live on the feature map.
        """
        original_h, original_w = images[0].shape[:2]
        output_h = self._resize_height // self.down_ratio
        output_w = self._resize_width // self.down_ratio
        # resize the original img and it's GT bbox
        for ilabel in gt_bbox:
            for itube in range(len(gt_bbox[ilabel])):
                gt_bbox[ilabel][itube][:, 0] = gt_bbox[ilabel][itube][:, 0] / original_w * output_w
                gt_bbox[ilabel][itube][:, 1] = gt_bbox[ilabel][itube][:, 1] / original_h * output_h
                gt_bbox[ilabel][itube][:, 2] = gt_bbox[ilabel][itube][:, 2] / original_w * output_w
                gt_bbox[ilabel][itube][:, 3] = gt_bbox[ilabel][itube][:, 3] / original_h * output_h
        images = [cv2.resize(im, (self._resize_width, self._resize_height), interpolation=cv2.INTER_LINEAR) for im in images]
        return images, gt_bbox

    def normalize(self, images):
        """Convert HWC uint8-range frames to CHW float arrays, normalized by mean/std."""
        data = [np.empty((3, self._resize_height, self._resize_width), dtype=np.float32) for i in range(self.K)]
        # reshape mean/std to (3, 1, 1) so they broadcast over H and W
        # (np.tile with (1, 1, 1) adds no repetition beyond the indexing)
        mean = np.tile(np.array(self.mean, dtype=np.float32)[:, None, None], (1, 1, 1))
        std = np.tile(np.array(self.std, dtype=np.float32)[:, None, None], (1, 1, 1))
        for i in range(self.K):
            data[i][0:3, :, :] = np.transpose(images[i], (2, 0, 1))
            data[i] = ((data[i] / 255.) - mean) / std
        return data

    def draw_ground_truths(self, gt_bbox):
        """Build CenterNet/MOC training targets from feature-map-scale boxes.

        Returns (hm, wh, mov, index, index_all, mask):
        hm        -- (num_classes, H, W) gaussian center heatmap (key frame)
        wh        -- (max_objs, 2K) per-frame box width/height pairs
        mov       -- (max_objs, 2K) per-frame center offset from key-frame center
        index     -- (max_objs,) flattened key-frame center index
        index_all -- (max_objs, 2K) flattened per-frame center index (x/y slots
                     both hold the same flattened value)
        mask      -- (max_objs,) 1 for valid object slots
        """
        output_h = self._resize_height // self.down_ratio
        output_w = self._resize_width // self.down_ratio
        hm = np.zeros((self.num_classes, output_h, output_w), dtype=np.float32)
        wh = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
        mov = np.zeros((self.max_objs, self.K * 2), dtype=np.float32)
        index = np.zeros((self.max_objs), dtype=np.int64)
        index_all = np.zeros((self.max_objs, self.K * 2), dtype=np.int64)
        mask = np.zeros((self.max_objs), dtype=np.uint8)
        num_objs = 0
        for ilabel in gt_bbox:
            for itube in range(len(gt_bbox[ilabel])):
                key = self.K // 2
                # key frame's bbox height and width (both on the feature map)
                key_h, key_w = gt_bbox[ilabel][itube][key, 3] - gt_bbox[ilabel][itube][key, 1], gt_bbox[ilabel][itube][key, 2] - gt_bbox[ilabel][itube][key, 0]
                # create gaussian heatmap
                radius = gaussian_radius((math.ceil(key_h), math.ceil(key_w)))
                radius = max(0, int(radius))
                # ground truth bbox's center in key frame
                center = np.array([(gt_bbox[ilabel][itube][key, 0] + gt_bbox[ilabel][itube][key, 2]) / 2, (gt_bbox[ilabel][itube][key, 1] + gt_bbox[ilabel][itube][key, 3]) / 2], dtype=np.float32)
                center_int = center.astype(np.int32)
                # NOTE(review): the <= bounds look off by one — a center at
                # exactly output_w/output_h would index out of range; confirm.
                assert 0 <= center_int[0] and center_int[0] <= output_w and 0 <= center_int[1] and center_int[1] <= output_h
                # draw ground truth gaussian heatmap at each center location
                draw_umich_gaussian(hm[ilabel], center_int, radius)
                for i in range(self.K):
                    center_all = np.array([(gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2, (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2], dtype=np.float32)
                    center_all_int = center_all.astype(np.int32)
                    # wh is ground truth bbox's height and width in i_th frame
                    wh[num_objs, i * 2: i * 2 + 2] = 1. * (gt_bbox[ilabel][itube][i, 2] - gt_bbox[ilabel][itube][i, 0]), 1. * (gt_bbox[ilabel][itube][i, 3] - gt_bbox[ilabel][itube][i, 1])
                    # mov is ground truth movement from i_th frame to key frame
                    mov[num_objs, i * 2: i * 2 + 2] = (gt_bbox[ilabel][itube][i, 0] + gt_bbox[ilabel][itube][i, 2]) / 2 - \
                        center_int[0], (gt_bbox[ilabel][itube][i, 1] + gt_bbox[ilabel][itube][i, 3]) / 2 - center_int[1]
                    # index_all are all frame's bbox center position (the same
                    # flattened index is written to both the x and y slot)
                    index_all[num_objs, i * 2: i * 2 + 2] = center_all_int[1] * output_w + center_all_int[0], center_all_int[1] * output_w + center_all_int[0]
                # index is key frame's bbox center position
                index[num_objs] = center_int[1] * output_w + center_int[0]
                # mask indicate how many objects in this tube
                mask[num_objs] = 1
                num_objs = num_objs + 1
        return hm, wh, mov, index, index_all, mask

    def __getitem__(self, id):
        """Return one training/validation sample as a dict of target tensors."""
        v, frame = self._indices[id]
        # Read images
        # NOTE(review): with skip > 1 only ceil(K / skip) frames are read while
        # the GT pipeline still expects K per tube — see the TODO in __init__.
        images = [cv2.imread(self.imagefile(v, frame + i)).astype(np.float32) for i in range(0,self.K,self.skip)]
        if self.mode == 'train':
            # apply data augmentation
            images, gt_bbox = self.flip_video(images, frame, v)
            images = apply_distort(images, self.distort_param)
            images, gt_bbox = apply_expand(images, gt_bbox, self.expand_param, self._mean_values)
            images, gt_bbox = crop_image(images, gt_bbox, self.batch_samplers)
        else:
            # no data augmentation or flip when validation
            gt_bbox = self.make_gttbox(frame, v)
        # Resize the video
        images, gt_bbox = self.resize_video(images, gt_bbox)
        data = self.normalize(images)
        hm, wh, mov, index, index_all, mask = self.draw_ground_truths(gt_bbox)
        return {'input': data, 'hm': hm, 'mov': mov, 'wh': wh, 'mask': mask, 'index': index, 'index_all': index_all}

    def _draw_bb(self, video, frame, index):
        """Draw all GT boxes of *video* at frame number *index* onto image *frame*."""
        i = index
        for label in self._gttubes[video]:
            # print(label)
            tubes = self._gttubes[video][label]
            for tube in tubes:
                # row(s) of this tube that belong to frame i
                x = np.where(tube[..., 0] == i)[0]
                if (len(x) != 0):
                    x = int(x)
                    x1, y1, x2, y2 = tube[x, 1:]
                    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                    frame = cv2.rectangle(frame, (x1, y1), (x2, y2), color=(255, 0, 0), thickness=2)
        return frame

    def save_video(self, index, fps=25, drawbb=True, save_dir='.'):
        """Write sample *index* as an .mp4 clip, optionally with GT boxes drawn."""
        video, start_frame = self._indices[index]
        h, w = self._resolution[video]
        save_path = video.split(os.path.sep)[-1] + '_'+ str(index) + '.mp4'
        save_path = os.path.join(save_dir, save_path)
        out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        for i in range(start_frame, start_frame+self.K, self.skip):
            frame = cv2.imread(self.imagefile(video, i))
            if drawbb:
                frame = self._draw_bb(video, frame, i)
            out.write(frame)
        out.release()
class VideoDataModule(pl.LightningDataModule):
def __init__(self,
root_dir,
pkl_file,
K,
resize,
batch_size,
num_workers=None,
pin_memory=False):
super().__init__()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.