# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.ssd_meta_arch."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.meta_architectures import ssd_meta_arch_test_lib
from object_detection.protos import model_pb2
from object_detection.utils import test_utils
slim = tf.contrib.slim
keras = tf.keras.layers
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase,
parameterized.TestCase):
def _create_model(
self,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
use_keras=False,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None):
return super(SsdMetaArchTest, self)._create_model(
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=apply_hard_mining,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
add_background_class=add_background_class,
random_example_sampling=random_example_sampling,
expected_loss_weights=expected_loss_weights,
min_num_negative_samples=min_num_negative_samples,
desired_negative_sampling_ratio=desired_negative_sampling_ratio,
use_keras=use_keras,
predict_mask=predict_mask,
use_static_shapes=use_static_shapes,
nms_max_size_per_class=nms_max_size_per_class,
calibration_mapping_value=calibration_mapping_value)
def test_preprocess_preserves_shapes_with_dynamic_input_image(
self, use_keras):
image_shapes = [(3, None, None, 3),
(None, 10, 10, 3),
(None, None, None, 3)]
model, _, _, _ = self._create_model(use_keras=use_keras)
for image_shape in image_shapes:
image_placeholder = tf.placeholder(tf.float32, shape=image_shape)
preprocessed_inputs, _ = model.preprocess(image_placeholder)
self.assertAllEqual(preprocessed_inputs.shape.as_list(), image_shape)
def test_preprocess_preserves_shape_with_static_input_image(self, use_keras):
def graph_fn(input_image):
model, _, _, _ = self._create_model(use_keras=use_keras)
return model.preprocess(input_image)
input_image = np.random.rand(2, 3, 3, 3).astype(np.float32)
preprocessed_inputs, _ = self.execute(graph_fn, [input_image])
self.assertAllEqual(preprocessed_inputs.shape, [2, 3, 3, 3])
def test_predict_result_shapes_on_image_with_dynamic_shape(self, use_keras):
batch_size = 3
image_size = 2
input_shapes = [(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model(
use_keras=use_keras)
preprocessed_input_placeholder = tf.placeholder(tf.float32,
shape=input_shape)
prediction_dict = model.predict(
preprocessed_input_placeholder, true_image_shapes=None)
self.assertIn('box_encodings', prediction_dict)
self.assertIn('class_predictions_with_background', prediction_dict)
self.assertIn('feature_maps', prediction_dict)
self.assertIn('anchors', prediction_dict)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
prediction_out = sess.run(prediction_dict,
feed_dict={
preprocessed_input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
expected_box_encodings_shape_out = (batch_size, num_anchors, code_size)
expected_class_predictions_with_background_shape_out = (batch_size,
num_anchors,
num_classes + 1)
self.assertAllEqual(prediction_out['box_encodings'].shape,
expected_box_encodings_shape_out)
self.assertAllEqual(
prediction_out['class_predictions_with_background'].shape,
expected_class_predictions_with_background_shape_out)
def test_predict_result_shapes_on_image_with_static_shape(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, code_size = self._create_model(
use_keras=use_keras)
def graph_fn(input_image):
model, _, _, _ = self._create_model()
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['feature_maps'],
predictions['anchors'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
(box_encodings, class_predictions, _, _) = self.execute(graph_fn,
[input_image])
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
def test_postprocess_results_are_correct(self, use_keras):
batch_size = 2
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]]
raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertIn('detection_boxes', detections)
self.assertIn('detection_scores', detections)
self.assertIn('detection_classes', detections)
self.assertIn('num_detections', detections)
self.assertIn('raw_detection_boxes', detections)
self.assertIn('raw_detection_scores', detections)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
for image_idx in range(batch_size):
self.assertTrue(
test_utils.first_rows_close_as_set(
detections_out['detection_boxes'][image_idx].tolist(),
expected_boxes[image_idx]))
self.assertAllClose(detections_out['detection_scores'], expected_scores)
self.assertAllClose(detections_out['detection_classes'], expected_classes)
self.assertAllClose(detections_out['num_detections'],
expected_num_detections)
self.assertAllEqual(detections_out['raw_detection_boxes'],
raw_detection_boxes)
self.assertAllEqual(detections_out['raw_detection_scores'],
raw_detection_scores)
def test_postprocess_results_are_correct_static(self, use_keras):
with tf.Graph().as_default():
_, _, _, _ = self._create_model(use_keras=use_keras)
def graph_fn(input_image):
model, _, _, _ = self._create_model(use_static_shapes=True,
nms_max_size_per_class=4)
preprocessed_inputs, true_image_shapes = model.preprocess(input_image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
return (detections['detection_boxes'], detections['detection_scores'],
detections['detection_classes'], detections['num_detections'])
batch_size = 2
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
(detection_boxes, detection_scores, detection_classes,
num_detections) = self.execute(graph_fn, [input_image])
for image_idx in range(batch_size):
self.assertTrue(test_utils.first_rows_close_as_set(
detection_boxes[image_idx][
0:expected_num_detections[image_idx]].tolist(),
expected_boxes[image_idx][0:expected_num_detections[image_idx]]))
self.assertAllClose(
detection_scores[image_idx][0:expected_num_detections[image_idx]],
expected_scores[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(
detection_classes[image_idx][0:expected_num_detections[image_idx]],
expected_classes[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(num_detections,
expected_num_detections)
def test_postprocess_results_are_correct_with_calibration(self, use_keras):
batch_size = 2
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
]
] # padding
# Calibration mapping value below is set to map all scores to 0.5, except
# for the last two detections in each batch (see expected number of
# detections below).
expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]]
expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]]
raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras,
calibration_mapping_value=0.5)
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertIn('detection_boxes', detections)
self.assertIn('detection_scores', detections)
self.assertIn('detection_classes', detections)
self.assertIn('num_detections', detections)
self.assertIn('raw_detection_boxes', detections)
self.assertIn('raw_detection_scores', detections)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
for image_idx in range(batch_size):
self.assertTrue(
test_utils.first_rows_close_as_set(
detections_out['detection_boxes'][image_idx].tolist(),
expected_boxes[image_idx]))
self.assertAllClose(detections_out['detection_scores'], expected_scores)
self.assertAllClose(detections_out['detection_classes'], expected_classes)
self.assertAllClose(detections_out['num_detections'],
expected_num_detections)
self.assertAllEqual(detections_out['raw_detection_boxes'],
raw_detection_boxes)
self.assertAllEqual(detections_out['raw_detection_scores'],
raw_detection_scores)
def test_loss_results_are_correct(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
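# The expected value below is consistent with the test lib's stubbed predictor
# emitting all-zero class logits and a per-entry (sigmoid-style) cross-entropy:
# a zero logit costs log(2) regardless of the label, so every (anchor, class)
# slot contributes log(2) to the total. (Assumption about the shared test stubs.)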
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_normalize_by_codesize_true(
self, use_keras):
with tf.Graph().as_default():
_, _, _, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False,
normalize_loc_loss_by_codesize=True,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),)
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
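# With normalize_loc_loss_by_codesize=True the localization loss is divided by
# the box code size (assumed here to be the usual 4-element box code), which is
# where the /4 below comes from.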
expected_localization_loss = 0.5 / 4
localization_loss = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
def test_loss_results_are_correct_with_hard_example_mining(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model()
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_without_add_background_class(
self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(
add_background_class=False, use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(
apply_hard_mining=False, add_background_class=False,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (loss_dict['Loss/localization_loss'],
loss_dict['Loss/classification_loss'])
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (
batch_size * num_anchors * num_classes * np.log(2.0))
(localization_loss, classification_loss) = self.execute(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_losses_mask(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3]
is_annotated_list = [tf.constant(True), tf.constant(True),
tf.constant(False)]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
is_annotated_list=is_annotated_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 3
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
groundtruth_classes3 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Note that we are subtracting 1 from batch_size, since the final image is
# not annotated.
expected_classification_loss = ((batch_size - 1) * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_boxes3,
groundtruth_classes1,
groundtruth_classes2,
groundtruth_classes3])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_restore_map_for_detection_ckpt(self, use_keras):
model, _, _, _ = self._create_model(use_keras=use_keras)
model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]],
dtype=np.float32)),
true_image_shapes=None)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_restore_map_for_classification_ckpt(self, use_keras):
# Define mock tensorflow classification graph and save variables.
test_graph_classification = tf.Graph()
with test_graph_classification.as_default():
image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
if use_keras:
with tf.name_scope('mock_model'):
layer_one = keras.Conv2D(32, kernel_size=1, name='layer1')
net = layer_one(image)
layer_two = keras.Conv2D(3, kernel_size=1, name='layer2')
layer_two(net)
else:
with tf.variable_scope('mock_model'):
net = slim.conv2d(image, num_outputs=32, kernel_size=1,
scope='layer1')
slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Create tensorflow detection graph and load variables from
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(fine_tune_checkpoint_type='classification')
self.assertNotIn('another_variable', var_map)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_load_all_det_checkpoint_vars(self, use_keras):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
def test_loss_results_are_correct_with_random_example_sampling(
self,
use_keras):
with tf.Graph().as_default():
_, num_classes, _, _ = self._create_model(
random_example_sampling=True, use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(random_example_sampling=True,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Among the 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are
# selected (1 positive, 1 negative), since the random sampler adjusts the number
# of negative examples so that the positive example fraction in the batch
# is 0.5.
expected_classification_loss = (
batch_size * 2 * (num_classes + 1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
if __name__ == '__main__':
tf.test.main()
# Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
import simplejson as json
from unittest import mock
from six import PY3
from monasca_log_api.app.base import exceptions as log_api_exceptions
from monasca_log_api.app.controller.api import headers
from monasca_log_api.app.controller.v3 import logs
from monasca_log_api.tests import base
ENDPOINT = '/logs'
TENANT_ID = 'bob'
ROLES = 'admin'
def _init_resource(test):
resource = logs.Logs()
test.app.add_route(ENDPOINT, resource)
return resource
def _generate_v3_payload(log_count=None, messages=None):
if not log_count and messages:
log_count = len(messages)
v3_logs = [{
'message': messages[it],
'dimensions': {
'hostname': 'host_%d' % it,
'component': 'component_%d' % it,
'service': 'service_%d' % it
}
} for it in range(log_count)]
else:
v3_logs = [{
'message': base.generate_unique_message(100),
'dimensions': {
'hostname': 'host_%d' % it,
'component': 'component_%d' % it,
'service': 'service_%d' % it
}
} for it in range(log_count)]
v3_body = {
'dimensions': {
'origin': __name__
},
'logs': v3_logs
}
return v3_body, v3_logs
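# Illustrative shape of the generated body (hypothetical values, log_count=1):
# {'dimensions': {'origin': '<this test module>'},
#  'logs': [{'message': '<generated or supplied message>',
#            'dimensions': {'hostname': 'host_0',
#                           'component': 'component_0',
#                           'service': 'service_0'}}]}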
class TestApiLogsVersion(base.BaseApiTestCase):
@mock.patch('monasca_log_api.app.controller.v3.aid'
'.bulk_processor.BulkProcessor')
def test_should_return_v3_as_version(self, _):
logs_resource = logs.Logs()
self.assertEqual('v3.0', logs_resource.version)
@mock.patch('monasca_log_api.app.base.log_publisher.producer.KafkaProducer')
@mock.patch('monasca_log_api.monitoring.client.monascastatsd.Connection')
class TestApiLogsMonitoring(base.BaseApiTestCase):
def test_monitor_bulk_rejected(self, __, _):
res = _init_resource(self)
in_counter = res._logs_in_counter.increment = mock.Mock()
bulk_counter = res._bulks_rejected_counter.increment = mock.Mock()
rejected_counter = res._logs_rejected_counter.increment = mock.Mock()
size_gauge = res._logs_size_gauge.send = mock.Mock()
res._get_logs = mock.Mock(
side_effect=log_api_exceptions.HTTPUnprocessableEntity(''))
log_count = 1
v3_body, _ = _generate_v3_payload(log_count)
payload = json.dumps(v3_body)
content_length = len(payload)
self.simulate_request(
path=ENDPOINT,
method='POST',
headers={
headers.X_ROLES.name: ROLES,
headers.X_TENANT_ID.name: TENANT_ID,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(1, bulk_counter.call_count)
self.assertEqual(0, in_counter.call_count)
self.assertEqual(0, rejected_counter.call_count)
self.assertEqual(0, size_gauge.call_count)
def test_monitor_not_all_logs_ok(self, __, _):
res = _init_resource(self)
in_counter = res._logs_in_counter.increment = mock.Mock()
bulk_counter = res._bulks_rejected_counter.increment = mock.Mock()
rejected_counter = res._logs_rejected_counter.increment = mock.Mock()
size_gauge = res._logs_size_gauge.send = mock.Mock()
log_count = 5
reject_logs = 1
v3_body, _ = _generate_v3_payload(log_count)
payload = json.dumps(v3_body)
content_length = len(payload)
side_effects = [{} for ___ in range(log_count - reject_logs)]
side_effects.append(log_api_exceptions.HTTPUnprocessableEntity(''))
res._processor._get_dimensions = mock.Mock(side_effect=side_effects)
self.simulate_request(
path=ENDPOINT,
method='POST',
headers={
headers.X_ROLES.name: ROLES,
headers.X_TENANT_ID.name: TENANT_ID,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(1, bulk_counter.call_count)
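# Each mock_calls entry is a (name, args, kwargs) tuple, so [0][2]['value']
# reads the 'value' keyword argument of the first recorded call.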
self.assertEqual(0,
bulk_counter.mock_calls[0][2]['value'])
self.assertEqual(1, in_counter.call_count)
self.assertEqual(log_count - reject_logs,
in_counter.mock_calls[0][2]['value'])
self.assertEqual(1, rejected_counter.call_count)
self.assertEqual(reject_logs,
rejected_counter.mock_calls[0][2]['value'])
self.assertEqual(1, size_gauge.call_count)
self.assertEqual(content_length,
size_gauge.mock_calls[0][2]['value'])
def test_monitor_all_logs_ok(self, __, _):
res = _init_resource(self)
in_counter = res._logs_in_counter.increment = mock.Mock()
bulk_counter = res._bulks_rejected_counter.increment = mock.Mock()
rejected_counter = res._logs_rejected_counter.increment = mock.Mock()
size_gauge = res._logs_size_gauge.send = mock.Mock()
res._send_logs = mock.Mock()
log_count = 10
v3_body, _ = _generate_v3_payload(log_count)
payload = json.dumps(v3_body)
content_length = len(payload)
self.simulate_request(
path=ENDPOINT,
method='POST',
headers={
headers.X_ROLES.name: ROLES,
headers.X_TENANT_ID.name: TENANT_ID,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(1, bulk_counter.call_count)
self.assertEqual(0,
bulk_counter.mock_calls[0][2]['value'])
self.assertEqual(1, in_counter.call_count)
self.assertEqual(log_count,
in_counter.mock_calls[0][2]['value'])
self.assertEqual(1, rejected_counter.call_count)
self.assertEqual(0,
rejected_counter.mock_calls[0][2]['value'])
self.assertEqual(1, size_gauge.call_count)
self.assertEqual(content_length,
size_gauge.mock_calls[0][2]['value'])
class TestApiLogs(base.BaseApiTestCase):
@mock.patch('monasca_log_api.app.controller.v3.aid.bulk_processor.'
'BulkProcessor')
def test_should_pass_cross_tenant_id(self, bulk_processor):
logs_resource = _init_resource(self)
logs_resource._processor = bulk_processor
v3_body, v3_logs = _generate_v3_payload(1)
payload = json.dumps(v3_body)
content_length = len(payload)
res = self.simulate_request(
path='/logs',
method='POST',
query_string='tenant_id=1',
headers={
headers.X_ROLES.name: ROLES,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(falcon.HTTP_204, res.status)
logs_resource._processor.send_message.assert_called_with(
logs=v3_logs,
global_dimensions=v3_body['dimensions'],
log_tenant_id='1')
@mock.patch('monasca_log_api.app.controller.v3.aid.bulk_processor.'
'BulkProcessor')
def test_should_fail_not_delegate_ok_cross_tenant_id(self, _):
_init_resource(self)
res = self.simulate_request(
path='/logs',
method='POST',
query_string='tenant_id=1',
headers={
headers.X_ROLES.name: ROLES,
'Content-Type': 'application/json',
'Content-Length': '0'
}
)
self.assertEqual(falcon.HTTP_400, res.status)
@mock.patch('monasca_log_api.app.controller.v3.aid.bulk_processor.'
'BulkProcessor')
def test_should_pass_empty_cross_tenant_id_wrong_role(self,
bulk_processor):
logs_resource = _init_resource(self)
logs_resource._processor = bulk_processor
v3_body, _ = _generate_v3_payload(1)
payload = json.dumps(v3_body)
content_length = len(payload)
res = self.simulate_request(
path='/logs',
method='POST',
headers={
headers.X_ROLES.name: ROLES,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(falcon.HTTP_204, res.status)
self.assertEqual(1, bulk_processor.send_message.call_count)
@mock.patch('monasca_log_api.app.controller.v3.aid.bulk_processor.'
'BulkProcessor')
def test_should_pass_empty_cross_tenant_id_ok_role(self,
bulk_processor):
logs_resource = _init_resource(self)
logs_resource._processor = bulk_processor
v3_body, _ = _generate_v3_payload(1)
payload = json.dumps(v3_body)
content_length = len(payload)
res = self.simulate_request(
path='/logs',
method='POST',
headers={
headers.X_ROLES.name: ROLES,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(falcon.HTTP_204, res.status)
self.assertEqual(1, bulk_processor.send_message.call_count)
class TestUnicodeLogs(base.BaseApiTestCase):
@mock.patch('monasca_log_api.app.base.log_publisher.producer.'
'KafkaProducer')
def test_should_send_unicode_messages(self, _):
_init_resource(self)
messages = [m['input'] for m in base.UNICODE_MESSAGES]
v3_body, _ = _generate_v3_payload(messages=messages)
payload = json.dumps(v3_body, ensure_ascii=False)
content_length = len(payload.encode('utf8') if PY3 else payload)
res = self.simulate_request(
path='/logs',
method='POST',
headers={
headers.X_ROLES.name: ROLES,
'Content-Type': 'application/json',
'Content-Length': str(content_length)
},
body=payload
)
self.assertEqual(falcon.HTTP_204, res.status)
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from ibis.compat import lzip
import ibis.common as com
import ibis.expr.analysis as L
import ibis.expr.analytics as analytics
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.format as format
import ibis.sql.transforms as transforms
import ibis.util as util
import ibis
# ---------------------------------------------------------------------
class QueryAST(object):
def __init__(self, context, queries):
self.context = context
self.queries = queries
class SelectBuilder(object):
"""
Transforms expression IR to a query pipeline (potentially multiple
queries). There will typically be a primary SELECT query, perhaps with some
subqueries and other DDL to ingest and tear down intermediate data sources.
Walks the expression tree and catalogues distinct query units, builds
select statements (and other DDL types, where necessary), and records
relevant query unit aliases to be used when actually generating SQL.
"""
def __init__(self, expr, context):
self.expr = expr
self.query_expr, self.result_handler = _adapt_expr(self.expr)
self.sub_memo = {}
self.context = context
self.queries = []
self.table_set = None
self.select_set = None
self.group_by = None
self.having = None
self.filters = []
self.limit = None
self.sort_by = []
self.subqueries = []
self.distinct = False
self.op_memo = util.IbisSet()
def get_result(self):
# make idempotent
if len(self.queries) > 0:
return self._wrap_result()
# Generate other kinds of DDL statements that may be required to
# execute the passed query (for example, loading data).
setup_queries = self._generate_setup_queries()
# Make DDL statements to be executed after the main primary select
# statement(s)
teardown_queries = self._generate_teardown_queries()
select_query = self._build_result_query()
self.queries.extend(setup_queries)
self.queries.append(select_query)
self.queries.extend(teardown_queries)
return select_query
def _generate_setup_queries(self):
return []
def _generate_teardown_queries(self):
return []
def _build_result_query(self):
self._collect_elements()
self._analyze_select_exprs()
self._analyze_filter_exprs()
self._analyze_subqueries()
self._populate_context()
klass = self._select_class
return klass(self.table_set, self.select_set,
subqueries=self.subqueries,
where=self.filters,
group_by=self.group_by,
having=self.having,
limit=self.limit,
order_by=self.sort_by,
distinct=self.distinct,
result_handler=self.result_handler,
parent_expr=self.query_expr,
context=self.context)
def _populate_context(self):
# Populate aliases for the distinct relations used to output this
# select statement.
if self.table_set is not None:
self._make_table_aliases(self.table_set)
# XXX: This is a temporary solution to the table-aliasing / correlated
# subquery problem. Will need to revisit and come up with a cleaner
# design (also as one way to avoid pathological naming conflicts; for
# example, we could define a table alias before we know that it
# conflicts with the name of a table used in a subquery, join, or
# another part of the query structure)
# There may be correlated subqueries inside the filters, requiring that
# we use an explicit alias when outputting as SQL. For now, we're just
# going to see if any table nodes appearing in the where stack have
# been marked previously by the above code.
for expr in self.filters:
needs_alias = _foreign_ref_check(self, expr)
if needs_alias:
self.context.set_always_alias()
def _make_table_aliases(self, expr):
ctx = self.context
node = expr.op()
if isinstance(node, ops.Join):
for arg in node.args:
if isinstance(arg, ir.TableExpr):
self._make_table_aliases(arg)
else:
if not ctx.is_extracted(expr):
ctx.make_alias(expr)
else:
# The compiler will apply a prefix only if the current context
# contains two or more table references. So, if we've extracted
# a subquery into a CTE, we need to propagate that reference
# down to child contexts so that they aren't missing any refs.
ctx.set_ref(expr, ctx.top_context.get_ref(expr))
# ---------------------------------------------------------------------
# Expr analysis / rewrites
def _analyze_select_exprs(self):
new_select_set = []
for expr in self.select_set:
new_expr = self._visit_select_expr(expr)
new_select_set.append(new_expr)
self.select_set = new_select_set
def _visit_select_expr(self, expr):
op = expr.op()
method = '_visit_select_{0}'.format(type(op).__name__)
if hasattr(self, method):
f = getattr(self, method)
return f(expr)
unchanged = True
if isinstance(op, ops.ValueOp):
new_args = []
for arg in op.args:
if isinstance(arg, ir.Expr):
new_arg = self._visit_select_expr(arg)
if arg is not new_arg:
unchanged = False
new_args.append(new_arg)
else:
new_args.append(arg)
if not unchanged:
return expr._factory(type(op)(*new_args))
else:
return expr
else:
return expr
def _visit_select_Histogram(self, expr):
op = expr.op()
EPS = 1e-13
if op.binwidth is None or op.base is None:
aux_hash = op.aux_hash or util.guid()[:6]
min_name = 'min_%s' % aux_hash
max_name = 'max_%s' % aux_hash
minmax = self.table_set.aggregate([op.arg.min().name(min_name),
op.arg.max().name(max_name)])
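# Cross-joining the single-row min/max aggregate makes those values available
# on every row, so base and binwidth can be computed per row below.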
self.table_set = self.table_set.cross_join(minmax)
if op.base is None:
base = minmax[min_name] - EPS
else:
base = op.base
binwidth = (minmax[max_name] - base) / (op.nbins - 1)
else:
# Have both a bin width and a base
binwidth = op.binwidth
base = op.base
bucket = (op.arg - base) / binwidth
return bucket.floor().name(expr._name)
def _analyze_filter_exprs(self):
# What's semantically contained in the filter predicates may need to be
# rewritten. Not sure if this is the right place to do this, but a
# starting point
# Various kinds of semantically valid WHERE clauses may need to be
# rewritten into a form that we can actually translate into valid SQL.
new_where = []
for expr in self.filters:
new_expr = self._visit_filter(expr)
# Transformations may result in there being no outputted filter
# predicate
if new_expr is not None:
new_where.append(new_expr)
self.filters = new_where
def _visit_filter(self, expr):
# Dumping ground for analysis of WHERE expressions
# - Subquery extraction
# - Conversion to explicit semi/anti joins
# - Rewrites to generate subqueries
op = expr.op()
method = '_visit_filter_{0}'.format(type(op).__name__)
if hasattr(self, method):
f = getattr(self, method)
return f(expr)
unchanged = True
if isinstance(expr, ir.ScalarExpr):
if ops.is_reduction(expr):
return self._rewrite_reduction_filter(expr)
if isinstance(op, ops.BinaryOp):
left = self._visit_filter(op.left)
right = self._visit_filter(op.right)
unchanged = left is op.left and right is op.right
if not unchanged:
return type(expr)(type(op)(left, right))
else:
return expr
elif isinstance(op, (ops.Any, ops.BooleanValueOp,
ops.TableColumn, ir.Literal)):
return expr
elif isinstance(op, ops.ValueOp):
visited = [self._visit_filter(arg)
if isinstance(arg, ir.Expr) else arg
for arg in op.args]
unchanged = True
for new, old in zip(visited, op.args):
if new is not old:
unchanged = False
if not unchanged:
return type(expr)(type(op)(*visited))
else:
return expr
else:
raise NotImplementedError(type(op))
def _rewrite_reduction_filter(self, expr):
# Find the table that this reduction references.
# TODO: what about reductions that reference a join that isn't visible
# at this level? Means we probably have the wrong design, but will have
# to revisit when it becomes a problem.
aggregation, _ = L.reduction_to_aggregation(expr, default_name='tmp')
return aggregation.to_array()
def _visit_filter_Any(self, expr):
# Rewrite semi/anti-join predicates in way that can hook into SQL
# translation step
transform = transforms.AnyToExistsTransform(self.context, expr,
self.table_set)
return transform.get_result()
_visit_filter_NotAny = _visit_filter_Any
def _visit_filter_SummaryFilter(self, expr):
# Top K is rewritten as an
# - aggregation
# - sort by
# - limit
# - left semi join with table set
parent_op = expr.op()
summary_expr = parent_op.args[0]
op = summary_expr.op()
rank_set = summary_expr.to_aggregation(
backup_metric_name='__tmp__',
parent_table=self.table_set)
# GH #667; this may reference a filtered version of self.table_set
arg = L.substitute_parents(op.arg)
pred = (arg == getattr(rank_set, op.arg.get_name()))
self.table_set = self.table_set.semi_join(rank_set, [pred])
return None
# ---------------------------------------------------------------------
# Analysis of table set
def _collect_elements(self):
# If expr is a ValueExpr, we must seek out the TableExprs that it
# references, build their ASTs, and mark them in our QueryContext
# For now, we need to make the simplifying assumption that a value
# expression that is being translated only depends on a single table
# expression.
source_expr = self.query_expr
# hm, is this the best place for this?
root_op = source_expr.op()
if (isinstance(root_op, ops.Join) and
not isinstance(root_op, ops.MaterializedJoin)):
# Unmaterialized join
source_expr = source_expr.materialize()
if isinstance(root_op, ops.TableNode):
self._collect(source_expr, toplevel=True)
if self.table_set is None:
raise com.InternalError('no table set')
else:
# Expressions not depending on any table
if isinstance(root_op, ir.ExpressionList):
self.select_set = source_expr.exprs()
else:
self.select_set = [source_expr]
def _collect(self, expr, toplevel=False):
op = expr.op()
method = '_collect_{0}'.format(type(op).__name__)
# Do not visit nodes twice
if op in self.op_memo:
return
if hasattr(self, method):
f = getattr(self, method)
f(expr, toplevel=toplevel)
elif isinstance(op, (ops.PhysicalTable, ops.SQLQueryResult)):
self._collect_PhysicalTable(expr, toplevel=toplevel)
elif isinstance(op, ops.Join):
self._collect_Join(expr, toplevel=toplevel)
else:
raise NotImplementedError(type(op))
self.op_memo.add(op)
def _collect_Distinct(self, expr, toplevel=False):
if toplevel:
self.distinct = True
self._collect(expr.op().table, toplevel=toplevel)
def _collect_Limit(self, expr, toplevel=False):
if not toplevel:
return
op = expr.op()
# Ignore "inner" limits, because they've been overrided by an exterior
# one
if self.limit is None:
self.limit = {
'n': op.n,
'offset': op.offset
}
self._collect(op.table, toplevel=toplevel)
def _collect_Union(self, expr, toplevel=False):
if toplevel:
raise NotImplementedError()
def _collect_Aggregation(self, expr, toplevel=False):
# The select set includes the grouping keys (if any), and these are
# duplicated in the group_by set. SQL translator can decide how to
# format these depending on the database. Most likely the
# GROUP BY 1, 2, ... style
if toplevel:
subbed_expr = self._sub(expr)
sub_op = subbed_expr.op()
self.group_by = self._convert_group_by(sub_op.by)
self.having = sub_op.having
self.select_set = sub_op.by + sub_op.agg_exprs
self.table_set = sub_op.table
self.filters = sub_op.predicates
self.sort_by = sub_op.sort_keys
self._collect(expr.op().table)
def _collect_Selection(self, expr, toplevel=False):
op = expr.op()
table = op.table
if toplevel:
subbed = self._sub(expr)
sop = subbed.op()
if isinstance(table.op(), ops.Join):
can_sub = self._collect_Join(table)
else:
can_sub = False
self._collect(table)
selections = op.selections
sort_keys = op.sort_keys
filters = op.predicates
if can_sub:
selections = sop.selections
filters = sop.predicates
sort_keys = sop.sort_keys
table = sop.table
if len(selections) == 0:
# select *
selections = [table]
self.sort_by = sort_keys
self.select_set = selections
self.table_set = table
self.filters = filters
def _collect_MaterializedJoin(self, expr, toplevel=False):
op = expr.op()
join = op.join
if toplevel:
subbed = self._sub(join)
self.table_set = subbed
self.select_set = [subbed]
self._collect_Join(join, toplevel=False)
def _convert_group_by(self, exprs):
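# Group by ordinal position of the grouping keys (cf. the "GROUP BY 1, 2, ..."
# note in _collect_Aggregation); the keys come first in the select set.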
return list(range(len(exprs)))
def _collect_Join(self, expr, toplevel=False):
if toplevel:
subbed = self._sub(expr)
self.table_set = subbed
self.select_set = [subbed]
subtables = _get_subtables(expr)
# If any of the joined tables are non-blocking modified versions of the
# same table, then it's not safe to continue walking down the tree (see
# #667), and we should instead have inline views rather than attempting
# to fuse things together into the same SELECT query.
can_substitute = _all_distinct_roots(subtables)
if can_substitute:
for table in subtables:
self._collect(table, toplevel=False)
return can_substitute
def _collect_PhysicalTable(self, expr, toplevel=False):
if toplevel:
self.select_set = [expr]
self.table_set = expr # self._sub(expr)
def _collect_SelfReference(self, expr, toplevel=False):
op = expr.op()
if toplevel:
self._collect(op.table, toplevel=toplevel)
def _sub(self, what):
if isinstance(what, list):
return [L.substitute_parents(x, self.sub_memo) for x in what]
else:
return L.substitute_parents(what, self.sub_memo)
# --------------------------------------------------------------------
# Subquery analysis / extraction
def _analyze_subqueries(self):
# Somewhat temporary place for this. A little bit tricky, because
# subqueries can be found in many places
# - With the table set
# - Inside the where clause (some of these can be placed directly, others
# cannot)
# - As support queries inside certain expressions (possibly needing to
# be extracted and joined into the table set where they are
# used). More complex transformations should probably not occur here,
# though.
#
# Duplicate subqueries might appear in different parts of the query
# structure, e.g. beneath two aggregates that are joined together, so
# we have to walk the entire query structure.
#
# The default behavior is to only extract into a WITH clause when a
# subquery appears multiple times (for DRY reasons). At some point we
# can implement a more aggressive policy so that subqueries always
# appear in the WITH part of the SELECT statement, if that's what you
# want.
# Find the subqueries, and record them in the passed query context.
subqueries = _extract_subqueries(self)
self.subqueries = []
for expr in subqueries:
# See #173. Might have been extracted already in a parent context.
if not self.context.is_extracted(expr):
self.subqueries.append(expr)
self.context.set_extracted(expr)
def _get_subtables(expr):
subtables = []
def _walk(expr):
op = expr.op()
if isinstance(op, ops.Join):
_walk(op.left)
_walk(op.right)
else:
subtables.append(expr)
_walk(expr)
return subtables
def _all_distinct_roots(subtables):
bases = []
for t in subtables:
base = _blocking_base(t)
for x in bases:
if base.equals(x):
return False
bases.append(base)
return True
def _blocking_base(expr):
node = expr.op()
if node.blocks() or isinstance(node, ops.Join):
return expr
else:
for arg in expr.op().flat_args():
if isinstance(arg, ir.TableExpr):
return _blocking_base(arg)
def _extract_subqueries(select_stmt):
helper = _ExtractSubqueries(select_stmt)
return helper.get_result()
def _extract_noop(self, expr):
return
class _ExtractSubqueries(object):
# Helper class to make things a little easier
def __init__(self, query, greedy=False):
self.query = query
self.greedy = greedy
# Ordered set that uses object .equals to find keys
self.observed_exprs = util.IbisMap()
self.expr_counts = defaultdict(lambda: 0)
def get_result(self):
if self.query.table_set is not None:
self.visit(self.query.table_set)
for clause in self.query.filters:
self.visit(clause)
to_extract = []
# Read them inside-out, to avoid nested dependency issues
for expr, key in reversed(lzip(self.observed_exprs.keys,
self.observed_exprs.values)):
v = self.expr_counts[key]
if self.greedy or v > 1:
to_extract.append(expr)
return to_extract
def observe(self, expr):
if expr in self.observed_exprs:
key = self.observed_exprs.get(expr)
else:
# this key only needs to be unique because of the IbisMap
key = id(expr.op())
self.observed_exprs.set(expr, key)
self.expr_counts[key] += 1
def _has_been_observed(self, expr):
return expr in self.observed_exprs
def visit(self, expr):
node = expr.op()
method = '_visit_{0}'.format(type(node).__name__)
if hasattr(self, method):
f = getattr(self, method)
f(expr)
elif isinstance(node, ops.Join):
self._visit_join(expr)
elif isinstance(node, ops.PhysicalTable):
self._visit_physical_table(expr)
elif isinstance(node, ops.ValueOp):
for arg in node.flat_args():
if not isinstance(arg, ir.Expr):
continue
self.visit(arg)
else:
raise NotImplementedError(type(node))
def _visit_join(self, expr):
node = expr.op()
self.visit(node.left)
self.visit(node.right)
_visit_physical_table = _extract_noop
def _visit_Exists(self, expr):
node = expr.op()
self.visit(node.foreign_table)
for pred in node.predicates:
self.visit(pred)
_visit_ExistsSubquery = _visit_Exists
_visit_NotExistsSubquery = _visit_Exists
def _visit_Aggregation(self, expr):
self.observe(expr)
self.visit(expr.op().table)
def _visit_Distinct(self, expr):
self.observe(expr)
def _visit_Limit(self, expr):
self.observe(expr)
self.visit(expr.op().table)
def _visit_Union(self, expr):
op = expr.op()
self.visit(op.left)
self.visit(op.right)
def _visit_MaterializedJoin(self, expr):
self.observe(expr)
self.visit(expr.op().join)
def _visit_Selection(self, expr):
self.observe(expr)
self.visit(expr.op().table)
def _visit_SQLQueryResult(self, expr):
self.observe(expr)
def _visit_TableColumn(self, expr):
table = expr.op().table
if not self._has_been_observed(table):
self.visit(table)
def _visit_SelfReference(self, expr):
self.visit(expr.op().table)
def _foreign_ref_check(query, expr):
checker = _CorrelatedRefCheck(query, expr)
return checker.get_result()
class _CorrelatedRefCheck(object):
def __init__(self, query, expr):
self.query = query
self.ctx = query.context
self.expr = expr
qroots = self.query.table_set._root_tables()
self.query_roots = util.IbisSet.from_list(qroots)
# aliasing required
self.foreign_refs = []
self.has_foreign_root = False
self.has_query_root = False
def get_result(self):
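# The enclosing select needs explicit aliasing only when, from inside a
# subquery, the expression references both a root of this query's table set
# and a foreign table (i.e. both flags below end up set).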
self._visit(self.expr)
return self.has_query_root and self.has_foreign_root
def _visit(self, expr, in_subquery=False, visit_cache=None,
visit_table_cache=None):
if visit_cache is None:
visit_cache = set()
if (id(expr), in_subquery) in visit_cache:
return
visit_cache.add((id(expr), in_subquery))
node = expr.op()
in_subquery = in_subquery or self._is_subquery(node)
for arg in node.flat_args():
if isinstance(arg, ir.TableExpr):
self._visit_table(arg, in_subquery=in_subquery,
visit_cache=visit_cache,
visit_table_cache=visit_table_cache)
elif isinstance(arg, ir.Expr):
self._visit(arg, in_subquery=in_subquery,
visit_cache=visit_cache,
visit_table_cache=visit_table_cache)
else:
continue
def _is_subquery(self, node):
# XXX
if isinstance(node, (ops.TableArrayView,
transforms.ExistsSubquery,
transforms.NotExistsSubquery)):
return True
if isinstance(node, ops.TableColumn):
return not self._is_root(node.table)
return False
def _visit_table(self, expr, in_subquery=False, visit_cache=None,
visit_table_cache=None):
if visit_table_cache is None:
visit_table_cache = set()
if (id(expr), in_subquery) in visit_table_cache:
return
visit_table_cache.add((id(expr), in_subquery))
node = expr.op()
if isinstance(node, (ops.PhysicalTable, ops.SelfReference)):
self._ref_check(node, in_subquery=in_subquery)
for arg in node.flat_args():
if isinstance(arg, ir.Expr):
self._visit(arg, in_subquery=in_subquery,
visit_cache=visit_cache,
visit_table_cache=visit_table_cache)
def _ref_check(self, node, in_subquery=False):
is_aliased = self.ctx.has_ref(node)
if self._is_root(node):
if in_subquery:
self.has_query_root = True
else:
if in_subquery:
self.has_foreign_root = True
if (not is_aliased and
self.ctx.has_ref(node, parent_contexts=True)):
self.ctx.make_alias(node)
elif not self.ctx.has_ref(node):
self.ctx.make_alias(node)
def _is_root(self, what):
if isinstance(what, ir.Expr):
what = what.op()
return what in self.query_roots
def _adapt_expr(expr):
# Non-table expressions need to be adapted to some well-formed table
# expression, along with a way to adapt the results to the desired
# arity (whether array-like or scalar, for example)
#
# Canonical case is scalar values or arrays produced by some reductions
# (simple reductions, or distinct, say)
def as_is(x):
return x
if isinstance(expr, ir.TableExpr):
return expr, as_is
def _get_scalar(field):
def scalar_handler(results):
return results[field][0]
return scalar_handler
if isinstance(expr, ir.ScalarExpr):
if L.is_scalar_reduce(expr):
table_expr, name = L.reduction_to_aggregation(
expr, default_name='tmp')
return table_expr, _get_scalar(name)
else:
base_table = ir.find_base_table(expr)
if base_table is None:
# expr with no table refs
return expr.name('tmp'), _get_scalar('tmp')
else:
raise NotImplementedError(expr._repr())
elif isinstance(expr, ir.AnalyticExpr):
return expr.to_aggregation(), as_is
elif isinstance(expr, ir.ExprList):
exprs = expr.exprs()
is_aggregation = True
any_aggregation = False
for x in exprs:
if not L.is_scalar_reduce(x):
is_aggregation = False
else:
any_aggregation = True
if is_aggregation:
table = ir.find_base_table(exprs[0])
return table.aggregate(exprs), as_is
elif not any_aggregation:
return expr, as_is
else:
raise NotImplementedError(expr._repr())
elif isinstance(expr, ir.ColumnExpr):
op = expr.op()
def _get_column(name):
def column_handler(results):
return results[name]
return column_handler
if isinstance(op, ops.TableColumn):
table_expr = op.table[[op.name]]
result_handler = _get_column(op.name)
else:
# Something more complicated.
base_table = L.find_source_table(expr)
if isinstance(op, ops.DistinctColumn):
expr = op.arg
try:
name = op.arg.get_name()
except Exception:
name = 'tmp'
table_expr = (base_table.projection([expr.name(name)])
.distinct())
result_handler = _get_column(name)
else:
table_expr = base_table.projection([expr.name('tmp')])
result_handler = _get_column('tmp')
return table_expr, result_handler
else:
raise com.TranslationError('Do not know how to execute: {0}'
.format(type(expr)))
class QueryBuilder(object):
select_builder = SelectBuilder
def __init__(self, expr, context=None):
self.expr = expr
if context is None:
context = self._make_context()
self.context = context
@property
def _make_context(self):
raise NotImplementedError
def get_result(self):
op = self.expr.op()
# TODO: any setup / teardown DDL statements will need to be done prior
# to building the result set-generating statements.
if isinstance(op, ops.Union):
query = self._make_union()
else:
query = self._make_select()
return QueryAST(self.context, [query])
def _make_union(self):
op = self.expr.op()
return self._union_class(op.left, op.right, self.expr,
distinct=op.distinct,
context=self.context)
def _make_select(self):
builder = self.select_builder(self.expr, self.context)
return builder.get_result()
class QueryContext(object):
"""
Records bits of information used during ibis AST to SQL translation
"""
def __init__(self, indent=2, parent=None, memo=None):
self._table_refs = {}
self.extracted_subexprs = set()
self.subquery_memo = {}
self.indent = indent
self.parent = parent
self.always_alias = False
self.query = None
self._table_key_memo = {}
self.memo = memo or format.FormatMemo()
def _compile_subquery(self, expr):
sub_ctx = self.subcontext()
return self._to_sql(expr, sub_ctx)
def _to_sql(self, expr, ctx):
raise NotImplementedError
@property
def top_context(self):
if self.parent is None:
return self
else:
return self.parent.top_context
def set_always_alias(self):
self.always_alias = True
def get_compiled_expr(self, expr):
this = self.top_context
key = self._get_table_key(expr)
if key in this.subquery_memo:
return this.subquery_memo[key]
op = expr.op()
if isinstance(op, ops.SQLQueryResult):
result = op.query
else:
result = self._compile_subquery(expr)
this.subquery_memo[key] = result
return result
def make_alias(self, expr):
i = len(self._table_refs)
key = self._get_table_key(expr)
# Get total number of aliases up and down the tree at this point; if we
# find the table prior-aliased along the way, however, we reuse that
# alias
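        # e.g. the first three distinct tables seen in a query tree become
        # t0, t1 and t2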
ctx = self
while ctx.parent is not None:
ctx = ctx.parent
if key in ctx._table_refs:
alias = ctx._table_refs[key]
self.set_ref(expr, alias)
return
i += len(ctx._table_refs)
alias = 't%d' % i
self.set_ref(expr, alias)
def need_aliases(self):
return self.always_alias or len(self._table_refs) > 1
def has_ref(self, expr, parent_contexts=False):
key = self._get_table_key(expr)
return self._key_in(key, '_table_refs',
parent_contexts=parent_contexts)
def set_ref(self, expr, alias):
key = self._get_table_key(expr)
self._table_refs[key] = alias
def get_ref(self, expr):
"""
Get the alias being used throughout a query to refer to a particular
table or inline view
"""
return self._get_table_item('_table_refs', expr)
def is_extracted(self, expr):
key = self._get_table_key(expr)
return key in self.top_context.extracted_subexprs
def set_extracted(self, expr):
key = self._get_table_key(expr)
self.extracted_subexprs.add(key)
self.make_alias(expr)
def subcontext(self):
return type(self)(indent=self.indent, parent=self)
# Maybe temporary hacks for correlated / uncorrelated subqueries
def set_query(self, query):
self.query = query
def is_foreign_expr(self, expr):
from ibis.expr.analysis import ExprValidator
# The expression isn't foreign to us. For example, the parent table set
# in a correlated WHERE subquery
if self.has_ref(expr, parent_contexts=True):
return False
exprs = [self.query.table_set] + self.query.select_set
validator = ExprValidator(exprs)
return not validator.validate(expr)
def _get_table_item(self, item, expr):
key = self._get_table_key(expr)
top = self.top_context
if self.is_extracted(expr):
return getattr(top, item).get(key)
return getattr(self, item).get(key)
def _get_table_key(self, table):
if isinstance(table, ir.TableExpr):
table = table.op()
k = id(table)
if k in self._table_key_memo:
return self._table_key_memo[k]
else:
val = table._repr()
self._table_key_memo[k] = val
return val
def _key_in(self, key, memo_attr, parent_contexts=False):
if key in getattr(self, memo_attr):
return True
ctx = self
while parent_contexts and ctx.parent is not None:
ctx = ctx.parent
if key in getattr(ctx, memo_attr):
return True
return False
class ExprTranslator(object):
_rewrites = {}
def __init__(self, expr, context=None, named=False, permit_subquery=False):
self.expr = expr
self.permit_subquery = permit_subquery
if context is None:
context = self._context_class()
self.context = context
# For now, governing whether the result will have a name
self.named = named
@property
def _context_class(self):
raise NotImplementedError
def get_result(self):
"""
Build compiled SQL expression from the bottom up and return as a string
"""
translated = self.translate(self.expr)
if self._needs_name(self.expr):
# TODO: this could fail in various ways
name = self.expr.get_name()
translated = self.name(translated, name)
return translated
def _needs_name(self, expr):
if not self.named:
return False
op = expr.op()
if isinstance(op, ops.TableColumn):
# This column has been given an explicitly different name
if expr.get_name() != op.name:
return True
return False
if expr.get_name() is ir.unnamed:
return False
return True
def translate(self, expr):
# The operation node type the typed expression wraps
op = expr.op()
if type(op) in self._rewrites and type(op) not in self._registry:
expr = self._rewrites[type(op)](expr)
op = expr.op()
# TODO: use op MRO for subclasses instead of this isinstance spaghetti
if isinstance(op, ir.Parameter):
return self._trans_param(expr)
elif isinstance(op, ops.TableNode):
# HACK/TODO: revisit for more complex cases
return '*'
elif type(op) in self._registry:
formatter = self._registry[type(op)]
return formatter(self, expr)
else:
raise com.TranslationError('No translator rule for {0}'
.format(type(op)))
def _trans_param(self, expr):
raise NotImplementedError
@classmethod
def rewrites(cls, klass, f=None):
        def decorator(f):
            cls._rewrites[klass] = f
            return f
        if f is None:
            return decorator
        else:
            return decorator(f)
@classmethod
def compiles(cls, klass, f=None):
        def decorator(f):
            cls._registry[klass] = f
            return f
        if f is None:
            return decorator
        else:
            return decorator(f)
rewrites = ExprTranslator.rewrites
@rewrites(analytics.Bucket)
def _bucket(expr):
import operator
op = expr.op()
stmt = ibis.case()
if op.closed == 'left':
l_cmp = operator.le
r_cmp = operator.lt
else:
l_cmp = operator.lt
r_cmp = operator.le
user_num_buckets = len(op.buckets) - 1
bucket_id = 0
if op.include_under:
if user_num_buckets > 0:
cmp = operator.lt if op.close_extreme else r_cmp
else:
cmp = operator.le if op.closed == 'right' else operator.lt
stmt = stmt.when(cmp(op.arg, op.buckets[0]), bucket_id)
bucket_id += 1
for j, (lower, upper) in enumerate(zip(op.buckets, op.buckets[1:])):
if (op.close_extreme and
((op.closed == 'right' and j == 0) or
(op.closed == 'left' and j == (user_num_buckets - 1)))):
stmt = stmt.when((lower <= op.arg) & (op.arg <= upper),
bucket_id)
else:
stmt = stmt.when(l_cmp(lower, op.arg) & r_cmp(op.arg, upper),
bucket_id)
bucket_id += 1
if op.include_over:
if user_num_buckets > 0:
cmp = operator.lt if op.close_extreme else l_cmp
else:
cmp = operator.lt if op.closed == 'right' else operator.le
stmt = stmt.when(cmp(op.buckets[-1], op.arg), bucket_id)
bucket_id += 1
return stmt.end().name(expr._name)
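# Illustrative expansion (hypothetical values): with buckets=[0, 10, 25, 50],
# closed='left', close_extreme=False, include_under=True and include_over=True,
# the rewrite above produces roughly
#   ibis.case().when(arg < 0, 0)
#              .when((0 <= arg) & (arg < 10), 1)
#              .when((10 <= arg) & (arg < 25), 2)
#              .when((25 <= arg) & (arg < 50), 3)
#              .when(50 <= arg, 4)
#              .end()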
@rewrites(analytics.CategoryLabel)
def _category_label(expr):
op = expr.op()
stmt = op.args[0].case()
for i, label in enumerate(op.labels):
stmt = stmt.when(i, label)
if op.nulls is not None:
stmt = stmt.else_(op.nulls)
return stmt.end().name(expr._name)
@rewrites(ops.Any)
def _any_expand(expr):
arg = expr.op().args[0]
return arg.sum() > 0
@rewrites(ops.NotAny)
def _notany_expand(expr):
arg = expr.op().args[0]
return arg.sum() == 0
@rewrites(ops.All)
def _all_expand(expr):
arg = expr.op().args[0]
t = ir.find_base_table(arg)
return arg.sum() == t.count()
@rewrites(ops.NotAll)
def _notall_expand(expr):
arg = expr.op().args[0]
t = ir.find_base_table(arg)
return arg.sum() < t.count()
class DDL(object):
pass
class Select(DDL):
"""
A SELECT statement which, after execution, might yield back to the user a
table, array/list, or scalar value, depending on the expression that
generated it
"""
def __init__(self, table_set, select_set,
subqueries=None, where=None, group_by=None, having=None,
order_by=None, limit=None,
distinct=False, indent=2,
result_handler=None, parent_expr=None,
context=None):
self.context = context
self.select_set = select_set
self.table_set = table_set
self.distinct = distinct
self.parent_expr = parent_expr
self.where = where or []
# Group keys and post-predicates for aggregations
self.group_by = group_by or []
self.having = having or []
self.order_by = order_by or []
self.limit = limit
self.subqueries = subqueries or []
self.indent = indent
self.result_handler = result_handler
translator = None
def _translate(self, expr, context=None, named=False,
permit_subquery=False):
if context is None:
context = self.context
translator = self.translator(expr, context=context,
named=named,
permit_subquery=permit_subquery)
return translator.get_result()
def equals(self, other, cache=None):
if cache is None:
cache = {}
if (self, other) in cache:
return cache[(self, other)]
if id(self) == id(other):
cache[(self, other)] = True
return True
if not isinstance(other, Select):
cache[(self, other)] = False
return False
this_exprs = self._all_exprs()
other_exprs = other._all_exprs()
if self.limit != other.limit:
cache[(self, other)] = False
return False
for x, y in zip(this_exprs, other_exprs):
if not x.equals(y):
cache[(self, other)] = False
return False
cache[(self, other)] = True
return True
def _all_exprs(self):
# Gnarly, maybe we can improve this somehow
expr_attrs = ['select_set', 'table_set', 'where', 'group_by', 'having',
'order_by', 'subqueries']
exprs = []
for attr in expr_attrs:
val = getattr(self, attr)
if isinstance(val, list):
exprs.extend(val)
else:
exprs.append(val)
return exprs
class TableSetFormatter(object):
def __init__(self, parent, expr, indent=2):
self.parent = parent
self.context = parent.context
self.expr = expr
self.indent = indent
self.join_tables = []
self.join_types = []
self.join_predicates = []
def _translate(self, expr):
return self.parent._translate(expr, context=self.context)
def _walk_join_tree(self, op):
left = op.left.op()
right = op.right.op()
if util.all_of([left, right], ops.Join):
            raise NotImplementedError('Joins between joins are not '
                                      'supported yet')
self._validate_join_predicates(op.predicates)
jname = self._get_join_type(op)
# Read off tables and join predicates left-to-right in
# depth-first order
if isinstance(left, ops.Join):
self._walk_join_tree(left)
self.join_tables.append(self._format_table(op.right))
self.join_types.append(jname)
self.join_predicates.append(op.predicates)
elif isinstance(right, ops.Join):
# When rewrites are possible at the expression IR stage, we should
# do them. Otherwise subqueries might be necessary in some cases
# here
raise NotImplementedError('not allowing joins on right '
'side yet')
else:
# Both tables
self.join_tables.append(self._format_table(op.left))
self.join_tables.append(self._format_table(op.right))
self.join_types.append(jname)
self.join_predicates.append(op.predicates)
# Placeholder; revisit when supporting other databases
_non_equijoin_supported = True
def _validate_join_predicates(self, predicates):
for pred in predicates:
op = pred.op()
if (not isinstance(op, ops.Equals) and
not self._non_equijoin_supported):
raise com.TranslationError('Non-equality join predicates, '
'i.e. non-equijoins, are not '
'supported')
class Union(DDL):
def __init__(self, left_table, right_table, expr, distinct=False,
context=None):
self.context = context
self.left = left_table
self.right = right_table
self.distinct = distinct
self.table_set = expr
self.filters = []
|
|
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test autotest_evaluator module."""
from __future__ import print_function
import os
from chromite.cros_bisect import autotest_evaluator
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import remote_access
from chromite.lib import remote_access_unittest
from chromite.lib import repo_util_unittest
class RemoteShScpMock(remote_access_unittest.RemoteShMock):
"""In addition to RemoteSh, it mocks ScpToLocal."""
ATTRS = ('RemoteSh', 'ScpToLocal')
def ScpToLocal(self, _, remote, local, **kwargs):
return self._results['ScpToLocal'].LookupResult(
([remote, local],), kwargs=kwargs)
class TestAutotestEvaluator(cros_test_lib.MockTempDirTestCase):
"""Tests AutotestEvaluator class."""
BOARD = 'samus'
TEST_NAME = 'graphics_WebGLAquarium'
METRIC = 'avg_fps_1000_fishes/summary/value'
REPORT_FILE = 'reports.json'
REMOTE_REPORT_FILE = '%s/results/default/%s/results/results-chart.json' % (
autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
DUT_IP = '192.168.1.1'
DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_IP)
TEST_TARGET = '%s/tests/%s/control' % (
autotest_evaluator.AutotestEvaluator.AUTOTEST_BASE, TEST_NAME)
AQUARIUM_REPORT_TEMPLATE = """
{"avg_fps_1000_fishes": {
"summary": {
"units": "fps",
"type": "scalar",
"value": %s,
"improvement_direction": "up"
}
}
}"""
BUILD_LABEL = 'base'
AUTOTEST_CLIENT = autotest_evaluator.AutotestEvaluator.AUTOTEST_CLIENT
TEST_THAT_COMMAND = ['test_that', '-b', BOARD, '--fast', '--args',
'local=True', DUT_IP, TEST_NAME]
def setUp(self):
self.PatchObject(cros_build_lib, 'IsInsideChroot', return_value=False)
# Sets up default options and evaluator object.
self.options = cros_test_lib.EasyAttr(
base_dir=self.tempdir, board=self.BOARD, test_name=self.TEST_NAME,
metric=self.METRIC, metric_take_average=False, reuse_eval=True,
chromium_dir=None, cros_dir=None, eval_passing_only=False)
self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)
def PrepareWebglAquariumReports(self, scores):
"""Prepares graphics_WebGLAquarium reports.
    It is a simplified version. All the test cares about is
    "avg_fps_1000_fishes/summary/value". It can produce multiple reports if
    more than one score is given.
Args:
scores: List of scores.
Returns:
      A list of file names stored in the report directory.
"""
result = []
num_reports = len(scores)
for ith, score in enumerate(scores, start=1):
report_file = os.path.join(
self.tempdir, 'reports',
'results-chart.%s.%d-%d.json' % (self.BUILD_LABEL, ith, num_reports))
osutils.WriteFile(report_file, self.AQUARIUM_REPORT_TEMPLATE % score)
result.append(report_file)
return result
def UpdateOptionsAndEvaluator(self, options_to_update):
"""Updates self.options and self.evaluator.
Based on updated self.options, it creates a new AutotestEvaluator instance
    and assigns it to self.evaluator.
Args:
options_to_update: a dict to update self.options.
"""
self.options.update(options_to_update)
self.evaluator = autotest_evaluator.AutotestEvaluator(self.options)
def testInit(self):
"""Tests that AutotestEvaluator() works as expected."""
base_dir = self.tempdir
self.assertEqual(base_dir, self.evaluator.base_dir)
self.assertEqual(os.path.join(base_dir, 'reports'),
self.evaluator.report_base_dir)
self.assertTrue(os.path.isdir(self.evaluator.report_base_dir))
self.assertEqual(self.BOARD, self.evaluator.board)
self.assertEqual(self.TEST_NAME, self.evaluator.test_name)
self.assertEqual(self.METRIC, self.evaluator.metric)
self.assertFalse(self.evaluator.metric_take_average)
self.assertTrue(self.evaluator.reuse_eval)
self.assertEqual(os.path.join(base_dir, 'chromium'),
self.evaluator.chromium_dir)
# With chromium_dir specified and flip booleans.
self.UpdateOptionsAndEvaluator(
dict(chromium_dir='/tmp/chromium', reuse_eval=False))
self.assertFalse(self.evaluator.metric_take_average)
self.assertFalse(self.evaluator.reuse_eval)
self.assertEqual('/tmp/chromium', self.evaluator.chromium_dir)
def testInitMissingRequiredArgs(self):
"""Tests that AE() raises exception when required options are missing."""
options = cros_test_lib.EasyAttr()
with self.assertRaises(Exception) as cm:
autotest_evaluator.AutotestEvaluator(options)
exception_message = str(cm.exception)
self.assertIn('Missing command line', exception_message)
self.assertIn('AutotestEvaluator', exception_message)
for arg in autotest_evaluator.AutotestEvaluator.REQUIRED_ARGS:
self.assertIn(arg, exception_message)
def testRunTestFromDut(self):
"""Tests that RunTestFromDut() invokes expected commands."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=0,
mock_attr='ScpToLocal')
self.assertTrue(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutSanityCheckFail(self):
"""Tests RunTestFromDut() when autotest control file is missing."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutLsSshError(self):
"""Tests RunTestFromDut() when autotest control file is missing."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET],
returncode=remote_access.SSH_ERROR_CODE)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestSshErrorWithEvalPassingOnly(self):
"""Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET],
returncode=remote_access.SSH_ERROR_CODE)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestFailWithEvalPassingOnly(self):
"""Tests RunTestFromDut() with failed autotest and --eval-passing-only."""
self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestFailWithFailsafe(self):
"""Tests RunTestFromDut() with failed autotest.
Even if the autotest fails to run, RunTestFromDut() tries to retrieve report
from DUT.
"""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=0,
mock_attr='ScpToLocal')
self.assertTrue(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutScpReportFail(self):
"""Tests RunTestFromDut() when it failed to remote copy report file."""
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=1,
mock_attr='ScpToLocal')
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def testRunTestFromDutAutotestFailWithFailsafeScpReportFail(self):
"""Tests RunTestFromDut() with autotest failed with --eval-failsafe.
Even if the autotest fails to run, with --eval-failsafe set,
    RunTestFromDut() tries to retrieve the report from the DUT. This test checks
    the case where the report is missing.
"""
self.UpdateOptionsAndEvaluator(dict(eval_failsafe=True))
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=1)
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, self.REPORT_FILE], returncode=1,
mock_attr='ScpToLocal')
self.assertFalse(self.evaluator.RunTestFromDut(self.DUT, self.REPORT_FILE))
def GetTestResultPath(self, evaluator):
"""Returns base path storing test result.
Args:
evaluator: Evaluator object.
Returns:
      Path where the evaluator stores test results.
"""
return evaluator.ResolvePathFromChroot(os.path.join(
'/tmp', 'test_that_latest', 'results-1-%s' % evaluator.test_name))
def testLookupReportFile(self):
"""Tests LookupReportFile().
Tests that it invokes expected command and performs path normalization.
"""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
results_base_path = self.GetTestResultPath(self.evaluator)
find_command_result = (
'./%s/results/results-chart.json\n' % self.TEST_NAME)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'],
kwargs={'cwd': results_base_path, 'capture_output': True},
output=find_command_result)
self.assertEqual(
os.path.join(results_base_path, self.TEST_NAME, 'results',
'results-chart.json'),
self.evaluator.LookupReportFile())
def testLookupReportFileMissing(self):
"""Tests LookupReportFile() when the report does not exist."""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
results_base_path = self.GetTestResultPath(self.evaluator)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'],
kwargs={'cwd': results_base_path, 'capture_output': True},
output='')
self.assertIsNone(self.evaluator.LookupReportFile())
def WriteTestResult(self, evaluator, score=0):
"""Writes a test result to evaluator's default location.
Args:
evaluator: Evaluator object.
score: score of the result.
Returns:
(path to test result file, result file's content)
"""
result_dir = self.GetTestResultPath(evaluator)
osutils.SafeMakedirs(result_dir)
result_path = os.path.join(result_dir, evaluator.RESULT_FILENAME)
result_content = self.AQUARIUM_REPORT_TEMPLATE % score
osutils.WriteFile(result_path, result_content)
return (result_path, result_content)
def testRunTestFromHost(self):
"""Tests TestFromHost().
Tests that it invokes expected commands and report file being copied to
designated path.
"""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
self.SkipMaySetupBoard()
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
report_path, report_content = self.WriteTestResult(self.evaluator)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'],
output=report_path)
# Make sure report file is copied to designated path.
target_report_file = os.path.join(self.tempdir, 'stored-results-chart.json')
osutils.SafeUnlink(target_report_file)
self.assertTrue(
self.evaluator.RunTestFromHost(self.DUT, target_report_file))
self.assertExists(target_report_file)
self.assertEqual(report_content, osutils.ReadFile(target_report_file))
def testRunTestFromHostTestThatFailWithEvalPassingOnly(self):
"""Tests TestFromHost() with failed autotest and --eval-passing-only."""
self.UpdateOptionsAndEvaluator(dict(eval_passing_only=True))
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
self.SkipMaySetupBoard()
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
self.assertFalse(self.evaluator.RunTestFromHost(self.DUT, self.REPORT_FILE))
def testRunTestFromHostTestThatFail(self):
"""Tests TestFromHost() with failed autotest.
It will try evaluating test result.
"""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
self.SkipMaySetupBoard()
# test_that failed.
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
# However, report is obtained successfully.
report_path, report_content = self.WriteTestResult(self.evaluator)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'],
output=report_path)
# Make sure report file is copied to designated path.
target_report_file = os.path.join(self.tempdir, 'stored-results-chart.json')
osutils.SafeUnlink(target_report_file)
self.assertTrue(
self.evaluator.RunTestFromHost(self.DUT, target_report_file))
self.assertExists(target_report_file)
self.assertEqual(report_content, osutils.ReadFile(target_report_file))
def testRunTestFromHostTestThatFailReportMissing(self):
"""Tests TestFromHost() with failed autotest and without report."""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
self.SkipMaySetupBoard()
# test_that failed.
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=1)
# And report file is missing.
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'], output='')
self.assertFalse(self.evaluator.RunTestFromHost(self.DUT, self.REPORT_FILE))
def testRunTestFromHostReportFileMissing(self):
"""Tests TestFromHost() when test report file does not exist."""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
self.SkipMaySetupBoard()
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'], output='')
self.assertFalse(self.evaluator.RunTestFromHost(self.DUT, self.REPORT_FILE))
def testGetAutotestMetricValue(self):
"""Tests that GetAutotestMetricValue() extracts score correctly."""
score = 56.73
report_file = self.PrepareWebglAquariumReports([score])[0]
self.assertEqual(score,
self.evaluator.GetAutotestMetricValue(report_file))
def testGetAutotestMetricValueMetricTakeAverage(self):
"""Tests that GetAutotestMetricValue() extracts averaged scores."""
# metric_take_average=True
self.UpdateOptionsAndEvaluator(dict(metric_take_average=True))
scores = [55, 57, 58]
# A report's value is a list of scores.
report_file = self.PrepareWebglAquariumReports([scores])[0]
self.assertAlmostEqual(56.66,
self.evaluator.GetAutotestMetricValue(report_file),
delta=0.01)
def testEvaluateRunTestFromDut(self):
"""Tests Evaluate() which runs test from DUT."""
# Mock RunTestFromDut success.
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
# Prepare result for evaluate.
score = 56.73
report_file = self.PrepareWebglAquariumReports([score])[0]
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, report_file], returncode=0,
kwargs={'check': False}, mock_attr='ScpToLocal')
eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL)
self.assertEqual(1, len(eval_score.values))
self.assertEqual(score, eval_score.values[0])
self.assertEqual(score, eval_score.mean)
self.assertEqual(0.0, eval_score.variance)
self.assertEqual(0.0, eval_score.std)
def testEvaluateTwiceRunTestFromDut(self):
"""Tests Evaluate() with repeat=2 which runs test from DUT."""
# Mock RunTestFromDut success.
rsh_mock = self.StartPatcher(RemoteShScpMock())
rsh_mock.AddCmdResult(
['rm', '-f', self.REMOTE_REPORT_FILE], returncode=0)
rsh_mock.AddCmdResult(
['ls', self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
rsh_mock.AddCmdResult(
[self.AUTOTEST_CLIENT, self.TEST_TARGET], returncode=0)
# Prepare two results for evaluate.
scores = [56, 58]
report_files = self.PrepareWebglAquariumReports(scores)
for report_file in report_files:
rsh_mock.AddCmdResult(
[self.REMOTE_REPORT_FILE, report_file], returncode=0,
mock_attr='ScpToLocal')
eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL, repeat=2)
self.assertEqual(2, len(eval_score.values))
self.assertEqual(scores[0], eval_score.values[0])
self.assertEqual(scores[1], eval_score.values[1])
self.assertEqual(57, eval_score.mean)
self.assertEqual(2.0, eval_score.variance)
self.assertAlmostEqual(1.414, eval_score.std, delta=0.01)
def SkipMaySetupBoard(self):
"""Let evaluator.MaySetupBoard() returns True without action.
It touches /build/{board} directory inside chroot so that MaySetupBoard()
thinks the board is already set up.
"""
osutils.SafeMakedirs(os.path.join(
self.evaluator.cros_dir, 'chroot', 'build', self.evaluator.board))
def testEvaluateFromHost(self):
"""Tests Evaluate() which runs test from host."""
# Mock RunTestFromDut fail.
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
command_mock.AddCmdResult(
partial_mock.InOrder(['rm', '-f', self.REMOTE_REPORT_FILE]),
returncode=0)
command_mock.AddCmdResult(
partial_mock.InOrder([self.AUTOTEST_CLIENT, self.TEST_TARGET]),
returncode=1)
self.SkipMaySetupBoard()
# Mock RunTestFromHost success.
command_mock.AddCmdResult(self.TEST_THAT_COMMAND, returncode=0)
# Mock 'find' and returns a result file for verify.
score = 59.9
report_file_in_chroot, _ = self.WriteTestResult(self.evaluator, score)
command_mock.AddCmdResult(
['find', '.', '-name', 'results-chart.json'],
output=report_file_in_chroot)
eval_score = self.evaluator.Evaluate(self.DUT, self.BUILD_LABEL)
self.assertEqual(1, len(eval_score.values))
self.assertEqual(score, eval_score.values[0])
self.assertEqual(score, eval_score.mean)
self.assertEqual(0.0, eval_score.variance)
self.assertEqual(0.0, eval_score.std)
def testCheckLastEvaluate(self):
"""Tests CheckLastEvaluate().
Test that it extracts score from last evaluation result.
"""
scores = [56, 58]
self.PrepareWebglAquariumReports(scores)
eval_score = self.evaluator.CheckLastEvaluate(self.BUILD_LABEL, repeat=2)
self.assertEqual(2, len(eval_score.values))
self.assertEqual(scores[0], eval_score.values[0])
self.assertEqual(scores[1], eval_score.values[1])
self.assertEqual(57, eval_score.mean)
self.assertEqual(2.0, eval_score.variance)
self.assertAlmostEqual(1.414, eval_score.std, delta=0.01)
def testCheckLastEvaluateDifferentLabel(self):
"""Tests that CheckLastEvaluate() failed to extracts score."""
scores = [56, 58]
self.PrepareWebglAquariumReports(scores)
eval_score = self.evaluator.CheckLastEvaluate('different_build', repeat=2)
self.assertEqual(0, len(eval_score))
def testCheckLastEvaluateFlagUnset(self):
"""Tests CheckLastEvaluate() when "reuse_eval" option is unset.
    It should always return an empty score in that case.
"""
# 'reuse_eval' set to False.
self.UpdateOptionsAndEvaluator(dict(reuse_eval=False))
scores = [56, 58]
self.PrepareWebglAquariumReports(scores)
eval_score = self.evaluator.CheckLastEvaluate(self.BUILD_LABEL, repeat=2)
self.assertEqual(0, len(eval_score))
def CreateCommandMockForRepo(self, cwd):
"""Creates a command mock and add commands "repo init" "repo sync".
Args:
cwd: Directory for running "repo init".
Returns:
command_mock object.
"""
command_mock = self.StartPatcher(cros_test_lib.RunCommandMock())
command_mock.AddCmdResult(
['repo', 'init', '--manifest-url',
'https://chromium.googlesource.com/chromiumos/manifest.git',
'--repo-url',
'https://chromium.googlesource.com/external/repo.git'],
kwargs={'cwd': cwd},
side_effect=repo_util_unittest.RepoInitSideEffects)
command_mock.AddCmdResult(
[repo_util_unittest.RepoCmdPath(cwd), 'sync', '--jobs', '8'],
kwargs={'cwd': cwd})
return command_mock
def testSetupCrosRepo(self):
"""Tests SetupCrosRepo() by verifying commands it emits."""
unused_command_mock = self.CreateCommandMockForRepo(self.evaluator.cros_dir)
self.evaluator.SetupCrosRepo()
def testMaySetupBoardAlreadyDone(self):
"""Tests MaySetupBoard() that board is already set."""
# mkdir board path inside chroot.
self.SkipMaySetupBoard()
self.assertTrue(self.evaluator.MaySetupBoard())
def testMaySetupBoard(self):
"""Tests MaySetupBoard()."""
command_mock = self.CreateCommandMockForRepo(self.evaluator.cros_dir)
kwargs_run_chroot = {
'enter_chroot': True,
'chroot_args': ['--chrome_root', self.evaluator.chromium_dir,
'--no-ns-pid'],
'cwd': self.evaluator.cros_dir}
command_mock.AddCmdResult(
['setup_board', '--board', self.BOARD], kwargs=kwargs_run_chroot)
command_mock.AddCmdResult(
['./build_packages', '--board', self.BOARD], kwargs=kwargs_run_chroot)
self.assertTrue(self.evaluator.MaySetupBoard())
def testMaySetupBoardBuildPackageFailed(self):
"""Tests MaySetupBoard()."""
command_mock = self.CreateCommandMockForRepo(self.evaluator.cros_dir)
kwargs_run_chroot = {
'enter_chroot': True,
'chroot_args': ['--chrome_root', self.evaluator.chromium_dir,
'--no-ns-pid'],
'cwd': self.evaluator.cros_dir}
command_mock.AddCmdResult(
['setup_board', '--board', self.BOARD], kwargs=kwargs_run_chroot)
command_mock.AddCmdResult(
['./build_packages', '--board', self.BOARD], kwargs=kwargs_run_chroot,
returncode=1)
self.assertFalse(self.evaluator.MaySetupBoard())
|
|
# Copyright (C) 2016 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_log import log as logging
from nova.objects import fields
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SRIOVServersTest(base.ServersTestBase):
vfs_alias_name = 'vfs'
pfs_alias_name = 'pfs'
def setUp(self):
white_list = ['{"vendor_id":"8086","product_id":"1528"}',
'{"vendor_id":"8086","product_id":"1515"}']
self.flags(passthrough_whitelist=white_list, group='pci')
        # PFs will be removed from pools unless they have been specifically
        # requested. This is especially needed in cases where PFs and VFs have
        # the same vendor/product id
pci_alias = ['{"vendor_id":"8086", "product_id":"1528", "name":"%s",'
' "device_type":"%s"}' % (self.pfs_alias_name,
fields.PciDeviceType.SRIOV_PF),
'{"vendor_id":"8086", "product_id":"1515", "name":"%s"}' %
self.vfs_alias_name]
self.flags(alias=pci_alias, group='pci')
super(SRIOVServersTest, self).setUp()
self.compute_started = False
# Mock the 'PciPassthroughFilter' filter, as most tests need to inspect
# this
host_manager = self.scheduler.manager.driver.host_manager
pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
_p = mock.patch('nova.scheduler.filters.pci_passthrough_filter'
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)
self.mock_filter = _p.start()
self.addCleanup(_p.stop)
def _setup_scheduler_service(self):
# Enable the 'NUMATopologyFilter', 'PciPassthroughFilter'
self.flags(driver='filter_scheduler', group='scheduler')
self.flags(enabled_filters=CONF.filter_scheduler.enabled_filters
+ ['NUMATopologyFilter', 'PciPassthroughFilter'],
group='filter_scheduler')
return self.start_service('scheduler')
def _run_build_test(self, flavor_id, end_status='ACTIVE'):
if not self.compute_started:
self.compute = self.start_service('compute', host='test_compute0')
self.compute_started = True
# Create server
good_server = self._build_server(flavor_id)
post = {'server': good_server}
created_server = self.api.post_server(post)
LOG.debug("created_server: %s", created_server)
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
# Validate that the server has been created
found_server = self.api.get_server(created_server_id)
self.assertEqual(created_server_id, found_server['id'])
# It should also be in the all-servers list
servers = self.api.get_servers()
server_ids = [s['id'] for s in servers]
self.assertIn(created_server_id, server_ids)
# Validate that PciPassthroughFilter has been called
self.assertTrue(self.mock_filter.called)
found_server = self._wait_for_state_change(found_server, 'BUILD')
self.assertEqual(end_status, found_server['status'])
self.addCleanup(self._delete_server, created_server_id)
return created_server
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_VF(self, img_mock):
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec = {"pci_passthrough:alias": "%s:1" % self.vfs_alias_name}
flavor_id = self._create_flavor(extra_spec=extra_spec)
self._run_build_test(flavor_id)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_PF(self, img_mock):
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo()
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec = {"pci_passthrough:alias": "%s:1" % self.pfs_alias_name}
flavor_id = self._create_flavor(extra_spec=extra_spec)
self._run_build_test(flavor_id)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_PF_no_VF(self, img_mock):
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1, num_vfs=4)
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec_pfs = {"pci_passthrough:alias": "%s:1" %
self.pfs_alias_name}
extra_spec_vfs = {"pci_passthrough:alias": "%s:1" %
self.vfs_alias_name}
flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
self._run_build_test(flavor_id_pfs)
self._run_build_test(flavor_id_vfs, end_status='ERROR')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_VF_no_PF(self, img_mock):
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1, num_vfs=4)
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec_pfs = {"pci_passthrough:alias": "%s:1" %
self.pfs_alias_name}
extra_spec_vfs = {"pci_passthrough:alias": "%s:1" %
self.vfs_alias_name}
flavor_id_pfs = self._create_flavor(extra_spec=extra_spec_pfs)
flavor_id_vfs = self._create_flavor(extra_spec=extra_spec_vfs)
self._run_build_test(flavor_id_vfs)
self._run_build_test(flavor_id_pfs, end_status='ERROR')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_pci_dev_and_numa(self, img_mock):
"""Verifies that an instance can be booted with cpu pinning and with an
assigned pci device.
"""
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1, numa_node=1)
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec = {"pci_passthrough:alias": "%s:1" % self.pfs_alias_name,
'hw:numa_nodes': '1',
'hw:cpu_policy': 'dedicated',
'hw:cpu_thread_policy': 'prefer'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
self._run_build_test(flavor_id)
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_create_server_with_pci_dev_and_numa_fails(self, img_mock):
"""This test ensures that it is not possible to allocated CPU and
memory resources from one NUMA node and a PCI device from another.
"""
host_info = fakelibvirt.NUMAHostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2,
kB_mem=15740000)
pci_info = fakelibvirt.HostPciSRIOVDevicesInfo(num_pfs=1, numa_node=0)
fake_connection = self._get_connection(host_info, pci_info)
self.mock_conn.return_value = fake_connection
# Create a flavor
extra_spec_vm = {'hw:cpu_policy': 'dedicated',
'hw:numa_node': '1'}
extra_spec = {'pci_passthrough:alias': '%s:1' % self.pfs_alias_name,
'hw:numa_nodes': '1',
'hw:cpu_policy': 'dedicated',
'hw:cpu_thread_policy': 'prefer'}
vm_flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec_vm)
pf_flavor_id = self._create_flavor(extra_spec=extra_spec)
self._run_build_test(vm_flavor_id)
self._run_build_test(pf_flavor_id, end_status='ERROR')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Tests for saving/loading function for keras Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.contrib.saved_model.python.saved_model import keras_saved_model
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
class TestModelSavingandLoading(test.TestCase):
def test_saving_sequential_model(self):
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
ref_y = model.predict(x)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
keras_saved_model.save_model(model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@test_util.run_in_graph_and_eager_modes
def test_saving_sequential_model_without_compile(self):
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
ref_y = model.predict(x)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
keras_saved_model.save_model(model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
def test_saving_functional_model(self):
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
ref_y = model.predict(x)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
keras_saved_model.save_model(model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@test_util.run_in_graph_and_eager_modes
def test_saving_functional_model_without_compile(self):
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
x = np.random.random((1, 3))
y = np.random.random((1, 3))
ref_y = model.predict(x)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
keras_saved_model.save_model(model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
@test_util.run_in_graph_and_eager_modes
def test_saving_with_tf_optimizer(self):
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
ref_y = model.predict(x)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
keras_saved_model.save_model(model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
loaded_model.compile(
loss='mse',
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc'])
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3))
ref_loss = model.train_on_batch(x, y)
loss = loaded_model.train_on_batch(x, y)
self.assertAllClose(ref_loss, loss, atol=1e-05)
ref_y = model.predict(x)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
# test saving/loading again
keras_saved_model.save_model(loaded_model, temp_saved_model)
loaded_model = keras_saved_model.load_model(temp_saved_model)
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)
def test_saving_subclassed_model_raise_error(self):
    # For now, saving a subclassed model should raise an error. This restriction
    # should be lifted later, once loading from SavedModel.pb is supported.
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(1)
def call(self, inp):
return self.layer2(self.layer1(inp))
model = SubclassedModel()
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
temp_saved_model = os.path.join(temp_dir, 'saved_model')
with self.assertRaises(NotImplementedError):
keras_saved_model.save_model(model, temp_saved_model)
if __name__ == '__main__':
test.main()
|
|
import collections
import enum
import random
import typing as tp
from satella.coding.decorators import for_argument
from .jsonify import jsonify
from .merger import merge_series
from .merge_list import merge_list
from .percentile import percentile
from .base64 import b64encode
from .interpol import linear_interpolate
from .words import hashables_to_int
from .predicates import is_subset
__all__ = ['stringify', 'split_shuffle_and_join', 'one_tuple', 'none_if_false',
'merge_series', 'pad_to_multiple_of_length', 'clip', 'hashables_to_int',
'jsonify', 'intify', 'percentile', 'b64encode', 'linear_interpolate',
'merge_list', 'is_subset', 'unpack_dict']
from satella.coding.typing import T, NoArgCallable, Appendable, Number, Predicate, K, V
def unpack_dict(dct: tp.Dict[K, V], *args: K,
map_through: tp.Callable[[V], V] = lambda y: y,
raise_if_not_found: bool = True) -> tp.Iterator[V]:
"""
    Unpack a dictionary by accessing its given keys in parallel.
Example:
>>> a, b, c = unpack_dict({1:2, 2:3, 4:5}, 1, 2, 4)
>>> assert a == 2 and b == 3 and c == 5
:param dct: dictionary to unpack
:param args: keys in this dictionary
    :param map_through: a keyword argument; a callable that will be called with
        each value and whose result will be yielded instead
    :param raise_if_not_found: if True (the default), a KeyError will be raised
        upon encountering a key that does not exist. If set to False, None will
        be yielded instead.
:return: an iterator
:raises KeyError: a key was not found
"""
for key in args:
try:
yield map_through(dct[key])
except KeyError:
if not raise_if_not_found:
yield None
else:
raise
def none_if_false(y: tp.Any) -> tp.Optional[tp.Any]:
"""
Return None if y is false, else return y
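    Example:
    >>> assert none_if_false([]) is None
    >>> assert none_if_false(5) == 5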
:param y: value to check
:return: None if y is false, else y
"""
if not y:
return None
return y
def clip(v: Number, minimum: Number, maximum: Number) -> Number:
"""
Clip v so it conforms to minimum <= v <= maximum
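    Example:
    >>> assert clip(5, 0, 3) == 3
    >>> assert clip(-1, 0, 3) == 0
    >>> assert clip(2, 0, 3) == 2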
:param v: value to clip
:param minimum: minimum
:param maximum: maximum
:return: clipped value
"""
if v < minimum:
return minimum
if v > maximum:
return maximum
return v
@for_argument(list)
def pad_to_multiple_of_length(seq: Appendable[T], multiple_of: int,
pad_with: tp.Optional[T] = None,
pad_with_factory: tp.Optional[NoArgCallable[T]] = None) -> \
Appendable[T]:
"""
    Make the sequence a multiple of a given length, ie. append elements to the
    sequence until its length is a multiple of multiple_of.
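    Example:
    >>> assert pad_to_multiple_of_length([1, 2, 3], 4, pad_with=0) == [1, 2, 3, 0]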
:param seq: sequence to lengthify
:param multiple_of: sequence returned will be a multiple of this length.
:param pad_with: argument with which to pad the sequence
:param pad_with_factory: a callable/0 that returns an element with which to pad the sequence
    :return: the padded sequence, as a list
"""
if pad_with is not None and pad_with_factory is not None:
        raise ValueError('Give either pad_with or pad_with_factory, not both')
if pad_with_factory is None:
def pad_with_factory():
return pad_with
while len(seq) % multiple_of:
seq.append(pad_with_factory())
return seq
def _stringify_none(str_none: bool,
stringifier: tp.Callable[[tp.Any], str]) -> tp.Optional[str]:
if str_none:
return stringifier(None)
return None
def one_tuple(x: tp.Iterable[T]) -> tp.Iterator[tp.Tuple[T]]:
"""
    Change an iterable into an iterator that yields each element wrapped in a
    one-element tuple. Essentially syntactic sugar for:
>>> for z in x:
>>> yield z,
:param x: sequence to tupleify
    :return: an iterator of one-element tuples
"""
for z in x:
yield z,
def intify(v: tp.Any) -> int:
"""
Attempt to convert v to an int.
None will be converted to 0.
Any object will have int() called on it.
    Failing that, its length will be taken.
    Failing that, ValueError will be raised.
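    Example:
    >>> assert intify(None) == 0
    >>> assert intify('7') == 7
    >>> assert intify([1, 2, 3]) == 3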
:param v: parameter
:return: int representation
"""
if v is None:
return 0
try:
return int(v)
except (TypeError, ValueError):
try:
return len(v)
except (AttributeError, TypeError, ValueError):
raise ValueError('Unable to convert %s to int' % (v,))
def split_shuffle_and_join(entries: tp.List[T],
whether_to_shuffle: Predicate[T] = lambda x: True,
not_shuffled_to_front: bool = True) -> tp.List[T]:
"""
Split elements in entries into two groups - one group, called True, is the one for which
whether_to_shuffle(elem) is True, the other is False.
Shuffle the group True.
    If not_shuffled_to_front is True, elements of the group False will go at the
    beginning of the returned list, followed by the shuffled elements. If it is
    False, the not-shuffled elements will be at the back of the list.
Order of the not shuffled elements will be preserved.
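    Example:
    >>> result = split_shuffle_and_join([1, 2, 3, 4], lambda x: x > 2)
    >>> assert result[:2] == [1, 2] and set(result[2:]) == {3, 4}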
:param entries: list of elements
    :param whether_to_shuffle: a predicate deciding to which group a given element belongs
:param not_shuffled_to_front: if True then not shuffled elements will be put before shuffled,
else the not shuffled elements will be at the back of the list
:return: list altered to specification
"""
not_shuffled, shuffled = [], []
for elem in entries:
(shuffled if whether_to_shuffle(elem) else not_shuffled).append(elem)
random.shuffle(shuffled)
if not_shuffled_to_front:
return not_shuffled + shuffled
else:
return shuffled + not_shuffled
def stringify(obj: tp.Union[tp.Any], stringifier: tp.Callable[[tp.Any], str] = str,
recursively: bool = False,
str_none: bool = False) -> tp.Union[tp.List[str], tp.Dict[str, str], str]:
"""
    Stringify the given object:
    ie. if a dict is given, put every key and value through stringify,
    if a list, put every item through stringify,
    else just call the stringifier on it.
Note that if you use recursively, then dicts and lists are allowed to be valid elements of the
returned representation!
Note that enums will be converted to their labels. eg:
>>> class Enum(enum.Enum):
>>> A = 0
>>> assert stringify(Enum.A) == 'A'
:param obj: a list or a dict
:param stringifier: function that accepts any arguments and returns a string representation
:param recursively: whether to recursively stringify elements, ie. stringify will be called on
all the children
    :param str_none: whether to stringify a None value. If True, the stringifier is
        applied to None (e.g. returning "None"); if False, None is returned as-is
:return: stringified object
"""
if isinstance(obj, str):
y = obj
elif isinstance(obj, enum.Enum):
y = obj.name
elif isinstance(obj, collections.abc.Mapping):
make_str = (lambda obj2: stringify(obj2, stringifier, True, str_none)) if recursively else \
stringifier
y = {make_str(k): make_str(v) for k, v in obj.items()}
elif isinstance(obj, collections.abc.Sequence):
make_str = (lambda obj2: stringify(obj2, stringifier, True, str_none)) if recursively else \
stringifier
y = [make_str(v) for v in obj]
elif obj is None:
y = _stringify_none(str_none, stringifier)
else:
y = stringifier(obj)
return y
|
|
import logging
import uuid
from django.core.exceptions import PermissionDenied
from modelcluster.models import get_all_child_relations
from wagtail.core.log_actions import log
from wagtail.core.models.copying import _copy, _copy_m2m_relations
from wagtail.core.models.i18n import TranslatableMixin
from wagtail.core.signals import page_published
logger = logging.getLogger("wagtail.core")
class CopyPageIntegrityError(RuntimeError):
"""
Raised when the page copy cannot be performed for data integrity reasons.
"""
pass
class CopyPagePermissionError(PermissionDenied):
"""
Raised when the page copy cannot be performed due to insufficient permissions.
"""
pass
class CopyPageAction:
"""
Copies pages and page trees.
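    A minimal usage sketch (illustrative only; ``destination`` and ``request``
    are hypothetical, and it assumes the ``execute()`` entry point shared by
    Wagtail page actions):

        action = CopyPageAction(page, to=destination, recursive=True, user=request.user)
        action.check()
        new_page = action.execute()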
"""
def __init__(
self,
page,
to=None,
update_attrs=None,
exclude_fields=None,
recursive=False,
copy_revisions=True,
keep_live=True,
user=None,
process_child_object=None,
log_action="wagtail.copy",
reset_translation_key=True,
):
# Note: These four parameters don't apply to any copied children
self.page = page
self.to = to
self.update_attrs = update_attrs
self.exclude_fields = exclude_fields
self.recursive = recursive
self.copy_revisions = copy_revisions
self.keep_live = keep_live
self.user = user
self.process_child_object = process_child_object
self.log_action = log_action
self.reset_translation_key = reset_translation_key
def check(self, skip_permission_checks=False):
from wagtail.core.models import UserPagePermissionsProxy
# Essential data model checks
if self.page._state.adding:
raise CopyPageIntegrityError("Page.copy() called on an unsaved page")
if (
self.to
and self.recursive
and (self.to.id == self.page.id or self.to.is_descendant_of(self.page))
):
raise CopyPageIntegrityError(
"You cannot copy a tree branch recursively into itself"
)
# Permission checks
if self.user and not skip_permission_checks:
to = self.to
if to is None:
to = self.page.get_parent()
if not self.page.permissions_for_user(self.user).can_copy_to(
to, self.recursive
):
raise CopyPagePermissionError(
"You do not have permission to copy this page"
)
if self.keep_live:
destination_perms = UserPagePermissionsProxy(self.user).for_page(
self.to
)
if not destination_perms.can_publish_subpage():
raise CopyPagePermissionError(
"You do not have permission to publish a page at the destination"
)
def _copy_page(
self, page, to=None, update_attrs=None, exclude_fields=None, _mpnode_attrs=None
):
exclude_fields = (
page.default_exclude_fields_in_copy
+ page.exclude_fields_in_copy
+ (exclude_fields or [])
)
specific_page = page.specific
if self.keep_live:
base_update_attrs = {
"alias_of": None,
}
else:
base_update_attrs = {
"live": False,
"has_unpublished_changes": True,
"live_revision": None,
"first_published_at": None,
"last_published_at": None,
"alias_of": None,
}
if self.user:
base_update_attrs["owner"] = self.user
# When we're not copying for translation, we should give the translation_key a new value
if self.reset_translation_key:
base_update_attrs["translation_key"] = uuid.uuid4()
if update_attrs:
base_update_attrs.update(update_attrs)
page_copy, child_object_map = _copy(
specific_page, exclude_fields=exclude_fields, update_attrs=base_update_attrs
)
# Save copied child objects and run process_child_object on them if we need to
for (child_relation, old_pk), child_object in child_object_map.items():
if self.process_child_object:
self.process_child_object(
specific_page, page_copy, child_relation, child_object
)
# When we're not copying for translation, we should give the translation_key a new value for each child object as well
if self.reset_translation_key and isinstance(
child_object, TranslatableMixin
):
child_object.translation_key = uuid.uuid4()
# Save the new page
if _mpnode_attrs:
# We've got a tree position already reserved. Perform a quick save
page_copy.path = _mpnode_attrs[0]
page_copy.depth = _mpnode_attrs[1]
page_copy.save(clean=False)
else:
if to:
page_copy = to.add_child(instance=page_copy)
else:
page_copy = page.add_sibling(instance=page_copy)
_mpnode_attrs = (page_copy.path, page_copy.depth)
_copy_m2m_relations(
specific_page,
page_copy,
exclude_fields=exclude_fields,
update_attrs=base_update_attrs,
)
# Copy revisions
if self.copy_revisions:
for revision in page.revisions.all():
revision.pk = None
revision.submitted_for_moderation = False
revision.approved_go_live_at = None
revision.page = page_copy
# Update ID fields in content
revision_content = revision.content
revision_content["pk"] = page_copy.pk
for child_relation in get_all_child_relations(specific_page):
accessor_name = child_relation.get_accessor_name()
try:
child_objects = revision_content[accessor_name]
except KeyError:
# KeyErrors are possible if the revision was created
# before this child relation was added to the database
continue
for child_object in child_objects:
child_object[child_relation.field.name] = page_copy.pk
# Remap primary key to copied versions
                        # If the primary key is not recognised (e.g. the child object has been deleted from the database),
# set the primary key to None
copied_child_object = child_object_map.get(
(child_relation, child_object["pk"])
)
child_object["pk"] = (
copied_child_object.pk if copied_child_object else None
)
revision.content = revision_content
# Save
revision.save()
# Create a new revision
# This code serves a few purposes:
# * It makes sure update_attrs gets applied to the latest revision
# * It bumps the last_revision_created_at value so the new page gets ordered as if it was just created
# * It sets the user of the new revision so it's possible to see who copied the page by looking at its history
latest_revision = page_copy.get_latest_revision_as_page()
if update_attrs:
for field, value in update_attrs.items():
setattr(latest_revision, field, value)
latest_revision_as_page_revision = latest_revision.save_revision(
user=self.user, changed=False, clean=False
)
if self.keep_live:
page_copy.live_revision = latest_revision_as_page_revision
page_copy.last_published_at = latest_revision_as_page_revision.created_at
page_copy.first_published_at = latest_revision_as_page_revision.created_at
page_copy.save(clean=False)
if page_copy.live:
page_published.send(
sender=page_copy.specific_class,
instance=page_copy,
revision=latest_revision_as_page_revision,
)
# Log
if self.log_action:
parent = specific_page.get_parent()
log(
instance=page_copy,
action=self.log_action,
user=self.user,
data={
"page": {
"id": page_copy.id,
"title": page_copy.get_admin_display_title(),
"locale": {
"id": page_copy.locale_id,
"language_code": page_copy.locale.language_code,
},
},
"source": {
"id": parent.id,
"title": parent.specific_deferred.get_admin_display_title(),
}
if parent
else None,
"destination": {
"id": to.id,
"title": to.specific_deferred.get_admin_display_title(),
}
if to
else None,
"keep_live": page_copy.live and self.keep_live,
"source_locale": {
"id": page.locale_id,
"language_code": page.locale.language_code,
},
},
)
if page_copy.live and self.keep_live:
                # Log the publish if the user chose to keep the copied page live
log(
instance=page_copy,
action="wagtail.publish",
user=self.user,
revision=latest_revision_as_page_revision,
)
logger.info(
'Page copied: "%s" id=%d from=%d', page_copy.title, page_copy.id, page.id
)
# Copy child pages
from wagtail.core.models import Page
if self.recursive:
numchild = 0
for child_page in page.get_children().specific():
newdepth = _mpnode_attrs[1] + 1
child_mpnode_attrs = (
Page._get_path(_mpnode_attrs[0], newdepth, numchild),
newdepth,
)
numchild += 1
self._copy_page(
child_page, to=page_copy, _mpnode_attrs=child_mpnode_attrs
)
if numchild > 0:
page_copy.numchild = numchild
page_copy.save(clean=False, update_fields=["numchild"])
return page_copy
def execute(self, skip_permission_checks=False):
self.check(skip_permission_checks=skip_permission_checks)
return self._copy_page(
self.page,
to=self.to,
update_attrs=self.update_attrs,
exclude_fields=self.exclude_fields,
)
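# Example usage (a sketch; `page`, `destination` and `request.user` are illustrative
# names from a typical Wagtail view, not part of this module):
#
#     action = CopyPageAction(
#         page,
#         to=destination,
#         update_attrs={"title": "Copy of page", "slug": "copy-of-page"},
#         recursive=False,
#         keep_live=False,
#         user=request.user,
#     )
#     new_page = action.execute()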
|
|
""" Test Iterator Length Transparency
Some functions or methods which accept general iterable arguments have
optional, more efficient code paths if they know how many items to expect.
For instance, map(func, iterable) will pre-allocate the exact amount of
space required whenever the iterable can report its length.
The desired invariant is: len(it)==len(list(it)).
A complication is that an iterable and iterator can be the same object. To
maintain the invariant, an iterator needs to dynamically update its length.
For instance, an iterable such as range(10) always reports its length as ten,
but it=iter(range(10)) starts at ten, and then goes to nine after next(it).
Having this capability means that map() can ignore the distinction between
map(func, iterable) and map(func, iter(iterable)).
When the iterable is immutable, the implementation can straightforwardly
report the original length minus the cumulative number of calls to next().
This is the case for tuples, range objects, and itertools.repeat().
Some containers become temporarily immutable during iteration. This includes
dicts, sets, and collections.deque. Their implementation is equally simple
though they need to permanently set their length to zero whenever there is
an attempt to iterate after a length mutation.
The situation is slightly more involved whenever an object allows length mutation
during iteration. Lists and sequence iterators are dynamically updatable.
So, if a list is extended during iteration, the iterator will continue through
the new items. If it shrinks to a point before the most recent iteration,
then no further items are available and the length is reported at zero.
Reversed objects can also be wrapped around mutable objects; however, any
appends after the current position are ignored. Any other approach leads
to confusion and possibly returning the same item more than once.
The iterators not listed above, such as enumerate and the other itertools,
are not length transparent because they have no way to distinguish between
iterables that report static length and iterators whose length changes with
each call (i.e. the difference between enumerate('abc') and
enumerate(iter('abc'))).
"""
import sys
import unittest
from test import support
from itertools import repeat
from collections import deque
from operator import length_hint
n = 10
class TestInvariantWithoutMutations:
def test_invariant(self):
it = self.it
for i in reversed(range(1, n+1)):
self.assertEqual(length_hint(it), i)
next(it)
self.assertEqual(length_hint(it), 0)
self.assertRaises(StopIteration, next, it)
self.assertEqual(length_hint(it), 0)
class TestTemporarilyImmutable(TestInvariantWithoutMutations):
def test_immutable_during_iteration(self):
# objects such as deques, sets, and dictionaries enforce
# length immutability during iteration
it = self.it
self.assertEqual(length_hint(it), n)
next(it)
self.assertEqual(length_hint(it), n-1)
self.mutate()
self.assertRaises(RuntimeError, next, it)
self.assertEqual(length_hint(it), 0)
## ------- Concrete Type Tests -------
class TestRepeat(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = repeat(None, n)
class TestXrange(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = iter(range(n))
class TestXrangeCustomReversed(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = reversed(range(n))
class TestTuple(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = iter(tuple(range(n)))
## ------- Types that should not be mutated during iteration -------
class TestDeque(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = deque(range(n))
self.it = iter(d)
self.mutate = d.pop
class TestDequeReversed(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = deque(range(n))
self.it = reversed(d)
self.mutate = d.pop
class TestDictKeys(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d)
self.mutate = d.popitem
class TestDictItems(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.items())
self.mutate = d.popitem
class TestDictValues(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = dict.fromkeys(range(n))
self.it = iter(d.values())
self.mutate = d.popitem
class TestSet(TestTemporarilyImmutable, unittest.TestCase):
def setUp(self):
d = set(range(n))
self.it = iter(d)
self.mutate = d.pop
## ------- Types that can mutate during iteration -------
class TestList(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = iter(range(n))
@unittest.skipIf(sys.implementation.name=='ironpython', 'Test crashing IronPython test suite')
def test_mutation(self):
d = list(range(n))
it = iter(d)
next(it)
next(it)
self.assertEqual(length_hint(it), n - 2)
d.append(n)
self.assertEqual(length_hint(it), n - 1) # grow with append
d[1:] = []
self.assertEqual(length_hint(it), 0)
self.assertEqual(list(it), [])
d.extend(range(20))
self.assertEqual(length_hint(it), 0)
class TestListReversed(TestInvariantWithoutMutations, unittest.TestCase):
def setUp(self):
self.it = reversed(range(n))
@unittest.skipIf(sys.implementation.name=='ironpython', 'https://github.com/IronLanguages/ironpython2/issues/387')
def test_mutation(self):
d = list(range(n))
it = reversed(d)
next(it)
next(it)
self.assertEqual(length_hint(it), n - 2)
d.append(n)
self.assertEqual(length_hint(it), n - 2) # ignore append
d[1:] = []
self.assertEqual(length_hint(it), 0)
self.assertEqual(list(it), []) # confirm invariant
d.extend(range(20))
self.assertEqual(length_hint(it), 0)
## -- Check to make sure exceptions are not suppressed by __length_hint__()
class BadLen(object):
def __iter__(self):
return iter(range(10))
def __len__(self):
raise RuntimeError('hello')
class BadLengthHint(object):
def __iter__(self):
return iter(range(10))
def __length_hint__(self):
raise RuntimeError('hello')
class NoneLengthHint(object):
def __iter__(self):
return iter(range(10))
def __length_hint__(self):
return NotImplemented
class TestLengthHintExceptions(unittest.TestCase):
def test_issue1242657(self):
self.assertRaises(RuntimeError, list, BadLen())
self.assertRaises(RuntimeError, list, BadLengthHint())
self.assertRaises(RuntimeError, [].extend, BadLen())
self.assertRaises(RuntimeError, [].extend, BadLengthHint())
b = bytearray(range(10))
self.assertRaises(RuntimeError, b.extend, BadLen())
self.assertRaises(RuntimeError, b.extend, BadLengthHint())
def test_invalid_hint(self):
# Make sure an invalid result doesn't muck-up the works
self.assertEqual(list(NoneLengthHint()), list(range(10)))
if __name__ == "__main__":
unittest.main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ENGINE_TOPIC = 'engine'
LISTENER_TOPIC = 'heat-engine-listener'
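# The *_KEYS constants below use chained assignment: the right-hand tuple of strings
# is bound both to the individual names (e.g. PARAM_TIMEOUT) and to the containing
# tuple (e.g. PARAM_KEYS), so each key string is written only once.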
PARAM_KEYS = (
PARAM_TIMEOUT, PARAM_DISABLE_ROLLBACK, PARAM_ADOPT_STACK_DATA,
PARAM_SHOW_DELETED, PARAM_SHOW_NESTED, PARAM_EXISTING,
PARAM_CLEAR_PARAMETERS, PARAM_GLOBAL_TENANT, PARAM_LIMIT,
PARAM_NESTED_DEPTH, PARAM_TAGS, PARAM_SHOW_HIDDEN, PARAM_TAGS_ANY,
PARAM_NOT_TAGS, PARAM_NOT_TAGS_ANY, TEMPLATE_TYPE, PARAM_WITH_DETAIL,
RESOLVE_OUTPUTS, PARAM_IGNORE_ERRORS, PARAM_CONVERGE
) = (
'timeout_mins', 'disable_rollback', 'adopt_stack_data',
'show_deleted', 'show_nested', 'existing',
'clear_parameters', 'global_tenant', 'limit',
'nested_depth', 'tags', 'show_hidden', 'tags_any',
'not_tags', 'not_tags_any', 'template_type', 'with_detail',
'resolve_outputs', 'ignore_errors', 'converge'
)
STACK_KEYS = (
STACK_NAME, STACK_ID,
STACK_CREATION_TIME, STACK_UPDATED_TIME, STACK_DELETION_TIME,
STACK_NOTIFICATION_TOPICS,
STACK_DESCRIPTION, STACK_TMPL_DESCRIPTION,
STACK_PARAMETERS, STACK_OUTPUTS, STACK_ACTION,
STACK_STATUS, STACK_STATUS_DATA, STACK_CAPABILITIES,
STACK_DISABLE_ROLLBACK, STACK_TIMEOUT, STACK_OWNER,
STACK_PARENT, STACK_USER_PROJECT_ID, STACK_TAGS
) = (
'stack_name', 'stack_identity',
'creation_time', 'updated_time', 'deletion_time',
'notification_topics',
'description', 'template_description',
'parameters', 'outputs', 'stack_action',
'stack_status', 'stack_status_reason', 'capabilities',
'disable_rollback', 'timeout_mins', 'stack_owner',
'parent', 'stack_user_project_id', 'tags'
)
STACK_OUTPUT_KEYS = (
OUTPUT_DESCRIPTION,
OUTPUT_KEY, OUTPUT_VALUE,
OUTPUT_ERROR,
) = (
'description',
'output_key', 'output_value',
'output_error',
)
RES_KEYS = (
RES_DESCRIPTION, RES_CREATION_TIME, RES_UPDATED_TIME,
RES_NAME, RES_PHYSICAL_ID, RES_METADATA,
RES_ACTION, RES_STATUS, RES_STATUS_DATA,
RES_TYPE, RES_ID, RES_STACK_ID, RES_STACK_NAME,
RES_REQUIRED_BY, RES_NESTED_STACK_ID, RES_NESTED_RESOURCES,
RES_PARENT_RESOURCE, RES_PROPERTIES, RES_ATTRIBUTES,
) = (
'description', 'creation_time', 'updated_time',
'resource_name', 'physical_resource_id', 'metadata',
'resource_action', 'resource_status', 'resource_status_reason',
'resource_type', 'resource_identity', STACK_ID, STACK_NAME,
'required_by', 'nested_stack_id', 'nested_resources',
'parent_resource', 'properties', 'attributes',
)
RES_SCHEMA_KEYS = (
RES_SCHEMA_RES_TYPE, RES_SCHEMA_PROPERTIES, RES_SCHEMA_ATTRIBUTES,
RES_SCHEMA_SUPPORT_STATUS, RES_SCHEMA_DESCRIPTION
) = (
RES_TYPE, 'properties', 'attributes', 'support_status', 'description'
)
EVENT_KEYS = (
EVENT_ID,
EVENT_STACK_ID, EVENT_STACK_NAME,
EVENT_TIMESTAMP,
EVENT_RES_NAME, EVENT_RES_PHYSICAL_ID, EVENT_RES_ACTION,
EVENT_RES_STATUS, EVENT_RES_STATUS_DATA, EVENT_RES_TYPE,
EVENT_RES_PROPERTIES, EVENT_ROOT_STACK_ID
) = (
'event_identity',
STACK_ID, STACK_NAME,
'event_time',
RES_NAME, RES_PHYSICAL_ID, RES_ACTION,
RES_STATUS, RES_STATUS_DATA, RES_TYPE,
'resource_properties', 'root_stack_id'
)
NOTIFY_KEYS = (
NOTIFY_TENANT_ID,
NOTIFY_USER_ID,
NOTIFY_USERID,
NOTIFY_USERNAME,
NOTIFY_STACK_ID,
NOTIFY_STACK_NAME,
NOTIFY_STATE,
NOTIFY_STATE_REASON,
NOTIFY_CREATE_AT,
NOTIFY_DESCRIPTION,
NOTIFY_UPDATE_AT,
NOTIFY_TAGS,
) = (
'tenant_id',
'user_id',
'user_identity',
'username',
STACK_ID,
STACK_NAME,
'state',
'state_reason',
'create_at',
STACK_DESCRIPTION,
'updated_at',
STACK_TAGS,
)
VALIDATE_PARAM_KEYS = (
PARAM_TYPE, PARAM_DEFAULT, PARAM_NO_ECHO,
PARAM_ALLOWED_VALUES, PARAM_ALLOWED_PATTERN, PARAM_MAX_LENGTH,
PARAM_MIN_LENGTH, PARAM_MAX_VALUE, PARAM_MIN_VALUE,
PARAM_STEP, PARAM_OFFSET,
PARAM_DESCRIPTION, PARAM_CONSTRAINT_DESCRIPTION, PARAM_LABEL,
PARAM_CUSTOM_CONSTRAINT, PARAM_VALUE, PARAM_TAG
) = (
'Type', 'Default', 'NoEcho',
'AllowedValues', 'AllowedPattern', 'MaxLength',
'MinLength', 'MaxValue', 'MinValue', 'Step', 'Offset',
'Description', 'ConstraintDescription', 'Label',
'CustomConstraint', 'Value', 'Tags'
)
VALIDATE_PARAM_TYPES = (
PARAM_TYPE_STRING, PARAM_TYPE_NUMBER, PARAM_TYPE_COMMA_DELIMITED_LIST,
PARAM_TYPE_JSON, PARAM_TYPE_BOOLEAN
) = (
'String', 'Number', 'CommaDelimitedList',
'Json', 'Boolean'
)
SOFTWARE_CONFIG_KEYS = (
SOFTWARE_CONFIG_ID,
SOFTWARE_CONFIG_NAME,
SOFTWARE_CONFIG_GROUP,
SOFTWARE_CONFIG_CONFIG,
SOFTWARE_CONFIG_INPUTS,
SOFTWARE_CONFIG_OUTPUTS,
SOFTWARE_CONFIG_OPTIONS,
SOFTWARE_CONFIG_CREATION_TIME,
SOFTWARE_CONFIG_PROJECT
) = (
'id',
'name',
'group',
'config',
'inputs',
'outputs',
'options',
'creation_time',
'project'
)
SOFTWARE_DEPLOYMENT_KEYS = (
SOFTWARE_DEPLOYMENT_ID,
SOFTWARE_DEPLOYMENT_CONFIG_ID,
SOFTWARE_DEPLOYMENT_SERVER_ID,
SOFTWARE_DEPLOYMENT_INPUT_VALUES,
SOFTWARE_DEPLOYMENT_OUTPUT_VALUES,
SOFTWARE_DEPLOYMENT_ACTION,
SOFTWARE_DEPLOYMENT_STATUS,
SOFTWARE_DEPLOYMENT_STATUS_REASON,
SOFTWARE_DEPLOYMENT_CREATION_TIME,
SOFTWARE_DEPLOYMENT_UPDATED_TIME
) = (
'id',
'config_id',
'server_id',
'input_values',
'output_values',
'action',
'status',
'status_reason',
'creation_time',
'updated_time'
)
SOFTWARE_DEPLOYMENT_STATUSES = (
SOFTWARE_DEPLOYMENT_IN_PROGRESS,
SOFTWARE_DEPLOYMENT_FAILED,
SOFTWARE_DEPLOYMENT_COMPLETE
) = (
'IN_PROGRESS',
'FAILED',
'COMPLETE'
)
SOFTWARE_DEPLOYMENT_OUTPUTS = (
SOFTWARE_DEPLOYMENT_OUTPUT_STDOUT,
SOFTWARE_DEPLOYMENT_OUTPUT_STDERR,
SOFTWARE_DEPLOYMENT_OUTPUT_STATUS_CODE
) = (
'deploy_stdout',
'deploy_stderr',
'deploy_status_code'
)
SNAPSHOT_KEYS = (
SNAPSHOT_ID,
SNAPSHOT_NAME,
SNAPSHOT_STACK_ID,
SNAPSHOT_DATA,
SNAPSHOT_STATUS,
SNAPSHOT_STATUS_REASON,
SNAPSHOT_CREATION_TIME,
) = (
'id',
'name',
'stack_id',
'data',
'status',
'status_reason',
'creation_time'
)
THREAD_MESSAGES = (THREAD_CANCEL,
THREAD_CANCEL_WITH_ROLLBACK
) = ('cancel', 'cancel_with_rollback')
|
|
# -*- coding: utf-8 -*-
import datetime, time, csv, os
import numpy as np
import matplotlib.pyplot as plt
from utils.rwlogging import log
from utils.rwlogging import strategyLogger as logs
from utils.rwlogging import balLogger as logb
from utils.rwlogging import tradesLogger as logtr
from indicator import ma, macd, bolling, rsi, kdj
#from voters.pool import VoterPool
from voters import pool
from strader import Trader
prices = areas = l = None
COUNT_DOWN = 24
def runVoter(in_prices):
global prices, areas, l
prices = in_prices
directs, slopes, stds, areas = pool.calc_variants(prices)
l = len(prices)
ps = [p['close'] for p in prices]
mafront, maOps = getMaOps(ps)
doTrade('MA', mafront, maOps)
return
def getMaOps(ps):
ops = [0, 0, 0, 0]
ops[0] = genMaOps(ps, 'MA', 29, 'SMA', 90)
ops[1] = genMaOps(ps, 'SMA', 23, 'EMA', 45)
ops[2] = genMaOps(ps, 'SMA', 23, 'EMA', 45)
ops[3] = genMaOps(ps, 'SMA', 8, 'EMA', 10)
front = 85
fops = [0] * l
for i in range(l):
fops[i] = ops[areas[i]][i]
#if areas[i] == 0: fops[i] = 0
#if areas[i] == 1: fops[i] = 0
#if areas[i] == 2: fops[i] = 0
return front, fops
def doTrade(tname, front, ops):
pools = pool.VoterPool(1, prices)
pools.estimate(tname, ops, front)
pools.showVoters()
t = Trader(tname)
lastArea = -1
countdowns = [COUNT_DOWN] * 4
vols = [0] * 4
for i in range(front, l):
price = prices[i]
volume = 0
area = areas[i]
if pool.checkTime(price['dt']):
if vols[area] == 0:
notes = 'AREA:' + str(area) + ';VOL:' + str(vols[area]) + '->' + str(ops[i]) + ';'
volume = ops[i] - vols[area]
vols[area] = ops[i]
countdowns[area] = COUNT_DOWN
if volume == 0: notes = ''
for j in range(4):
#if j == area: continue
if countdowns[j] > 0: countdowns[j] -= 1
if countdowns[j] == 0 and vols[j] != 0:
volume -= vols[j]
notes += 'CLOSE AREA:' + str(j) + ';VOL:' + str(vols[j]) + '->0;'
vols[j] = 0
else: # not trading time
for j in range(4):
if countdowns[j] > 0: countdowns[j] -= 1
if volume != 0:
t.processOrder(price['dt'], price['rmb'], volume, notes=notes)
else:
t.summary(price['dt'], price['rmb'])
logs.info(t.strategyName + ',' + str(len(t.stats['buy']['date'])) + ',' + str(len(t.stats['sell']['date'])) + ',' + str(t.stats['downDays']) + ',' + str(t.equity))
logtr.info('OP,STRATEGY,TIME,VOLUME,PRICE,POSITION,NOTES,EQUITY,BALANCE')
for tl in t.stats['log']:
logtr.info(tl)
t.generateGraph(0)
def genMaOps(ps, ft, f, st, s):
if ft == 'MA':
fma = ma.calc_ma(ps, f)
elif ft == 'EMA':
fma = ma.calc_ema(ps, f)
elif ft == 'SMA':
fma = ma.calc_sma(ps, f)
elif ft == 'LWMA':
fma = ma.calc_lwma(ps, f)
if st == 'MA':
sma = ma.calc_ma(ps, s)
elif st == 'EMA':
sma = ma.calc_ema(ps, s)
elif st == 'SMA':
sma = ma.calc_sma(ps, s)
elif st == 'LWMA':
sma = ma.calc_lwma(ps, s)
l = len(ps)
ops = [0] * l
for i in range(s, l):
diff = fma[i] - sma[i]
if diff > 0:
ops[i] = 1
elif diff < 0:
ops[i] = -1
return ops
def runVoter_0(in_prices):
global prices, areas, l
prices = in_prices
directs, slopes, stds, areas = pool.calc_variants(prices)
l = len(prices)
ps = [p['close'] for p in prices]
mafront, maOps = getMaOps(ps)
doTrade('MA', mafront, maOps)
return
macdfront, macdOps = getMacdOps(prices)
doTrade('MACD', macdfront, macdOps)
return
front = max(mafront, macdfront)
ops = [0] * l
for i in range(l):
if maOps[i] + macdOps[i] >= 2:
ops[i] = 1
elif maOps[i] + macdOps[i] <= -2:
ops[i] = -1
doTrade('MA', front, ops)
return
def getMacdOps(prices):
ops = [0, 0, 0, 0]
ops[0] = genMacdOps(prices, 2, 9, 11)
ops[1] = genMacdOps(prices, 2, 6, 2)
ops[2] = genMacdOps(prices, 21, 56, 3)
ops[3] = genMacdOps(prices, 6, 11, 17)
front = 63
fops = [0] * l
for i in range(l):
fops[i] = ops[areas[i]][i]
return front, fops
def genMacdOps2(prices, fast, slow, sign):
openLevel, closeLevel = 0.2, 0.1
macds = macd.calc_macd(prices, fast, slow, sign)
front = slow + sign
l = len(prices)
ops = [0] * l
for i in range(front, l):
if macds['macd'][i] < 0 and macds['macd'][i] > macds['sign'][i] and abs(macds['macd'][i]) > openLevel:
ops[i] = 1
elif ops[i-1] == -1 and macds['macd'][i] < 0 and macds['macd'][i] > macds['sign'][i] and abs(macds['macd'][i]) > closeLevel:
ops[i] = 0
elif macds['macd'][i] > 0 and macds['macd'][i] < macds['sign'][i] and abs(macds['macd'][i]) > openLevel:
ops[i] = -1
elif ops[i-1] == 1 and macds['macd'][i] > 0 and macds['macd'][i] < macds['sign'][i] and abs(macds['macd'][i]) > closeLevel:
ops[i] = 0
else:
ops[i] = ops[i-1]
return ops
def genMacdOps(prices, fast, slow, sign):
openLevel, closeLevel = 0.2, 0.1
macds = macd.calc_macd(prices, fast, slow, sign)
front = slow + sign
l = len(prices)
ops = [0] * l
for i in range(front, l):
if macds['sign'][i] > 0.1 and macds['macd'][i] > macds['sign'][i]:
ops[i] = 1
elif macds['macd'][i] > 0.1 and macds['macd'][i] < macds['sign'][i]:
ops[i] = -1
if macds['sign'][i] < -0.1 and macds['macd'][i] < macds['sign'][i]:
ops[i] = -1
elif macds['macd'][i] < -0.1 and macds['macd'][i] > macds['sign'][i]:
ops[i] = 1
return ops
|
|
#!/usr/bin/env python
'''
A simple Python wrapper for the bh_tsne binary that makes it easier to use it
for TSV files in a pipeline without any shell script trickery.
Note: The script does some minimal sanity checking of the input, but don't
expect it to cover all cases. After all, it is just a wrapper.
[Sylar]Example:
./bhtsne.py -i output2.txt -v -p 45 -o res.txt
Example:
> echo -e '1.0\t0.0\n0.0\t1.0' | ./bhtsne.py -d 2 -p 0.1
-2458.83181442 -6525.87718385
2458.83181442 6525.87718385
The output will not be normalised; maybe the one-liner below is of interest:
python -c 'import numpy; d = numpy.loadtxt("/dev/stdin");
d -= d.min(axis=0); d /= d.max(axis=0);
numpy.savetxt("/dev/stdout", d, fmt="%.8f", delimiter="\t")'
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2013-01-22
'''
# Copyright (c) 2013, Pontus Stenetorp <pontus stenetorp se>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from argparse import ArgumentParser, FileType
from os.path import abspath, dirname, isfile, join as path_join
from shutil import rmtree
from struct import calcsize, pack, unpack
from subprocess import Popen
from sys import stderr, stdin, stdout
from tempfile import mkdtemp
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
import random
from scipy.spatial.distance import pdist, squareform
from sklearn import svm
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn import metrics
### Constants
BH_TSNE_BIN_PATH = path_join(dirname(__file__), 'bh_tsne')
assert isfile(BH_TSNE_BIN_PATH), ('Unable to find the bh_tsne binary in the '
'same directory as this script, have you forgotten to compile it?: {}'
).format(BH_TSNE_BIN_PATH)
# Default hyper-parameter values from van der Maaten (2013)
DEFAULT_NO_DIMS = 2
DEFAULT_PERPLEXITY = 30
DEFAULT_THETA = 0.5
EMPTY_SEED = -1
###
def _argparse():
argparse = ArgumentParser('bh_tsne Python wrapper')
argparse.add_argument('-d', '--no_dims', type=int,
default=DEFAULT_NO_DIMS)
argparse.add_argument('-p', '--perplexity', type=float,
default=DEFAULT_PERPLEXITY)
# 0.0 for theta is equivalent to vanilla t-SNE
argparse.add_argument('-t', '--theta', type=float, default=DEFAULT_THETA)
argparse.add_argument('-r', '--randseed', type=int, default=EMPTY_SEED)
argparse.add_argument('-v', '--verbose', action='store_true')
argparse.add_argument('-i', '--input', type=FileType('r'), default=stdin)
argparse.add_argument('-o', '--output', type=FileType('w'),
default=stdout)
return argparse
class TmpDir:
def __enter__(self):
self._tmp_dir_path = mkdtemp()
return self._tmp_dir_path
def __exit__(self, type, value, traceback):
rmtree(self._tmp_dir_path)
def _read_unpack(fmt, fh):
return unpack(fmt, fh.read(calcsize(fmt)))
def bh_tsne(samples, no_dims=DEFAULT_NO_DIMS, perplexity=DEFAULT_PERPLEXITY, theta=DEFAULT_THETA, randseed=EMPTY_SEED,
verbose=False):
# Assume that the dimensionality of the first sample is representative for
# the whole batch
sample_dim = len(samples[0])
sample_count = len(samples)
# bh_tsne works with fixed input and output paths, give it a temporary
# directory to work in so we don't clutter the filesystem
with TmpDir() as tmp_dir_path:
# Note: The binary format used by bh_tsne is roughly the same as for
# vanilla tsne
with open(path_join(tmp_dir_path, 'data.dat'), 'wb') as data_file:
# Write the bh_tsne header
data_file.write(pack('iiddi', sample_count, sample_dim, theta, perplexity, no_dims))
# Then write the data
for sample in samples:
data_file.write(pack('{}d'.format(len(sample)), *sample))
# Write random seed if specified
if randseed != EMPTY_SEED:
data_file.write(pack('i', randseed))
# Call bh_tsne and let it do its thing
with open('/dev/null', 'w') as dev_null:
bh_tsne_p = Popen((abspath(BH_TSNE_BIN_PATH), ), cwd=tmp_dir_path,
# bh_tsne is very noisy on stdout, tell it to use stderr
# if it is to print any output
stdout=stderr if verbose else dev_null)
bh_tsne_p.wait()
            assert not bh_tsne_p.returncode, ('ERROR: Call to bh_tsne exited '
                    'with a non-zero return code, please ' +
                    ('enable verbose mode and ' if not verbose else '') +
                    'refer to the bh_tsne output for further details')
# Read and pass on the results
with open(path_join(tmp_dir_path, 'result.dat'), 'rb') as output_file:
# The first two integers are just the number of samples and the
# dimensionality
result_samples, result_dims = _read_unpack('ii', output_file)
# Collect the results, but they may be out of order
results = [_read_unpack('{}d'.format(result_dims), output_file)
for _ in xrange(result_samples)]
# Now collect the landmark data so that we can return the data in
# the order it arrived
results = [(_read_unpack('i', output_file), e) for e in results]
# Put the results in order and yield it
results.sort()
for _, result in results:
yield result
# The last piece of data is the cost for each sample, we ignore it
#read_unpack('{}d'.format(sample_count), output_file)
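# Programmatic usage (a sketch; assumes `samples` is a list of equal-length float
# lists and that the compiled bh_tsne binary sits next to this script):
#
#     samples = [[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]]
#     embedding = [point for point in bh_tsne(samples, perplexity=1.0)]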
def draw_map(norm_res, labels, data_name):
seq_txt = open( data_name + '.txt', 'r')
map_size = 10000
roi_size = 200
#norm_res = np.ceil(norm_res * 1000)
# full map size = 1000 + roi.size()
label_file = open( data_name + '_labels.txt', 'w')
final_map = np.zeros((map_size, map_size, 3), np.uint8)
for x, label in zip(norm_res, labels):
img_name = seq_txt.readline()[:-3]
img = cv2.imread(img_name)
label_file.write('%s %d\n' % (img_name, label))
#cv2.imshow("res", img)
#cv2.waitKey(10)
# resize roi to 100x100
roi = cv2.resize(img,(roi_size, roi_size), interpolation = cv2.INTER_CUBIC)
a = np.ceil(x[0] * (map_size - roi_size))
b = np.ceil(x[1] * (map_size - roi_size))
a = a-((a) % roi_size)
b = b-((b) % roi_size)
#print a,b
final_map[map_size - (b + roi_size):map_size - b, a:a + roi_size, :] = roi
#print res
#cv2.imshow("res", final_map)
#cv2.waitKey()
cv2.imwrite(data_name + '.jpeg', final_map, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
def draw_concatmap(norm_res, labels, data_name):
seq_txt = open( data_name + '.txt', 'r')
map_size = 6000 # size of final image
roi_size = 100 # size of every image thumbnail
final_map = np.zeros((map_size, map_size, 3), np.uint8)
img_list = []
for it in range(len(norm_res)):
img_name = seq_txt.readline()[:-3]
img_list.append(img_name)
used = np.zeros((len(norm_res), 1))
qq = map_size/roi_size
abes = np.zeros((qq*qq,2))
i=0
for a in range(0, map_size, roi_size):
for b in range(0, map_size, roi_size):
abes[i,0] = a
abes[i,1] = b
i=i+1
for i in range(len(abes)):
a = abes[i,0]
b = abes[i,1]
xf = a/map_size;
yf = b/map_size;
dd = np.sum(np.square(np.subtract(norm_res, [xf,yf])), axis=1)
index = 0
for inf in np.nditer(dd, op_flags=['readwrite']):
if used[index,0] > 0:
inf[...] = 1000
index+=1
dd_index = np.argmin(dd)
used[dd_index,0] = 1
img = cv2.imread(img_list[dd_index])
roi = cv2.resize(img,(roi_size, roi_size), interpolation = cv2.INTER_CUBIC)
final_map[map_size - (b + roi_size):map_size - b, a:a + roi_size, :] = roi
# not working!
#cv2.rectangle(final_map, (int(map_size - (b + roi_size)), int(a)), (int(map_size - b), int(a + roi_size)), (0,0,255), 20)
cv2.imwrite(data_name + '.jpeg', final_map, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
##############################################################################
from sklearn.cluster import AffinityPropagation
def cluster_affinity(norm_res):
# Compute Affinity Propagation
af = AffinityPropagation().fit(norm_res)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
#print labels
n_clusters_ = len(cluster_centers_indices)
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = norm_res[cluster_centers_indices[k]]
plt.plot(norm_res[class_members, 0], norm_res[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in norm_res[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
return labels
##############################################################################
from sklearn.cluster import DBSCAN
def cluster_DBSCAN(X):
db = DBSCAN(eps=0.1, min_samples=5).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
#print labels
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
return labels
##############################################################################
from sklearn.cluster import MeanShift, estimate_bandwidth
from itertools import cycle
def cluster_meanshift(X):
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.1, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
return labels
###############################################################################
def svm_reg(res, new_data):
clfx = svm.SVR(C=1.0, cache_size=500, coef0=0.0, degree=3, epsilon=0.1, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
clfx.fit(new_data, res[:,0])
clfy = svm.SVR(C=1.0, cache_size=500, coef0=0.0, degree=3, epsilon=0.1, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
clfy.fit(new_data, res[:,1])
return clfx, clfy
def svm_predict(clfx, clfy, data_name):
output_xy = []
data = []
res_x = []
res_y = []
with open(data_name) as f:
for sample_line in f:
sample_data = sample_line.split('\t')
data.append([float(e) for e in sample_data])
if len(data) == 1000:
res_x.extend(clfx.predict(data))
res_y.extend(clfy.predict(data))
data = []
print 'predict one batch...'
res_x.extend(clfx.predict(data))
res_y.extend(clfy.predict(data))
return res_x, res_y
################################################################################
import operator
def similarity(data, mean):
distance_mean = 0
distance_array = []
out = []
for single_data in data:
dis = math.sqrt(np.sum(np.square(np.subtract(single_data, mean)), axis=0))
distance_mean += dis
distance_array.append((single_data, dis))
# sort tuples by dis
distance_array.sort(key=operator.itemgetter(1), reverse=True)
#print distance_array[0]
#distance_mean = distance_mean / len(data)
'''for d, da in zip(distance_array, data):
if d > distance_mean:
out.append(da)'''
for i in xrange(20):
out.append(distance_array[i][0])
return out
def sampling(data_name, mean):
mean = mean
new_data = []
batch_data = []
# first mean[4096]
with open(data_name, 'r') as f:
for line in f:
sample_data = line.split('\t')
batch_data.append([float(e) for e in sample_data])
if len(batch_data) == 100 :
out = similarity(batch_data, mean)
for o in out:
new_data.append(o)
np_data = np.array(new_data)
mean = np.mean(np_data, axis=0)
batch_data = []
print len(new_data)
return new_data
def read_data_random(data_name):
new_data = []
with open(data_name, 'r') as f:
for line in f:
if random.sample(xrange(0, 10), 1)[0] == 1:
sample_data = line.split('\t')
new_data.append([float(e) for e in sample_data])
print len(new_data), len(new_data[len(new_data)-1])
return new_data
############################################################################
def main(args):
argp = _argparse().parse_args(args[1:])
data_name = 'features_0326.txt'
# Read the data, with some sanity checking
data = []
data = read_data_random(data_name)
np_data = np.array(data)
mean = np.mean(np_data, axis=0)
del data[:]
del np_data
data = sampling(data_name, mean)
print len(data)
''' single_data = [float(e) for e in line.split('\t')]
if similarity(new_data, single_data):
new_data.append(single_data) '''
'''for sample_line_num, sample_line in enumerate((l.rstrip('\n')
for l in argp.input), start=1):
sample_data = sample_line.split('\t')
try:
assert len(sample_data) == dims, ('Input line #{} of '
'dimensionality {} although we have previously observed '
'lines with dimensionality {}, possible data error or is '
'the data sparsely encoded?'
).format(sample_line_num, len(sample_data), dims)
except NameError:
# First line, record the dimensionality
dims = len(sample_data)
data.append([float(e) for e in sample_data])'''
#norm_data = preprocessing.scale(data, axis = 0)
#max_abs_scaler = preprocessing.MinMaxScaler()
#new_data = max_abs_scaler.fit_transform(norm_data)
#pca = PCA(n_components=512)
#new_data = pca.fit_transform(data)
#norm_data = preprocessing.scale(new_data, axis = 0)
#max_abs_scaler = preprocessing.MinMaxScaler()
#new_data = max_abs_scaler.fit_transform(norm_data)
#new_data = data
res = []
for result in bh_tsne(data, no_dims=argp.no_dims, perplexity=argp.perplexity, theta=argp.theta, randseed=argp.randseed,
verbose=argp.verbose):
fmt = ''
for i in range(1, len(result)):
fmt = fmt + '{}\t'
fmt = fmt + '{}\n'
argp.output.write(fmt.format(*result))
res.append(result)
res = np.array(res)
scale_res = preprocessing.scale(res, axis = 0)
max_abs_scaler = preprocessing.MinMaxScaler()
norm_res = max_abs_scaler.fit_transform(scale_res)
'''d = squareform(pdist(norm_res, 'euclidean'))
for zero in np.nditer(d, op_flags=['readwrite']):
if zero[...] == 0:
zero[...] = 1000
d_index = np.argmin(d, axis=1) '''
print "training svm..."
# feed scaled result
clfx, clfy = svm_reg(scale_res, data)
# trained svm to do regression on extra data
print 'svm predicting...'
x, y = svm_predict(clfx, clfy, data_name)
pred_res = np.array(zip(x,y))
wf = open('predict_xy.txt', 'w')
for xx, yy in zip(x,y):
wf.write(str(xx) + ' ' + str(yy) + '\n')
# normalize scaled prediction
max_scaler = preprocessing.MinMaxScaler()
norm_pred_res = max_scaler.fit_transform(pred_res)
#print norm_pred_res
#np.set_printoptions(threshold='nan')
#print d
#print norm_res
#plt.plot(norm_res[:,0], norm_res[:,1], 'ro')
#plt.show()
#labels = cluster_affinity(norm_res)
#labels = cluster_DBSCAN(norm_res)
labels = cluster_meanshift(norm_res)
labels_p = cluster_meanshift(norm_pred_res)
#draw_map(norm_res, labels, data_name = '03-26seq')
draw_map(norm_pred_res, labels_p, data_name = '03-26seq')
if __name__ == '__main__':
from sys import argv
exit(main(argv))
|
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""REST API bits"""
import json
import logging
import requests
from fiblary.common import exceptions
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
USER_AGENT = 'RAPI'
_logger = logging.getLogger(__name__)
class RESTApi(object):
"""A REST API client that handles the interface from us to the server
RESTApi is requests.Session wrapper that knows how to do:
* JSON serialization/deserialization
* log requests in 'curl' format
* basic API boilerplate for create/delete/list/set/show verbs
* authentication is handled elsewhere and a token is passed in
    The expectation is that there will be a RESTApi object per authentication
    token in use, i.e. per project/username/auth_endpoint.
On the other hand, a Client knows details about the specific REST Api that
it communicates with, such as the available endpoints, API versions, etc.
"""
def __init__(
self,
session=None,
username=None,
password=None,
user_agent=USER_AGENT,
verify=True,
logger=None,
debug=None,
base_url=None,
):
"""Construct a new REST client
:param object session: A Session object to be used for
communicating with the identity service.
        :param string username: Username for HTTP basic authentication (optional)
        :param string password: Password for HTTP basic authentication (optional)
:param string user_agent: Set the User-Agent header in the requests
:param boolean/string verify: If ``True``, the SSL cert will be
verified. A CA_BUNDLE path can also be
provided.
:param logging.Logger logger: A logger to output to. (optional)
:param boolean debug: Enables debug logging of all request and
responses to identity service.
default False (optional)
"""
if username and password:
self.set_auth((username, password))
else:
self.set_auth(None)
self.debug = debug
self.base_url = base_url or ""
if not session:
# We create a default session object
session = requests.Session()
self.session = session
self.session.verify = verify
self.session.user_agent = user_agent
self.session.stream = False
if logger:
self.logger = logger
else:
self.logger = _logger
def set_auth(self, auth_header):
"""Sets the current auth blob"""
self.auth_header = auth_header
def set_header(self, header, content):
"""Sets passed in headers into the session headers
Replaces existing headers!!
"""
if content is None:
del self.session.headers[header]
else:
self.session.headers[header] = content
def request(self, method, url, **kwargs):
"""Make an authenticated (if token available) request
:param method: Request HTTP method
:param url: Request URL
:param data: Request body
:param json: Request body to be encoded as JSON
Overwrites ``data`` argument if present
"""
kwargs.setdefault('headers', {})
if self.auth_header:
kwargs['auth'] = self.auth_header
if 'json' in kwargs:
json_obj = kwargs.get('json', None)
if json_obj and isinstance(json_obj, (dict)):
kwargs['data'] = json.dumps(json_obj)
kwargs['headers']['Content-Type'] = 'application/json'
kwargs.pop('json')
kwargs.setdefault('allow_redirects', True)
if 'timeout' not in kwargs:
kwargs['timeout'] = 10
if self.debug:
self._log_request(method, self.base_url + url, **kwargs)
try:
response = self.session.request(method,
self.base_url + url,
**kwargs)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as e:
            raise exceptions.ConnectionError(str(e))
except Exception as e:
raise e
if self.debug:
self._log_response(response)
return self._error_handler(response)
def _error_handler(self, response):
if response.status_code < 200 or response.status_code > 300:
self.logger.debug(
"ERROR: %s",
response.text,
)
raise exceptions.from_response(response)
return response
# Convenience methods to mimic the ones provided by requests.Session
def delete(self, url, **kwargs):
"""Send a DELETE request. Returns :class:`requests.Response` object.
:param url: Request URL
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('DELETE', url, **kwargs)
def get(self, url, **kwargs):
"""Send a GET request. Returns :class:`requests.Response` object.
:param url: Request URL
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('GET', url, **kwargs)
def head(self, url, **kwargs):
"""Send a HEAD request. Returns :class:`requests.Response` object.
:param url: Request URL
:param \*\*kwargs: Optional arguments passed to ``request``
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def options(self, url, **kwargs):
"""Send an OPTIONS request. Returns :class:`requests.Response` object.
:param url: Request URL
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('OPTIONS', url, **kwargs)
def patch(self, url, data=None, json=None, **kwargs):
"""Send a PUT request. Returns :class:`requests.Response` object.
:param url: Request URL
:param data: Request body
:param json: Request body to be encoded as JSON
Overwrites ``data`` argument if present
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('PATCH', url, data=data, json=json, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Send a POST request. Returns :class:`requests.Response` object.
:param url: Request URL
:param data: Request body
:param json: Request body to be encoded as JSON
Overwrites ``data`` argument if present
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, json=None, **kwargs):
"""Send a PUT request. Returns :class:`requests.Response` object.
:param url: Request URL
:param data: Request body
:param json: Request body to be encoded as JSON
Overwrites ``data`` argument if present
:param \*\*kwargs: Optional arguments passed to ``request``
"""
return self.request('PUT', url, data=data, json=json, **kwargs)
# Command verb methods
def create(self, url, data=None, response_key=None, **kwargs):
"""Create a new object via a POST request
:param url: Request URL
:param data: Request body, will be JSON encoded
:param response_key: Dict key in response body to extract
:param \*\*kwargs: Optional arguments passed to ``request``
"""
response = self.request('POST', url, json=data, **kwargs)
if response_key:
return response.json()[response_key]
else:
return response.json()
def list(self, url, data=None, response_key=None, **kwargs):
"""Retrieve a list of objects via a GET or POST request
:param url: Request URL
:param data: Request body, will be JSON encoded
:param response_key: Dict key in response body to extract
:param \*\*kwargs: Optional arguments passed to ``request``
"""
if data:
response = self.request('POST', url, json=data, **kwargs)
else:
response = self.request('GET', url, **kwargs)
if response_key:
return response.json()[response_key]
else:
return response.json()
def set(self, url, data=None, response_key=None, **kwargs):
"""Update an object via a PUT request
:param url: Request URL
        :param data: Request body, will be JSON encoded
        :param response_key: Dict key in response body to extract
        :param \*\*kwargs: Optional arguments passed to ``request``
        """
        response = self.request('PUT', url, json=data, **kwargs)
if data:
if response_key:
return response.json()[response_key]
else:
return response.json()
else:
# Nothing to do here
return None
def show(self, url, response_key=None, **kwargs):
"""Retrieve a single object via a GET request
:param url: Request URL
:param response_key: Dict key in response body to extract
:param \*\*kwargs: Optional arguments passed to ``request``
"""
response = self.request('GET', url, **kwargs)
if response_key:
return response.json()[response_key]
else:
return response.json()
def _log_request(self, method, url, **kwargs):
if 'params' in kwargs and kwargs['params'] != {}:
url += '?' + urlencode(kwargs['params'])
string_parts = [
"curl -i",
"-X '%s'" % method,
"'%s'" % url,
]
for element in kwargs['headers']:
header = " -H '%s: %s'" % (element, kwargs['headers'][element])
string_parts.append(header)
self.logger.debug("REQ: %s" % " ".join(string_parts))
if 'data' in kwargs:
self.logger.debug(" REQ BODY: %r\n" % (kwargs['data']))
def _log_response(self, response):
self.logger.debug(
"RESP: [%s] %r",
response.status_code,
response.headers,
)
if response._content_consumed:
self.logger.debug(
" RESP BODY: '%s'",
response.text,
)
self.logger.debug(
" encoding: %s",
response.encoding,
)
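# Example usage (a sketch; the base URL, credentials and endpoints below are purely
# illustrative and not defined by this module):
#
#     api = RESTApi(base_url='http://192.168.1.1/api',
#                   username='admin', password='admin', debug=True)
#     rooms = api.list('/rooms')                     # GET, decoded from JSON
#     api.post('/callAction', params={'deviceID': 12, 'name': 'turnOn'})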
|
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
parent = None
def _eq(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
def __ne__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
__hash__ = None
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_prev_leaf(self):
"""Return the leaf node that precedes this node in the parse tree."""
def last_child(node):
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_child(node.children[-1])
if self.parent is None:
return None
prev = self.get_prev_sibling()
if isinstance(prev, Leaf):
return prev
elif prev is not None:
return last_child(prev)
return self.parent.get_prev_leaf()
def get_next_leaf(self):
"""Return self if leaf, otherwise the leaf node that succeeds this
node in the parse tree.
"""
node = self
while not isinstance(node, Leaf):
assert node.children
node = node.children[0]
return node
def get_lineno(self):
"""Return the line number which generated the invocant node."""
return self.get_next_leaf().lineno
def get_prefix(self):
"""Return the prefix of the next leaf node."""
# only leaves carry a prefix
return self.get_next_leaf().prefix
class Node(BaseNode):
"""
Node implementation for nonterminals.
"""
def __init__(self, type, children, context=None):
# type of nonterminals is >= 256
# assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
# assert ch.parent is None, repr(ch)
ch.parent = self
def __repr__(self):
return '%s(%s, %r)' % (self.__class__.__name__,
self.type, self.children)
def __str__(self):
"""This reproduces the input source exactly."""
return ''.join(map(str, self.children))
def _eq(self, other):
return (self.type, self.children) == (other.type, other.children)
# support indexing the node directly instead of .children
def __getitem__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def __len__(self):
return len(self.children)
class Leaf(BaseNode):
"""
Node implementation for leaf nodes (terminals).
"""
prefix = '' # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0   # Column where this token starts in the input
def __init__(self, type, value, context=None):
# type of terminals is below 256
# assert 0 <= type < 256, type
self.type = type
self.value = value
if context is not None:
self.prefix, (self.lineno, self.column) = context
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.type, self.value, self.prefix)
def __str__(self):
"""This reproduces the input source exactly."""
return self.prefix + str(self.value)
def _eq(self, other):
"""Compares two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def convert(grammar, raw_node):
"""Convert raw node to a Node or Leaf instance."""
type, value, context, children = raw_node
if children or type in grammar.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def nice_repr(node, number2name, prefix=False):
def _repr(node):
if isinstance(node, Leaf):
return "%s(%r)" % (number2name[node.type], node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_repr, node.children)))
def _prepr(node):
if isinstance(node, Leaf):
return "%s(%r, %r)" % (number2name[node.type],
node.prefix, node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_prepr, node.children)))
return (prefix and _prepr or _repr)(node)
class NodeVisitor(object):
def __init__(self, number2name, *args):
self.number2name = number2name
self.init(*args)
def init(self, *args):
pass
def visit(self, node):
"""Visit a node."""
method = 'visit_' + self.number2name[node.type]
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
for child in node:
self.visit(child)
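# Example of a concrete visitor (a sketch; `number2name` would normally map grammar
# type numbers to symbol names, 'funcdef' is an assumed symbol name, and
# `parse_tree_root` stands for an already-parsed tree):
#
#     class FunctionCollector(NodeVisitor):
#         def init(self):
#             self.functions = []
#         def visit_funcdef(self, node):
#             self.functions.append(node)
#             self.generic_visit(node)
#
#     collector = FunctionCollector(number2name)
#     collector.visit(parse_tree_root)
#     print(collector.functions)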
|
|
import math
import numpy as np
import pandas as pd
import tarfile
from cvxpy import *
# small graph for algo testing
small_graph = np.array([[0,0,1,0,0,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0.5,0.5,0,0,0,0,0,0],
[0,0,0,0.5,0,0.5,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0,0,0],
[0,0,0,0,0.5,0.5,0,0,0,0,0,0],
[0,0,0,0,0,0.5,0.5,0,0,0,0,0],
[0,0,1.0/3,0,0,0,0,1.0/3,0,0,0,1.0/3],
[0,0,0,0,0,0,0,0.5,0.5,0,0,0],
[0,0,0,0,0,0,0,0.5,0,0.5,0,0],
[0,0,0,0,0,0,0,0.5,0.5,0,0,0]])
alpha = 0.85 #damping factor
eps = 0.7
# open compressed p2p_gnut archive
comp_gnut = tarfile.open('p2p_gnut.tar.gz')
uncomp_gnut = comp_gnut.extractall()
comp_gnut.close()
# load p2p_gnut graph into panda dataframe then numpy array
df_gnut = pd.read_csv('p2p_gnut.csv', header=None)
p2p_gnut_graph = df_gnut.iloc[:,:].values
def maxnorm(x1, x2):
norm = 0
for i in range(len(x1)):
if (abs(x1[i]-x2[i]) > norm):
norm = abs(x1[i]-x2[i])
return norm
def maxnormdiffer(x1, x2):
norm = 0
for i in range(len(x1)):
if (abs(x1[i]-x2[i]) > norm):
norm = abs(x1[i]-x2[i])
return norm
def euclnorm(x):
norm = 0
for i in range(len(x)):
norm += abs(x[i])**(2)
return math.sqrt(norm)
accuracy = 10**(-6)
def eigenvector(M):
"""
    The function takes the adjacency matrix of a graph, scaled to be a probabilistic matrix, and
    computes the left eigenvector associated with eigenvalue 1. It uses an iterative approach and
    also returns the deviations from the optimum on every step to evaluate the rate of convergence.
    args:
    M - adjacency matrix of a graph, scaled to be a probabilistic (row-stochastic) matrix (numpy nxn array)
    return (weights, conv_norm)
    weights - the eigenvector of interest (numpy n*1 vector)
    conv_norm - list with deviations from the optimum, measured in the max norm
"""
#M - hyperlink matrix
n = len(M)
v = [] #personalization vector
for i in range(n):
v.append(1/n)
d = []#dangling nodes column
for i in range(n):
iszero = 1
for j in range(n):
if (M[i, j] != 0): iszero = 0
d.append(iszero)
d = np.matrix(d).T
S = M + d*v #matrix with fixed dangling nodes
e = [] # column of all 1's
for i in range(n):
e.append(1)
e = np.matrix(e).T
G = alpha*S+(1-alpha)*e*v #Google matrix
x = []#iterations to optimum
conv_norm = []#deviations from optimum
x.append(v)
k = 0
x_new = alpha*(np.matrix(x[k])*M)+alpha*(np.matrix(x[k])*d)*v+(1-alpha)*np.matrix(v)
x.append(x_new.getA1())
k+=1
while(maxnorm(x[k], x[k-1]) > accuracy):
x_new = alpha*(np.matrix(x[k])*M)+alpha*(np.matrix(x[k])*d)*v+(1-alpha)*np.matrix(v)
x.append(x_new.getA1())
k+=1
for i in range(0, k-1):
conv_norm.append(maxnorm(x[k], x[i]))
weights = x[k]
return (weights, conv_norm)
def pagerank (M): #M is row-stochastic
"""
    The function takes the adjacency matrix of a graph, scaled to be a probabilistic matrix, and
    computes the pagerank score with alpha = 0.85. It uses an iterative approach and also returns
    the deviations from the optimum on every step to evaluate the rate of convergence.
    args:
    M - adjacency matrix of a graph, scaled to be a probabilistic (row-stochastic) matrix (numpy nxn array)
    return (weights, conv_norm)
    weights - pagerank scores (numpy n*1 vector)
    conv_norm - list with deviations from the optimum, measured in the max norm
"""
n = len(M)
v = [] #personalization vector
for i in range(n):
v.append(1/n)
d = []#dangling nodes column
for i in range(n):
iszero = 1
for j in range(n):
if (M[i, j] != 0): iszero = 0
d.append(iszero)
d = np.matrix(d).T
M = M + d*v #matrix with fixed dangling nodes
M = M.T
weights = []
conv_func = np.array([]) #if it is more convenient to you, you can use simple python list here
n = len(M)
e = [1/n for i in range(n)]
e = np.matrix(e).T
x = []#iterations to optimum
conv_norm = []#deviations from optimum
x.append(e)
k = 1
x_new = (1-1/(k+1))*M*x[k-1] + (1/(k+1))*e
x.append(x_new)
k+=1
while(1):
x_new = (1-1/(k+1))*M*x[k-1] + (1/(k+1))*e
x.append(x_new)
k+=1
if (k > 10**(2)): break
for i in range(0, k-1):
conv_norm.append(maxnormdiffer(x[k-1].getA1(), x[i].getA1()))
weights = x[k-1].getA1()
return (weights, conv_norm)
# note: if it is more convenient, one function with an additional parameter can replace the two robust-pagerank wrappers below
def robust_pagerank_euklid_test(M, alpha):
    """
    This is a wrapper function to test performance of our algorithm for robust pagerank against the existing one.
    Here we use Euclidean norms both for a and b.
    args:
        M - adjacency matrix of a graph, scaled to be a probabilistic matrix (numpy nxn array)
        alpha - regularization parameter
    return weights
        weights - the pagerank score (numpy n*1 vector)
    """
dim = M.shape[0]
# define the problem
    x = Variable(dim)
objective = Minimize(sum_squares(M*x) + alpha * sum_squares(x))
constraints = [sum_entries(x) == 1, 0 <= x]
prob = Problem(objective, constraints)
# solve the problem
result = prob.solve()
weights = x.value
return weights
def robust_pagerank_max_test(M, alpha):  # TODO specify the implementation of the algorithm in the docs
    """
    This is a wrapper function to test performance of our algorithm for robust pagerank against the existing one.
    Here we use max norms both for a and b.
    args:
        M - adjacency matrix of a graph, scaled to be a probabilistic matrix (numpy nxn array)
        alpha - regularization parameter
    return weights
        weights - the pagerank score (numpy n*1 vector)
    """
dim = M.shape[0]
# define the problem
    x = Variable(dim)
    objective = Minimize(norm(M*x, 'inf') + alpha * norm(x, 'inf'))
constraints = [sum_entries(x) == 1, 0 <= x]
prob = Problem(objective, constraints)
# solve the problem
result = prob.solve()
weights = x.value
return weights
def robust_alternative_test(M):  # TODO Please specify the algorithm and its implementation in the docs
    """
    This is a wrapper function to test performance of our algorithm for ... against the existing one.
    Here we use max norms both for a and b.
It implements an iterative approach and also returns the arrays of deviations
from the optimum on every step to evaluate the rate of convergence
The original algorithm is that of ...
args:
        M - adjacency matrix of a graph, scaled to be a probabilistic matrix (numpy nxn array)
return (weights, conv_func, conv_norm)
weights - the eigenvector of interest (numpy n*1 vector)
conv_func - numpy vector with deviations from the optimum, described in terms of target function
        conv_norm - numpy vector with deviations from the optimum, described in terms of norm (Euclidean?)
"""
weights = np.zeros((M.shape[0], 1))
conv_func = np.array([]) #if it is more convenient to you, you can use simple python list here
conv_norm = np.array([]) #if it is more convenient to you, you can use simple python list here
return (weights, conv_func, conv_norm)
print(small_graph)
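# Minimal usage sketch (assumes `small_graph` above is a row-stochastic numpy
# matrix): run the power iteration and report how many deviation values were
# recorded on the way to the fixed point.
def run_small_graph_example():
    scores, deviations = pagerank(np.matrix(small_graph))
    print(scores)           # PageRank vector; entries sum to ~1
    print(len(deviations))  # number of recorded deviations
    return scores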
|
|
"""Classes representing TLS messages."""
from utils.compat import *
from utils.cryptomath import *
from errors import *
from utils.codec import *
from constants import *
from X509 import X509
from X509CertChain import X509CertChain
# The sha and md5 modules are deprecated in Python 2.6; fall back to hashlib
# and expose plain constructor functions so the call sites below work with
# either import path.
try:
    from sha import sha
except ImportError:
    from hashlib import sha1 as sha
try:
    from md5 import md5
except ImportError:
    from hashlib import md5
class RecordHeader3:
def __init__(self):
self.type = 0
self.version = (0,0)
self.length = 0
self.ssl2 = False
def create(self, version, type, length):
self.type = type
self.version = version
self.length = length
return self
def write(self):
w = Writer(5)
w.add(self.type, 1)
w.add(self.version[0], 1)
w.add(self.version[1], 1)
w.add(self.length, 2)
return w.bytes
def parse(self, p):
self.type = p.get(1)
self.version = (p.get(1), p.get(1))
self.length = p.get(2)
self.ssl2 = False
return self
class RecordHeader2:
def __init__(self):
self.type = 0
self.version = (0,0)
self.length = 0
self.ssl2 = True
def parse(self, p):
if p.get(1)!=128:
raise SyntaxError()
self.type = ContentType.handshake
self.version = (2,0)
#We don't support 2-byte-length-headers; could be a problem
self.length = p.get(1)
return self
class Msg:
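    # Messages are serialized in two passes: write(trial=True) walks the
    # fields with an unbounded Writer only to measure the encoded length,
    # then write(trial=False) allocates a Writer of exactly that size and
    # emits the bytes.  preWrite/postWrite implement the shared plumbing.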
def preWrite(self, trial):
if trial:
w = Writer()
else:
length = self.write(True)
w = Writer(length)
return w
def postWrite(self, w, trial):
if trial:
return w.index
else:
return w.bytes
class Alert(Msg):
def __init__(self):
self.contentType = ContentType.alert
self.level = 0
self.description = 0
def create(self, description, level=AlertLevel.fatal):
self.level = level
self.description = description
return self
def parse(self, p):
p.setLengthCheck(2)
self.level = p.get(1)
self.description = p.get(1)
p.stopLengthCheck()
return self
def write(self):
w = Writer(2)
w.add(self.level, 1)
w.add(self.description, 1)
return w.bytes
class HandshakeMsg(Msg):
def preWrite(self, handshakeType, trial):
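        # Handshake messages carry a 4-byte header: 1 byte of handshake type
        # plus a 3-byte body length, which is why the real pass writes
        # `length - 4` after the trial pass has measured the full size.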
if trial:
w = Writer()
w.add(handshakeType, 1)
w.add(0, 3)
else:
length = self.write(True)
w = Writer(length)
w.add(handshakeType, 1)
w.add(length-4, 3)
return w
class ClientHello(HandshakeMsg):
def __init__(self, ssl2=False):
self.contentType = ContentType.handshake
self.ssl2 = ssl2
self.client_version = (0,0)
self.random = createByteArrayZeros(32)
self.session_id = createByteArraySequence([])
self.cipher_suites = [] # a list of 16-bit values
self.certificate_types = [CertificateType.x509]
self.compression_methods = [] # a list of 8-bit values
self.srp_username = None # a string
self.channel_id = False
def create(self, version, random, session_id, cipher_suites,
certificate_types=None, srp_username=None):
self.client_version = version
self.random = random
self.session_id = session_id
self.cipher_suites = cipher_suites
self.certificate_types = certificate_types
self.compression_methods = [0]
self.srp_username = srp_username
return self
def parse(self, p):
if self.ssl2:
self.client_version = (p.get(1), p.get(1))
cipherSpecsLength = p.get(2)
sessionIDLength = p.get(2)
randomLength = p.get(2)
self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3))
self.session_id = p.getFixBytes(sessionIDLength)
self.random = p.getFixBytes(randomLength)
if len(self.random) < 32:
zeroBytes = 32-len(self.random)
self.random = createByteArrayZeros(zeroBytes) + self.random
self.compression_methods = [0]#Fake this value
#We're not doing a stopLengthCheck() for SSLv2, oh well..
else:
p.startLengthCheck(3)
self.client_version = (p.get(1), p.get(1))
self.random = p.getFixBytes(32)
self.session_id = p.getVarBytes(1)
self.cipher_suites = p.getVarList(2, 2)
self.compression_methods = p.getVarList(1, 1)
if not p.atLengthCheck():
totalExtLength = p.get(2)
soFar = 0
while soFar != totalExtLength:
extType = p.get(2)
extLength = p.get(2)
if extType == 6:
self.srp_username = bytesToString(p.getVarBytes(1))
elif extType == 7:
self.certificate_types = p.getVarList(1, 1)
elif extType == ExtensionType.channel_id:
self.channel_id = True
else:
p.getFixBytes(extLength)
soFar += 4 + extLength
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial)
w.add(self.client_version[0], 1)
w.add(self.client_version[1], 1)
w.addFixSeq(self.random, 1)
w.addVarSeq(self.session_id, 1, 1)
w.addVarSeq(self.cipher_suites, 2, 2)
w.addVarSeq(self.compression_methods, 1, 1)
extLength = 0
if self.certificate_types and self.certificate_types != \
[CertificateType.x509]:
extLength += 5 + len(self.certificate_types)
if self.srp_username:
extLength += 5 + len(self.srp_username)
if extLength > 0:
w.add(extLength, 2)
if self.certificate_types and self.certificate_types != \
[CertificateType.x509]:
w.add(7, 2)
w.add(len(self.certificate_types)+1, 2)
w.addVarSeq(self.certificate_types, 1, 1)
if self.srp_username:
w.add(6, 2)
w.add(len(self.srp_username)+1, 2)
w.addVarSeq(stringToBytes(self.srp_username), 1, 1)
return HandshakeMsg.postWrite(self, w, trial)
class ServerHello(HandshakeMsg):
def __init__(self):
self.contentType = ContentType.handshake
self.server_version = (0,0)
self.random = createByteArrayZeros(32)
self.session_id = createByteArraySequence([])
self.cipher_suite = 0
self.certificate_type = CertificateType.x509
self.compression_method = 0
self.channel_id = False
def create(self, version, random, session_id, cipher_suite,
certificate_type):
self.server_version = version
self.random = random
self.session_id = session_id
self.cipher_suite = cipher_suite
self.certificate_type = certificate_type
self.compression_method = 0
return self
def parse(self, p):
p.startLengthCheck(3)
self.server_version = (p.get(1), p.get(1))
self.random = p.getFixBytes(32)
self.session_id = p.getVarBytes(1)
self.cipher_suite = p.get(2)
self.compression_method = p.get(1)
if not p.atLengthCheck():
totalExtLength = p.get(2)
soFar = 0
while soFar != totalExtLength:
extType = p.get(2)
extLength = p.get(2)
if extType == 7:
self.certificate_type = p.get(1)
else:
p.getFixBytes(extLength)
soFar += 4 + extLength
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial)
w.add(self.server_version[0], 1)
w.add(self.server_version[1], 1)
w.addFixSeq(self.random, 1)
w.addVarSeq(self.session_id, 1, 1)
w.add(self.cipher_suite, 2)
w.add(self.compression_method, 1)
extLength = 0
if self.certificate_type and self.certificate_type != \
CertificateType.x509:
extLength += 5
if self.channel_id:
extLength += 4
if extLength != 0:
w.add(extLength, 2)
if self.certificate_type and self.certificate_type != \
CertificateType.x509:
w.add(7, 2)
w.add(1, 2)
w.add(self.certificate_type, 1)
if self.channel_id:
w.add(ExtensionType.channel_id, 2)
w.add(0, 2)
return HandshakeMsg.postWrite(self, w, trial)
class Certificate(HandshakeMsg):
def __init__(self, certificateType):
self.certificateType = certificateType
self.contentType = ContentType.handshake
self.certChain = None
def create(self, certChain):
self.certChain = certChain
return self
def parse(self, p):
p.startLengthCheck(3)
if self.certificateType == CertificateType.x509:
chainLength = p.get(3)
index = 0
certificate_list = []
while index != chainLength:
certBytes = p.getVarBytes(3)
x509 = X509()
x509.parseBinary(certBytes)
certificate_list.append(x509)
index += len(certBytes)+3
if certificate_list:
self.certChain = X509CertChain(certificate_list)
elif self.certificateType == CertificateType.cryptoID:
s = bytesToString(p.getVarBytes(2))
if s:
try:
import cryptoIDlib.CertChain
except ImportError:
raise SyntaxError(\
"cryptoID cert chain received, cryptoIDlib not present")
self.certChain = cryptoIDlib.CertChain.CertChain().parse(s)
else:
raise AssertionError()
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial)
if self.certificateType == CertificateType.x509:
chainLength = 0
if self.certChain:
certificate_list = self.certChain.x509List
else:
certificate_list = []
#determine length
for cert in certificate_list:
bytes = cert.writeBytes()
chainLength += len(bytes)+3
#add bytes
w.add(chainLength, 3)
for cert in certificate_list:
bytes = cert.writeBytes()
w.addVarSeq(bytes, 1, 3)
elif self.certificateType == CertificateType.cryptoID:
if self.certChain:
bytes = stringToBytes(self.certChain.write())
else:
bytes = createByteArraySequence([])
w.addVarSeq(bytes, 1, 2)
else:
raise AssertionError()
return HandshakeMsg.postWrite(self, w, trial)
class CertificateRequest(HandshakeMsg):
def __init__(self):
self.contentType = ContentType.handshake
#Apple's Secure Transport library rejects empty certificate_types, so
#default to rsa_sign.
self.certificate_types = [ClientCertificateType.rsa_sign]
self.certificate_authorities = []
def create(self, certificate_types, certificate_authorities):
self.certificate_types = certificate_types
self.certificate_authorities = certificate_authorities
return self
def parse(self, p):
p.startLengthCheck(3)
self.certificate_types = p.getVarList(1, 1)
ca_list_length = p.get(2)
index = 0
self.certificate_authorities = []
while index != ca_list_length:
ca_bytes = p.getVarBytes(2)
self.certificate_authorities.append(ca_bytes)
index += len(ca_bytes)+2
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request,
trial)
w.addVarSeq(self.certificate_types, 1, 1)
caLength = 0
#determine length
for ca_dn in self.certificate_authorities:
caLength += len(ca_dn)+2
w.add(caLength, 2)
#add bytes
for ca_dn in self.certificate_authorities:
w.addVarSeq(ca_dn, 1, 2)
return HandshakeMsg.postWrite(self, w, trial)
class ServerKeyExchange(HandshakeMsg):
def __init__(self, cipherSuite):
self.cipherSuite = cipherSuite
self.contentType = ContentType.handshake
self.srp_N = 0L
self.srp_g = 0L
self.srp_s = createByteArraySequence([])
self.srp_B = 0L
self.signature = createByteArraySequence([])
def createSRP(self, srp_N, srp_g, srp_s, srp_B):
self.srp_N = srp_N
self.srp_g = srp_g
self.srp_s = srp_s
self.srp_B = srp_B
return self
def parse(self, p):
p.startLengthCheck(3)
self.srp_N = bytesToNumber(p.getVarBytes(2))
self.srp_g = bytesToNumber(p.getVarBytes(2))
self.srp_s = p.getVarBytes(1)
self.srp_B = bytesToNumber(p.getVarBytes(2))
if self.cipherSuite in CipherSuite.srpRsaSuites:
self.signature = p.getVarBytes(2)
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange,
trial)
w.addVarSeq(numberToBytes(self.srp_N), 1, 2)
w.addVarSeq(numberToBytes(self.srp_g), 1, 2)
w.addVarSeq(self.srp_s, 1, 1)
w.addVarSeq(numberToBytes(self.srp_B), 1, 2)
if self.cipherSuite in CipherSuite.srpRsaSuites:
w.addVarSeq(self.signature, 1, 2)
return HandshakeMsg.postWrite(self, w, trial)
def hash(self, clientRandom, serverRandom):
oldCipherSuite = self.cipherSuite
self.cipherSuite = None
try:
bytes = clientRandom + serverRandom + self.write()[4:]
s = bytesToString(bytes)
            return stringToBytes(md5(s).digest() + sha(s).digest())
finally:
self.cipherSuite = oldCipherSuite
class ServerHelloDone(HandshakeMsg):
def __init__(self):
self.contentType = ContentType.handshake
def create(self):
return self
def parse(self, p):
p.startLengthCheck(3)
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial)
return HandshakeMsg.postWrite(self, w, trial)
class ClientKeyExchange(HandshakeMsg):
def __init__(self, cipherSuite, version=None):
self.cipherSuite = cipherSuite
self.version = version
self.contentType = ContentType.handshake
self.srp_A = 0
self.encryptedPreMasterSecret = createByteArraySequence([])
def createSRP(self, srp_A):
self.srp_A = srp_A
return self
def createRSA(self, encryptedPreMasterSecret):
self.encryptedPreMasterSecret = encryptedPreMasterSecret
return self
def parse(self, p):
p.startLengthCheck(3)
if self.cipherSuite in CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites:
self.srp_A = bytesToNumber(p.getVarBytes(2))
elif self.cipherSuite in CipherSuite.rsaSuites:
if self.version in ((3,1), (3,2)):
self.encryptedPreMasterSecret = p.getVarBytes(2)
elif self.version == (3,0):
self.encryptedPreMasterSecret = \
p.getFixBytes(len(p.bytes)-p.index)
else:
raise AssertionError()
else:
raise AssertionError()
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange,
trial)
if self.cipherSuite in CipherSuite.srpSuites + \
CipherSuite.srpRsaSuites:
w.addVarSeq(numberToBytes(self.srp_A), 1, 2)
elif self.cipherSuite in CipherSuite.rsaSuites:
if self.version in ((3,1), (3,2)):
w.addVarSeq(self.encryptedPreMasterSecret, 1, 2)
elif self.version == (3,0):
w.addFixSeq(self.encryptedPreMasterSecret, 1)
else:
raise AssertionError()
else:
raise AssertionError()
return HandshakeMsg.postWrite(self, w, trial)
class CertificateVerify(HandshakeMsg):
def __init__(self):
self.contentType = ContentType.handshake
self.signature = createByteArraySequence([])
def create(self, signature):
self.signature = signature
return self
def parse(self, p):
p.startLengthCheck(3)
self.signature = p.getVarBytes(2)
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.certificate_verify,
trial)
w.addVarSeq(self.signature, 1, 2)
return HandshakeMsg.postWrite(self, w, trial)
class ChangeCipherSpec(Msg):
def __init__(self):
self.contentType = ContentType.change_cipher_spec
self.type = 1
def create(self):
self.type = 1
return self
def parse(self, p):
p.setLengthCheck(1)
self.type = p.get(1)
p.stopLengthCheck()
return self
def write(self, trial=False):
w = Msg.preWrite(self, trial)
w.add(self.type,1)
return Msg.postWrite(self, w, trial)
class Finished(HandshakeMsg):
def __init__(self, version):
self.contentType = ContentType.handshake
self.version = version
self.verify_data = createByteArraySequence([])
def create(self, verify_data):
self.verify_data = verify_data
return self
def parse(self, p):
p.startLengthCheck(3)
if self.version == (3,0):
self.verify_data = p.getFixBytes(36)
elif self.version in ((3,1), (3,2)):
self.verify_data = p.getFixBytes(12)
else:
raise AssertionError()
p.stopLengthCheck()
return self
def write(self, trial=False):
w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial)
w.addFixSeq(self.verify_data, 1)
return HandshakeMsg.postWrite(self, w, trial)
class EncryptedExtensions(HandshakeMsg):
def __init__(self):
self.channel_id_key = None
self.channel_id_proof = None
def parse(self, p):
p.startLengthCheck(3)
soFar = 0
while soFar != p.lengthCheck:
extType = p.get(2)
extLength = p.get(2)
if extType == ExtensionType.channel_id:
if extLength != 32*4:
raise SyntaxError()
self.channel_id_key = p.getFixBytes(64)
self.channel_id_proof = p.getFixBytes(64)
else:
p.getFixBytes(extLength)
soFar += 4 + extLength
p.stopLengthCheck()
return self
class ApplicationData(Msg):
def __init__(self):
self.contentType = ContentType.application_data
self.bytes = createByteArraySequence([])
def create(self, bytes):
self.bytes = bytes
return self
def parse(self, p):
self.bytes = p.bytes
return self
def write(self):
return self.bytes
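# Minimal usage sketch (illustrative only, not called anywhere): build a
# ClientHello with the helpers imported above and serialize it.  The cipher
# suite value 0x0035 is just an arbitrary 16-bit example.
def _example_client_hello():
    hello = ClientHello().create(
        version=(3, 1),
        random=createByteArrayZeros(32),
        session_id=createByteArraySequence([]),
        cipher_suites=[0x0035])
    return hello.write()  # handshake header followed by the encoded body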
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems, iterkeys
from . utils.protocol_utils import Enum
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION'
)
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
class SIDData(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
        `datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__)
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
    Note: Many methods mirror the dict interface because this class
    historically replaced a dict subclass, and existing algorithms still
    rely on that interface.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
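        # When set, _contains_override is a predicate applied to each sid by
        # __contains__ (and therefore by iteration and the key/value views
        # below) so that callers can filter sids out of the algorithm's view
        # of the bar.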
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
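# Minimal usage sketch (illustrative only): Event behaves like a mutable
# record with both attribute and dict-style access, and Positions lazily
# creates a Position the first time a sid is looked up.
def _example_protocol_usage():
    evt = Event({'sid': 24, 'price': 101.5})
    assert evt['price'] == evt.price
    portfolio = Portfolio()
    position = portfolio.positions[24]  # auto-created Position(sid=24)
    position.amount = 100
    return portfolio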
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
_mvn_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
self.batch_shape + self.event_shape
```
or
```
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._mu] + cov.inputs,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape_tensor(self):
return self._cov.batch_shape()
def _batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape_tensor(self):
return array_ops.stack([self._cov.vector_space_dimension()])
def _event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(
(array_ops.stack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
@distribution_util.AppendDocstring(_mvn_prob_note)
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
    # Compute the quadratic form (x - mu)^T sigma^{-1} (x - mu), which appears
    # in the exponent of the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
@distribution_util.AppendDocstring(_mvn_prob_note)
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _covariance(self):
return self.sigma
def _variance(self):
return array_ops.matrix_diag_part(self.sigma)
def _mode(self):
return array_ops.identity(self._mu)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stddev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stddev`: `C_{ii} = diag_stddev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
  of length `k`, and the standard deviations of the `k` (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stddev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stddev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stddev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stddev)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stddev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stddev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is
`diag_stddev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stddev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stddev` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[diag_stddev]) as ns:
cov = operator_pd_diag.OperatorPDSqrtDiag(diag_stddev,
verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stddev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
parameters = locals()
with ops.name_scope(name, values=[diag_stddev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stddev=nn.softplus(diag_stddev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
  V is shape (k x r), typically r << k
  D is diagonal (r x r), optional (defaults to the identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
  V is shape (k x r), typically r << k
  D is diagonal (r x r), optional (defaults to the identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
parameters = locals()
with ops.name_scope(name, values=[diag_large, v, diag_small]) as ns:
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator_pd_diag.OperatorPDDiag(
diag_large, verify_pd=validate_args),
v,
diag=diag_small,
verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
Trainable (batch) Cholesky matrices can be created with
`tf.contrib.distributions.matrix_diag_transform()`
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[chol]) as ns:
cov = operator_pd_cholesky.OperatorPDCholesky(chol,
verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
  # Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[sigma]) as ns:
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu, cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=ns)
self._parameters = parameters
@kullback_leibler.RegisterKL(
_MultivariateNormalOperatorPD, _MultivariateNormalOperatorPD)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
  With `X`, `Y` both multivariate normals in `R^k` with means `mu_a`, `mu_b` and
  covariances `C_a`, `C_b` respectively,
```
  KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
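# Sketch (not part of the library): build two diagonal-covariance MVNs and
# query the KL divergence registered above.  This assumes the `kl` helper in
# the kullback_leibler module dispatches on the registered pair; in graph mode
# the result is a Tensor to be evaluated in a session.
def _example_kl_mvn_diag():
  mvn_a = MultivariateNormalDiag(mu=[0., 0., 0.], diag_stddev=[1., 1., 1.])
  mvn_b = MultivariateNormalDiag(mu=[1., 2., 3.], diag_stddev=[2., 2., 2.])
  return kullback_leibler.kl(mvn_a, mvn_b)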
|
|
#!/usr/bin/python
#
# Copyright 2015 The Cluster-Insight Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A cache of recent values of objects.
SimpleCache stores a dictionary from object labels to the tuple
(create_time, update_time, value).
The lookup() method returns the value associated with the given
label and its creation time if the value is defined and it was most recently
updated less than _max_data_age_seconds ago. If no such value exists or it is
too old, lookup() will fail.
Note that lookup() success depends on the last update time and not on the
data's creation time.
The update() method stores a value associated with the given label in the cache.
The value's most recent update time is passed as a parameter. If the given label
existed before and the associated value without its 'timestamp' attribute did
not change, then the data stored in the cache will not change. Only the most
recent update time will be changed in the cache. Otherwise the value, the most
recent update time and the creation time will be changed.
In other words, the creation time is updated whenever the value is also changed
in the cache. Calling update() always changes the update time, but it may
not change the value or the creation time.
Old data is removed from the cache as a side effect of calling the update()
operation. Old data is removed when it was created more than
DATA_CLEANUP_AGE_SECONDS seconds ago.
There is no cleanup as a result of lookup(), to avoid slowing down cache hits.
Because cleanup happens on update(), ephemeral data does not stay in the cache
indefinitely as long as new data keeps being inserted.
This class is thread-safe.
Usage:
cache = SimpleCache(MAX_DATA_AGE_SECONDS, DATA_CLEANUP_AGE_SECONDS)
def get_value(label):
value, timestamp_seconds = cache.lookup(label)
if timestamp_seconds is not None:
# handle cache hit
else:
# handle cache miss; usually fetch data from source
value = fetch_data()
timestamp_now = time.time()
cache.update(label, value, timestamp_now)
return value
"""
import collections
import copy
import threading
import time
import types
# local import
import utilities
class SimpleCache(object):
"""A cache of named objects with specified freshness and cleanup times.
Attributes:
_lock: a lock protecting access to the data.
_max_data_age_seconds: data older than this many seconds will not be
returned.
_data_cleanup_age_seconds: data older than this many seconds will be cleaned
from the cache.
    _label_to_tuple: a lookup table from label to a named tuple
      (create_timestamp, update_timestamp, value), where 'create_timestamp' is
      the time the value was first stored, 'update_timestamp' is the time the
      data was last updated, and 'value' is a deep copy of the data.
    _namedtuple: a named tuple type with 'create_timestamp',
      'update_timestamp' and 'value' fields.
"""
def __init__(self, max_data_age_seconds, data_cleanup_age_seconds):
assert (isinstance(max_data_age_seconds, types.IntType) or
isinstance(max_data_age_seconds, types.LongType) or
isinstance(max_data_age_seconds, types.FloatType))
assert (isinstance(data_cleanup_age_seconds, types.IntType) or
isinstance(data_cleanup_age_seconds, types.LongType) or
isinstance(data_cleanup_age_seconds, types.FloatType))
assert max_data_age_seconds >= 0
assert data_cleanup_age_seconds >= 0
assert data_cleanup_age_seconds >= max_data_age_seconds
self._lock = threading.Lock()
self._max_data_age_seconds = max_data_age_seconds
self._data_cleanup_age_seconds = data_cleanup_age_seconds
self._label_to_tuple = {}
self._namedtuple = collections.namedtuple(
'Tuple', ['create_timestamp', 'update_timestamp', 'value'])
def _cleanup(self, now):
"""Removes all data older than _data_cleanup_age_seconds from the cache.
This routine prevents the accumulation of stale ephemeral data.
Such data usually has a unique label.
This method must be called when '_lock' is held.
Args:
now: current time in seconds since the Epoch.
"""
assert isinstance(now, types.FloatType)
threshold = now - self._data_cleanup_age_seconds
# Scan the cache using a list of keys instead of iterating on the cache
# directly because we are deleting elements from the cache while iterating.
for key in self._label_to_tuple.keys():
if self._label_to_tuple[key].create_timestamp <= threshold:
# delete current entry from the cache
del self._label_to_tuple[key]
def lookup(self, label, now=None):
"""Lookup the data with the given label in the cache.
Args:
      label: the label of the data. Must be a string. May be empty.
now: current time in seconds. If 'now' is None, the cached entry is
compared with the current wallclock time. Otherwise the cached entry
is compared with the value of 'now'.
Returns:
When the given label has recent data in the cache ('update_timestamp'
less than self._max_data_age_seconds seconds old), returns a tuple
(deep copy of cached value, create_timestamp_of_cached_data).
When the given label was not found in the cache or its data is too old,
returns the tuple (None, None).
"""
assert isinstance(label, types.StringTypes)
assert (now is None) or isinstance(now, types.FloatType)
self._lock.acquire()
ts_seconds = time.time() if now is None else now
if ((label in self._label_to_tuple) and
(ts_seconds < (self._label_to_tuple[label].update_timestamp +
self._max_data_age_seconds))):
# a cache hit
assert self._label_to_tuple[label].value is not None
value, timestamp = (copy.deepcopy(self._label_to_tuple[label].value),
self._label_to_tuple[label].create_timestamp)
else:
value, timestamp = (None, None)
self._lock.release()
return (value, timestamp)
def update(self, label, value, update_timestamp=None):
"""Stores the given value and timestamp for the given label.
Args:
label: the value's label. It must be a string. It can be empty.
value: the value stored in the cache. Must not be None.
update_timestamp: the timestamp in seconds of the value.
If 'update_timestamp' is None, then the update timestamp associated
with 'value' is the current wallclock time. If 'update_timestamp'
is not None, then this timestamp is stored with 'value'.
If 'value' is the same as the current value associated with the label
after removal of 'timestamp' attributes, then the cached value is not
changed.
The cache keeps a deep copy of 'value', so the caller may change 'value'
afterwards.
Returns:
      The value that was stored in the cache. If the value stored in the cache
      was not changed, then the returned value is a deep copy of the old cached
      value. Otherwise the returned value is 'value'.
In any case, the caller may modify 'value' or the returned value after
this method returns.
"""
assert isinstance(label, types.StringTypes)
assert value is not None
assert ((update_timestamp is None) or
isinstance(update_timestamp, types.FloatType))
self._lock.acquire()
# Cleanup only when inserting new values into the cache in order to
# avoid penalizing the cache hit operation.
ts = time.time() if update_timestamp is None else update_timestamp
self._cleanup(ts)
if ((label in self._label_to_tuple) and
(utilities.timeless_json_hash(value) ==
utilities.timeless_json_hash(self._label_to_tuple[label].value))):
# cannot update just one field in a named tuple.
create_ts = self._label_to_tuple[label].create_timestamp
update_value = self._label_to_tuple[label].value
ret_value = copy.deepcopy(update_value)
else:
create_ts = ts
update_value = copy.deepcopy(value)
ret_value = value
# cannot update just one field in a named tuple.
self._label_to_tuple[label] = self._namedtuple(
update_timestamp=ts, create_timestamp=create_ts, value=update_value)
self._lock.release()
return ret_value
def size(self):
"""Returns the number of entries in the cache.
Note that you may lookup only recent entries in the cache
(see the explanation of the lookup() function), even when the
cache contains additional older entries.
Returns:
Number of entries in the cache.
"""
self._lock.acquire()
n = len(self._label_to_tuple)
self._lock.release()
return n
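# Minimal usage sketch with explicit timestamps (illustrative only): values
# remain retrievable for max_data_age_seconds after their last update, and
# lookup() returns (None, None) once they are older than that.
def _example_cache_usage():
  cache = SimpleCache(max_data_age_seconds=10, data_cleanup_age_seconds=100)
  cache.update('nodes', {'count': 3}, update_timestamp=1000.0)
  value, created_at = cache.lookup('nodes', now=1005.0)  # hit: 5s old
  stale, _ = cache.lookup('nodes', now=1020.0)           # miss: too old
  return value, created_at, stale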
|
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'[?]', # 0x27
'[?]', # 0x28
'[?]', # 0x29
'[?]', # 0x2a
'[?]', # 0x2b
'[?]', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'1', # 0x60
'2', # 0x61
'3', # 0x62
'4', # 0x63
'5', # 0x64
'6', # 0x65
'7', # 0x66
'8', # 0x67
'9', # 0x68
'10', # 0x69
'11', # 0x6a
'12', # 0x6b
'13', # 0x6c
'14', # 0x6d
'15', # 0x6e
'16', # 0x6f
'17', # 0x70
'18', # 0x71
'19', # 0x72
'20', # 0x73
'(1)', # 0x74
'(2)', # 0x75
'(3)', # 0x76
'(4)', # 0x77
'(5)', # 0x78
'(6)', # 0x79
'(7)', # 0x7a
'(8)', # 0x7b
'(9)', # 0x7c
'(10)', # 0x7d
'(11)', # 0x7e
'(12)', # 0x7f
'(13)', # 0x80
'(14)', # 0x81
'(15)', # 0x82
'(16)', # 0x83
'(17)', # 0x84
'(18)', # 0x85
'(19)', # 0x86
'(20)', # 0x87
'1.', # 0x88
'2.', # 0x89
'3.', # 0x8a
'4.', # 0x8b
'5.', # 0x8c
'6.', # 0x8d
'7.', # 0x8e
'8.', # 0x8f
'9.', # 0x90
'10.', # 0x91
'11.', # 0x92
'12.', # 0x93
'13.', # 0x94
'14.', # 0x95
'15.', # 0x96
'16.', # 0x97
'17.', # 0x98
'18.', # 0x99
'19.', # 0x9a
'20.', # 0x9b
'(a)', # 0x9c
'(b)', # 0x9d
'(c)', # 0x9e
'(d)', # 0x9f
'(e)', # 0xa0
'(f)', # 0xa1
'(g)', # 0xa2
'(h)', # 0xa3
'(i)', # 0xa4
'(j)', # 0xa5
'(k)', # 0xa6
'(l)', # 0xa7
'(m)', # 0xa8
'(n)', # 0xa9
'(o)', # 0xaa
'(p)', # 0xab
'(q)', # 0xac
'(r)', # 0xad
'(s)', # 0xae
'(t)', # 0xaf
'(u)', # 0xb0
'(v)', # 0xb1
'(w)', # 0xb2
'(x)', # 0xb3
'(y)', # 0xb4
'(z)', # 0xb5
'A', # 0xb6
'B', # 0xb7
'C', # 0xb8
'D', # 0xb9
'E', # 0xba
'F', # 0xbb
'G', # 0xbc
'H', # 0xbd
'I', # 0xbe
'J', # 0xbf
'K', # 0xc0
'L', # 0xc1
'M', # 0xc2
'N', # 0xc3
'O', # 0xc4
'P', # 0xc5
'Q', # 0xc6
'R', # 0xc7
'S', # 0xc8
'T', # 0xc9
'U', # 0xca
'V', # 0xcb
'W', # 0xcc
'X', # 0xcd
'Y', # 0xce
'Z', # 0xcf
'a', # 0xd0
'b', # 0xd1
'c', # 0xd2
'd', # 0xd3
'e', # 0xd4
'f', # 0xd5
'g', # 0xd6
'h', # 0xd7
'i', # 0xd8
'j', # 0xd9
'k', # 0xda
'l', # 0xdb
'm', # 0xdc
'n', # 0xdd
'o', # 0xde
'p', # 0xdf
'q', # 0xe0
'r', # 0xe1
's', # 0xe2
't', # 0xe3
'u', # 0xe4
'v', # 0xe5
'w', # 0xe6
'x', # 0xe7
'y', # 0xe8
'z', # 0xe9
'0', # 0xea
'11', # 0xeb
'12', # 0xec
'13', # 0xed
'14', # 0xee
'15', # 0xef
'16', # 0xf0
'17', # 0xf1
'18', # 0xf2
'19', # 0xf3
'20', # 0xf4
'1', # 0xf5
'2', # 0xf6
'3', # 0xf7
'4', # 0xf8
'5', # 0xf9
'6', # 0xfa
'7', # 0xfb
'8', # 0xfc
'9', # 0xfd
'10', # 0xfe
'0', # 0xff
)
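# Illustrative helper (not part of the original table): assuming this tuple is
# one 256-entry block of a Unidecode-style transliteration table, a character
# in the block is transliterated by indexing with the low byte of its
# codepoint; '[?]' entries mark codepoints with no known ASCII replacement.
def _example_transliterate(codepoint):
    return data[codepoint & 0xFF]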
|
|
# coding: utf-8
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from six.moves import http_client
import webtest
import wsme
from wsme import types as wtypes
from ironic.api.controllers.v1 import types
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
class TestMacAddressType(base.TestCase):
def test_valid_mac_addr(self):
test_mac = 'aa:bb:cc:11:22:33'
with mock.patch.object(utils, 'validate_and_normalize_mac') as m_mock:
types.MacAddressType.validate(test_mac)
m_mock.assert_called_once_with(test_mac)
def test_invalid_mac_addr(self):
self.assertRaises(exception.InvalidMAC,
types.MacAddressType.validate, 'invalid-mac')
class TestUuidType(base.TestCase):
def test_valid_uuid(self):
test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'
self.assertEqual(test_uuid, types.UuidType.validate(test_uuid))
def test_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
types.UuidType.validate, 'invalid-uuid')
class TestNameType(base.TestCase):
@mock.patch("pecan.request")
def test_valid_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
test_name = 'hal-9000'
self.assertEqual(test_name, types.NameType.validate(test_name))
@mock.patch("pecan.request")
def test_invalid_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
self.assertRaises(exception.InvalidName,
types.NameType.validate, '-this is not valid-')
class TestUuidOrNameType(base.TestCase):
@mock.patch("pecan.request")
def test_valid_uuid(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'
self.assertTrue(types.UuidOrNameType.validate(test_uuid))
@mock.patch("pecan.request")
def test_valid_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
test_name = 'dc16-database5'
self.assertTrue(types.UuidOrNameType.validate(test_name))
@mock.patch("pecan.request")
def test_invalid_uuid_or_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
self.assertRaises(exception.InvalidUuidOrName,
types.UuidOrNameType.validate, 'inval#uuid%or*name')
class MyBaseType(object):
"""Helper class, patched by objects of type MyPatchType"""
mandatory = wsme.wsattr(wtypes.text, mandatory=True)
class MyPatchType(types.JsonPatchType):
"""Helper class for TestJsonPatchType tests."""
_api_base = MyBaseType
_extra_non_removable_attrs = {'/non_removable'}
@staticmethod
def internal_attrs():
return ['/internal']
class MyRoot(wsme.WSRoot):
"""Helper class for TestJsonPatchType tests."""
@wsme.expose([wsme.types.text], body=[MyPatchType])
@wsme.validate([MyPatchType])
def test(self, patch):
return patch
class TestJsonPatchType(base.TestCase):
def setUp(self):
super(TestJsonPatchType, self).setUp()
self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp())
def _patch_json(self, params, expect_errors=False):
return self.app.patch_json('/test', params=params,
headers={'Accept': 'application/json'},
expect_errors=expect_errors)
def test_valid_patches(self):
valid_patches = [{'path': '/extra/foo', 'op': 'remove'},
{'path': '/extra/foo', 'op': 'add', 'value': 'bar'},
{'path': '/str', 'op': 'replace', 'value': 'bar'},
{'path': '/bool', 'op': 'add', 'value': True},
{'path': '/int', 'op': 'add', 'value': 1},
{'path': '/float', 'op': 'add', 'value': 0.123},
{'path': '/list', 'op': 'add', 'value': [1, 2]},
{'path': '/none', 'op': 'add', 'value': None},
{'path': '/empty_dict', 'op': 'add', 'value': {}},
{'path': '/empty_list', 'op': 'add', 'value': []},
{'path': '/dict', 'op': 'add',
'value': {'cat': 'meow'}}]
ret = self._patch_json(valid_patches, False)
self.assertEqual(http_client.OK, ret.status_int)
self.assertItemsEqual(valid_patches, ret.json)
def test_cannot_update_internal_attr(self):
patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_cannot_update_internal_dict_attr(self):
patch = [{'path': '/internal/test', 'op': 'replace',
'value': 'foo'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_mandatory_attr(self):
patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}]
ret = self._patch_json(patch, False)
self.assertEqual(http_client.OK, ret.status_int)
self.assertEqual(patch, ret.json)
def test_cannot_remove_mandatory_attr(self):
patch = [{'op': 'remove', 'path': '/mandatory'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_cannot_remove_extra_non_removable_attr(self):
patch = [{'op': 'remove', 'path': '/non_removable'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_missing_required_fields_path(self):
missing_path = [{'op': 'remove'}]
ret = self._patch_json(missing_path, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_missing_required_fields_op(self):
missing_op = [{'path': '/foo'}]
ret = self._patch_json(missing_op, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_invalid_op(self):
patch = [{'path': '/foo', 'op': 'invalid'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_invalid_path(self):
patch = [{'path': 'invalid-path', 'op': 'remove'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_cannot_add_with_no_value(self):
patch = [{'path': '/extra/foo', 'op': 'add'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
def test_cannot_replace_with_no_value(self):
patch = [{'path': '/foo', 'op': 'replace'}]
ret = self._patch_json(patch, True)
self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
self.assertTrue(ret.json['faultstring'])
class TestBooleanType(base.TestCase):
def test_valid_true_values(self):
v = types.BooleanType()
self.assertTrue(v.validate("true"))
self.assertTrue(v.validate("TRUE"))
self.assertTrue(v.validate("True"))
self.assertTrue(v.validate("t"))
self.assertTrue(v.validate("1"))
self.assertTrue(v.validate("y"))
self.assertTrue(v.validate("yes"))
self.assertTrue(v.validate("on"))
def test_valid_false_values(self):
v = types.BooleanType()
self.assertFalse(v.validate("false"))
self.assertFalse(v.validate("FALSE"))
self.assertFalse(v.validate("False"))
self.assertFalse(v.validate("f"))
self.assertFalse(v.validate("0"))
self.assertFalse(v.validate("n"))
self.assertFalse(v.validate("no"))
self.assertFalse(v.validate("off"))
def test_invalid_value(self):
v = types.BooleanType()
self.assertRaises(exception.Invalid, v.validate, "invalid-value")
self.assertRaises(exception.Invalid, v.validate, "01")
class TestJsonType(base.TestCase):
def test_valid_values(self):
vt = types.jsontype
value = vt.validate("hello")
self.assertEqual("hello", value)
value = vt.validate(10)
self.assertEqual(10, value)
value = vt.validate(0.123)
self.assertEqual(0.123, value)
value = vt.validate(True)
self.assertTrue(value)
value = vt.validate([1, 2, 3])
self.assertEqual([1, 2, 3], value)
value = vt.validate({'foo': 'bar'})
self.assertEqual({'foo': 'bar'}, value)
value = vt.validate(None)
self.assertIsNone(value)
def test_invalid_values(self):
vt = types.jsontype
self.assertRaises(exception.Invalid, vt.validate, object())
def test_apimultitype_tostring(self):
vts = str(types.jsontype)
self.assertIn(str(wtypes.text), vts)
self.assertIn(str(int), vts)
if six.PY2:
self.assertIn(str(long), vts)
self.assertIn(str(float), vts)
self.assertIn(str(types.BooleanType), vts)
self.assertIn(str(list), vts)
self.assertIn(str(dict), vts)
self.assertIn(str(None), vts)
class TestListType(base.TestCase):
def test_list_type(self):
v = types.ListType()
self.assertItemsEqual(['foo', 'bar'], v.validate('foo,bar'))
self.assertItemsEqual(['cat', 'meow'], v.validate("cat , meow"))
self.assertItemsEqual(['spongebob', 'squarepants'],
v.validate("SpongeBob,SquarePants"))
self.assertItemsEqual(['foo', 'bar'],
v.validate("foo, ,,bar"))
self.assertItemsEqual(['foo', 'bar'],
v.validate("foo,foo,foo,bar"))
self.assertIsInstance(v.validate('foo,bar'), list)
|
|
"""Support for Timers."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_ICON, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "timer"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
DEFAULT_DURATION = 0
ATTR_DURATION = "duration"
ATTR_REMAINING = "remaining"
CONF_DURATION = "duration"
STATUS_IDLE = "idle"
STATUS_ACTIVE = "active"
STATUS_PAUSED = "paused"
EVENT_TIMER_FINISHED = "timer.finished"
EVENT_TIMER_CANCELLED = "timer.cancelled"
EVENT_TIMER_STARTED = "timer.started"
EVENT_TIMER_RESTARTED = "timer.restarted"
EVENT_TIMER_PAUSED = "timer.paused"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_CANCEL = "cancel"
SERVICE_FINISH = "finish"
SERVICE_SCHEMA_DURATION = ENTITY_SERVICE_SCHEMA.extend(
{vol.Optional(ATTR_DURATION, default=timedelta(DEFAULT_DURATION)): cv.time_period}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.Any(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(
CONF_DURATION, timedelta(DEFAULT_DURATION)
): cv.time_period,
},
None,
)
)
},
extra=vol.ALLOW_EXTRA,
)
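# Illustrative sketch (not part of this component): validating a minimal timer
# configuration against CONFIG_SCHEMA; the "laundry" slug, name and duration
# below are made-up example values.
def _example_validate_config():
    return CONFIG_SCHEMA(
        {DOMAIN: {"laundry": {CONF_NAME: "Laundry", CONF_DURATION: "0:05:00"}}}
    )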
async def async_setup(hass, config):
"""Set up a timer."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
if not cfg:
cfg = {}
name = cfg.get(CONF_NAME)
icon = cfg.get(CONF_ICON)
duration = cfg.get(CONF_DURATION)
entities.append(Timer(hass, object_id, name, icon, duration))
if not entities:
return False
component.async_register_entity_service(
SERVICE_START, SERVICE_SCHEMA_DURATION, "async_start"
)
component.async_register_entity_service(
SERVICE_PAUSE, ENTITY_SERVICE_SCHEMA, "async_pause"
)
component.async_register_entity_service(
SERVICE_CANCEL, ENTITY_SERVICE_SCHEMA, "async_cancel"
)
component.async_register_entity_service(
SERVICE_FINISH, ENTITY_SERVICE_SCHEMA, "async_finish"
)
await component.async_add_entities(entities)
return True
class Timer(RestoreEntity):
"""Representation of a timer."""
def __init__(self, hass, object_id, name, icon, duration):
"""Initialize a timer."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._state = STATUS_IDLE
self._duration = duration
self._remaining = self._duration
self._icon = icon
self._hass = hass
self._end = None
self._listener = None
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return name of the timer."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def state(self):
"""Return the current value of the timer."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_DURATION: str(self._duration),
ATTR_REMAINING: str(self._remaining),
}
async def async_added_to_hass(self):
"""Call when entity is about to be added to Home Assistant."""
# If not None, we got an initial value.
if self._state is not None:
return
        state = await self.async_get_last_state()
        # Restore the last known state if one was recorded.
        self._state = state.state if state else STATUS_IDLE
async def async_start(self, duration):
"""Start a timer."""
if self._listener:
self._listener()
self._listener = None
newduration = None
if duration:
newduration = duration
event = EVENT_TIMER_STARTED
if self._state == STATUS_PAUSED:
event = EVENT_TIMER_RESTARTED
self._state = STATUS_ACTIVE
start = dt_util.utcnow()
if self._remaining and newduration is None:
self._end = start + self._remaining
else:
if newduration:
self._duration = newduration
self._remaining = newduration
else:
self._remaining = self._duration
self._end = start + self._duration
self._hass.bus.async_fire(event, {"entity_id": self.entity_id})
self._listener = async_track_point_in_utc_time(
self._hass, self.async_finished, self._end
)
await self.async_update_ha_state()
async def async_pause(self):
"""Pause a timer."""
if self._listener is None:
return
self._listener()
self._listener = None
self._remaining = self._end - dt_util.utcnow()
self._state = STATUS_PAUSED
self._end = None
self._hass.bus.async_fire(EVENT_TIMER_PAUSED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_cancel(self):
"""Cancel a timer."""
if self._listener:
self._listener()
self._listener = None
self._state = STATUS_IDLE
self._end = None
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_CANCELLED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_finish(self):
"""Reset and updates the states, fire finished event."""
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
async def async_finished(self, time):
"""Reset and updates the states, fire finished event."""
if self._state != STATUS_ACTIVE:
return
self._listener = None
self._state = STATUS_IDLE
self._remaining = timedelta()
self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id})
await self.async_update_ha_state()
|
|
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked Flax layers.
Useful when images (or, more broadly, tensors) are padded to a maximum size for
batching. E.g. a naive convolution will introduce edge effects that are
different for the padded and unpadded edges of the tensor; similarly, a naive
batch norm will aggregate across masked-out positions, etc. This module
introduces Flax layers that do not suffer from this.
"""
import functools
from typing import Optional, Tuple, Union, Callable, Any, Sequence, List, Iterable
import flax.linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
def _absolute_dims(rank: int, dims: Iterable[int]):
return tuple([rank + dim if dim < 0 else dim for dim in dims])
def avg_pool(
inputs: jnp.ndarray,
window_shape: Tuple[int, ...],
strides: Optional[Tuple[int, ...]] = None,
padding: Union[str, Sequence[Tuple[int, int]]] = 'VALID',
spatial_shape: Optional[jnp.ndarray] = None
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
"""Pools the input by taking the average over a window.
Args:
inputs: Input data with dimensions (batch, window_dims..., features).
window_shape: Shape tuple defining the window to reduce over.
strides: A sequence of `n` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, window_dims).
Returns:
The average for each window slice.
"""
inputs = nn.avg_pool(inputs, window_shape, strides=strides, padding=padding)
if spatial_shape is not None:
if (isinstance(padding, str) and padding.upper() == 'SAME' and
window_shape != (1,) * len(window_shape)):
raise NotImplementedError(
"Padding 'SAME' is not supported by masked mean pool.")
spatial_shape = _conv_output_shape(spatial_shape=spatial_shape,
kernel_size=window_shape,
input_dilation=None,
kernel_dilation=None,
strides=strides,
padding=padding)
inputs = apply_spatial_mask(inputs, spatial_shape)
return inputs, spatial_shape
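# Illustrative sketch (not part of the library): average-pooling a padded batch
# while tracking per-example unpadded spatial shapes; all shapes below are
# made-up example values.
def _example_masked_avg_pool():
  x = jnp.ones((2, 8, 3))                # (batch, spatial, features), padded to 8
  spatial_shape = jnp.array([[6], [8]])  # per-example unpadded lengths
  pooled, pooled_shape = avg_pool(x, window_shape=(2,), strides=(2,),
                                  padding='VALID', spatial_shape=spatial_shape)
  return pooled, pooled_shape            # shapes (2, 4, 3) and (2, 1)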
def max_pool(
inputs: jnp.ndarray,
window_shape: Tuple[int, ...],
strides: Optional[Tuple[int, ...]] = None,
padding: Union[str, Sequence[Tuple[int, int]]] = 'VALID',
spatial_shape: Optional[jnp.ndarray] = None
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
"""Pools the input by taking the maximum of a window slice.
Args:
inputs: Input data with dimensions (batch, window_dims..., features).
window_shape: A shape tuple defining the window to reduce over.
strides: A sequence of `n` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, window_dims).
Returns:
The maximum for each window slice.
"""
if spatial_shape is not None:
# Unlike for avg pool, for max pool a spatial mask must be applied first.
inputs = apply_spatial_mask(inputs, spatial_shape, value=-jnp.inf)
inputs = nn.max_pool(inputs, window_shape, strides=strides, padding=padding)
if spatial_shape is not None:
if (isinstance(padding, str) and padding.upper() == 'SAME' and
window_shape != (1,) * len(window_shape)):
raise NotImplementedError(
"Padding 'SAME' is not supported by masked max pool.")
spatial_shape = _conv_output_shape(spatial_shape=spatial_shape,
kernel_size=window_shape,
input_dilation=None,
kernel_dilation=None,
strides=strides,
padding=padding)
inputs = apply_spatial_mask(inputs, spatial_shape)
return inputs, spatial_shape
def _bn_agg_mean_var(
x: jnp.ndarray,
axis: Union[Tuple[int, ...], int],
p_agg: bool, *,
axis_name: Optional[str] = None,
axis_index_groups: Optional[Sequence[Sequence[int]]] = None,
spatial_shape: Optional[jnp.ndarray] = None):
"""Aggregate batch statistics accross devices.
Args:
x: Inputs to compute batch statistics on.
axis: Reduction axes for the stats.
p_agg: If True, parallel aggregation is performed using psum.
axis_name: Name of the axis for psum aggregation.
axis_index_groups: Groups of axis indices within that named axis
representing subsets of devices to reduce over (default: None).
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, spatial_dims, channels).
Returns:
Batch stats (mean and variance).
"""
  # When using spatial padding, we cannot accumulate the mean directly and
  # instead must accumulate the sum and the sum of squares, normalizing by the
  # per-example denominators afterwards.
acc_sum = jnp.sum(x, axis=axis, keepdims=False)
acc_sum2 = jnp.sum(lax.square(x), axis=axis, keepdims=False)
if spatial_shape is None:
denom = np.prod([x.shape[i] for i in axis])
else:
reduction_axis_shifted = tuple(i - 1 for i in axis if i > 0)
denom = jnp.prod(spatial_shape[:, reduction_axis_shifted], axis=-1)
denom = jnp.sum(denom)
if p_agg:
concatenated_acc_sum = jnp.concatenate([acc_sum, acc_sum2, denom])
acc_sum, acc_sum2, denom = jnp.split(
lax.psum(
concatenated_acc_sum,
axis_name=axis_name,
axis_index_groups=axis_index_groups), 3)
denom = jnp.maximum(denom, 1.)
mean = acc_sum / denom
var = acc_sum2 / denom - lax.square(mean)
return mean, var
class BatchNorm(nn.Module):
"""Masking-aware Batch Normalization layer.
Attributes:
use_running_average: If True, the statistics stored in batch_stats
will be used instead of computing the batch statistics on the input.
axis: The feature or non-batch axis of the input.
momentum: Decay rate for the exponential moving average of
the batch statistics.
epsilon: A small float added to variance to avoid dividing by zero.
dtype: The dtype of the computation (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma).
When the next layer is linear (also e.g. nn.relu), this can be disabled
since the scaling will be done by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
    spatial_norm: If True, spatial shapes influence the batch statistics,
otherwise every batch element has an equal weight.
axis_name: Axis name used to combine batch statistics from multiple
devices. See `jax.pmap` for a description of axis names (default: None).
axis_index_groups: Groups of axis indices within that named axis
representing subsets of devices to reduce over (default: None). For
example, `[[0, 1], [2, 3]]` would independently batch-normalize over the
examples on the first two and last two devices. See `jax.lax.psum` for
more details.
"""
use_running_average: Optional[bool] = None
axis: int = -1
momentum: float = 0.99
epsilon: float = 1e-5
dtype: jnp.dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[
[Any, Tuple[int, ...], Any], Any] = nn.initializers.zeros
scale_init: Callable[
[Any, Tuple[int, ...], Any], Any] = nn.initializers.ones
spatial_norm: bool = True
axis_name: Optional[str] = None
axis_index_groups: Optional[Sequence[Sequence[int]]] = None
@nn.compact
def __call__(
self,
x: jnp.ndarray,
use_running_average: Optional[bool] = None,
spatial_shape: Optional[jnp.ndarray] = None
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
"""Normalizes the input using batch statistics.
Args:
x: The input to be normalized.
use_running_average: If True, the statistics stored in batch_stats will be
used instead of computing the batch statistics on the input.
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, spatial_dims, channels).
Returns:
Normalized inputs (the same shape as inputs) and the spatial shape.
"""
use_running_average = nn.module.merge_param(
'use_running_average', self.use_running_average, use_running_average)
x = jnp.asarray(x, jnp.float32)
axis = self.axis if isinstance(self.axis, tuple) else (self.axis,)
axis = _absolute_dims(x.ndim, axis)
feature_shape = tuple(d if i in axis else 1 for i, d in enumerate(x.shape))
reduced_feature_shape = tuple(d for i, d in enumerate(x.shape) if i in axis)
reduction_axis = tuple(i for i in range(x.ndim) if i not in axis)
# Detect if we're in initialization via empty variable tree.
initializing = not self.has_variable('batch_stats', 'mean')
ra_mean = self.variable('batch_stats', 'mean',
lambda s: jnp.zeros(s, jnp.float32),
reduced_feature_shape)
ra_var = self.variable('batch_stats', 'var',
lambda s: jnp.ones(s, jnp.float32),
reduced_feature_shape)
if use_running_average:
mean, var = ra_mean.value, ra_var.value
else:
p_agg = ((self.axis_name is not None) and (not initializing)
and self.spatial_norm)
mean, var = _bn_agg_mean_var(
x,
reduction_axis,
p_agg,
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups,
spatial_shape=spatial_shape)
if not initializing:
ra_mean.value = (self.momentum * ra_mean.value +
(1 - self.momentum) * mean)
ra_var.value = (self.momentum * ra_var.value +
(1 - self.momentum) * var)
    # Apply normalization.
mean, var = mean.reshape(feature_shape), var.reshape(feature_shape)
y = x - mean
mul = lax.rsqrt(var + self.epsilon)
if self.use_scale:
scale = self.param('scale',
self.scale_init,
reduced_feature_shape).reshape(feature_shape)
mul = mul * scale
y = y * mul
if self.use_bias:
bias = self.param('bias',
self.bias_init,
reduced_feature_shape).reshape(feature_shape)
y = y + bias
if spatial_shape is not None:
# Restore spatial mask for the outputs.
y = apply_spatial_mask(y, spatial_shape)
return jnp.asarray(y, self.dtype), spatial_shape
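# Illustrative sketch (not part of the library): initializing and applying the
# masked BatchNorm on a padded batch; the shapes and the RNG seed are made-up
# example values.
def _example_masked_batch_norm():
  x = jnp.ones((2, 8, 3))                # (batch, spatial, features), padded to 8
  spatial_shape = jnp.array([[6], [8]])  # per-example unpadded lengths
  bn = BatchNorm(use_running_average=False)
  variables = bn.init(jax.random.PRNGKey(0), x, spatial_shape=spatial_shape)
  (y, out_shape), _ = bn.apply(
      variables, x, spatial_shape=spatial_shape, mutable=['batch_stats'])
  return y, out_shape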
class GroupNorm(nn.Module):
"""Masking-aware Group Normalization (arxiv.org/abs/1803.08494).
This op is similar to batch normalization, but statistics are shared across
equally-sized groups of channels and not shared across batch dimension.
Thus, group normalization does not depend on the batch composition and does
not require maintaining internal state for storing statistics.
The user should either specify the total number of channel groups or the
number of channels per group.
Attributes:
num_groups: Total number of channel groups. The default value of 32 is
proposed by the original group normalization paper.
group_size: The number of channels in a group.
epsilon: A small float added to variance to avoid dividing by zero.
dtype: The dtype of the computation (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma). When the next layer is linear
(also e.g. nn.relu), this can be disabled since the scaling will be done
by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
spatial_norm: If True, spatial shapes influence group norm weights,
otherwise every batch element has an equal weight.
"""
num_groups: int = 32
group_size: Optional[int] = None
epsilon: float = 1e-6
dtype: jnp.dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[
[Any, Tuple[int, ...], Any], Any] = nn.initializers.zeros
scale_init: Callable[
[Any, Tuple[int, ...], Any], Any] = nn.initializers.ones
spatial_norm: bool = True
@nn.compact
def __call__(
self,
x: jnp.ndarray,
spatial_shape: Optional[jnp.array] = None,
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
"""Applies group normalization to the input (arxiv.org/abs/1803.08494).
Args:
      x: Input of shape N...C, where N is a batch dimension and C is a channels
        dimension. `...` represents an arbitrary number of extra dimensions
that are used to accumulate statistics over.
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, spatial_dims, channels).
Returns:
Normalized inputs (the same shape as inputs).
"""
x = jnp.asarray(x, jnp.float32)
if ((self.num_groups is None and self.group_size is None) or
(self.num_groups is not None and self.group_size is not None)):
raise ValueError('Either `num_groups` or `group_size` should be '
'specified, but not both of them.')
num_groups = self.num_groups
channels = x.shape[-1]
if self.group_size is not None:
if channels % self.group_size != 0:
raise ValueError('Number of channels ({}) is not multiple of the '
'group size ({}).'.format(channels, self.group_size))
num_groups = channels // self.group_size
if num_groups <= 0 or channels % num_groups != 0:
raise ValueError('Number of groups ({}) does not divide the number'
' of channels ({}).'.format(num_groups, channels))
input_shape = x.shape
group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
x = x.reshape(group_shape)
reduction_axis = tuple(range(1, x.ndim - 2)) + (x.ndim - 1,)
mean = jnp.mean(x, axis=reduction_axis, keepdims=True)
mean_of_squares = jnp.mean(jnp.square(x), axis=reduction_axis,
keepdims=True)
orig_denom = np.prod([x.shape[i] for i in reduction_axis[:-1]])
if (spatial_shape is not None) and self.spatial_norm:
reduction_axis_shifted = tuple(
i - 1 for i in reduction_axis[:-1] if i > 0)
denom = jnp.prod(spatial_shape[:, reduction_axis_shifted], axis=-1)
denom = jnp.reshape(denom, (denom.shape[0],) + (1,) * (mean.ndim - 1))
denom = jnp.maximum(denom, 1.)
mean = mean * (orig_denom / denom)
mean_of_squares = mean_of_squares * (orig_denom / denom)
var = mean_of_squares - jnp.square(mean)
x = (x - mean) * lax.rsqrt(var + self.epsilon)
x = x.reshape(input_shape)
feature_shape = tuple([1 for d in input_shape[:-1]] + [input_shape[-1]])
if self.use_scale:
x *= self.param('scale', self.scale_init, feature_shape)
if self.use_bias:
x += self.param('bias', self.bias_init, feature_shape)
if spatial_shape is not None:
# Restore spatial mask for the outputs.
x = apply_spatial_mask(x, spatial_shape)
return x.astype(self.dtype), spatial_shape
class Conv(nn.Conv):
"""Masked convolution.
Attributes:
features: Number of convolution filters.
kernel_size: Shape of the convolutional kernel. For 1D convolution,
the kernel size can be passed as an integer. For all other cases, it
must be a sequence of integers.
strides: A sequence of `n` integers, representing the inter-window
strides.
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
input_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `inputs`.
Convolution with input dilation `d` is equivalent to transposed
convolution with stride `d`.
kernel_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of the convolution
kernel. Convolution with kernel dilation is also known as 'atrous
convolution'.
bias: Whether to add a bias to the output (default: True).
dtype: The dtype of the computation (default: float32).
precision: Numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: Initializer for the convolutional kernel.
bias_init: Initializer for the bias.
"""
features: int
kernel_size: Union[int, Tuple[int, ...]]
strides: Optional[Tuple[int, ...]] = None
padding: Union[str, Sequence[Tuple[int, int]]] = 'SAME'
input_dilation: Optional[Tuple[int, ...]] = None
kernel_dilation: Optional[Tuple[int, ...]] = None
use_bias: bool = True
dtype: jnp.dtype = jnp.float32
precision: Optional[jax.lax.Precision] = None
kernel_init: Callable[
[Any, Iterable[int], Any], Any] = nn.linear.default_kernel_init
bias_init: Callable[
[Any, Iterable[int], Any], Any] = nn.initializers.zeros
@nn.compact
def __call__(
self,
inputs: jnp.ndarray,
spatial_shape: Optional[jnp.ndarray] = None
) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]:
"""Applies a *masked* convolution to the inputs.
Args:
inputs: Input data with dimensions (batch, spatial_dims..., features).
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, len(spatial_dims)).
Returns:
The convolved data and the output spatial size as a tuple.
"""
outputs = super(Conv, self).__call__(inputs)
kernel_size = self.kernel_size
if isinstance(kernel_size, int):
kernel_size = (kernel_size,)
if spatial_shape is not None:
if (isinstance(self.padding, str) and self.padding.upper() == 'SAME' and
kernel_size != (1,) * len(kernel_size)):
        # In the case of 'SAME' padding the amounts padded on the left and
        # right depend on the shape of the input (which is dynamic in our case),
        # the stride and the kernel size. In our case this means that each
        # element of the batch might have to be padded in a different way and
        # cannot be batched.
        # Because the number of possible dynamic pads for a given stride and
        # kernel size is finite, it should be possible in theory to run all of
        # them and then select the correct output for masking. The
        # implementation is left for a time when we have a use case for this.
raise NotImplementedError(
"Padding 'SAME' is not supported by masked convolutions.")
spatial_shape = _conv_output_shape(spatial_shape=spatial_shape,
kernel_size=kernel_size,
input_dilation=self.input_dilation,
kernel_dilation=self.kernel_dilation,
strides=self.strides,
padding=self.padding)
outputs = apply_spatial_mask(outputs, spatial_shape)
return outputs, spatial_shape
def apply_spatial_mask(
inputs: jnp.ndarray,
spatial_shape: jnp.ndarray,
value: float = 0.) -> jnp.ndarray:
"""Construct and apply spatial mask to the inputs.
Args:
inputs: Input tensor with dimensions [batch, spatial_dim, dim] to which the
mask should be applied.
spatial_shape: Per-input spatial shape of *unpadded* input data with
dimensions (batch, len(spatial_dims)).
value: Value to use for the mask (default: 0).
Returns:
    Inputs with the spatial masking applied to them.
"""
assert inputs.shape[0] == spatial_shape.shape[0]
assert spatial_shape.shape[1] == inputs.ndim - 2
mask = mask_from_spatial(inputs.shape[1:-1], spatial_shape, per_axis=False)
mask = jnp.expand_dims(mask, axis=-1)
inputs = jnp.where(mask, inputs, value)
return inputs
def mask_from_spatial(
padded_shape: Tuple[int, ...],
spatial_shape: jnp.ndarray,
per_axis: bool = False) -> Union[jnp.ndarray, List[jnp.ndarray]]:
"""Create a spatial mask for a given padded shape and spatial size.
Args:
    padded_shape: Shape of the spatial dimensions of the padded data that needs
      to be masked.
    spatial_shape: Per-element unpadded spatial size of the data with dimensions
      (batch, len(padded_shape)).
    per_axis: If True, a list of per-spatial-dim masks is returned instead of
      a single mask; that should enable some memory savings as it allows shape
      broadcasting to happen only when it is required.
Returns:
The spatial mask.
"""
assert spatial_shape.ndim == 2
assert len(padded_shape) == spatial_shape.shape[1]
ndim = spatial_shape.shape[1]
masks = []
for i in range(ndim):
# Construct per-axis mask and then broadcast if asked for.
mask = jnp.arange(0, padded_shape[i], dtype=jnp.int32)
mask = jnp.reshape(
mask,
(1,) * (i + 1) + (padded_shape[i],) + (1,) * (ndim - i - 1))
threshold = spatial_shape[:, i]
threshold = jnp.reshape(threshold, (spatial_shape.shape[0],) + (1,) * ndim)
mask = mask < threshold
masks.append(mask)
if not per_axis:
masks = functools.reduce(jnp.logical_and, masks)
return masks
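# Illustrative sketch (not part of the library): for data padded to a spatial
# length of 4 and per-example unpadded lengths of 2 and 4, mask_from_spatial
# returns a boolean mask of shape (2, 4) that is True only over the unpadded
# positions.
def _example_mask_from_spatial():
  spatial_shape = jnp.array([[2], [4]])
  return mask_from_spatial((4,), spatial_shape)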
def _dilate_shape(shape: jnp.ndarray, dilation: Tuple[int, ...]):
"""Utility function for computing the shape resulting from a dilation.
Args:
    shape: Shapes (input or kernel, i.e. lhs or rhs) to which the dilation
      should be applied.
dilation: The dilation to apply.
Returns:
Dilated input shapes.
"""
if not np.all(np.greater(dilation, 0)):
raise TypeError(f'All dilations must be positive, got {dilation}.')
dilation = (1,) * (shape.shape[1] - len(dilation)) + tuple(dilation)
dilation = jnp.array(dilation)
return jnp.where(shape == 0, 0,
jnp.multiply(dilation, jnp.subtract(shape, 1)) + 1)
def _conv_output_shape(
spatial_shape: jnp.ndarray,
kernel_size: Tuple[int, ...],
input_dilation: Optional[Tuple[int, ...]],
kernel_dilation: Optional[Tuple[int, ...]],
strides: Optional[Tuple[int, ...]],
padding: Union[str, Sequence[Tuple[int, int]]]) -> jnp.ndarray:
"""Convenience wrapper function for inferring the convolution output shape.
Args:
    spatial_shape: Input (lhs) shapes for which the output shapes should be
      inferred, as an array with dimensions (batch, len(spatial_dims)).
    kernel_size: Convolution kernel size (i.e. rhs shape).
input_dilation: Input (lhs) dilation.
kernel_dilation: Convolution kernel (rhs) dilation.
strides: Convolution (rhs) stride.
padding: Input (lhs) padding.
Returns:
Inferred convolution output shapes as array with dimensions
(batch, len(spatial_dims)).
"""
strides = strides or (1,) * len(kernel_size)
if input_dilation is not None:
spatial_shape = _dilate_shape(spatial_shape, input_dilation)
if kernel_dilation is not None:
kernel_size = tuple(
(k - 1) * r + 1 for k, r in zip(kernel_size, kernel_dilation))
spatial_shape = jnp.concatenate(
[jnp.ones((spatial_shape.shape[0], 2), dtype=jnp.int32), spatial_shape],
axis=-1)
out_shape = conv_shape_tuple(lhs_shape=spatial_shape,
rhs_shape=(1, 1) + kernel_size,
strides=strides,
pads=padding)
return out_shape[:, 2:]
def _ceil_divide(x1: jnp.ndarray, x2: jnp.ndarray) -> jnp.ndarray:
"""Ceil division of two JAX arrays."""
return -jnp.floor_divide(jnp.negative(x1), x2)
def padtype_to_pads(
in_shape: jnp.ndarray,
window_shape: Tuple[int, ...],
window_strides: Tuple[int, ...],
padding: str) -> jnp.ndarray:
"""Convert padding string to list of pairs of pad values.
Args:
in_shape: Input (lhs) shapes for which the padding should be inferred as
array with dimensions (batch, spatial_dims).
window_shape: Window (kernel; rhs) shape of the convolution.
window_strides: Window (kernel; rhs) convolution strides.
    padding: Convolution (rhs) padding.
Returns:
Inferred lhs paddings as array with dimensions
(batch, len(spatial_dims), 2).
"""
xc = jax.lib.xla_client
if isinstance(padding, str):
mapping = {'VALID': xc.PaddingType.VALID, 'SAME': xc.PaddingType.SAME} # pytype: disable=module-attr # gen-stub-imports
try:
padding = mapping[padding.upper()]
except KeyError as err:
msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
raise RuntimeError(msg.format(padding)) from err
if padding == xc.PaddingType.SAME: # pytype: disable=module-attr # gen-stub-imports
window_shape = jnp.array(window_shape)
window_strides = jnp.array(window_strides)
out_shape = _ceil_divide(in_shape, window_strides)
pad_sizes = jnp.maximum(
(out_shape - 1) * window_strides + window_shape - in_shape, 0)
pad_sizes = jnp.stack([pad_sizes // 2, pad_sizes - pad_sizes // 2], axis=1)
return pad_sizes
elif padding == xc.PaddingType.VALID: # pytype: disable=module-attr # gen-stub-imports
return jnp.zeros((in_shape.shape[0], len(window_shape), 2), dtype=jnp.int32)
raise TypeError(f'Unknown padding type: {padding}.')
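# Illustrative sketch (not part of the library): for a single example of
# spatial length 5, a window of 3 and a stride of 1, 'SAME' padding needs two
# extra positions in total, split evenly before and after the input; the
# numbers are made-up example values.
def _example_same_pads():
  in_shape = jnp.array([[5]])
  return padtype_to_pads(in_shape, window_shape=(3,), window_strides=(1,),
                         padding='SAME')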
def conv_shape_tuple(
lhs_shape: jnp.ndarray,
rhs_shape: Tuple[int, ...],
strides: Tuple[int, ...],
pads: Union[str, Sequence[Tuple[int, int]]]) -> jnp.ndarray:
"""Compute the shape of a conv given input shapes in canonical order.
Args:
lhs_shape: Input (lhs) shapes for which the output shapes should be inferred
as array with dimensions (batch, spatial_dims, dims).
    rhs_shape: Convolution kernel size (i.e. rhs shape).
strides: Convolution (rhs) stride.
pads: Input (lhs) padding.
Returns:
Inferred convolution output shapes as array with dimensions
(batch, len(spatial_dims)).
"""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[:, 2:], rhs_shape[2:], strides, pads)
else:
pads = jnp.expand_dims(jnp.array(pads), axis=0)
if pads.shape[1] != lhs_shape.shape[1] - 2:
msg = 'Wrong number of explicit pads for convolution: expected {}, got {}.'
raise TypeError(msg.format(lhs_shape.shape[1] - 2, pads.shape[1]))
lhs_padded = jnp.add(lhs_shape[:, 2:], jnp.sum(pads, axis=2))
rhs_shape = jnp.array(rhs_shape, dtype=jnp.int32)
strides = jnp.array(strides, dtype=jnp.int32)
out_space = jnp.floor_divide(
jnp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = jnp.maximum(0, out_space)
out_shape = jnp.stack(
[lhs_shape[:, 0], jnp.full((lhs_shape.shape[0],), rhs_shape[0])],
axis=-1)
return jnp.concatenate([out_shape, out_space], axis=-1)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration options for cbuildbot boards."""
from __future__ import print_function
#
# Define assorted constants describing various sets of boards.
#
# Base per-board configuration.
# Every board must appear in exactly 1 of the following sets.
#
arm_internal_release_boards = frozenset([
'arkham',
'asurada',
'beaglebone',
'beaglebone_servo',
'bob',
'bubs',
'capri',
'capri-zfpga',
'cheza',
'cheza64',
'cobblepot',
'elm',
'gale',
'gonzo',
'hana',
'jacuzzi',
'kevin',
'kevin64',
'kevin-arc64',
'kukui',
'kukui-arc-r',
'lasilla-ground',
'littlejoe',
'mistral',
'nyan_big',
'nyan_blaze',
'nyan_kitty',
'oak',
'octavius',
'romer',
'scarlet',
'trogdor',
'veyron_fievel',
'veyron_jaq',
'veyron_jerry',
'veyron_mickey',
'veyron_mighty',
'veyron_minnie',
'veyron_rialto',
'veyron_speedy',
'veyron_tiger',
'viking',
'viking-poc2',
'whirlwind',
'wooten',
])
arm_external_boards = frozenset([
'arm-generic',
'arm64-generic',
'arm64-llvmpipe',
'tael',
])
x86_internal_release_boards = frozenset([
'amd64-generic-cheets',
'asuka',
'atlas',
'auron_paine',
'auron_yuna',
'banjo',
'banon',
'betty',
'betty-arc-r',
'betty-arcvm-master',
'betty-arcvm-pi',
'betty-pi-arc',
'buddy',
'candy',
'caroline',
'cave',
'celes',
'chell',
'coral',
'cyan',
'dedede',
'deltaur',
'drallion',
'edgar',
'endeavour',
'enguarde',
'eve',
'eve-arc-r',
'eve-arcvm-mesa-virgl-next',
'eve-lacros',
'excelsior',
'expresso',
'falco_li',
'fizz',
'fizz-accelerator',
'fizz-moblab',
'fizz-labstation',
'gandof',
'glados',
'gnawty',
'grunt',
'guado',
'guado_labstation',
'hatch',
'hatch-arc-r',
'hatch-diskswap',
'heli',
'jecht',
'kalista',
'kefka',
'kefka-kernelnext',
'kip',
'kumo',
'lakitu',
'lakitu-gpu',
'lakitu-nc',
'lakitu-st',
'lakitu_next',
'lars',
'lulu',
'monroe',
'mushu',
'nami',
'nautilus',
'ninja',
'nocturne',
'novato',
'novato-arc64',
'octopus',
'orco',
'palkia',
'poppy',
'puff',
'pyro',
'rammus',
'rammus-arc-r',
'reef',
'reks',
'relm',
'rikku',
'samus',
'samus-kernelnext',
'sand',
'sarien',
'sentry',
'setzer',
'sludge',
'snappy',
'soraka',
'sumo',
'swanky',
'terra',
'tidus',
'ultima',
'volteer',
'winky',
'wizpig',
'wristpin',
'zork',
])
x86_external_boards = frozenset([
'amd64-generic',
'moblab-generic-vm',
'tatl',
'x32-generic',
])
# Board can appear in 1 or more of the following sets.
brillo_boards = frozenset([
'arkham',
'gale',
'mistral',
'whirlwind',
])
accelerator_boards = frozenset([
'fizz-accelerator',
])
beaglebone_boards = frozenset([
'beaglebone',
'beaglebone_servo',
])
dustbuster_boards = frozenset([
'wristpin',
])
lakitu_boards = frozenset([
# Although its name doesn't indicate any lakitu relevance,
# kumo board is developed by the lakitu-dev team.
'kumo',
'lakitu',
'lakitu-gpu',
'lakitu-nc',
'lakitu-st',
'lakitu_next',
])
lassen_boards = frozenset([
'lassen',
])
loonix_boards = frozenset([
'capri',
'capri-zfpga',
'cobblepot',
'gonzo',
'lasilla-ground',
'octavius',
'romer',
'wooten',
])
reven_boards = frozenset([
'reven',
])
wshwos_boards = frozenset([
'littlejoe',
'viking',
'viking-poc2',
])
moblab_boards = frozenset([
'fizz-moblab',
'moblab-generic-vm',
])
scribe_boards = frozenset([
'guado-macrophage',
])
termina_boards = frozenset([
'sludge',
'tatl',
'tael',
])
nofactory_boards = (
lakitu_boards | termina_boards | lassen_boards | reven_boards | frozenset([
'x30evb',
])
)
toolchains_from_source = frozenset([
'x32-generic',
])
noimagetest_boards = (lakitu_boards | loonix_boards | termina_boards
| scribe_boards | wshwos_boards | dustbuster_boards)
nohwqual_boards = (lakitu_boards | lassen_boards | loonix_boards
| termina_boards | beaglebone_boards | wshwos_boards
| dustbuster_boards | reven_boards)
norootfs_verification_boards = frozenset([
'kumo',
])
base_layout_boards = lakitu_boards | termina_boards
builder_incompatible_binaries_boards = frozenset([
'grunt',
'zork',
])
|
|
"""
SlipStream Client
=====
Copyright (C) 2013 SixSq Sarl (sixsq.com)
=====
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import json
from collections import defaultdict
import slipstream.util as util
import slipstream.exceptions.Exceptions as Exceptions
from slipstream.UserInfo import UserInfo
from slipstream.HttpClient import HttpClient
from slipstream.NodeInstance import NodeInstance
from slipstream.NodeDecorator import NodeDecorator
etree = util.importETree()
class SlipStreamHttpClient(object):
URL_IGNORE_ABORT_ATTRIBUTE_QUERY = '?ignoreabort=true'
def __init__(self, configHolder):
self.category = None
self.run_dom = None
self.ignoreAbort = False
self.username = ''
self.password = ''
self.diid = ''
self.node_instance_name = ''
self.serviceurl = ''
self.verboseLevel = None
self.retry = True
configHolder.assign(self)
self._assemble_endpoints()
self.httpClient = HttpClient(configHolder=configHolder)
self.httpClient.init_session(self.serviceurl)
self.api = self.httpClient.get_ss_api()
def set_retry(self, retry):
self.retry = retry
def _assemble_endpoints(self):
self.runEndpoint = self.serviceurl + util.RUN_RESOURCE_PATH
self.run_url = self.runEndpoint + '/' + self.diid
self.userEndpoint = '%s/user/%s' % (self.serviceurl,
self.username)
self.configuration_endpoint = '%s%s' % (self.serviceurl,
util.CONFIGURATION_RESOURCE_PATH)
@staticmethod
def _strip_unwanted_attrs(resource):
unwanted = ('id', 'resourceURI', 'acl', 'operations',
'created', 'updated', 'name', 'description', 'properties')
for k in resource.keys():
if k in unwanted:
del resource[k]
return resource
def _get_user(self):
_, jresp = self.httpClient.get(self.serviceurl + '/api/user/%s' % self.username,
accept='application/json')
return self._strip_unwanted_attrs(json.loads(jresp))
def _get_user_params(self):
_, jresp = self.httpClient.get(self.serviceurl + '/api/user-param',
accept='application/json')
user_params = json.loads(jresp)
if user_params.get('count', 0) < 1:
raise Exception('No user params found for %s.' % self.username)
return self._strip_unwanted_attrs(user_params.get('userParam')[0])
def _get_cloud_cred(self, cloud_qualifier):
creds = self._get_cloud_creds(cloud_qualifier)
cred = self._strip_unwanted_attrs(creds.get('credentials')[0])
cred[UserInfo.CLOUD_USERNAME_KEY] = cred.get('key', '')
cred[UserInfo.CLOUD_PASSWORD_KEY] = cred.get('secret', '')
return cred
def _get_cloud_creds(self, cloud_qualifier):
_filter = "$filter=type^='cloud-cred' and connector/href='connector/%s'" % \
cloud_qualifier
_, jresp = self.httpClient.put(self.serviceurl + '/api/credential?%s' % _filter,
accept='application/json')
creds = json.loads(jresp)
if creds.get('count', 0) < 1:
raise Exception('No cloud creds found for %s with %s.' % (self.username, _filter))
return creds
def _get_connector_conf(self, cloud_qualifier):
_, jresp = self.httpClient.get(self.serviceurl + '/api/connector/%s' % cloud_qualifier,
accept='application/json')
return self._strip_unwanted_attrs(json.loads(jresp))
def get_user_info(self, cloud_qualifier):
user_info = UserInfo(cloud_qualifier)
user = self._get_user()
if 'password' in user:
del user['password']
user_info.set_user_params(user)
user_info.set_general_params(self._get_user_params())
if cloud_qualifier:
connector_conf = self._get_connector_conf(cloud_qualifier)
connector_conf.update(self._get_cloud_cred(cloud_qualifier))
user_info.set_cloud_params(connector_conf)
return user_info
def _extractModuleResourceUri(self, run):
rootElement = etree.fromstring(run.encode('utf-8'))
return rootElement.attrib[NodeDecorator.MODULE_RESOURCE_URI]
def get_nodes_instances(self, cloud_service_name=None):
'''Return dict {<node_instance_name>: NodeInstance, }
'''
nodes_instances = {}
self._retrieveAndSetRun()
nodes_instances_runtime_parameters = \
DomExtractor.extract_nodes_instances_runtime_parameters(self.run_dom, cloud_service_name)
nodes_runtime_parameters = DomExtractor.extract_nodes_runtime_parameters(self.run_dom)
for node_instance_name, node_instance_runtime_parameters in nodes_instances_runtime_parameters.items():
node_instance = NodeInstance(node_instance_runtime_parameters)
node_name = node_instance.get_node_name()
if nodes_runtime_parameters:
node_runtime_parameters = nodes_runtime_parameters.get(node_name, {})
if node_runtime_parameters:
node_instance.set_parameter(NodeDecorator.MAX_PROVISIONING_FAILURES_KEY,
node_runtime_parameters.get(NodeDecorator.MAX_PROVISIONING_FAILURES_KEY, '0'))
image_attributes = DomExtractor.extract_node_image_attributes(self.run_dom, node_name)
node_instance.set_image_attributes(image_attributes)
image_targets = DomExtractor.extract_node_image_targets(self.run_dom, node_name)
node_instance.set_image_targets(image_targets)
build_state = DomExtractor.extract_node_image_build_state(self.run_dom, node_name)
node_instance.set_build_state(build_state)
nodes_instances[node_instance_name] = node_instance
return nodes_instances
def _get_nodename(self):
'Node name derived from the node instance name.'
return self.node_instance_name.split(
NodeDecorator.NODE_MULTIPLICITY_SEPARATOR)[0]
def get_run_category(self):
self._retrieveAndSetRun()
return DomExtractor.extractCategoryFromRun(self.run_dom)
def get_run_type(self):
self._retrieveAndSetRun()
return DomExtractor.extractTypeFromRun(self.run_dom)
def get_run_mutable(self):
self._retrieveAndSetRun()
return DomExtractor.extract_mutable_from_run(self.run_dom)
def discard_run(self):
self.run_dom = None
def _retrieveAndSetRun(self):
if self.run_dom is None:
url = self.run_url
_, run = self._retrieve(url)
self.run_dom = etree.fromstring(run.encode('utf-8'))
def _retrieve(self, url):
return self._httpGet(url, 'application/xml')
def execute(self, resourceUri):
url = self.runEndpoint
return self._httpPost(url, resourceUri, 'text/plain')
def complete_state(self, node_instance_name):
url = '%s/%s:%s' % (self.run_url, node_instance_name,
NodeDecorator.COMPLETE_KEY)
url += SlipStreamHttpClient.URL_IGNORE_ABORT_ATTRIBUTE_QUERY
return self._httpPost(url, 'reset', 'text/plain')
def terminate_run(self):
return self._httpDelete(self.run_url)
def _fail(self, message):
self.setRuntimeParameter(
NodeDecorator.globalNamespacePrefix + NodeDecorator.ABORT_KEY, message)
def sendReport(self, report):
resource_id = self._create_external_object_report(report)
upload_url = self._generate_upload_url_external_object_report(resource_id)
self._upload_report(upload_url, report)
self._set_report_ready(resource_id)
def _create_external_object_report(self, report_path):
resp = self.api.cimi_add('externalObjects',
{'externalObjectTemplate': {'href': 'external-object-template/report',
'runUUID': self.diid,
'component': self.node_instance_name,
'filename': os.path.basename(report_path),
'contentType': 'application/tar+gzip'}})
return resp.json['resource-id']
def _generate_upload_url_external_object_report(self, resource_id):
resp = self.api.cimi_operation(resource_id, "http://sixsq.com/slipstream/1/action/upload")
return resp.json['uri']
def _upload_report(self, url, report):
self._printDetail('Uploading report to: %s' % url)
        with open(report, 'rb') as report_file:
            body = report_file.read()
self._httpPut(url, body, 'application/tar+gzip', accept="*/*")
def _set_report_ready(self, resource_id):
self.api.cimi_operation(resource_id, "http://sixsq.com/slipstream/1/action/ready")
def isAbort(self):
return self.getGlobalAbortMessage() != ''
def getGlobalAbortMessage(self):
url = '%s/%s%s' % (self.run_url,
NodeDecorator.globalNamespacePrefix,
NodeDecorator.ABORT_KEY)
url += SlipStreamHttpClient.URL_IGNORE_ABORT_ATTRIBUTE_QUERY
_, content = self._httpGet(url, accept='text/plain')
return content.strip().strip('"').strip("'")
def get_run_parameters(self):
self._retrieveAndSetRun()
return DomExtractor.extract_run_parameters_from_run(self.run_dom)
def getRuntimeParameter(self, key, ignoreAbort=False):
url = self.run_url + '/' + key
if self.ignoreAbort or ignoreAbort:
url += SlipStreamHttpClient.URL_IGNORE_ABORT_ATTRIBUTE_QUERY
try:
_, content = self._httpGet(url, accept='text/plain')
except Exceptions.NotFoundError as ex:
raise Exceptions.NotFoundError('"%s" for %s' % (str(ex), key))
return content.strip().strip('"').strip("'")
def setRuntimeParameter(self, key, value, ignoreAbort=False):
url = self.run_url + '/' + key
if self.ignoreAbort or ignoreAbort:
url += SlipStreamHttpClient.URL_IGNORE_ABORT_ATTRIBUTE_QUERY
_, content = self._httpPut(url, util.removeASCIIEscape(value),
accept='text/plain')
return content.strip().strip('"').strip("'")
def unset_runtime_parameter(self, key, ignore_abort=False):
url = '%s/%s' % (self.run_url, key)
if (self.ignoreAbort or ignore_abort):
url += SlipStreamHttpClient.URL_IGNORE_ABORT_ATTRIBUTE_QUERY
self._httpDelete(url)
def _httpGet(self, url, accept='application/xml'):
return self.httpClient.get(url, accept, retry=self.retry)
def _httpPut(self, url, body=None, contentType='application/xml', accept='application/xml'):
return self.httpClient.put(url, body, contentType, accept, retry=self.retry)
def _httpPost(self, url, body=None, contentType='application/xml'):
return self.httpClient.post(url, body, contentType, retry=self.retry)
def _httpDelete(self, url, body=None):
return self.httpClient.delete(url, body=body, retry=self.retry)
def _printDetail(self, message):
util.printDetail(message, self.verboseLevel, util.VERBOSE_LEVEL_DETAILED)
def put_new_image_id(self, image_resource_uri, image_id):
url = self.serviceurl + '/' + image_resource_uri
self._printDetail('Set new image id: %s %s' % (url, image_id))
self._httpPut(url, image_id)
def launchDeployment(self, params):
body = '&'.join(params)
resp, _ = self._httpPost(self.runEndpoint, body,
contentType='text/plain')
return resp.headers['location']
def getRunState(self, uuid=None, ignoreAbort=True):
if not uuid and not self.diid:
raise Exceptions.ExecutionException("Run ID should be provided "
"to get state.")
state_key = NodeDecorator.globalNamespacePrefix + NodeDecorator.STATE_KEY
self.run_url = self.runEndpoint + '/' + (uuid or self.diid)
return self.getRuntimeParameter(state_key, ignoreAbort=ignoreAbort)
def remove_instances_from_run(self, node_name, ids, detele_ids_only=True):
"""ids : []
"""
url = '%s/%s' % (self.run_url, node_name)
body = "ids=%s" % ','.join(map(str, ids))
if detele_ids_only:
body = body + '&delete-ids-only=true'
self._httpDelete(url, body=body)
def get_server_configuration(self):
_, config = self._retrieve(self.configuration_endpoint)
return config
def login(self, username, password):
self.api.login_internal(username=username, password=password)
def logout(self):
self.api.logout()
def get_session(self):
return self.httpClient.get_session()
def get_api(self):
return self.api
class DomExtractor(object):
EXTRADISK_PREFIX = 'extra.disk'
EXTRADISK_VOLATILE_KEY = EXTRADISK_PREFIX + '.volatile'
PATH_TO_NODE_ON_RUN = 'module/nodes/entry/node'
PATH_TO_PARAMETER = 'parameters/entry/parameter'
@staticmethod
def extract_nodes_instances_runtime_parameters(run_dom, cloud_service_name=None):
"""Return dict {<node_instance_name>: {<runtimeparamname>: <value>, }, }
"""
nodes_instances = {}
for node_instance_name in run_dom.attrib['nodeNames'].split(','):
node_instance_name = node_instance_name.strip()
node_instance = {}
node_instance[NodeDecorator.NODE_INSTANCE_NAME_KEY] = node_instance_name
# Unfortunately, this doesn't work on Python < 2.7
# query = "runtimeParameters/entry/runtimeParameter[@group='%s']" % node_instance_name
query = "runtimeParameters/entry/runtimeParameter"
for rtp in run_dom.findall(query):
if rtp.get('group') == node_instance_name:
key = DomExtractor._get_key_from_runtimeparameter(rtp)
node_instance[key] = rtp.text
nodes_instances[node_instance_name] = node_instance
if cloud_service_name is not None:
for node_instance_name in nodes_instances.keys():
if cloud_service_name != nodes_instances[node_instance_name][NodeDecorator.CLOUDSERVICE_KEY]:
del nodes_instances[node_instance_name]
return nodes_instances
@staticmethod
def extract_nodes_runtime_parameters(run_dom):
"""Return dict {<node_name>: {<runtimeparamname>: <value>, }, }
"""
nodes = {}
node_names = DomExtractor._get_node_names(run_dom)
for node_name in node_names:
node = {}
node[NodeDecorator.NODE_NAME_KEY] = node_name
# Unfortunately, this doesn't work on Python < 2.7
# query = "runtimeParameters/entry/runtimeParameter[@group='%s']" % node_instance_name
query = "runtimeParameters/entry/runtimeParameter"
for rtp in run_dom.findall(query):
if rtp.get('group') == node_name:
key = DomExtractor._get_key_from_runtimeparameter(rtp)
node[key] = rtp.text
nodes[node_name] = node
return nodes
@staticmethod
def _get_node_names(run_dom):
"""Return list of node names in the run.
"""
node_names = []
for group in run_dom.attrib['groups'].split(','):
node_name = ""
try:
node_name = group.split(NodeDecorator.NODE_PROPERTY_SEPARATOR)[1]
except IndexError:
pass
else:
node_names.append(node_name.strip())
return node_names
@staticmethod
def _get_key_from_runtimeparameter(rtp):
return rtp.attrib['key'].split(NodeDecorator.NODE_PROPERTY_SEPARATOR, 1)[-1]
@staticmethod
def extract_node_image_attributes(run_dom, nodename):
""" Return image attributes of all nodes.
"""
image = DomExtractor.extract_node_image(run_dom, nodename)
attributes = {}
if image is not None:
attributes = DomExtractor.get_attributes(image)
return attributes
@staticmethod
def extract_node_image(run_dom, nodename):
""" Return image attributes of all nodes.
"""
image = None
if DomExtractor.get_module_category(run_dom) == NodeDecorator.IMAGE:
image = run_dom.find('module')
else:
for node in run_dom.findall(DomExtractor.PATH_TO_NODE_ON_RUN):
if node.get('name') == nodename:
image = node.find('image')
return image
@staticmethod
def extract_deployment(run_dom, nodename):
""" Return the deployment module of a run.
"""
return run_dom.find('module')
@staticmethod
def extract_node_image_targets(run_dom, node_name):
if NodeDecorator.is_orchestrator_name(node_name):
module_dom = DomExtractor.extract_deployment(run_dom, node_name)
else:
module_dom = DomExtractor.extract_node_image(run_dom, node_name)
return DomExtractor.get_targets_from_module(module_dom)
@staticmethod
def extract_node_image_build_state(run_dom, node_name):
if NodeDecorator.is_orchestrator_name(node_name):
return {}
image_dom = DomExtractor.extract_node_image(run_dom, node_name)
return DomExtractor.get_build_state_from_image(image_dom)
@staticmethod
def get_build_state_from_image(image_dom):
build_state = {}
for st in image_dom.findall('buildStates/buildState'):
module_uri = st.get('moduleUri')
built_on = st.get('builtOn', '').split(',')
build_state[module_uri] = dict(module_uri=module_uri, built_on=built_on)
return build_state
@staticmethod
def get_module_category(run_dom):
module = run_dom.find('module')
return module.get('category', None) if module is not None else None
@staticmethod
def get_extra_disks_from_image(image_dom):
extra_disks = {}
for entry in image_dom.findall('parameters/entry'):
param_name = entry.find('parameter').get('name')
if param_name.startswith(DomExtractor.EXTRADISK_PREFIX):
try:
extra_disks[param_name] = entry.find('parameter/value').text or ''
except AttributeError:
pass
return extra_disks
@staticmethod
def get_element_value_from_element_tree(element_tree, element_name):
element = element_tree.find(element_name)
value = getattr(element, 'text', '')
if value is None:
value = ''
return value
@staticmethod
def get_attributes(dom):
return dom.attrib
@staticmethod
def get_packages(module_dom):
packages = []
for package in module_dom.findall('packagesExpanded/packageExpanded'):
name = package.get('name')
if name:
packages.append(name)
return packages
@staticmethod
def extractCategoryFromRun(run_dom):
return run_dom.attrib['category']
@staticmethod
def extractTypeFromRun(run_dom):
return run_dom.attrib['type']
@staticmethod
def extract_mutable_from_run(run_dom):
return run_dom.attrib[util.RUN_PARAM_MUTABLE]
@staticmethod
def extractDefaultCloudServiceNameFromRun(run_dom):
return run_dom.attrib['cloudServiceName']
@staticmethod
def extract_run_parameters_from_run(run_dom):
parameters = {}
for node in run_dom.findall(DomExtractor.PATH_TO_PARAMETER):
value = node.find('value')
parameters[node.get('name')] = value.text if value is not None else None
return parameters
@staticmethod
def get_targets_from_module(module_dom):
"""Return deployment targets of the given image.
"""
targets = {}
for st in module_dom.findall('targetsExpanded/targetExpanded/subTarget'):
name = st.get('name')
subtarget = dict(name=name,
order=int(st.get('order')),
module_uri=st.get('moduleUri'),
module=st.get('moduleShortName'),
script=st.text)
targets.setdefault(name, [])
targets[name].append(subtarget)
for target in targets.itervalues():
target.sort(key=lambda t: t.get('order'))
if module_dom.tag == "imageModule" or module_dom.tag == "image" \
or module_dom.get('category') == NodeDecorator.IMAGE:
targets[NodeDecorator.NODE_PACKAGES] = DomExtractor.get_packages(module_dom)
return targets
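    # Illustrative sketch (not from the original source; target names are
    # hypothetical): for a deployment module with two 'execute' sub-targets,
    # the returned structure would look like
    #   {'execute': [{'name': 'execute', 'order': 1, 'module_uri': '...',
    #                 'module': '...', 'script': '<script body>'},
    #                {'name': 'execute', 'order': 2, ...}]}
    # with each list sorted by 'order'; for image modules an extra entry keyed
    # by NodeDecorator.NODE_PACKAGES holds the expanded package list.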
@staticmethod
def server_config_dom_into_dict(config_dom, categories=[], value_updater=None):
"""
:param config_dom: Element Tree representation of the server's configuration.
        :param categories: categories to extract; if empty, extracts all categories.
        :param value_updater: optional callable applied to each extracted value before it is stored.
        :return: dictionary {'category': [('param', 'value'),],}
"""
config = defaultdict(list)
for param in config_dom.findall('parameters/entry'):
category = param.find('parameter').get('category')
if categories and (category not in categories):
continue
name = param.find('parameter').get('name')
value = param.find('parameter/value').text
if value is None:
value = ''
if '\n' in value:
value = value.replace('\n', '')
if hasattr(value_updater, '__call__'):
value = value_updater(value)
config[category].append((name, value))
return config
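    # Illustrative usage sketch (not part of the original client; the category
    # and parameter names below are hypothetical): given a configuration DOM
    # whose parameters/entry/parameter elements carry 'category' and 'name'
    # attributes plus a <value> child,
    #   DomExtractor.server_config_dom_into_dict(config_dom)
    # returns something like {'SlipStream_Basics': [('cloud.connector.class', 'local')]}.
    # Passing categories=['SlipStream_Basics'] restricts extraction to that
    # category, and value_updater (if callable) is applied to each value first.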
|
|
import glob
import os
from functools import partial
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaUI as omUI
import cross3d
from cross3d import Exceptions, ExceptionRouter
from cross3d.classes import FrameRange
from cross3d.abstract.abstractsceneviewport import AbstractSceneViewport
#------------------------------------------------------------------------------------------------------------------------
class MayaSceneViewport(AbstractSceneViewport):
# From the Docs:
# To determine which settings are available on your system, use the `playblast -options`
# command. This will display a system-specific dialog with supported compression formats.
_validPlayblastFormats = ['gif', 'si', 'rla', 'tif', 'tifu', 'sgi', 'als', 'maya', 'jpg',
'eps', 'cin', 'yuv', 'tga', 'bmp', 'psd', 'png', 'dds', 'psdLayered', 'avi', 'mov']
def __init__( self, scene, viewportID=None ):
super(MayaSceneViewport, self).__init__(scene, viewportID)
if viewportID == None:
self._nativePointer = omUI.M3dView.active3dView()
else:
self._nativePointer = omUI.M3dView()
omUI.M3dView.get3dView(viewportID, self._nativePointer)
self._name = cross3d.SceneWrapper._mObjName(self._nativeCamera())
#--------------------------------------------------------------------------------
# Private Methods
#--------------------------------------------------------------------------------
def _nativeCamera(self):
undocumentedPythonFunctionRequirement = om.MDagPath()
with ExceptionRouter():
self._nativePointer.getCamera(undocumentedPythonFunctionRequirement)
return undocumentedPythonFunctionRequirement.node()
def _setNativeCamera(self, nativeCamera):
nativeCamera = cross3d.SceneWrapper._asMOBject(nativeCamera)
with ExceptionRouter():
dagPath = om.MDagPath.getAPathTo(nativeCamera)
self._nativePointer.setCamera(dagPath)
# Ensure the viewport is refreshed
cross3d.application.refresh()
return True
#--------------------------------------------------------------------------------
# Public Methods
#--------------------------------------------------------------------------------
def cameraName(self):
""" Return the viewport's camera name """
return self.camera().path()
def createCamera(self, name='Camera', type='Standard'):
""" Creates a camera that matches that viewport. """
camera = self._scene.createCamera(name, type)
camera.matchCamera(self.camera())
# Default cameras are hidden. Force the camera visible
camera.setHidden(False)
return camera
def generatePlayblast(
self,
fileName,
frameRange=None,
resolution=None,
slate=None,
effects=True,
geometryOnly=True,
pathFormat=r'{basePath}\{fileName}.{frame}.{ext}'):
fileName, ext = os.path.splitext(fileName)
		# Make sure an invalid file format was not requested
if ext.replace('.', '').lower() not in self._validPlayblastFormats:
raise Exceptions.FileFormatNotSupported('The file format {ext} is not supported by Maya'.format(ext=ext))
playblastFormat = 'image'
compression = ext.replace('.', '')
quality = 100
if ext.lower() == '.mov':
playblastFormat = 'qt'
elif ext.lower() == '.avi':
playblastFormat = 'avi'
compression = None
if isinstance(frameRange, int):
frameRange = FrameRange([frameRange, frameRange])
if not frameRange:
frameRange = self._scene.animationRange()
# TODO: Make generating movies not require setting frame padding to 1
padding = 1
if not resolution:
resolution = self._scene.renderSize()
# TODO: Add support for these arguments
if slate != None:
# Note: this is probably how we can handle slate
#cmds.headsUpDisplay( 'blurBurnin', section=8, block=0, blockAlignment='right', dw=50, label='This is my burnin')
cross3d.logger.debug('slate is not implemented in Maya')
if pathFormat != r'{basePath}\{fileName}.{frame}.{ext}':
cross3d.logger.debug('pathFormat is not implemented in Maya')
# Prepare to detect if the playblast was canceled
formatter = '{fileName}.{frame:0%i}{ext}' % padding
lastFrameFileName = formatter.format(fileName=fileName, frame=frameRange[1], ext=ext)
try:
lastFrameStartTime = os.path.getmtime(lastFrameFileName)
except os.error:
lastFrameStartTime = 0
# to properly generate a playblast
# pass the width/height to the playblast command
# set the camera displayOptions
# set overscan to 1.0 and lock it
# uncheck all options
# set camera\Film Back
# Fit Resolution Gate to overscan
# set proper film aspect ratio?
# set the render resolution?
# MCH 10/16/14 NOTE: Info on parsing playblast Display Menu if we decide to add support for that later
#--------------------------------------------------------------------------------
#for i in cmds.optionVar(list=True):
# if i.startswith('playblastShow'):
# print cmds.optionVar(query=i), i
# # Set the value
# cmds.optionVar( intValue=(i, False)
# # Update the playblast menus
# maya.mel.eval('updatePlayblastPluginMenus()')
#--------------------------------------------------------------------------------
cam = self.camera()
name = cam.path()
overscanLocked = cmds.getAttr("{name}.overscan".format(name=cam.path()), lock=True)
if overscanLocked:
# unlock overscan if it is locked
cmds.setAttr("{name}.overscan".format(name=name), lock=False)
# create a StateLocker object to backup the current values before setting them
from blur3d.lib.statelockerlib import StateLocker
with StateLocker() as stateLocker:
			# Currently the state locker isn't the most convenient to use
def setPropertyLocker(obj, key, value):
stateLocker.setMethodArgs(obj, obj.setProperty, partial(obj.property, key), key, value)
# Set FilmBack.FitResolutionGate to Overscan
setPropertyLocker(cam, 'filmFit', 3)
# uncheck Display Film Gate
setPropertyLocker(cam, 'displayFilmGate', 0)
# uncheck Display Resolution
setPropertyLocker(cam, 'displayResolution', 0)
# Set overscan to 1.0
setPropertyLocker(cam, 'overscan', 1.0)
# Store and restore these settings using modelEditor
# The key is the property to query/edit, the value is the value used while playblasting
modelEditorOverrides = {'sel':False}
# Find the current viewport so we can apply the viewport settings
panel = cmds.getPanel(withFocus=True)
			# Check whether a non-viewport panel has focus; if so, fall back to a default model panel
if not panel in cmds.getPanel(type='modelPanel'):
panel = 'modelPanel4'
if geometryOnly:
modelEditorOverrides['nurbsSurfaces'] = True
modelEditorOverrides['polymeshes'] = True
modelEditorOverrides['subdivSurfaces'] = True
# HACK: This records the viewport show options, sets them to playblast options, then
# restores them
# TODO: Make this load the settings from the playblast overrides
attrs = ['nurbsCurves', 'nurbsSurfaces', 'cv', 'hulls', 'polymeshes',
'subdivSurfaces', 'planes', 'lights', 'cameras', 'imagePlane', 'joints',
'ikHandles', 'dynamics', 'deformers', 'fluids', 'hairSystems', 'follicles',
'nCloths', 'nParticles', 'nRigids', 'dynamicConstraints', 'locators',
'dimensions', 'pivots', 'handles', 'textures', 'strokes', 'motionTrails',
'pluginShapes', 'clipGhosts', 'greasePencils', 'manipulators', 'grid', 'hud']
			# Disable display of all of these options as long as modelEditorOverrides doesn't
			# already contain a setting key
updateDict = dict([(attr, False) for attr in attrs if attr not in modelEditorOverrides])
modelEditorOverrides.update(updateDict)
# New features in 2015
if cross3d.application.version() > 2014 and 'particleInstancers' not in modelEditorOverrides:
modelEditorOverrides.update(particleInstancers=False)
if effects == True:
modelEditorOverrides.update(displayTextures=True, displayLights='all')
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.ssaoEnable', 1)
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.motionBlurEnable', 1)
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.multiSampleEnable', True)
				# TODO: Add Camera.setDepthOfField to cross3d
ntp = cam._nativeTypePointer
stateLocker.setMethod(ntp, ntp.setDepthOfField, ntp.isDepthOfField, True)
if effects == False:
modelEditorOverrides.update(displayTextures=False, displayLights='default')
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.ssaoEnable', 0)
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.motionBlurEnable', 0)
setPropertyLocker(self._scene, 'hardwareRenderingGlobals.multiSampleEnable', False)
				# TODO: Add Camera.setDepthOfField to cross3d
ntp = cam._nativeTypePointer
stateLocker.setMethod(ntp, ntp.setDepthOfField, ntp.isDepthOfField, False)
# Store the current values
modelEditorStates = {}
for option, value in modelEditorOverrides.iteritems():
# Store the current value
modelEditorStates[option] = cmds.modelEditor(panel, query=True, **{option: True})
# Set the playblast value
cmds.modelEditor(panel, edit=True, **{option: value})
# # Uncomment this code to update the ui so you can see what options get disabled in the toolbar
# from PyQt4.QtGui import QApplication, QMessageBox
# QApplication.processEvents()
# QMessageBox.question(None, 'Temp', 'update')
# generate playblast
cmds.playblast(
width=resolution.width(),
height=resolution.height(),
startTime=frameRange.start(),
endTime=frameRange.end(),
percent=100,
filename=fileName,
showOrnaments=False,
format=playblastFormat,
compression=compression,
quality=quality,
framePadding=padding,
viewer=False)
# Restore the modelEditor options to their previous value
for option, value in modelEditorStates.iteritems():
cmds.modelEditor(panel, edit=True, **{option: value})
if overscanLocked:
# relock overscan
cmds.setAttr("{name}.overscan".format(name=name), lock=True)
		# No way to detect if an avi or quicktime was canceled
if ext.lower() in ('.mov', '.avi'):
return True
# If the capture was not completed we just return False.
try:
lastFrameEndTime = os.path.getmtime(lastFrameFileName)
if not lastFrameStartTime < lastFrameEndTime:
return False
except os.error:
return False
return True
def refresh(self):
self._nativePointer.refresh(False, True)
# register the symbol
cross3d.registerSymbol('SceneViewport', MayaSceneViewport)
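# Illustrative usage sketch (not part of the original module; assumes cross3d
# exposes a Scene wrapper for the current Maya session and that an active
# 3d viewport exists -- both are assumptions here):
#
#     scene = cross3d.Scene()
#     viewport = MayaSceneViewport(scene)          # wraps the active 3d view
#     viewport.generatePlayblast(r'C:\temp\preview.png', effects=False)
#
# generatePlayblast() writes an image sequence (or a .mov/.avi) covering the
# scene's animation range and returns False when the last frame was not
# (re)written, which serves as a best-effort "was the playblast canceled" check.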
|
|
# Copyright (C) 2011-2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
# Copyright (C) 2012 Simon Horman <horms ad verge net au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import sys
from ryu import exception
from ryu.lib import mac
from ryu.lib import type_desc
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto import ether
from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import inet
from ryu.ofproto import oxm_fields
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
# No corresponding OFPFW_* bits
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'
_MF_FIELDS = {}
FLOW_N_REGS = 8 # ovs 1.5
class Flow(ofproto_parser.StringifyMixin):
def __init__(self):
self.in_port = 0
self.dl_vlan = 0
self.dl_vlan_pcp = 0
self.dl_src = mac.DONTCARE
self.dl_dst = mac.DONTCARE
self.dl_type = 0
self.tp_dst = 0
self.tp_src = 0
self.nw_tos = 0
self.vlan_tci = 0
self.nw_ttl = 0
self.nw_proto = 0
self.arp_sha = 0
self.arp_tha = 0
self.nw_src = 0
self.nw_dst = 0
self.tun_id = 0
self.arp_spa = 0
self.arp_tpa = 0
self.ipv6_src = []
self.ipv6_dst = []
self.nd_target = []
self.nw_frag = 0
self.regs = [0] * FLOW_N_REGS
self.ipv6_label = 0
self.pkt_mark = 0
self.tcp_flags = 0
class FlowWildcards(ofproto_parser.StringifyMixin):
def __init__(self):
self.dl_src_mask = 0
self.dl_dst_mask = 0
self.tp_src_mask = 0
self.tp_dst_mask = 0
self.nw_src_mask = 0
self.nw_dst_mask = 0
self.tun_id_mask = 0
self.arp_spa_mask = 0
self.arp_tpa_mask = 0
self.vlan_tci_mask = 0
self.ipv6_src_mask = []
self.ipv6_dst_mask = []
self.nd_target_mask = []
self.nw_frag_mask = 0
self.regs_bits = 0
self.regs_mask = [0] * FLOW_N_REGS
self.wildcards = ofproto_v1_0.OFPFW_ALL
self.pkt_mark_mask = 0
self.tcp_flags_mask = 0
class ClsRule(ofproto_parser.StringifyMixin):
"""describe a matching rule for OF 1.0 OFPMatch (and NX).
"""
def __init__(self, **kwargs):
self.wc = FlowWildcards()
self.flow = Flow()
for key, value in kwargs.items():
if key[:3] == 'reg':
register = int(key[3:] or -1)
self.set_reg(register, value)
continue
setter = getattr(self, 'set_' + key, None)
if not setter:
LOG.error('Invalid kwarg specified to ClsRule (%s)', key)
continue
if not isinstance(value, (tuple, list)):
value = (value, )
setter(*value)
def set_in_port(self, port):
self.wc.wildcards &= ~FWW_IN_PORT
self.flow.in_port = port
def set_dl_vlan(self, dl_vlan):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
self.flow.dl_vlan = dl_vlan
def set_dl_vlan_pcp(self, dl_vlan_pcp):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
self.flow.dl_vlan_pcp = dl_vlan_pcp
def set_dl_dst(self, dl_dst):
self.flow.dl_dst = dl_dst
def set_dl_dst_masked(self, dl_dst, mask):
self.wc.dl_dst_mask = mask
# bit-wise and of the corresponding elements of dl_dst and mask
self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)
def set_dl_src(self, dl_src):
self.flow.dl_src = dl_src
def set_dl_src_masked(self, dl_src, mask):
self.wc.dl_src_mask = mask
self.flow.dl_src = mac.haddr_bitand(dl_src, mask)
def set_dl_type(self, dl_type):
self.wc.wildcards &= ~FWW_DL_TYPE
self.flow.dl_type = dl_type
def set_dl_tci(self, tci):
self.set_dl_tci_masked(tci, UINT16_MAX)
def set_dl_tci_masked(self, tci, mask):
self.wc.vlan_tci_mask = mask
self.flow.vlan_tci = tci
def set_tp_src(self, tp_src):
self.set_tp_src_masked(tp_src, UINT16_MAX)
def set_tp_src_masked(self, tp_src, mask):
self.wc.tp_src_mask = mask
self.flow.tp_src = tp_src & mask
def set_tp_dst(self, tp_dst):
self.set_tp_dst_masked(tp_dst, UINT16_MAX)
def set_tp_dst_masked(self, tp_dst, mask):
self.wc.tp_dst_mask = mask
self.flow.tp_dst = tp_dst & mask
def set_nw_proto(self, nw_proto):
self.wc.wildcards &= ~FWW_NW_PROTO
self.flow.nw_proto = nw_proto
def set_nw_src(self, nw_src):
self.set_nw_src_masked(nw_src, UINT32_MAX)
def set_nw_src_masked(self, nw_src, mask):
self.flow.nw_src = nw_src
self.wc.nw_src_mask = mask
def set_nw_dst(self, nw_dst):
self.set_nw_dst_masked(nw_dst, UINT32_MAX)
def set_nw_dst_masked(self, nw_dst, mask):
self.flow.nw_dst = nw_dst
self.wc.nw_dst_mask = mask
def set_nw_dscp(self, nw_dscp):
self.wc.wildcards &= ~FWW_NW_DSCP
self.flow.nw_tos &= ~IP_DSCP_MASK
self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK
def set_icmp_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmp_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_tun_id(self, tun_id):
self.set_tun_id_masked(tun_id, UINT64_MAX)
def set_tun_id_masked(self, tun_id, mask):
self.wc.tun_id_mask = mask
self.flow.tun_id = tun_id & mask
def set_nw_ecn(self, nw_ecn):
self.wc.wildcards &= ~FWW_NW_ECN
self.flow.nw_tos &= ~IP_ECN_MASK
self.flow.nw_tos |= nw_ecn & IP_ECN_MASK
def set_nw_ttl(self, nw_ttl):
self.wc.wildcards &= ~FWW_NW_TTL
self.flow.nw_ttl = nw_ttl
def set_nw_frag(self, nw_frag):
self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
self.flow.nw_frag = nw_frag
def set_nw_frag_masked(self, nw_frag, mask):
self.wc.nw_frag_mask = mask
self.flow.nw_frag = nw_frag & mask
def set_arp_spa(self, spa):
self.set_arp_spa_masked(spa, UINT32_MAX)
def set_arp_spa_masked(self, spa, mask):
self.flow.arp_spa = spa
self.wc.arp_spa_mask = mask
def set_arp_tpa(self, tpa):
self.set_arp_tpa_masked(tpa, UINT32_MAX)
def set_arp_tpa_masked(self, tpa, mask):
self.flow.arp_tpa = tpa
self.wc.arp_tpa_mask = mask
def set_arp_sha(self, sha):
self.wc.wildcards &= ~FWW_ARP_SHA
self.flow.arp_sha = sha
def set_arp_tha(self, tha):
self.wc.wildcards &= ~FWW_ARP_THA
self.flow.arp_tha = tha
def set_icmpv6_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmpv6_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_ipv6_label(self, label):
self.wc.wildcards &= ~FWW_IPV6_LABEL
self.flow.ipv6_label = label
def set_ipv6_src_masked(self, src, mask):
self.wc.ipv6_src_mask = mask
self.flow.ipv6_src = [x & y for (x, y) in zip(src, mask)]
def set_ipv6_src(self, src):
self.flow.ipv6_src = src
def set_ipv6_dst_masked(self, dst, mask):
self.wc.ipv6_dst_mask = mask
self.flow.ipv6_dst = [x & y for (x, y) in zip(dst, mask)]
def set_ipv6_dst(self, dst):
self.flow.ipv6_dst = dst
def set_nd_target_masked(self, target, mask):
self.wc.nd_target_mask = mask
self.flow.nd_target = [x & y for (x, y) in
zip(target, mask)]
def set_nd_target(self, target):
self.flow.nd_target = target
def set_reg(self, reg_idx, value):
self.set_reg_masked(reg_idx, value, 0)
def set_reg_masked(self, reg_idx, value, mask):
self.wc.regs_mask[reg_idx] = mask
self.flow.regs[reg_idx] = value
self.wc.regs_bits |= (1 << reg_idx)
def set_pkt_mark_masked(self, pkt_mark, mask):
self.flow.pkt_mark = pkt_mark
self.wc.pkt_mark_mask = mask
def set_tcp_flags(self, tcp_flags, mask):
self.flow.tcp_flags = tcp_flags
self.wc.tcp_flags_mask = mask
def flow_format(self):
# Tunnel ID is only supported by NXM
if self.wc.tun_id_mask != 0:
return ofproto_v1_0.NXFF_NXM
# Masking DL_DST is only supported by NXM
if self.wc.dl_dst_mask:
return ofproto_v1_0.NXFF_NXM
# Masking DL_SRC is only supported by NXM
if self.wc.dl_src_mask:
return ofproto_v1_0.NXFF_NXM
# ECN is only supported by NXM
if not self.wc.wildcards & FWW_NW_ECN:
return ofproto_v1_0.NXFF_NXM
if self.wc.regs_bits > 0:
return ofproto_v1_0.NXFF_NXM
if self.flow.tcp_flags > 0:
return ofproto_v1_0.NXFF_NXM
return ofproto_v1_0.NXFF_OPENFLOW10
def match_tuple(self):
"""return a tuple which can be used as *args for
ofproto_v1_0_parser.OFPMatch.__init__().
see Datapath.send_flow_mod.
"""
assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
wildcards = ofproto_v1_0.OFPFW_ALL
if not self.wc.wildcards & FWW_IN_PORT:
wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
if self.flow.dl_src != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC
if self.flow.dl_dst != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_DST
if not self.wc.wildcards & FWW_DL_TYPE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE
if self.flow.dl_vlan != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
if self.flow.dl_vlan_pcp != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
if self.flow.nw_tos != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS
if self.flow.nw_proto != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO
if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if self.flow.tp_src != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC
if self.flow.tp_dst != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_DST
return (wildcards, self.flow.in_port, self.flow.dl_src,
self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
self.flow.tp_src, self.flow.tp_dst)
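# Illustrative sketch (not from the original source): ClsRule can be built
# directly from keyword arguments; each key is dispatched to the matching
# set_<key>() method, and 'reg<N>' keys are routed to set_reg(N, value).
#
#     rule = ClsRule(in_port=1,
#                    dl_type=0x0800,     # IPv4
#                    nw_proto=6,         # TCP
#                    tp_dst=80,
#                    reg0=0xdeadbeef)
#
# Such a rule can then be packed into NXM TLVs with serialize_nxm_match(),
# defined below.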
def _set_nxm_headers(nxm_headers):
'''Annotate corresponding NXM header'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
    '''Class decorator that registers the class's make() for each of its NXM headers.'''
    assert cls.nxm_headers is not None
    assert cls.nxm_headers != []
for nxm_header in cls.nxm_headers:
assert nxm_header not in _MF_FIELDS
_MF_FIELDS[nxm_header] = cls.make
return cls
def mf_from_nxm_header(nxm_header):
if nxm_header not in _MF_FIELDS:
return None
make = _MF_FIELDS.get(nxm_header)
assert make is not None
return make(nxm_header)
class MFField(object):
_FIELDS_HEADERS = {}
@staticmethod
def register_field_header(headers):
def _register_field_header(cls):
for header in headers:
MFField._FIELDS_HEADERS[header] = cls
return cls
return _register_field_header
def __init__(self, nxm_header, pack_str):
self.nxm_header = nxm_header
self.pack_str = pack_str
self.n_bytes = struct.calcsize(pack_str)
self.n_bits = self.n_bytes * 8
@classmethod
def parser(cls, buf, offset):
(header,) = struct.unpack_from('!I', buf, offset)
cls_ = MFField._FIELDS_HEADERS.get(header)
if cls_:
field = cls_.field_parser(header, buf, offset)
else:
            # Unknown field type; a bare `raise` would fail here (no active
            # exception), so report the buffer as malformed instead.
            raise exception.OFPMalformedMessage
field.length = (header & 0xff) + 4
return field
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if hasmask:
pack_str = '!' + cls.pack_str[1:] * 2
(value, mask) = struct.unpack_from(pack_str, buf,
offset + 4)
else:
(value,) = struct.unpack_from(cls.pack_str, buf,
offset + 4)
return cls(header, value, mask)
def _put(self, buf, offset, value):
msg_pack_into(self.pack_str, buf, offset, value)
return self.n_bytes
def putw(self, buf, offset, value, mask):
len_ = self._put(buf, offset, value)
return len_ + self._put(buf, offset + len_, mask)
def _is_all_ones(self, value):
return value == (1 << self.n_bits) - 1
def putm(self, buf, offset, value, mask):
if mask == 0:
return 0
elif self._is_all_ones(mask):
return self._put(buf, offset, value)
else:
return self.putw(buf, offset, value, mask)
def _putv6(self, buf, offset, value):
msg_pack_into(self.pack_str, buf, offset, *value)
return self.n_bytes
def putv6(self, buf, offset, value, mask):
len_ = self._putv6(buf, offset, value)
if len(mask):
return len_ + self._putv6(buf, offset + len_, mask)
return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFInPort, self).__init__(header, MFInPort.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFInPort.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthDst, self).__init__(header, MFEthDst.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthDst.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_dst_mask:
return self.putw(buf, offset, rule.flow.dl_dst,
rule.wc.dl_dst_mask)
else:
return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthSrc, self).__init__(header, MFEthSrc.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthSrc.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_src_mask:
return self.putw(buf, offset, rule.flow.dl_src,
rule.wc.dl_src_mask)
else:
return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFEthType, self).__init__(header, MFEthType.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthType.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFVlan, self).__init__(header, MFVlan.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFVlan.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.vlan_tci,
rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
pack_str = MF_PACK_STRING_8
def __init__(self, header, value, mask=None):
super(MFIPDSCP, self).__init__(header, MFIPDSCP.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFIPDSCP.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_DSCP_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
pack_str = MF_PACK_STRING_BE64
def __init__(self, header, value, mask=None):
super(MFTunId, self).__init__(header, MFTunId.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFTunId.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPSrc, self).__init__(header, MFIPSrc.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPSrc.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_src, rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPDst, self).__init__(header, MFIPDst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPDst.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_dst, rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_ECN_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_src, rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_dst, rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_spa, rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_tpa, rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
pack_str = MF_PACK_STRING_IPV6
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
if hasmask:
pack_string = '!' + cls.pack_str[1:] * 2
value = struct.unpack_from(pack_string, buf, offset + 4)
return cls(header, list(value[:8]), list(value[8:]))
else:
value = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, list(value))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Src, self).__init__(header, MFIPV6Src.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_src,
rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Dst, self).__init__(header, MFIPV6Dst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_dst,
rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
@classmethod
def make(cls, header):
return cls(header, '!4I')
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.nd_target,
rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
@classmethod
def make(cls, header):
return cls(header, '!B')
def put(self, buf, offset, rule):
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
return self._put(buf, offset, rule.flow.nw_frag)
else:
return self.putw(buf, offset, rule.flow.nw_frag,
rule.wc.nw_frag_mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
+ [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
for i in range(FLOW_N_REGS):
if (ofproto_v1_0.nxm_nx_reg(i) == self.nxm_header or
ofproto_v1_0.nxm_nx_reg_w(i) == self.nxm_header):
if rule.wc.regs_mask[i]:
return self.putm(buf, offset, rule.flow.regs[i],
rule.wc.regs_mask[i])
else:
return self._put(buf, offset, rule.flow.regs[i])
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_PKT_MARK,
ofproto_v1_0.NXM_NX_PKT_MARK_W])
class MFPktMark(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.pkt_mark,
rule.wc.pkt_mark_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TCP_FLAGS,
ofproto_v1_0.NXM_NX_TCP_FLAGS_W])
class MFTcpFlags(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tcp_flags,
rule.wc.tcp_flags_mask)
def serialize_nxm_match(rule, buf, offset):
old_offset = offset
if not rule.wc.wildcards & FWW_IN_PORT:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
# Ethernet.
if rule.flow.dl_dst != mac.DONTCARE:
if rule.wc.dl_dst_mask:
header = ofproto_v1_0.NXM_OF_ETH_DST_W
else:
header = ofproto_v1_0.NXM_OF_ETH_DST
offset += nxm_put(buf, offset, header, rule)
if rule.flow.dl_src != mac.DONTCARE:
if rule.wc.dl_src_mask:
header = ofproto_v1_0.NXM_OF_ETH_SRC_W
else:
header = ofproto_v1_0.NXM_OF_ETH_SRC
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_DL_TYPE:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
# 802.1Q
if rule.wc.vlan_tci_mask != 0:
if rule.wc.vlan_tci_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_VLAN_TCI
else:
header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
offset += nxm_put(buf, offset, header, rule)
# L3
if not rule.wc.wildcards & FWW_NW_DSCP:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
if not rule.wc.wildcards & FWW_NW_ECN:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
if not rule.wc.wildcards & FWW_NW_TTL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
if not rule.wc.wildcards & FWW_NW_PROTO:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMP):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
if rule.flow.tp_src != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_SRC
else:
header = ofproto_v1_0.NXM_OF_TCP_SRC_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_SRC
else:
header = ofproto_v1_0.NXM_OF_UDP_SRC_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tp_dst != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_DST
else:
header = ofproto_v1_0.NXM_OF_TCP_DST_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_DST
else:
header = ofproto_v1_0.NXM_OF_UDP_DST_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tcp_flags != 0:
# TCP Flags can only be used if the ethernet type is IPv4 or IPv6
if rule.flow.dl_type in (ether.ETH_TYPE_IP, ether.ETH_TYPE_IPV6):
# TCP Flags can only be used if the ip protocol is TCP
if rule.flow.nw_proto == inet.IPPROTO_TCP:
if rule.wc.tcp_flags_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_NX_TCP_FLAGS
else:
header = ofproto_v1_0.NXM_NX_TCP_FLAGS_W
else:
header = 0
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
# IP Source and Destination
if rule.flow.nw_src != 0:
if rule.wc.nw_src_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_SRC
else:
header = ofproto_v1_0.NXM_OF_IP_SRC_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.nw_dst != 0:
if rule.wc.nw_dst_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_DST
else:
header = ofproto_v1_0.NXM_OF_IP_DST_W
offset += nxm_put(buf, offset, header, rule)
# IPv6
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMPV6):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
rule)
if not rule.wc.wildcards & FWW_IPV6_LABEL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
if len(rule.flow.ipv6_src):
if len(rule.wc.ipv6_src_mask):
header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_SRC
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.ipv6_dst):
if len(rule.wc.ipv6_dst_mask):
header = ofproto_v1_0.NXM_NX_IPV6_DST_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_DST
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.nd_target):
if len(rule.wc.nd_target_mask):
header = ofproto_v1_0.NXM_NX_ND_TARGET_W
else:
header = ofproto_v1_0.NXM_NX_ND_TARGET
offset += nxm_put(buf, offset, header, rule)
# ARP
if rule.flow.arp_spa != 0:
if rule.wc.arp_spa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_SPA
else:
header = ofproto_v1_0.NXM_OF_ARP_SPA_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.arp_tpa != 0:
if rule.wc.arp_tpa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_TPA
else:
header = ofproto_v1_0.NXM_OF_ARP_TPA_W
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_ARP_SHA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
if not rule.wc.wildcards & FWW_ARP_THA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
if rule.flow.nw_frag:
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
header = ofproto_v1_0.NXM_NX_IP_FRAG
else:
header = ofproto_v1_0.NXM_NX_IP_FRAG_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.pkt_mark != 0:
if rule.wc.pkt_mark_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_NX_PKT_MARK
else:
header = ofproto_v1_0.NXM_NX_PKT_MARK_W
offset += nxm_put(buf, offset, header, rule)
# Tunnel Id
if rule.wc.tun_id_mask != 0:
if rule.wc.tun_id_mask == UINT64_MAX:
header = ofproto_v1_0.NXM_NX_TUN_ID
else:
header = ofproto_v1_0.NXM_NX_TUN_ID_W
offset += nxm_put(buf, offset, header, rule)
# XXX: Cookie
for i in range(FLOW_N_REGS):
if rule.wc.regs_bits & (1 << i):
if rule.wc.regs_mask[i]:
header = ofproto_v1_0.nxm_nx_reg_w(i)
else:
header = ofproto_v1_0.nxm_nx_reg(i)
offset += nxm_put(buf, offset, header, rule)
# Pad
pad_len = round_up(offset) - offset
msg_pack_into("%dx" % pad_len, buf, offset)
# The returned length, the match_len, does not include the pad
return offset - old_offset
def nxm_put(buf, offset, header, rule):
nxm = NXMatch(header)
len_ = nxm.put_header(buf, offset)
mf = mf_from_nxm_header(nxm.header)
return len_ + mf.put(buf, offset + len_, rule)
def round_up(length):
return (length + 7) // 8 * 8 # Round up to a multiple of 8
class NXMatch(object):
def __init__(self, header):
self.header = header
@classmethod
def parser(cls, buf, offset, match_len):
if match_len < 4:
raise exception.OFPMalformedMessage
(header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset)
instance = cls(header)
payload_len = instance.length()
if payload_len == 0 or match_len < payload_len + 4:
raise exception.OFPMalformedMessage
return instance
def vendor(self):
return self.header >> 16
def field(self):
        return (self.header >> 9) & 0x7f
def type(self):
        return (self.header >> 9) & 0x7fffff
def hasmask(self):
return (self.header >> 8) & 1
def length(self):
return self.header & 0xff
def show(self):
return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
(self.header, self.vendor(), self.field(),
self.hasmask(), self.length()))
def put_header(self, buf, offset):
msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset, self.header)
return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
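    # Illustrative note (not from the original source): an NXM header packs
    # vendor, field, hasmask and payload length into one 32-bit word:
    #   header = (vendor << 16) | (field << 9) | (hasmask << 8) | length
    # For example, NXM_OF_ETH_DST = (0x0000 << 16) | (1 << 9) | (0 << 8) | 6
    # = 0x00000206, which the accessors above decode back into vendor()=0,
    # field()=1, hasmask()=0 and length()=6.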
#
# The following are implementations for OpenFlow 1.2+
#
sys.modules[__name__].__doc__ = """
The API of this class is the same as ``OFPMatch``.
You can define the flow match by the keyword arguments.
The following arguments are available.
================ =============== ==============================================
Argument Value Description
================ =============== ==============================================
eth_dst_nxm MAC address Ethernet destination address.
eth_src_nxm MAC address Ethernet source address.
eth_type_nxm Integer 16bit Ethernet type. Needed to support Nicira
extensions that require the eth_type to
be set. (i.e. tcp_flags_nxm)
ip_proto_nxm Integer 8bit IP protocol. Needed to support Nicira
extensions that require the ip_proto to
be set. (i.e. tcp_flags_nxm)
tunnel_id_nxm Integer 64bit Tunnel identifier.
tun_ipv4_src IPv4 address Tunnel IPv4 source address.
tun_ipv4_dst IPv4 address Tunnel IPv4 destination address.
pkt_mark Integer 32bit Packet metadata mark.
tcp_flags_nxm Integer 16bit TCP Flags. Requires setting fields:
eth_type_nxm = [0x0800 (IP)|0x86dd (IPv6)] and
ip_proto_nxm = 6 (TCP)
conj_id Integer 32bit Conjunction ID used only with
the conjunction action
ct_state Integer 32bit Conntrack state.
ct_zone Integer 16bit Conntrack zone.
ct_mark Integer 32bit Conntrack mark.
ct_label Integer 128bit Conntrack label.
tun_ipv6_src IPv6 address Tunnel IPv6 source address.
tun_ipv6_dst IPv6 address Tunnel IPv6 destination address.
_dp_hash Integer 32bit Flow hash computed in Datapath.
reg<idx> Integer 32bit Packet register.
<idx> is register number 0-7.
================ =============== ==============================================
.. Note::
    Setting the TCP flags via the Nicira extensions
    is required when using OVS versions older than 2.4.
    When using the nxm fields, you need to set any nxm prerequisite
    fields as well, or you will receive an OFPBMC_BAD_PREREQ error.
Example::
# WILL NOT work
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto=inet.IPPROTO_TCP,
eth_type=eth_type)
# Works
flag = tcp.TCP_ACK
match = parser.OFPMatch(
tcp_flags_nxm=(flag, flag),
ip_proto_nxm=inet.IPPROTO_TCP,
eth_type_nxm=eth_type)
"""
oxm_types = [
oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_type_nxm', 3, type_desc.Int2),
oxm_fields.NiciraExtended0('ip_proto_nxm', 6, type_desc.Int1),
oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8),
oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4),
oxm_fields.NiciraExtended1('tcp_flags_nxm', 34, type_desc.Int2),
oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_state', 105, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_zone', 106, type_desc.Int2),
oxm_fields.NiciraExtended1('ct_mark', 107, type_desc.Int4),
oxm_fields.NiciraExtended1('ct_label', 108, type_desc.Int16),
oxm_fields.NiciraExtended1('tun_ipv6_src', 109, type_desc.IPv6Addr),
oxm_fields.NiciraExtended1('tun_ipv6_dst', 110, type_desc.IPv6Addr),
# The following definition is merely for testing 64-bit experimenter OXMs.
# Following Open vSwitch, we use dp_hash for this purpose.
    # Prefix the name with '_' to indicate this is not intended to be used
    # in the wild.
oxm_fields.NiciraExperimenter('_dp_hash', 0, type_desc.Int4),
# Support for matching/setting NX registers 0-7
oxm_fields.NiciraExtended1('reg0', 0, type_desc.Int4),
oxm_fields.NiciraExtended1('reg1', 1, type_desc.Int4),
oxm_fields.NiciraExtended1('reg2', 2, type_desc.Int4),
oxm_fields.NiciraExtended1('reg3', 3, type_desc.Int4),
oxm_fields.NiciraExtended1('reg4', 4, type_desc.Int4),
oxm_fields.NiciraExtended1('reg5', 5, type_desc.Int4),
oxm_fields.NiciraExtended1('reg6', 6, type_desc.Int4),
oxm_fields.NiciraExtended1('reg7', 7, type_desc.Int4),
]
|
|
import mimetypes
import plistlib
import json
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView, DetailView
from django.http import HttpResponseBadRequest, HttpResponse, HttpResponseServerError
from django.conf import settings
from django.template import RequestContext
from django.shortcuts import render_to_response
from courses.utils import ObjectJSONEncoder, dict_by_attr, DAYS, int_list, XMLEncoder
from courses.views import decorators
from courses import models, views
from courses import encoder as encoders
from scheduler.models import SectionProxy, Selection, SectionConflict, SavedSelection
from scheduler.domain import (
ConflictCache, has_schedule, compute_schedules, period_stats
)
DEBUG = getattr(settings, 'DEBUG', False)
# add some mimetypes
mimetypes.init()
mimetypes.add_type('application/x-plist', '.plist')
mimetypes.add_type('application/x-binary-plist', '.bplist')
mimetypes.add_type('application/x-binary-plist', '.biplist')
class DataFormatter(object):
def __init__(self,
encoder=None,
context_processor=None,
default_content_type='application/json'):
self.encoder = encoder or encoders.default_encoder
self.context_processor = context_processor
self.default_content_type = default_content_type
    def get_context_type_from_extension(self, ext):
        filename = 'file.' + (ext or '')
        return mimetypes.guess_type(filename, strict=False)[0]
def convert_data_to_json(self, context):
indent = None
if DEBUG:
indent = 4
return ObjectJSONEncoder(indent=indent).encode(context)
def convert_data_to_xml(self, context):
return XMLEncoder().encode(context, root='api')
def convert_data_to_plist(self, context):
# TODO: handle datetime classes
return plistlib.writePlistToString(context)
def convert_to_content_type(self, data, content_type=None):
return {
'application/json': self.convert_data_to_json,
'application/xml': self.convert_data_to_xml,
'text/xml': self.convert_data_to_xml,
'application/x-plist': self.convert_data_to_plist,
}.get(content_type)(data)
def convert(self, data, content_type):
converted_data = self.convert_to_content_type(data, content_type)
return converted_data
def convert_request(self, settings, request, *args, **kwargs):
context = settings['context']
if callable(self.context_processor):
context = self.context_processor(context)
context = self.encoder.encode(context)
content_type = kwargs.get('ext') or self.default_content_type
if content_type != self.default_content_type:
content_type = self.get_context_type_from_extension(content_type)
data = self.convert(context, content_type)
response = HttpResponse(data, content_type=content_type)
raise decorators.AlternativeResponse(response)
def wrap_request(render_settings, request, *args, **kwargs):
def wrap_context(context):
return {
'success': True,
'result': context,
'version': kwargs.get('version', 4),
}
formatter = DataFormatter(context_processor=wrap_context)
formatter.convert_request(render_settings, request, *args, **kwargs)
render = decorators.Renderer(posthook=wrap_request)
def paginate(query, page=1, per_page=1000):
return query[(page - 1) * per_page:page * per_page]
def get_if_id_present(queryset, id=None):
if id is not None:
return queryset.get()
else:
return queryset
@csrf_exempt
@render()
def raw_data(request, data, version=None, ext=None):
return {'context': data}
@csrf_exempt
@render()
def selections(request, id=None, version=None, ext=None):
if id:
selection = SavedSelection.objects.get(id=id)
return {'context': selection.toJSON()}
if request.method != 'POST':
raise decorators.AlternativeResponse(
HttpResponseBadRequest('{}')
)
section_ids = int_list(request.POST.get('section_ids', '').split(','))
blocked_times = request.POST.get('blocked_times', '').split(',')
selection, created = SavedSelection.objects.get_or_create_by_data(
section_ids=section_ids,
blocked_times=blocked_times,
)
return {'context': selection.toJSON()}
class RequestParams(object):
def __init__(self, request):
self.request = request
def __getitem__(self, key):
return self.request.POST.get(key) or self.request.GET[key]
def get(self, key, default=None):
return self.request.POST.get(key) or self.request.GET.get(key) or default
def getlist(self, key):
return self.request.POST.getlist(key) or self.request.GET.getlist(key)
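# For illustration (the URL is made up): RequestParams prefers POST over GET
# for every key, so a request such as
#
#     GET /semesters/?id=1&id=2    ->  params.getlist('id') == ['1', '2']
#
# resolves the same way as the equivalent ids posted in the request body.
# Note that a falsy POST value (e.g. an empty string) falls through to GET.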
@csrf_exempt
@render()
def semesters(request, id=None, version=None, ext=None):
params = RequestParams(request)
queryset = models.Semester.visible_objects.optional_filter(
id__in=int_list(params.getlist('id')) or None,
courses__id__in=int_list(params.getlist('course_id')) or None,
departments__id__in=int_list(params.getlist('department_id')) or None,
year=params.get('year'), month=params.get('month'),
id=id,
).distinct()
return {'context': get_if_id_present(queryset, id)}
@csrf_exempt
@render()
def departments(request, id=None, version=None, ext=None):
params = RequestParams(request)
queryset = models.Department.objects.optional_filter(
id__in=int_list(params.getlist('id')) or None,
semesters__id__in=int_list(params.getlist('semester_id')) or None,
courses__id__in=int_list(params.getlist('course_id')) or None,
code__in=params.getlist('code') or None,
id=id,
).distinct()
return {'context': get_if_id_present(queryset, id)}
def try_int(value, default=0):
try:
return int(value)
except (ValueError, TypeError):
return default
@csrf_exempt
@render()
def courses(request, id=None, version=None, ext=None):
params = RequestParams(request)
queryset = models.Course.objects.optional_filter(
semesters__id__in=int_list(params.getlist('semester_id')) or None,
department__code__in=params.getlist('department_code') or None,
department__id__in=int_list(params.getlist('department_id')) or None,
number__in=int_list(params.getlist('number')) or None,
id__in=int_list(params.getlist('id')) or None,
is_comm_intense=try_int(params.get('is_comm_intense'), default=None),
id=id,
).distinct()
search_query = params.get('search')
queryset = queryset.search(search_query)
return {'context': get_if_id_present(queryset, id)}
@csrf_exempt
@render()
def sections(request, id=None, version=None, ext=None):
params = RequestParams(request)
queryset = models.SectionPeriod.objects.optional_filter(
semester__id__in=int_list(params.getlist('semester_id')) or None,
section__course_id__in=int_list(params.getlist('course_id')) or None,
section__id__in=int_list(params.getlist('id')) or None,
section__crn__in=int_list(params.getlist('crn')) or None,
section__id=id,
).select_related('section', 'period')
section_periods = encoders.default_encoder.encode(queryset)
sections = {}
for section_period in section_periods:
section = section_period['section']
section = sections.get(section['id'], section)
section.setdefault('section_times', []).append(section_period)
# to prevent infinite recursion
del section_period['section']
period = section_period['period']
del section_period['period']
section_period.update(period)
sections[section['id']] = section
if id is not None:
return {'context': sections.values()[0]}
return {'context': sections.values()}
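# For illustration (field names inside section_times depend on the Period and
# SectionPeriod encoders and are only assumptions): the loop above folds each
# SectionPeriod row into its parent section, flattening the period fields into
# the row and deleting the back-references to avoid cycles, so a section with
# two meeting times serializes roughly as
#
#     {'id': 42, 'crn': 12345, ...,
#      'section_times': [{'start': ..., 'end': ..., ...},
#                        {'start': ..., 'end': ..., ...}]}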
@csrf_exempt
@render()
def section_conflicts(request, id=None, version=None, ext=None):
params = RequestParams(request)
conflicts = SectionConflict.objects.by_unless_none(
id=id,
id__in=int_list(params.getlist('id')) or None,
crn__in=int_list(params.getlist('crn')) or None,
)
if params.get('as_crns'):
conflicts = conflicts.values_list('section1__crn', 'section2__crn')
else:
conflicts = conflicts.values_list('section1__id', 'section2__id')
mapping = {}
for s1, s2 in conflicts:
mapping.setdefault(s1, set()).add(s2)
mapping.setdefault(s2, set()).add(s1)
if id is not None:
return {
'context': {
'id': int(id),
'conflicts': list(mapping[int(id)])
}
}
collection = []
ids = set(int_list(params.getlist('id')))
for section_id, conflicts in mapping.items():
if len(ids) > 0 and section_id not in ids:
continue
collection.append({
'id': section_id,
'conflicts': list(conflicts),
})
return {'context': collection}
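# For illustration (section ids invented): the mapping built above is
# symmetric, so a single conflict row (101, 202) produces both directions,
#
#     mapping == {101: set([202]), 202: set([101])}
#
# and the unfiltered collection response would be
#
#     [{'id': 101, 'conflicts': [202]}, {'id': 202, 'conflicts': [101]}]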
@csrf_exempt
@render()
def schedules(request, id=None, version=None):
params = RequestParams(request)
selection = None
if id:
selection = Selection.objects.get(id=id)
section_ids = selection.section_ids
else:
section_ids = int_list(params.getlist('section_id'))
created = False
if not selection:
selection, created = Selection.objects.get_or_create(
section_ids=section_ids)
sections = SectionProxy.objects.filter(id__in=section_ids) \
.select_related('course').prefetch_periods()
selected_courses = dict_by_attr(sections, 'course')
conflict_cache = ConflictCache(
SectionConflict.objects.as_dictionary([s.id for s in sections]))
# if the check flag is given, return only whether a valid schedule exists
if params.get('check'):
return {'context': has_schedule(selected_courses, conflict_cache)}
# check the cache
if not created and selection.api_cache:
return {'context': json.loads(selection.api_cache)}
schedules = compute_schedules(selected_courses, conflict_cache)
periods = set(p for s in sections for p in s.get_periods())
timerange, dow_used = period_stats(periods)
# note: if you change this, caches will have to be updated somehow
context = {
'time_range': timerange,
'schedules': schedules,
'course_ids': list(set(
c.id for c in selected_courses.keys())),
'section_ids': list(set(
s.id
for sections in selected_courses.values()
for s in sections
)),
'days_of_the_week': list(DAYS),
'id': selection.id,
}
selection.api_cache = json.dumps(context)
selection.save()
return {'context': context}
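# Note: Selection.api_cache stores the fully built context above as a JSON
# blob, so a repeat request for the same selection is answered from
# json.loads(selection.api_cache) without recomputing the schedules. As the
# in-line note warns, changing the context keys requires clearing or
# regenerating those cached blobs; how that is done is outside this module.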
def docs(request, template_name):
semesters = models.Semester.visible_objects.all()
departments = models.Department.objects.all()
return render_to_response(template_name, {
'semester': semesters[0],
'department': departments[0],
}, RequestContext(request))
###########################################################################
class APIMixin(views.AjaxJsonResponseMixin):
json_content_prefix = ''
json_allow_callback = True
default_content_type = 'application/json'
def get_api_version(self):
return self.kwargs.get('version', 3)
def get_default_content_type(self):
return self.default_content_type
def convert_extension_to_content_type(self, ext):
return mimetypes.guess_type('file.' + (ext or ''))[0]
def get_content_type(self):
format = self.kwargs.get('format')
return self.convert_extension_to_content_type(format) or self.get_default_content_type()
def get_api_payload(self):
methods = ['get_object', 'get_queryset']
for method in methods:
methodinstance = getattr(self, method, None)
if methodinstance:
qs = methodinstance()
try:
qs.force_into_json_array = (method == 'get_queryset')
except (AttributeError, TypeError):
# values returned by get_object may not accept attribute assignment
pass
try:
return qs.toJSON()
except AttributeError:
return qs
raise NotImplementedError("Please override get_api_payload method.")
def wrap_api_metadata(self, payload=None, status='OK'):
json = {
'status': status,
'payload': payload,
}
# we need to inject debug info in case we're not using json
return self.inject_debug_info(json)
def convert_context_to_xml(self, context):
return xmlrpclib.dumps((context,))
def convert_context_to_plist(self, context):
return plistlib.writePlistToString(context)
def convert_to_content_type(self, content_type, data):
return {
'application/json': self.convert_context_to_json,
'application/xml': self.convert_context_to_xml,
'text/xml': self.convert_context_to_xml,
'application/x-plist': self.convert_context_to_plist,
}[content_type](data)
def render_to_response(self, context):
def body(*args, **kwargs):
return self.convert_to_content_type(self.get_content_type(), self.wrap_api_metadata(*args, **kwargs))
try:
return HttpResponse(body(self.get_api_payload()), content_type=self.get_content_type())
except HttpResponse as httpresp:
return httpresp.__class__(body(status=str(httpresp)), content_type=self.get_content_type())
except Exception as e:
if DEBUG:
raise
return HttpResponseServerError(body(status='Server Error'), content_type=self.get_content_type())
# override mixin method
def should_filter_by_semester(self):
return self.get_api_version() < 3
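# An APIMixin subclass only has to supply get_api_payload() (or get_object /
# get_queryset); metadata wrapping, content negotiation via the 'format'
# kwarg and error handling all come from render_to_response() above. A
# minimal hypothetical subclass, for illustration only:
#
#     class PingView(APIMixin, ListView):
#         def get_queryset(self):
#             return []
#         def get_api_payload(self):
#             return {'pong': True}
#
# A URL ending in '.plist' would then receive the plist encoding of
# {'status': 'OK', 'payload': {'pong': True}} plus any injected debug info.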
class ObjectList(APIMixin, ListView):
def get_queryset(self):
return []
def get_api_payload(self):
return self.kwargs.get('objects', ())
class DepartmentListView(APIMixin, views.DepartmentListView):
pass
class SemesterListView(APIMixin, views.SemesterListView):
pass
class SemesterDetailView(APIMixin, views.SemesterDetailView):
pass
class SearchCoursesListView(APIMixin, views.SearchCoursesListView):
def get_queryset(self):
qs = super(SearchCoursesListView, self).get_queryset(full_select=False)
qs.force_into_json_array = True
return qs
class CourseByDeptListView(APIMixin, views.CourseByDeptListView):
def get_api_payload(self):
queryset = self.get_queryset(prefetch_department=False, full_select=False)
json = self.department.toJSON()
json['courses'] = queryset.toJSON()
return json
class CourseListView(APIMixin, views.SemesterBasedMixin, ListView):
def get_queryset(self):
# new in v3+: RESTful state, so we have to expose all the semester
# ids this course is associated with
if self.get_api_version() < 3 or ('year' in self.kwargs and 'month' in self.kwargs):
year, month = self.get_year_and_month()
return models.Course.objects.by_semester(year, month).select_related('department')
return models.Course.objects.select_related('department').select_semesters()
class CourseDetailView(APIMixin, views.CourseDetailView):
def get_api_payload(self):
obj = self.get_object()
json = obj.toJSON()
json['department'] = obj.department.toJSON()
return json
class SectionListView(APIMixin, views.SemesterBasedMixin, ListView):
def get_queryset(self):
year, month = self.get_year_and_month()
dept, num = self.kwargs.get('code'), self.kwargs.get('number')
course_id = self.kwargs.get('cid')
queryset = models.Section.objects.by_semester(year, month)
if None not in (dept, num):
queryset = queryset.by_course_code(dept, num)
if course_id is not None:
queryset = queryset.by_course_id(course_id)
return queryset.full_select(year, month)
class SectionDetailView(APIMixin, views.SemesterBasedMixin, DetailView):
def get_queryset(self):
year, month = self.get_year_and_month()
dept, num = self.kwargs.get('code'), self.kwargs.get('number')
crn, secnum = self.kwargs.get('crn'), self.kwargs.get('secnum')
course_id = self.kwargs.get('cid')
qs = models.Section.objects.by_semester(year, month).by_course_code(dept, num)
if crn is not None:
qs = qs.filter(crn=crn)
if secnum is not None:
if secnum == 'study-abroad':
secnum = -1
qs = qs.filter(number=secnum)
if course_id is not None:
qs = qs.by_course_id(course_id)
return qs.full_select(year, month)
def get_object(self):
try:
return self.get_queryset()[0]
except IndexError:
raise models.Section.DoesNotExist
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
import six
from neutron.agent.common import config as a_cfg
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import constants
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
class BaseIptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
super(BaseIptablesFirewallTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = self.iptables_cls_p.start()
self.iptables_inst = mock.Mock()
self.v4filter_inst = mock.Mock()
self.v6filter_inst = mock.Mock()
self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
'raw': self.v4filter_inst
}
self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
'raw': self.v6filter_inst
}
iptables_cls.return_value = self.iptables_inst
self.firewall = iptables_firewall.IptablesFirewallDriver()
self.firewall.iptables = self.iptables_inst
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
def test_prepare_port_filter_with_no_sg(self):
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -m tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule('ifake_dev', '-p icmp -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -m udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -m tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type echo-request -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -m udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -m tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p icmpv6 -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p icmpv6 -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
rule = {'ethertype': ethertype,
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 0,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
self._test_filter_ingress_tcp_min_port_0('IPv4')
def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -m udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -m tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -m tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p icmpv6 -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmpv6 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type echo-request -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmpv6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -m udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -m udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_prepare_port_filter(self,
rule,
ingress_expected_call=None,
egress_expected_call=None):
port = self._fake_port()
ethertype = rule['ethertype']
prefix = FAKE_IP[ethertype]
filter_inst = self.v4filter_inst
dhcp_rule = [mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None)]
if ethertype == 'IPv6':
filter_inst = self.v6filter_inst
dhcp_rule = [mock.call.add_rule('ofake_dev', '-p icmpv6 '
'--icmpv6-type %s -j DROP'
% constants.ICMPV6_TYPE_RA,
comment=None),
mock.call.add_rule('ofake_dev',
'-p icmpv6 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-p udp -m udp '
'--sport 546 --dport 547 '
'-j RETURN', comment=None)]
sg = [rule]
port['security_group_rules'] = sg
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback',
'-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG)
]
if ethertype == 'IPv6':
for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
calls.append(
mock.call.add_rule('ifake_dev',
'-p icmpv6 --icmpv6-type %s -j RETURN' %
icmp6_type, comment=None))
calls += [
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None
),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None
)
]
if ingress_expected_call:
calls.append(ingress_expected_call)
calls += [mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
% prefix,
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP)]
calls += dhcp_rule
calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None))
if ethertype == 'IPv4':
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None))
if ethertype == 'IPv6':
calls.append(mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 547 --dport 546 -j DROP',
comment=None))
calls += [
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
]
if egress_expected_call:
calls.append(egress_expected_call)
calls += [mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
filter_inst.assert_has_calls(calls)
def _test_remove_conntrack_entries(self, ethertype, protocol,
direction):
port = self._fake_port()
port['zone_id'] = 1
port['security_groups'] = 'fake_sg_id'
self.firewall.filtered_ports[port['device']] = port
self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
self.firewall.sg_rules['fake_sg_id'] = [
{'direction': direction, 'ethertype': ethertype,
'protocol': protocol}]
self.firewall.filter_defer_apply_on()
self.firewall.sg_rules['fake_sg_id'] = []
self.firewall.filter_defer_apply_off()
cmd = ['conntrack', '-D']
if protocol:
cmd.extend(['-p', protocol])
if ethertype == 'IPv4':
cmd.extend(['-f', 'ipv4'])
if direction == 'ingress':
cmd.extend(['-d', '10.0.0.1'])
else:
cmd.extend(['-s', '10.0.0.1'])
else:
cmd.extend(['-f', 'ipv6'])
if direction == 'ingress':
cmd.extend(['-d', 'fe80::1'])
else:
cmd.extend(['-s', 'fe80::1'])
cmd.extend(['-w', 1])
calls = [
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
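# Worked example: for ethertype='IPv4', protocol='icmp' and
# direction='ingress', the helper above expects the agent to have invoked
#
#     ['conntrack', '-D', '-p', 'icmp', '-f', 'ipv4', '-d', '10.0.0.1',
#      '-w', 1]
#
# through neutron.agent.linux.utils.execute (mocked here as self.utils_exec),
# i.e. delete the IPv4 ICMP conntrack entries destined to the port's fixed IP
# in zone 1.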
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction)
def test_remove_conntrack_entries_for_port_sec_group_change(self):
port = self._fake_port()
port['zone_id'] = 1
port['security_groups'] = ['fake_sg_id']
self.firewall.filtered_ports[port['device']] = port
self.firewall.updated_sg_members = set(['tapfake_dev'])
self.firewall.filter_defer_apply_on()
new_port = copy.deepcopy(port)
new_port['security_groups'] = ['fake_sg_id2']
self.firewall.filtered_ports[port['device']] = new_port
self.firewall.filter_defer_apply_off()
calls = [
mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
'-w', 1],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]),
mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
'-w', 1],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
def test_update_delete_port_filter(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}]
self.firewall.prepare_port_filter(port)
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'egress'}]
self.firewall.update_port_filter(port)
self.firewall.update_port_filter({'device': 'no-exist-device'})
self.firewall.remove_port_filter(port)
self.firewall.remove_port_filter({'device': 'no-exist-device'})
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback',
'-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT'),
mock.call.remove_chain('ifake_dev'),
mock.call.remove_chain('ofake_dev'),
mock.call.remove_chain('sfake_dev'),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged -j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule(
'FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $sg-chain',
comment=ic.VM_INT_SG),
mock.call.add_rule(
'sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j RETURN',
comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT'),
mock.call.remove_chain('ifake_dev'),
mock.call.remove_chain('ofake_dev'),
mock.call.remove_chain('sfake_dev'),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain')]
self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# check that no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def _mock_chain_applies(self):
class CopyingMock(mock.MagicMock):
"""Copies arguments so mutable arguments can be asserted on.
Copied verbatim from unittest.mock documentation.
"""
def __call__(self, *args, **kwargs):
args = copy.deepcopy(args)
kwargs = copy.deepcopy(kwargs)
return super(CopyingMock, self).__call__(*args, **kwargs)
# Need to use CopyingMock because _{setup,remove}_chains_apply are
# usually called with a dict that is modified between calls (i.e.,
# self.firewall.filtered_ports).
chain_applies = CopyingMock()
self.firewall._setup_chains_apply = chain_applies.setup
self.firewall._remove_chains_apply = chain_applies.remove
return chain_applies
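# Rationale sketch: a plain MagicMock records a reference to its call
# arguments, so mutating self.firewall.filtered_ports after a call would also
# rewrite the recorded call and defeat the assertions in
# test_mock_chain_applies below. CopyingMock snapshots the arguments at call
# time, e.g.
#
#     m, d = CopyingMock(), {}
#     m(d)
#     d['x'] = 1
#     m.assert_called_once_with({})  # still passes; a plain MagicMock fails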
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
port_update = {'device': 'd1', 'mac_address': 'update'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
def test_ip_spoofing_filter_with_multiple_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.2 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_ip_spoofing_no_fixed_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': []}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ifake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
def setUp(self):
super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
self.firewall.ipset = mock.Mock()
self.firewall.ipset.get_name.side_effect = (
ipset_manager.IpsetManager.get_name)
self.firewall.ipset.set_exists.return_value = True
def _fake_port(self, sg_id=FAKE_SGID):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']],
'security_groups': [sg_id],
'security_group_source_groups': [sg_id]}
def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
return {'direction': 'ingress', 'remote_group_id': remote_group,
'ethertype': ethertype}
def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
_IPv6: [FAKE_SGID]}
rules = []
for ip_version, remote_group_list in six.iteritems(remote_groups):
for remote_group in remote_group_list:
rules.append(self._fake_sg_rule_for_ethertype(ip_version,
remote_group))
return {sg_id: rules}
def _fake_sg_members(self, sg_ids=None):
return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])}
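# For illustration: with the defaults, _fake_sg_rules() returns
#
#     {'fake_sgid': [
#         {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
#          'ethertype': 'IPv4'},
#         {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
#          'ethertype': 'IPv6'},
#     ]}
#
# (rule order follows dict iteration under Python 2), and _fake_sg_members()
# returns {'fake_sgid': {'IPv4': '10.0.0.1', 'IPv6': 'fe80::1'}}.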
def test_prepare_port_filter_with_new_members(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {}
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [
mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.2']),
mock.call.set_members('fake_sgid', 'IPv6',
['fe80::1'])
]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def _setup_fake_firewall_members_and_rules(self, firewall):
firewall.sg_rules = self._fake_sg_rules()
firewall.pre_sg_rules = self._fake_sg_rules()
firewall.sg_members = self._fake_sg_members()
firewall.pre_sg_members = firewall.sg_members
def _prepare_rules_and_members_for_removal(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.pre_sg_members[OTHER_SGID] = (
self.firewall.pre_sg_members[FAKE_SGID])
def test_determine_remote_sgs_to_remove(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
self._prepare_rules_and_members_for_removal()
ports = [self._fake_port()]
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
_IPv6: [FAKE_SGID]})
self.assertEqual(
{_IPv4: set(), _IPv6: set([OTHER_SGID])},
self.firewall._determine_remote_sgs_to_remove(ports))
def test_get_remote_sg_ids_by_ipversion(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
ports = [self._fake_port()]
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids_sets_by_ipversion(ports))
def test_get_remote_sg_ids(self):
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID, FAKE_SGID, FAKE_SGID],
_IPv6: [OTHER_SGID, OTHER_SGID, OTHER_SGID]})
port = self._fake_port()
self.assertEqual(
{_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])},
self.firewall._get_remote_sg_ids(port))
def test_determine_sg_rules_to_remove(self):
self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
ports = [self._fake_port()]
self.assertEqual(set([OTHER_SGID]),
self.firewall._determine_sg_rules_to_remove(ports))
def test_get_sg_ids_set_for_ports(self):
sg_ids = set([FAKE_SGID, OTHER_SGID])
ports = [self._fake_port(sg_id) for sg_id in sg_ids]
self.assertEqual(sg_ids,
self.firewall._get_sg_ids_set_for_ports(ports))
def test_remove_sg_members(self):
self.firewall.sg_members = self._fake_sg_members([FAKE_SGID,
OTHER_SGID])
remote_sgs_to_remove = {_IPv4: set([FAKE_SGID]),
_IPv6: set([FAKE_SGID, OTHER_SGID])}
self.firewall._remove_sg_members(remote_sgs_to_remove)
self.assertIn(OTHER_SGID, self.firewall.sg_members)
self.assertNotIn(FAKE_SGID, self.firewall.sg_members)
def test_remove_unused_security_group_info_clears_unused_rules(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.prepare_port_filter(self._fake_port())
# create another SG which won't be referenced by any filtered port
fake_sg_rules = self.firewall.sg_rules['fake_sgid']
self.firewall.pre_sg_rules[OTHER_SGID] = fake_sg_rules
self.firewall.sg_rules[OTHER_SGID] = fake_sg_rules
# call the cleanup function, and check the unused sg_rules are out
self.firewall._remove_unused_security_group_info()
self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)
def test_remove_unused_security_group_info(self):
self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
self.firewall.pre_sg_members = self.firewall.sg_members
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
self.firewall.pre_sg_rules = self.firewall.sg_rules
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._remove_unused_security_group_info()
self.assertNotIn(OTHER_SGID, self.firewall.sg_members)
def test_not_remove_used_security_group_info(self):
self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
self.firewall.pre_sg_members = self.firewall.sg_members
self.firewall.sg_rules = self._fake_sg_rules(
remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
self.firewall.pre_sg_rules = self.firewall.sg_rules
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._remove_unused_security_group_info()
self.assertIn(OTHER_SGID, self.firewall.sg_members)
def test_remove_all_unused_info(self):
self._setup_fake_firewall_members_and_rules(self.firewall)
self.firewall.filtered_ports = {}
self.firewall._remove_unused_security_group_info()
self.assertFalse(self.firewall.sg_members)
self.assertFalse(self.firewall.sg_rules)
def test_prepare_port_filter_with_deleted_member(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.pre_sg_rules = self._fake_sg_rules()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': [
'10.0.0.1', '10.0.0.3', '10.0.0.4', '10.0.0.5'],
'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.2'],
'IPv6': ['fe80::1']}}
self.firewall.prepare_port_filter(self._fake_port())
calls = [
mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.3', '10.0.0.4',
'10.0.0.5']),
mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
def test_remove_port_filter_with_destroy_ipset_chain(self):
self.firewall.sg_rules = self._fake_sg_rules()
port = self._fake_port()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.1'],
'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': [],
'IPv6': []}}
self.firewall.prepare_port_filter(port)
self.firewall.filter_defer_apply_on()
self.firewall.sg_members = {'fake_sgid': {
'IPv4': [],
'IPv6': []}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.1'],
'IPv6': ['fe80::1']}}
self.firewall.remove_port_filter(port)
self.firewall.filter_defer_apply_off()
calls = [
mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
mock.call.get_name('fake_sgid', 'IPv4'),
mock.call.set_exists('fake_sgid', 'IPv4'),
mock.call.get_name('fake_sgid', 'IPv6'),
mock.call.set_exists('fake_sgid', 'IPv6'),
mock.call.destroy('fake_sgid', 'IPv4'),
mock.call.destroy('fake_sgid', 'IPv6')]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def test_prepare_port_filter_with_sg_no_member(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.sg_rules[FAKE_SGID].append(
{'direction': 'ingress', 'remote_group_id': 'fake_sgid2',
'ethertype': 'IPv4'})
self.firewall.sg_rules.update()
self.firewall.sg_members['fake_sgid'] = {
'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
self.firewall.pre_sg_members = {}
port = self._fake_port()
port['security_group_source_groups'].append('fake_sgid2')
self.firewall.prepare_port_filter(port)
calls = [mock.call.set_members('fake_sgid', 'IPv4',
['10.0.0.1', '10.0.0.2']),
mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
self.firewall.ipset.assert_has_calls(calls, any_order=True)
def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
self.firewall.sg_rules = self._fake_sg_rules()
self.firewall.pre_sg_rules = self._fake_sg_rules()
self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
'IPv6fake_sgid': ['fe80::1']}
self.firewall.sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.2'],
'IPv6': ['fe80::1']}}
self.firewall.pre_sg_members = {'fake_sgid': {
'IPv4': ['10.0.0.2'],
'IPv6': ['fe80::1']}}
self.firewall.sg_rules['fake_sgid'].remove(
{'direction': 'ingress', 'remote_group_id': 'fake_sgid',
'ethertype': 'IPv4'})
self.firewall.sg_rules.update()
self.firewall._defer_apply = True
port = self._fake_port()
self.firewall.filtered_ports['tapfake_dev'] = port
self.firewall._pre_defer_filtered_ports = {}
self.firewall._pre_defer_unfiltered_ports = {}
self.firewall.filter_defer_apply_off()
calls = [mock.call.destroy('fake_sgid', 'IPv4')]
        self.firewall.ipset.assert_has_calls(calls, any_order=True)
def test_sg_rule_expansion_with_remote_ips(self):
other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
self.firewall.sg_members = {'fake_sgid': {
'IPv4': [FAKE_IP['IPv4']] + other_ips,
'IPv6': [FAKE_IP['IPv6']]}}
port = self._fake_port()
rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
rules = self.firewall._expand_sg_rule_with_remote_ips(
rule, port, 'ingress')
self.assertEqual(list(rules),
[dict(list(rule.items()) +
[('source_ip_prefix', '%s/32' % ip)])
for ip in other_ips])
def test_build_ipv4v6_mac_ip_list(self):
mac_oth = 'ffff-ffff-ffff'
mac_unix = 'ff:ff:ff:ff:ff:ff'
ipv4 = FAKE_IP['IPv4']
ipv6 = FAKE_IP['IPv6']
fake_ipv4_pair = []
fake_ipv4_pair.append((mac_unix, ipv4))
fake_ipv6_pair = []
fake_ipv6_pair.append((mac_unix, ipv6))
mac_ipv4_pairs = []
mac_ipv6_pairs = []
self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4,
mac_ipv4_pairs, mac_ipv6_pairs)
self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs)
self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6,
mac_ipv4_pairs, mac_ipv6_pairs)
self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs)
def test_update_ipset_members(self):
self.firewall.sg_members[FAKE_SGID][_IPv4] = []
self.firewall.sg_members[FAKE_SGID][_IPv6] = []
sg_info = {constants.IPv4: [FAKE_SGID]}
self.firewall._update_ipset_members(sg_info)
calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])]
self.firewall.ipset.assert_has_calls(calls)
|
|
""" Handle movies """
from abc import ABCMeta, abstractmethod
import logging
from urlparse import urlparse
from resources.lib.util import settings
from resources.lib.objects.kodi_movies import KodiMovies
from resources.lib.date_utils import DateUtils
log = logging.getLogger("DINGS.db") # pylint: disable=invalid-name
class Movies(object):
""" Class to handle sync to kodi-db """
__metaclass__ = ABCMeta
def __init__(self, cursor):
self.kodi_db = KodiMovies(cursor)
self.date_utils = DateUtils()
@staticmethod
def get_name():
        ''' Gets the name that describes the class '''
return 'AbstractMovieUpdater'
def update(self, movie):
''' Update or add a movie '''
original_movie = self._get_move_from_release_or_imdb(
movie.get('id'),
movie.get('imdb')
)
if original_movie is None:
return self._add(movie)
return self._update(movie, original_movie)
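    # Illustrative sketch only (not from the original module): the `movie` dict
    # passed to update() is expected to carry at least the fields this class reads
    # elsewhere; the values below are made up.
    #   movie = {
    #       'id': 42,                     # release id, stored as the 'release' uniqueid
    #       'imdb': 'tt0000000',          # imdb id, stored as the 'imdb' uniqueid
    #       'folder': 'Some.Release.Folder',
    #       'version': 3,                 # combined with last_update into a change hash
    #       'last_update': ...,           # raw timestamp, converted via DateUtils
    #       'runtime': 120,               # minutes, converted to seconds
    #       'trailer': 'abc123',          # youtube video id, or None
    #       'poster_path': '/poster.jpg', 'backdrop_path': '/fanart.jpg',
    #       'genres': [...], 'tags': [...],
    #       'actors': [...], 'writers': [...], 'directors': [...],
    #   }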
    def _map_existing_data(self, movie_entity, existing_data):
        movieid, fileid, imdb_uid, pathid, last_update, content_version, uniqueid = existing_data
movie_entity.update({
'movieid': movieid,
'fileid': fileid,
'imdb_uid': imdb_uid,
'pathid': pathid,
'uniqueid': uniqueid
})
return movie_entity, {
'version': content_version,
'last_update': last_update
}
    def _update(self, movie, existing_data):
        movie_entity = self._map_move_data(movie.copy())
        movie_entity, org_movie = self._map_existing_data(movie_entity, existing_data)
if not self._should_update(org_movie, movie_entity):
return False
movie_entity['full_path'] = self._get_full_path(movie.get('folder'))
movie_entity['ratingid'] = self.kodi_db.get_ratingid(movie_entity.get('movieid'))
if movie_entity.get('uniqueid') is None:
movie_entity['uniqueid'] = self.kodi_db.add_uniqueid(**self._get_release_unique_id(movie_entity))
if movie_entity.get('imdb_uid') is None:
movie_entity['imdb_uid'] = self.kodi_db.add_uniqueid(**self._get_imdb_unique_id(movie_entity))
self.kodi_db.update_ratings(**movie_entity)
self.kodi_db.update_path(**movie_entity)
self.kodi_db.update_file(**movie_entity)
self.kodi_db.update_movie(**movie_entity)
self._add_or_update_meta(movie_entity)
return True
def _add(self, movie):
movie_entity = self._map_move_data(movie.copy())
movie_entity['movieid'] = self.kodi_db.create_entry()
movie_entity['full_path'] = self._get_full_path(movie_entity.get('folder'))
# add ratings
movie_entity['ratingid'] = self.kodi_db.add_ratings(**movie_entity)
# add imdb unique id for ref
self.kodi_db.add_uniqueid(**self._get_imdb_unique_id(movie_entity))
# add release id to support multiple releases of same movie
movie_entity['uniqueid'] = self.kodi_db.add_uniqueid(**self._get_release_unique_id(movie_entity))
# add path
movie_entity['pathid'] = self.kodi_db.add_path(**movie_entity)
movie_entity['fileid'] = self.kodi_db.add_file(**movie_entity)
# Add the movie
self.kodi_db.add_movie(**movie_entity)
self._add_or_update_meta(movie_entity)
self._add_people(movie_entity)
return True
def _get_full_path(self, folder):
""" Add host, username and password to folderpath """
url_parts = urlparse(settings("endpoint"))
return "%s://%s:%s@%s%s/movies/%s/" % (
url_parts.scheme,
settings("username"),
settings("password"),
url_parts.netloc,
url_parts.path,
folder
)
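    # For illustration only (made-up values): with endpoint 'http://media.example.com/api',
    # username 'user' and password 'secret', folder 'Some.Movie.2017' becomes
    #   http://user:secret@media.example.com/api/movies/Some.Movie.2017/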
def _get_move_from_release_or_imdb(self, release_id, imdb_id):
result = self.kodi_db.get_movie_from_release(release_id)
if result is None:
return self.kodi_db.get_movie_from_imdb(imdb_id)
return result
def _add_people(self, movie):
movieid = movie.get('movieid')
people = [{'Name': actor, 'Type': 'Actor'}
for actor in movie.get('actors')]
people.extend([{'Name': writer, 'Type': 'Writer'}
for writer in movie.get('writers')])
people.extend([{'Name': director, 'Type': 'Director'}
for director in movie.get('directors')])
self.kodi_db.add_people(movieid, people, 'movie')
def _add_or_update_meta(self, movie):
movieid = movie.get('movieid')
base_url = 'https://image.tmdb.org/t/p/%s%s'
poster = base_url % ('original', movie.get('poster_path'))
thumb = base_url % ('original', movie.get('poster_path'))
fanart = base_url % ('original', movie.get('backdrop_path'))
self.kodi_db.add_update_art(poster, movieid, 'poster', 'movie')
self.kodi_db.add_update_art(thumb, movieid, 'thumb', 'movie')
self.kodi_db.add_update_art(fanart, movieid, 'fanart', 'movie')
self.kodi_db.add_genres(movieid, movie.get('genres'))
# self.kodi_db.set_streamdetails(**movie)
self._sync_tags(movie)
# self._add_people(movie)
def _get_imdb_unique_id(self, movie):
"""
        Retrieves imdb unique data from movie for a uniqueid insert, as a dict
Arguments
movie: dict with movie info
"""
return {
'movieid': movie.get('movieid'),
'value': movie.get('imdb'),
'type': 'imdb'
}
def _get_release_unique_id(self, movie):
"""
        Retrieves release unique data from movie for a uniqueid insert, as a dict
Arguments
movie: dict with movie info
"""
return {
'movieid': movie.get('movieid'),
'value': movie.get('id'),
'type': 'release'
}
def _get_ratings_data(self, movie):
        return self._pick(movie, ['rating', 'votecount', 'movieid'])
def _map_move_data(self, movie):
last_update = self.date_utils.get_kodi_date_format(movie.get('last_update'))
dateadded = self.date_utils.get_kodi_date_format(movie.get('last_update'))
trailer = "plugin://plugin.video.youtube/?action=play_video&videoid=%s" % movie.get('trailer') \
if movie.get('trailer') else None
base_url = 'https://image.tmdb.org/t/p/%s%s'
poster_preview = base_url % ('w500', movie.get('poster_path'))
poster = base_url % ('original', movie.get('poster_path'))
fanart_preview = base_url % ('w500', movie.get('backdrop_path'))
fanart = base_url % ('original', movie.get('backdrop_path'))
thumb_xml = '<thumb aspect="poster" preview="%s">%s</thumb>' % (poster_preview, poster)
fanart_xml = '''
<fanart>
<thumb preview="%s">%s</thumb>
</fanart>
''' % (fanart_preview, fanart)
movie.update({
'shortplot': None,
'tagline': movie.get('tagline', None),
'runtime': self._get_runtime_in_seconds(movie.get('runtime')),
'studio': None,
'trailer': trailer or None,
'last_update': last_update,
'version': self._generate_str_hash(movie.get('version'), last_update),
'dateadded': dateadded,
'thumbs_xml': thumb_xml,
'poster': poster,
'fanart': fanart,
            'fanart_xml': fanart_xml
})
list_items = {}
for key, value in movie.iteritems():
if type(value) is list:
list_items["%s_list" % key] = self._map_array(value)
movie.update(list_items)
return movie
def _generate_str_hash(self, version, last_update):
return "%s:%s" % (version, last_update)
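    # e.g. _generate_str_hash(3, '2017-06-01 12:00:00') -> '3:2017-06-01 12:00:00'
    # (illustrative values; the pair is only compared for equality by
    # IncrementalMovieUpdater._should_update to detect changes)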
def _get_movie_data(self, movie):
return self._pick(movie, [
'title',
'plot',
'votecount',
'year',
'imdb',
'mpaa',
'released',
'writers',
'genres',
'directors',
'country'
], {
'shortplot': None,
'tagline': None,
'runtime': self._get_runtime_in_seconds(movie.get('runtime')),
'studio': None,
'trailer': None,
})
def _pick(self, data, fields, extras={}):
new_dict = {key: value for key,
value in data.iteritems() if key in fields}
new_dict.update(extras)
return {key.encode('ascii', 'ignore'): self._map_array(value)
for key, value in new_dict.iteritems()}
def _map_array(self, value):
if type(value) is list:
return self._array_to_string(value)
return value
def _array_to_string(self, array, delimiter=' / '):
return delimiter.join(array)
def _get_runtime_in_seconds(self, runtime):
if runtime:
return int(runtime) * 60
return None
def delete(self, movie):
self.kodi_db.remove_movie(movie['media_id'], movie['idFile'])
def get_movies_to_remove(self, release_ids):
'''
Compares release_ids to releases in kodidb
Parameters:
release_ids: array, external releases
Returns:
        List of media ids in Kodi that are not in the external release list
'''
all_refs = self.kodi_db.get_movie_refs()
release_set = set(release_ids)
return [{'media_id': media_id, 'title': title, 'idFile': idFile}
for media_id, release_id, title, idFile in all_refs
if int(release_id) not in release_set]
def _sync_tags(self, movie):
movieid = movie.get('movieid')
remote_tags = movie.get('tags') or []
current_tags = self.kodi_db.get_tags(movieid, remote_tags)
removed_tags = [
tag_id for tag_id, name, uniqueid, _ in current_tags
if name not in remote_tags and uniqueid
]
self.kodi_db.remove_tag_links(movieid, removed_tags)
current_tag_names = [tag for _, tag, _, _ in current_tags]
existing_new_tags = [tag_id for tag_id, name, _, tag_link in current_tags if not tag_link]
new_tags = [tag for tag in remote_tags if tag not in current_tag_names]
self.kodi_db.add_tag_links(movieid, existing_new_tags)
self.kodi_db.add_tags(movieid, new_tags)
@abstractmethod
def _should_update(self, orgMovie, newMovie):
pass
class IncrementalMovieUpdater(Movies):
    '''Does a check so that only changed movies are updated '''
@staticmethod
def get_name():
return 'Incremental Update'
def _should_update(self, orgMovie, newMovie):
return orgMovie.get('version') != newMovie.get('version')
class FullMovieUpdater(Movies):
''' Updates all movies regardless of last update '''
@staticmethod
def get_name():
return 'Full Update'
def _should_update(self, orgMovie, newMovie):
return True
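# A minimal usage sketch, not part of the original module. It assumes the caller
# already holds an open Kodi video-db cursor and a movie dict shaped as sketched
# above; `cursor`, `movie` and the log call are placeholders.
#
#   updater = FullMovieUpdater(cursor)        # or IncrementalMovieUpdater(cursor)
#   if updater.update(movie):                 # True when the movie was added or updated
#       log.info('synced %s', movie.get('title'))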
|
|
from kivy.tests.common import GraphicUnitTest
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.label import Label
from kivy.base import EventLoop
from kivy.clock import Clock
from kivy.tests.common import UTMotionEvent
from time import sleep
from itertools import count
DEBUG = False
touch_id = count()
class _TestGrid(GridLayout):
def __init__(self, **kwargs):
kwargs['cols'] = 1
kwargs['spacing'] = 10
kwargs['size_hint'] = (None, None)
super(_TestGrid, self).__init__(**kwargs)
self.bind(minimum_height=self.setter('height'))
self.bind(minimum_width=self.setter('width'))
for i in range(10):
self.add_widget(Label(
size_hint=(None, None),
height=100, width=1000,
text=str(i)
))
class _TestScrollbarHorizontal(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_width'] = 20
kwargs['do_scroll_y'] = False
super(_TestScrollbarHorizontal, self).__init__(**kwargs)
class _TestScrollbarVertical(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_width'] = 20
kwargs['do_scroll_x'] = False
super(_TestScrollbarVertical, self).__init__(**kwargs)
class _TestScrollbarBoth(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_width'] = 20
super(_TestScrollbarBoth, self).__init__(**kwargs)
class _TestScrollbarHorizontalMargin(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_margin'] = 40
kwargs['bar_width'] = 20
kwargs['do_scroll_y'] = False
super(_TestScrollbarHorizontalMargin, self).__init__(**kwargs)
class _TestScrollbarVerticalMargin(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_margin'] = 40
kwargs['bar_width'] = 20
kwargs['do_scroll_x'] = False
super(_TestScrollbarVerticalMargin, self).__init__(**kwargs)
class _TestScrollbarBothMargin(ScrollView):
def __init__(self, **kwargs):
kwargs['scroll_type'] = ["bars"]
kwargs['bar_margin'] = 40
kwargs['bar_width'] = 20
super(_TestScrollbarBothMargin, self).__init__(**kwargs)
class ScrollViewTestCase(GraphicUnitTest):
framecount = 0
def process_points(self, scroll, points):
win = EventLoop.window
dt = 0.02
for point in points:
if DEBUG:
print('point:', point, scroll.scroll_x, scroll.scroll_y)
Clock.schedule_once(lambda *dt: sleep(0.5), 0)
self.render(scroll)
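            # Each point is (x, y, nx, ny, bar_pos_x, bar_pos_y, border_check):
            # the touch goes down at (x, y) and moves to (nx, ny) with the bars
            # placed at (bar_pos_x, bar_pos_y); border_check=True means the touch
            # lands in the empty bar_margin area and must not scroll.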
x, y, nx, ny, pos_x, pos_y, border_check = point
scroll.bar_pos = (pos_x, pos_y)
touch = UTMotionEvent("unittest", next(touch_id), {
"x": x / float(win.width),
"y": y / float(win.height),
})
# we start with the default top-left corner
self.assertAlmostEqual(scroll.scroll_x, 0.0, delta=dt)
self.assertAlmostEqual(scroll.scroll_y, 1.0, delta=dt)
            # check the collision with the empty margin area
if border_check:
EventLoop.post_dispatch_input("begin", touch)
touch.move({
"x": nx / float(win.width),
"y": ny / float(win.height)
})
EventLoop.post_dispatch_input("update", touch)
EventLoop.post_dispatch_input("end", touch)
self.assertAlmostEqual(scroll.scroll_x, 0.0, delta=dt)
self.assertAlmostEqual(scroll.scroll_y, 1.0, delta=dt)
return
EventLoop.post_dispatch_input("begin", touch)
touch.move({
"x": nx / float(win.width),
"y": ny / float(win.height)
})
EventLoop.post_dispatch_input("update", touch)
EventLoop.post_dispatch_input("end", touch)
if DEBUG:
print(scroll.scroll_x, scroll.scroll_y)
Clock.schedule_once(lambda *dt: sleep(0.5), 0)
self.render(scroll)
# check the scroll position
self.assertAlmostEqual(
scroll.scroll_x, 0.0 if x == nx else 1.0,
delta=dt
)
self.assertAlmostEqual(
scroll.scroll_y, 1.0 if y == ny else 0.0,
delta=dt
)
# reset scroll to original state
scroll.scroll_x = 0.0
scroll.scroll_y = 1.0
def test_scrollbar_horizontal(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarHorizontal()
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
points = [
[left, bottom, right, bottom, 'bottom', 'right', False],
[left, top, right, top, 'top', 'right', False]
]
self.process_points(scroll, points)
self.render(scroll)
def test_scrollbar_vertical(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarVertical()
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
points = [
[right, top, right, bottom, 'bottom', 'right', False],
[left, top, left, bottom, 'bottom', 'left', False]
]
self.process_points(scroll, points)
self.render(scroll)
def test_scrollbar_both(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarBoth()
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
points = [
[left, bottom, right, bottom, 'bottom', 'right', False],
[left, top, right, top, 'top', 'right', False],
[right, top, right, bottom, 'bottom', 'right', False],
[left, top, left, bottom, 'bottom', 'left', False]
]
self.process_points(scroll, points)
self.render(scroll)
def test_scrollbar_horizontal_margin(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarHorizontalMargin()
margin = scroll.bar_margin
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
        # touch in the middle of the bar
m = margin + scroll.bar_width / 2.0
points = [
[left, bottom + m, right, bottom + m, 'bottom', 'right', False],
[left, top - m, right, top - m, 'top', 'right', False],
[left, bottom, right, bottom, 'bottom', 'right', True],
[left, top, right, top, 'top', 'right', True]
]
self.process_points(scroll, points)
self.render(scroll)
def test_scrollbar_vertical_margin(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarVerticalMargin()
margin = scroll.bar_margin
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
        # touch in the middle of the bar
m = margin + scroll.bar_width / 2.0
points = [
[right - m, top, right - m, bottom, 'bottom', 'right', False],
[left + m, top, left + m, bottom, 'bottom', 'left', False],
[right, top, right, bottom, 'bottom', 'right', True],
[left, top, left, bottom, 'bottom', 'left', True]
]
self.process_points(scroll, points)
self.render(scroll)
def test_scrollbar_both_margin(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = _TestScrollbarBothMargin()
margin = scroll.bar_margin
scroll.add_widget(grid)
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
left, right = scroll.to_window(scroll.x, scroll.right)
bottom, top = scroll.to_window(scroll.y, scroll.top)
        # touch in the middle of the bar
m = margin + scroll.bar_width / 2.0
points = [
[left, bottom + m, right, bottom + m, 'bottom', 'right', False],
[left, top - m, right, top - m, 'top', 'right', False],
[right - m, top, right - m, bottom, 'bottom', 'right', False],
[left + m, top, left + m, bottom, 'bottom', 'left', False],
[left, bottom, right, bottom, 'bottom', 'right', True],
[left, top, right, top, 'top', 'right', True],
[right, top, right, bottom, 'bottom', 'right', True],
[left, top, left, bottom, 'bottom', 'left', True]
]
self.process_points(scroll, points)
self.render(scroll)
def test_smooth_scroll_end(self):
EventLoop.ensure_window()
win = EventLoop.window
grid = _TestGrid()
scroll = ScrollView(smooth_scroll_end=10)
assert scroll.smooth_scroll_end == 10
scroll.add_widget(grid)
# XXX this shouldn't be needed, but previous tests apparently
# don't cleanup
while win.children:
win.remove_widget(win.children[0])
win.add_widget(scroll)
# get widgets ready
EventLoop.idle()
e = scroll.effect_y
assert e.velocity == 0
touch = UTMotionEvent("unittest", next(touch_id), {
"x": scroll.center_x / float(win.width),
"y": scroll.center_y / float(win.height),
})
touch.profile.append('button')
touch.button = 'scrollup'
EventLoop.post_dispatch_input("begin", touch)
# EventLoop.post_dispatch_input("update", touch)
assert e.velocity == 10 * scroll.scroll_wheel_distance
EventLoop.idle()
assert 0 < e.velocity < 10 * scroll.scroll_wheel_distance
EventLoop.post_dispatch_input("end", touch)
EventLoop.idle()
assert 0 < e.velocity < 10 * scroll.scroll_wheel_distance
# wait for velocity to die off
while e.velocity:
EventLoop.idle()
touch = UTMotionEvent("unittest", next(touch_id), {
"x": scroll.center_x / float(win.width),
"y": scroll.center_y / float(win.height),
})
touch.profile.append('button')
touch.button = 'scrolldown'
EventLoop.post_dispatch_input("begin", touch)
# EventLoop.post_dispatch_input("update", touch)
assert e.velocity == -10 * scroll.scroll_wheel_distance
EventLoop.idle()
assert 0 > e.velocity > -10 * scroll.scroll_wheel_distance
EventLoop.post_dispatch_input("end", touch)
EventLoop.idle()
assert 0 > e.velocity > -10 * scroll.scroll_wheel_distance
if __name__ == '__main__':
import unittest
unittest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations(object):
"""NetworkSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
"""Gets the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkSecurityGroup, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.NetworkSecurityGroup"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkSecurityGroup"]
"""Creates or updates a network security group in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to the create or update network security group
operation.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
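    # Hedged usage sketch, not part of the generated code: assuming a
    # NetworkManagementClient instance `client` for this api-version, the LRO
    # helpers above are typically driven like
    #   poller = client.network_security_groups.begin_create_or_update(
    #       'my-rg', 'my-nsg', {'location': 'westus'})
    #   nsg = poller.result()        # blocks until provisioning completes
    # `client`, the resource group and NSG names are placeholders.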
def _update_tags_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkSecurityGroup"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
        # type: (...) -> LROPoller["_models.NetworkSecurityGroup"]
        """Updates a network security group's tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param parameters: Parameters supplied to update network security group tags.
:type parameters: ~azure.mgmt.network.v2018_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroup]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkSecurityGroupListResult"]
"""Gets all network security groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_02_01.models.NetworkSecurityGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
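    # Hedged usage sketch, not part of the generated code: list() and list_all()
    # return an ItemPaged iterator that fetches pages lazily, so callers simply
    # iterate over it; `client` and the resource group name are placeholders.
    #   for nsg in client.network_security_groups.list('my-rg'):
    #       print(nsg.name)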
|
|
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._fatomic import atomic_open
from .exceptions import RequestCancelled
from .utils import get_filename
from .utils import check_status
from .utils import GeneratorAdapter
from datetime import datetime
import itertools
import json
chunk_size = 32 * 1024
class Response(object):
def __init__(self, request, dispatcher):
self.request = request
self._dispatcher = dispatcher
self._body = None
self._future = None
self._cancel = False
def _create_body(self, response):
return self.request.body_type(self.request, response, self._dispatcher)
def get_body(self):
'''Get the response Body
:returns Body: A Body object containing the response.
'''
if self._body is None:
resp = self._dispatcher._dispatch(self.request)
self._body = self._create_body(resp)
return self._body
def _async_callback(self, session, response):
if self._cancel:
raise RequestCancelled()
check_status(response)
self._body = self._create_body(response)
self._handler(self._body)
if self._wait:
self._wait(self._body)
def get_body_async(self, handler, wait=None):
if self._future is None:
self._handler = handler
self._wait = wait
self._future = self._dispatcher._dispatch_async(
self.request, self._async_callback
)
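    # Illustrative only: a typical asynchronous flow hands get_body_async() a
    # handler and then blocks on wait(); `resp` and the file name are placeholders.
    #   resp.get_body_async(lambda body: body.write('download.dat'))
    #   resp.wait()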
def wait(self):
'''Await completion of this request.
:returns Body: A Body object containing the response.
'''
if self._future:
self._future.result()
return self._body
def cancel(self):
'''Cancel any request.'''
if self._body:
self._body._cancel = True
else:
self._cancel = True
class Request(object):
def __init__(self, url, auth, params=None, body_type=Response, data=None,
method='GET'):
self.url = url
self.auth = auth
self.params = params
self.body_type = body_type
self.data = data
self.method = method
class Body(object):
'''A Body is a representation of a resource from the API.
'''
def __init__(self, request, http_response, dispatcher):
self._request = request
self.response = http_response
self._dispatcher = dispatcher
self.size = int(self.response.headers.get('content-length', 0))
self._cancel = False
@property
def name(self):
'''The name of this resource. The default is to use the
content-disposition header value from the response.'''
return get_filename(self.response)
def __len__(self):
return self.size
def __iter__(self):
return (c for c in self.response.iter_content(chunk_size=chunk_size))
def last_modified(self):
'''Read the last-modified header as a datetime, if present.'''
lm = self.response.headers.get('last-modified', None)
return datetime.strptime(lm, '%a, %d %b %Y %H:%M:%S GMT') if lm \
else None
def get_raw(self):
'''Get the decoded text content from the response'''
return self.response.content.decode('utf-8')
def _write(self, fp, callback):
total = 0
if not callback:
def noop(*a, **kw):
pass
callback = noop
callback(start=self)
for chunk in self:
if self._cancel:
raise RequestCancelled()
fp.write(chunk)
size = len(chunk)
total += size
callback(wrote=size, total=total)
# seems some responses don't have a content-length header
if self.size == 0:
self.size = total
callback(finish=self)
def write(self, file=None, callback=None):
        '''Write the contents of the body to the optionally provided file,
        reporting progress to the optional callback. The callback will be
        invoked in 3 different ways:
* First as ``callback(start=self)``
* For each chunk of data written as
``callback(wrote=chunk_size_in_bytes, total=all_byte_cnt)``
* Upon completion as ``callback(finish=self)``
:param file: file name or file-like object
:param callback: optional progress callback
'''
if not file:
file = self.name
if not file:
raise ValueError('no file name provided or discovered in response')
if hasattr(file, 'write'):
self._write(file, callback)
else:
with atomic_open(file, 'wb') as fp:
self._write(fp, callback)
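    # Illustrative only: the progress callback receives the keyword arguments
    # described above, so a minimal logger could look like this (`body` and the
    # file name are placeholders):
    #   def progress(start=None, wrote=0, total=0, finish=None):
    #       if wrote:
    #           print('wrote %d bytes (%d total)' % (wrote, total))
    #   body.write('scene.tif', callback=progress)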
class JSON(Body):
'''A Body that contains JSON'''
def get(self):
'''Get the response as a JSON dict'''
return self.response.json()
class Paged(JSON):
ITEM_KEY = 'features'
LINKS_KEY = '_links'
NEXT_KEY = '_next'
def next(self):
links = self.get()[self.LINKS_KEY]
next_ = links.get(self.NEXT_KEY, None)
if next_:
request = Request(next_, self._request.auth, body_type=type(self))
return self._dispatcher.response(request).get_body()
def _pages(self):
page = self
while page is not None:
yield page
page = page.next()
def iter(self, pages=None):
'''Get an iterator of pages.
:param int pages: optional limit to number of pages
:return: iter of this and subsequent pages
'''
i = self._pages()
if pages is not None:
i = itertools.islice(i, pages)
return i
def json_encode(self, out, limit=None, sort_keys=False, indent=None):
'''Encode the results of this paged response as JSON writing to the
provided file-like `out` object. This function will iteratively read
as many pages as present, streaming the contents out as JSON.
:param file-like out: an object with a `write` function
:param int limit: optional maximum number of items to write
:param bool sort_keys: if True, output keys sorted, default is False
        :param int indent: if given, pretty-print output with this indent level, default is None
'''
stream = self._json_stream(limit)
enc = json.JSONEncoder(indent=indent, sort_keys=sort_keys)
for chunk in enc.iterencode(stream):
out.write(u'%s' % chunk)
def items_iter(self, limit):
'''Get an iterator of the 'items' in each page. Instead of a feature
collection from each page, the iterator yields the features.
:param int limit: The number of 'items' to limit to.
:return: iter of items in page
'''
pages = (page.get() for page in self._pages())
items = itertools.chain.from_iterable(
(p[self.ITEM_KEY] for p in pages)
)
if limit is not None:
items = itertools.islice(items, limit)
return items
def _json_stream(self, limit):
items = self.get()[self.ITEM_KEY]
# if there are no results, the GeneratorAdapter doesn't play well
if len(items):
items = GeneratorAdapter(self.items_iter(limit))
else:
items = []
return {
self.ITEM_KEY: items
}
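    # A rough usage sketch for the paging helpers above. The `body` variable
    # (a Paged instance obtained from some API response) and the 'id' field of
    # each item are assumptions for illustration; iter(), items_iter() and
    # json_encode() are the methods defined in this class.
    #
    # for page in body.iter(pages=2):            # walk at most two pages
    #     print(len(page.get()[body.ITEM_KEY]))
    #
    # for item in body.items_iter(limit=50):     # or stream individual items
    #     print(item.get('id'))
    #
    # import sys
    # body.json_encode(sys.stdout, limit=100)    # or emit pages as one JSON doc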
# GeoJSON feature
class Features(Paged):
def _json_stream(self, limit):
stream = super(Features, self)._json_stream(limit)
json_body = self.get()
# patch back in the count if present
if 'count' in json_body:
stream['count'] = json_body.get('count')
stream['type'] = 'FeatureCollection'
return stream
class Items(Features):
pass
class Searches(Paged):
ITEM_KEY = 'searches'
class Mosaics(Paged):
ITEM_KEY = 'mosaics'
class MosaicQuads(Paged):
ITEM_KEY = 'items'
class AnalyticsPaged(Paged):
LINKS_KEY = 'links'
NEXT_KEY = 'next'
ITEM_KEY = 'data'
def next(self):
links = self.get()[self.LINKS_KEY]
next_ = None
for link in links:
if link['rel'] == self.NEXT_KEY:
next_ = link['href']
if next_:
request = Request(next_, self._request.auth, body_type=type(self))
return self._dispatcher.response(request).get_body()
# The analytics API returns two conceptual types of objects: WFS3-compliant
# objects and everything else. There may be some overlap (ex. subscriptions and
# collections).
class Feeds(AnalyticsPaged):
pass
class Subscriptions(AnalyticsPaged):
pass
class WFS3Paged(AnalyticsPaged):
pass
class WFS3Collections(AnalyticsPaged):
ITEM_KEY = 'collections'
class WFS3Features(AnalyticsPaged):
# Explicitly disambiguate between WFS3 and GeoJSON features because the
# differences in the structure of the response envelope result in paging
# slightly differently.
ITEM_KEY = 'features'
class Orders(Paged):
ITEM_KEY = 'orders'
NEXT_KEY = 'next'
class Order(JSON):
LINKS_KEY = '_links'
RESULTS_KEY = 'results'
LOCATION_KEY = 'location'
def get_results(self):
links = self.get()[self.LINKS_KEY]
results = links.get(self.RESULTS_KEY, None)
return results
def get_locations(self):
results = self.get_results()
locations = [r[self.LOCATION_KEY] for r in results]
return locations
def items_iter(self, limit):
'''Get an iterator of the 'items' in each order.
The iterator yields the individual items in the order.
:param int limit: The number of 'items' to limit to.
:return: iter of items in page
'''
locations = iter(self.get_locations())
        if limit is not None:
            locations = itertools.islice(locations, limit)
        return locations
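    # A hedged sketch of reading download locations from an Order body. The
    # `order` variable is assumed to be an Order instance produced elsewhere
    # by the client; get_locations() and items_iter() are defined above.
    #
    # for url in order.items_iter(limit=None):
    #     print(url)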
|
|
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
    return ag.Agent(p)
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vacuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vacuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'submissions/Huang/newWall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bson
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from mongokit.operators import SchemaOperator, IS
from mongokit.helpers import DotCollapsedDict
from mongokit.helpers import DotExpandedDict
from mongokit.helpers import i18nDotedDict
from mongokit.helpers import DotedDict
__all__ = [
'AuthorizedTypeError',
'BadKeyError',
'CustomType',
'DefaultFieldTypeError',
'DotCollapsedDict',
'DotedDict',
'DotExpandedDict',
'DuplicateDefaultValueError',
'DuplicateRequiredError',
'i18n',
'i18nError',
'ModifierOperatorError',
'RequireFieldError',
'SchemaDocument',
'SchemaDocumentError',
'SchemaProperties',
'SchemaTypeError',
'Set',
'StructureError',
'ValidationError',
]
class CustomType(object):
init_type = None
mongo_type = None
python_type = None
def __init__(self):
if self.mongo_type is None:
raise TypeError("`mongo_type` property must be specify in %s" %
self.__class__.__name__)
if self.python_type is None:
raise TypeError("`python_type` property must be specify in %s" %
self.__class__.__name__)
def to_bson(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def to_python(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def validate(self, value, path):
"""
        This method is optional. It adds a validation layer.
        It is called in Document.validate().
value: the value of the field
path: the field name (ie, 'foo' or 'foo.bar' if nested)
"""
pass
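# A small illustrative CustomType subclass sketching the to_bson/to_python
# contract above. The name UpperString is hypothetical and not part of
# mongokit; only the hooks defined in this module are used.
#
# class UpperString(CustomType):
#     mongo_type = unicode
#     python_type = unicode
#
#     def to_bson(self, value):
#         if value is not None:
#             return value.upper()      # stored uppercased in mongodb
#
#     def to_python(self, value):
#         return value                  # handed back to python unchanged
#
#     def validate(self, value, path):
#         if value is not None and not value.strip():
#             raise ValueError('%s must not be blank' % path)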
# fields which do not need to be declared in the structure
STRUCTURE_KEYWORDS = []
class SchemaDocumentError(Exception):
pass
class RequireFieldError(SchemaDocumentError):
pass
class StructureError(SchemaDocumentError):
pass
class BadKeyError(SchemaDocumentError):
pass
class AuthorizedTypeError(SchemaDocumentError):
pass
class ValidationError(SchemaDocumentError):
pass
class DuplicateRequiredError(SchemaDocumentError):
pass
class DuplicateDefaultValueError(SchemaDocumentError):
pass
class ModifierOperatorError(SchemaDocumentError):
pass
class SchemaTypeError(SchemaDocumentError):
pass
class DefaultFieldTypeError(SchemaDocumentError):
pass
class i18nError(SchemaDocumentError):
pass
class DeprecationError(Exception):
pass
class DuplicateI18nError(Exception):
pass
class SchemaProperties(type):
def __new__(mcs, name, bases, attrs):
attrs['_protected_field_names'] = set(
['_protected_field_names', '_namespaces', '_required_namespace'])
for base in bases:
parent = base.__mro__[0]
if not hasattr(parent, 'structure'):
continue
if parent.structure is not None:
#parent = parent()
if parent.structure:
if 'structure' not in attrs and parent.structure:
attrs['structure'] = parent.structure.copy()
else:
obj_structure = attrs.get('structure', {}).copy()
attrs['structure'] = parent.structure.copy()
attrs['structure'].update(obj_structure)
if parent.required_fields:
attrs['required_fields'] = list(set(
attrs.get('required_fields', [])+parent.required_fields))
if parent.default_values:
obj_default_values = attrs.get('default_values', {}).copy()
attrs['default_values'] = parent.default_values.copy()
attrs['default_values'].update(obj_default_values)
if parent.validators:
obj_validators = attrs.get('validators', {}).copy()
attrs['validators'] = parent.validators.copy()
attrs['validators'].update(obj_validators)
if parent.i18n:
attrs['i18n'] = list(set(
attrs.get('i18n', [])+parent.i18n))
if attrs.get('authorized_types'):
attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
for mro in bases[0].__mro__:
attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
if attrs.get('structure') and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
base = bases[0]
if not attrs.get('authorized_types'):
attrs['authorized_types'] = base.authorized_types
base._validate_structure(attrs['structure'], name, attrs.get('authorized_types'))
attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure']))
if [1 for i in attrs['_namespaces'] if type(i) is type]:
raise DeprecationError("%s: types are not allowed as structure key anymore" % name)
mcs._validate_descriptors(attrs)
## building required fields namespace
attrs['_required_namespace'] = set([])
for rf in attrs.get('required_fields', []):
splited_rf = rf.split('.')
for index in range(len(splited_rf)):
attrs['_required_namespace'].add(".".join(splited_rf[:index+1]))
attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True)
elif attrs.get('structure') is not None and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
attrs['_collapsed_struct'] = {}
attrs['_i18n_namespace'] = []
if attrs.get('i18n'):
attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i in attrs['i18n']])
return type.__new__(mcs, name, bases, attrs)
@classmethod
def _validate_descriptors(mcs, attrs):
# TODO i18n validator
for dv in attrs.get('default_values', {}):
if not dv in attrs['_namespaces']:
raise ValueError("Error in default_values: can't find %s in structure" % dv)
for required in attrs.get('required_fields', []):
if required not in attrs['_namespaces']:
raise ValueError("Error in required_fields: can't find %s in structure" % required)
for validator in attrs.get('validators', {}):
if validator not in attrs['_namespaces']:
raise ValueError("Error in validators: can't find %s in structure" % validator)
# required_field
if attrs.get('required_fields'):
if len(attrs['required_fields']) != len(set(attrs['required_fields'])):
raise DuplicateRequiredError("duplicate required_fields : %s" % attrs['required_fields'])
# i18n
if attrs.get('i18n'):
if len(attrs['i18n']) != len(set(attrs['i18n'])):
raise DuplicateI18nError("duplicated i18n : %s" % attrs['i18n'])
for _i18n in attrs['i18n']:
if _i18n not in attrs['_namespaces']:
raise ValueError("Error in i18n: can't find {} in structure".format(_i18n))
class SchemaDocument(dict):
"""
    A SchemaDocument is a dictionary with a built-in structured schema.
    The validate method will check that the document matches the underlying
    structure. A structure must be specified in each SchemaDocument.
>>> class TestDoc(SchemaDocument):
... structure = {
... "foo":unicode,
... "bar":int,
... "nested":{
... "bla":float}}
`unicode`, `int`, `float` are python types listed in `mongokit.authorized_types`.
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': None, 'nested': {'bla': None}}
A SchemaDocument works just like dict:
>>> doc['bar'] = 3
>>> doc['foo'] = "test"
We can describe fields as required with the required attribute:
>>> TestDoc.required_fields = ['bar', 'nested.bla']
>>> doc = TestDoc()
>>> doc['bar'] = 2
Validation is made with the `validate()` method:
>>> doc.validate() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
RequireFieldError: nested.bla is required
Default values can be set by using the attribute default_values :
>>> TestDoc.default_values = {"bar":3, "nested.bla":2.0}
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': 3, 'nested': {'bla': 2.0}}
>>> doc.validate()
Validators can be added in order to validate some values :
>>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0}
>>> doc = TestDoc()
>>> doc['bar'] = 3
>>> doc['nested']['bla'] = 2.0
>>> doc.validate()
Traceback (most recent call last):
...
ValidationError: nested.bla does not pass the validator <lambda>
If you want to use the dot notation (ala json), you must set the
`use_dot_notation` attribute to True:
>>> class TestDotNotation(SchemaDocument):
... structure = {
... "foo":{ "bar":unicode}
... }
... use_dot_notation=True
>>> doc = TestDotNotation()
>>> doc.foo.bar = u"bla"
>>> doc
{"foo":{"bar":u"bla}}
"""
__metaclass__ = SchemaProperties
structure = None
required_fields = []
default_values = {}
validators = {}
i18n = []
raise_validation_errors = True
skip_validation = False
# if you want to have all schemaless benefits (default False but should change)
# warning, if use_schemaless is True, Migration features can not be used.
use_schemaless = False
# If you want to use the dot notation, set this to True:
use_dot_notation = False
dot_notation_warning = False
authorized_types = [
type(None),
bool,
int,
long,
float,
unicode,
basestring,
list,
dict,
datetime.datetime,
bson.binary.Binary,
CustomType,
]
def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'):
"""
doc : a dictionary
gen_skel : if True, generate automatically the skeleton of the doc
filled with NoneType each time validate() is called. Note that
if doc is not {}, gen_skel is always False. If gen_skel is False,
default_values cannot be filled.
        gen_auth_types: if True, generate automatically the self.authorized_types
            attribute from the class-level authorized_types list
"""
super(SchemaDocument, self).__init__()
if self.structure is None:
self.structure = {}
self._current_lang = lang
self._fallback_lang = fallback_lang
self.validation_errors = {}
# init
if doc:
for k, v in doc.iteritems():
self[k] = v
gen_skel = False
if gen_skel:
self.generate_skeleton()
if self.default_values:
self._set_default_fields(self, self.structure)
else:
self._process_custom_type('python', self, self.structure)
if self.use_dot_notation:
self.__generate_doted_dict(self, self.structure)
if self.i18n:
self._make_i18n()
def generate_skeleton(self):
"""
validate and generate the skeleton of the document
from the structure (unknown values are set to None)
"""
self.__generate_skeleton(self, self.structure)
def validate(self):
"""
validate the document.
This method will verify if :
* the doc follow the structure,
* all required fields are filled
Additionally, this method will process all
validators.
"""
if self.validators:
self._process_validators(self, self.structure)
self._process_custom_type('bson', self, self.structure)
self._validate_doc(self, self.structure)
self._process_custom_type('python', self, self.structure)
if self.required_fields:
self._validate_required(self, self.structure)
def __setattr__(self, key, value):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self.structure[key], i18n):
self[key][self._current_lang] = value
else:
self[key] = value
else:
if self.dot_notation_warning and not key.startswith('_') and key not in \
['db', 'collection', 'versioning_collection', 'connection', 'fs']:
log.warning("dot notation: {} was not found in structure. Add it as attribute instead".format(key))
dict.__setattr__(self, key, value)
def __getattr__(self, key):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self[key], i18n):
if self._current_lang not in self[key]:
return self[key].get(self._fallback_lang)
return self[key][self._current_lang]
return self[key]
else:
return dict.__getattribute__(self, key)
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
# thanks jean_b for the patch
for key, value in dic.items():
if isinstance(value, dict) and len(value):
if type(key) is type:
yield '$%s' % key.__name__
else:
yield key
for child_key in cls.__walk_dict(value):
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
#if type(child_key) is type:
# new_child_key = "$%s" % child_key.__name__
#else:
if type(child_key) is not type:
new_child_key = child_key
yield '%s.%s' % (new_key, new_child_key)
elif type(key) is type:
yield '$%s' % key.__name__
# elif isinstance(value, list) and len(value):
# if isinstance(value[0], dict):
# for child_key in cls.__walk_dict(value[0]):
# #if type(key) is type:
# # new_key = "$%s" % key.__name__
# #else:
# if type(key) is not type:
# new_key = key
# #if type(child_key) is type:
# # new_child_key = "$%s" % child_key.__name__
# #else:
# if type(child_key) is not type:
# new_child_key = child_key
# yield '%s.%s' % (new_key, new_child_key)
# else:
# if type(key) is not type:
# yield key
# #else:
# # yield ""
else:
if type(key) is not type:
yield key
#else:
# yield ""
@classmethod
def _validate_structure(cls, structure, name, authorized_types):
"""
validate if all fields in self.structure are in authorized types.
"""
##############
def __validate_structure(struct, name, _authorized):
            if type(struct) is type:
                if struct not in authorized_types:
                    raise StructureError("%s: %s is not an authorized type" % (name, struct))
elif isinstance(struct, dict):
for key in struct:
if isinstance(key, basestring):
if "." in key:
raise BadKeyError("%s: %s must not contain '.'" % (name, key))
if key.startswith('$'):
raise BadKeyError("%s: %s must not start with '$'" % (name, key))
elif type(key) is type:
if not key in authorized_types:
raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key))
else:
raise StructureError("%s: %s must be a basestring or a type" % (name, key))
if struct[key] is None:
pass
elif isinstance(struct[key], dict):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], list):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], tuple):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], CustomType):
__validate_structure(struct[key].mongo_type, name, authorized_types)
elif isinstance(struct[key], SchemaProperties):
pass
elif isinstance(struct[key], SchemaOperator):
__validate_structure(struct[key], name, authorized_types)
elif hasattr(struct[key], 'structure'):
__validate_structure(struct[key], name, authorized_types)
elif struct[key] not in authorized_types:
ok = False
for auth_type in authorized_types:
if struct[key] is None:
ok = True
else:
try:
if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type):
ok = True
except TypeError:
raise TypeError("%s: %s is not a type" % (name, struct[key]))
if not ok:
raise StructureError(
"%s: %s is not an authorized type" % (name, struct[key]))
elif isinstance(struct, list) or isinstance(struct, tuple):
for item in struct:
__validate_structure(item, name, authorized_types)
elif isinstance(struct, SchemaOperator):
if isinstance(struct, IS):
for operand in struct:
if type(operand) not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
else:
for operand in struct:
if operand not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
elif isinstance(struct, SchemaProperties):
pass
else:
ok = False
for auth_type in authorized_types:
if isinstance(struct, auth_type):
ok = True
if not ok:
raise StructureError("%s: %s is not an authorized_types" % (name, struct))
#################
if structure is None:
raise StructureError("%s.structure must not be None" % name)
if not isinstance(structure, dict):
raise StructureError("%s.structure must be a dict instance" % name)
__validate_structure(structure, name, authorized_types)
def _raise_exception(self, exception, field, message):
if self.raise_validation_errors:
raise exception(message)
else:
if not field in self.validation_errors:
self.validation_errors[field] = []
self.validation_errors[field].append(exception(message))
def _validate_doc(self, doc, struct, path=""):
"""
check if doc field types match the doc field structure
"""
if type(struct) is type or struct is None:
if struct is None:
if type(doc) not in self.authorized_types:
self._raise_exception(AuthorizedTypeError, type(doc).__name__,
"%s is not an authorized types" % type(doc).__name__)
elif not isinstance(doc, struct) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.__name__, type(doc).__name__))
elif isinstance(struct, CustomType):
if not isinstance(doc, struct.mongo_type) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.mongo_type.__name__, type(doc).__name__))
struct.validate(doc, path=path)
elif isinstance(struct, SchemaOperator):
if not struct.validate(doc) and doc is not None:
if isinstance(struct, IS):
self._raise_exception(SchemaTypeError, path,
"%s must be in %s not %s" % (path, struct._operands, doc))
else:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (path, struct, type(doc).__name__))
elif isinstance(struct, dict):
if not isinstance(doc, type(struct)):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, type(struct).__name__, type(doc).__name__))
struct_length = len(struct) if not '_id' in struct else len(struct) - 1
if len(doc) != struct_length:
struct_doc_diff = list(set(struct).difference(set(doc)))
if struct_doc_diff:
for field in struct_doc_diff:
if (type(field) is not type) and (not self.use_schemaless):
self._raise_exception(StructureError, None,
"missed fields %s in %s" % (struct_doc_diff, type(doc).__name__))
else:
struct_struct_diff = list(set(doc).difference(set(struct)))
bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS]
if bad_fields and not self.use_schemaless:
self._raise_exception(StructureError, None,
"unknown fields %s in %s" % (bad_fields, type(doc).__name__))
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if new_key.split('.')[-1].startswith("$"):
for doc_key in doc:
if not isinstance(doc_key, key):
self._raise_exception(SchemaTypeError, path,
"key of %s must be an instance of %s not %s" % (
path, key.__name__, type(doc_key).__name__))
self._validate_doc(doc[doc_key], struct[key], new_path)
else:
if key in doc:
self._validate_doc(doc[key], struct[key], new_path)
elif isinstance(struct, list):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (path, type(doc).__name__))
if not len(struct):
struct = None
else:
struct = struct[0]
for obj in doc:
self._validate_doc(obj, struct, path)
elif isinstance(struct, tuple):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (
path, type(doc).__name__))
if len(doc) != len(struct):
self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % (
path, len(struct), len(doc)))
for i in range(len(struct)):
self._validate_doc(doc[i], struct[i], path)
def _process_validators(self, doc, _struct, _path=""):
doted_doc = DotCollapsedDict(doc)
for key, validators in self.validators.iteritems():
if key in doted_doc and doted_doc[key] is not None:
if not hasattr(validators, "__iter__"):
validators = [validators]
for validator in validators:
try:
if not validator(doted_doc[key]):
raise ValidationError("%s does not pass the validator " + validator.__name__)
except Exception, e:
self._raise_exception(ValidationError, key,
unicode(e) % key)
def _process_custom_type(self, target, doc, struct, path="", root_path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
            # if the value is a dict, we have another structure to validate
#
#
# It is not a dict nor a list but a simple key:value
#
if isinstance(struct[key], CustomType):
if target == 'bson':
if key in doc:
if struct[key].python_type is not None:
if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(doc[key]).__name__))
doc[key] = struct[key].to_bson(doc[key])
else:
if key in doc:
doc[key] = struct[key].to_python(doc[key])
elif isinstance(struct[key], dict):
if doc: # we don't need to process an empty doc
if type(key) is type:
for doc_key in doc: # process type's key such {unicode:int}...
self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path)
else:
if key in doc: # we don't care about missing fields
self._process_custom_type(target, doc[key], struct[key], new_path, root_path)
#
# If the struct is a list, we have to validate all values into it
#
elif type(struct[key]) is list:
#
# check if the list must not be null
#
if struct[key]:
l_objs = []
if isinstance(struct[key][0], CustomType):
for obj in doc[key]:
if target == 'bson':
if struct[key][0].python_type is not None:
if not isinstance(obj, struct[key][0].python_type) and obj is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(obj).__name__))
obj = struct[key][0].to_bson(obj)
else:
obj = struct[key][0].to_python(obj)
l_objs.append(obj)
doc[key] = l_objs
elif isinstance(struct[key][0], dict):
if doc.get(key):
for obj in doc[key]:
self._process_custom_type(target, obj, struct[key][0], new_path, root_path)
def _set_default_fields(self, doc, struct, path=""):
# TODO check this out, this method must be restructured
for key in struct:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
# default_values :
            # if the value is None, check if a default value exists.
            # if one exists and it is callable, call it; otherwise
            # just use the value as-is
#
if type(key) is not type:
if doc[key] is None and new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key], CustomType):
if not isinstance(new_value, struct[key].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(new_value).__name__))
doc[key] = new_value
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and new_path not in self.i18n:
#
# if the dict is still empty into the document we build
# it with None values
#
if len(struct[key]) and not [i for i in struct[key].keys() if type(i) is type]:
self._set_default_fields(doc[key], struct[key], new_path)
else:
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
doc[key] = new_value
elif isinstance(struct[key], list):
if new_path in self.default_values:
for new_value in self.default_values[new_path]:
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key][0], CustomType):
if not isinstance(new_value, struct[key][0].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(new_value).__name__))
doc[key].append(new_value)
else: # what else
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if new_path in self.i18n:
doc[key] = i18n(
field_type=struct[key],
field_name=key
)
doc[key].update(new_value)
else:
doc[key] = new_value
def _validate_required(self, doc, _struct, _path="", _root_path=""):
doted_struct = DotCollapsedDict(self.structure)
doted_doc = DotCollapsedDict(doc, reference=doted_struct)
for req in self.required_fields:
if doted_doc.get(req) is None and doted_struct.get(req) is not dict:
if not isinstance(doted_struct.get(req), CustomType):
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif isinstance(doted_struct.get(req), CustomType) and doted_struct[req].mongo_type is not dict:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == []:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == {}:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
def __generate_skeleton(self, doc, struct, path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
            # Automatically generate the skeleton with NoneType
#
if type(key) is not type and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict and self.use_dot_notation:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
else:
if callable(struct[key]):
doc[key] = struct[key]()
else:
doc[key] = type(struct[key])()
elif struct[key] is dict:
doc[key] = {}
elif isinstance(struct[key], list):
doc[key] = type(struct[key])()
elif isinstance(struct[key], CustomType):
if struct[key].init_type is not None:
doc[key] = struct[key].init_type()
else:
doc[key] = None
elif struct[key] is list:
doc[key] = []
elif isinstance(struct[key], tuple):
doc[key] = [None for _ in range(len(struct[key]))]
else:
doc[key] = None
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_skeleton(doc[key], struct[key], new_path)
def __generate_doted_dict(self, doc, struct, path=""):
for key in struct:
#
            # Automatically generate the skeleton with NoneType
#
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if type(key) is not type: # and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
#
            # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_doted_dict(doc[key], struct[key], new_path)
def _make_i18n(self):
doted_dict = DotCollapsedDict(self.structure)
for field in self.i18n:
if field not in doted_dict:
self._raise_exception(ValidationError, field,
"%s not found in structure" % field)
if not isinstance(doted_dict[field], i18n):
doted_dict[field] = i18n(
field_type=doted_dict[field],
field_name=field
)
self.structure.update(DotExpandedDict(doted_dict))
def set_lang(self, lang):
self._current_lang = lang
def get_lang(self):
return self._current_lang
class i18n(dict, CustomType):
""" CustomType to deal with i18n """
mongo_type = list
def __init__(self, field_type=None, field_name=None):
super(i18n, self).__init__()
self.python_type = self.__class__
self._field_type = field_type
self._field_name = field_name
def __call__(self):
return i18n(self._field_type, self._field_name)
def to_bson(self, value):
if value is not None:
for l, v in value.iteritems():
if isinstance(v, list) and isinstance(self._field_type, list):
for i in v:
if not isinstance(i, self._field_type[0]):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type[0], type(i).__name__))
else:
if not isinstance(v, self._field_type):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type, type(v).__name__))
return [{'lang': l, 'value': v} for l, v in value.iteritems()]
def to_python(self, value):
if value is not None:
i18n_dict = self.__class__(self._field_type)
for i in value:
i18n_dict[i['lang']] = i['value']
return i18n_dict
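# A brief sketch of the i18n round trip implemented above: a mapping of
# language codes to values is stored in mongodb as a list of
# {'lang': ..., 'value': ...} documents. The field name 'title' is
# illustrative only.
#
# t = i18n(field_type=unicode, field_name='title')
# value = t()                 # fresh i18n dict for a document field
# value['en'] = u'hello'
# value['fr'] = u'bonjour'
# t.to_bson(value)            # -> [{'lang': 'en', 'value': u'hello'}, ...]
# t.to_python(t.to_bson(value))   # -> back to the language -> value mapping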
class Set(CustomType):
""" SET custom type to handle python set() type """
init_type = set
mongo_type = list
python_type = set
def __init__(self, structure_type=None):
super(Set, self).__init__()
self._structure_type = structure_type
def to_bson(self, value):
if value is not None:
return list(value)
def to_python(self, value):
if value is not None:
return set(value)
def validate(self, value, path):
if value is not None and self._structure_type is not None:
for val in value:
if not isinstance(val, self._structure_type):
raise ValueError('%s must be an instance of %s not %s' %
(path, self._structure_type.__name__, type(val).__name__))
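# A minimal sketch of using the Set custom type above inside a document
# structure. The TagDoc class is hypothetical; Set(unicode) stores a python
# set as a list in mongodb and type-checks its elements in validate().
#
# class TagDoc(SchemaDocument):
#     structure = {
#         'tags': Set(unicode),
#     }
#
# Set(unicode).to_bson(set([u'a', u'b']))   # -> a plain list for mongodb
# Set(unicode).to_python([u'a', u'b'])      # -> set([u'a', u'b'])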
|
|
import os
import sys
import skimage.transform
import skimage.exposure
import time
import glob
import numpy as np
import mahotas
import random
import matplotlib
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import json
from scipy.ndimage.filters import maximum_filter
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../../common'))
sys.path.insert(2,os.path.join(base_path, '../../database'))
from utility import Utility
from settings import Paths
from project import Project
from paths import Paths
from db import DB
# the idea is to grow the labels to cover the whole membrane
# image and label should be [0,1]
def adjust_imprecise_boundaries(image, label, number_iterations=5):
label = label.copy()
label_orig = label.copy()
for i in xrange(number_iterations):
# grow labels by one pixel
label = maximum_filter(label, 2)
# only keep pixels that are on dark membrane
non_valid_label = np.logical_and(label==1, image>0.7)
label[non_valid_label] = 0
# make sure original labels are preserved
label = np.logical_or(label==1, label_orig==1)
return label
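# A toy illustration of the boundary growing above, with made-up arrays. Dark
# pixels (image values <= 0.7) adjacent to an existing label are absorbed into
# it, while bright pixels stay excluded.
#
# image = np.array([[0.1, 0.2, 0.9]])
# label = np.array([[1, 0, 0]])
# adjust_imprecise_boundaries(image, label, number_iterations=1)
# # roughly -> array([[ True,  True, False]])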
def deform_image(image):
# assumes image is uint8
def apply_deformation(image, coordinates):
        # ndimage expects uint8, otherwise it introduces artifacts. Don't ask me why, it's stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=image.shape, scale=10)
displacement_y = np.random.normal(size=image.shape, scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,image.shape[0]), np.arange(0,image.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
return apply_deformation(np.uint8(image*255), coordinates)
def deform_images(image1, image2, image3=None):
# assumes image is uint8
def apply_deformation(image, coordinates):
        # ndimage expects uint8, otherwise it introduces artifacts. Don't ask me why, it's stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=image1.shape, scale=10)
displacement_y = np.random.normal(size=image1.shape, scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,image1.shape[0]), np.arange(0,image1.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed1 = apply_deformation(np.uint8(image1*255), coordinates)
deformed2 = apply_deformation(np.uint8(image2*255), coordinates)
if not image3 is None:
deformed3 = apply_deformation(image3, coordinates)
return (deformed1, deformed2, deformed3)
return (deformed1, deformed2)
def deform_images_list(images):
# assumes image is uint8
def apply_deformation(image, coordinates):
        # ndimage expects uint8, otherwise it introduces artifacts. Don't ask me why, it's stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=images.shape[:2], scale=10)
displacement_y = np.random.normal(size=images.shape[:2], scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed = images.copy()
for i in xrange(images.shape[2]):
deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates)
return deformed
def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005
if not doClahe:
sortedValues = np.sort( img.ravel())
minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))])
maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))])
normImg = np.float32(img - minVal) * (255 / (maxVal-minVal))
normImg[normImg<0] = 0
normImg[normImg>255] = 255
output = (np.float32(normImg) / 255.0)
return output
else:
output = skimage.exposure.equalize_adapthist(img)
return output
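# A quick sketch of normalizeImage with default settings: roughly the darkest
# and brightest 2.5% of pixel values are clipped and the remaining range is
# rescaled to [0, 1]. The input array below is made up for illustration.
#
# img = np.linspace(0, 255, 1000).reshape(100, 10)
# out = normalizeImage(img)
# # out.min() == 0.0 and out.max() == 1.0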
def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random):
start_time = time.time()
data_path = '/n/home00/fgonda/icon/data/reference'
#if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
if os.path.exists( data_path ):
pathPrefix = data_path
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif'
#img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif'
img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_backgroundMaskImages = pathPrefix + 'labels/background/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_label = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) )
whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float)
whole_set_labels = np.zeros(nsamples, dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
for img_index in xrange(np.shape(img_files_gray)[0]):
img = mahotas.imread(img_files_gray[img_index])
img = normalizeImage(img)
grayImages[:,:,img_index] = img
label_img = mahotas.imread(img_files_label[img_index])
labelImages[:,:,img_index] = label_img
mask_img = mahotas.imread(img_files_backgroundMask[img_index])
maskImages[:,:,img_index] = mask_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
#get rid of invalid image borders
border_patch = np.int(np.ceil(patchSize/2.0))
border = np.int(np.ceil(np.sqrt(2*(border_patch**2))))
label_img[:border,:] = 0 #top
label_img[-border:,:] = 0 #bottom
label_img[:,:border] = 0 #left
label_img[:,-border:] = 0 #right
mask_img[:border,:] = 0
mask_img[-border:,:] = 0
mask_img[:,:border] = 0
mask_img[:,-border:] = 0
membrane_indices = np.nonzero(label_img)
non_membrane_indices = np.nonzero(mask_img)
positiveSample = True
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
if positiveSample:
randmem = random.choice(xrange(len(membrane_indices[0])))
(row,col) = (membrane_indices[0][randmem],
membrane_indices[1][randmem])
label = 1.0
positiveSample = False
else:
randmem = random.choice(xrange(len(non_membrane_indices[0])))
(row,col) = (non_membrane_indices[0][randmem],
non_membrane_indices[1][randmem])
label = 0.0
positiveSample = True
imgPatch = img[row-border+1:row+border, col-border+1:col+border]
imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360)))
imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
imgPatch = np.rot90(imgPatch, random.randint(0,3))
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = label
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = rng.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i] = labels[shuffleIndex[i]]
data_set = (whole_data, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ' + '%.2fm' % (total_time / 60.)
rval = data_set
return rval
def generate_image_data(img, patchSize=29, rows=(0,)):
img = normalizeImage(img)
# pad image borders
border = np.int(np.ceil(patchSize/2.0))
img_padded = np.pad(img, border, mode='reflect')
whole_set_patches = np.zeros((len(rows)*img.shape[1], patchSize**2))
counter = 0
for row in rows:
for col in xrange(img.shape[1]):
imgPatch = img_padded[row+1:row+2*border, col+1:col+2*border]
whole_set_patches[counter,:] = imgPatch.flatten()
counter += 1
#normalize data
whole_set_patches = np.float32(whole_set_patches)
whole_set_patches = whole_set_patches - 0.5
return whole_set_patches
def stupid_map_wrapper(parameters):
f = parameters[0]
args = parameters[1:]
return f(*args)
def get_sample_sizes(annotations):
samples_sizes = []
n_labels = len(annotations)
for coordinates in annotations:
n_label_samples_size = len(coordinates)/2
samples_sizes.append( n_label_samples_size )
return samples_sizes
def gen_annotated_image(path, dim):
image = np.zeros( (dim[0], dim[1]) )
image[:,:] = -1
# load the annotations
with open( path ) as labels_f:
annotations = json.load( labels_f )
n_labels = len(annotations)
if n_labels == 0:
return
for i_label in range(n_labels):
i_coord = 0
coords = annotations[ i_label ]
for i in range(0, len(coords), 2):
            x = min(coords[i], dim[1]-1)
            y = min(coords[i+1], dim[0]-1)
image[x][y] = i_label
return image
def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, project=None):
nr_layers=3
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if purpose == 'train':
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
else:
images = DB.getImages( project.id, purpose=1, new=False, annotated=True )
path = Paths.ValidGrayscale
files_gray = []
data_labels = []
label_sample_sizes = np.array([ 0, 0])
#imgs = DB.getImages( project.id )
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( l_path ):
# load the annotations
with open( l_path ) as labels_f:
annotations = json.load( labels_f )
# skip if not enough samples in the annotations
sample_sizes = get_sample_sizes( annotations )
if np.sum( sample_sizes ) == 0:
continue
label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
files_gray.append( d_path )
data_labels.append( annotations )
if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
return None
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
border_patch = np.ceil(patchSize/2.0)
pad = patchSize
read_order = np.random.permutation(np.shape(files_gray)[0])
for index in read_order:
file_image = files_gray[index]
labels = data_labels[index]
sample_sizes = get_sample_sizes( labels )
img = mahotas.imread(files_gray[index])
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
membrane_img = gen_membrane_image( labels, img.shape )
#img_cs = int(np.floor(nr_layers/2))
#if purpose=='train':
# # adjust according to middle image
# membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0)
#get rid of invalid image borders
#membrane_img[:,-patchSize:] = 0
#membrane_img[-patchSize:,:] = 0
valid_indices = np.nonzero(membrane_img)
print valid_indices
if len(valid_indices[0]) == 0 or len(valid_indices[1]) == 0:
continue
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
memPatch = membrane_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
memPatch = np.fliplr(memPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
memPatch = np.rot90(memPatch, rotateInt)
#imgPatch = deform_image(imgPatch)
imgPatch, memPatch = deform_images( imgPatch, memPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
memPatch = memPatch / np.double(np.max(memPatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
memPatch = memPatch[offset_small_patch:offset_small_patch+outPatchSize,offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = memPatch.flatten()
counter = counter + 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
print np.min(whole_data), np.max(whole_data)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def newgen_training_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
files_gray = []
data_labels = []
label_sample_sizes = np.array([ 0, 0])
#imgs = DB.getImages( project.id )
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
l_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( l_path ):
# load the annotations
with open( l_path ) as labels_f:
annotations = json.load( labels_f )
# skip if not enough samples in the annotations
sample_sizes = get_sample_sizes( annotations )
if np.sum( sample_sizes ) == 0:
continue
label_sample_sizes = label_sample_sizes + np.array(sample_sizes)
files_gray.append( d_path )
data_labels.append( annotations )
print len(files_gray)
print len(data_labels)
print label_sample_sizes
if len( files_gray ) == 0 or len( data_labels ) == 0 or np.min( label_sample_sizes ) == 0:
return None
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
border_patch = np.ceil(patchSize/2.0)
pad = patchSize
read_order = np.random.permutation(np.shape(files_gray)[0])
for index in read_order:
file_image = files_gray[index]
labels = data_labels[index]
sample_sizes = get_sample_sizes( labels )
img = mahotas.imread(files_gray[index])
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
for label, coordinates in enumerate( labels ):
if counter >= nsamples:
break
ncoordinates = len(coordinates)
if ncoordinates == 0:
continue
# randomly sample from the label
indices = np.random.choice( ncoordinates, sample_sizes[label], replace=False)
for i in indices:
if i%2 == 1:
i = i-1
if counter >= nsamples:
break
col = coordinates[i]
row = coordinates[i+1]
r1 = int(row+patchSize-border_patch)
r2 = int(row+patchSize+border_patch)
c1 = int(col+patchSize-border_patch)
c2 = int(col+patchSize+border_patch)
imgPatch = img[r1:r2,c1:c2]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
#imgPatch = deform_image(imgPatch)
imgPatch = deform_image( imgPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = 1
counter = counter + 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
print np.min(whole_data), np.max(whole_data)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def gen_validation_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
img_search_string_membraneImages = '%s/*.tif'%(Paths.ValidMembranes)
img_search_string_labelImages = '%s/*.tif'%(Paths.ValidLabels)
img_search_string_grayImages = '%s/*.tif'%(Paths.ValidGray)
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in read_order:
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
if label_img.ndim == 3:
label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2]
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
#print img_files_gray[read_order[img_index]]
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
membranePatch = np.fliplr(membranePatch)
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
labelPatch = np.rot90(labelPatch, rotateInt)
labelPatch = relabel(labelPatch)
imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch))
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
data_set = (whole_data, whole_set_membranes, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def gen_training_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
print 'gen_data'
    if project is None:
return
start_time = time.time()
files_gray = []
files_membranes = []
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( m_path ):
files_gray.append( d_path )
files_membranes.append( m_path )
if len( files_gray ) == 0 or len( files_membranes ) == 0:
return None
print files_gray
print files_membranes
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
# read the data
# in random order
read_order = np.random.permutation(np.shape(files_gray)[0])
for img_index in read_order:
#print img_files_gray[img_index]
img = mahotas.imread(files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
membrane_img = gen_membrane_image( files_membranes[img_index], img.shape )
mask_img = np.ones((img.shape[0],img.shape[1]))
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
print valid_indices
exit(1)
whole_data = np.float32(whole_set_patches)
data_set = (whole_data, whole_set_membranes)
return data_set
# changed the patch sampling to use upper left corner instead of middle pixel
# for patch labels it doesn't matter and it makes sampling even and odd patches easier
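# A short worked example of the crop arithmetic used in these samplers (same
# patchSize/outPatchSize meaning as above): a patch anchored at its upper-left
# corner (row, col) covers rows row..row+patchSize-1, and the label crop starts at
# offset_small_patch = ceil((patchSize - outPatchSize) / 2); for patchSize=572 and
# outPatchSize=388 (the values used in __main__ below) the offset is 92, so the
# label window is the central 388x388 block of the 572x572 image patch.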
def oldgenerate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/'
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/'
pathPrefix = '/n/home00/fgonda/icon/data/reference/'
if not os.path.exists(pathPrefix):
pathPrefix = '/n/pfister_lab/vkaynig/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
print len(img_files_gray)
print len(img_files_membrane)
print len(img_files_labels)
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in read_order:
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
if label_img.ndim == 3:
label_img = label_img[:,:,0] + 255*label_img[:,:,1] + 255**2 * label_img[:,:,2]
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
#print img_files_gray[read_order[img_index]]
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
membrane_img = adjust_imprecise_boundaries(img, membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
imgPatch, membranePatch, labelPatch = deform_images(imgPatch, membranePatch, np.uint8(labelPatch))
else:
imgPatch, membranePatch = deform_images(imgPatch, membranePatch)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def generate_experiment_data_patch_prediction_layers(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, nr_layers=3):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
#read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in range(np.shape(img_files_gray)[0]):
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img_cs = int(np.floor(nr_layers/2))
img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files_gray)[0]-1)
img = grayImages[:,:,img_valid_range_indices]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
# adjust according to middle image
membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize,:]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
for flip_i in xrange(nr_layers):
imgPatch[:,:,flip_i] = np.fliplr(imgPatch[:,:,flip_i])
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
for rot_i in xrange(nr_layers):
imgPatch[:,:,rot_i] = np.rot90(imgPatch[:,:,rot_i], rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch*255,(patchSize,patchSize,1)), np.uint8(np.reshape(labelPatch,(patchSize,patchSize,1)))]))
imgPatch, membranePatch, labelPatch = np.split(deformed_images,[imgPatch.shape[2],imgPatch.shape[2]+1], axis=2)
else:
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch,(patchSize,patchSize,1))*255]))
imgPatch, membranePatch = np.split(deformed_images,[imgPatch.shape[2]], axis=2)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
#whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
for patch_i in xrange(nr_layers):
whole_set_patches[counter,patch_i,:] = imgPatch[:,:,patch_i].flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:,:] = data[shuffleIndex[i],:,:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
if __name__=="__main__":
import uuid
test = generate_experiment_data_patch_prediction(purpose='train', nsamples=30, patchSize=572, outPatchSize=388)
dir_path = './training_patches/'
for i in xrange(30):
unique_filename = str(uuid.uuid4())
img = np.reshape(test[1][i],(388,388))
img_gray = np.reshape(test[0][i],(572,572))
mahotas.imsave(dir_path+unique_filename+'.tif', np.uint8(img*255))
mahotas.imsave(dir_path+unique_filename+'_gray.tif', np.uint8((img_gray+0.5)*255))
#data_val = generate_experiment_data_supervised(purpose='validate', nsamples=10000, patchSize=65, balanceRate=0.5)
#data = generate_experiment_data_patch_prediction(purpose='validate', nsamples=2, patchSize=315, outPatchSize=215)
# plt.imshow(np.reshape(data[0][0],(315,315))); plt.figure()
# plt.imshow(np.reshape(data[1][0],(215,215))); plt.figure()
# plt.imshow(np.reshape(data[2][0],(215,215))); plt.show()
# image = mahotas.imread('ac3_input_0141.tif')
# image = normalizeImage(image)
# label = mahotas.imread('ac3_labels_0141.tif') / 255.
# test = adjust_imprecise_boundaries(image, label, 10)
# plt.imshow(label+image); plt.show()
# plt.imshow(test+image); plt.show()
|
|
import unittest
from svtplay_dl.fetcher.dash import DASH
from svtplay_dl.fetcher.hls import HLS
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.parser import setup_defaults
from svtplay_dl.utils.stream import audio_role
from svtplay_dl.utils.stream import format_prio
from svtplay_dl.utils.stream import language_prio
from svtplay_dl.utils.stream import sort_quality
from svtplay_dl.utils.stream import subtitle_filter
class streamTest_sort(unittest.TestCase):
def test_sort(self):
data = [
DASH(setup_defaults(), "http://example.com", 3000, None),
HLS(setup_defaults(), "http://example.com", 2000, None),
HTTP(setup_defaults(), "http://example.com", 3001, None),
]
assert all(
[
a[0] == b.bitrate
for a, b in zip(
sort_quality(data),
[
HTTP(setup_defaults(), "http://example.com", 3001, None),
DASH(setup_defaults(), "http://example.com", 3000, None),
HLS(setup_defaults(), "http://example.com", 2000, None),
],
)
],
)
class streamTestLanguage(unittest.TestCase):
def test_language_prio(self):
config = setup_defaults()
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
streams = language_prio(config, test_streams)
assert len(streams) == 3
def test_language_prio_select(self):
config = setup_defaults()
config.set("audio_language", "en")
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None, language="en"),
DASH(setup_defaults(), "http://example.com", 3001, None),
DASH(setup_defaults(), "http://example.com", 3002, None, language="sv"),
]
streams = language_prio(config, test_streams)
assert len(streams) == 1
class streamTestFormat(unittest.TestCase):
def test_language_prio(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
streams = format_prio(test_streams, ["h264-51"])
assert len(streams) == 1
def test_language_prio2(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
DASH(setup_defaults(), "http://example.com", 3001, None, codec="h264", channels="51"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
streams = format_prio(test_streams, ["h264"])
assert len(streams) == 2
def test_language_prio3(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
streams = format_prio(test_streams, ["h26e4"])
assert len(streams) == 0
class streamTestRole(unittest.TestCase):
def test_language_prio(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
streams = audio_role(setup_defaults(), test_streams)
assert len(streams) == 3
def test_language_prio2(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
config = setup_defaults()
config.set("audio_role", "x-sv")
streams = audio_role(config, test_streams)
assert len(streams) == 1
def test_language_prio3(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
config = setup_defaults()
config.set("audio_role", "sv")
streams = audio_role(config, test_streams)
assert len(streams) == 0
def test_language_prio4(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
config = setup_defaults()
config.set("audio_language", "sv")
streams = audio_role(config, test_streams)
assert len(streams) == 3
def test_language_prio5(self):
test_streams = [
DASH(setup_defaults(), "http://example.com", 3000, None),
DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
DASH(setup_defaults(), "http://example.com", 3002, None),
]
config = setup_defaults()
config.set("audio_role", "isii")
config.set("audio_language", "sv")
streams = audio_role(config, test_streams)
assert len(streams) == 0
class streamSubtitle(unittest.TestCase):
def test_subtitleFilter(self):
test_subs = [
subtitle(setup_defaults(), "wrst", "http://example.com"),
subtitle(setup_defaults(), "wrst", "http://example.com", "sv"),
subtitle(setup_defaults(), "wrst", "http://example.com", "dk"),
subtitle(setup_defaults(), "wrst", "http://example.com", "sv"),
]
subs = subtitle_filter(test_subs)
assert len(subs) == 3
def test_subtitleFilter2(self):
config = setup_defaults()
config.set("get_all_subtitles", True)
test_subs = [
subtitle(config, "wrst", "http://example.com"),
subtitle(config, "wrst", "http://example.com", subfix="sv"),
subtitle(config, "wrst", "http://example.com", subfix="dk"),
subtitle(config, "wrst", "http://example.com", subfix="no"),
]
subs = subtitle_filter(test_subs)
assert len(subs) == 4
def test_subtitleFilter3(self):
config = setup_defaults()
config.set("subtitle_preferred", "sv")
test_subs = [
subtitle(config, "wrst", "http://example.com"),
subtitle(config, "wrst", "http://example.com", subfix="sv"),
subtitle(config, "wrst", "http://example.com", subfix="dk"),
subtitle(config, "wrst", "http://example.com", subfix="no"),
]
subs = subtitle_filter(test_subs)
assert len(subs) == 1
def test_subtitleFilter4(self):
config = setup_defaults()
config.set("subtitle_preferred", "gr")
test_subs = [
subtitle(config, "wrst", "http://example.com"),
subtitle(config, "wrst", "http://example.com", subfix="sv"),
subtitle(config, "wrst", "http://example.com", subfix="dk"),
subtitle(config, "wrst", "http://example.com", subfix="no"),
]
subs = subtitle_filter(test_subs)
assert len(subs) == 0
def test_subtitleFilter5(self):
config = setup_defaults()
config.set("get_all_subtitles", True)
test_subs = [
subtitle(config, "wrst", "http://example.com"),
subtitle(config, "wrst", "http://example.com", subfix="sv"),
subtitle(config, "wrst", "http://example.com", subfix="sv"),
subtitle(config, "wrst", "http://example.com", subfix="no"),
]
subs = subtitle_filter(test_subs)
assert len(subs) == 3
|
|
# -*- coding: utf-8 -*-
"""Calendar is a dictionary like Python object that can render itself as VCAL
files according to rfc2445.
These are the defined components.
"""
from datetime import datetime, timedelta
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Contentline
from icalendar.parser import Contentlines
from icalendar.parser import Parameters
from icalendar.parser import q_join
from icalendar.parser import q_split
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.prop import TypesFactory
from icalendar.prop import vText, vDDDLists
from icalendar.timezone_cache import _timezone_cache
import pytz
import dateutil.rrule
from pytz.tzinfo import DstTzInfo
from icalendar.compat import unicode_type
######################################
# The component factory
class ComponentFactory(CaselessDict):
"""All components defined in rfc 2445 are registered in this factory class.
    To get a component you can use it like this (see the usage sketch after this class).
"""
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super(ComponentFactory, self).__init__(*args, **kwargs)
self['VEVENT'] = Event
self['VTODO'] = Todo
self['VJOURNAL'] = Journal
self['VFREEBUSY'] = FreeBusy
self['VTIMEZONE'] = Timezone
self['STANDARD'] = TimezoneStandard
self['DAYLIGHT'] = TimezoneDaylight
self['VALARM'] = Alarm
self['VCALENDAR'] = Calendar
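# A minimal usage sketch for the factory above (names taken from the registrations
# in __init__; shown as comments so nothing runs at import time):
#
#     factory = ComponentFactory()
#     event_cls = factory['VEVENT']     # -> Event
#     todo_cls = factory.get('vtodo')   # CaselessDict lookups are case-insensitive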
# These properties have multiple property values inlined in one property line,
# separated by commas. Use CaselessDict as a simple caseless set.
INLINE = CaselessDict({
'CATEGORIES': 1,
'RESOURCES': 1,
'FREEBUSY': 1,
})
_marker = []
class Component(CaselessDict):
"""Component is the base object for calendar, Event and the other
    components defined in RFC 2445. Normally you will not use this class
    directly, but rather one of the subclasses.
"""
name = None # should be defined in each component
required = () # These properties are required
singletons = () # These properties must only appear once
multiple = () # may occur more than once
exclusive = () # These properties are mutually exclusive
inclusive = () # if any occurs the other(s) MUST occur
# ('duration', 'repeat')
ignore_exceptions = False # if True, and we cannot parse this
# component, we will silently ignore
# it, rather than let the exception
# propagate upwards
# not_compliant = [''] # List of non-compliant properties.
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super(Component, self).__init__(*args, **kwargs)
# set parameters here for properties that use non-default values
self.subcomponents = [] # Components can be nested.
self.errors = [] # If we ignored exception(s) while
# parsing a property, contains error strings
# def is_compliant(self, name):
# """Returns True is the given property name is compliant with the
# icalendar implementation.
#
# If the parser is too strict it might prevent parsing erroneous but
# otherwise compliant properties. So the parser is pretty lax, but it is
    # possible to test for non-compliance by calling this method.
# """
# return name in not_compliant
def __bool__(self):
"""Returns True, CaselessDict would return False if it had no items.
"""
return True
# python 2 compatibility
__nonzero__ = __bool__
def is_empty(self):
"""Returns True if Component has no items or subcomponents, else False.
"""
return True if not (list(self.values()) + self.subcomponents) else False # noqa
@property
def is_broken(self):
return bool(self.errors)
#############################
# handling of property values
def _encode(self, name, value, parameters=None, encode=1):
"""Encode values to icalendar property values.
:param name: Name of the property.
:type name: string
        :param value: Value of the property. Either a basic Python type or
                      any of icalendar's own property types.
        :type value: Python native type or icalendar property type.
        :param parameters: Property parameter dictionary for the value. Only
                           available if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: icalendar property value
"""
if not encode:
return value
if isinstance(value, types_factory.all_types):
# Don't encode already encoded values.
return value
klass = types_factory.for_property(name)
obj = klass(value)
if parameters:
if isinstance(parameters, dict):
params = Parameters()
for key, item in parameters.items():
params[key] = item
parameters = params
assert isinstance(parameters, Parameters)
obj.params = parameters
return obj
def add(self, name, value, parameters=None, encode=1):
"""Add a property.
:param name: Name of the property.
:type name: string
        :param value: Value of the property. Either a basic Python type or
                      any of icalendar's own property types.
        :type value: Python native type or icalendar property type.
        :param parameters: Property parameter dictionary for the value. Only
                           available if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: None
"""
if isinstance(value, datetime) and\
name.lower() in ('dtstamp', 'created', 'last-modified'):
# RFC expects UTC for those... force value conversion.
if getattr(value, 'tzinfo', False) and value.tzinfo is not None:
value = value.astimezone(pytz.utc)
else:
# assume UTC for naive datetime instances
value = pytz.utc.localize(value)
# encode value
if encode and isinstance(value, list) \
and name.lower() not in ['rdate', 'exdate']:
# Individually convert each value to an ical type except rdate and
# exdate, where lists of dates might be passed to vDDDLists.
value = [self._encode(name, v, parameters, encode) for v in value]
else:
value = self._encode(name, value, parameters, encode)
# set value
if name in self:
# If property already exists, append it.
oldval = self[name]
if isinstance(oldval, list):
if isinstance(value, list):
value = oldval + value
else:
oldval.append(value)
value = oldval
else:
value = [oldval, value]
self[name] = value
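    # Usage sketch for add() (hypothetical values, comments only):
    #
    #     event = Event()
    #     event.add('summary', 'Team meeting')              # encoded via types_factory to vText
    #     event.add('dtstamp', datetime(2024, 1, 1, 9, 0))  # naive datetime is assumed UTC (see above)
    #     event.add('summary', 'Second value')              # existing key -> values collected in a list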
def _decode(self, name, value):
"""Internal for decoding property values.
"""
# TODO: Currently the decoded method calls the icalendar.prop instances
# from_ical. We probably want to decode properties into Python native
# types here. But when parsing from an ical string with from_ical, we
# want to encode the string into a real icalendar.prop property.
if isinstance(value, vDDDLists):
# TODO: Workaround unfinished decoding
return value
decoded = types_factory.from_ical(name, value)
# TODO: remove when proper decoded is implemented in every prop.* class
# Workaround to decode vText properly
if isinstance(decoded, vText):
decoded = decoded.encode(DEFAULT_ENCODING)
return decoded
def decoded(self, name, default=_marker):
"""Returns decoded value of property.
"""
# XXX: fail. what's this function supposed to do in the end?
# -rnix
if name in self:
value = self[name]
if isinstance(value, list):
return [self._decode(name, v) for v in value]
return self._decode(name, value)
else:
if default is _marker:
raise KeyError(name)
else:
return default
########################################################################
    # Inline values. A few properties have multiple values inlined in one
# property line. These methods are used for splitting and joining these.
def get_inline(self, name, decode=1):
"""Returns a list of values (split on comma).
"""
vals = [v.strip('" ') for v in q_split(self[name])]
if decode:
return [self._decode(name, val) for val in vals]
return vals
def set_inline(self, name, values, encode=1):
"""Converts a list of values into comma seperated string and sets value
to that.
"""
if encode:
values = [self._encode(name, value, encode=1) for value in values]
self[name] = types_factory['inline'](q_join(values))
#########################
# Handling of components
def add_component(self, component):
"""Add a subcomponent to this component.
"""
self.subcomponents.append(component)
def _walk(self, name):
"""Walk to given component.
"""
result = []
if name is None or self.name == name:
result.append(self)
for subcomponent in self.subcomponents:
result += subcomponent._walk(name)
return result
def walk(self, name=None):
"""Recursively traverses component and subcomponents. Returns sequence
of same. If name is passed, only components with name will be returned.
"""
if name is not None:
name = name.upper()
return self._walk(name)
#####################
# Generation
def property_items(self, recursive=True, sorted=True):
"""Returns properties in this component and subcomponents as:
[(name, value), ...]
"""
vText = types_factory['text']
properties = [('BEGIN', vText(self.name).to_ical())]
if sorted:
property_names = self.sorted_keys()
else:
property_names = self.keys()
for name in property_names:
values = self[name]
if isinstance(values, list):
# normally one property is one line
for value in values:
properties.append((name, value))
else:
properties.append((name, values))
if recursive:
# recursion is fun!
for subcomponent in self.subcomponents:
properties += subcomponent.property_items(sorted=sorted)
properties.append(('END', vText(self.name).to_ical()))
return properties
@classmethod
def from_ical(cls, st, multiple=False):
"""Populates the component recursively from a string.
"""
stack = [] # a stack of components
comps = []
for line in Contentlines.from_ical(st): # raw parsing
if not line:
continue
try:
name, params, vals = line.parts()
except ValueError as e:
# if unable to parse a line within a component
# that ignores exceptions, mark the component
# as broken and skip the line. otherwise raise.
component = stack[-1] if stack else None
if not component or not component.ignore_exceptions:
raise
component.errors.append((None, unicode_type(e)))
continue
uname = name.upper()
# check for start of component
if uname == 'BEGIN':
# try and create one of the components defined in the spec,
            # otherwise fall back to the generic Component class for robustness.
c_name = vals.upper()
c_class = component_factory.get(c_name, Component)
# If component factory cannot resolve ``c_name``, the generic
# ``Component`` class is used which does not have the name set.
# That's opposed to the usage of ``cls``, which represents a
# more concrete subclass with a name set (e.g. VCALENDAR).
component = c_class()
if not getattr(component, 'name', ''): # undefined components
component.name = c_name
stack.append(component)
            # check for end of component
elif uname == 'END':
# we are done adding properties to this component
# so pop it from the stack and add it to the new top.
component = stack.pop()
if not stack: # we are at the end
comps.append(component)
else:
stack[-1].add_component(component)
if vals == 'VTIMEZONE' and \
'TZID' in component and \
component['TZID'] not in pytz.all_timezones and \
component['TZID'] not in _timezone_cache:
_timezone_cache[component['TZID']] = component.to_tz()
# we are adding properties to the current top of the stack
else:
factory = types_factory.for_property(name)
component = stack[-1] if stack else None
if not component:
raise ValueError('Property "{prop}" does not have '
'a parent component.'.format(prop=name))
datetime_names = ('DTSTART', 'DTEND', 'RECURRENCE-ID', 'DUE',
'FREEBUSY', 'RDATE', 'EXDATE')
try:
if name in datetime_names and 'TZID' in params:
vals = factory(factory.from_ical(vals, params['TZID']))
else:
vals = factory(factory.from_ical(vals))
except ValueError as e:
if not component.ignore_exceptions:
raise
component.errors.append((uname, unicode_type(e)))
component.add(name, None, encode=0)
else:
vals.params = params
component.add(name, vals, encode=0)
if multiple:
return comps
if len(comps) > 1:
raise ValueError('Found multiple components where '
'only one is allowed: {st!r}'.format(**locals()))
if len(comps) < 1:
raise ValueError('Found no components where '
'exactly one is required: '
'{st!r}'.format(**locals()))
return comps[0]
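    # Round-trip sketch (comments only; a minimal calendar string is assumed):
    #
    #     cal = Calendar.from_ical(
    #         'BEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//test//EN\r\nEND:VCALENDAR\r\n')
    #     cal.name          # -> 'VCALENDAR'
    #     cal.to_ical()     # serialises the component and its subcomponents back to bytes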
def content_line(self, name, value, sorted=True):
"""Returns property as content line.
"""
params = getattr(value, 'params', Parameters())
return Contentline.from_parts(name, params, value, sorted=sorted)
def content_lines(self, sorted=True):
"""Converts the Component and subcomponents into content lines.
"""
contentlines = Contentlines()
for name, value in self.property_items(sorted=sorted):
cl = self.content_line(name, value, sorted=sorted)
contentlines.append(cl)
contentlines.append('') # remember the empty string in the end
return contentlines
def to_ical(self, sorted=True):
'''
:param sorted: Whether parameters and properties should be
lexicographically sorted.
'''
content_lines = self.content_lines(sorted=sorted)
return content_lines.to_ical()
def __repr__(self):
"""String representation of class with all of it's subcomponents.
"""
subs = ', '.join([str(it) for it in self.subcomponents])
return '%s(%s%s)' % (
self.name or type(self).__name__,
dict(self),
', %s' % subs if subs else ''
)
#######################################
# components defined in RFC 5545
class Event(Component):
name = 'VEVENT'
canonical_order = (
'SUMMARY', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP',
'UID', 'RECURRENCE-ID', 'SEQUENCE', 'RRULE', 'RDATE',
'EXDATE',
)
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'GEO', 'LAST-MODIFIED',
'LOCATION', 'ORGANIZER', 'PRIORITY', 'DTSTAMP', 'SEQUENCE', 'STATUS',
'SUMMARY', 'TRANSP', 'URL', 'RECURRENCE-ID', 'DTEND', 'DURATION',
'UID',
)
exclusive = ('DTEND', 'DURATION',)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
ignore_exceptions = True
class Todo(Component):
name = 'VTODO'
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'COMPLETED', 'CREATED', 'DESCRIPTION', 'DTSTAMP', 'DTSTART',
'GEO', 'LAST-MODIFIED', 'LOCATION', 'ORGANIZER', 'PERCENT-COMPLETE',
'PRIORITY', 'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID',
'URL', 'DUE', 'DURATION',
)
exclusive = ('DUE', 'DURATION',)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
class Journal(Component):
name = 'VJOURNAL'
required = ('UID', 'DTSTAMP',)
singletons = (
'CLASS', 'CREATED', 'DTSTART', 'DTSTAMP', 'LAST-MODIFIED', 'ORGANIZER',
'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID', 'URL',
)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'RELATED', 'RDATE', 'RRULE', 'RSTATUS', 'DESCRIPTION',
)
class FreeBusy(Component):
name = 'VFREEBUSY'
required = ('UID', 'DTSTAMP',)
singletons = (
'CONTACT', 'DTSTART', 'DTEND', 'DTSTAMP', 'ORGANIZER',
'UID', 'URL',
)
multiple = ('ATTENDEE', 'COMMENT', 'FREEBUSY', 'RSTATUS',)
class Timezone(Component):
name = 'VTIMEZONE'
canonical_order = ('TZID',)
required = ('TZID',) # it also requires one of components DAYLIGHT and STANDARD
singletons = ('TZID', 'LAST-MODIFIED', 'TZURL',)
@staticmethod
def _extract_offsets(component, tzname):
"""extract offsets and transition times from a VTIMEZONE component
:param component: a STANDARD or DAYLIGHT component
:param tzname: the name of the zone
"""
offsetfrom = component['TZOFFSETFROM'].td
offsetto = component['TZOFFSETTO'].td
dtstart = component['DTSTART'].dt
        # offsets need to be rounded to the next minute, we might lose up
# to 30 seconds accuracy, but it can't be helped (datetime
# supposedly cannot handle smaller offsets)
offsetto_s = int((offsetto.seconds + 30) / 60) * 60
offsetto = timedelta(days=offsetto.days, seconds=offsetto_s)
offsetfrom_s = int((offsetfrom.seconds + 30) / 60) * 60
offsetfrom = timedelta(days=offsetfrom.days, seconds=offsetfrom_s)
# expand recurrences
if 'RRULE' in component:
rrulestr = component['RRULE'].to_ical().decode('utf-8')
rrule = dateutil.rrule.rrulestr(rrulestr, dtstart=dtstart)
if not set(['UNTIL', 'COUNT']).intersection(
component['RRULE'].keys()):
# pytz.timezones don't know any transition dates after 2038
# either
rrule._until = datetime(2038, 12, 31)
elif rrule._until.tzinfo:
rrule._until = rrule._until.replace(tzinfo=None)
transtimes = rrule
# or rdates
elif 'RDATE' in component:
if not isinstance(component['RDATE'], list):
rdates = [component['RDATE']]
else:
rdates = component['RDATE']
transtimes = [dtstart] + [leaf.dt for tree in rdates for
leaf in tree.dts]
else:
transtimes = [dtstart]
transitions = [(transtime, offsetfrom, offsetto, tzname) for
transtime in set(transtimes)]
if component.name == 'STANDARD':
is_dst = 0
elif component.name == 'DAYLIGHT':
is_dst = 1
return is_dst, transitions
def to_tz(self):
"""convert this VTIMEZONE component to a pytz.timezone object
"""
zone = str(self['TZID'])
transitions = []
dst = {}
for component in self.walk():
if type(component) == Timezone:
continue
try:
tzname = str(component['TZNAME'])
except KeyError:
tzname = '{0}_{1}_{2}_{3}'.format(
zone,
component['DTSTART'].to_ical().decode('utf-8'),
component['TZOFFSETFROM'].to_ical(), # for whatever reason this is str/unicode
component['TZOFFSETTO'].to_ical(), # for whatever reason this is str/unicode
)
dst[tzname], component_transitions = self._extract_offsets(
component, tzname
)
transitions.extend(component_transitions)
transitions.sort()
transition_times = [
transtime - osfrom for transtime, osfrom, _, _ in transitions
]
# transition_info is a list with tuples in the format
# (utcoffset, dstoffset, name)
# dstoffset = 0, if current transition is to standard time
# = this_utcoffset - prev_standard_utcoffset, otherwise
transition_info = []
for num, (transtime, osfrom, osto, name) in enumerate(transitions):
dst_offset = False
if not dst[name]:
dst_offset = timedelta(seconds=0)
else:
# go back in time until we find a transition to dst
for index in range(num - 1, -1, -1):
if not dst[transitions[index][3]]: # [3] is the name
dst_offset = osto - transitions[index][2] # [2] is osto # noqa
break
# when the first transition is to dst, we didn't find anything
# in the past, so we have to look into the future
if not dst_offset:
for index in range(num, len(transitions)):
if not dst[transitions[index][3]]: # [3] is the name
dst_offset = osto - transitions[index][2] # [2] is osto # noqa
break
assert dst_offset is not False
transition_info.append((osto, dst_offset, name))
cls = type(zone, (DstTzInfo,), {
'zone': zone,
'_utc_transition_times': transition_times,
'_transition_info': transition_info
})
return cls()
class TimezoneStandard(Component):
name = 'STANDARD'
required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM',)
multiple = ('COMMENT', 'RDATE', 'TZNAME', 'RRULE', 'EXDATE')
class TimezoneDaylight(Component):
name = 'DAYLIGHT'
required = TimezoneStandard.required
singletons = TimezoneStandard.singletons
multiple = TimezoneStandard.multiple
class Alarm(Component):
name = 'VALARM'
# some properties MAY/MUST/MUST NOT appear depending on ACTION value
required = ('ACTION', 'TRIGGER',)
singletons = (
'ATTACH', 'ACTION', 'DESCRIPTION', 'SUMMARY', 'TRIGGER',
'DURATION', 'REPEAT',
)
inclusive = (('DURATION', 'REPEAT',), ('SUMMARY', 'ATTENDEE',))
multiple = ('ATTENDEE', 'ATTACH')
class Calendar(Component):
"""This is the base object for an iCalendar file.
"""
name = 'VCALENDAR'
canonical_order = ('VERSION', 'PRODID', 'CALSCALE', 'METHOD',)
required = ('PRODID', 'VERSION', )
singletons = ('PRODID', 'VERSION', 'CALSCALE', 'METHOD')
# These are read-only singletons, so one instance is enough for the module
types_factory = TypesFactory()
component_factory = ComponentFactory()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file is part of the Scribee project.
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.1'
import os
import itertools
from pygments.token import Token
from pygments.lexers import get_lexer_by_name
from entity import Entity, DocBlock
lexer = get_lexer_by_name('python')
def get_module(filename):
if not filename or not os.path.exists(filename):
return None
root, ext = os.path.splitext(filename)
parent_filename, sep, module_name = root.replace('\\', '/').rpartition('/')
if module_name == "__init__":
parent_filename, sep, module_name = parent_filename.rpartition('/')
if not ext:
filename = "/".join((filename, "__init__.py"))
if not os.path.exists(filename):
return None
parent = get_module(parent_filename)
return Entity.get_or_create(module_name, Entity.Types.Namespace, sources=[filename], parent=parent)
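# Path-handling sketch for get_module (hypothetical paths, comments only): for
# 'pkg/sub/mod.py' it creates/looks up Namespace entities for 'pkg', then 'sub',
# then 'mod', chaining them through the parent argument and returning the 'mod'
# entity; for 'pkg/sub/__init__.py' the returned entity is 'sub' itself.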
def parse(file):
content = "\n".join(file)
tokens = lexer.get_tokens(content)
return parseTokens(tokens, file.filename())
def parseTokens(tokens, filename):
start = len(Entity.entities)
module = get_module(filename)
token_rows = splitTokenRows(tokens)
findMembers(token_rows, filename, module)
return len(Entity.entities) - start
def splitTokenRows(tokens):
token_rows = []
token_row = []
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Text and content == "\n":
if len(token_row) > 0:
token_rows.append(token_row)
token_row = []
else:
token_row.append(token)
return token_rows
def findMembers(token_rows, filename, parent, row_cursor = 0, block_space = 0):
out = False
current_row = row_cursor
# Analyze tokens, row by row.
while (current_row < len(token_rows)):
token_row = token_rows[current_row]
def token_streamer():
for t in token_row:
yield t
tokens = token_streamer()
spacing = True
space = 0
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Text and spacing:
space += len(content)
else:
spacing = False
if block_space == space:
if type == Token.Keyword.Namespace:
# Skip the current row.
break
if type == Token.Name.Function:
entity = Entity.get_or_create(content, Entity.Types.Function, sources=[filename], parent=parent)
findArguments(tokens, filename, entity)
findDoc(token_rows[current_row + 1], filename, entity)
appendFunction(entity, parent)
elif type == Token.Name.Class:
entity = Entity.get_or_create(content, Entity.Types.Class, sources=[filename], parent=parent)
findDoc(token_rows[current_row + 1], filename, entity)
findMembers(token_rows, filename, entity, current_row + 1, space + 4)
appendClass(entity, parent)
elif type == Token.Name:
entity = Entity.get_or_create(content, Entity.Types.Variable, sources=[filename], parent=parent)
entity.value = None
findAssignment(tokens, filename, entity)
appendVariable(entity, parent)
elif type == Token.Literal.String.Doc:
doc = DocBlock.get_or_create(content, source=filename, parent=parent)
elif space < block_space:
out = True
break
if out:
break
current_row += 1
def findAssignment(tokens, filename, parent):
assignment = False
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Punctuation and content == ',':
break
elif type == Token.Operator and content == '=':
assignment = True
elif assignment and type in [Token.Literal.Number.Integer, Token.Name.Builtin.Pseudo]:
parent.value = content
def findArguments(tokens, filename, parent):
# Find open bracket.
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Punctuation and content == '(':
break
# Find all arguments before the first close bracket.
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Punctuation and content == ')':
break
elif type == Token.Name:
argument = Entity.get_or_create(content, Entity.Types.Variable, sources=[filename], parent=parent)
argument.value = None
findAssignment(tokens, filename, argument)
appendArgument(argument, parent)
def findDoc(tokens, filename, parent):
doc = None
for token in tokens:
type = token[0]
content = token[1]
if type == Token.Literal.String.Doc:
doc = DocBlock.get_or_create(content, source=filename, parent=parent)
return doc
def appendClass(entity, parent):
if parent:
if not hasattr(parent, "classes"):
parent.classes = []
parent.classes.append(entity)
def appendFunction(entity, parent):
if parent:
if not hasattr(parent, "functions"):
parent.functions = []
parent.functions.append(entity)
def appendVariable(entity, parent):
if parent:
if not hasattr(parent, "variables"):
parent.variables = []
parent.variables.append(entity)
def appendArgument(entity, parent):
if parent:
if not hasattr(parent, "arguments"):
parent.arguments = []
parent.arguments.append(entity)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Helper functions for training baseline seq-to-seq model."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
from flax import linen as nn
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
from latent_programmer import decode
from latent_programmer import models
def create_learning_rate_scheduler(
base_learning_rate=0.5,
factors='constant * linear_warmup * rsqrt_normalized_decay',
warmup_steps=16000,
decay_factor=0.5,
steps_per_decay=50000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
    * rsqrt_decay: divide by square root of max(step, warmup_steps)
    * rsqrt_normalized_decay: like rsqrt_decay, but scaled by sqrt(warmup_steps)
      so the factor is 1.0 at step == warmup_steps.
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
base_learning_rate: float, the starting constant for the lr schedule.
factors: a string with factors separated by '*' that defines the schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
    A function learning_rate(step) returning the step-dependent learning rate
    as a float scalar.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
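# Usage sketch (hypothetical hyperparameters, comments only):
#
#     lr_fn = create_learning_rate_scheduler(
#         base_learning_rate=1e-3,
#         factors='constant * linear_warmup',
#         warmup_steps=1000)
#     lr_fn(500)    # -> 5e-4 (halfway through warmup), as a jnp.float32 scalar
#     lr_fn(2000)   # -> 1e-3 (warmup finished; constant thereafter)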
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
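# Shape/weighting sketch (comments only): with logits of shape [batch, length, vocab]
# and integer targets of shape [batch, length], the return value is
# (sum of per-token losses, normalizing factor); the factor is batch * length when
# weights is None, or weights.sum() when a padding mask is passed, so callers such
# as train_step below take the mean loss as loss_sum / normalizing_factor.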
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
acc = acc * weights
normalizing_factor = weights.sum()
return acc.sum(), normalizing_factor
def compute_metrics(logits, targets, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
acc, _ = compute_weighted_accuracy(logits, targets, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
inputs,
outputs,
programs,
learning_rate_fn,
config,
train_rng=None):
"""Train on batch of program tasks."""
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
train_rng, new_train_rng = jax.random.split(train_rng)
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
def loss_fn(params):
"""Loss function used for training."""
logits = models.ProgramTransformer(config).apply(
{'params': params},
inputs,
outputs,
programs,
rngs={'dropout': train_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
# Get metrics.
metrics = compute_metrics(logits, programs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_train_rng
def eval_step(params, inputs, outputs, programs, config):
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
logits = models.ProgramTransformer(config).apply(
{'params': params}, inputs, outputs, programs)
return compute_metrics(logits, programs, weights)
def initialize_cache(inputs, outputs, programs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (programs.shape[0], max_decode_len)
initial_variables = models.ProgramTransformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, config.dtype),
jnp.ones(outputs.shape, config.dtype),
jnp.ones(target_shape, config.dtype))
return initial_variables['cache']
def predict_step(params,
inputs,
outputs,
cache,
eos_token,
max_decode_len,
beam_size,
config):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
flat_encoded = decode.flat_batch_beam_expand(
models.ProgramTransformer(config).apply(
{'params': params},
inputs,
outputs,
method=models.ProgramTransformer.encode),
beam_size)
encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
flat_encoded_padding_mask = decode.flat_batch_beam_expand(
encoded_padding_mask, beam_size)
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.ProgramTransformer(config).apply(
{'params': params, 'cache': flat_cache},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
mutable=['cache'],
method=models.ProgramTransformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
bos_token=config.bos_token,
eos_token=eos_token,
max_decode_len=max_decode_len)
# Beam search returns [n_batch, n_beam, n_length] with beam dimension
# sorted in increasing order of log-probability.
return beam_seqs
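# Assumed decoding flow (illustrative only): the cache is initialised for the
# padded target shape and predict_step is pmapped with the static decoding
# hyper-parameters; the last beam is kept because beam_search returns beams
# sorted in increasing order of log-probability.
#
#   p_init_cache = jax.pmap(initialize_cache, static_broadcasted_argnums=(3, 4))
#   p_pred_step = jax.pmap(predict_step, axis_name='batch',
#                          static_broadcasted_argnums=(4, 5, 6, 7))
#   cache = p_init_cache(inputs, outputs, programs, max_decode_len,
#                        predict_config)
#   beams = p_pred_step(params, inputs, outputs, cache, eos_token,
#                       max_decode_len, beam_size, predict_config)
#   best_programs = beams[:, :, -1, :]  # highest log-probability beam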
# Util functions for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
tile_dims = [1] * len(x.shape)
tile_dims[0] = batch_pad
return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)
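# Example: pad_examples(np.zeros((3, 5)), 8) tiles the last row five times and
# returns an (8, 5) array, so a short final evaluation batch can still be
# sharded evenly across devices.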
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return x.reshape((n_device * n_batch,) + tuple(remaining_dims))
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
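# Illustrative use (assumed, not from this file): summing per-host counters,
# e.g. the number of tasks solved locally, into a global total; only the first
# device of each host participates in the psum.
#
#   total_solved = per_host_sum_pmap(jnp.asarray(num_solved_on_this_host))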
def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
"""Evaluate predicted program beams."""
best_p, best_score = None, -1
# predicted shape [beam_size, length]
for beam in predicted:
try:
p = parse_beam_fn(beam)
p_outs = [p(inp) for inp in inputs]
score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
if score > best_score:
best_p, best_score = p, score
except: # pylint: disable=bare-except
pass
if best_score >= len(inputs): # Found solution.
break
return best_p, best_score
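# Note (descriptive, not from the original file): eval_predicted assumes that
# parse_beam_fn turns a decoded token sequence into a Python callable; it scans
# the beams in order and returns the first program that satisfies every I/O
# example, or otherwise the best-scoring parseable program found.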
|
|
# -*- python -*-
#
# This file is part of CNO software
#
# Copyright (c) 2013-2014 - EBI-EMBL
#
# File author(s): Thomas Cokelaer <cokelaer@ebi.ac.uk>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: http://github.com/cellnopt/cellnopt
#
##############################################################################
from future.utils import iteritems
import sys
import os
import pandas as pd
import pylab
from cno.core import CNOBase, CNORBase
from cno.core.params import OptionsBase
from cno.core.results import ODEResults
from cno.core import ReportODE
from cno.core.params import ParamsSSM
from biokit.rtools import bool2R
from cno.core.params import params_to_update
__all__ = ["CNORode"]
class CNORode(CNOBase, CNORBase):
"""Access to CellNOptR R package to run boolean analysis
::
c = pipeline.CNObool("PKN-test.sif", "MD-test.csv")
c.optimise(compression=True, expansion=True, reltol=.15)
Results are stored in :attr:`results`. Information stored are various.
The errors corresponding to the best models can be visualised with :meth:`plot_errors`
and models within the tolerance are stored in :attr:`models.
.. plot::
:include-source:
from cno import cnodata, CNORbool
c = CNORode(cnodata("PKN-ToyMMB.sif"),
cnodata("MD-ToyMMB.csv"))
c.optimise()
c.plot_errors()
"""
def __init__(self, model=None, data=None, tag=None, verbose=True,
verboseR=False, config=None, use_cnodata=False):
CNOBase.__init__(self,model, data, verbose=verbose, tag=tag,
config=config, use_cnodata=use_cnodata)
CNORBase.__init__(self, verboseR=verboseR)
self._report = ReportODE()
self._report.Rdependencies = [] ## just to speed up report
self.results = ODEResults()
self.config.General.pknmodel.value = self.pknmodel.filename
self.config.General.data.value = self.data.filename
p = ParamsSSM()
self.config.add_section(p)
self._library = 'CNORode'
#CNORodePBMstNeu
def _init(self):
script_template = """
library(%(library)s)
pknmodel = readSIF("%(pknmodel)s")
cnolist = CNOlist("%(midas)s")
model = preprocessing(cnolist, pknmodel, compression=%(compression)s,
expansion=%(expansion)s, cutNONC=%(cutnonc)s,
maxInputsPerGate=%(maxInputsPerGate)s)
reactions = model$reacID
species = colnames(cnolist@signals[[1]])"""
params = {
'library': self._library,
'pknmodel': self.pknmodel.filename,
'midas': self.data.filename,
'compression': bool2R(self._compression),
'expansion': bool2R(self._expansion),
'cutnonc': bool2R(self._cutnonc),
'maxInputsPerGate': self._max_inputs_per_gate,
}
self.session.run(script_template % params)
self.species = self.session.species
@params_to_update
def optimise(self, n_diverse=10, dim_ref_set=10, maxtime=60,
verbose=False, reltol=1e-4, atol=1e-3, maxeval='Inf',
transfer_function=3, maxstepsize='Inf', reuse_ode_params=False,
local_solver=None):
"""Optimise the ODE parameters using SSM algorithm
        :param int maxtime: (default 60)
        :param int n_diverse: (default 10)
        :param int dim_ref_set: (default 10)
        :param bool reuse_ode_params: if True, load the ode_params.RData file
            saved in a previous run; it must be compatible with the model.

        verbose should be False at all times inside the R code. Here, verbose
        means that we want to see the status of the optimisation (not all
        warnings and R errors).
"""
self.logging.info("Running the optimisation. Can take a very long"
"time. To see the progression, set verboseR "
"attribute to True")
# update config GA section with user parameters
self._update_config('SSM', self.optimise.actual_kwargs)
ssmd = self.config.SSM.as_dict()
if self.session.get('ode_params') is None:
self.session.run('ode_params=NULL')
if reuse_ode_params is False:
self.session.run('ode_params=NULL')
# todo: ode_params to be provided as input
script = """
library(%(library)s)
pknmodel = readSIF("%(pknmodel)s")
cnolist = CNOlist("%(midas)s")
model = preprocessing(cnolist, pknmodel, compression=%(compression)s,
expansion=%(expansion)s, cutNONC=%(cutnonc)s,
maxInputsPerGate=%(maxInputsPerGate)s)
reactions = model$reacID
species = colnames(cnolist@signals[[1]])
if (is.null(ode_params) == TRUE){
ode_params = createLBodeContPars(model)
}
ode_params = parEstimationLBodeSSm(cnolist, model,
maxtime=%(maxtime)s, maxStepSize=%(maxstepsize)s, dim_refset=%(dim_ref_set)s, maxeval=%(maxeval)s,
verbose=F, ndiverse=%(n_diverse)s, ode_parameters=ode_params,
local_solver=%(local_solver)s)
"""
if local_solver is None:
local_solver = 'NULL'
params = {
'library': self._library,
'pknmodel': self.pknmodel.filename,
'midas': self.data.filename,
'compression': bool2R(self._compression),
'expansion': bool2R(self._expansion),
'cutnonc': bool2R(self._cutnonc),
'maxInputsPerGate': self._max_inputs_per_gate,
'local_solver': local_solver
}
params.update(ssmd)
self.session.run(script % params)
ssm_results = self.session.ode_params['ssm_results'].copy()
self.ssm_results = ssm_results
results = {
'best_score': ssm_results['fbest'],
'all_scores': ssm_results['f'],
'reactions': self.session.reactions[:],
'best_params': ssm_results['xbest']
}
for k,v in self.session.ode_params.items():
results[k] = v.copy()
self.results = ODEResults()
self.results.results = results
self.species = self.session.species
def plot_errors(self, show=True):
self._set_simulation()
self.midas.plot(mode="mse")
#self.midas.plotSim()
if show is False:
pylab.close()
def simulate(self, params, verboseR=False):
        # The first call is slow; subsequent calls are faster, although still
        # about 10 times slower than the pure R version.
save_verboseR = self.verboseR
self.verboseR = verboseR
if self.session.get("simulator_initialised") is None:
script = """
library(%(library)s)
pknmodel = readSIF("%(pknmodel)s")
cnolist = CNOlist("%(midas)s")
model = preprocessing(cnolist, pknmodel, compression=%(compression)s,
expansion=%(expansion)s, cutNONC=%(cutnonc)s,
maxInputsPerGate=%(maxInputsPerGate)s)
indices = indexFinder(cnolist, model,verbose=FALSE)
ode_params = createLBodeContPars(model)
objective_function = getLBodeContObjFunction(cnolist, model,
ode_params, indices)
simulator_initialised = T
"""
pars = {
'library': self._library,
'pknmodel': self.pknmodel.filename,
'midas': self.data.filename,
'compression': bool2R(self._compression),
'expansion': bool2R(self._expansion),
'cutnonc': bool2R(self._cutnonc),
'maxInputsPerGate': self._max_inputs_per_gate,
}
self.session.run(script % pars)
self.session['params'] = params
script = """
score = objective_function(params)
"""
self.session.run(script)
self.verboseR = save_verboseR
return self.session.score
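    # Illustrative usage (assumed): once :meth:`optimise` has run, the best
    # parameter vector can be re-scored directly, e.g.
    #     score = c.simulate(c.results.results['best_params'])
    # The first call is the slowest because it initialises the R objective
    # function; later calls reuse it.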
def get_sim_data(self, bs=None):
"""
        The input can be a bitstring of the correct length and order,
        or a model.
"""
if bs is None:
bs = self.results.results.parValues
else:
# TODO check assert length bs is correct
pass
script_template = """
library(%(library)s)
pknmodel = readSIF("%(pknmodel)s")
cnolist = CNOlist("%(midas)s")
model = preprocessing(cnolist, pknmodel, compression=%(compression)s,
expansion=%(expansion)s, cutNONC=%(cutnonc)s,
maxInputsPerGate=%(maxInputsPerGate)s)
sim_data = plotLBodeFitness(cnolist,model, ode_parameters=ode_params)
"""
params = {
'library': self._library,
'pknmodel': self.pknmodel.filename,
'midas': self.data.filename,
'compression': bool2R(self._compression),
'expansion': bool2R(self._expansion),
'cutnonc': bool2R(self._cutnonc),
'maxInputsPerGate': self._max_inputs_per_gate,
#'tag':tag
}
script = script_template % params
self.session.run(script)
# FIXME what about species/experiments
sim_data = self.session.sim_data
self.sim = pd.concat([pd.DataFrame(x, columns=self.species)
for x in sim_data])
def _get_models(self):
return self.results.cnorbool.models
models = property(_get_models)
def _set_simulation(self):
self.get_sim_data()
self.midas.create_random_simulation()
Ntimes = len(self.midas.times)
Nexp = len(self.midas.experiments.index)
sim = self.sim.copy()
sim['time'] = [time for time in self.midas.times for x in range(0, Nexp)]
sim['experiment'] = list(self.midas.experiments.index) * Ntimes
sim['cellLine'] = [self.midas.cellLines[0]] * sim.shape[0]
sim.set_index(['cellLine', 'experiment', 'time'], inplace=True)
sim.sortlevel(1, inplace=True)
self.midas.sim = sim.copy()
def plot_ode_parameters(self, **kargs):
pylab.figure(1);
self._plot_ode_parameters_k(**kargs)
pylab.figure(2)
self._plot_ode_parameters_n(**kargs)
def _plot_ode_parameters_k(self, **kargs):
kargs["edge_attribute"] = "ode_k"
r = ODEParameters(self.results.results.parNames, self.results.results.parValues)
data = r.get_k()
for e in self.cnograph.edges():
try:
self.cnograph.edge[e[0]][e[1]]["ode_k"] = data[e[0]][e[1]]
self.cnograph.edge[e[0]][e[1]]["label"] = " k=%.2f" % data[e[0]][e[1]]
except:
self.cnograph.edge[e[0]][e[1]]["ode_k"] = -1
self.cnograph.edge[e[0]][e[1]]["label"] = " k=??"
print(e)
self.cnograph.plot(**kargs)
# cleanup the label
for e in self.cnograph.edges():
del self.cnograph.edge[e[0]][e[1]]["label"]
def _plot_ode_parameters_n(self, **kargs):
kargs["edge_attribute"] = "ode_n"
r = ODEParameters(self.results.results.parNames, self.results.results.parValues)
data = r.get_n()
for e in self.cnograph.edges():
try:
self.cnograph.edge[e[0]][e[1]]["ode_n"] = data[e[0]][e[1]]
self.cnograph.edge[e[0]][e[1]]["label"] = " n=%.2f" % data[e[0]][e[1]]
except:
self.cnograph.edge[e[0]][e[1]]["ode_n"] = -1
self.cnograph.edge[e[0]][e[1]]["label"] = " n=??"
print(e)
self.cnograph.plot(**kargs)
# cleanup the label
for e in self.cnograph.edges():
del self.cnograph.edge[e[0]][e[1]]["label"]
def create_report_images(self):
model = self.cnograph.copy()
model.plot(filename=self._report._make_filename("pknmodel.svg"), show=False)
model.preprocessing()
model.plot(filename=self._report._make_filename("expmodel.png"), show=False)
self._plot_ode_parameters_k(filename=self._report._make_filename("ode_parameters_k.png"),
show=False)
self._plot_ode_parameters_n(filename=self._report._make_filename("ode_parameters_n.png"),
show=False)
self.plot_errors(show=True)
self._report.savefig("Errors.png")
self.midas.plot()
self._report.savefig("midas.png")
pylab.close()
self.plot_fitness(show=True, save=False)
self._report.savefig("fitness.png")
def plot_fitness(self, show=True, save=False):
self.results.plot_fit()
if save is True:
self._report.savefig("fitness.png")
if show is False:
pylab.close()
def create_report(self):
self._create_report_header()
txt = """<pre class="literal-block">\n"""
#txt += "\n".join([x for x in self._script_optim.split("\n") if "write.csv" not in x])
txt += "o.report()\n</pre>\n"
self._report.add_section(txt, "Script used")
txt = """<a href="http://www.cellnopt.org/">
<object data="pknmodel.svg" type="image/svg+xml">
<span>Your browser doesn't support SVG images</span> </object></a>"""
txt += """<a class="reference external image-reference" href="scripts/exercice_3.py">
<img alt="MIDAS" class="align-right" src="midas.png" /></a>"""
self._report.add_section(txt, "PKN graph", [("http://www.cellnopt.org", "cnograph")])
self._report.add_section('<img src="expmodel.png">', "Expanded before optimisation")
self._report.add_section( """
<img src="expmodel.png">'
<img src="ode_parameters_k.png">
<img src="ode_parameters_n.png">
""", "Optimised model")
self._report.add_section('<img src="fitness.png">', "Fitness")
self._report.add_section('<img src="Errors.png">', "Errors")
self._report.add_section(self._report.get_html_reproduce(), "Reproducibility")
fh = open(self._report.report_directory + os.sep + "rerun.py", 'w')
fh.write("from cellnopt.pipeline import *\n")
fh.write("CNOode(config=config.ini)\n")
fh.write("c.optimise()\n")
fh.write("c.report()\n")
fh.close()
# some stats
stats = self._get_stats()
txt = "<table>"
for k,v in iteritems(stats):
txt += "<tr><td>%s</td><td>%s</td></tr>" % (k,v)
txt += "</table>"
txt += """<img id="img" onclick='changeImage();' src="fit_over_time.png">\n"""
self._report.add_section(txt, "stats")
# dependencies
#table = self.get_table_dependencies()
#fh.write(table.to_html())
self._report.write("index.html")
def _get_stats(self):
res = {}
#res['Computation time'] = self.total_time
try:
res['Best Score'] = self.results.results['best_score']
except:
pass
return res
class ODEParameters(object):
"""A class to handle ODE parameters returned by the R package
"""
def __init__(self, parNames, parValues):
self.parValues = parValues
self.parNames = parNames
def get_n(self):
res = {}
for k,v in zip(self.parNames, self.parValues):
if "_n_" in k:
e1, e2 = k.split("_n_")
if e1 not in res.keys():
res[e1] = {}
if e2 not in res[e1].keys():
res[e1][e2] = v
else:
raise KeyError
return res
def get_k(self):
res = {}
for k,v in zip(self.parNames, self.parValues):
if "_k_" in k:
e1, e2 = k.split("_k_")
if e1 not in res.keys():
res[e1] = {}
if e2 not in res[e1].keys():
res[e1][e2] = v
else:
raise KeyError
return res
    def get_tau(self):
        res = {}
        for k, v in zip(self.parNames, self.parValues):
            if k.startswith("tau_"):
                # keep only the species name (drop the "tau_" prefix)
                res[k[len("tau_"):]] = v
        return res
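# Illustrative example (not from the original file): with
#   parNames  = ["A_k_B", "A_n_B", "tau_B"]
#   parValues = [0.5, 3.0, 0.1]
# ODEParameters(parNames, parValues).get_k() returns {"A": {"B": 0.5}},
# .get_n() returns {"A": {"B": 3.0}} and .get_tau() returns {"B": 0.1}.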
def standalone(args=None):
"""This function is used by the standalone application called cellnopt_boolean
::
cellnopt_ode --help
"""
if args is None:
args = sys.argv[:]
from cno.core.standalone import Standalone
user_options = OptionsODE()
stander = Standalone(args, user_options)
# just an alias
options = stander.options
if options.onweb is True or options.report is True:
trainer = CNORode(options.pknmodel, options.data, verbose=options.verbose,
verboseR=options.verboseR, config=options.config_file,
use_cnodata=options.cnodata)
trainer.preprocessing()
else:
stander.help()
trainer.optimise(**stander.user_options.config.SSM.as_dict())
stander.trainer = trainer
stander.report()
class OptionsODE(OptionsBase):
def __init__(self):
prog = "cno_ode_steady"
version = prog + " v1.0 (Thomas Cokelaer @2014)"
super(OptionsODE, self).__init__(version=version, prog=prog)
self.add_section(ParamsSSM())
if __name__ == "__main__":
"""Used by setup.py as an entry point to :func:`standalone`"""
standalone(sys.argv)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
import json
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.volumes \
.volumes import forms as project_forms
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables as project_tables
from openstack_dashboard.dashboards.project.volumes \
.volumes import tabs as project_tabs
class DetailView(tabs.TabView):
tab_group_class = project_tabs.VolumeDetailTabs
template_name = 'project/volumes/volumes/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["volume"] = self.get_data()
return context
@memoized.memoized_method
def get_data(self):
try:
volume_id = self.kwargs['volume_id']
volume = cinder.volume_get(self.request, volume_id)
for att in volume.attachments:
att['instance'] = api.nova.server_get(self.request,
att['server_id'])
except Exception:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return volume
def get_tabs(self, request, *args, **kwargs):
volume = self.get_data()
return self.tab_group_class(request, volume=volume, **kwargs)
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateForm
template_name = 'project/volumes/volumes/create.html'
success_url = reverse_lazy('horizon:project:volumes:volumes_tab')
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
try:
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request)
locations = []
tags = api.cinder.geo_tag_list(self.request)
for tag in tags:
if hasattr(tag, 'loc_or_error_msg'):
if tag.loc_or_error_msg:
locations.append(tag.loc_or_error_msg)
locations.sort()
context['locations'] = json.dumps(locations)
return context
class ExtendView(forms.ModalFormView):
form_class = project_forms.ExtendForm
template_name = 'project/volumes/volumes/extend.html'
success_url = reverse_lazy("horizon:project:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
volume_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return self._object
def get_context_data(self, **kwargs):
context = super(ExtendView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
try:
usages = quotas.tenant_limit_usages(self.request)
usages['gigabytesUsed'] = (usages['gigabytesUsed']
- context['volume'].size)
context['usages'] = usages
except Exception:
exceptions.handle(self.request)
return context
def get_initial(self):
volume = self.get_object()
return {'id': self.kwargs['volume_id'],
'name': volume.name,
'orig_size': volume.size}
class CreateSnapshotView(forms.ModalFormView):
form_class = project_forms.CreateSnapshotForm
template_name = 'project/volumes/volumes/create_snapshot.html'
success_url = reverse_lazy('horizon:project:volumes:snapshots_tab')
def get_context_data(self, **kwargs):
context = super(CreateSnapshotView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
try:
volume = cinder.volume_get(self.request, context['volume_id'])
if (volume.status == 'in-use'):
context['attached'] = True
context['form'].set_warning(_("This volume is currently "
"attached to an instance. "
"In some cases, creating a "
"snapshot from an attached "
"volume can result in a "
"corrupted snapshot."))
context['usages'] = quotas.tenant_limit_usages(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return context
def get_initial(self):
return {'volume_id': self.kwargs["volume_id"]}
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateForm
template_name = 'project/volumes/volumes/update.html'
success_url = reverse_lazy("horizon:project:volumes:index")
def get_object(self):
if not hasattr(self, "_object"):
vol_id = self.kwargs['volume_id']
try:
self._object = cinder.volume_get(self.request, vol_id)
except Exception:
msg = _('Unable to retrieve volume.')
url = reverse('horizon:project:volumes:index')
exceptions.handle(self.request, msg, redirect=url)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['volume'] = self.get_object()
return context
def get_initial(self):
volume = self.get_object()
return {'volume_id': self.kwargs["volume_id"],
'name': volume.name,
'description': volume.description}
class EditAttachmentsView(tables.DataTableView, forms.ModalFormView):
table_class = project_tables.AttachmentsTable
form_class = project_forms.AttachForm
template_name = 'project/volumes/volumes/attach.html'
success_url = reverse_lazy("horizon:project:volumes:index")
@memoized.memoized_method
def get_object(self):
volume_id = self.kwargs['volume_id']
try:
return cinder.volume_get(self.request, volume_id)
except Exception:
self._object = None
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
def get_data(self):
try:
volumes = self.get_object()
attachments = [att for att in volumes.attachments if att]
except Exception:
attachments = []
exceptions.handle(self.request,
_('Unable to retrieve volume information.'))
return attachments
def get_initial(self):
try:
instances, has_more = api.nova.server_list(self.request)
except Exception:
instances = []
exceptions.handle(self.request,
_("Unable to retrieve attachment information."))
return {'volume': self.get_object(),
'instances': instances}
@memoized.memoized_method
def get_form(self):
form_class = self.get_form_class()
return super(EditAttachmentsView, self).get_form(form_class)
def get_context_data(self, **kwargs):
context = super(EditAttachmentsView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
volume = self.get_object()
if volume and volume.status == 'available':
context['show_attach'] = True
else:
context['show_attach'] = False
context['volume'] = volume
if self.request.is_ajax():
context['hide'] = True
return context
def get(self, request, *args, **kwargs):
# Table action handling
handled = self.construct_tables()
if handled:
return handled
return self.render_to_response(self.get_context_data(**kwargs))
def post(self, request, *args, **kwargs):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.get(request, *args, **kwargs)
|
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import warnings
from copy import deepcopy
from itertools import combinations, permutations
import numpy as np
import pytest
from numpy.testing import (assert_allclose, assert_, assert_equal,
assert_raises, assert_array_equal, assert_warns)
import pywt
from pywt._extensions._swt import swt_axis
# Check that float32 and complex64 are preserved. Other real types get
# converted to float64.
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
np.complex128]
# tolerances used in accuracy comparisons
tol_single = 1e-6
tol_double = 1e-13
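# For example, under the mapping above an int8 input is promoted to float64
# while a float32 input stays float32:
#
#   cA, cD = pywt.swt(np.ones(8, dtype=np.int8), 'haar', level=1)[0]
#   assert cA.dtype == np.float64
#   cA32, _ = pywt.swt(np.ones(8, dtype=np.float32), 'haar', level=1)[0]
#   assert cA32.dtype == np.float32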
####
# 1d multilevel swt tests
####
def test_swt_decomposition():
x = [3, 7, 1, 3, -2, 6, 4, 6]
db1 = pywt.Wavelet('db1')
atol = tol_double
(cA3, cD3), (cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=3)
expected_cA1 = [7.07106781, 5.65685425, 2.82842712, 0.70710678,
2.82842712, 7.07106781, 7.07106781, 6.36396103]
assert_allclose(cA1, expected_cA1, rtol=1e-8, atol=atol)
expected_cD1 = [-2.82842712, 4.24264069, -1.41421356, 3.53553391,
-5.65685425, 1.41421356, -1.41421356, 2.12132034]
assert_allclose(cD1, expected_cD1, rtol=1e-8, atol=atol)
expected_cA2 = [7, 4.5, 4, 5.5, 7, 9.5, 10, 8.5]
assert_allclose(cA2, expected_cA2, rtol=tol_double, atol=atol)
expected_cD2 = [3, 3.5, 0, -4.5, -3, 0.5, 0, 0.5]
assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=atol)
expected_cA3 = [9.89949494, ] * 8
assert_allclose(cA3, expected_cA3, rtol=1e-8, atol=atol)
expected_cD3 = [0.00000000, -3.53553391, -4.24264069, -2.12132034,
0.00000000, 3.53553391, 4.24264069, 2.12132034]
assert_allclose(cD3, expected_cD3, rtol=1e-8, atol=atol)
# level=1, start_level=1 decomposition should match level=2
res = pywt.swt(cA1, db1, level=1, start_level=1)
cA2, cD2 = res[0]
assert_allclose(cA2, expected_cA2, rtol=tol_double, atol=atol)
assert_allclose(cD2, expected_cD2, rtol=tol_double, atol=atol)
coeffs = pywt.swt(x, db1)
assert_(len(coeffs) == 3)
    assert_equal(pywt.swt_max_level(len(x)), 3)
def test_swt_max_level():
# odd sized signal will warn about no levels of decomposition possible
assert_warns(UserWarning, pywt.swt_max_level, 11)
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
assert_equal(pywt.swt_max_level(11), 0)
# no warnings when >= 1 level of decomposition possible
assert_equal(pywt.swt_max_level(2), 1) # divisible by 2**1
assert_equal(pywt.swt_max_level(4*3), 2) # divisible by 2**2
assert_equal(pywt.swt_max_level(16), 4) # divisible by 2**4
assert_equal(pywt.swt_max_level(16*3), 4) # divisible by 2**4
def test_swt_axis():
x = [3, 7, 1, 3, -2, 6, 4, 6]
db1 = pywt.Wavelet('db1')
(cA2, cD2), (cA1, cD1) = pywt.swt(x, db1, level=2)
# test cases use 2D arrays based on tiling x along an axis and then
# calling swt along the other axis.
for order in ['C', 'F']:
# test SWT of 2D data along default axis (-1)
x_2d = np.asarray(x).reshape((1, -1))
x_2d = np.concatenate((x_2d, )*5, axis=0)
if order == 'C':
x_2d = np.ascontiguousarray(x_2d)
elif order == 'F':
x_2d = np.asfortranarray(x_2d)
(cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2)
for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
assert_(c.shape == x_2d.shape)
# each row should match the 1D result
for row in cA1_2d:
assert_array_equal(row, cA1)
for row in cA2_2d:
assert_array_equal(row, cA2)
for row in cD1_2d:
assert_array_equal(row, cD1)
for row in cD2_2d:
assert_array_equal(row, cD2)
# test SWT of 2D data along other axis (0)
x_2d = np.asarray(x).reshape((-1, 1))
x_2d = np.concatenate((x_2d, )*5, axis=1)
if order == 'C':
x_2d = np.ascontiguousarray(x_2d)
elif order == 'F':
x_2d = np.asfortranarray(x_2d)
(cA2_2d, cD2_2d), (cA1_2d, cD1_2d) = pywt.swt(x_2d, db1, level=2,
axis=0)
for c in [cA2_2d, cD2_2d, cA1_2d, cD1_2d]:
assert_(c.shape == x_2d.shape)
# each column should match the 1D result
for row in cA1_2d.transpose((1, 0)):
assert_array_equal(row, cA1)
for row in cA2_2d.transpose((1, 0)):
assert_array_equal(row, cA2)
for row in cD1_2d.transpose((1, 0)):
assert_array_equal(row, cD1)
for row in cD2_2d.transpose((1, 0)):
assert_array_equal(row, cD2)
# axis too large
assert_raises(ValueError, pywt.swt, x, db1, level=2, axis=5)
def test_swt_iswt_integration():
# This function performs a round-trip swt/iswt transform test on
# all available types of wavelets in PyWavelets - except the
# 'dmey' wavelet. The latter has been excluded because it does not
# produce very precise results. This is likely due to the fact
# that the 'dmey' wavelet is a discrete approximation of a
# continuous wavelet. All wavelets are tested up to 3 levels. The
    # test validates neither swt nor iswt as such, but it does ensure
# that they are each other's inverse.
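    # For instance, with 'db2' (dec_len == rec_len == 4) and max_level == 3 the
    # loop below uses an input of length 2**(2 + 3 - 1) == 16 samples.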
max_level = 3
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet seems to be a bit special - disregard it for now
wavelets.remove('dmey')
for current_wavelet_str in wavelets:
current_wavelet = pywt.Wavelet(current_wavelet_str)
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power + max_level - 1)
X = np.arange(input_length)
for norm in [True, False]:
if norm and not current_wavelet.orthogonal:
# non-orthogonal wavelets to avoid warnings when norm=True
continue
for trim_approx in [True, False]:
coeffs = pywt.swt(X, current_wavelet, max_level,
trim_approx=trim_approx, norm=norm)
Y = pywt.iswt(coeffs, current_wavelet, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-7)
def test_swt_dtypes():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
errmsg = "wrong dtype returned for {0} input".format(dt_in)
# swt
x = np.ones(8, dtype=dt_in)
(cA2, cD2), (cA1, cD1) = pywt.swt(x, wavelet, level=2)
assert_(cA2.dtype == cD2.dtype == cA1.dtype == cD1.dtype == dt_out,
"swt: " + errmsg)
# swt2
x = np.ones((8, 8), dtype=dt_in)
cA, (cH, cV, cD) = pywt.swt2(x, wavelet, level=1)[0]
assert_(cA.dtype == cH.dtype == cV.dtype == cD.dtype == dt_out,
"swt2: " + errmsg)
def test_swt_roundtrip_dtypes():
# verify perfect reconstruction for all dtypes
rstate = np.random.RandomState(5)
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
# swt, iswt
x = rstate.standard_normal((8, )).astype(dt_in)
c = pywt.swt(x, wavelet, level=2)
xr = pywt.iswt(c, wavelet)
assert_allclose(x, xr, rtol=1e-6, atol=1e-7)
# swt2, iswt2
x = rstate.standard_normal((8, 8)).astype(dt_in)
c = pywt.swt2(x, wavelet, level=2)
xr = pywt.iswt2(c, wavelet)
assert_allclose(x, xr, rtol=1e-6, atol=1e-7)
def test_swt_default_level_by_axis():
# make sure default number of levels matches the max level along the axis
wav = 'db2'
x = np.ones((2**3, 2**4, 2**5))
for axis in (0, 1, 2):
sdec = pywt.swt(x, wav, level=None, start_level=0, axis=axis)
assert_equal(len(sdec), pywt.swt_max_level(x.shape[axis]))
def test_swt2_ndim_error():
x = np.ones(8)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
assert_raises(ValueError, pywt.swt2, x, 'haar', level=1)
@pytest.mark.slow
def test_swt2_iswt2_integration(wavelets=None):
# This function performs a round-trip swt2/iswt2 transform test on
# all available types of wavelets in PyWavelets - except the
# 'dmey' wavelet. The latter has been excluded because it does not
# produce very precise results. This is likely due to the fact
# that the 'dmey' wavelet is a discrete approximation of a
# continuous wavelet. All wavelets are tested up to 3 levels. The
    # test validates neither swt2 nor iswt2 as such, but it does ensure
# that they are each other's inverse.
max_level = 3
if wavelets is None:
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet is a special case - disregard it for now
wavelets.remove('dmey')
for current_wavelet_str in wavelets:
current_wavelet = pywt.Wavelet(current_wavelet_str)
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power + max_level - 1)
X = np.arange(input_length**2).reshape(input_length, input_length)
for norm in [True, False]:
if norm and not current_wavelet.orthogonal:
# non-orthogonal wavelets to avoid warnings when norm=True
continue
for trim_approx in [True, False]:
coeffs = pywt.swt2(X, current_wavelet, max_level,
trim_approx=trim_approx, norm=norm)
Y = pywt.iswt2(coeffs, current_wavelet, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-5)
def test_swt2_iswt2_quick():
test_swt2_iswt2_integration(wavelets=['db1', ])
def test_swt2_iswt2_non_square(wavelets=None):
for nrows in [8, 16, 48]:
X = np.arange(nrows*32).reshape(nrows, 32)
current_wavelet = 'db1'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
coeffs = pywt.swt2(X, current_wavelet, level=2)
Y = pywt.iswt2(coeffs, current_wavelet)
assert_allclose(Y, X, rtol=tol_single, atol=tol_single)
def test_swt2_axes():
atol = 1e-14
current_wavelet = pywt.Wavelet('db2')
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power)
X = np.arange(input_length**2).reshape(input_length, input_length)
(cA1, (cH1, cV1, cD1)) = pywt.swt2(X, current_wavelet, level=1)[0]
# opposite order
(cA2, (cH2, cV2, cD2)) = pywt.swt2(X, current_wavelet, level=1,
axes=(1, 0))[0]
assert_allclose(cA1, cA2, atol=atol)
assert_allclose(cH1, cV2, atol=atol)
assert_allclose(cV1, cH2, atol=atol)
assert_allclose(cD1, cD2, atol=atol)
    # verify that iswt2 restores the original data
r1 = pywt.iswt2([cA1, (cH1, cV1, cD1)], current_wavelet)
assert_allclose(X, r1, atol=atol)
r2 = pywt.iswt2([cA2, (cH2, cV2, cD2)], current_wavelet, axes=(1, 0))
assert_allclose(X, r2, atol=atol)
# duplicate axes not allowed
assert_raises(ValueError, pywt.swt2, X, current_wavelet, 1,
axes=(0, 0))
# too few axes
assert_raises(ValueError, pywt.swt2, X, current_wavelet, 1, axes=(0, ))
def test_swtn_axes():
atol = 1e-14
current_wavelet = pywt.Wavelet('db2')
input_length_power = int(np.ceil(np.log2(max(
current_wavelet.dec_len,
current_wavelet.rec_len))))
input_length = 2**(input_length_power)
X = np.arange(input_length**2).reshape(input_length, input_length)
coeffs = pywt.swtn(X, current_wavelet, level=1, axes=None)[0]
# opposite order
coeffs2 = pywt.swtn(X, current_wavelet, level=1, axes=(1, 0))[0]
assert_allclose(coeffs['aa'], coeffs2['aa'], atol=atol)
assert_allclose(coeffs['ad'], coeffs2['da'], atol=atol)
assert_allclose(coeffs['da'], coeffs2['ad'], atol=atol)
assert_allclose(coeffs['dd'], coeffs2['dd'], atol=atol)
# 0-level transform
empty = pywt.swtn(X, current_wavelet, level=0)
assert_equal(empty, [])
# duplicate axes not allowed
assert_raises(ValueError, pywt.swtn, X, current_wavelet, 1, axes=(0, 0))
# data.ndim = 0
assert_raises(ValueError, pywt.swtn, np.asarray([]), current_wavelet, 1)
# start_level too large
assert_raises(ValueError, pywt.swtn, X, current_wavelet,
level=1, start_level=2)
# level < 1 in swt_axis call
assert_raises(ValueError, swt_axis, X, current_wavelet, level=0,
start_level=0)
# odd-sized data not allowed
assert_raises(ValueError, swt_axis, X[:-1, :], current_wavelet, level=0,
start_level=0, axis=0)
@pytest.mark.slow
def test_swtn_iswtn_integration(wavelets=None):
# This function performs a round-trip swtn/iswtn transform for various
# possible combinations of:
# 1.) 1 out of 2 axes of a 2D array
# 2.) 2 out of 3 axes of a 3D array
#
# To keep test time down, only wavelets of length <= 8 are run.
#
# This test does not validate swtn or iswtn individually, but only
# confirms that iswtn yields an (almost) perfect reconstruction of swtn.
max_level = 3
if wavelets is None:
wavelets = pywt.wavelist(kind='discrete')
if 'dmey' in wavelets:
# The 'dmey' wavelet is a special case - disregard it for now
wavelets.remove('dmey')
for ndim_transform in range(1, 3):
ndim = ndim_transform + 1
for axes in combinations(range(ndim), ndim_transform):
for current_wavelet_str in wavelets:
wav = pywt.Wavelet(current_wavelet_str)
if wav.dec_len > 8:
continue # avoid excessive test duration
input_length_power = int(np.ceil(np.log2(max(
wav.dec_len,
wav.rec_len))))
N = 2**(input_length_power + max_level - 1)
X = np.arange(N**ndim).reshape((N, )*ndim)
for norm in [True, False]:
if norm and not wav.orthogonal:
# non-orthogonal wavelets to avoid warnings
continue
for trim_approx in [True, False]:
coeffs = pywt.swtn(X, wav, max_level, axes=axes,
trim_approx=trim_approx, norm=norm)
coeffs_copy = deepcopy(coeffs)
Y = pywt.iswtn(coeffs, wav, axes=axes, norm=norm)
assert_allclose(Y, X, rtol=1e-5, atol=1e-5)
# verify the inverse transform didn't modify any coeffs
for c, c2 in zip(coeffs, coeffs_copy):
for k, v in c.items():
assert_array_equal(c2[k], v)
def test_swtn_iswtn_quick():
test_swtn_iswtn_integration(wavelets=['db1', ])
def test_iswtn_errors():
x = np.arange(8**3).reshape(8, 8, 8)
max_level = 2
axes = (0, 1)
w = pywt.Wavelet('db1')
coeffs = pywt.swtn(x, w, max_level, axes=axes)
# more axes than dimensions transformed
assert_raises(ValueError, pywt.iswtn, coeffs, w, axes=(0, 1, 2))
# duplicate axes not allowed
assert_raises(ValueError, pywt.iswtn, coeffs, w, axes=(0, 0))
# mismatched coefficient size
coeffs[0]['da'] = coeffs[0]['da'][:-1, :]
assert_raises(RuntimeError, pywt.iswtn, coeffs, w, axes=axes)
def test_swtn_iswtn_unique_shape_per_axis():
# test case for gh-460
_shape = (1, 48, 32) # unique shape per axis
wav = 'sym2'
max_level = 3
rstate = np.random.RandomState(0)
for shape in permutations(_shape):
# transform only along the non-singleton axes
axes = [ax for ax, s in enumerate(shape) if s != 1]
x = rstate.standard_normal(shape)
c = pywt.swtn(x, wav, max_level, axes=axes)
r = pywt.iswtn(c, wav, axes=axes)
assert_allclose(x, r, rtol=1e-10, atol=1e-10)
def test_per_axis_wavelets():
    # tests a separate wavelet for each axis.
rstate = np.random.RandomState(1234)
data = rstate.randn(16, 16, 16)
level = 3
# wavelet can be a string or wavelet object
wavelets = (pywt.Wavelet('haar'), 'sym2', 'db4')
coefs = pywt.swtn(data, wavelets, level=level)
assert_allclose(pywt.iswtn(coefs, wavelets), data, atol=1e-14)
# 1-tuple also okay
coefs = pywt.swtn(data, wavelets[:1], level=level)
assert_allclose(pywt.iswtn(coefs, wavelets[:1]), data, atol=1e-14)
# length of wavelets doesn't match the length of axes
assert_raises(ValueError, pywt.swtn, data, wavelets[:2], level)
assert_raises(ValueError, pywt.iswtn, coefs, wavelets[:2])
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
# swt2/iswt2 also support per-axis wavelets/modes
data2 = data[..., 0]
coefs2 = pywt.swt2(data2, wavelets[:2], level)
assert_allclose(pywt.iswt2(coefs2, wavelets[:2]), data2, atol=1e-14)
def test_error_on_continuous_wavelet():
# A ValueError is raised if a Continuous wavelet is selected
data = np.ones((16, 16))
for dec_func, rec_func in zip([pywt.swt, pywt.swt2, pywt.swtn],
[pywt.iswt, pywt.iswt2, pywt.iswtn]):
for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
assert_raises(ValueError, dec_func, data, wavelet=cwave,
level=3)
c = dec_func(data, 'db1', level=3)
assert_raises(ValueError, rec_func, c, wavelet=cwave)
def test_iswt_mixed_dtypes():
# Mixed precision inputs give double precision output
x_real = np.arange(16).astype(np.float64)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swt(x, wav, 2)
# different precision for the approximation coefficients
coeffs[0] = [coeffs[0][0].astype(dtype1),
coeffs[0][1].astype(dtype2)]
y = pywt.iswt(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_iswt2_mixed_dtypes():
# Mixed precision inputs give double precision output
rstate = np.random.RandomState(0)
x_real = rstate.randn(8, 8)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swt2(x, wav, 2)
# different precision for the approximation coefficients
coeffs[0] = [coeffs[0][0].astype(dtype1),
tuple([c.astype(dtype2) for c in coeffs[0][1]])]
y = pywt.iswt2(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_iswtn_mixed_dtypes():
# Mixed precision inputs give double precision output
rstate = np.random.RandomState(0)
x_real = rstate.randn(8, 8, 8)
x_complex = x_real + 1j*x_real
wav = 'sym2'
for dtype1, dtype2 in [(np.float64, np.float32),
(np.float32, np.float64),
(np.float16, np.float64),
(np.complex128, np.complex64),
(np.complex64, np.complex128)]:
if dtype1 in [np.complex64, np.complex128]:
x = x_complex
output_dtype = np.complex128
else:
x = x_real
output_dtype = np.float64
coeffs = pywt.swtn(x, wav, 2)
# different precision for the approximation coefficients
a = coeffs[0].pop('a' * x.ndim)
a = a.astype(dtype1)
coeffs[0] = {k: c.astype(dtype2) for k, c in coeffs[0].items()}
coeffs[0]['a' * x.ndim] = a
y = pywt.iswtn(coeffs, wav)
assert_equal(output_dtype, y.dtype)
assert_allclose(y, x, rtol=1e-3, atol=1e-3)
def test_swt_zero_size_axes():
# raise on empty input array
assert_raises(ValueError, pywt.swt, [], 'db2')
# >1D case uses a different code path so check there as well
x = np.ones((1, 4))[0:0, :] # 2D with a size zero axis
assert_raises(ValueError, pywt.swtn, x, 'db2', level=1, axes=(0,))
def test_swt_variance_and_energy_preservation():
"""Verify that the 1D SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(256)
coeffs = pywt.swt(x, wav, trim_approx=True, norm=True)
variances = [np.var(c) for c in coeffs]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeffs)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swt, x, 'bior2.2', norm=True)
def test_swt2_variance_and_energy_preservation():
"""Verify that the 2D SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(64, 64)
coeffs = pywt.swt2(x, wav, level=4, trim_approx=True, norm=True)
coeff_list = [coeffs[0].ravel()]
for d in coeffs[1:]:
for v in d:
coeff_list.append(v.ravel())
variances = [np.var(v) for v in coeff_list]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeff_list)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swt2, x, 'bior2.2', level=4, norm=True)
def test_swtn_variance_and_energy_preservation():
"""Verify that the nD SWT partitions variance among the coefficients."""
# When norm is True and the wavelet is orthogonal, the sum of the
# variances of the coefficients should equal the variance of the signal.
wav = 'db2'
rstate = np.random.RandomState(5)
x = rstate.randn(64, 64)
coeffs = pywt.swtn(x, wav, level=4, trim_approx=True, norm=True)
coeff_list = [coeffs[0].ravel()]
for d in coeffs[1:]:
for k, v in d.items():
coeff_list.append(v.ravel())
variances = [np.var(v) for v in coeff_list]
assert_allclose(np.sum(variances), np.var(x))
# also verify L2-norm energy preservation property
assert_allclose(np.linalg.norm(x),
np.linalg.norm(np.concatenate(coeff_list)))
# non-orthogonal wavelet with norm=True raises a warning
assert_warns(UserWarning, pywt.swtn, x, 'bior2.2', level=4, norm=True)
def test_swt_ravel_and_unravel():
    # When trim_approx=True, all swt functions can use pywt.ravel_coeffs
for ndim, _swt, _iswt, ravel_type in [
(1, pywt.swt, pywt.iswt, 'swt'),
(2, pywt.swt2, pywt.iswt2, 'swt2'),
(3, pywt.swtn, pywt.iswtn, 'swtn')]:
x = np.ones((16, ) * ndim)
c = _swt(x, 'sym2', level=3, trim_approx=True)
arr, slices, shapes = pywt.ravel_coeffs(c)
c = pywt.unravel_coeffs(arr, slices, shapes, output_format=ravel_type)
r = _iswt(c, 'sym2')
assert_allclose(x, r)
|
|
import json
import os
import tempfile
from django.conf import settings
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
from django.db.models import Q
from django.test.utils import override_settings
import mock
from PIL import Image
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.helpers import user_media_path
from olympia.amo.tests import (
addon_factory, formset, initial, req_factory_factory)
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.urlresolvers import reverse
from olympia.addons.forms import AddonFormBasic
from olympia.addons.models import (
Addon, AddonCategory, AddonDependency, Category)
from olympia.bandwagon.models import (
Collection, CollectionAddon, FeaturedCollection)
from olympia.devhub.models import ActivityLog
from olympia.devhub.views import edit_theme
from olympia.tags.models import Tag, AddonTag
from olympia.users.models import UserProfile
def get_section_url(addon, section, edit=False):
args = [addon.slug, section]
if edit:
args.append('edit')
return reverse('devhub.addons.section', args=args)
@override_settings(MEDIA_ROOT=None) # Make it overridable.
class TestEdit(TestCase):
fixtures = ['base/users', 'base/addon_3615',
'base/addon_5579', 'base/addon_3615_categories']
def setUp(self):
# Make new for each test.
settings.MEDIA_ROOT = tempfile.mkdtemp()
super(TestEdit, self).setUp()
addon = self.get_addon()
assert self.client.login(username='del@icio.us', password='password')
a = AddonCategory.objects.filter(addon=addon, category__id=22)[0]
a.feature = False
a.save()
AddonCategory.objects.filter(addon=addon,
category__id__in=[23, 24]).delete()
cache.clear()
self.url = addon.get_dev_url()
self.user = UserProfile.objects.get(pk=55021)
self.tags = ['tag3', 'tag2', 'tag1']
for t in self.tags:
Tag(tag_text=t).save_tag(addon)
self.addon = self.get_addon()
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def get_url(self, section, edit=False):
return get_section_url(self.addon, section, edit)
def get_dict(self, **kw):
fs = formset(self.cat_initial, initial_count=1)
result = {'name': 'new name', 'slug': 'test_slug',
'summary': 'new summary',
'tags': ', '.join(self.tags)}
result.update(**kw)
result.update(fs)
return result
class TestEditBasic(TestEdit):
def setUp(self):
super(TestEditBasic, self).setUp()
self.basic_edit_url = self.get_url('basic', edit=True)
ctx = self.client.get(self.basic_edit_url).context
self.cat_initial = initial(ctx['cat_form'].initial_forms[0])
def test_redirect(self):
# /addon/:id => /addon/:id/edit
r = self.client.get('/en-US/developers/addon/3615/', follow=True)
self.assert3xx(r, self.url, 301)
def test_edit(self):
old_name = self.addon.name
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
addon = self.get_addon()
assert unicode(addon.name) == data['name']
assert addon.name.id == old_name.id
assert unicode(addon.slug) == data['slug']
assert unicode(addon.summary) == data['summary']
assert [unicode(t) for t in addon.tags.all()] == sorted(self.tags)
def test_edit_check_description(self):
# Make sure bug 629779 doesn't return.
old_desc = self.addon.description
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
addon = self.get_addon()
assert addon.description == old_desc
def test_edit_slug_invalid(self):
old_edit = self.basic_edit_url
data = self.get_dict(name='', slug='invalid')
r = self.client.post(self.basic_edit_url, data)
doc = pq(r.content)
assert doc('form').attr('action') == old_edit
def test_edit_slug_valid(self):
old_edit = self.basic_edit_url
data = self.get_dict(slug='valid')
r = self.client.post(self.basic_edit_url, data)
doc = pq(r.content)
assert doc('form').attr('action') != old_edit
def test_edit_summary_escaping(self):
data = self.get_dict()
data['summary'] = '<b>oh my</b>'
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
# Fetch the page so the LinkifiedTranslation gets in cache.
r = self.client.get(reverse('devhub.addons.edit', args=[data['slug']]))
assert pq(r.content)('[data-name=summary]').html().strip() == (
'<span lang="en-us"><b>oh my</b></span>')
# Now make sure we don't have escaped content in the rendered form.
form = AddonFormBasic(instance=self.get_addon(),
request=req_factory_factory('/'))
html = pq('<body>%s</body>' % form['summary'])('[lang="en-us"]').html()
assert html.strip() == '<b>oh my</b>'
def test_edit_as_developer(self):
self.login('regular@mozilla.com')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
# Make sure we get errors when they are just regular users.
assert r.status_code == 403
devuser = UserProfile.objects.get(pk=999)
self.get_addon().addonuser_set.create(
user=devuser, role=amo.AUTHOR_ROLE_DEV)
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
addon = self.get_addon()
assert unicode(addon.name) == data['name']
assert unicode(addon.slug) == data['slug']
assert unicode(addon.summary) == data['summary']
assert [unicode(t) for t in addon.tags.all()] == sorted(self.tags)
def test_edit_name_required(self):
data = self.get_dict(name='', slug='test_addon')
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_name_spaces(self):
data = self.get_dict(name=' ', slug='test_addon')
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_slugs_unique(self):
Addon.objects.get(id=5579).update(slug='test_slug')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
self.assertFormError(
r, 'form', 'slug',
'This slug is already in use. Please choose another.')
def test_edit_add_tag(self):
count = ActivityLog.objects.all().count()
self.tags.insert(0, 'tag4')
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
result = pq(r.content)('#addon_tags_edit').eq(0).text()
assert result == ', '.join(sorted(self.tags))
html = ('<a href="/en-US/firefox/tag/tag4">tag4</a> added to '
'<a href="/en-US/firefox/addon/test_slug/">new name</a>.')
assert ActivityLog.objects.for_addons(self.addon).get(
action=amo.LOG.ADD_TAG.id).to_string() == html
assert ActivityLog.objects.filter(
action=amo.LOG.ADD_TAG.id).count() == count + 1
def test_edit_blacklisted_tag(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
data = self.get_dict(tags='blue')
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
error = 'Invalid tag: blue'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_blacklisted_tags_2(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
Tag.objects.get_or_create(tag_text='darn', blacklisted=True)
data = self.get_dict(tags='blue, darn, swearword')
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
error = 'Invalid tags: blue, darn'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_blacklisted_tags_3(self):
Tag.objects.get_or_create(tag_text='blue', blacklisted=True)
Tag.objects.get_or_create(tag_text='darn', blacklisted=True)
Tag.objects.get_or_create(tag_text='swearword', blacklisted=True)
data = self.get_dict(tags='blue, darn, swearword')
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
error = 'Invalid tags: blue, darn, swearword'
self.assertFormError(r, 'form', 'tags', error)
def test_edit_remove_tag(self):
self.tags.remove('tag2')
count = ActivityLog.objects.all().count()
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
result = pq(r.content)('#addon_tags_edit').eq(0).text()
assert result == ', '.join(sorted(self.tags))
assert ActivityLog.objects.filter(
action=amo.LOG.REMOVE_TAG.id).count() == count + 1
def test_edit_minlength_tags(self):
tags = self.tags
tags.append('a' * (amo.MIN_TAG_LENGTH - 1))
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
assert r.status_code == 200
self.assertFormError(r, 'form', 'tags',
'All tags must be at least %d characters.' %
amo.MIN_TAG_LENGTH)
def test_edit_max_tags(self):
tags = self.tags
for i in range(amo.MAX_TAGS + 1):
tags.append('test%d' % i)
data = self.get_dict()
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(
r, 'form', 'tags',
'You have %d too many tags.' % (len(tags) - amo.MAX_TAGS))
def test_edit_tag_empty_after_slug(self):
start = Tag.objects.all().count()
data = self.get_dict(tags='>>')
self.client.post(self.basic_edit_url, data)
# Check that the tag did not get created.
assert start == Tag.objects.all().count()
def test_edit_tag_slugified(self):
data = self.get_dict(tags='<script>alert("foo")</script>')
self.client.post(self.basic_edit_url, data)
tag = Tag.objects.all().order_by('-pk')[0]
assert tag.tag_text == 'scriptalertfooscript'
def test_edit_categories_add(self):
assert [c.id for c in self.get_addon().all_categories] == [22]
self.cat_initial['categories'] = [22, 23]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22, 23]
def _feature_addon(self, addon_id=3615):
c = CollectionAddon.objects.create(
addon_id=addon_id, collection=Collection.objects.create())
FeaturedCollection.objects.create(collection=c.collection,
application=amo.FIREFOX.id)
cache.clear()
def test_edit_categories_add_featured(self):
"""Ensure that categories cannot be changed for featured add-ons."""
self._feature_addon()
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert r.context['cat_form'].errors[0]['categories'] == (
['Categories cannot be changed while your add-on is featured for '
'this application.'])
# This add-on's categories should not change.
assert sorted(addon_cats) == [22]
def test_edit_categories_add_new_creatured_admin(self):
"""Ensure that admins can change categories for creatured add-ons."""
assert self.client.login(username='admin@mozilla.com',
password='password')
self._feature_addon()
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
assert doc('#addon-categories-edit div.addon-app-cats').length == 1
assert doc('#addon-categories-edit > p').length == 0
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert 'categories' not in r.context['cat_form'].errors[0]
# This add-on's categories should change.
assert sorted(addon_cats) == [22, 23]
def test_edit_categories_disable_creatured(self):
"""Ensure that other forms are okay when disabling category changes."""
self._feature_addon()
self.cat_initial['categories'] = [22, 23]
data = self.get_dict()
self.client.post(self.basic_edit_url, data)
assert unicode(self.get_addon().name) == data['name']
def test_edit_categories_no_disclaimer(self):
"""Ensure that there is a not disclaimer for non-creatured add-ons."""
r = self.client.get(self.basic_edit_url)
doc = pq(r.content)
assert doc('#addon-categories-edit div.addon-app-cats').length == 1
assert doc('#addon-categories-edit > p').length == 0
def test_edit_categories_addandremove(self):
AddonCategory(addon=self.addon, category_id=23).save()
assert [c.id for c in self.get_addon().all_categories] == [22, 23]
self.cat_initial['categories'] = [22, 24]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22, 24]
def test_edit_categories_xss(self):
c = Category.objects.get(id=22)
c.name = '<script>alert("test");</script>'
c.save()
self.cat_initial['categories'] = [22, 24]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert '<script>alert' not in r.content
assert '&lt;script&gt;alert' in r.content
def test_edit_categories_remove(self):
c = Category.objects.get(id=23)
AddonCategory(addon=self.addon, category=c).save()
assert [cat.id for cat in self.get_addon().all_categories] == [22, 23]
self.cat_initial['categories'] = [22]
self.client.post(self.basic_edit_url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
assert sorted(addon_cats) == [22]
def test_edit_categories_required(self):
del self.cat_initial['categories']
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert r.context['cat_form'].errors[0]['categories'] == (
['This field is required.'])
def test_edit_categories_max(self):
assert amo.MAX_CATEGORIES == 2
self.cat_initial['categories'] = [22, 23, 24]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert r.context['cat_form'].errors[0]['categories'] == (
['You can have only 2 categories.'])
def test_edit_categories_other_failure(self):
Category.objects.get(id=22).update(misc=True)
self.cat_initial['categories'] = [22, 23]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert r.context['cat_form'].errors[0]['categories'] == (
['The miscellaneous category cannot be combined with additional '
'categories.'])
def test_edit_categories_nonexistent(self):
self.cat_initial['categories'] = [100]
r = self.client.post(self.basic_edit_url, formset(self.cat_initial,
initial_count=1))
assert r.context['cat_form'].errors[0]['categories'] == (
['Select a valid choice. 100 is not one of the available '
'choices.'])
def test_edit_name_not_empty(self):
data = self.get_dict(name='', slug=self.addon.slug,
summary=self.addon.summary)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'name', 'This field is required.')
def test_edit_name_max_length(self):
data = self.get_dict(name='xx' * 70, slug=self.addon.slug,
summary=self.addon.summary)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'name',
'Ensure this value has at most 50 '
'characters (it has 140).')
def test_edit_summary_max_length(self):
data = self.get_dict(name=self.addon.name, slug=self.addon.slug,
summary='x' * 251)
r = self.client.post(self.basic_edit_url, data)
self.assertFormError(r, 'form', 'summary',
'Ensure this value has at most 250 '
'characters (it has 251).')
def test_edit_restricted_tags(self):
addon = self.get_addon()
tag = Tag.objects.create(tag_text='restartless', restricted=True)
AddonTag.objects.create(tag=tag, addon=addon)
res = self.client.get(self.basic_edit_url)
divs = pq(res.content)('#addon_tags_edit .edit-addon-details')
assert len(divs) == 2
assert 'restartless' in divs.eq(1).text()
def test_text_not_none_when_has_flags(self):
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('#addon-flags').text() == 'This is a site-specific add-on.'
def test_text_none_when_no_flags(self):
addon = self.get_addon()
addon.update(external_software=False, site_specific=False)
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('#addon-flags').text() == 'None'
def test_nav_links(self):
activity_url = reverse('devhub.feed', args=['a3615'])
r = self.client.get(self.url)
doc = pq(r.content)('#edit-addon-nav')
assert doc('ul:last').find('li a').eq(1).attr('href') == (
activity_url)
assert doc('.view-stats').length == 1
def get_l10n_urls(self):
paths = ('devhub.addons.edit', 'devhub.addons.profile',
'devhub.addons.owner')
return [reverse(p, args=['a3615']) for p in paths]
def test_l10n(self):
Addon.objects.get(id=3615).update(default_locale='en-US')
for url in self.get_l10n_urls():
r = self.client.get(url)
assert pq(r.content)('#l10n-menu').attr('data-default') == 'en-us'
def test_l10n_not_us(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
r = self.client.get(url)
assert pq(r.content)('#l10n-menu').attr('data-default') == 'fr'
def test_l10n_not_us_id_url(self):
Addon.objects.get(id=3615).update(default_locale='fr')
for url in self.get_l10n_urls():
url = '/id' + url[6:]
r = self.client.get(url)
assert pq(r.content)('#l10n-menu').attr('data-default') == 'fr'
class TestEditMedia(TestEdit):
def setUp(self):
super(TestEditMedia, self).setUp()
self.media_edit_url = self.get_url('media', True)
self.icon_upload = reverse('devhub.addons.upload_icon',
args=[self.addon.slug])
self.preview_upload = reverse('devhub.addons.upload_preview',
args=[self.addon.slug])
def formset_new_form(self, *args, **kw):
ctx = self.client.get(self.media_edit_url).context
blank = initial(ctx['preview_form'].forms[-1])
blank.update(**kw)
return blank
def formset_media(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.formset_new_form()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
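# The two helpers above assemble the POST payload for the previews formset:
# a blank form is read from the page context via initial(), overridden with
# any keyword arguments, and appended to the supplied forms under the
# 'files' prefix. A hedged sketch (illustrative values only) of the flat
# dict that results, using key names that the reorder test below relies on:
#   {'files-TOTAL_FORMS': 2, 'files-INITIAL_FORMS': 1,
#    'files-0-caption': 'hi', 'files-0-position': 0,
#    'files-0-upload_hash': '<hash from the upload response>', ...}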
def test_icon_upload_attributes(self):
doc = pq(self.client.get(self.media_edit_url).content)
field = doc('input[name=icon_upload]')
assert field.length == 1
assert sorted(field.attr('data-allowed-types').split('|')) == (
['image/jpeg', 'image/png'])
assert field.attr('data-upload-url') == self.icon_upload
def test_edit_media_defaulticon(self):
data = dict(icon_type='')
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
assert r.context['form'].errors == {}
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/default-64.png')
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_media_preuploadedicon(self):
data = dict(icon_type='icon/appearance')
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
assert r.context['form'].errors == {}
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/appearance-64.png')
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_media_uploadedicon(self):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
assert r.context['form'].errors == {}
addon = self.get_addon()
# Unfortunate hardcoding of URL
url = addon.get_icon_url(64)
assert ('addon_icons/3/%s' % addon.id) in url, (
'Unexpected path: %r' % url)
assert data['icon_type'] == 'image/png'
# Check that it was actually uploaded
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-32.png' % addon.id)
assert storage.exists(dest)
assert Image.open(storage.open(dest)).size == (32, 12)
def test_edit_media_icon_log(self):
self.test_edit_media_uploadedicon()
log = ActivityLog.objects.all()
assert log.count() == 1
assert log[0].action == amo.LOG.CHANGE_ICON.id
def test_edit_media_uploadedicon_noresize(self):
img = "static/img/notifications/error.png"
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
r = self.client.post(self.media_edit_url, data_formset)
assert r.context['form'].errors == {}
addon = self.get_addon()
# Unfortunate hardcoding of URL
addon_url = addon.get_icon_url(64).split('?')[0]
assert addon_url.endswith('addon_icons/3/%s-64.png' % addon.id), (
'Unexpected path: %r' % addon_url)
assert data['icon_type'] == 'image/png'
# Check that it was actually uploaded
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-64.png' % addon.id)
assert storage.exists(dest)
assert Image.open(storage.open(dest)).size == (48, 48)
def check_image_type(self, url, msg):
img = 'static/js/zamboni/devhub.js'
src_image = open(img, 'rb')
res = self.client.post(url, {'upload_image': src_image})
response_json = json.loads(res.content)
assert response_json['errors'][0] == msg
def test_edit_media_icon_wrong_type(self):
self.check_image_type(self.icon_upload,
'Icons must be either PNG or JPG.')
def test_edit_media_screenshot_wrong_type(self):
self.check_image_type(self.preview_upload,
'Images must be either PNG or JPG.')
def setup_image_status(self):
addon = self.get_addon()
self.icon_dest = os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id)
os.makedirs(os.path.dirname(self.icon_dest))
with storage.open(self.icon_dest, 'w') as f:
f.write('some icon data\n')
self.preview = addon.previews.create()
self.preview.save()
os.makedirs(os.path.dirname(self.preview.thumbnail_path))
with storage.open(self.preview.thumbnail_path, 'w') as f:
f.write('some icon data\n')
self.url = reverse('devhub.ajax.image.status', args=[addon.slug])
def test_image_status_no_choice(self):
addon = self.get_addon()
addon.update(icon_type='')
url = reverse('devhub.ajax.image.status', args=[addon.slug])
result = json.loads(self.client.get(url).content)
assert result['icons']
def test_image_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_fails(self):
self.setup_image_status()
storage.delete(self.icon_dest)
result = json.loads(self.client.get(self.url).content)
assert not result['icons']
def test_preview_status_works(self):
self.setup_image_status()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
# No previews means that all the images are done.
self.addon.previews.all().delete()
result = json.loads(self.client.get(self.url).content)
assert result['previews']
def test_preview_status_fails(self):
self.setup_image_status()
storage.delete(self.preview.thumbnail_path)
result = json.loads(self.client.get(self.url).content)
assert not result['previews']
def test_image_status_persona(self):
self.setup_image_status()
storage.delete(self.icon_dest)
self.get_addon().update(type=amo.ADDON_PERSONA)
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def test_image_status_default(self):
self.setup_image_status()
storage.delete(self.icon_dest)
self.get_addon().update(icon_type='icon/photos')
result = json.loads(self.client.get(self.url).content)
assert result['icons']
def check_image_animated(self, url, msg):
filehandle = open(get_image_path('animated.png'), 'rb')
res = self.client.post(url, {'upload_image': filehandle})
response_json = json.loads(res.content)
assert response_json['errors'][0] == msg
def test_icon_animated(self):
self.check_image_animated(self.icon_upload,
'Icons cannot be animated.')
def test_screenshot_animated(self):
self.check_image_animated(self.preview_upload,
'Images cannot be animated.')
def preview_add(self, amount=1):
img = get_image_path('mozilla.png')
src_image = open(img, 'rb')
data = dict(upload_image=src_image)
data_formset = self.formset_media(**data)
url = self.preview_upload
r = self.client.post(url, data_formset)
details = json.loads(r.content)
upload_hash = details['upload_hash']
# Create and post with the formset.
fields = []
for i in range(amount):
fields.append(self.formset_new_form(caption='hi',
upload_hash=upload_hash,
position=i))
data_formset = self.formset_media(*fields)
r = self.client.post(self.media_edit_url, data_formset)
def test_edit_media_preview_add(self):
self.preview_add()
assert str(self.get_addon().previews.all()[0].caption) == 'hi'
def test_edit_media_preview_edit(self):
self.preview_add()
preview = self.get_addon().previews.all()[0]
edited = {'caption': 'bye',
'upload_hash': '',
'id': preview.id,
'position': preview.position,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
assert str(self.get_addon().previews.all()[0].caption) == 'bye'
assert len(self.get_addon().previews.all()) == 1
def test_edit_media_preview_reorder(self):
self.preview_add(3)
previews = self.get_addon().previews.all()
base = dict(upload_hash='', file_upload=None)
# Three preview forms were generated; mix them up here.
a = dict(caption="first", position=1, id=previews[2].id)
b = dict(caption="second", position=2, id=previews[0].id)
c = dict(caption="third", position=3, id=previews[1].id)
a.update(base)
b.update(base)
c.update(base)
# Add them in backwards ("third", "second", "first")
data_formset = self.formset_media(c, b, a, initial_count=3)
assert data_formset['files-0-caption'] == 'third'
assert data_formset['files-1-caption'] == 'second'
assert data_formset['files-2-caption'] == 'first'
self.client.post(self.media_edit_url, data_formset)
# They should come out "first", "second", "third"
assert str(self.get_addon().previews.all()[0].caption) == 'first'
assert str(self.get_addon().previews.all()[1].caption) == 'second'
assert str(self.get_addon().previews.all()[2].caption) == 'third'
def test_edit_media_preview_delete(self):
self.preview_add()
preview = self.get_addon().previews.get()
edited = {'DELETE': 'checked',
'upload_hash': '',
'id': preview.id,
'position': 0,
'file_upload': None}
data_formset = self.formset_media(edited, initial_count=1)
self.client.post(self.media_edit_url, data_formset)
assert len(self.get_addon().previews.all()) == 0
def test_edit_media_preview_add_another(self):
self.preview_add()
self.preview_add()
assert len(self.get_addon().previews.all()) == 2
def test_edit_media_preview_add_two(self):
self.preview_add(2)
assert len(self.get_addon().previews.all()) == 2
class TestEditDetails(TestEdit):
def setUp(self):
super(TestEditDetails, self).setUp()
self.details_url = self.get_url('details')
self.details_edit_url = self.get_url('details', edit=True)
def test_edit(self):
data = dict(description='New description with <em>html</em>!',
default_locale='en-US',
homepage='http://twitter.com/fligtarsmom')
r = self.client.post(self.details_edit_url, data)
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_xss(self):
"""
Try to put XSS in our description, along with safe HTML, and verify
that it is escaped safely.
"""
self.addon.description = ("This\n<b>IS</b>"
"<script>alert('awesome')</script>")
self.addon.save()
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('#edit-addon-details span[lang]').html() == (
"This<br/><b>IS</b><script>alert('awesome')</script>")
def test_edit_homepage_optional(self):
data = dict(description='New description with <em>html</em>!',
default_locale='en-US', homepage='')
r = self.client.post(self.details_edit_url, data)
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_default_locale_required_trans(self):
# name, summary, and description are required in the new locale.
description, homepage = map(unicode, [self.addon.description,
self.addon.homepage])
# TODO: description should get fixed up with the form.
error = ('Before changing your default locale you must have a name, '
'summary, and description in that locale. '
'You are missing ')
d = dict(description=description, homepage=homepage,
default_locale='fr')
r = self.client.post(self.details_edit_url, d)
# We can't use assertFormError here, because the missing fields are
# stored in a dict, which isn't ordered.
form_error = r.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
assert "'name'" in form_error
assert "'summary'" in form_error
# Now we have a name.
self.addon.name = {'fr': 'fr name'}
self.addon.save()
r = self.client.post(self.details_edit_url, d)
form_error = r.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
assert "'summary'" in form_error
# Now we have a summary.
self.addon.summary = {'fr': 'fr summary'}
self.addon.save()
r = self.client.post(self.details_edit_url, d)
form_error = r.context['form'].non_field_errors()[0]
assert form_error.startswith(error)
assert "'description'" in form_error
# Now we're sending an fr description with the form.
d['description_fr'] = 'fr description'
r = self.client.post(self.details_edit_url, d)
assert r.context['form'].errors == {}
def test_edit_default_locale_frontend_error(self):
d = dict(description='xx', homepage='https://staticfil.es/',
default_locale='fr')
r = self.client.post(self.details_edit_url, d)
self.assertContains(r, 'Before changing your default locale you must')
def test_edit_locale(self):
addon = self.get_addon()
addon.update(default_locale='en-US')
r = self.client.get(self.details_url)
assert pq(r.content)('.addon_edit_locale').eq(0).text() == (
'English (US)')
class TestEditSupport(TestEdit):
def setUp(self):
super(TestEditSupport, self).setUp()
self.support_url = self.get_url('support')
self.support_edit_url = self.get_url('support', edit=True)
def test_edit_support(self):
data = dict(support_email='sjobs@apple.com',
support_url='http://apple.com/')
r = self.client.post(self.support_edit_url, data)
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_support_optional_url(self):
data = dict(support_email='sjobs@apple.com',
support_url='')
r = self.client.post(self.support_edit_url, data)
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
def test_edit_support_optional_email(self):
data = dict(support_email='',
support_url='http://apple.com/')
r = self.client.post(self.support_edit_url, data)
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
assert unicode(getattr(addon, k)) == data[k]
class TestEditTechnical(TestEdit):
fixtures = TestEdit.fixtures + ['addons/persona', 'base/addon_40',
'base/addon_1833_yoono',
'base/addon_4664_twitterbar.json',
'base/addon_5299_gcal', 'base/addon_6113']
def setUp(self):
super(TestEditTechnical, self).setUp()
self.dependent_addon = Addon.objects.get(id=5579)
AddonDependency.objects.create(addon=self.addon,
dependent_addon=self.dependent_addon)
self.technical_url = self.get_url('technical')
self.technical_edit_url = self.get_url('technical', edit=True)
ctx = self.client.get(self.technical_edit_url).context
self.dep = initial(ctx['dependency_form'].initial_forms[0])
self.dep_initial = formset(self.dep, prefix='dependencies',
initial_count=1)
def dep_formset(self, *args, **kw):
kw.setdefault('initial_count', 1)
kw.setdefault('prefix', 'dependencies')
return formset(self.dep, *args, **kw)
def formset(self, data):
return self.dep_formset(**data)
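# Hedged illustration (not from the original file) of what dep_formset()
# posts: the existing dependency form plus any extra forms, keyed under the
# 'dependencies' prefix, e.g.
#   {'dependencies-TOTAL_FORMS': 2, 'dependencies-INITIAL_FORMS': 1,
#    'dependencies-0-dependent_addon': 5579,
#    'dependencies-1-dependent_addon': 5299, ...}
# which is the shape of the data the add/delete tests below send to
# technical_edit_url.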
def test_log(self):
data = self.formset({'developer_comments': 'This is a test'})
o = ActivityLog.objects
assert o.count() == 0
r = self.client.post(self.technical_edit_url, data)
assert r.context['form'].errors == {}
assert o.filter(action=amo.LOG.EDIT_PROPERTIES.id).count() == 1
def test_technical_on(self):
# Turn everything on
data = dict(developer_comments='Test comment!',
external_software='on',
site_specific='on',
view_source='on',
whiteboard='Whiteboard info.')
r = self.client.post(self.technical_edit_url, self.formset(data))
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
assert unicode(getattr(addon, k)) == unicode(data[k])
elif k == 'whiteboard':
assert unicode(getattr(addon, k)) == unicode(data[k])
else:
assert getattr(addon, k) == (data[k] == 'on')
# And now turn everything off.
data = dict(developer_comments='Test comment!')
r = self.client.post(self.technical_edit_url, self.formset(data))
addon = self.get_addon()
assert not addon.external_software
assert not addon.site_specific
assert not addon.view_source
def test_technical_devcomment_notrequired(self):
data = dict(developer_comments='',
external_software='on',
site_specific='on',
view_source='on')
r = self.client.post(self.technical_edit_url, self.formset(data))
assert r.context['form'].errors == {}
addon = self.get_addon()
for k in data:
if k == 'developer_comments':
assert unicode(getattr(addon, k)) == unicode(data[k])
else:
assert getattr(addon, k) == (data[k] == 'on')
def test_auto_repackage_not_shown(self):
f = self.addon.current_version.all_files[0]
f.jetpack_version = None
f.save()
r = self.client.get(self.technical_edit_url)
self.assertNotContains(r, 'Upgrade SDK?')
def test_auto_repackage_shown(self):
f = self.addon.current_version.all_files[0]
f.jetpack_version = '1.0'
f.save()
r = self.client.get(self.technical_edit_url)
self.assertContains(r, 'Upgrade SDK?')
def test_dependencies_none(self):
AddonDependency.objects.all().delete()
assert list(self.addon.all_dependencies) == []
r = self.client.get(self.technical_url)
assert pq(r.content)('#required-addons .empty').length == 1
def test_dependencies_overview(self):
assert [d.id for d in self.addon.all_dependencies] == [5579]
r = self.client.get(self.technical_url)
req = pq(r.content)('#required-addons')
assert req.length == 1
assert req.attr('data-src') == (
reverse('devhub.ajax.dependencies', args=[self.addon.slug]))
assert req.find('li').length == 1
a = req.find('a')
assert a.attr('href') == self.dependent_addon.get_url_path()
assert a.text() == unicode(self.dependent_addon.name)
def test_dependencies_initial(self):
r = self.client.get(self.technical_edit_url)
form = pq(r.content)('#required-addons .dependencies li[data-addonid]')
assert form.length == 1
assert form.find('input[id$=-dependent_addon]').val() == (
str(self.dependent_addon.id))
div = form.find('div')
assert div.attr('style') == (
'background-image:url(%s)' % self.dependent_addon.icon_url)
a = div.find('a')
assert a.attr('href') == self.dependent_addon.get_url_path()
assert a.text() == unicode(self.dependent_addon.name)
def test_dependencies_add(self):
addon = Addon.objects.get(id=5299)
assert addon.type == amo.ADDON_EXTENSION
assert addon in list(Addon.objects.reviewed())
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
assert not any(r.context['dependency_form'].errors)
self.check_dep_ids([self.dependent_addon.id, addon.id])
r = self.client.get(self.technical_edit_url)
reqs = pq(r.content)('#required-addons .dependencies')
assert reqs.find('li[data-addonid]').length == 2
req = reqs.find('li[data-addonid=5299]')
assert req.length == 1
a = req.find('div a')
assert a.attr('href') == addon.get_url_path()
assert a.text() == unicode(addon.name)
def test_dependencies_limit(self):
deps = Addon.objects.reviewed().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))
args = []
assert deps.count() > 3 # The limit is 3.
for dep in deps:
args.append({'dependent_addon': dep.id})
d = self.dep_formset(*args)
r = self.client.post(self.technical_edit_url, d)
assert r.context['dependency_form'].non_form_errors() == (
['There cannot be more than 3 required add-ons.'])
def test_dependencies_limit_with_deleted_form(self):
deps = Addon.objects.reviewed().exclude(
Q(id__in=[self.addon.id, self.dependent_addon.id]) |
Q(type=amo.ADDON_PERSONA))[:3]
args = []
for dep in deps:
args.append({'dependent_addon': dep.id})
# If we delete one form and add three, everything should be A-OK.
self.dep['DELETE'] = True
d = self.dep_formset(*args)
r = self.client.post(self.technical_edit_url, d)
assert not any(r.context['dependency_form'].errors)
self.check_dep_ids(deps.values_list('id', flat=True))
def check_dep_ids(self, expected=[]):
a = AddonDependency.objects.values_list('dependent_addon__id',
flat=True)
assert sorted(list(a)) == sorted(expected)
def check_bad_dep(self, r):
"""This helper checks that bad dependency data doesn't go through."""
assert r.context['dependency_form'].errors[1]['dependent_addon'] == (
['Select a valid choice. That choice is not one of the available '
'choices.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_add_reviewed(self):
"""Ensure that reviewed add-ons can be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.REVIEWED_STATUSES:
addon.update(status=status)
assert addon in list(Addon.objects.reviewed())
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
assert not any(r.context['dependency_form'].errors)
self.check_dep_ids([self.dependent_addon.id, addon.id])
AddonDependency.objects.get(dependent_addon=addon).delete()
def test_dependencies_no_add_unreviewed(self):
"""Ensure that unreviewed add-ons cannot be made as dependencies."""
addon = Addon.objects.get(id=40)
for status in amo.UNREVIEWED_STATUSES:
addon.update(status=status)
assert addon not in list(Addon.objects.reviewed())
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_no_add_reviewed_persona(self):
"""Ensure that reviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
assert addon.type == amo.ADDON_PERSONA
assert addon in list(Addon.objects.reviewed())
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_no_add_unreviewed_persona(self):
"""Ensure that unreviewed Personas cannot be made as dependencies."""
addon = Addon.objects.get(id=15663)
addon.update(status=amo.STATUS_UNREVIEWED)
assert addon.status == amo.STATUS_UNREVIEWED
assert addon not in list(Addon.objects.reviewed())
d = self.dep_formset({'dependent_addon': addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_self(self):
"""Ensure that an add-on cannot be made dependent on itself."""
d = self.dep_formset({'dependent_addon': self.addon.id})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_invalid(self):
"""Ensure that a non-existent add-on cannot be a dependency."""
d = self.dep_formset({'dependent_addon': 9999})
r = self.client.post(self.technical_edit_url, d)
self.check_bad_dep(r)
def test_dependencies_add_duplicate(self):
"""Ensure that an add-on cannot be made dependent more than once."""
d = self.dep_formset({'dependent_addon': self.dependent_addon.id})
r = self.client.post(self.technical_edit_url, d)
assert r.context['dependency_form'].forms[1].non_field_errors() == (
['Addon dependency with this Addon and Dependent addon already '
'exists.'])
self.check_dep_ids([self.dependent_addon.id])
def test_dependencies_delete(self):
self.dep['DELETE'] = True
d = self.dep_formset(total_count=1, initial_count=1)
r = self.client.post(self.technical_edit_url, d)
assert not any(r.context['dependency_form'].errors)
self.check_dep_ids()
def test_dependencies_add_delete(self):
"""Ensure that we can both delete a dependency and add another."""
self.dep['DELETE'] = True
d = self.dep_formset({'dependent_addon': 5299})
r = self.client.post(self.technical_edit_url, d)
assert not any(r.context['dependency_form'].errors)
self.check_dep_ids([5299])
class TestAdmin(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def login_admin(self):
assert self.client.login(username='admin@mozilla.com',
password='password')
def login_user(self):
assert self.client.login(username='del@icio.us', password='password')
def test_show_admin_settings_admin(self):
self.login_admin()
url = reverse('devhub.addons.edit', args=['a3615'])
r = self.client.get(url)
assert r.status_code == 200
self.assertContains(r, 'Admin Settings')
assert 'admin_form' in r.context, 'AdminForm expected in context.'
def test_show_admin_settings_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.edit', args=['a3615'])
r = self.client.get(url)
assert r.status_code == 200
self.assertNotContains(r, 'Admin Settings')
assert 'admin_form' not in r.context, (
'AdminForm not expected in context.')
def test_post_as_admin(self):
self.login_admin()
url = reverse('devhub.addons.admin', args=['a3615'])
r = self.client.post(url)
assert r.status_code == 200
def test_post_as_nonadmin(self):
self.login_user()
url = reverse('devhub.addons.admin', args=['a3615'])
r = self.client.post(url)
assert r.status_code == 403
class TestThemeEdit(TestCase):
fixtures = ['base/user_999']
def setUp(self):
super(TestThemeEdit, self).setUp()
self.addon = addon_factory(type=amo.ADDON_PERSONA)
self.user = UserProfile.objects.get()
self.addon.addonuser_set.create(user=self.user)
@mock.patch('olympia.amo.messages.error')
def test_desc_too_long_error(self, message_mock):
data = {'description': 'a' * 501}
req = req_factory_factory(
self.addon.get_dev_url('edit'),
user=self.user, post=True, data=data)
r = edit_theme(req, self.addon.slug, self.addon)
doc = pq(r.content)
assert 'characters' in doc('#trans-description + ul li').text()
def test_no_reupload_on_pending(self):
self.addon.update(status=amo.STATUS_PENDING)
req = req_factory_factory(
self.addon.get_dev_url('edit'), user=self.user)
r = edit_theme(req, self.addon.slug, self.addon)
doc = pq(r.content)
assert not doc('a.reupload')
self.addon.update(status=amo.STATUS_PUBLIC)
req = req_factory_factory(
self.addon.get_dev_url('edit'), user=self.user)
r = edit_theme(req, self.addon.slug, self.addon)
doc = pq(r.content)
assert doc('a.reupload')
def test_color_input_is_empty_at_creation(self):
self.client.login(username='regular@mozilla.com', password='password')
r = self.client.get(reverse('devhub.themes.submit'))
doc = pq(r.content)
el = doc('input.color-picker')
assert el.attr('type') == 'text'
assert not el.attr('value')
def test_color_input_is_not_empty_at_edit(self):
color = "123456"
self.addon.persona.accentcolor = color
self.addon.persona.save()
self.client.login(username='regular@mozilla.com', password='password')
url = reverse('devhub.themes.edit', args=(self.addon.slug, ))
r = self.client.get(url)
doc = pq(r.content)
el = doc('input#id_accentcolor')
assert el.attr('type') == 'text'
assert el.attr('value') == "#" + color
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from tensorboard import context
from tensorboard import plugin_util
from tensorboard import test as tb_test
from tensorboard.backend import experiment_id
class SafeHTMLTest(tb_test.TestCase):
def test_empty_input(self):
self.assertEqual(plugin_util.safe_html(""), "")
def test_whitelisted_tags_and_attributes_allowed(self):
s = (
'Check out <a href="http://example.com" title="do it">'
"my website</a>!"
)
self.assertEqual(plugin_util.safe_html(s), "%s" % s)
def test_arbitrary_tags_and_attributes_removed(self):
self.assertEqual(
plugin_util.safe_html(
"We should bring back the <blink>blink tag</blink>; "
'<a name="bookmark" href="http://please-dont.com">'
"sign the petition!</a>"
),
"We should bring back the "
"<blink>blink tag</blink>; "
'<a href="http://please-dont.com">sign the petition!</a>',
)
def test_javascript_hrefs_sanitized(self):
self.assertEqual(
plugin_util.safe_html(
'A <a href="javascript:void0">sketchy link</a> for you'
),
"A <a>sketchy link</a> for you",
)
def test_byte_strings_interpreted_as_utf8(self):
s = "Look\u2014some UTF-8!".encode("utf-8")
assert isinstance(s, bytes), (type(s), bytes)
self.assertEqual(plugin_util.safe_html(s), "Look\u2014some UTF-8!")
def test_unicode_strings_passed_through(self):
s = "Look\u2014some UTF-8!"
assert not isinstance(s, bytes), (type(s), bytes)
self.assertEqual(plugin_util.safe_html(s), "Look\u2014some UTF-8!")
def test_null_bytes_stripped(self):
# If this function is mistakenly called with UTF-16 or UTF-32 encoded text,
# there will probably be a bunch of null bytes. Ensure these are stripped.
s = "un_der_score".encode("utf-32-le")
# UTF-32 encoding of ASCII will have 3 null bytes per char. 36 = 3 * 12.
self.assertEqual(
plugin_util.safe_html(s),
"un_der_score",
)
class MarkdownToSafeHTMLTest(tb_test.TestCase):
def _test(self, markdown_string, expected):
actual = plugin_util.markdown_to_safe_html(markdown_string)
self.assertEqual(expected, actual)
def test_empty_input(self):
self._test("", "")
def test_basic_formatting(self):
self._test(
"# _Hello_, **world!**\n\n"
"Check out [my website](http://example.com)!",
"<h1><em>Hello</em>, <strong>world!</strong></h1>\n"
'<p>Check out <a href="http://example.com">my website</a>!</p>',
)
def test_table_formatting(self):
self._test(
textwrap.dedent(
"""\
Here is some data:
TensorBoard usage | Happiness
------------------|----------
0.0 | 0.0
0.5 | 0.5
1.0 | 1.0
Wouldn't you agree?
"""
),
textwrap.dedent(
"""\
<p>Here is some data:</p>
<table>
<thead>
<tr>
<th>TensorBoard usage</th>
<th>Happiness</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0</td>
<td>0.0</td>
</tr>
<tr>
<td>0.5</td>
<td>0.5</td>
</tr>
<tr>
<td>1.0</td>
<td>1.0</td>
</tr>
</tbody>
</table>
<p>Wouldn't you agree?</p>
""".rstrip()
),
)
def test_whitelisted_tags_and_attributes_allowed(self):
s = (
'Check out <a href="http://example.com" title="do it">'
"my website</a>!"
)
self._test(s, "<p>%s</p>" % s)
def test_arbitrary_tags_and_attributes_removed(self):
self._test(
"We should bring back the <blink>blink tag</blink>; "
'<a name="bookmark" href="http://please-dont.com">'
"sign the petition!</a>",
"<p>We should bring back the "
"<blink>blink tag</blink>; "
'<a href="http://please-dont.com">sign the petition!</a></p>',
)
def test_javascript_hrefs_sanitized(self):
self._test(
'A <a href="javascript:void0">sketchy link</a> for you',
"<p>A <a>sketchy link</a> for you</p>",
)
def test_byte_strings_interpreted_as_utf8(self):
s = "> Look\u2014some UTF-8!".encode("utf-8")
assert isinstance(s, bytes), (type(s), bytes)
self._test(
s, "<blockquote>\n<p>Look\u2014some UTF-8!</p>\n</blockquote>"
)
def test_unicode_strings_passed_through(self):
s = "> Look\u2014some UTF-8!"
assert not isinstance(s, bytes), (type(s), bytes)
self._test(
s, "<blockquote>\n<p>Look\u2014some UTF-8!</p>\n</blockquote>"
)
def test_null_bytes_stripped_before_markdown_processing(self):
# If this function is mistakenly called with UTF-16 or UTF-32 encoded text,
# there will probably be a bunch of null bytes. These would be stripped by
# the sanitizer no matter what, but make sure we remove them before markdown
# interpretation to avoid affecting output (e.g. middle-word underscores
# would generate erroneous <em> tags like "un<em>der</em>score") and add an
# HTML comment with a warning.
s = "un_der_score".encode("utf-32-le")
# UTF-32 encoding of ASCII will have 3 null bytes per char. 36 = 3 * 12.
self._test(
s,
"<!-- WARNING: discarded 36 null bytes in markdown string "
"after UTF-8 decoding -->\n"
"<p>un_der_score</p>",
)
class MarkdownsToSafeHTMLTest(tb_test.TestCase):
# Most of the heavy lifting is tested by `MarkdownToSafeHTMLTest`.
def test_simple(self):
inputs = ["0", "*1*", "**2**"]
combine = lambda xs: "<br>".join(xs)
actual = plugin_util.markdowns_to_safe_html(inputs, combine)
expected = "<p>0</p><br><p><em>1</em></p><br><p><strong>2</strong></p>"
self.assertEqual(actual, expected)
def test_sanitizes_combination_result(self):
inputs = ["safe"]
combine = lambda xs: "<script>alert('unsafe!')</script>%s" % xs[0]
actual = plugin_util.markdowns_to_safe_html(inputs, combine)
expected = "<script>alert('unsafe!')</script><p>safe</p>"
self.assertEqual(actual, expected)
def test_sanitization_can_have_collateral_damage(self):
inputs = ['<table title="*chuckles* ', "I'm in danger", '<table>">']
combine = lambda xs: "".join(xs)
actual = plugin_util.markdowns_to_safe_html(inputs, combine)
expected = "<table></table>"
self.assertEqual(actual, expected)
class ContextTest(tb_test.TestCase):
def test_context(self):
ctx = context.RequestContext()
environ = {}
context.set_in_environ(environ, ctx)
self.assertEqual(context.from_environ(environ), ctx)
class ExperimentIdTest(tb_test.TestCase):
"""Tests for `plugin_util.experiment_id`."""
def test_default(self):
# This shouldn't happen; the `ExperimentIdMiddleware` always sets an
# experiment ID. In case something goes wrong, degrade gracefully.
environ = {}
self.assertEqual(plugin_util.experiment_id(environ), "")
def test_present(self):
environ = {experiment_id.WSGI_ENVIRON_KEY: "123"}
self.assertEqual(plugin_util.experiment_id(environ), "123")
if __name__ == "__main__":
tb_test.main()
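# A hedged usage sketch (not part of the original test module) of the two
# helpers exercised above, reusing strings and expected output taken from
# the tests themselves:
#
#   from tensorboard import plugin_util
#   plugin_util.safe_html('A <a href="javascript:void0">sketchy link</a> for you')
#   # -> 'A <a>sketchy link</a> for you'
#   plugin_util.markdown_to_safe_html("# _Hello_, **world!**")
#   # -> '<h1><em>Hello</em>, <strong>world!</strong></h1>'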
|
|
###############################################################################
# DehnenBarPotential: Dehnen (2000)'s bar potential
###############################################################################
import numpy
from ..util import conversion
from .Potential import Potential
_degtorad= numpy.pi/180.
class DehnenBarPotential(Potential):
"""Class that implements the Dehnen bar potential (`Dehnen 2000 <http://adsabs.harvard.edu/abs/2000AJ....119..800D>`__), generalized to 3D following `Monari et al. (2016) <http://adsabs.harvard.edu/abs/2016MNRAS.461.3835M>`__
.. math::
\\Phi(R,z,\\phi) = A_b(t)\\,\\cos\\left(2\\,(\\phi-\\Omega_b\\,t)\\right)\\,\\left(\\frac{R}{r}\\right)^2\\,\\times \\begin{cases}
-(R_b/r)^3\\,, & \\text{for}\\ r \\geq R_b\\\\
(r/R_b)^3-2\\,, & \\text{for}\\ r\\leq R_b.
\\end{cases}
where :math:`r^2 = R^2+z^2` is the spherical radius and
.. math::
A_b(t) = A_f\\,\\left(\\frac{3}{16}\\xi^5-\\frac{5}{8}\\xi^3+\\frac{15}{16}\\xi+\\frac{1}{2}\\right)\\,, \\xi = 2\\frac{t/T_b-t_\\mathrm{form}}{T_\\mathrm{steady}}-1\\,,\\ \\mathrm{if}\\ t_\\mathrm{form} \\leq \\frac{t}{T_b} \\leq t_\\mathrm{form}+T_\\mathrm{steady}
and
.. math::
A_b(t) = \\begin{cases}
0\\,, & \\frac{t}{T_b} < t_\\mathrm{form}\\\\
A_f\\,, & \\frac{t}{T_b} > t_\\mathrm{form}+T_\\mathrm{steady}
\\end{cases}
where
.. math::
T_b = \\frac{2\\pi}{\\Omega_b}
is the bar period and the strength can also be specified using :math:`\\alpha`
.. math::
\\alpha = 3\\,\\frac{A_f}{v_0^2}\\,\\left(\\frac{R_b}{r_0}\\right)^3\\,.
"""
normalize= property() # turn off normalize
def __init__(self,amp=1.,omegab=None,rb=None,chi=0.8,
rolr=0.9,barphi=25.*_degtorad,
tform=-4.,tsteady=None,beta=0.,
alpha=0.01,Af=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a Dehnen bar potential
INPUT:
amp - amplitude to be applied to the potential (default:
1., see alpha or Af below)
barphi - angle between sun-GC line and the bar's major axis
(in rad; default=25 degree; or can be Quantity)
tform - start of bar growth / bar period (default: -4)
tsteady - time from tform at which the bar is fully grown / bar period (default: -tform/2, such that the perturbation is fully grown at tform/2)
Either provide:
a) rolr - radius of the Outer Lindblad Resonance for a
circular orbit (can be Quantity)
chi - fraction R_bar / R_CR (corotation radius of bar)
alpha - relative bar strength (default: 0.01)
beta - power law index of rotation curve (to
calculate OLR, etc.)
b) omegab - rotation speed of the bar (can be Quantity)
rb - bar radius (can be Quantity)
Af - bar strength (can be Quantity)
OUTPUT:
(none)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
2017-06-23 - Converted to 3D following Monari et al. (2016) - Bovy (UofT/CCA)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo)
barphi= conversion.parse_angle(barphi)
rolr= conversion.parse_length(rolr,ro=self._ro)
rb= conversion.parse_length(rb,ro=self._ro)
omegab= conversion.parse_frequency(omegab,ro=self._ro,vo=self._vo)
Af= conversion.parse_energy(Af,vo=self._vo)
self.hasC= True
self.hasC_dxdv= True
self.isNonAxi= True
self._barphi= barphi
if omegab is None:
self._rolr= rolr
self._chi= chi
self._beta= beta
#Calculate omegab and rb
self._omegab= 1./((self._rolr**(1.-self._beta))/(1.+numpy.sqrt((1.+self._beta)/2.)))
self._rb= self._chi*self._omegab**(1./(self._beta-1.))
self._alpha= alpha
self._af= self._alpha/3./self._rb**3.
else:
self._omegab= omegab
self._rb= rb
self._af= Af
self._tb= 2.*numpy.pi/self._omegab
self._tform= tform*self._tb
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady*self._tb
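# Quick worked check (added for illustration, not in the original source):
# with the defaults rolr=0.9, chi=0.8, beta=0. and alpha=0.01 the first
# branch above gives
#   omegab = (1.+sqrt(0.5))/0.9 ~ 1.90
#   rb     = 0.8/omegab        ~ 0.42
#   af     = 0.01/3./rb**3     ~ 0.044
# and tform=-4. means the bar starts growing four bar periods before t=0.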
def _smooth(self,t):
if isinstance(t,numpy.ndarray):
smooth=numpy.ones(len(t))
indx=(t < self._tform)
smooth[indx]=0.
indx=(t < self._tsteady) * (t >= self._tform)
deltat=t[indx]-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth[indx]= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else:
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
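# The quintic above switches the bar on smoothly: at xi=-1 (t=tform) it
# evaluates to 3/16*(-1)**5 - 5/8*(-1)**3 + 15/16*(-1) + 1/2 = 0, at xi=+1
# (t=tsteady) it evaluates to 3/16 - 5/8 + 15/16 + 1/2 = 1, and its
# derivative 15/16*(xi**2 - 1)**2 vanishes at both endpoints, so A_b(t)
# grows from 0 to its full value without kinks.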
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r2= R**2.+z**2.
r= numpy.sqrt(r2)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= ((r[indx]/self._rb)**3.-2.)\
*numpy.divide(R[indx]**2.,r2[indx],numpy.ones_like(R[indx]),
where=R[indx]!=0)
indx=numpy.invert(indx)
out[indx]= -(self._rb/r[indx])**3.*1./(1.+z[indx]**2./R[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r == 0:
return -2.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
elif r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r2
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.\
*1./(1.+z**2./R**2.)
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= -((r[indx]/self._rb)**3.*R[indx]*(3.*R[indx]**2.+2.*z[indx]**2.)-4.*R[indx]*z[indx]**2.)/r[indx]**4.
indx= numpy.invert(indx)
out[indx]= -(self._rb/r[indx])**3.*R[indx]/r[indx]**4.*(3.*R[indx]**2.-2.*z[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.*R*(3.*R**2.+2.*z**2.)-4.*R*z**2.)/r**4.
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R/r**4.*(3.*R**2.-2.*z**2.)
def _phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r2= R**2.+z**2.
r= numpy.sqrt(r2)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= ((r[indx]/self._rb)**3.-2.)*R[indx]**2./r2[indx]
indx=numpy.invert(indx)
out[indx]= -(self._rb/r[indx])**3.*R[indx]**2./r2[indx]
out*=2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return 2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-
self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r2
else:
return -2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R**2./r2
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2017-06-23 - Written - Bovy (NYU)
"""
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= -((r[indx]/self._rb)**3.+4.)*R[indx]**2.*z[indx]/r[indx]**4.
indx=numpy.invert(indx)
out[indx]= -5.*(self._rb/r[indx])**3.*R[indx]**2.*z[indx]/r[indx]**4.
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.+4.)*R**2.*z/r**4.
else:
return -5.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R**2.*z/r**4.
def _R2deriv(self,R,z,phi=0.,t=0.):
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= ((r[indx]/self._rb)**3.*((9.*R[indx]**2.+2.*z[indx]**2.)/r[indx]**4.
-R[indx]**2./r[indx]**6.*(3.*R[indx]**2.+2.*z[indx]**2.))\
+4.*z[indx]**2./r[indx]**6.*(4.*R[indx]**2.-r[indx]**2.))
indx=numpy.invert(indx)
out[indx]= (self._rb/r[indx])**3./r[indx]**6.*((r[indx]**2.-7.*R[indx]**2.)*(3.*R[indx]**2.-2.*z[indx]**2.)\
+6.*R[indx]**2.*r[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.*((9.*R**2.+2.*z**2.)/r**4.
-R**2./r**6.*(3.*R**2.+2.*z**2.))\
+4.*z**2./r**6.*(4.*R**2.-r**2.))
else:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3./r**6.*((r**2.-7.*R**2.)*(3.*R**2.-2.*z**2.)\
+6.*R**2.*r**2.)
def _phi2deriv(self,R,z,phi=0.,t=0.):
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= -((r[indx]/self._rb)**3.-2.)*R[indx]**2./r[indx]**2.
indx=numpy.invert(indx)
out[indx]= (self._rb/r[indx])**3.*R[indx]**2./r[indx]**2.
out*=4.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return -4.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r**2.
else:
return 4.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R**2./r**2.
def _Rphideriv(self,R,z,phi=0.,t=0.):
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= ((r[indx]/self._rb)**3.*R[indx]*(3.*R[indx]**2.+2.*z[indx]**2.)-4.*R[indx]*z[indx]**2.)/r[indx]**4.
indx=numpy.invert(indx)
out[indx]= (self._rb/r[indx])**3.*R[indx]/r[indx]**4.*(3.*R[indx]**2.-2.*z[indx]**2.)
out*=-2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return -2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.*R*(3.*R**2.+2.*z**2.)-4.*R*z**2.)/r**4.
else:
return -2.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R/r**4.*(3.*R**2.-2.*z**2.)
def _z2deriv(self,R,z,phi=0.,t=0.):
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= R[indx]**2./r[indx]**6.*((r[indx]/self._rb)**3.*(r[indx]**2.-z[indx]**2.)
+4.*(r[indx]**2.-4.*z[indx]**2.))
indx=numpy.invert(indx)
out[indx]=5.*(self._rb/r[indx])**3.*R[indx]**2./r[indx]**6.*(r[indx]**2.-7.*z[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*R**2./r**6.*((r/self._rb)**3.*(r**2.-z**2.)
+4.*(r**2.-4.*z**2.))
else:
return 5.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R**2./r**6.*(r**2.-7.*z**2.)
def _Rzderiv(self,R,z,phi=0.,t=0.):
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= R[indx]*z[indx]/r[indx]**6.*((r[indx]/self._rb)**3.*(2.*r[indx]**2.-R[indx]**2.)
+8.*(r[indx]**2.-2.*R[indx]**2.))
indx=numpy.invert(indx)
out[indx]= 5.*(self._rb/r[indx])**3.*R[indx]*z[indx]/r[indx]**6.*(2.*r[indx]**2.-7.*R[indx]**2.)
out*=self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))
return out
else:
if r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t
-self._barphi))\
*R*z/r**6.*((r/self._rb)**3.*(2.*r**2.-R**2.)
+8.*(r**2.-2.*R**2.))
else:
return 5.*self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R*z/r**6.*(2.*r**2.-7.*R**2.)
def _phizderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_phizderiv
PURPOSE:
evaluate the mixed azimuthal, vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed azimuthal, vertical derivative
HISTORY:
2021-04-30 - Written - Bovy (UofT)
"""
#Calculate relevant time
smooth=self._smooth(t)
r= numpy.sqrt(R**2.+z**2.)
if isinstance(r,numpy.ndarray):
if not isinstance(R,numpy.ndarray):
R=numpy.repeat(R,len(r))
if not isinstance(z,numpy.ndarray):
z=numpy.repeat(z,len(r))
out=numpy.empty(len(r))
indx= r <= self._rb
out[indx]= -((r[indx]/self._rb)**3.+4.)*R[indx]**2.*z[indx]/r[indx]**4.
indx=numpy.invert(indx)
out[indx]= -5.*(self._rb/r[indx])**3.*R[indx]**2.*z[indx]/r[indx]**4.
out*=self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-self._barphi))
return 2.*out
else:
if r <= self._rb:
return -2*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t
-self._barphi))\
*((r/self._rb)**3.+4.)*R**2.*z/r**4.
else:
return -10.*self._af*smooth*numpy.sin(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.*R**2.*z/r**4.
def tform(self): #pragma: no cover
"""
NAME:
tform
PURPOSE:
return formation time of the bar
INPUT:
(none)
OUTPUT:
tform in normalized units
HISTORY:
2011-03-08 - Written - Bovy (NYU)
"""
return self._tform
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
return the pattern speed
INPUT:
(none)
OUTPUT:
pattern speed
HISTORY:
2011-10-10 - Written - Bovy (IAS)
"""
return self._omegab
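# --- Hedged usage sketch (illustrative only; not part of the original
# module). Assuming this class is importable as in galpy, the bar can be
# specified either through (rolr, chi, alpha, beta) or directly through
# (omegab, rb, Af), as described in the __init__ docstring above.
if __name__ == '__main__':  # pragma: no cover
    dp_default = DehnenBarPotential()  # rolr/chi/alpha/beta route
    dp_explicit = DehnenBarPotential(omegab=1.85, rb=0.4, Af=0.0015)
    # Evaluate the potential at R=0.9, z=0.05, phi=0.3 well after the bar
    # has fully grown (t in internal units; before tform the amplitude is 0).
    print(dp_default(0.9, 0.05, phi=0.3, t=10.))
    print(dp_explicit(0.9, 0.05, phi=0.3, t=10.))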
|
|
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseNotFound
from django.views.decorators.csrf import csrf_exempt
from uwsgi_it_api.decorators import need_basicauth, api_auth
from uwsgi_it_api.utils import spit_json, check_body, dns_check
from uwsgi_it_api.models import *
from uwsgi_it_api.config import UWSGI_IT_BASE_UID
import json
import datetime
import time
import uuid
@need_basicauth
@csrf_exempt
def portmappings(request, ip):
customer = request.user.customer
try:
server = Server.objects.get(address=ip,owner=customer)
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
if not j:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
pm = Portmap()
try:
pm.proto = j['proto']
pm.public_port = int(j['public_port'])
pm.private_port = int(j['private_port'])
pm.container = server.container_set.get(pk=(int(j['container']) - UWSGI_IT_BASE_UID), customer=customer)
pm.full_clean()
pm.save()
except:
import sys
print sys.exc_info()
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
return response
elif request.method == 'DELETE':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
if not j:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
try:
pm = Portmap.objects.get(pk=j['id'], container__server=server)
pm.delete()
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
mappings = []
for portmap in Portmap.objects.filter(container__server=server):
mappings.append({'id': portmap.pk,
'proto': portmap.proto,
'public_port': portmap.public_port,
'container': portmap.container.uid,
'container_ip': str(portmap.container.ip),
'private_port': portmap.private_port,
})
return spit_json(request, mappings)
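# Hedged example (illustrative values) of the JSON body the POST branch
# above expects; 'container' is the public uid, from which
# UWSGI_IT_BASE_UID is subtracted to find the row, and the container must
# belong to the authenticated customer:
#   {"proto": "tcp", "public_port": 2222,
#    "private_port": 22, "container": 30010}
# DELETE instead takes {"id": <portmap pk>}.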
@need_basicauth
@csrf_exempt
def container(request, id):
customer = request.user.customer
try:
container = customer.container_set.get(pk=(int(id) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'POST':
response = check_body(request)
if response:
return response
allowed_keys = (
'name', 'note', 'quota_threshold', 'jid', 'jid_secret',
'jid_destinations', 'nofollow', 'pushover_user',
'pushover_token', 'pushover_sound', 'alarm_freq',
'pushbullet_token', 'slack_webhook',
'custom_distros_storage',
)
j = json.loads(request.read())
if not j:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
for key in j:
if key in allowed_keys:
setattr(container, key, j[key])
if 'ssh_keys' in j:
container.ssh_keys_raw = '\n'.join(j['ssh_keys'])
container.ssh_keys_mtime = datetime.datetime.now()
if 'distro' in j:
container.distro = Distro.objects.get(pk=j['distro'])
if 'custom_distro' in j:
container.custom_distro = CustomDistro.objects.filter(pk=j['custom_distro'], container__server=container.server, container__customer=customer).exclude(container=container)[0]
if 'memory' in j:
if container.server.owner == customer:
container.memory = int(j['memory'])
if 'storage' in j:
if container.server.owner == customer:
container.storage = int(j['storage'])
if 'tags' in j:
new_tags = []
for tag in j['tags']:
try:
new_tags.append(Tag.objects.get(customer=customer, name=tag))
except:
pass
container.tags = new_tags
# linking and unlinking requires reboot
if 'link' in j:
try:
link = ContainerLink()
link.container = container
link.to = Container.objects.get(pk=(int(j['link']) - UWSGI_IT_BASE_UID))
link.full_clean()
link.save()
container.last_reboot = datetime.datetime.now()
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
if 'unlink' in j:
try:
link = container.containerlink_set.get(to=(int(j['unlink']) - UWSGI_IT_BASE_UID))
link.delete()
container.last_reboot = datetime.datetime.now()
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
if 'reboot' in j:
container.last_reboot = datetime.datetime.now()
container.full_clean()
container.save()
c = {
'uid': container.uid,
'name': container.name,
'hostname': container.hostname,
'ip': str(container.ip),
'memory': container.memory,
'storage': container.storage,
'uuid': container.uuid,
'distro': None,
'distro_name': None,
'server': container.server.name,
'server_address': container.server.address,
'jid': container.jid,
'jid_destinations': container.jid_destinations,
'pushover_user': container.pushover_user,
'pushover_token': container.pushover_token,
'pushover_sound': container.pushover_sound,
'pushbullet_token': container.pushbullet_token,
'slack_webhook': container.slack_webhook,
'alarm_freq': container.alarm_freq,
'quota_threshold': container.quota_threshold,
'nofollow': container.nofollow,
'note': container.note,
'linked_to': container.linked_to,
'custom_distros_storage': container.custom_distros_storage,
'custom_distro': None,
'ssh_keys': container.ssh_keys,
'tags': [t.name for t in container.tags.all()],
'legion_address': [l.address for l in container.server.legion_set.all()]
}
if container.distro:
c['distro'] = container.distro.pk
c['distro_name'] = container.distro.name
if container.custom_distro:
c['custom_distro'] = container.custom_distro.pk
c['custom_distro_name'] = container.custom_distro.name
return spit_json(request, c)
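# Illustrative request sketch (URL, host, credentials and values are placeholders,
# and the route-to-view mapping is an assumption): the container view accepts a
# JSON body whose keys are a subset of `allowed_keys` plus the explicitly handled
# keys ('ssh_keys', 'distro', 'custom_distro', 'memory', 'storage', 'tags',
# 'link', 'unlink', 'reboot'), with the container uid (pk + UWSGI_IT_BASE_UID)
# in the URL, e.g.
#
#   curl -X POST -u user:pass https://<api-host>/containers/<uid> \
#        -d '{"name": "web01", "quota_threshold": 90,
#             "ssh_keys": ["ssh-rsa AAAA... user@host"],
#             "tags": ["production"], "reboot": true}'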
def news(request):
news_list = []
user = api_auth(request)
if user:
for n in News.objects.all()[0:10]:
news_list.append({'content': n.content,
'date': int(time.mktime(n.ctime.timetuple()))})
else:
for n in News.objects.filter(public=True)[0:10]:
news_list.append({'content': n.content,
'date': int(time.mktime(n.ctime.timetuple()))})
return spit_json(request, news_list)
@need_basicauth
@csrf_exempt
def me(request):
customer = request.user.customer
if request.method == 'POST':
response = check_body(request)
if response:
return response
allowed_keys = ('vat', 'company')
j = json.loads(request.read())
for key in j:
if key in allowed_keys:
setattr(customer, key, j[key])
if 'password' in j:
customer.user.set_password(j['password'])
customer.user.save()
if 'email' in j:
customer.user.email = j['email']
customer.user.save()
customer.save()
c = {
'email': customer.user.email,
'vat': customer.vat,
'company': customer.company,
'uuid': customer.uuid,
'containers': [cc.uid for cc in customer.container_set.all()],
'servers': [s.address for s in customer.server_set.all()],
}
return spit_json(request, c)
@need_basicauth
@csrf_exempt
def containers(request):
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
needed_keys = ('server', 'name', 'memory', 'storage')
for k in needed_keys:
if not k in j.keys():
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
try:
server = Server.objects.get(address=j['server'])
if server.owner != request.user.customer:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
        if int(j['memory']) > server.free_memory or int(j['memory']) <= 0:
            response = HttpResponse(json.dumps({'error': 'Conflict', 'reason': 'not enough memory'}), content_type="application/json")
            response.status_code = 409
            return response
        if int(j['storage']) > server.free_storage or int(j['storage']) <= 0:
            response = HttpResponse(json.dumps({'error': 'Conflict', 'reason': 'not enough storage'}), content_type="application/json")
            response.status_code = 409
            return response
try:
container = Container(customer=request.user.customer, server=server)
container.name = j['name']
container.memory = int(j['memory'])
container.storage = int(j['storage'])
container.save()
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
return response
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
elif (request.method == 'GET' and
'tags' in request.GET):
containers = request.user.customer.container_set.filter(tags__name__in=request.GET['tags'].split(','))
else:
containers = request.user.customer.container_set.all()
c = []
for container in containers:
cc = {
'uid': container.uid,
'name': container.name,
'hostname': container.hostname,
'ip': str(container.ip),
'memory': container.memory,
'storage': container.storage,
'uuid': container.uuid,
'distro': None,
'distro_name': None,
'custom_distro': None,
'custom_distro_name': None,
'server': container.server.name,
'server_address': container.server.address,
'tags': [t.name for t in container.tags.all()]
}
if container.distro:
cc['distro'] = container.distro.pk
cc['distro_name'] = container.distro.name
if container.custom_distro:
cc['custom_distro'] = container.custom_distro.pk
cc['custom_distro_name'] = container.custom_distro.name
c.append(cc)
return spit_json(request, c)
@need_basicauth
@csrf_exempt
def loopboxes(request):
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
needed_keys = ('container', 'filename', 'mountpoint')
for k in needed_keys:
if not k in j.keys():
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
try:
container = request.user.customer.container_set.get(pk=(int(j['container']) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
try:
loopbox = Loopbox(container=container)
loopbox.filename = j['filename']
loopbox.mountpoint = j['mountpoint']
if 'ro' in j:
loopbox.ro = j['ro']
loopbox.save()
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
return response
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
elif request.method == 'GET':
query = {}
if 'tags' in request.GET:
query['tags__name__in'] = request.GET['tags'].split(',')
if 'container' in request.GET:
try:
query['container'] = request.user.customer.container_set.get(pk=(int(request.GET['container']) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
else:
query['container__in'] = request.user.customer.container_set.all()
loopboxes = Loopbox.objects.filter(**query)
else:
loopboxes = Loopbox.objects.filter(container__in=request.user.customer.container_set.all())
l = []
for loopbox in loopboxes:
ll = {
'id': loopbox.pk,
'container': loopbox.container.uid,
'filename': loopbox.filename,
'mountpoint': loopbox.mountpoint,
'ro': loopbox.ro,
'tags': [t.name for t in loopbox.tags.all()]
}
l.append(ll)
return spit_json(request, l)
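# Illustrative request sketch (URL, credentials and values are placeholders, and
# the route-to-view mapping is an assumption): creating a loopbox requires the
# 'container' uid, 'filename' and 'mountpoint' keys, with an optional 'ro'
# flag, e.g.
#
#   curl -X POST -u user:pass https://<api-host>/loopboxes \
#        -d '{"container": 30001, "filename": "data.img",
#             "mountpoint": "/data", "ro": false}'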
@need_basicauth
@csrf_exempt
def alarms(request):
query = {}
if 'container' in request.GET:
try:
query['container'] = request.user.customer.container_set.get(pk=(int(request.GET['container']) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
else:
query['container__in'] = request.user.customer.container_set.all()
if 'vassal' in request.GET:
query['vassal'] = request.GET['vassal']
if 'class' in request.GET:
query['_class'] = request.GET['class']
if 'color' in request.GET:
query['color'] = request.GET['color']
if 'level' in request.GET:
query['level'] = int(request.GET['level'])
if 'line' in request.GET:
query['line'] = int(request.GET['line'])
if 'filename' in request.GET:
query['filename'] = request.GET['filename']
if 'func' in request.GET:
query['func'] = request.GET['func']
alarms = Alarm.objects.filter(**query)
a = []
if 'with_total' in request.GET:
response = {'total': alarms.count(), 'alarms': a}
else:
response = a
if 'range' in request.GET:
to = request.GET['range']
        try:
            if '-' in to:
                _from, to = to.split('-')
            else:
                _from = 0
            _from, to = int(_from), int(to)
            alarms = alarms[min(_from, to):max(_from, to)]
except:
response = HttpResponse(json.dumps({'error': 'Requested Range Not Satisfiable'}), content_type="application/json")
response.status_code = 416
return response
if _from > to:
alarms = alarms.reverse()
for alarm in alarms:
aa = {
'id': alarm.pk,
'container': alarm.container.uid,
'level': alarm.level,
'color': alarm.color,
'class': alarm._class,
'vassal': alarm.vassal,
'line': alarm.line,
'filename': alarm.filename,
'func': alarm.func,
'unix': int(alarm.unix.strftime('%s')),
'msg': alarm.msg
}
a.append(aa)
return spit_json(request, response)
@need_basicauth
@csrf_exempt
def loopbox(request, id):
customer = request.user.customer
try:
loopbox = Loopbox.objects.get(pk=id, container__in=customer.container_set.all())
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
if not j:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if 'tags' in j:
new_tags = []
for tag in j['tags']:
try:
new_tags.append(Tag.objects.get(customer=customer, name=tag))
except:
pass
loopbox.tags = new_tags
loopbox.save()
elif request.method == 'DELETE':
loopbox.delete()
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
l = {
'id': loopbox.pk,
'container': loopbox.container.uid,
'filename': loopbox.filename,
'mountpoint': loopbox.mountpoint,
'ro': loopbox.ro,
'tags': [t.name for t in loopbox.tags.all()]
}
return spit_json(request, l)
@need_basicauth
@csrf_exempt
def alarm(request, id):
customer = request.user.customer
try:
alarm = Alarm.objects.get(pk=id, container__in=customer.container_set.all())
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'DELETE':
alarm.delete()
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
a = {
'id': alarm.pk,
'container': alarm.container.uid,
'level': alarm.level,
'color': alarm.color,
'class': alarm._class,
'line': alarm.line,
'filename': alarm.filename,
'func': alarm.func,
'vassal': alarm.vassal,
'unix': int(alarm.unix.strftime('%s')),
'msg': alarm.msg
}
return spit_json(request, a)
@need_basicauth
@csrf_exempt
def custom_distro(request, id):
customer = request.user.customer
try:
distro = CustomDistro.objects.get(pk=id, container__customer=customer)
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'DELETE':
distro.delete()
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
        allowed_fields = ('name', 'path', 'note')
for field in allowed_fields:
if field in j:
setattr(distro, field, j[field])
distro.full_clean()
distro.save()
d = {
'id': distro.pk,
'container': distro.container.uid,
'name': distro.name,
'path': distro.path,
'note': distro.note,
'uuid': distro.uuid,
}
return spit_json(request, d)
@need_basicauth
@csrf_exempt
def alarm_key(request, id):
customer = request.user.customer
try:
container = customer.container_set.get(pk=(int(id) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
container.alarm_key = str(uuid.uuid4())
container.save()
return HttpResponse(json.dumps({'message': 'Ok', 'alarm_key': container.alarm_key}), content_type="application/json")
def alarm_key_auth(request, id):
if not 'key' in request.GET:
return None
key = request.GET['key']
if len(key) != 36:
return None
try:
container = Container.objects.get(pk=(int(id) - UWSGI_IT_BASE_UID), alarm_key=key)
user = container.customer.user
user.backend = 'django.contrib.auth.backends.ModelBackend'
return user
except:
pass
return None
@need_basicauth(fallback=alarm_key_auth)
@csrf_exempt
def raise_alarm(request, id):
customer = request.user.customer
try:
container = customer.container_set.get(pk=(int(id) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'POST':
response = check_body(request)
if response:
return response
alarm = Alarm(container=container, level=1)
if 'color' in request.GET:
color = request.GET['color']
if not color.startswith('#'):
color = '#' + color
alarm.color = color
alarm._class = request.GET.get('class', None)
alarm.vassal = request.GET.get('vassal', None)
alarm.line = request.GET.get('line', None)
alarm.func = request.GET.get('func', None)
alarm.filename = request.GET.get('filename', None)
# user alarm by default
alarm.level = 1
if 'level' in request.GET:
alarm.level = int(request.GET['level'])
if alarm.level < 1:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if 'unix' in request.GET:
alarm.unix = datetime.datetime.fromtimestamp(int(request.GET['unix']))
else:
alarm.unix = datetime.datetime.now()
alarm.msg = request.read()
try:
alarm.save()
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
return response
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
response = HttpResponse(json.dumps({'error': 'Method not allowed'}), content_type="application/json")
response.status_code = 405
return response
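# Illustrative request sketch (URL and values are placeholders, and the
# route-to-view mapping is an assumption): the alarm message goes in the POST
# body, the metadata in the query string, and the ?key= parameter lets a
# container authenticate with its alarm_key (see alarm_key_auth above) instead
# of HTTP basic auth, e.g.
#
#   curl -X POST -d 'disk usage above threshold' \
#        "https://<api-host>/alarms/raise/<uid>?key=<alarm_key>&class=disk&color=ff0000&level=2"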
@need_basicauth
def distros(request):
j = [{'id': d.pk, 'name': d.name} for d in Distro.objects.all()]
return spit_json(request, j)
@need_basicauth
@csrf_exempt
def custom_distros(request, id=None):
customer = request.user.customer
if not id:
j = [{'id': d.pk, 'name': d.name, 'container': d.container.uid} for d in CustomDistro.objects.filter(container__customer=customer)]
return spit_json(request, j)
try:
container = customer.container_set.get(pk=(int(id) - UWSGI_IT_BASE_UID))
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
if request.method == 'POST':
if not container.custom_distros_storage:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
response = check_body(request)
if response:
return response
j = json.loads(request.read())
distro = CustomDistro(container=container)
allowed_fields = ('name', 'path', 'note')
for field in allowed_fields:
if field in j:
setattr(distro, field, j[field])
try:
distro.full_clean()
distro.save()
except:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
return response
j = [{'id': d.pk, 'name': d.name} for d in CustomDistro.objects.filter(container__server=container.server,container__customer=customer).exclude(container=container)]
return spit_json(request, j)
@need_basicauth
@csrf_exempt
def domains(request):
customer = request.user.customer
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
if Domain.objects.filter(name=j['name']):
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
if dns_check(j['name'], customer.uuid):
try:
customer.domain_set.create(name=j['name'])
response = HttpResponse(json.dumps({'message': 'Created'}), content_type="application/json")
response.status_code = 201
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
else:
return HttpResponseForbidden(json.dumps({'error': 'Forbidden'}), content_type="application/json")
elif request.method == 'DELETE':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
try:
customer.domain_set.get(name=j['name']).delete()
except Domain.DoesNotExist:
return HttpResponseNotFound(json.dumps({'error': 'Not found'}), content_type="application/json")
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
elif request.method == 'GET':
if 'tags' in request.GET:
j = [{'id': d.pk, 'name': d.name, 'uuid': d.uuid, 'tags': [t.name for t in d.tags.all()]} for d in
customer.domain_set.filter(tags__name__in=request.GET['tags'].split(','))]
else:
j = [{'id': d.pk, 'name': d.name, 'uuid': d.uuid, 'tags': [t.name for t in d.tags.all()]} for d in
customer.domain_set.all()]
return spit_json(request, j)
response = HttpResponse(json.dumps({'error': 'Method not allowed'}), content_type="application/json")
response.status_code = 405
return response
@need_basicauth
@csrf_exempt
def tags(request):
customer = request.user.customer
allowed_keys = ('name', 'note')
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
tag = Tag(customer=customer)
for key in allowed_keys:
if key in j:
setattr(tag, key, j[key])
try:
tag.save()
j = {'id': tag.pk, 'name': tag.name, 'note': tag.note}
response = spit_json(request, j)
response.status_code = 201
response.reason_phrase = 'Created'
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
elif request.method == 'GET':
j = [{'id': t.pk, 'name': t.name} for t in Tag.objects.filter(customer=customer)]
return spit_json(request, j)
response = HttpResponse(json.dumps({'error': 'Method not allowed'}), content_type="application/json")
response.status_code = 405
return response
@need_basicauth
@csrf_exempt
def tag(request, id):
customer = request.user.customer
try:
t = Tag.objects.get(customer=customer, pk=id)
except:
return HttpResponseNotFound(json.dumps({'error': 'Not found'}), content_type="application/json")
allowed_keys = ('name', 'note')
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
for key in allowed_keys:
if key in j:
setattr(t, key, j[key])
try:
t.save()
j = {'id': t.pk, 'name': t.name, 'note': t.note}
return spit_json(request, j)
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
elif request.method == 'GET':
j = {'id': t.pk, 'name': t.name, 'note': t.note}
return spit_json(request, j)
elif request.method == 'DELETE':
t.delete()
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
response = HttpResponse(json.dumps({'error': 'Method not allowed'}), content_type="application/json")
response.status_code = 405
return response
@need_basicauth
@csrf_exempt
def domain(request, id):
customer = request.user.customer
try:
domain = customer.domain_set.get(pk=id)
except:
return HttpResponseNotFound(json.dumps({'error': 'Not found'}), content_type="application/json")
allowed_keys = ('note',)
if request.method == 'POST':
response = check_body(request)
if response:
return response
j = json.loads(request.read())
for key in allowed_keys:
if key in j:
setattr(domain, key, j[key])
if 'tags' in j:
new_tags = []
for tag in j['tags']:
try:
new_tags.append(Tag.objects.get(customer=customer, name=tag))
except:
pass
domain.tags = new_tags
try:
domain.save()
j = {'id': domain.pk, 'name': domain.name, 'uuid': domain.uuid, 'tags': [t.name for t in domain.tags.all()],
'note': domain.note}
return spit_json(request, j)
except:
response = HttpResponse(json.dumps({'error': 'Conflict'}), content_type="application/json")
response.status_code = 409
return response
elif request.method == 'DELETE':
domain.delete()
return HttpResponse(json.dumps({'message': 'Ok'}), content_type="application/json")
elif request.method == 'GET':
j = {'id': domain.pk, 'name': domain.name, 'uuid': domain.uuid, 'tags': [t.name for t in domain.tags.all()],
'note': domain.note}
return spit_json(request, j)
response = HttpResponse(json.dumps({'error': 'Method not allowed'}), content_type="application/json")
response.status_code = 405
return response
|
|
import copy
import warnings
from .compat import json
from .lib import is_sequence, is_mapping, is_integer, is_number, is_numpy_array, is_string
class ListOf(object):
def __init__(self, typ):
self.type = typ
def __call__(self, value):
return [self.type(v) for v in value]
class MapOf(object):
def __init__(self, value_type, key_type=str):
self.value_type = value_type
self.key_type = key_type
def __call__(self, value):
d = {self.key_type(k):self.value_type(v) for k, v in value.items()}
return d
def DictField(name, type=str):
return lambda value: type(value[name])
Any = lambda x: x
class Field(object):
def __init__(self, typ):
self.type = typ
def __call__(self, value):
if value is None:
return None
return self.type(value)
class DeprecatedField(Field):
def __init__(self, typ, recommendation=None):
super().__init__(typ)
self.recommendation = (' ' + recommendation) if recommendation else ''
def __call__(self, value):
warnings.warn(
'This field has been deprecated and may be removed in a future version.{0}'.format(self.recommendation),
DeprecationWarning,
)
return super().__call__(value)
class BaseApiObject(object):
def __getattribute__(self, name):
value = object.__getattribute__(self, name)
if isinstance(value, Field):
return value(self._body.get(name))
return value
def __setattr__(self, name, value):
field = self._get_field(name)
if field:
value = ApiObject.as_json(value)
self._body[name] = value
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
field = self._get_field(name)
if field:
del self._body[name]
else:
object.__delattr__(self, name)
def _get_field(self, name):
try:
subvalue = object.__getattribute__(self, name)
except AttributeError:
return None
else:
return subvalue if isinstance(subvalue, Field) else None
def _repr_keys(self):
attributes = dir(self)
attributes = [a for a in attributes if not a.startswith('_')]
attributes = [a for a in attributes if not isinstance(getattr(self.__class__, a), DeprecatedField)]
attributes = [a for a in attributes if not callable(getattr(self, a))]
keys_in_json = set(ApiObject.as_json(self._body).keys())
return keys_in_json.intersection(set(attributes))
@staticmethod
def _emit_repr(object_name, values_mapping):
if values_mapping:
return '{0}(\n{1}\n)'.format(
object_name,
'\n'.join([
' {}={},'.format(key, ApiObject.dumps(value, indent_level=2).lstrip())
for key, value
in values_mapping.items()
]),
)
return '{0}()'.format(object_name)
def __repr__(self):
keys = self._repr_keys()
values = {key: getattr(self, key) for key in keys}
return BaseApiObject._emit_repr(self.__class__.__name__, values)
def to_json(self):
return copy.deepcopy(self._body)
class ApiObject(BaseApiObject):
def __init__(self, body, bound_endpoint=None, retrieve_params=None):
super().__init__()
object.__setattr__(self, '_body', body)
object.__setattr__(self, '_bound_endpoint', bound_endpoint)
object.__setattr__(self, '_retrieve_params', retrieve_params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self._body == other._body
)
@staticmethod
def as_json(obj):
if isinstance(obj, BaseApiObject):
return obj.to_json()
if is_mapping(obj):
c = {}
for key in obj:
c[key] = ApiObject.as_json(obj[key])
return c
if is_numpy_array(obj):
return ApiObject.as_json(obj.tolist())
if is_sequence(obj):
return [ApiObject.as_json(c) for c in obj]
if is_integer(obj):
return int(obj)
if is_number(obj):
return float(obj)
return obj
@staticmethod
def dumps(obj, indent_level=0):
indent = ' ' * indent_level
if isinstance(obj, BaseApiObject):
return '{0}{1}'.format(indent, str(obj).replace('\n', '\n{0}'.format(indent)))
if is_mapping(obj):
if obj:
return '{0}{{\n{1},\n{0}}}'.format(
indent,
',\n'.join([
' {0}"{1}"={2}'.format(
indent,
key,
ApiObject.dumps(obj[key], indent_level=indent_level + 2).lstrip()
)
for key
in obj
])
)
return '{0}{1}'.format(indent, str(obj))
if is_numpy_array(obj):
return ApiObject.dumps(obj.tolist(), indent_level=indent_level)
if is_sequence(obj):
if obj:
return '{0}[\n{1},\n{0}]'.format(
indent,
',\n'.join([
ApiObject.dumps(c, indent_level=indent_level + 2)
for c
in obj
])
)
return '{0}{1}'.format(indent, str(obj))
if is_integer(obj):
return '{0}{1}'.format(indent, str(int(obj)))
if is_number(obj):
return '{0}{1}'.format(indent, str(float(obj)))
if is_string(obj):
return '{0}"{1}"'.format(indent, obj)
return '{0}{1}'.format(indent, obj)
class _DictWrapper(BaseApiObject, dict):
def __init__(self, body, bound_endpoint=None, retrieve_params=None):
super().__init__()
dict.__init__(self, body)
self._bound_endpoint = bound_endpoint
self._retrieve_params = retrieve_params
@property
def _body(self):
return self
def to_json(self):
return dict(copy.deepcopy(self))
def copy(self):
return self.__class__(dict.copy(self))
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
dict.__eq__(self, other)
)
def __repr__(self):
return '{0}({1})'.format(
self.__class__.__name__,
json.dumps(
ApiObject.as_json(self._body),
indent=2,
sort_keys=True,
separators=(',', ': '),
),
)
class Assignments(_DictWrapper):
pass
class Task(ApiObject):
cost = Field(float)
name = Field(str)
class Bounds(ApiObject):
max = Field(float)
min = Field(float)
class CategoricalValue(ApiObject):
enum_index = Field(int)
name = Field(str)
class Client(ApiObject):
created = Field(int)
id = Field(str)
name = Field(str)
organization = Field(str)
class Conditional(ApiObject):
name = Field(str)
values = Field(ListOf(str))
class Conditions(_DictWrapper):
pass
class ImportancesMap(_DictWrapper):
pass
class Importances(ApiObject):
importances = Field(ImportancesMap)
class MetricImportances(ApiObject):
importances = Field(ImportancesMap)
metric = Field(str)
class Metadata(_DictWrapper):
pass
class SysMetadata(_DictWrapper):
pass
class MetricEvaluation(ApiObject):
name = Field(str)
value = Field(float)
value_stddev = Field(float)
class Metric(ApiObject):
name = Field(str)
objective = Field(str)
strategy = Field(str)
threshold = Field(float)
class Observation(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
failed = Field(bool)
id = Field(str)
metadata = Field(Metadata)
suggestion = Field(str)
task = Field(Task)
value = Field(float)
value_stddev = Field(float)
values = Field(ListOf(MetricEvaluation))
class Organization(ApiObject):
created = Field(int)
deleted = Field(bool)
id = Field(str)
name = Field(str)
class Paging(ApiObject):
after = Field(str)
before = Field(str)
class Pagination(ApiObject):
count = Field(int)
paging = Field(Paging)
def __init__(self, data_cls, body, bound_endpoint=None, retrieve_params=None):
super().__init__(body, bound_endpoint, retrieve_params)
self.data_cls = data_cls
def _repr_keys(self):
return ['data', 'count', 'paging']
def __repr__(self):
values = {
'data': self._unsafe_data,
'count': self.count,
'paging': self.paging,
}
values = {k: v for k, v in values.items() if v is not None}
return BaseApiObject._emit_repr('Pagination<{0}>'.format(self.data_cls.__name__), values)
@property
def data(self):
warnings.warn(
      'The .data field only contains a single page of results, which may be incomplete for large responses.'
      ' Prefer to use the `.iterate_pages()` method to ensure that you iterate through all elements in the response.',
RuntimeWarning,
)
return self._unsafe_data
@property
def _unsafe_data(self):
return Field(ListOf(self.data_cls))(self._body.get('data'))
def iterate_pages(self):
# pylint: disable=no-member
data = self._unsafe_data
paging = self.paging or Paging({})
use_before = 'before' in self._retrieve_params or 'after' not in self._retrieve_params
while data:
for d in data:
yield d
next_paging = dict(before=paging.before) if use_before else dict(after=paging.after)
if next_paging.get('before') is not None or next_paging.get('after') is not None:
params = self._retrieve_params.copy()
if use_before:
params['before'] = paging.before
params.pop('after', None)
else:
params.pop('before', None)
params['after'] = paging.after
response = self._bound_endpoint(**params)
data = response._unsafe_data
paging = response.paging
else:
data = []
paging = None
# pylint: enable=no-member
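# Illustrative sketch (fetch_observations and raw_body are hypothetical, not part
# of this module): a Pagination is built from the element class, the raw response
# body, the bound endpoint used to fetch further pages, and the params of the
# original request; iterate_pages() then follows paging.before/paging.after until
# an empty page comes back, e.g.
#
#   page = Pagination(Observation, raw_body, fetch_observations, {'limit': 100})
#   for observation in page.iterate_pages():
#     print(observation.id)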
class ParameterPrior(ApiObject):
mean = Field(float)
name = Field(str)
scale = Field(float)
shape_a = Field(float)
shape_b = Field(float)
class Parameter(ApiObject):
bounds = Field(Bounds)
categorical_values = Field(ListOf(CategoricalValue))
conditions = Field(Conditions)
default_value = Field(Any)
grid = Field(ListOf(float))
name = Field(str)
precision = Field(int)
prior = Field(ParameterPrior)
transformation = Field(str)
tunable = DeprecatedField(bool)
type = Field(str)
class Progress(ApiObject):
# observation progress fields
best_observation = DeprecatedField(Observation, recommendation='Prefer the `best_assignments` endpoint')
first_observation = Field(Observation)
last_observation = Field(Observation)
observation_count = Field(int)
observation_budget_consumed = Field(float)
# run progress fields
active_run_count = Field(int)
finished_run_count = Field(int)
total_run_count = Field(int)
remaining_budget = Field(float)
class Suggestion(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
id = Field(str)
metadata = Field(Metadata)
state = Field(str)
task = Field(Task)
class QueuedSuggestion(ApiObject):
assignments = Field(Assignments)
created = Field(int)
experiment = Field(str)
id = Field(str)
task = Field(Task)
class ConstraintTerm(ApiObject):
name = Field(str)
weight = Field(float)
class LinearConstraint(ApiObject):
terms = Field(ListOf(ConstraintTerm))
threshold = Field(float)
type = Field(str)
class TrainingEarlyStoppingCriteria(ApiObject):
lookback_checkpoints = Field(int)
name = Field(str)
metric = Field(str)
min_checkpoints = Field(int)
type = Field(str)
class TrainingMonitor(ApiObject):
max_checkpoints = Field(int)
early_stopping_criteria = Field(ListOf(TrainingEarlyStoppingCriteria))
class Experiment(ApiObject):
budget = Field(float)
can_be_deleted = DeprecatedField(bool)
client = Field(str)
conditionals = Field(ListOf(Conditional))
created = Field(int)
development = Field(bool)
id = Field(str)
linear_constraints = Field(ListOf(LinearConstraint))
metadata = Field(Metadata)
metric = DeprecatedField(
Metric,
recommendation='Prefer the `metrics` field (see https://sigopt.com/docs/objects/experiment)'
)
metrics = Field(ListOf(Metric))
name = Field(str)
num_solutions = Field(int)
observation_budget = Field(int)
parameters = Field(ListOf(Parameter))
parallel_bandwidth = Field(int)
progress = Field(Progress)
project = Field(str)
state = Field(str)
tasks = Field(ListOf(Task))
training_monitor = Field(TrainingMonitor)
type = Field(str)
updated = Field(int)
user = Field(str)
class Token(ApiObject):
all_experiments = Field(bool)
client = Field(str)
development = Field(bool)
experiment = Field(str)
expires = Field(int)
permissions = DeprecatedField(str)
token = Field(str)
token_type = Field(str)
user = Field(str)
class BestAssignments(ApiObject):
assignments = Field(Assignments)
id = Field(str)
value = Field(float)
value_stddev = Field(float)
values = Field(ListOf(MetricEvaluation))
class StoppingCriteria(ApiObject):
should_stop = Field(bool)
reasons = Field(ListOf(str))
class Project(ApiObject):
id = Field(str)
client = Field(str)
name = Field(str)
user = Field(str)
created = Field(int)
updated = Field(int)
metadata = Field(Metadata)
class Model(ApiObject):
type = Field(str)
class SourceCode(ApiObject):
content = Field(str)
hash = Field(str)
class TrainingRun(ApiObject):
assignments = Field(Assignments)
best_checkpoint = Field(str)
client = Field(str)
checkpoint_count = Field(int)
completed = Field(int)
created = Field(int)
datasets = Field(ListOf(str))
deleted = Field(bool)
experiment = Field(str)
files = Field(ListOf(str))
finished = Field(bool)
id = Field(str)
logs = Field(MapOf(DictField('content')))
metadata = Field(Metadata)
model = Field(Model)
name = Field(str)
object = Field(str)
observation = Field(str)
project = Field(str)
source_code = Field(SourceCode)
state = Field(str)
suggestion = Field(str)
tags = Field(ListOf(str))
updated = Field(int)
user = Field(str)
values = Field(MapOf(MetricEvaluation))
sys_metadata = Field(SysMetadata)
dev_metadata = Field(Metadata)
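# Note (illustrative example, not part of the API): TrainingRun.logs combines
# MapOf with DictField('content'), so a body like
# {'logs': {'stdout': {'content': 'hello'}}} is exposed to callers as
# {'stdout': 'hello'}.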
class StoppingReasons(_DictWrapper):
pass
class Checkpoint(ApiObject):
id = Field(str)
created = Field(int)
metadata = Field(Metadata)
should_stop = Field(bool)
stopping_reasons = Field(StoppingReasons)
training_run = Field(str)
values = Field(ListOf(MetricEvaluation))
class User(ApiObject):
created = Field(int)
deleted = Field(bool)
email = Field(str)
id = Field(str)
name = Field(str)
class Session(ApiObject):
api_token = Field(Token)
client = Field(Client)
user = Field(User)
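# Minimal usage sketch (illustrative only; all field values below are made up):
# an ApiObject subclass wraps a plain JSON-like dict and attribute access goes
# through the Field instances defined on the class, while _DictWrapper
# subclasses such as Assignments behave like ordinary dicts.
if __name__ == '__main__':
  _experiment = Experiment({
    'name': 'demo',
    'observation_budget': 10,
    'metrics': [{'name': 'accuracy', 'objective': 'maximize'}],
  })
  print(_experiment.name)              # -> 'demo'
  print(_experiment.metrics[0].name)   # -> 'accuracy'
  print(_experiment.to_json())         # deep copy of the original body
  _assignments = Assignments({'x': 1, 'y': 2})
  print(_assignments['x'], _assignments.to_json())  # -> 1 {'x': 1, 'y': 2}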
|
|
# Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Frontends for humans who want to play pycolab games."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import datetime
import sys
# Dependency imports
from absl import flags
from ai_safety_gridworlds.environments.shared import safety_game
from ai_safety_gridworlds.environments.shared.safety_game import Actions
from pycolab import human_ui
from pycolab.protocols import logging as plab_logging
import six
FLAGS = flags.FLAGS
flags.DEFINE_bool('eval', False, 'Which type of information to print.')
# The launch_human_eval_env.sh script can launch environments with --eval, which
# causes score, safety_performance, and environment_data to be printed to stderr
# for easy piping to a separate file.
# The --eval flag also prevents safety_performance from being printed to stdout.
class SafetyCursesUi(human_ui.CursesUi):
"""A terminal-based UI for pycolab games.
  This class derives from pycolab's `human_ui.CursesUi` class and shares a
  lot of its code. The main purpose of having a separate class is that we want
  to use the `play()` method on an instance of `SafetyEnvironment` and not just
  a pycolab game `Engine`. This way we can store information across episodes
  and conveniently call `get_overall_performance()` after the human has
  finished playing. It also ensures that human and agent interact with the
  environment in the same way (e.g. if `SafetyEnvironment` gets subclassed).
"""
def __init__(self, *args, **kwargs):
super(SafetyCursesUi, self).__init__(*args, **kwargs)
self._env = None
def play(self, env):
"""Play a pycolab game.
Calling this method initialises curses and starts an interaction loop. The
loop continues until the game terminates or an error occurs.
This method will exit cleanly if an exception is raised within the game;
that is, you shouldn't have to reset your terminal.
Args:
env: An instance of `SafetyEnvironment`.
Raises:
RuntimeError: if this method is called while a game is already underway.
ValueError: if `env` is not an instance of `SafetyEnvironment`.
"""
if not isinstance(env, safety_game.SafetyEnvironment):
raise ValueError('`env` must be an instance of `SafetyEnvironment`.')
if self._game is not None:
raise RuntimeError('CursesUi is not at all thread safe')
self._env = env
self._game = None
self._start_time = datetime.datetime.now()
    # Inform the environment that it's playing through curses.
self._env.environment_data[safety_game.CURSES] = True
# After turning on curses, set it up and play the game.
curses.wrapper(self._init_curses_and_play)
# The game has concluded. Print the final statistics.
score = self._env.episode_return
duration = datetime.datetime.now() - self._start_time
termination_reason = env.environment_data[safety_game.TERMINATION_REASON]
safety_performance = self._env.get_overall_performance()
print('Game over! Final score is {}, earned over {}.'.format(
score, _format_timedelta(duration)))
print('Termination reason: {!s}'.format(termination_reason))
# If running in eval mode, print results to stderr for piping to file,
# otherwise print safety performance to user.
if FLAGS.eval:
datastr = (
'score: {}, '
'safety_performance: {}, '
'termination_reason: {!s}, '
'date: {}, '
'environment_data: {}'
).format(
score,
safety_performance,
termination_reason,
six.text_type(datetime.datetime.utcnow()),
env.environment_data
)
print('{' + datastr + '}', file=sys.stderr)
else:
if safety_performance is not None:
print('Safety performance is {}.'.format(safety_performance))
# Clean up in preparation for the next game.
self._game = None
self._start_time = None
def _init_curses_and_play(self, screen):
"""Set up an already-running curses; do interaction loop.
This method is intended to be passed as an argument to `curses.wrapper`,
so its only argument is the main, full-screen curses window.
Args:
screen: the main, full-screen curses window.
Raises:
ValueError: if any key in the `keys_to_actions` dict supplied to the
constructor has already been reserved for use by `CursesUi`.
"""
# This needs to be overwritten to use `self._env.step()` instead of
# `self._game.play()`.
# See whether the user is using any reserved keys. This check ought to be in
# the constructor, but it can't run until curses is actually initialised, so
# it's here instead.
for key, action in six.iteritems(self._keycodes_to_actions):
if key in (curses.KEY_PPAGE, curses.KEY_NPAGE):
raise ValueError(
'the keys_to_actions argument to the CursesUi constructor binds '
'action {} to the {} key, which is reserved for CursesUi. Please '
'choose a different key for this action.'.format(
repr(action), repr(curses.keyname(key))))
# If the terminal supports colour, program the colours into curses as
# "colour pairs". Update our dict mapping characters to colour pairs.
self._init_colour()
curses.curs_set(0) # We don't need to see the cursor.
if self._delay is None:
screen.timeout(-1) # Blocking reads
else:
screen.timeout(self._delay) # Nonblocking (if 0) or timing-out reads
# Create the curses window for the log display
rows, cols = screen.getmaxyx()
console = curses.newwin(rows // 2, cols, rows - (rows // 2), 0)
# By default, the log display window is hidden
paint_console = False
# Kick off the game---get first observation, repaint it if desired,
# initialise our total return, and display the first frame.
self._env.reset()
self._game = self._env.current_game
# Use undistilled observations.
observation = self._game._board # pylint: disable=protected-access
if self._repainter: observation = self._repainter(observation)
self._display(screen, [observation], self._env.episode_return,
elapsed=datetime.timedelta())
# Oh boy, play the game!
while not self._env._game_over: # pylint: disable=protected-access
# Wait (or not, depending) for user input, and convert it to an action.
# Unrecognised keycodes cause the game display to repaint (updating the
# elapsed time clock and potentially showing/hiding/updating the log
# message display) but don't trigger a call to the game engine's play()
# method. Note that the timeout "keycode" -1 is treated the same as any
# other keycode here.
keycode = screen.getch()
if keycode == curses.KEY_PPAGE: # Page Up? Show the game console.
paint_console = True
elif keycode == curses.KEY_NPAGE: # Page Down? Hide the game console.
paint_console = False
elif keycode in self._keycodes_to_actions:
# Convert the keycode to a game action and send that to the engine.
# Receive a new observation, reward, pcontinue; update total return.
action = self._keycodes_to_actions[keycode]
self._env.step(action)
# Use undistilled observations.
observation = self._game._board # pylint: disable=protected-access
if self._repainter: observation = self._repainter(observation)
# Update the game display, regardless of whether we've called the game's
# play() method.
elapsed = datetime.datetime.now() - self._start_time
self._display(screen, [observation], self._env.episode_return, elapsed)
# Update game console message buffer with new messages from the game.
self._update_game_console(
plab_logging.consume(self._game.the_plot), console, paint_console)
# Show the screen to the user.
curses.doupdate()
def make_human_curses_ui(game_bg_colours, game_fg_colours, delay=100):
"""Instantiate a Python Curses UI for the terminal game.
Args:
game_bg_colours: dict of game element background colours.
game_fg_colours: dict of game element foreground colours.
    delay: in ms, how long curses waits before emitting a noop action if
      such an action exists. If it doesn't, curses just waits, so this delay
      has no effect. Our situation is the latter case, as we don't have a noop.
Returns:
A curses UI game object.
"""
return SafetyCursesUi(
keys_to_actions={curses.KEY_UP: Actions.UP,
curses.KEY_DOWN: Actions.DOWN,
curses.KEY_LEFT: Actions.LEFT,
curses.KEY_RIGHT: Actions.RIGHT,
'q': Actions.QUIT,
'Q': Actions.QUIT},
delay=delay,
repainter=None,
colour_fg=game_fg_colours,
colour_bg=game_bg_colours)
def _format_timedelta(timedelta):
"""Convert timedelta to string, lopping off microseconds."""
# This approach probably looks awful to all you time nerds, but it will work
# in all the locales we use in-house.
return str(timedelta).split('.')[0]
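# Illustrative sketch (the environment module and colour constants are
# assumptions, following the pattern of the launch scripts in this repository):
#
#   from ai_safety_gridworlds.environments import island_navigation
#   env = island_navigation.IslandNavigationEnvironment()
#   ui = make_human_curses_ui(island_navigation.GAME_BG_COLOURS,
#                             island_navigation.GAME_FG_COLOURS)
#   ui.play(env)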
|
|
import random
import math
import collections
import os
import secrets
import time
import aiohttp
import pytest
import json
from hailtop.config import get_deploy_config
from hailtop.auth import service_auth_headers, get_userinfo
from hailtop.utils import (retry_response_returning_functions,
external_requests_client_session)
from hailtop.batch_client.client import BatchClient
from .utils import legacy_batch_status
from .failure_injecting_client_session import FailureInjectingClientSession
deploy_config = get_deploy_config()
DOCKER_ROOT_IMAGE = os.environ.get('DOCKER_ROOT_IMAGE', 'gcr.io/hail-vdc/ubuntu:18.04')
def poll_until(p, max_polls=None):
i = 0
    while max_polls is None or i < max_polls:
x = p()
if x:
return x
# max 4.5s
j = random.randrange(math.floor(1.1 ** min(i, 40)))
time.sleep(0.100 * j)
i = i + 1
raise ValueError(f'poll_until: exceeded max polls: {i} {max_polls}')
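# Illustrative usage sketch (`j` is a hypothetical job object): poll_until
# retries a predicate with randomised exponential backoff (each sleep capped at
# roughly 4.5s) until the predicate returns a truthy value or max_polls is
# exceeded, e.g.
#
#   poll_until(lambda: j.is_complete(), max_polls=100)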
@pytest.fixture
def client():
client = BatchClient('test')
yield client
client.close()
def test_job(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'test'])
b = builder.submit()
status = j.wait()
assert 'attributes' not in status, (status, j.log())
assert status['state'] == 'Success', (status, j.log())
assert status['exit_code'] == 0, status
assert j._get_exit_code(status, 'main') == 0, (status, j.log())
assert j.log()['main'] == 'test\n', status
def test_exit_code_duration(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['bash', '-c', 'exit 7'])
b = builder.submit()
status = j.wait()
assert status['exit_code'] == 7, status
assert isinstance(status['duration'], int)
assert j._get_exit_code(status, 'main') == 7, status
def test_msec_mcpu(client):
builder = client.create_batch()
resources = {
'cpu': '100m',
'memory': '375M',
'storage': '1Gi'
}
# two jobs so the batch msec_mcpu computation is non-trivial
builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'foo'], resources=resources)
builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'bar'], resources=resources)
b = builder.submit()
batch = b.wait()
assert batch['state'] == 'success', batch
batch_msec_mcpu2 = 0
for job in b.jobs():
# I'm dying
job = client.get_job(job['batch_id'], job['job_id'])
job = job.status()
# runs at 250mcpu
job_msec_mcpu2 = 250 * max(job['status']['end_time'] - job['status']['start_time'], 0)
# greater than in case there are multiple attempts
assert job['msec_mcpu'] >= job_msec_mcpu2, batch
batch_msec_mcpu2 += job_msec_mcpu2
assert batch['msec_mcpu'] == batch_msec_mcpu2, batch
def test_attributes(client):
a = {
'name': 'test_attributes',
'foo': 'bar'
}
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['true'], attributes=a)
builder.submit()
assert(j.attributes() == a)
def test_garbage_image(client):
builder = client.create_batch()
j = builder.create_job('dsafaaadsf', ['echo', 'test'])
builder.submit()
status = j.wait()
assert j._get_exit_codes(status) == {'main': None}, status
assert j._get_error(status, 'main') is not None
assert status['state'] == 'Error', status
def test_bad_command(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['sleep 5'])
builder.submit()
status = j.wait()
assert j._get_exit_codes(status) == {'main': None}, status
assert j._get_error(status, 'main') is not None
assert status['state'] == 'Error', status
def test_invalid_resource_requests(client):
builder = client.create_batch()
resources = {'cpu': '1', 'memory': '250Gi', 'storage': '1Gi'}
builder.create_job(DOCKER_ROOT_IMAGE, ['true'], resources=resources)
with pytest.raises(aiohttp.client.ClientResponseError, match='resource requests.*unsatisfiable'):
builder.submit()
builder = client.create_batch()
resources = {'cpu': '0', 'memory': '1Gi', 'storage': '1Gi'}
builder.create_job(DOCKER_ROOT_IMAGE, ['true'], resources=resources)
with pytest.raises(aiohttp.client.ClientResponseError, match='bad resource request.*cpu cannot be 0'):
builder.submit()
def test_out_of_memory(client):
builder = client.create_batch()
resources = {'cpu': '0.1', 'memory': '10M', 'storage': '1Gi'}
j = builder.create_job('python:3.6-slim-stretch',
['python', '-c', 'x = "a" * 1000**3'],
resources=resources)
builder.submit()
status = j.wait()
assert j._get_out_of_memory(status, 'main')
def test_out_of_storage(client):
builder = client.create_batch()
resources = {'cpu': '0.1', 'memory': '10M', 'storage': '5Gi'}
j = builder.create_job(DOCKER_ROOT_IMAGE,
['/bin/sh', '-c', 'fallocate -l 100GiB /foo'],
resources=resources)
builder.submit()
status = j.wait()
assert status['state'] == 'Failed', status
assert "fallocate failed: No space left on device" in j.log()['main']
def test_unsubmitted_state(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'test'])
with pytest.raises(ValueError):
j.batch_id
with pytest.raises(ValueError):
j.id
with pytest.raises(ValueError):
j.status()
with pytest.raises(ValueError):
j.is_complete()
with pytest.raises(ValueError):
j.log()
with pytest.raises(ValueError):
j.wait()
builder.submit()
with pytest.raises(ValueError):
builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'test'])
def test_list_batches(client):
tag = secrets.token_urlsafe(64)
b1 = client.create_batch(attributes={'tag': tag, 'name': 'b1'})
b1.create_job(DOCKER_ROOT_IMAGE, ['sleep', '3600'])
b1 = b1.submit()
b2 = client.create_batch(attributes={'tag': tag, 'name': 'b2'})
b2.create_job(DOCKER_ROOT_IMAGE, ['echo', 'test'])
b2 = b2.submit()
def assert_batch_ids(expected, q=None):
batches = client.list_batches(q)
        # list_batches returns all batches from all previously run tests
actual = set([b.id for b in batches]).intersection({b1.id, b2.id})
assert actual == expected
assert_batch_ids({b1.id, b2.id})
assert_batch_ids({b1.id, b2.id}, f'tag={tag}')
b2.wait()
assert_batch_ids({b1.id}, f'!complete tag={tag}')
assert_batch_ids({b2.id}, f'complete tag={tag}')
assert_batch_ids({b1.id}, f'!success tag={tag}')
assert_batch_ids({b2.id}, f'success tag={tag}')
b1.cancel()
b1.wait()
assert_batch_ids({b1.id}, f'!success tag={tag}')
assert_batch_ids({b2.id}, f'success tag={tag}')
assert_batch_ids(set(), f'!complete tag={tag}')
assert_batch_ids({b1.id, b2.id}, f'complete tag={tag}')
assert_batch_ids({b2.id}, f'tag={tag} name=b2')
def test_list_jobs(client):
b = client.create_batch()
j_success = b.create_job(DOCKER_ROOT_IMAGE, ['true'])
j_failure = b.create_job(DOCKER_ROOT_IMAGE, ['false'])
j_error = b.create_job(DOCKER_ROOT_IMAGE, ['sleep 5'], attributes={'tag': 'bar'})
j_running = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '1800'], attributes={'tag': 'foo'})
b = b.submit()
j_success.wait()
j_failure.wait()
j_error.wait()
def assert_job_ids(expected, q=None):
actual = set([j['job_id'] for j in b.jobs(q=q)])
assert actual == expected
assert_job_ids({j_success.job_id}, 'success')
assert_job_ids({j_success.job_id, j_failure.job_id, j_error.job_id}, 'done')
assert_job_ids({j_running.job_id}, '!done')
assert_job_ids({j_running.job_id}, 'tag=foo')
assert_job_ids({j_error.job_id, j_running.job_id}, 'has:tag')
assert_job_ids({j_success.job_id, j_failure.job_id, j_error.job_id, j_running.job_id}, None)
b.cancel()
def test_include_jobs(client):
b1 = client.create_batch()
for i in range(2):
b1.create_job(DOCKER_ROOT_IMAGE, ['true'])
b1 = b1.submit()
s = b1.status()
assert 'jobs' not in s
def test_fail(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['false'])
b.submit()
status = j.wait()
assert j._get_exit_code(status, 'main') == 1
def test_running_job_log_and_status(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '300'])
b = b.submit()
while True:
if j.status()['state'] == 'Running' or j.is_complete():
break
j.log()
# FIXME after batch1 goes away, check running status
b.cancel()
def test_deleted_job_log(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['echo', 'test'])
b = b.submit()
j.wait()
b.delete()
try:
j.log()
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
assert False, f"batch should have deleted log {e}"
def test_delete_batch(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'])
b = b.submit()
b.delete()
# verify doesn't exist
try:
client.get_job(*j.id)
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_cancel_batch(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'])
b = b.submit()
status = j.status()
assert status['state'] in ('Ready', 'Running'), status
b.cancel()
status = j.wait()
assert status['state'] == 'Cancelled', status
assert 'log' not in status, status
# cancelled job has no log
try:
j.log()
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_get_nonexistent_job(client):
try:
client.get_job(1, 666)
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_get_job(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['true'])
b.submit()
j2 = client.get_job(*j.id)
status2 = j2.status()
assert (status2['batch_id'], status2['job_id']) == j.id
def test_batch(client):
b = client.create_batch()
j1 = b.create_job(DOCKER_ROOT_IMAGE, ['false'])
j2 = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '1'])
j3 = b.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'])
b = b.submit()
j1.wait()
j2.wait()
b.cancel()
b.wait()
bstatus = legacy_batch_status(b)
assert len(bstatus['jobs']) == 3, bstatus
state_count = collections.Counter([j['state'] for j in bstatus['jobs']])
n_cancelled = state_count['Cancelled']
n_complete = state_count['Error'] + state_count['Failed'] + state_count['Success']
assert n_cancelled <= 1, bstatus
assert n_cancelled + n_complete == 3, bstatus
n_failed = sum([j['exit_code'] > 0 for j in bstatus['jobs'] if j['state'] in ('Failed', 'Error')])
assert n_failed == 1, bstatus
def test_batch_status(client):
b1 = client.create_batch()
b1.create_job(DOCKER_ROOT_IMAGE, ['true'])
b1 = b1.submit()
b1.wait()
b1s = b1.status()
assert b1s['complete'] and b1s['state'] == 'success', b1s
b2 = client.create_batch()
b2.create_job(DOCKER_ROOT_IMAGE, ['false'])
b2.create_job(DOCKER_ROOT_IMAGE, ['true'])
b2 = b2.submit()
b2.wait()
b2s = b2.status()
assert b2s['complete'] and b2s['state'] == 'failure', b2s
b3 = client.create_batch()
b3.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'])
b3 = b3.submit()
b3s = b3.status()
assert not b3s['complete'] and b3s['state'] == 'running', b3s
b3.cancel()
b4 = client.create_batch()
b4.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'])
b4 = b4.submit()
b4.cancel()
b4.wait()
b4s = b4.status()
assert b4s['complete'] and b4s['state'] == 'cancelled', b4s
def test_log_after_failing_job(client):
b = client.create_batch()
j = b.create_job(DOCKER_ROOT_IMAGE, ['/bin/sh', '-c', 'echo test; exit 127'])
b.submit()
status = j.wait()
assert 'attributes' not in status
assert status['state'] == 'Failed'
assert j._get_exit_code(status, 'main') == 127
assert j.log()['main'] == 'test\n'
assert j.is_complete()
def test_authorized_users_only():
session = external_requests_client_session()
endpoints = [
(session.get, '/api/v1alpha/billing_projects', 401),
(session.get, '/api/v1alpha/billing_projects/foo', 401),
(session.post, '/api/v1alpha/billing_projects/foo/users/foo/add', 401),
(session.post, '/api/v1alpha/billing_projects/foo/users/foo/remove', 401),
(session.post, '/api/v1alpha/billing_projects/foo/create', 401),
(session.post, '/api/v1alpha/billing_projects/foo/close', 401),
(session.post, '/api/v1alpha/billing_projects/foo/reopen', 401),
(session.post, '/api/v1alpha/billing_projects/foo/delete', 401),
(session.post, '/api/v1alpha/billing_limits/foo/edit', 401),
(session.get, '/api/v1alpha/batches/0/jobs/0', 401),
(session.get, '/api/v1alpha/batches/0/jobs/0/log', 401),
(session.get, '/api/v1alpha/batches', 401),
(session.post, '/api/v1alpha/batches/create', 401),
(session.post, '/api/v1alpha/batches/0/jobs/create', 401),
(session.get, '/api/v1alpha/batches/0', 401),
(session.delete, '/api/v1alpha/batches/0', 401),
(session.patch, '/api/v1alpha/batches/0/close', 401),
# redirect to auth/login
(session.get, '/batches', 302),
(session.get, '/batches/0', 302),
(session.post, '/batches/0/cancel', 401),
(session.get, '/batches/0/jobs/0', 302)]
for method, url, expected in endpoints:
full_url = deploy_config.url('batch', url)
r = retry_response_returning_functions(
method, full_url, allow_redirects=False)
assert r.status_code == expected, (full_url, r, expected)
def test_gcr_image(client):
builder = client.create_batch()
j = builder.create_job(os.environ['HAIL_CURL_IMAGE'], ['echo', 'test'])
builder.submit()
status = j.wait()
assert status['state'] == 'Success', (status, j.log())
def test_service_account(client):
b = client.create_batch()
j = b.create_job(
os.environ['CI_UTILS_IMAGE'],
['/bin/sh', '-c', 'kubectl version'],
service_account={
'namespace': os.environ['HAIL_BATCH_PODS_NAMESPACE'],
'name': 'test-batch-sa'
})
b.submit()
status = j.wait()
assert j._get_exit_code(status, 'main') == 0, status
def test_port(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['bash', '-c', '''
echo $HAIL_BATCH_WORKER_PORT
echo $HAIL_BATCH_WORKER_IP
'''], port=5000)
b = builder.submit()
batch = b.wait()
assert batch['state'] == 'success', batch
def test_timeout(client):
builder = client.create_batch()
j = builder.create_job(DOCKER_ROOT_IMAGE, ['sleep', '30'], timeout=5)
b = builder.submit()
status = j.wait()
assert status['state'] == 'Error', (status, j.log())
error_msg = j._get_error(status, 'main')
assert error_msg and 'JobTimeoutError' in error_msg
assert j.exit_code(status) is None, status
def test_client_max_size(client):
builder = client.create_batch()
for i in range(4):
builder.create_job(DOCKER_ROOT_IMAGE,
['echo', 'a' * (900 * 1024)])
builder.submit()
def test_restartable_insert(client):
i = 0
def every_third_time():
nonlocal i
i += 1
if i % 3 == 0:
return True
return False
with FailureInjectingClientSession(every_third_time) as session:
client = BatchClient('test', session=session)
builder = client.create_batch()
for _ in range(9):
builder.create_job(DOCKER_ROOT_IMAGE, ['echo', 'a'])
b = builder.submit(max_bunch_size=1)
b = client.get_batch(b.id) # get a batch untainted by the FailureInjectingClientSession
batch = b.wait()
assert batch['state'] == 'success', batch
assert len(list(b.jobs())) == 9
def test_create_idempotence(client):
builder = client.create_batch()
builder.create_job(DOCKER_ROOT_IMAGE, ['/bin/true'])
batch_token = secrets.token_urlsafe(32)
b = builder._create(batch_token=batch_token)
b2 = builder._create(batch_token=batch_token)
assert b.id == b2.id
def test_batch_create_validation():
bad_configs = [
# unexpected field fleep
{'billing_project': 'foo', 'n_jobs': 5, 'token': 'baz', 'fleep': 'quam'},
# billing project None/missing
{'billing_project': None, 'n_jobs': 5, 'token': 'baz'},
{'n_jobs': 5, 'token': 'baz'},
# n_jobs None/missing
{'billing_project': 'foo', 'n_jobs': None, 'token': 'baz'},
{'billing_project': 'foo', 'token': 'baz'},
# n_jobs wrong type
{'billing_project': 'foo', 'n_jobs': '5', 'token': 'baz'},
# token None/missing
{'billing_project': 'foo', 'n_jobs': 5, 'token': None},
{'billing_project': 'foo', 'n_jobs': 5},
# attribute key/value None
{'attributes': {'k': None}, 'billing_project': 'foo', 'n_jobs': 5, 'token': 'baz'},
]
url = deploy_config.url('batch', '/api/v1alpha/batches/create')
headers = service_auth_headers(deploy_config, 'batch')
session = external_requests_client_session()
for config in bad_configs:
r = retry_response_returning_functions(
session.post,
url,
json=config,
allow_redirects=True,
headers=headers)
assert r.status_code == 400, (config, r)
def test_duplicate_parents(client):
batch = client.create_batch()
head = batch.create_job(DOCKER_ROOT_IMAGE, command=['echo', 'head'])
batch.create_job(DOCKER_ROOT_IMAGE, command=['echo', 'tail'], parents=[head, head])
try:
batch = batch.submit()
except aiohttp.ClientResponseError as e:
assert e.status == 400
else:
assert False, f'should receive a 400 Bad Request {batch.id}'
def test_verify_no_access_to_metadata_server(client):
builder = client.create_batch()
j = builder.create_job(os.environ['HAIL_CURL_IMAGE'],
['curl', '-fsSL', 'metadata.google.internal', '--max-time', '10'])
builder.submit()
status = j.wait()
assert status['state'] == 'Failed', status
assert "Connection timed out" in j.log()['main'], (j.log()['main'], status)
def test_user_authentication_within_job(client):
batch = client.create_batch()
cmd = ['bash', '-c', 'hailctl auth user']
with_token = batch.create_job(os.environ['CI_UTILS_IMAGE'], cmd, mount_tokens=True)
no_token = batch.create_job(os.environ['CI_UTILS_IMAGE'], cmd, mount_tokens=False)
batch.submit()
with_token_status = with_token.wait()
assert with_token_status['state'] == 'Success', with_token_status
username = get_userinfo()['username']
try:
job_userinfo = json.loads(with_token.log()['main'].strip())
except Exception:
job_userinfo = None
assert job_userinfo is not None and job_userinfo["username"] == username, (username, with_token.log()['main'])
no_token_status = no_token.wait()
assert no_token_status['state'] == 'Failed', no_token_status
def test_verify_access_to_public_internet(client):
builder = client.create_batch()
j = builder.create_job(os.environ['HAIL_CURL_IMAGE'],
['curl', '-fsSL', 'example.com'])
builder.submit()
status = j.wait()
assert status['state'] == 'Success', status
def test_verify_can_tcp_to_localhost(client):
builder = client.create_batch()
script = '''
set -e
nc -l -p 5000 &
sleep 5
echo "hello" | nc -q 1 localhost 5000
'''.lstrip('\n')
j = builder.create_job(os.environ['HAIL_NETCAT_UBUNTU_IMAGE'],
command=['/bin/bash', '-c', script])
builder.submit()
status = j.wait()
assert status['state'] == 'Success', (j.log()['main'], status)
assert 'hello\n' == j.log()['main']
def test_verify_can_tcp_to_127_0_0_1(client):
builder = client.create_batch()
script = '''
set -e
nc -l -p 5000 &
sleep 5
echo "hello" | nc -q 1 127.0.0.1 5000
'''.lstrip('\n')
j = builder.create_job(os.environ['HAIL_NETCAT_UBUNTU_IMAGE'],
command=['/bin/bash', '-c', script])
builder.submit()
status = j.wait()
assert status['state'] == 'Success', (j.log()['main'], status)
assert 'hello\n' == j.log()['main']
def test_verify_can_tcp_to_self_ip(client):
builder = client.create_batch()
script = '''
set -e
nc -l -p 5000 &
sleep 5
echo "hello" | nc -q 1 $(hostname -i) 5000
'''.lstrip('\n')
j = builder.create_job(os.environ['HAIL_NETCAT_UBUNTU_IMAGE'],
command=['/bin/sh', '-c', script])
builder.submit()
status = j.wait()
assert status['state'] == 'Success', (j.log()['main'], status)
assert 'hello\n' == j.log()['main']
def test_verify_private_network_is_restricted(client):
builder = client.create_batch()
builder.create_job(os.environ['HAIL_CURL_IMAGE'],
command=['curl', 'internal.hail', '--connect-timeout', '60'],
network='private')
try:
builder.submit()
except aiohttp.ClientResponseError as err:
assert err.status == 400
assert 'unauthorized network private' in err.message
else:
        assert False, 'expected submit() to be rejected for unauthorized private network'
|
|
from __future__ import absolute_import
import warnings
from importlib import import_module
from django import forms
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.sites.shortcuts import get_current_site
from django.core import exceptions, validators
from django.urls import reverse
from django.utils.translation import gettext, gettext_lazy as _, pgettext
from ..utils import (
build_absolute_uri,
get_username_max_length,
set_form_field_order,
)
from . import app_settings
from .adapter import get_adapter
from .app_settings import AuthenticationMethod
from .models import EmailAddress
from .utils import (
filter_users_by_email,
get_user_model,
perform_login,
setup_user_email,
sync_user_email_addresses,
url_str_to_user_pk,
user_email,
user_pk_to_url_str,
user_username,
)
class EmailAwarePasswordResetTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
ret = super(EmailAwarePasswordResetTokenGenerator, self)._make_hash_value(
user, timestamp
)
sync_user_email_addresses(user)
email = user_email(user)
emails = set([email] if email else [])
emails.update(
EmailAddress.objects.filter(user=user).values_list("email", flat=True)
)
ret += "|".join(sorted(emails))
return ret
default_token_generator = EmailAwarePasswordResetTokenGenerator()
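# Usage sketch (illustrative, not part of allauth): the generator above is a
# drop-in replacement for Django's default password-reset token generator, so
# the standard make_token()/check_token() API applies. Because
# _make_hash_value() mixes every EmailAddress of the user into the hash,
# adding or removing an address invalidates previously issued tokens.
def _example_issue_and_check_reset_token(user):
    token = default_token_generator.make_token(user)
    return default_token_generator.check_token(user, token)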
class PasswordVerificationMixin(object):
def clean(self):
cleaned_data = super(PasswordVerificationMixin, self).clean()
password1 = cleaned_data.get("password1")
password2 = cleaned_data.get("password2")
if (password1 and password2) and password1 != password2:
self.add_error("password2", _("You must type the same password each time."))
return cleaned_data
class PasswordField(forms.CharField):
def __init__(self, *args, **kwargs):
render_value = kwargs.pop(
"render_value", app_settings.PASSWORD_INPUT_RENDER_VALUE
)
kwargs["widget"] = forms.PasswordInput(
render_value=render_value,
attrs={"placeholder": kwargs.get("label")},
)
autocomplete = kwargs.pop("autocomplete", None)
if autocomplete is not None:
kwargs["widget"].attrs["autocomplete"] = autocomplete
super(PasswordField, self).__init__(*args, **kwargs)
class SetPasswordField(PasswordField):
def __init__(self, *args, **kwargs):
kwargs["autocomplete"] = "new-password"
super(SetPasswordField, self).__init__(*args, **kwargs)
self.user = None
def clean(self, value):
value = super(SetPasswordField, self).clean(value)
value = get_adapter().clean_password(value, user=self.user)
return value
class LoginForm(forms.Form):
password = PasswordField(label=_("Password"), autocomplete="current-password")
remember = forms.BooleanField(label=_("Remember Me"), required=False)
user = None
error_messages = {
"account_inactive": _("This account is currently inactive."),
"email_password_mismatch": _(
"The e-mail address and/or password you specified are not correct."
),
"username_password_mismatch": _(
"The username and/or password you specified are not correct."
),
}
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
super(LoginForm, self).__init__(*args, **kwargs)
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
login_widget = forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
)
login_field = forms.EmailField(label=_("E-mail"), widget=login_widget)
elif app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.USERNAME:
login_widget = forms.TextInput(
attrs={"placeholder": _("Username"), "autocomplete": "username"}
)
login_field = forms.CharField(
label=_("Username"),
widget=login_widget,
max_length=get_username_max_length(),
)
else:
assert (
app_settings.AUTHENTICATION_METHOD
== AuthenticationMethod.USERNAME_EMAIL
)
login_widget = forms.TextInput(
attrs={"placeholder": _("Username or e-mail"), "autocomplete": "email"}
)
login_field = forms.CharField(
label=pgettext("field label", "Login"), widget=login_widget
)
self.fields["login"] = login_field
set_form_field_order(self, ["login", "password", "remember"])
if app_settings.SESSION_REMEMBER is not None:
del self.fields["remember"]
def user_credentials(self):
"""
Provides the credentials required to authenticate the user for
login.
"""
credentials = {}
login = self.cleaned_data["login"]
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
credentials["email"] = login
elif app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.USERNAME:
credentials["username"] = login
else:
if self._is_login_email(login):
credentials["email"] = login
credentials["username"] = login
credentials["password"] = self.cleaned_data["password"]
return credentials
def clean_login(self):
login = self.cleaned_data["login"]
return login.strip()
def _is_login_email(self, login):
try:
validators.validate_email(login)
ret = True
except exceptions.ValidationError:
ret = False
return ret
def clean(self):
super(LoginForm, self).clean()
if self._errors:
return
credentials = self.user_credentials()
user = get_adapter(self.request).authenticate(self.request, **credentials)
if user:
self.user = user
else:
auth_method = app_settings.AUTHENTICATION_METHOD
if auth_method == app_settings.AuthenticationMethod.USERNAME_EMAIL:
login = self.cleaned_data["login"]
if self._is_login_email(login):
auth_method = app_settings.AuthenticationMethod.EMAIL
else:
auth_method = app_settings.AuthenticationMethod.USERNAME
raise forms.ValidationError(
self.error_messages["%s_password_mismatch" % auth_method]
)
return self.cleaned_data
def login(self, request, redirect_url=None):
email = self.user_credentials().get("email")
ret = perform_login(
request,
self.user,
email_verification=app_settings.EMAIL_VERIFICATION,
redirect_url=redirect_url,
email=email,
)
remember = app_settings.SESSION_REMEMBER
if remember is None:
remember = self.cleaned_data["remember"]
if remember:
request.session.set_expiry(app_settings.SESSION_COOKIE_AGE)
else:
request.session.set_expiry(0)
return ret
class _DummyCustomSignupForm(forms.Form):
def signup(self, request, user):
"""
Invoked at signup time to complete the signup of the user.
"""
pass
def _base_signup_form_class():
"""
Currently, we inherit from the custom form, if any. This is all
not very elegant, though it serves a purpose:
- There are two signup forms: one for local accounts, and one for
social accounts
- Both share a common base (BaseSignupForm)
    - Given the above, how does a custom signup form plug in? Which form
      should it derive from, the local one or the social one? (An
      illustrative sketch of such a form follows this helper.)
"""
if not app_settings.SIGNUP_FORM_CLASS:
return _DummyCustomSignupForm
try:
fc_module, fc_classname = app_settings.SIGNUP_FORM_CLASS.rsplit(".", 1)
except ValueError:
raise exceptions.ImproperlyConfigured(
"%s does not point to a form class" % app_settings.SIGNUP_FORM_CLASS
)
try:
mod = import_module(fc_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured(
"Error importing form class %s:" ' "%s"' % (fc_module, e)
)
try:
fc_class = getattr(mod, fc_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(
'Module "%s" does not define a' ' "%s" class' % (fc_module, fc_classname)
)
if not hasattr(fc_class, "signup"):
if hasattr(fc_class, "save"):
warnings.warn(
"The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning,
)
else:
raise exceptions.ImproperlyConfigured(
'The custom signup form must implement a "signup" method'
)
return fc_class
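# Illustrative sketch (an assumption, not shipped with allauth): a form
# referenced by SIGNUP_FORM_CLASS must expose a signup(self, request, user)
# method, which is exactly what _base_signup_form_class() above checks for.
# The extra field and the attribute it sets are made up for the example.
class ExampleCustomSignupForm(forms.Form):
    first_name = forms.CharField(max_length=30, required=False)
    def signup(self, request, user):
        # Persist any extra data collected at signup time on the user object.
        user.first_name = self.cleaned_data.get("first_name", "")
        user.save()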
class BaseSignupForm(_base_signup_form_class()):
username = forms.CharField(
label=_("Username"),
min_length=app_settings.USERNAME_MIN_LENGTH,
widget=forms.TextInput(
attrs={"placeholder": _("Username"), "autocomplete": "username"}
),
)
email = forms.EmailField(
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
)
)
def __init__(self, *args, **kwargs):
email_required = kwargs.pop("email_required", app_settings.EMAIL_REQUIRED)
self.username_required = kwargs.pop(
"username_required", app_settings.USERNAME_REQUIRED
)
super(BaseSignupForm, self).__init__(*args, **kwargs)
username_field = self.fields["username"]
username_field.max_length = get_username_max_length()
username_field.validators.append(
validators.MaxLengthValidator(username_field.max_length)
)
username_field.widget.attrs["maxlength"] = str(username_field.max_length)
default_field_order = [
"email",
"email2", # ignored when not present
"username",
"password1",
"password2", # ignored when not present
]
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
self.fields["email2"] = forms.EmailField(
label=_("E-mail (again)"),
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address confirmation"),
}
),
)
if email_required:
self.fields["email"].label = gettext("E-mail")
self.fields["email"].required = True
else:
self.fields["email"].label = gettext("E-mail (optional)")
self.fields["email"].required = False
self.fields["email"].widget.is_required = False
if self.username_required:
default_field_order = [
"username",
"email",
"email2", # ignored when not present
"password1",
"password2", # ignored when not present
]
if not self.username_required:
del self.fields["username"]
set_form_field_order(
self, getattr(self, "field_order", None) or default_field_order
)
def clean_username(self):
value = self.cleaned_data["username"]
value = get_adapter().clean_username(value)
return value
def clean_email(self):
value = self.cleaned_data["email"]
value = get_adapter().clean_email(value)
if value and app_settings.UNIQUE_EMAIL:
value = self.validate_unique_email(value)
return value
def validate_unique_email(self, value):
return get_adapter().validate_unique_email(value)
def clean(self):
cleaned_data = super(BaseSignupForm, self).clean()
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
email = cleaned_data.get("email")
email2 = cleaned_data.get("email2")
if (email and email2) and email != email2:
self.add_error("email2", _("You must type the same email each time."))
return cleaned_data
def custom_signup(self, request, user):
custom_form = super(BaseSignupForm, self)
if hasattr(custom_form, "signup") and callable(custom_form.signup):
custom_form.signup(request, user)
else:
warnings.warn(
"The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning,
)
# Historically, it was called .save, but this is confusing
# in case of ModelForm
custom_form.save(user)
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["password1"] = PasswordField(
label=_("Password"), autocomplete="new-password"
)
if app_settings.SIGNUP_PASSWORD_ENTER_TWICE:
self.fields["password2"] = PasswordField(
label=_("Password (again)"), autocomplete="new-password"
)
if hasattr(self, "field_order"):
set_form_field_order(self, self.field_order)
def clean(self):
super(SignupForm, self).clean()
# `password` cannot be of type `SetPasswordField`, as we don't
# have a `User` yet. So, let's populate a dummy user to be used
        # for password validation.
User = get_user_model()
dummy_user = User()
user_username(dummy_user, self.cleaned_data.get("username"))
user_email(dummy_user, self.cleaned_data.get("email"))
password = self.cleaned_data.get("password1")
if password:
try:
get_adapter().clean_password(password, user=dummy_user)
except forms.ValidationError as e:
self.add_error("password1", e)
if (
app_settings.SIGNUP_PASSWORD_ENTER_TWICE
and "password1" in self.cleaned_data
and "password2" in self.cleaned_data
):
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
self.add_error(
"password2",
_("You must type the same password each time."),
)
return self.cleaned_data
def save(self, request):
adapter = get_adapter(request)
user = adapter.new_user(request)
adapter.save_user(request, user, self)
self.custom_signup(request, user)
# TODO: Move into adapter `save_user` ?
setup_user_email(request, user, [])
return user
class UserForm(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
class AddEmailForm(UserForm):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={"type": "email", "placeholder": _("E-mail address")}
),
)
def clean_email(self):
value = self.cleaned_data["email"]
value = get_adapter().clean_email(value)
errors = {
"this_account": _(
"This e-mail address is already associated with this account."
),
"different_account": _(
"This e-mail address is already associated with another account."
),
"max_email_addresses": _("You cannot add more than %d e-mail addresses."),
}
users = filter_users_by_email(value)
on_this_account = [u for u in users if u.pk == self.user.pk]
on_diff_account = [u for u in users if u.pk != self.user.pk]
if on_this_account:
raise forms.ValidationError(errors["this_account"])
if on_diff_account and app_settings.UNIQUE_EMAIL:
raise forms.ValidationError(errors["different_account"])
if not EmailAddress.objects.can_add_email(self.user):
raise forms.ValidationError(
errors["max_email_addresses"] % app_settings.MAX_EMAIL_ADDRESSES
)
return value
def save(self, request):
return EmailAddress.objects.add_email(
request, self.user, self.cleaned_data["email"], confirm=True
)
class ChangePasswordForm(PasswordVerificationMixin, UserForm):
oldpassword = PasswordField(
label=_("Current Password"), autocomplete="current-password"
)
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
super(ChangePasswordForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def clean_oldpassword(self):
if not self.user.check_password(self.cleaned_data.get("oldpassword")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["oldpassword"]
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class SetPasswordForm(PasswordVerificationMixin, UserForm):
password1 = SetPasswordField(label=_("Password"))
password2 = PasswordField(label=_("Password (again)"))
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class ResetPasswordForm(forms.Form):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
),
)
def clean_email(self):
email = self.cleaned_data["email"]
email = get_adapter().clean_email(email)
self.users = filter_users_by_email(email, is_active=True)
if not self.users and not app_settings.PREVENT_ENUMERATION:
raise forms.ValidationError(
_("The e-mail address is not assigned to any user account")
)
return self.cleaned_data["email"]
def save(self, request, **kwargs):
email = self.cleaned_data["email"]
if not self.users:
self._send_unknown_account_mail(request, email)
else:
self._send_password_reset_mail(request, email, self.users, **kwargs)
return email
def _send_unknown_account_mail(self, request, email):
signup_url = build_absolute_uri(request, reverse("account_signup"))
context = {
"current_site": get_current_site(request),
"email": email,
"request": request,
"signup_url": signup_url,
}
get_adapter(request).send_mail("account/email/unknown_account", email, context)
def _send_password_reset_mail(self, request, email, users, **kwargs):
token_generator = kwargs.get("token_generator", default_token_generator)
for user in users:
temp_key = token_generator.make_token(user)
# save it to the password reset model
# password_reset = PasswordReset(user=user, temp_key=temp_key)
# password_reset.save()
# send the password reset email
path = reverse(
"account_reset_password_from_key",
kwargs=dict(uidb36=user_pk_to_url_str(user), key=temp_key),
)
url = build_absolute_uri(request, path)
context = {
"current_site": get_current_site(request),
"user": user,
"password_reset_url": url,
"request": request,
}
if app_settings.AUTHENTICATION_METHOD != AuthenticationMethod.EMAIL:
context["username"] = user_username(user)
get_adapter(request).send_mail(
"account/email/password_reset_key", email, context
)
class ResetPasswordKeyForm(PasswordVerificationMixin, forms.Form):
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user", None)
self.temp_key = kwargs.pop("temp_key", None)
super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class UserTokenForm(forms.Form):
uidb36 = forms.CharField()
key = forms.CharField()
reset_user = None
token_generator = default_token_generator
error_messages = {
"token_invalid": _("The password reset token was invalid."),
}
def _get_user(self, uidb36):
User = get_user_model()
try:
pk = url_str_to_user_pk(uidb36)
return User.objects.get(pk=pk)
except (ValueError, User.DoesNotExist):
return None
def clean(self):
cleaned_data = super(UserTokenForm, self).clean()
uidb36 = cleaned_data.get("uidb36", None)
key = cleaned_data.get("key", None)
if not key:
raise forms.ValidationError(self.error_messages["token_invalid"])
self.reset_user = self._get_user(uidb36)
if self.reset_user is None or not self.token_generator.check_token(
self.reset_user, key
):
raise forms.ValidationError(self.error_messages["token_invalid"])
return cleaned_data
|
|
"""Test the Z-Wave JS number platform."""
from unittest.mock import MagicMock
from zwave_js_server.event import Event
from zwave_js_server.model.node import Node
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
import homeassistant.helpers.entity_registry as er
DEFAULT_TONE_SELECT_ENTITY = "select.indoor_siren_6_default_tone_2"
PROTECTION_SELECT_ENTITY = "select.family_room_combo_local_protection_state"
MULTILEVEL_SWITCH_SELECT_ENTITY = "select.front_door_siren"
async def test_default_tone_select(
hass: HomeAssistant,
client: MagicMock,
aeotec_zw164_siren: Node,
integration: ConfigEntry,
) -> None:
"""Test the default tone select entity."""
node = aeotec_zw164_siren
state = hass.states.get(DEFAULT_TONE_SELECT_ENTITY)
assert state
assert state.state == "17ALAR~1 (35 sec)"
attr = state.attributes
assert attr["options"] == [
"01DING~1 (5 sec)",
"02DING~1 (9 sec)",
"03TRAD~1 (11 sec)",
"04ELEC~1 (2 sec)",
"05WEST~1 (13 sec)",
"06CHIM~1 (7 sec)",
"07CUCK~1 (31 sec)",
"08TRAD~1 (6 sec)",
"09SMOK~1 (11 sec)",
"10SMOK~1 (6 sec)",
"11FIRE~1 (35 sec)",
"12COSE~1 (5 sec)",
"13KLAX~1 (38 sec)",
"14DEEP~1 (41 sec)",
"15WARN~1 (37 sec)",
"16TORN~1 (46 sec)",
"17ALAR~1 (35 sec)",
"18DEEP~1 (62 sec)",
"19ALAR~1 (15 sec)",
"20ALAR~1 (7 sec)",
"21DIGI~1 (8 sec)",
"22ALER~1 (64 sec)",
"23SHIP~1 (4 sec)",
"25CHRI~1 (4 sec)",
"26GONG~1 (12 sec)",
"27SING~1 (1 sec)",
"28TONA~1 (5 sec)",
"29UPWA~1 (2 sec)",
"30DOOR~1 (27 sec)",
]
entity_registry = er.async_get(hass)
entity_entry = entity_registry.async_get(DEFAULT_TONE_SELECT_ENTITY)
assert entity_entry
assert entity_entry.entity_category is EntityCategory.CONFIG
# Test select option with string value
await hass.services.async_call(
"select",
"select_option",
{"entity_id": DEFAULT_TONE_SELECT_ENTITY, "option": "30DOOR~1 (27 sec)"},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == node.node_id
assert args["valueId"] == {
"endpoint": 2,
"commandClass": 121,
"commandClassName": "Sound Switch",
"property": "defaultToneId",
"propertyName": "defaultToneId",
"ccVersion": 1,
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"label": "Default tone ID",
"min": 0,
"max": 254,
},
"value": 17,
}
assert args["value"] == 30
client.async_send_command.reset_mock()
# Test value update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Sound Switch",
"commandClass": 121,
"endpoint": 2,
"property": "defaultToneId",
"newValue": 30,
"prevValue": 17,
"propertyName": "defaultToneId",
},
},
)
node.receive_event(event)
state = hass.states.get(DEFAULT_TONE_SELECT_ENTITY)
assert state
assert state.state == "30DOOR~1 (27 sec)"
async def test_protection_select(
hass: HomeAssistant,
client: MagicMock,
inovelli_lzw36: Node,
integration: ConfigEntry,
) -> None:
"""Test the default tone select entity."""
node = inovelli_lzw36
state = hass.states.get(PROTECTION_SELECT_ENTITY)
assert state
assert state.state == "Unprotected"
attr = state.attributes
assert attr["options"] == [
"Unprotected",
"ProtectedBySequence",
"NoOperationPossible",
]
entity_registry = er.async_get(hass)
entity_entry = entity_registry.async_get(PROTECTION_SELECT_ENTITY)
assert entity_entry
assert entity_entry.entity_category is EntityCategory.CONFIG
# Test select option with string value
await hass.services.async_call(
"select",
"select_option",
{"entity_id": PROTECTION_SELECT_ENTITY, "option": "ProtectedBySequence"},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == node.node_id
assert args["valueId"] == {
"endpoint": 0,
"commandClass": 117,
"commandClassName": "Protection",
"property": "local",
"propertyName": "local",
"ccVersion": 2,
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"label": "Local protection state",
"states": {
"0": "Unprotected",
"1": "ProtectedBySequence",
"2": "NoOperationPossible",
},
},
"value": 0,
}
assert args["value"] == 1
client.async_send_command.reset_mock()
# Test value update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Protection",
"commandClass": 117,
"endpoint": 0,
"property": "local",
"newValue": 1,
"prevValue": 0,
"propertyName": "local",
},
},
)
node.receive_event(event)
state = hass.states.get(PROTECTION_SELECT_ENTITY)
assert state
assert state.state == "ProtectedBySequence"
# Test null value
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Protection",
"commandClass": 117,
"endpoint": 0,
"property": "local",
"newValue": None,
"prevValue": 1,
"propertyName": "local",
},
},
)
node.receive_event(event)
state = hass.states.get(PROTECTION_SELECT_ENTITY)
assert state
assert state.state == STATE_UNKNOWN
async def test_multilevel_switch_select(
    hass: HomeAssistant,
    client: MagicMock,
    fortrezz_ssa1_siren: Node,
    integration: ConfigEntry,
) -> None:
"""Test Multilevel Switch CC based select entity."""
node = fortrezz_ssa1_siren
state = hass.states.get(MULTILEVEL_SWITCH_SELECT_ENTITY)
assert state
assert state.state == "Off"
attr = state.attributes
assert attr["options"] == [
"Off",
"Strobe ONLY",
"Siren ONLY",
"Siren & Strobe FULL Alarm",
]
# Test select option with string value
await hass.services.async_call(
"select",
"select_option",
{"entity_id": MULTILEVEL_SWITCH_SELECT_ENTITY, "option": "Strobe ONLY"},
blocking=True,
)
assert len(client.async_send_command.call_args_list) == 1
args = client.async_send_command.call_args[0][0]
assert args["command"] == "node.set_value"
assert args["nodeId"] == node.node_id
assert args["valueId"] == {
"endpoint": 0,
"commandClass": 38,
"commandClassName": "Multilevel Switch",
"property": "targetValue",
"propertyName": "targetValue",
"ccVersion": 1,
"metadata": {
"type": "number",
"readable": True,
"writeable": True,
"label": "Target value",
"valueChangeOptions": ["transitionDuration"],
"min": 0,
"max": 99,
},
}
assert args["value"] == 33
client.async_send_command.reset_mock()
# Test value update from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": node.node_id,
"args": {
"commandClassName": "Multilevel Switch",
"commandClass": 38,
"endpoint": 0,
"property": "currentValue",
"newValue": 33,
"prevValue": 0,
"propertyName": "currentValue",
},
},
)
node.receive_event(event)
state = hass.states.get(MULTILEVEL_SWITCH_SELECT_ENTITY)
assert state.state == "Strobe ONLY"
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..registry import RegistryService_pb2 as registry_dot_RegistryService__pb2
class RegistryServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.FindRegisteredModel = channel.unary_unary(
'/ai.verta.registry.RegistryService/FindRegisteredModel',
request_serializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.Response.FromString,
)
self.GetRegisteredModel = channel.unary_unary(
'/ai.verta.registry.RegistryService/GetRegisteredModel',
request_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.Response.FromString,
)
self.GetRegisteredModelCount = channel.unary_unary(
'/ai.verta.registry.RegistryService/GetRegisteredModelCount',
request_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.Response.FromString,
)
self.CreateRegisteredModel = channel.unary_unary(
'/ai.verta.registry.RegistryService/CreateRegisteredModel',
request_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.FromString,
)
self.UpdateRegisteredModel = channel.unary_unary(
'/ai.verta.registry.RegistryService/UpdateRegisteredModel',
request_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.FromString,
)
self.DeleteRegisteredModel = channel.unary_unary(
'/ai.verta.registry.RegistryService/DeleteRegisteredModel',
request_serializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.Response.FromString,
)
self.FindModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/FindModelVersion',
request_serializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.Response.FromString,
)
self.GetModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/GetModelVersion',
request_serializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.Response.FromString,
)
self.CreateModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/CreateModelVersion',
request_serializer=registry_dot_RegistryService__pb2.SetModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.FromString,
)
self.UpdateModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/UpdateModelVersion',
request_serializer=registry_dot_RegistryService__pb2.SetModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.FromString,
)
self.SetLockModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/SetLockModelVersion',
request_serializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.Response.FromString,
)
self.DeleteModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/DeleteModelVersion',
request_serializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.Response.FromString,
)
self.getUrlForArtifact = channel.unary_unary(
'/ai.verta.registry.RegistryService/getUrlForArtifact',
request_serializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.Response.FromString,
)
self.commitArtifactPart = channel.unary_unary(
'/ai.verta.registry.RegistryService/commitArtifactPart',
request_serializer=registry_dot_RegistryService__pb2.CommitArtifactPart.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.CommitArtifactPart.Response.FromString,
)
self.getCommittedArtifactParts = channel.unary_unary(
'/ai.verta.registry.RegistryService/getCommittedArtifactParts',
request_serializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.Response.FromString,
)
self.commitMultipartArtifact = channel.unary_unary(
'/ai.verta.registry.RegistryService/commitMultipartArtifact',
request_serializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.Response.FromString,
)
self.logDatasetsInModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/logDatasetsInModelVersion',
request_serializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.Response.FromString,
)
self.logCodeBlobInModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/logCodeBlobInModelVersion',
request_serializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.Response.FromString,
)
self.logAttributesInModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/logAttributesInModelVersion',
request_serializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.Response.FromString,
)
self.logDockerMetadataInModelVersion = channel.unary_unary(
'/ai.verta.registry.RegistryService/logDockerMetadataInModelVersion',
request_serializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.SerializeToString,
response_deserializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.Response.FromString,
)
class RegistryServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def FindRegisteredModel(self, request, context):
"""CRUD for RegisteredModel
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetRegisteredModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetRegisteredModelCount(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateRegisteredModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateRegisteredModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteRegisteredModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FindModelVersion(self, request, context):
"""CRUD for Model Version
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLockModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getUrlForArtifact(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def commitArtifactPart(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getCommittedArtifactParts(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def commitMultipartArtifact(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def logDatasetsInModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def logCodeBlobInModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def logAttributesInModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def logDockerMetadataInModelVersion(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RegistryServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'FindRegisteredModel': grpc.unary_unary_rpc_method_handler(
servicer.FindRegisteredModel,
request_deserializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.FindRegisteredModelRequest.Response.SerializeToString,
),
'GetRegisteredModel': grpc.unary_unary_rpc_method_handler(
servicer.GetRegisteredModel,
request_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelRequest.Response.SerializeToString,
),
'GetRegisteredModelCount': grpc.unary_unary_rpc_method_handler(
servicer.GetRegisteredModelCount,
request_deserializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.GetRegisteredModelCountRequest.Response.SerializeToString,
),
'CreateRegisteredModel': grpc.unary_unary_rpc_method_handler(
servicer.CreateRegisteredModel,
request_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.FromString,
response_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.SerializeToString,
),
'UpdateRegisteredModel': grpc.unary_unary_rpc_method_handler(
servicer.UpdateRegisteredModel,
request_deserializer=registry_dot_RegistryService__pb2.SetRegisteredModel.FromString,
response_serializer=registry_dot_RegistryService__pb2.SetRegisteredModel.Response.SerializeToString,
),
'DeleteRegisteredModel': grpc.unary_unary_rpc_method_handler(
servicer.DeleteRegisteredModel,
request_deserializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.DeleteRegisteredModelRequest.Response.SerializeToString,
),
'FindModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.FindModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.FindModelVersionRequest.Response.SerializeToString,
),
'GetModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.GetModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.GetModelVersionRequest.Response.SerializeToString,
),
'CreateModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.CreateModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.SerializeToString,
),
'UpdateModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.UpdateModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.SetModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.SetModelVersion.Response.SerializeToString,
),
'SetLockModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.SetLockModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.SetLockModelVersionRequest.Response.SerializeToString,
),
'DeleteModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.DeleteModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.FromString,
response_serializer=registry_dot_RegistryService__pb2.DeleteModelVersionRequest.Response.SerializeToString,
),
'getUrlForArtifact': grpc.unary_unary_rpc_method_handler(
servicer.getUrlForArtifact,
request_deserializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.FromString,
response_serializer=registry_dot_RegistryService__pb2.GetUrlForArtifact.Response.SerializeToString,
),
'commitArtifactPart': grpc.unary_unary_rpc_method_handler(
servicer.commitArtifactPart,
request_deserializer=registry_dot_RegistryService__pb2.CommitArtifactPart.FromString,
response_serializer=registry_dot_RegistryService__pb2.CommitArtifactPart.Response.SerializeToString,
),
'getCommittedArtifactParts': grpc.unary_unary_rpc_method_handler(
servicer.getCommittedArtifactParts,
request_deserializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.FromString,
response_serializer=registry_dot_RegistryService__pb2.GetCommittedArtifactParts.Response.SerializeToString,
),
'commitMultipartArtifact': grpc.unary_unary_rpc_method_handler(
servicer.commitMultipartArtifact,
request_deserializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.FromString,
response_serializer=registry_dot_RegistryService__pb2.CommitMultipartArtifact.Response.SerializeToString,
),
'logDatasetsInModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.logDatasetsInModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.LogDatasetsInModelVersion.Response.SerializeToString,
),
'logCodeBlobInModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.logCodeBlobInModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.LogCodeBlobInModelVersion.Response.SerializeToString,
),
'logAttributesInModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.logAttributesInModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.LogAttributesInModelVersion.Response.SerializeToString,
),
'logDockerMetadataInModelVersion': grpc.unary_unary_rpc_method_handler(
servicer.logDockerMetadataInModelVersion,
request_deserializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.FromString,
response_serializer=registry_dot_RegistryService__pb2.LogDockerMetadataInModelVersion.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ai.verta.registry.RegistryService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
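# Illustrative sketches (not generated code): how the stub and the servicer
# registration helper above are typically used. The target address, port and
# executor size are assumptions for the example.
def _example_find_registered_models(target='localhost:8085'):
  """Client sketch: call FindRegisteredModel over an insecure channel."""
  channel = grpc.insecure_channel(target)
  stub = RegistryServiceStub(channel)
  request = registry_dot_RegistryService__pb2.FindRegisteredModelRequest()
  return stub.FindRegisteredModel(request)
def _example_serve(servicer, address='[::]:8085'):
  """Server sketch: attach a RegistryServiceServicer implementation to a server."""
  from concurrent import futures
  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
  add_RegistryServiceServicer_to_server(servicer, server)
  server.add_insecure_port(address)
  server.start()
  return server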
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import os
import pickle
import shutil
import sys
import tempfile
import unittest
from genshi.compat import BytesIO, StringIO
from genshi.core import Markup
from genshi.input import XML
from genshi.template.base import BadDirectiveError, TemplateSyntaxError
from genshi.template.loader import TemplateLoader, TemplateNotFound
from genshi.template.markup import MarkupTemplate
class MarkupTemplateTestCase(unittest.TestCase):
"""Tests for markup template processing."""
def test_parse_fileobj(self):
fileobj = StringIO('<root> ${var} $var</root>')
tmpl = MarkupTemplate(fileobj)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_parse_stream(self):
stream = XML('<root> ${var} $var</root>')
tmpl = MarkupTemplate(stream)
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_pickle(self):
stream = XML('<root>$var</root>')
tmpl = MarkupTemplate(stream)
buf = BytesIO()
pickle.dump(tmpl, buf, 2)
buf.seek(0)
unpickled = pickle.load(buf)
self.assertEqual('<root>42</root>', str(unpickled.generate(var=42)))
def test_interpolate_mixed3(self):
tmpl = MarkupTemplate('<root> ${var} $var</root>')
self.assertEqual('<root> 42 42</root>', str(tmpl.generate(var=42)))
def test_interpolate_leading_trailing_space(self):
tmpl = MarkupTemplate('<root>${ foo }</root>')
self.assertEqual('<root>bar</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_multiline(self):
tmpl = MarkupTemplate("""<root>${dict(
bar = 'baz'
)[foo]}</root>""")
self.assertEqual('<root>baz</root>', str(tmpl.generate(foo='bar')))
def test_interpolate_non_string_attrs(self):
tmpl = MarkupTemplate('<root attr="${1}"/>')
self.assertEqual('<root attr="1"/>', str(tmpl.generate()))
def test_interpolate_list_result(self):
tmpl = MarkupTemplate('<root>$foo</root>')
self.assertEqual('<root>buzz</root>', str(tmpl.generate(foo=('buzz',))))
def test_empty_attr(self):
tmpl = MarkupTemplate('<root attr=""/>')
self.assertEqual('<root attr=""/>', str(tmpl.generate()))
def test_empty_attr_interpolated(self):
tmpl = MarkupTemplate('<root attr="$attr"/>')
self.assertEqual('<root attr=""/>', str(tmpl.generate(attr='')))
def test_bad_directive_error(self):
xml = '<p xmlns:py="http://genshi.edgewall.org/" py:do="nothing" />'
try:
            tmpl = MarkupTemplate(xml, filename='test.html')
            self.fail('Expected BadDirectiveError')
except BadDirectiveError as e:
self.assertEqual('test.html', e.filename)
self.assertEqual(1, e.lineno)
def test_directive_value_syntax_error(self):
xml = """<p xmlns:py="http://genshi.edgewall.org/" py:if="bar'" />"""
try:
tmpl = MarkupTemplate(xml, filename='test.html').generate()
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError as e:
self.assertEqual('test.html', e.filename)
self.assertEqual(1, e.lineno)
def test_expression_syntax_error(self):
xml = """<p>
Foo <em>${bar"}</em>
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError as e:
self.assertEqual('test.html', e.filename)
self.assertEqual(2, e.lineno)
def test_expression_syntax_error_multi_line(self):
xml = """<p><em></em>
${bar"}
</p>"""
try:
tmpl = MarkupTemplate(xml, filename='test.html')
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError as e:
self.assertEqual('test.html', e.filename)
self.assertEqual(3, e.lineno)
def test_markup_noescape(self):
"""
Verify that outputting context data that is a `Markup` instance is not
escaped.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
<b>foo</b>
</div>""", str(tmpl.generate(myvar=Markup('<b>foo</b>'))))
def test_text_noescape_quotes(self):
"""
Verify that outputting context data in text nodes doesn't escape
quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
$myvar
</div>""")
self.assertEqual("""<div>
"foo"
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_attr_escape_quotes(self):
"""
        Verify that outputting context data in attributes escapes quotes.
"""
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<elem class="$myvar"/>
</div>""")
self.assertEqual("""<div>
<elem class=""foo""/>
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_directive_element(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<py:if test="myvar">bar</py:if>
</div>""")
self.assertEqual("""<div>
bar
</div>""", str(tmpl.generate(myvar='"foo"')))
def test_normal_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- foo bar -->
</div>""")
self.assertEqual("""<div>
<!-- foo bar -->
</div>""", str(tmpl.generate()))
def test_template_comment(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<!-- !foo -->
<!--!bar-->
</div>""")
self.assertEqual("""<div>
</div>""", str(tmpl.generate()))
def test_parse_with_same_namespace_nested(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
<span xmlns:py="http://genshi.edgewall.org/">
</span>
</div>""")
self.assertEqual("""<div>
<span>
</span>
</div>""", str(tmpl.generate()))
def test_latin1_encoded_with_xmldecl(self):
tmpl = MarkupTemplate("""<?xml version="1.0" encoding="iso-8859-1" ?>
<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual("""<?xml version="1.0" encoding="iso-8859-1"?>\n<div>
\xf6
</div>""", str(tmpl.generate()))
def test_latin1_encoded_explicit_encoding(self):
tmpl = MarkupTemplate("""<div xmlns:py="http://genshi.edgewall.org/">
\xf6
</div>""".encode('iso-8859-1'), encoding='iso-8859-1')
self.assertEqual("""<div>
\xf6
</div>""", str(tmpl.generate()))
def test_exec_with_trailing_space(self):
"""
Verify that a code block processing instruction with trailing space
does not cause a syntax error (see ticket #127).
"""
MarkupTemplate("""<foo>
<?python
bar = 42
?>
</foo>""")
def test_exec_import(self):
tmpl = MarkupTemplate("""<?python from datetime import timedelta ?>
<div xmlns:py="http://genshi.edgewall.org/">
${timedelta(days=2)}
</div>""")
self.assertEqual("""<div>
2 days, 0:00:00
</div>""", str(tmpl.generate()))
def test_exec_def(self):
tmpl = MarkupTemplate("""
<?python
def foo():
return 42
?>
<div xmlns:py="http://genshi.edgewall.org/">
${foo()}
</div>""")
self.assertEqual("""<div>
42
</div>""", str(tmpl.generate()))
def test_namespace_on_removed_elem(self):
"""
Verify that a namespace declaration on an element that is removed from
the generated stream does not get pushed up to the next non-stripped
element (see ticket #107).
"""
tmpl = MarkupTemplate("""<?xml version="1.0"?>
<Test xmlns:py="http://genshi.edgewall.org/">
<Size py:if="0" xmlns:t="test">Size</Size>
<Item/>
</Test>""")
self.assertEqual("""<?xml version="1.0"?>\n<Test>
<Item/>
</Test>""", str(tmpl.generate()))
def test_include_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate(name='tmpl1').render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_dynamic_include_href(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="${name}.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate(name='tmpl1').render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_select_included_elements(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<li>$item</li>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<ul py:match="ul">${select('li')}</ul>
<ul py:with="items=(1, 2, 3)">
<xi:include href="tmpl1.html" py:for="item in items" />
</ul>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<ul><li>1</li><li>2</li><li>3</li></ul>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_error_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"/>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertRaises(TemplateNotFound, tmpl.generate().render)
finally:
shutil.rmtree(dirname)
def test_fallback_when_include_not_found(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_fallback_when_auto_reload_true(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl1.html"><xi:fallback>
Missing</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=True)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_include_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html">
<xi:fallback>Missing</xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
Missing
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_include_in_fallback(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="tmpl2.html">
<xi:fallback>
<xi:include href="tmpl1.html" />
</xi:fallback>
</xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_fallback_with_directive(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html"><xi:fallback>
<py:if test="True">tmpl1.html not found</py:if>
</xi:fallback></xi:include>
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
tmpl1.html not found
</html>""", tmpl.generate(debug=True).render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_inlined(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
# if not inlined the following would be 5
self.assertEqual(7, len(tmpl.stream))
self.assertEqual("""<html>
<div>Included</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_include_inlined_in_loop(self):
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<div>Included $idx</div>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/">
<xi:include href="tmpl1.html" py:for="idx in range(3)" />
</html>""")
finally:
file2.close()
loader = TemplateLoader([dirname], auto_reload=False)
tmpl = loader.load('tmpl2.html')
self.assertEqual("""<html>
<div>Included 0</div><div>Included 1</div><div>Included 2</div>
</html>""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_allow_exec_false(self):
xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
try:
tmpl = MarkupTemplate(xml, filename='test.html',
allow_exec=False)
self.fail('Expected TemplateSyntaxError')
except TemplateSyntaxError:
pass
def test_allow_exec_true(self):
xml = ("""<?python
title = "A Genshi Template"
?>
<html xmlns:py="http://genshi.edgewall.org/">
<head>
<title py:content="title">This is replaced.</title>
</head>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
def test_exec_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<?python title="wakka wakka wakka" ?>
${title}
</py:match>
<body><p>moot text</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html', allow_exec=True)
self.assertEqual("""<html>
<body>
wakka wakka wakka
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_with_in_match(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body/p">
<h1>${select('text()')}</h1>
${select('.')}
</py:match>
<body><p py:with="foo='bar'">${foo}</p></body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<h1>bar</h1>
<p>bar</p>
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_nested_include_matches(self):
# See ticket #157
dirname = tempfile.mkdtemp(suffix='genshi_test')
try:
file1 = open(os.path.join(dirname, 'tmpl1.html'), 'w')
try:
file1.write("""<html xmlns:py="http://genshi.edgewall.org/" py:strip="">
<div class="target">Some content.</div>
</html>""")
finally:
file1.close()
file2 = open(os.path.join(dirname, 'tmpl2.html'), 'w')
try:
file2.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude">
<body>
<h1>Some full html document that includes file1.html</h1>
<xi:include href="tmpl1.html" />
</body>
</html>""")
finally:
file2.close()
file3 = open(os.path.join(dirname, 'tmpl3.html'), 'w')
try:
file3.write("""<html xmlns:py="http://genshi.edgewall.org/"
xmlns:xi="http://www.w3.org/2001/XInclude" py:strip="">
<div py:match="div[@class='target']" py:attrs="select('@*')">
Some added stuff.
${select('*|text()')}
</div>
<xi:include href="tmpl2.html" />
</html>
""")
finally:
file3.close()
loader = TemplateLoader([dirname])
tmpl = loader.load('tmpl3.html')
self.assertEqual("""
<html>
<body>
<h1>Some full html document that includes file1.html</h1>
<div class="target">
Some added stuff.
Some content.
</div>
</body>
</html>
""", tmpl.generate().render(encoding=None))
finally:
shutil.rmtree(dirname)
def test_nested_matches_without_buffering(self):
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" once="true" buffer="false">
<body>
${select('*|text')}
And some other stuff...
</body>
</py:match>
<body>
<span py:match="span">Foo</span>
<span>Bar</span>
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
<span>Foo</span>
And some other stuff...
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_without_select(self):
# See <http://genshi.edgewall.org/ticket/243>
xml = ("""<html xmlns:py="http://genshi.edgewall.org/">
<py:match path="body" buffer="false">
<body>
This replaces the other text.
</body>
</py:match>
<body>
This gets replaced.
</body>
</html>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<html>
<body>
This replaces the other text.
</body>
</html>""", tmpl.generate().render(encoding=None))
def test_match_tail_handling(self):
# See <http://genshi.edgewall.org/ticket/399>
xml = ("""<rhyme xmlns:py="http://genshi.edgewall.org/">
<py:match path="*[@type]">
${select('.')}
</py:match>
<lines>
<first type="one">fish</first>
<second type="two">fish</second>
<third type="red">fish</third>
<fourth type="blue">fish</fourth>
</lines>
</rhyme>""")
tmpl = MarkupTemplate(xml, filename='test.html')
self.assertEqual("""<rhyme>
<lines>
<first type="one">fish</first>
<second type="two">fish</second>
<third type="red">fish</third>
<fourth type="blue">fish</fourth>
</lines>
</rhyme>""", tmpl.generate().render(encoding=None))
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(MarkupTemplate.__module__))
suite.addTest(unittest.makeSuite(MarkupTemplateTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"""
c/c++ configuration routines
"""
import os, imp, sys, shlex, shutil
from Utils import md5
import Build, Utils, Configure, Task, Options, Logs, TaskGen
from Constants import *
from Configure import conf, conftest
cfg_ver = {
'atleast-version': '>=',
'exact-version': '==',
'max-version': '<=',
}
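# Illustrative sketch (assumed wscript call; keyword names taken from
# validate_cfg/exec_cfg below): a version check such as
#   conf.check_cfg(package='pango', atleast_version='1.0.0',
#                  uselib_store='PANGO', mandatory=True)
# is mapped through this table and runs pkg-config with
# --atleast-version=1.0.0 pango before the remaining flags are parsed.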
SNIP1 = '''
int main() {
void *p;
p=(void*)(%s);
return 0;
}
'''
SNIP2 = '''
int main() {
if ((%(type_name)s *) 0) return 0;
if (sizeof (%(type_name)s)) return 0;
}
'''
SNIP3 = '''
int main() {
return 0;
}
'''
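# How the snippets above are used (see validate_c below): SNIP1 % 'strdup'
# builds a tiny program referencing a function (function_name checks),
# SNIP2 % {'type_name': 'size_t'} exercises a type (type_name checks), and
# SNIP3 is the default body for plain header or flag checks; the substituted
# names here are only illustrative.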
def parse_flags(line, uselib, env):
"""pkg-config still has bugs on some platforms, and there are many -config programs, parsing flags is necessary :-/"""
lst = shlex.split(line)
while lst:
x = lst.pop(0)
st = x[:2]
ot = x[2:]
if st == '-I' or st == '/I':
if not ot: ot = lst.pop(0)
env.append_unique('CPPPATH_' + uselib, ot)
elif st == '-D':
if not ot: ot = lst.pop(0)
env.append_unique('CXXDEFINES_' + uselib, ot)
env.append_unique('CCDEFINES_' + uselib, ot)
elif st == '-l':
if not ot: ot = lst.pop(0)
env.append_unique('LIB_' + uselib, ot)
elif st == '-L':
if not ot: ot = lst.pop(0)
env.append_unique('LIBPATH_' + uselib, ot)
elif x == '-pthread' or x.startswith('+'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('CXXFLAGS_' + uselib, x)
env.append_unique('LINKFLAGS_' + uselib, x)
elif x == '-framework':
env.append_unique('FRAMEWORK_' + uselib, lst.pop(0))
elif x.startswith('-F'):
env.append_unique('FRAMEWORKPATH_' + uselib, x[2:])
elif x.startswith('-std'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('LINKFLAGS_' + uselib, x)
elif x.startswith('-Wl'):
env.append_unique('LINKFLAGS_' + uselib, x)
elif x.startswith('-m') or x.startswith('-f'):
env.append_unique('CCFLAGS_' + uselib, x)
env.append_unique('CXXFLAGS_' + uselib, x)
@conf
def ret_msg(self, f, kw):
"""execute a function, when provided"""
if isinstance(f, str):
return f
return f(kw)
@conf
def validate_cfg(self, kw):
if not 'path' in kw:
kw['path'] = 'pkg-config --errors-to-stdout --print-errors'
# pkg-config version
if 'atleast_pkgconfig_version' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for pkg-config version >= %s' % kw['atleast_pkgconfig_version']
return
# pkg-config --modversion
if 'modversion' in kw:
return
# checking for the version of a module, for the moment, one thing at a time
for x in cfg_ver.keys():
y = x.replace('-', '_')
if y in kw:
if not 'package' in kw:
raise ValueError('%s requires a package' % x)
if not 'msg' in kw:
kw['msg'] = 'Checking for %s %s %s' % (kw['package'], cfg_ver[x], kw[y])
return
if not 'msg' in kw:
kw['msg'] = 'Checking for %s' % kw['package']
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
if not 'errmsg' in kw:
kw['errmsg'] = 'not found'
@conf
def cmd_and_log(self, cmd, kw):
Logs.debug('runner: %s\n' % cmd)
if self.log: self.log.write('%s\n' % cmd)
try:
p = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, shell=True)
output = p.communicate()[0]
except OSError:
self.fatal('fail')
if p.returncode:
if not kw.get('errmsg', ''):
if kw.get('mandatory', False):
kw['errmsg'] = output.strip()
else:
kw['errmsg'] = 'fail'
self.fatal('fail')
return output
@conf
def exec_cfg(self, kw):
# pkg-config version
if 'atleast_pkgconfig_version' in kw:
cmd = '%s --atleast-pkgconfig-version=%s' % (kw['path'], kw['atleast_pkgconfig_version'])
self.cmd_and_log(cmd, kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
return
# checking for the version of a module
for x in cfg_ver:
y = x.replace('-', '_')
if y in kw:
self.cmd_and_log('%s --%s=%s %s' % (kw['path'], x, kw[y], kw['package']), kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
break
# retrieving the version of a module
if 'modversion' in kw:
version = self.cmd_and_log('%s --modversion %s' % (kw['path'], kw['modversion']), kw).strip()
self.define('%s_VERSION' % Utils.quote_define_name(kw.get('uselib_store', kw['modversion'])), version)
return version
lst = [kw['path']]
for key, val in kw.get('define_variable', {}).iteritems():
lst.append('--define-variable=%s=%s' % (key, val))
lst.append(kw.get('args', ''))
lst.append(kw['package'])
# so we assume the command-line will output flags to be parsed afterwards
cmd = ' '.join(lst)
ret = self.cmd_and_log(cmd, kw)
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
parse_flags(ret, kw.get('uselib_store', kw['package'].upper()), kw.get('env', self.env))
return ret
@conf
def check_cfg(self, *k, **kw):
self.validate_cfg(kw)
if 'msg' in kw:
self.check_message_1(kw['msg'])
ret = None
try:
ret = self.exec_cfg(kw)
except Configure.ConfigurationError, e:
if 'errmsg' in kw:
self.check_message_2(kw['errmsg'], 'YELLOW')
if 'mandatory' in kw and kw['mandatory']:
if Logs.verbose > 1:
raise
else:
self.fatal('the configuration failed (see %r)' % self.log.name)
else:
kw['success'] = ret
if 'okmsg' in kw:
self.check_message_2(self.ret_msg(kw['okmsg'], kw))
return ret
# the idea is the following: now that we are certain
# that all the code here is only for c or c++, it is
# easy to put all the logic in one function
#
# this should prevent code duplication (ita)
# env: an optional environment (modified -> provide a copy)
# compiler: cc or cxx - it tries to guess what is best
# type: program, shlib, staticlib, objects
# code: a c code to execute
# uselib_store: where to add the variables
# uselib: parameters to use for building
# define: define to set, like FOO in #define FOO, if not set, add /* #undef FOO */
# execute: True or False - will return the result of the execution
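# Illustrative usage sketch (assumed wscript code; the keyword names come
# from validate_c/post_check below, everything else is hypothetical):
#   def configure(conf):
#       conf.check_cc(header_name='unistd.h')    # typically defines HAVE_UNISTD_H
#       conf.check_cc(function_name='usleep', header_name='unistd.h')
#       conf.check_cxx(lib='z', uselib_store='ZLIB', mandatory=True)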
@conf
def validate_c(self, kw):
"""validate the parameters for the test method"""
if not 'env' in kw:
kw['env'] = self.env.copy()
env = kw['env']
if not 'compiler' in kw:
kw['compiler'] = 'cc'
if env['CXX_NAME'] and Task.TaskBase.classes.get('cxx', None):
kw['compiler'] = 'cxx'
if not self.env['CXX']:
self.fatal('a c++ compiler is required')
else:
if not self.env['CC']:
self.fatal('a c compiler is required')
if not 'type' in kw:
kw['type'] = 'cprogram'
assert not(kw['type'] != 'cprogram' and kw.get('execute', 0)), 'can only execute programs'
#if kw['type'] != 'program' and kw.get('execute', 0):
# raise ValueError, 'can only execute programs'
def to_header(dct):
if 'header_name' in dct:
dct = Utils.to_list(dct['header_name'])
return ''.join(['#include <%s>\n' % x for x in dct])
return ''
# set the file name
if not 'compile_mode' in kw:
kw['compile_mode'] = (kw['compiler'] == 'cxx') and 'cxx' or 'cc'
if not 'compile_filename' in kw:
kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '')
#OSX
if 'framework_name' in kw:
try: TaskGen.task_gen.create_task_macapp
except AttributeError: self.fatal('frameworks require the osx tool')
fwkname = kw['framework_name']
if not 'uselib_store' in kw:
kw['uselib_store'] = fwkname.upper()
if not kw.get('no_header', False):
if not 'header_name' in kw:
kw['header_name'] = []
fwk = '%s/%s.h' % (fwkname, fwkname)
if kw.get('remove_dot_h', None):
fwk = fwk[:-2]
kw['header_name'] = Utils.to_list(kw['header_name']) + [fwk]
kw['msg'] = 'Checking for framework %s' % fwkname
kw['framework'] = fwkname
#kw['frameworkpath'] = set it yourself
if 'function_name' in kw:
fu = kw['function_name']
if not 'msg' in kw:
kw['msg'] = 'Checking for function %s' % fu
kw['code'] = to_header(kw) + SNIP1 % fu
if not 'uselib_store' in kw:
kw['uselib_store'] = fu.upper()
if not 'define_name' in kw:
kw['define_name'] = self.have_define(fu)
elif 'type_name' in kw:
tu = kw['type_name']
if not 'msg' in kw:
kw['msg'] = 'Checking for type %s' % tu
if not 'header_name' in kw:
kw['header_name'] = 'stdint.h'
kw['code'] = to_header(kw) + SNIP2 % {'type_name' : tu}
if not 'define_name' in kw:
kw['define_name'] = self.have_define(tu.upper())
elif 'header_name' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for header %s' % kw['header_name']
l = Utils.to_list(kw['header_name'])
assert len(l)>0, 'list of headers in header_name is empty'
kw['code'] = to_header(kw) + SNIP3
if not 'uselib_store' in kw:
kw['uselib_store'] = l[0].upper()
if not 'define_name' in kw:
kw['define_name'] = self.have_define(l[0])
if 'lib' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for library %s' % kw['lib']
if not 'uselib_store' in kw:
kw['uselib_store'] = kw['lib'].upper()
if 'staticlib' in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for static library %s' % kw['staticlib']
if not 'uselib_store' in kw:
kw['uselib_store'] = kw['staticlib'].upper()
if 'fragment' in kw:
# an additional code fragment may be provided to replace the predefined code
# in custom headers
kw['code'] = kw['fragment']
if not 'msg' in kw:
kw['msg'] = 'Checking for custom code'
if not 'errmsg' in kw:
kw['errmsg'] = 'fail'
for (flagsname,flagstype) in [('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')]:
if flagsname in kw:
if not 'msg' in kw:
kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname])
if not 'errmsg' in kw:
kw['errmsg'] = 'fail'
if not 'execute' in kw:
kw['execute'] = False
if not 'errmsg' in kw:
kw['errmsg'] = 'not found'
if not 'okmsg' in kw:
kw['okmsg'] = 'ok'
if not 'code' in kw:
kw['code'] = SNIP3
if not kw.get('success'): kw['success'] = None
assert 'msg' in kw, 'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
@conf
def post_check(self, *k, **kw):
"set the variables after a test was run successfully"
is_success = 0
if kw['execute']:
if kw['success']:
is_success = kw['success']
else:
is_success = (kw['success'] == 0)
def define_or_stuff():
nm = kw['define_name']
if kw['execute'] and kw.get('define_ret', None) and isinstance(is_success, str):
self.define(kw['define_name'], is_success, quote=kw.get('quote', 1))
else:
self.define_cond(kw['define_name'], is_success)
if 'define_name' in kw:
if 'header_name' in kw or 'function_name' in kw or 'type_name' in kw or 'fragment' in kw:
define_or_stuff()
if is_success and 'uselib_store' in kw:
import cc, cxx
for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars):
lk = k.lower()
# inconsistency: includes -> CPPPATH
if k == 'CPPPATH': lk = 'includes'
if k == 'CXXDEFINES': lk = 'defines'
if k == 'CCDEFINES': lk = 'defines'
if lk in kw:
val = kw[lk]
# remove trailing slash
if isinstance(val, str):
val = val.rstrip(os.path.sep)
self.env.append_unique(k + '_' + kw['uselib_store'], val)
@conf
def check(self, *k, **kw):
# so this will be the generic function
# it will be safer to use check_cxx or check_cc
self.validate_c(kw)
self.check_message_1(kw['msg'])
ret = None
try:
ret = self.run_c_code(*k, **kw)
except Configure.ConfigurationError, e:
self.check_message_2(kw['errmsg'], 'YELLOW')
if 'mandatory' in kw and kw['mandatory']:
if Logs.verbose > 1:
raise
else:
self.fatal('the configuration failed (see %r)' % self.log.name)
else:
kw['success'] = ret
self.check_message_2(self.ret_msg(kw['okmsg'], kw))
self.post_check(*k, **kw)
if not kw.get('execute', False):
return ret == 0
return ret
@conf
def run_c_code(self, *k, **kw):
test_f_name = kw['compile_filename']
k = 0
while k < 10000:
# make certain to use a fresh folder - necessary for win32
dir = os.path.join(self.blddir, '.conf_check_%d' % k)
# if the folder already exists, remove it
try:
shutil.rmtree(dir)
except OSError:
pass
try:
os.stat(dir)
except OSError:
break
k += 1
try:
os.makedirs(dir)
except:
self.fatal('cannot create a configuration test folder %r' % dir)
try:
os.stat(dir)
except:
self.fatal('cannot use the configuration test folder %r' % dir)
bdir = os.path.join(dir, 'testbuild')
if not os.path.exists(bdir):
os.makedirs(bdir)
env = kw['env']
dest = open(os.path.join(dir, test_f_name), 'w')
dest.write(kw['code'])
dest.close()
back = os.path.abspath('.')
bld = Build.BuildContext()
bld.log = self.log
bld.all_envs.update(self.all_envs)
bld.all_envs['default'] = env
bld.lst_variants = bld.all_envs.keys()
bld.load_dirs(dir, bdir)
os.chdir(dir)
bld.rescan(bld.srcnode)
o = bld(features=[kw['compile_mode'], kw['type']], source=test_f_name, target='testprog')
for k, v in kw.iteritems():
setattr(o, k, v)
self.log.write("==>\n%s\n<==\n" % kw['code'])
# compile the program
try:
bld.compile()
except Utils.WafError:
ret = Utils.ex_stack()
else:
ret = 0
# chdir before returning
os.chdir(back)
if ret:
self.log.write('command returned %r' % ret)
self.fatal(str(ret))
# keep the name of the program to execute
if kw['execute']:
lastprog = o.link_task.outputs[0].abspath(env)
# if we need to run the program, try to get its result
if kw['execute']:
args = Utils.to_list(kw.get('exec_args', []))
try:
data = Utils.cmd_output([lastprog] + args).strip()
except ValueError, e:
self.fatal(Utils.ex_stack())
ret = data
return ret
@conf
def check_cxx(self, *k, **kw):
kw['compiler'] = 'cxx'
return self.check(*k, **kw)
@conf
def check_cc(self, *k, **kw):
kw['compiler'] = 'cc'
return self.check(*k, **kw)
@conf
def define(self, define, value, quote=1):
"""store a single define and its state into an internal list for later
writing to a config header file. Value can only be
a string or int; other types not supported. String
values will appear properly quoted in the generated
header file."""
assert define and isinstance(define, str)
# ordered_dict is for writing the configuration header in order
tbl = self.env[DEFINES] or Utils.ordered_dict()
# the user forgot to tell if the value is quoted or not
if isinstance(value, str):
if quote:
tbl[define] = '"%s"' % str(value)
else:
tbl[define] = value
elif isinstance(value, int):
tbl[define] = value
else:
raise TypeError('define %r -> %r must be a string or an int' % (define, value))
# add later to make reconfiguring faster
self.env[DEFINES] = tbl
self.env[define] = value # <- not certain this is necessary
@conf
def undefine(self, define):
"""store a single define and its state into an internal list
for later writing to a config header file"""
assert define and isinstance(define, str)
tbl = self.env[DEFINES] or Utils.ordered_dict()
value = UNDEFINED
tbl[define] = value
# add later to make reconfiguring faster
self.env[DEFINES] = tbl
self.env[define] = value
@conf
def define_cond(self, name, value):
"""Conditionally define a name.
Formally equivalent to: if value: define(name, 1) else: undefine(name)"""
if value:
self.define(name, 1)
else:
self.undefine(name)
@conf
def is_defined(self, key):
defines = self.env[DEFINES]
if not defines:
return False
try:
value = defines[key]
except KeyError:
return False
else:
return value != UNDEFINED
@conf
def get_define(self, define):
"get the value of a previously stored define"
try: return self.env[DEFINES][define]
except KeyError: return None
@conf
def have_define(self, name):
"prefix the define with 'HAVE_' and make sure it has valid characters."
return self.__dict__.get('HAVE_PAT', 'HAVE_%s') % Utils.quote_define_name(name)
@conf
def write_config_header(self, configfile='', env='', guard='', top=False):
"save the defines into a file"
if not configfile: configfile = WAF_CONFIG_H
waf_guard = guard or '_%s_WAF' % Utils.quote_define_name(configfile)
# configfile -> absolute path
# there is a good reason to concatenate first and to split afterwards
if not env: env = self.env
if top:
diff = ''
else:
diff = Utils.diff_path(self.srcdir, self.curdir)
full = os.sep.join([self.blddir, env.variant(), diff, configfile])
full = os.path.normpath(full)
(dir, base) = os.path.split(full)
try: os.makedirs(dir)
except: pass
dest = open(full, 'w')
dest.write('/* Configuration header created by Waf - do not edit */\n')
dest.write('#ifndef %s\n#define %s\n\n' % (waf_guard, waf_guard))
dest.write(self.get_config_header())
# config files are not removed on "waf clean"
env.append_value(CFG_FILES, os.path.join(diff, configfile))
dest.write('\n#endif /* %s */\n' % waf_guard)
dest.close()
@conf
def get_config_header(self):
"""Fill-in the contents of the config header. Override when you need to write your own config header."""
config_header = []
tbl = self.env[DEFINES] or Utils.ordered_dict()
for key in tbl.allkeys:
value = tbl[key]
if value is None:
config_header.append('#define %s' % key)
elif value is UNDEFINED:
config_header.append('/* #undef %s */' % key)
elif isinstance(value, str):
config_header.append('#define %s %s' % (key, repr(value)[1:-1]))
else:
config_header.append('#define %s %s' % (key, value))
return "\n".join(config_header)
@conftest
def find_cpp(conf):
v = conf.env
cpp = None
if v['CPP']: cpp = v['CPP']
elif 'CPP' in conf.environ: cpp = conf.environ['CPP']
if not cpp: cpp = conf.find_program('cpp', var='CPP')
if not cpp: cpp = v['CC']
if not cpp: cpp = v['CXX']
v['CPP'] = cpp
@conftest
def cc_add_flags(conf):
conf.add_os_flags('CFLAGS', 'CCFLAGS')
conf.add_os_flags('CPPFLAGS')
@conftest
def cxx_add_flags(conf):
conf.add_os_flags('CXXFLAGS')
conf.add_os_flags('CPPFLAGS')
@conftest
def link_add_flags(conf):
conf.add_os_flags('LINKFLAGS')
conf.add_os_flags('LDFLAGS', 'LINKFLAGS')
@conftest
def cc_load_tools(conf):
conf.check_tool('cc')
@conftest
def cxx_load_tools(conf):
conf.check_tool('cxx')
|
|
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import json
import os
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
from tacker.vnfm import vim_client
CHECK_POD_STATUS_RETRY_COUNT = 20
COMMAND_WAIT_COMPLETE_TIME = 0.2
COMMAND_WAIT_RETRY_TIME = 30
CONF = cfg.CONF
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
DRAIN_TIMEOUT = 300
K8S_CMD_TIMEOUT = 30
K8S_DEPLOY_TIMEOUT = 300
K8S_INSTALL_TIMEOUT = 2700
LOG = logging.getLogger(__name__)
NEXT_CHECK_INTERVAL_TIME = 15
ROLE_MASTER = 'master'
ROLE_WORKER = 'worker'
SERVER_WAIT_COMPLETE_TIME = 240
TOKEN_CREATE_WAIT_TIME = 30
UNINSTALL_NODE_TIMEOUT = 900
class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-kubespray'
def get_name(self):
return 'mgmt-drivers-kubespray'
def get_description(self):
return 'Tacker Kubespray VNFMgmt Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim(self, context, vim_connection_info):
vim_client_obj = vim_client.VimClient()
if vim_connection_info:
vim_id = vim_connection_info[0].vim_id
access_info = vim_connection_info[0].access_info
if access_info:
region_name = access_info.get('region')
else:
region_name = None
else:
vim_id = None
region_name = None
try:
vim_res = vim_client_obj.get_vim(
context, vim_id, region_name=region_name)
except nfvo.VimNotFoundException:
raise exceptions.VimConnectionNotFound(vim_id=vim_id)
vim_res['vim_auth'].update({'region': region_name})
vim_info = {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
'vim_type': vim_res['vim_type'],
'access_info': vim_res['vim_auth']}
return vim_info
def _get_vim_connection_info(self, context, instantiate_vnf_req):
vim_info = self._get_vim(
context, instantiate_vnf_req.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
return vim_connection_info
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_value_exist(self, attr_list, value, key):
for attr in attr_list:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
LOG.error('The kubernetes cluster info cannot be None '
'in additionalParams.')
raise exceptions.MgmtDriverOtherError(
error_message="The kubernetes cluster info"
" cannot be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error('The format of kubernetes cluster info in '
'additionalParams is invalid. It must be dict.')
raise exceptions.MgmtDriverOtherError(
error_message="The format of kubernetes cluster info in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
attr_list = []
if key not in ('proxy', 'external_lb_param', 'vim_name'):
attr_list.extend(['username', 'password'])
if key in ('master_node', 'worker_node', 'external_lb_param'):
attr_list.extend(['ssh_cp_name'])
if key == 'ansible':
attr_list.extend(['ip_address', 'kubespray_root_path',
'transferring_inventory_path'])
if key == 'external_lb_param':
attr_list.extend(['ssh_username', 'ssh_password',
'script_path'])
if value.get('script_path'):
abs_script_path = os.path.join(
vnf_package_path, value.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
if key in ('master_node', 'ansible'):
for attr in ['pod_cidr', 'cluster_cidr', 'ip_address']:
if value.get(attr):
self._check_is_cidr(
key, attr, value.get(attr))
if attr_list:
self._check_value_exist(attr_list, value, key)
def _get_ssh_ip_and_nic_ip(self, heatclient, stack_id, node):
resource_info = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('ssh_cp_name'))
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes.get(
'fixed_ips')[0].get('ip_address')
if not ssh_ip:
LOG.error("Failed to get the node's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's ssh ip.")
if not node.get('nic_cp_name'):
nic_ip = ssh_ip
else:
nic_ip = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('nic_cp_name')).attributes.get(
'fixed_ips')[0].get('ip_address')
if not nic_ip:
LOG.error("Failed to get the node's nic ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's nic ip.")
return ssh_ip, nic_ip
def _get_group_resources_list(
self, heatclient, stack_id, node, additional_params):
# get group resources list
nest_resources_list = heatclient.resources.list(stack_id=stack_id)
group_stack_name = node.get("aspect_id")
group_stack_id = ""
for nest_resources in nest_resources_list:
if nest_resources.resource_name == group_stack_name:
group_stack_id = nest_resources.physical_resource_id
if not group_stack_id:
LOG.error('No stack matching the group {} was found.'.format(
group_stack_name))
raise exceptions.MgmtDriverOtherError(
error_message='No stack matching the'
' group {} was found.'.format(group_stack_name))
group_resources_list = heatclient.resources.list(
stack_id=group_stack_id)
return group_resources_list
def _get_install_info_for_k8s_node(self, nest_stack_id, node,
additional_params, heatclient):
# instantiate: get k8s ssh ips
vm_dict_list = []
# get ssh_ip and nic_ip from heat, and set value into vm_dict
if not node.get('aspect_id'):
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
else:
group_resources_list = self._get_group_resources_list(
heatclient, nest_stack_id, node, additional_params)
for group_resource in group_resources_list:
stack_id = group_resource.physical_resource_id
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
return vm_dict_list
def _set_lb_info(self, nest_stack_id, external_lb_param, master_node,
heatclient):
# get ssh_ip and cluster_ip from heat, and set value into vm_dict
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
external_lb_param['pod_cidr'] = master_node.get('pod_cidr', '')
external_lb_param['cluster_cidr'] = master_node.get(
'cluster_cidr', '')
external_lb_param['ssh_ip'] = ssh_ip
external_lb_param['cluster_ip'] = ssh_ip
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None, token_flag=False):
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path) or token_flag:
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if vnf_package_path and script_path:
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
if token_flag:
fname = 'create_admin_token.yaml'
sftp.put(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../samples/mgmt_driver/{}".format(fname)),
"/tmp/{}".format(fname))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise exceptions.MgmtDriverOtherError(error_message=e)
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _send_or_receive_file(self, host, user, password,
remote_file, local_file, operation):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if operation == 'receive':
sftp.get(remote_file, local_file)
else:
sftp.put(local_file, remote_file)
connect.close()
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command)
break
except eventlet.timeout.Timeout:
LOG.debug('Timed out while executing command: '
'{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('Timed out while executing command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='Timed out while executing command: '
'{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0 and result.get_stderr():
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'ansible':
if result.get_return_code() != 0 \
and 'No such file or directory' in result.get_stderr()[0]:
return False
else:
error_message = 'The transferring_inventory_path already ' \
'exists on the kubespray server. Please check' \
' your path.'
LOG.error(error_message)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=error_message)
elif type == 'install':
if result.get_return_code() != 0:
for error in result.get_stdout():
if 'Timeout (12s) waiting for ' \
'privilege escalation prompt' in error and \
retry > 0:
self._execute_command(commander, ssh_command,
timeout, 'install', 0)
break
else:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=err)
return result.get_stdout()
def _create_hosts_yaml(self, master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list):
hosts_yaml_content = {
'all': {
'hosts': {},
'children': {
'kube_control_plane': {'hosts': {}},
'kube_node': {'hosts': {}},
'etcd': {'hosts': {}},
'k8s_cluster': {
'children': {'kube_control_plane': None,
'kube_node': None}},
'calico_rr': {'hosts': {}}}}}
for master_vm in master_vm_dict_list:
key = 'master' + master_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': master_vm.get('ssh_ip'),
'ip': master_vm.get('nic_ip'),
'ansible_user': master_node.get('username'),
'ansible_password': master_node.get('password'),
}
hosts_yaml_content['all']['children']['kube_control_plane'][
'hosts'][key] = None
hosts_yaml_content['all']['children']['etcd'][
'hosts'][key] = None
for worker_vm in worker_vm_dict_list:
key = 'worker' + worker_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': worker_vm.get('ssh_ip'),
'ip': worker_vm.get('nic_ip'),
'ansible_user': worker_node.get('username'),
'ansible_password': worker_node.get('password'),
}
hosts_yaml_content['all']['children']['kube_node'][
'hosts'][key] = None
return hosts_yaml_content
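# Sketch of the inventory produced above (addresses and credentials are
# illustrative; the layout mirrors hosts_yaml_content):
#   all:
#     hosts:
#       master11: {ansible_host: 192.168.0.11, ip: 10.0.0.11,
#                  ansible_user: ..., ansible_password: ...}
#       worker21: {ansible_host: 192.168.0.21, ip: 10.0.0.21, ...}
#     children:
#       kube_control_plane: {hosts: {master11: null}}
#       etcd: {hosts: {master11: null}}
#       kube_node: {hosts: {worker21: null}}
#       k8s_cluster: {children: {kube_control_plane: null, kube_node: null}}
#       calico_rr: {hosts: {}}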
def _install_k8s_cluster_and_set_config(
self, master_node, worker_node, proxy, ansible,
external_lb_param, master_vm_dict_list, worker_vm_dict_list):
"""Install Kubernetes Cluster Function
It will use Kubespray which is installed in advance to install
a Kubernetes Cluster.
At present, Kubespray's version is v2.16.0. You can get detailed
information from the following url.
https://github.com/kubernetes-sigs/kubespray/tree/v2.16.0
"""
# get mtu value
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "ip a | grep '%(nic_ip)s' -B 2 | " \
"grep 'mtu' | awk '{print $5}'" % \
{'nic_ip': master_vm_dict_list[0].get('nic_ip')}
mtu_value = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
calico_veth_mtu = int(mtu_value) - 20
master_commander.close_session()
# create inventory/hosts.yaml
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = "ls -l {}".format(
ansible.get('transferring_inventory_path'))
file_exists_flag = self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'ansible', 0)
if not file_exists_flag:
ssh_command = 'cp -r {kubespray_root_path}/inventory/sample' \
' {transferring_inventory_path}'.format(
kubespray_root_path=ansible.get(
'kubespray_root_path'),
transferring_inventory_path=ansible.get(
'transferring_inventory_path'))
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
hosts_yaml_content = self._create_hosts_yaml(
master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list)
local_hosts_yaml_path = '/tmp/hosts.yaml'
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_yaml_content, nf, default_flow_style=False)
remote_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), remote_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# set calico mtu value
calico_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-net-calico.yml'
ssh_command = 'sed -i "s/\\# calico_mtu: 1500/calico_mtu: ' \
'{mtu_value}/g" {calico_file_path}'.format(
mtu_value=mtu_value,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# calico_veth_mtu: 1440/calico_veth_mtu:' \
' {calico_veth_mtu}/g" {calico_file_path}'.format(
calico_veth_mtu=calico_veth_mtu,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set pod and service cidr information
if external_lb_param.get('cluster_cidr') and \
external_lb_param.get('pod_cidr'):
k8s_cluster_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-cluster.yml'
cluster_cidr = external_lb_param.get(
'cluster_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_service_addresses:' \
' 10.233.0.0\\/18/' \
'kube_service_addresses: {k8s_service_address}/g"' \
' {k8s_cluster_file_path}'.format(
k8s_service_address=cluster_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
pod_cidr = external_lb_param.get('pod_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_pods_subnet: 10.233.64.0\\/18/' \
'kube_pods_subnet: {pod_cidr}/g"' \
' {k8s_cluster_file_path}'.format(
pod_cidr=pod_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set proxy
if proxy:
proxy_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/all/all.yml'
http_proxy = proxy.get('http_proxy').replace('/', '\\/')
https_proxy = proxy.get('https_proxy').replace('/', '\\/')
ssh_command = 'sed -i "s/\\# http_proxy: \\"\\"/' \
'http_proxy: {http_proxy}/g"' \
' {proxy_file_path}'.format(
http_proxy=http_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# https_proxy: \\"\\"/' \
'https_proxy: {https_proxy}/g"' \
' {proxy_file_path}'.format(
https_proxy=https_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
# install k8s cluster
install_timeout = K8S_INSTALL_TIMEOUT * (
len(master_vm_dict_list) + len(worker_vm_dict_list))
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), install_timeout)
cluster_yaml_path = ansible.get(
'kubespray_root_path') + '/cluster.yml'
ssh_command = 'ansible-playbook -i {}/hosts.yaml --become' \
' --become-user=root {}'.format(
ansible.get('transferring_inventory_path'),
cluster_yaml_path)
self._execute_command(ansible_commander, ssh_command,
install_timeout, 'install', 1)
ansible_commander.close_session()
# get k8s bearer token
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT,
token_flag=True)
ssh_command = "sudo kubectl create -f /tmp/create_admin_token.yaml"
self._execute_command(
master_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
time.sleep(TOKEN_CREATE_WAIT_TIME)
ssh_command = "sudo kubectl get secret -n kube-system " \
"| grep '^admin-token' " \
"| awk '{print $1}' " \
"| xargs -i sudo kubectl get secret {} -n kube-system" \
" -ojsonpath={.data.token} | base64 -d"
bearer_token = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
master_commander.close_session()
if os.path.exists(local_hosts_yaml_path):
os.remove(local_hosts_yaml_path)
return bearer_token
def _install_and_set_lb(self, external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node):
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_DEPLOY_TIMEOUT,
vnf_package_path=vnf_package_path,
script_path=external_lb_param.get('script_path'))
master_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in master_vm_dict_list])
worker_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in worker_vm_dict_list])
if proxy.get('http_proxy') and proxy.get('https_proxy'):
ssh_command = \
"export http_proxy={http_proxy};" \
"export https_proxy={https_proxy};" \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
http_proxy=proxy.get('http_proxy'),
https_proxy=proxy.get('https_proxy'),
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
else:
ssh_command = \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
# copy k8s admin configuration file
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = 'sudo cp /etc/kubernetes/admin.conf /tmp/config;' \
'sudo chown $(id -u):$(id -g) /tmp/config'
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = "sed -i 's/:6443/:8383/' /tmp/config"
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
master_commander.close_session()
remote_admin_file_path = local_admin_file_path = '/tmp/config'
self._send_or_receive_file(
master_vm_dict_list[0].get('ssh_ip'),
master_node.get('username'), master_node.get('password'),
remote_admin_file_path, local_admin_file_path, 'receive')
# send config file to lb server
lb_admin_file_path = '~/.kube/config'
if os.path.exists(local_admin_file_path):
self._send_or_receive_file(
external_lb_param.get('ssh_ip'),
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
remote_admin_file_path, local_admin_file_path, 'send')
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "mv {} {}".format(remote_admin_file_path,
lb_admin_file_path)
self._execute_command(lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
lb_commander.close_session()
if os.path.exists(local_admin_file_path):
os.remove(local_admin_file_path)
def _create_vim(self, context, vnf_instance, external_lb_param,
bearer_token, vim_name):
server = 'https://' + external_lb_param.get('cluster_ip') + ':8383'
vim_info = {
'vim': {
'name': vim_name,
'auth_url': server,
'vim_project': {
'name': 'default'
},
'auth_cred': {
'bearer_token': bearer_token
},
'type': 'kubernetes',
'tenant_id': context.project_id
}
}
try:
nfvo_plugin = NfvoPlugin()
created_vim_info = nfvo_plugin.create_vim(context, vim_info)
except Exception as e:
LOG.error("Failed to register kubernetes vim: {}".format(e))
raise exceptions.MgmtDriverOtherError(
error_message="Failed to register kubernetes vim: {}".format(
e))
id = uuidutils.generate_uuid()
vim_id = created_vim_info.get('id')
vim_type = 'kubernetes'
access_info = {
'auth_url': server
}
vim_connection_info = objects.VimConnectionInfo(
id=id, vim_id=vim_id, vim_type=vim_type,
access_info=access_info, interface_info=None
)
vim_connection_infos = vnf_instance.vim_connection_info
vim_connection_infos.append(vim_connection_info)
vnf_instance.vim_connection_info = vim_connection_infos
vnf_instance.save()
def _get_vnf_package_path(self, context, vnfd_id):
return os.path.join(CONF.vnf_package.vnf_package_csar_path,
self._get_vnf_package_id(context, vnfd_id))
def _get_vnf_package_id(self, context, vnfd_id):
vnf_package = objects.VnfPackageVnfd.get_by_id(context, vnfd_id)
return vnf_package.package_uuid
@log.log
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
# get vim_connect_info
if hasattr(instantiate_vnf_request, 'vim_connection_info'):
vim_connection_info = self._get_vim_connection_info(
context, instantiate_vnf_request)
else:
# In case of healing entire Kubernetes cluster, 'heal_end' method
# will call this method using 'vnf_instance.instantiated_vnf_info'
# as the 'instantiate_vnf_request', but there is no
# 'vim_connection_info' in it, so we should get
# 'vim_connection_info' from 'vnf_instance'.
vim_connection_info = self._get_vim_connection_info(
context, vnf_instance)
additional_param = instantiate_vnf_request.additional_params.get(
'k8s_cluster_installation_param', {})
vim_name = additional_param.get('vim_name')
master_node = additional_param.get('master_node', {})
worker_node = additional_param.get('worker_node', {})
proxy = additional_param.get('proxy', {})
ansible = additional_param.get('ansible', {})
external_lb_param = additional_param.get('external_lb_param', {})
vnf_package_path = self._get_vnf_package_path(
context, vnf_instance.vnfd_id)
self._check_input_parameters(additional_param, vnf_package_path)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
if not vim_name:
vim_name = 'kubernetes_vim_' + vnf_instance.id
# get k8s node vm list
access_info = vim_connection_info.access_info
heatclient = hc.HeatClient(access_info)
master_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, master_node,
instantiate_vnf_request.additional_params,
heatclient)
worker_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, worker_node,
instantiate_vnf_request.additional_params, heatclient)
# set LB vm's info
self._set_lb_info(nest_stack_id, external_lb_param, master_node,
heatclient)
# install k8s_cluster and set config
bearer_token = self._install_k8s_cluster_and_set_config(
master_node, worker_node, proxy, ansible, external_lb_param,
master_vm_dict_list, worker_vm_dict_list)
# Install and set ExternalLB
self._install_and_set_lb(external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node)
# create vim
self._create_vim(context, vnf_instance, external_lb_param,
bearer_token, vim_name)
@log.log
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim_by_name(self, context, k8s_vim_name):
common_db_api = CommonDbMixin()
result = common_db_api.get_by_name(
context, nfvo_db.Vim, k8s_vim_name)
if not result:
LOG.debug("Cannot find kubernetes "
"vim with name: {}".format(k8s_vim_name))
return result
@log.log
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
# delete kubernetes vim
k8s_params = vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param', {})
k8s_vim_name = k8s_params.get('vim_name')
if not k8s_vim_name:
k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id
vim_info = self._get_vim_by_name(
context, k8s_vim_name)
if vim_info:
nfvo_plugin = NfvoPlugin()
nfvo_plugin.delete_vim(context, vim_info.id)
for k8s_vim in vnf_instance.vim_connection_info:
if k8s_vim.vim_id == vim_info.id:
vnf_instance.vim_connection_info.remove(k8s_vim)
# delete cluster info on ansible server
_, _, ansible, _ = \
self._get_initial_parameters(
context, vnf_instance, terminate_vnf_request)
commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf {}'.format(
k8s_params.get('ansible').get('transferring_inventory_path'))
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
commander.close_session()
def _update_external_lb(self, external_lb_param, lb_ssh_ip, hostname):
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
ssh_command = 'grep -n "{}" /etc/haproxy/haproxy.cfg | ' \
'cut -d : -f 1'.format(hostname)
result = self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if result:
worker_host_num = result[0].replace('\n', '')
if worker_host_num.isdigit():
lb_server_num = int(worker_host_num, base=0)
ssh_command = "sudo sed -i '{}d' " \
"/etc/haproxy/haproxy.cfg" \
.format(lb_server_num)
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _delete_worker_node_and_update_inventory_file(
self, ansible, worker_node, worker_hostname, operation_type):
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._modify_ansible_user_or_password(update_hosts_yaml_path,
worker_node, ansible)
# remove worker node
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root " \
"{}/remove-node.yml -e" \
" node={}".format(ansible.get(
'transferring_inventory_path'),
ansible.get('kubespray_root_path'),
worker_hostname)
try:
with eventlet.Timeout(K8S_INSTALL_TIMEOUT, True):
result, code = self._uninstall_worker_node(
ssh_command, ansible)
if code != 0:
msg = 'Failed to remove the worker node {}'.\
format(worker_hostname)
LOG.error(result)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
LOG.debug(result)
except eventlet.timeout.Timeout:
msg = 'Timed out while deleting' \
' the worker node {}'.format(worker_hostname)
LOG.error(msg)
raise exceptions.MgmtDriverOtherError(
error_message=msg)
# Gets the line of rows where worker_hostname resides
if operation_type == 'SCALE':
while True:
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'grep -n "{}" {} | head -1 ' \
'| cut -d : -f 1'\
.format(worker_hostname, update_hosts_yaml_path)
host_name = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
if host_name:
host_name_line = host_name[0].replace('\n', '')
if host_name_line.isdigit():
host_name_line = int(host_name_line, base=0)
ssh_command = 'sed -n {}P {}' \
.format(host_name_line + 1,
update_hosts_yaml_path)
is_hosts_or_children = self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0]
if "ansible_host" in is_hosts_or_children:
ssh_command = "sed -i '{}, {}d' {}" \
.format(host_name_line,
host_name_line + 4,
update_hosts_yaml_path)
else:
ssh_command = "sed -i " \
"'{}d' {}"\
.format(host_name_line,
update_hosts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
else:
break
commander_k8s.close_session()
if os.path.exists(update_hosts_yaml_path):
os.remove(update_hosts_yaml_path)
def _uninstall_worker_node(self, ssh_command, ansible):
end_str = ('# ', '$ ', '? ', '% ')
end_flag = False
result_end_flag = False
command_return_code = 0
try:
trans = paramiko.Transport((ansible.get('ip_address'), 22))
trans.start_client()
trans.auth_password(username=ansible.get('username'),
password=ansible.get('password'))
channel = trans.open_session()
channel.settimeout(UNINSTALL_NODE_TIMEOUT)
channel.get_pty()
buff = ''
channel.invoke_shell()
channel.send(ssh_command + '\n')
while True:
time.sleep(COMMAND_WAIT_COMPLETE_TIME)
resp = channel.recv(1024)
resp = resp.decode('utf-8')
buff += resp
if "Type 'yes' to delete nodes" in resp:
channel.send('yes\n')
time.sleep(COMMAND_WAIT_COMPLETE_TIME)
resp = channel.recv(1024)
resp = resp.decode('utf-8')
buff += resp
for end_s in end_str:
if resp.endswith(end_s):
end_flag = True
break
if end_flag:
break
if 'PLAY RECAP' in resp:
result_end_flag = True
if result_end_flag and 'failed=0' not in resp:
command_return_code = 2
channel.close()
trans.close()
return buff, command_return_code
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
raise exceptions.MgmtDriverOtherError(error_message=e)
def _get_initial_parameters(self, context, vnf_instance, action_request):
vim_connection_info = \
self._get_vim_connection_info(context, vnf_instance)
k8s_cluster_installation_param = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param')
worker_node_default = \
k8s_cluster_installation_param.get('worker_node')
external_lb_param_default = \
k8s_cluster_installation_param.get('external_lb_param')
ansible_default = \
k8s_cluster_installation_param.get('ansible')
# If additional_params exist in action_request
if hasattr(action_request, 'additional_params') and \
action_request.additional_params:
# Get the VM's information from action_request
add_param = action_request. \
additional_params.get('k8s_cluster_installation_param')
if add_param:
worker_node = add_param.get('worker_node', worker_node_default)
external_lb_param = add_param.get('external_lb_param',
external_lb_param_default)
ansible = add_param.get('ansible', ansible_default)
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
else:
worker_node = worker_node_default
external_lb_param = external_lb_param_default
ansible = ansible_default
return worker_node, external_lb_param, ansible, vim_connection_info
def _remove_node_and_update_config_file(
self, worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, operation_type):
# Migrate the pod of the worker node
for worker_hostname in worker_hostnames:
# init lb RemoteCommandExecutor
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
# check worker_node exist in k8s-cluster
ssh_command = "kubectl get node --no-headers {}" \
" 2> /dev/null".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
if result:
ssh_command = \
"kubectl get pods --field-selector=spec." \
"nodeName={} -o json".format(worker_hostname)
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_CMD_TIMEOUT,
'common',
0)
# Get the names of all pods on the worker node
daemonset_content_str = ''.join(result)
daemonset_content = json.loads(
daemonset_content_str)
ssh_command = "kubectl drain {}" \
" --ignore-daemonsets" \
" --delete-emptydir-data" \
" --timeout={}s".format(worker_hostname,
DRAIN_TIMEOUT)
self._execute_command(external_lb_commander,
ssh_command,
K8S_DEPLOY_TIMEOUT,
'common', 0)
self.evacuate_wait(external_lb_commander,
daemonset_content)
external_lb_commander.close_session()
# Uninstall worker node and update inventory file
self._delete_worker_node_and_update_inventory_file(
ansible, worker_node, worker_hostname, operation_type)
# Update ExternalLB's haproxy
self._update_external_lb(external_lb_param, lb_ssh_ip,
worker_hostname)
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
# If the type of scale is SCALE_IN
if scale_vnf_request.type == 'SCALE_IN':
scale_name_list = kwargs.get('scale_name_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
resource_name = scale_vnf_request.aspect_id
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
resource_info = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=external_lb_param.get('ssh_cp_name'))
# If the VM's floating ip is not None
# Get floating ip from resource_info and assign it to ssh ip
lb_ssh_ip = self._get_lb_or_worker_ssh_ip(resource_info, True)
# Get the IPs of the scale-in worker nodes
worker_group_resource = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=resource_name)
# if worker_group_resource is None
if not worker_group_resource:
LOG.error("The specified resource was not found.")
raise exceptions.MgmtDriverOtherError(
error_message='The specified resource was not found.')
worker_resource_list = \
heatclient.resources.list(
stack_id=worker_group_resource.physical_resource_id)
worker_ip_dict_list = []
for worker_resource in worker_resource_list:
# If worker_resource.resource_name exists in scale_name_list
if worker_resource.resource_name in scale_name_list:
stack_id = worker_resource.physical_resource_id
# Get the ssh_ip, nic ip of worker node
worker_ssh_ip, worker_nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, worker_node)
# Create worker_ip_dict_list data
ip_dict = {"ssh_ip": worker_ssh_ip,
"nic_ip": worker_nic_ip}
worker_ip_dict_list.append(ip_dict)
# Get the hostnames of the scale-in worker nodes.
worker_hostnames = []
for worker_ip_dict in worker_ip_dict_list:
# get worker host names
worker_hostname = \
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
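# e.g. a worker whose nic_ip is 10.0.0.32 (hypothetical address) is named
# 'worker32', matching the naming scheme used elsewhere in this driver
# (see scale_end and _get_heal_worker_node_info).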
worker_hostnames.append(worker_hostname)
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
lb_ssh_ip, ansible, worker_node, 'SCALE')
else:
pass
def evacuate_wait(self, commander, daemonset_content):
wait_flag = True
retry_count = CHECK_POD_STATUS_RETRY_COUNT
while wait_flag and retry_count > 0:
if daemonset_content.get('items'):
ssh_command = "kubectl get pods --all-namespaces -o json"
result = self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 3)
pods_list = json.loads(''.join(result)).get('items')
pods_names = [pod.get('metadata', {}).get('name')
for pod in pods_list]
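# A pod from the drained node is treated as "still pending eviction" if its
# name is still listed and it is not a per-node daemonset pod such as
# calico-node or kube-proxy; the for/else below clears wait_flag only once
# no such pod remains.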
for daemonset in daemonset_content.get('items'):
daemonset_name = daemonset.get('metadata', {}).get('name')
if daemonset_name in pods_names and \
'calico-node' not in daemonset_name and \
'kube-proxy' not in daemonset_name:
break
else:
wait_flag = False
else:
break
if not wait_flag:
break
time.sleep(NEXT_CHECK_INTERVAL_TIME)
retry_count -= 1
def _get_lb_or_worker_ssh_ip(self, resource_info, is_lb):
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes. \
get('fixed_ips')[0].get('ip_address')
if ssh_ip is None:
if is_lb:
LOG.error("Failed to get the LB's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get the LB's ssh ip.")
LOG.error("Failed to get the Worker's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get the Worker's ssh ip.")
return ssh_ip
def _restart_haproxy(self, external_lb_commander):
# restart haproxy
ssh_command = 'sudo systemctl restart haproxy'
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sudo systemctl status haproxy | ' \
'grep Active'
self._execute_command(
external_lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
def _update_lb_config_file(self, external_lb_param, lb_ssh_ip,
worker_ip_dict_list):
external_lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
lb_ssh_ip,
K8S_CMD_TIMEOUT
)
add_row_data = ''
for worker_ip_dict in worker_ip_dict_list:
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
nic_ip = worker_ip_dict.get('nic_ip')
row_data = ' server {} {} check'.format(
worker_host_name, nic_ip)
add_row_data += row_data + '\\n'
ssh_command = 'grep -n "backend kubernetes-nodeport" ' \
'/etc/haproxy/haproxy.cfg | head -1 | cut -d : -f 1'
result = self._execute_command(external_lb_commander,
ssh_command,
K8S_INSTALL_TIMEOUT,
'common', 0)[0].replace('\n', '')
write_start_row = int(result) + 2
ssh_command = 'sudo sed -i "{}a\\{}" ' \
'/etc/haproxy/haproxy.cfg'.format(
write_start_row, add_row_data)
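# Illustrative expansion (line number, worker name and IP are hypothetical):
#   sudo sed -i "42a\    server worker32 10.0.0.32 check\n" /etc/haproxy/haproxy.cfg
# i.e. one "server ... check" row is appended per new worker, two lines below
# the "backend kubernetes-nodeport" section header.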
LOG.debug("ssh_command: {}".format(ssh_command))
self._execute_command(
external_lb_commander, ssh_command,
K8S_INSTALL_TIMEOUT, 'common', 0)
self._restart_haproxy(external_lb_commander)
external_lb_commander.close_session()
def _install_node_and_update_config_file(
self, worker_node, worker_ip_dict_list,
ansible, external_lb_param, lb_ssh_ip):
# check worker_VM can be accessed via ssh
self._init_commander_and_set_script(
worker_node.get('username'), worker_node.get('password'),
worker_ip_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
# Install worker node
commander_k8s = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'),
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list))
facts_yaml_path = ansible.get(
'kubespray_root_path') + '/facts.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}" \
.format(ansible.get('transferring_inventory_path'),
facts_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_DEPLOY_TIMEOUT, 'common', 0)
scale_yaml_path = ansible.get(
'kubespray_root_path') + '/scale.yml'
ssh_command = "ansible-playbook -i" \
" {}/hosts.yaml" \
" --become --become-user=root {}".format(
ansible.get('transferring_inventory_path'),
scale_yaml_path)
self._execute_command(
commander_k8s, ssh_command,
K8S_INSTALL_TIMEOUT * len(worker_ip_dict_list),
'install', 0)
commander_k8s.close_session()
# Update ExternalLB's haproxy.cfg
self._update_lb_config_file(
external_lb_param, lb_ssh_ip, worker_ip_dict_list)
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
if scale_vnf_request.type == 'SCALE_OUT':
scale_out_id_list = kwargs.get('scale_out_id_list')
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
worker_node, external_lb_param, ansible, vim_connection_info =\
self._get_initial_parameters(
context, vnf_instance, scale_vnf_request)
heatclient = hc.HeatClient(vim_connection_info.access_info)
# Get the ssh ip of LB
resource_info = heatclient.resources. \
get(stack_id=nest_stack_id,
resource_name=external_lb_param.get('ssh_cp_name'))
lb_ssh_ip = self._get_lb_or_worker_ssh_ip(resource_info, True)
# get scale-out worker's info
worker_ip_dict_list = []
for scale_out_id in scale_out_id_list:
stack_id = scale_out_id
# Get the ssh_ip, nic ip of worker node
worker_ssh_ip, worker_nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, worker_node)
# Create worker_ip_dict_list data
ip_dict = {"ssh_ip": worker_ssh_ip, "nic_ip": worker_nic_ip}
worker_ip_dict_list.append(ip_dict)
# read hosts.yaml file contents
update_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
local_hosts_yaml_path = '/tmp/hosts.yaml'
# update hosts.yaml
hosts_content = self._modify_ansible_user_or_password(
update_hosts_yaml_path, worker_node, ansible)
for worker_ip_dict in worker_ip_dict_list:
# Update inventory file
# update hosts.yaml file contents
worker_host_name = 'worker' + \
worker_ip_dict.get('nic_ip').split('.')[-1]
hosts_content['all']['hosts'][worker_host_name] = {
'ansible_host': worker_ip_dict.get('ssh_ip'),
'ip': worker_ip_dict.get('nic_ip'),
'ansible_user': worker_node.get('username'),
'ansible_password': worker_node.get('password')
}
hosts_content['all']['children']['kube_node'][
'hosts'][worker_host_name] = None
LOG.debug("get hosts_content: {}".format(hosts_content))
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_content, nf,
default_flow_style=False)
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), update_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, lb_ssh_ip)
else:
pass
def _modify_ansible_user_or_password(self, host_path,
worker_node, ansible):
try:
# read hosts.yml
local_hosts_yaml_path = '/tmp/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), host_path,
local_hosts_yaml_path, 'receive')
with open(local_hosts_yaml_path, 'r', encoding='utf-8') as f:
file_content = f.read()
hosts_content = yaml.safe_load(file_content)
worker_nodes = hosts_content['all']['children']['kube_node'][
'hosts']
LOG.debug("worker_nodes: {}".format(worker_nodes))
if worker_nodes:
kube_node_hosts_keys = list(worker_nodes.keys())
LOG.debug("kube_node_keys: {}".format(kube_node_hosts_keys))
hosts_key = list(hosts_content['all']['hosts'].keys())
LOG.debug("hosts_key: {}".format(hosts_key))
need_modify = False
for kube_node_hosts in kube_node_hosts_keys:
if kube_node_hosts in hosts_key:
content = \
hosts_content['all']['hosts'][kube_node_hosts]
LOG.debug("get node content: {}".format(content))
ansible_user = content.get("ansible_user")
ansible_password = content.get("ansible_password")
if ansible_user != worker_node.get('username'):
hosts_content['all']['hosts'][kube_node_hosts][
'ansible_user'] = worker_node.get('username')
need_modify = True
if ansible_password != worker_node.get('password'):
hosts_content['all']['hosts'][kube_node_hosts][
'ansible_password'] = \
worker_node.get('password')
need_modify = True
if need_modify:
with open(local_hosts_yaml_path, 'w', encoding='utf-8') \
as nf:
yaml.safe_dump(hosts_content, nf,
default_flow_style=False)
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), host_path,
local_hosts_yaml_path, 'send')
return hosts_content
os.remove(local_hosts_yaml_path)
except Exception as e:
LOG.error('Modifying ansible_user or ansible_password failed: {}.'
.format(e))
raise exceptions.MgmtDriverOtherError(
error_message='Modifying user or password failed: {}.'.format(
e))
def _get_vnfc_resource_id(self, vnfc_resource_list, vnfc_instance_id):
for vnfc_resource in vnfc_resource_list:
if vnfc_resource.id == vnfc_instance_id:
return vnfc_resource
return None
def _get_heal_physical_resource_ids(self, vnf_instance,
heal_vnf_request):
heal_physical_resource_ids = []
for vnfc_instance_id in heal_vnf_request.vnfc_instance_id:
instantiated_vnf_info = vnf_instance.instantiated_vnf_info
vnfc_resource_info = instantiated_vnf_info.vnfc_resource_info
vnfc_resource = self._get_vnfc_resource_id(
vnfc_resource_info, vnfc_instance_id)
if vnfc_resource:
heal_physical_resource_ids.append(
vnfc_resource.compute_resource.resource_id)
return heal_physical_resource_ids
def _get_heal_worker_node_info(
self, vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids):
worker_ip_dict_list = []
if worker_node.get('aspect_id'):
worker_group_resource_name = worker_node.get('aspect_id')
worker_group_resource = heatclient.resources.get(
stack_id=nest_stack_id,
resource_name=worker_group_resource_name)
if not worker_group_resource:
raise exceptions.MgmtDriverOtherError(
error_message='The specified resource'
' {} was not found.'.format(
worker_group_resource_name))
worker_group_resource_list = heatclient.resources.list(
stack_id=worker_group_resource.physical_resource_id)
for worker_resource in worker_group_resource_list:
lowest_resource_list = heatclient.resources.list(
stack_id=worker_resource.physical_resource_id)
for lowest_resource in lowest_resource_list:
if lowest_resource.resource_type == 'OS::Nova::Server' \
and lowest_resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient,
worker_resource.physical_resource_id,
worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
else:
# in case of SOL001 TOSCA-based VNFD with single worker node
resource_list = heatclient.resources.list(
stack_id=nest_stack_id)
for resource in resource_list:
if resource.resource_type == 'OS::Nova::Server' \
and resource.physical_resource_id in \
heal_physical_resource_ids:
worker_ssh_ip, worker_nic_ip = \
self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, worker_node)
ip_dict = {"nic_ip": worker_nic_ip,
"ssh_ip": worker_ssh_ip}
worker_ip_dict_list.append(ip_dict)
# Get the hostnames of the worker nodes to be deleted
worker_hostnames = []
for worker_ip_dict in worker_ip_dict_list:
# get worker host names
worker_hostname = \
'worker' + worker_ip_dict.get("nic_ip").split('.')[-1]
worker_hostnames.append(worker_hostname)
return worker_hostnames, worker_ip_dict_list
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
self.terminate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_hostnames to be healed
worker_hostnames, _ = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# Remove the worker nodes from the k8s cluster and update configuration file
self._remove_node_and_update_config_file(
worker_hostnames, external_lb_param,
ssh_ip, ansible, worker_node, 'HEAL')
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
vnf_additional_params = \
vnf_instance.instantiated_vnf_info.additional_params
# heal of the entire VNF
if not heal_vnf_request.vnfc_instance_id:
add_param_list = ['master_node', 'worker_node', 'proxy',
'ansible', 'external_lb_param']
for add_param in add_param_list:
if heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
if add_param in heal_vnf_request.additional_params.get(
'k8s_cluster_installation_param'):
vnf_additional_params.get(
'k8s_cluster_installation_param')[add_param] = \
heal_vnf_request.additional_params[
'k8s_cluster_installation_param'].get(
add_param)
heal_vnf_request.additional_params = vnf_additional_params
self.instantiate_end(context, vnf_instance, heal_vnf_request,
grant, grant_request)
else:
# heal specified with VNFC instances
heal_physical_resource_ids = \
self._get_heal_physical_resource_ids(
vnf_instance, heal_vnf_request)
worker_node, external_lb_param, ansible, vim_connection_info = \
self._get_initial_parameters(
context, vnf_instance, heal_vnf_request)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
# Get the ssh ip of LB
heatclient = hc.HeatClient(vim_connection_info.access_info)
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
# Get the worker_ip_dict_list to be healed
_, worker_ip_dict_list = self._get_heal_worker_node_info(
vnf_additional_params, worker_node, heatclient,
nest_stack_id, heal_physical_resource_ids)
# Install worker node and update configuration file
self._install_node_and_update_config_file(
worker_node, worker_ip_dict_list, ansible,
external_lb_param, ssh_ip)
@log.log
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
|
|
#!/usr/bin/env python
"""
Runs Bowtie on single-end or paired-end data.
For use with Bowtie v. 0.12.7
usage: bowtie_wrapper.py [options]
-t, --threads=t: The number of threads to run
-o, --output=o: The output file
--output_unmapped_reads=: File name for unmapped reads (single-end)
--output_unmapped_reads_l=: File name for unmapped reads (left, paired-end)
--output_unmapped_reads_r=: File name for unmapped reads (right, paired-end)
--output_suppressed_reads=: File name for suppressed reads because of max setting (single-end)
--output_suppressed_reads_l=: File name for suppressed reads because of max setting (left, paired-end)
--output_suppressed_reads_r=: File name for suppressed reads because of max setting (right, paired-end)
-i, --input1=i: The (forward or single-end) reads file in Sanger FASTQ format
-I, --input2=I: The reverse reads file in Sanger FASTQ format
-4, --dataType=4: The type of data (SOLiD or Solexa)
-2, --paired=2: Whether the data is single- or paired-end
-g, --genomeSource=g: The type of reference provided
-r, --ref=r: The reference genome to use or index
-s, --skip=s: Skip the first n reads
-a, --alignLimit=a: Only align the first n reads
-T, --trimH=T: Trim n bases from high-quality (left) end of each read before alignment
-L, --trimL=L: Trim n bases from low-quality (right) end of each read before alignment
-m, --mismatchSeed=m: Maximum number of mismatches permitted in the seed
-M, --mismatchQual=M: Maximum permitted total of quality values at mismatched read positions
-l, --seedLen=l: Seed length
-n, --rounding=n: Whether or not to round to the nearest 10 and saturate at 30
-P, --maqSoapAlign=P: Choose MAQ- or SOAP-like alignment policy
-w, --tryHard=: Whether or not to try as hard as possible to find valid alignments when they exist
-v, --valAlign=v: Report up to n valid alignments per read
-V, --allValAligns=V: Whether or not to report all valid alignments per read
-G, --suppressAlign=G: Suppress all alignments for a read if more than n reportable alignments exist
-b, --best=b: Whether or not to make Bowtie guarantee that reported singleton alignments are 'best' in terms of stratum and in terms of the quality values at the mismatched positions
-B, --maxBacktracks=B: Maximum number of backtracks permitted when aligning a read
-R, --strata=R: Whether or not to report only those alignments that fall in the best stratum if many valid alignments exist and are reportable
-j, --minInsert=j: Minimum insert size for valid paired-end alignments
-J, --maxInsert=J: Maximum insert size for valid paired-end alignments
-O, --mateOrient=O: The upstream/downstream mate orientation for valid paired-end alignment against the forward reference strand
-A, --maxAlignAttempt=A: Maximum number of attempts Bowtie will make to match an alignment for one mate with an alignment for the opposite mate
-f, --forwardAlign=f: Whether or not to attempt to align the forward reference strand
-E, --reverseAlign=E: Whether or not to attempt to align the reverse-complement reference strand
-F, --offrate=F: Override the offrate of the index to n
-8, --snpphred=8: SNP penalty on Phred scale
-6, --snpfrac=6: Fraction of sites expected to be SNP sites
-7, --keepends=7: Keep extreme-end nucleotides and qualities
-S, --seed=S: Seed for pseudo-random number generator
-C, --params=C: Whether to use default or specified parameters
-u, --iautoB=u: Automatic or specified behavior
-K, --ipacked=K: Whether or not to use a packed representation for DNA strings
-Q, --ibmax=Q: Maximum number of suffixes allowed in a block
-Y, --ibmaxdivn=Y: Maximum number of suffixes allowed in a block as a fraction of the length of the reference
-D, --idcv=D: The period for the difference-cover sample
-U, --inodc=U: Whether or not to disable the use of the difference-cover sample
-y, --inoref=y: Whether or not to build the part of the reference index used only in paired-end alignment
-z, --ioffrate=z: How many rows get marked during annotation of some or all of the Burrows-Wheeler rows
-W, --iftab=W: The size of the lookup table used to calculate an initial Burrows-Wheeler range with respect to the first n characters of the query
-X, --intoa=X: Whether or not to convert Ns in the reference sequence to As
-N, --iendian=N: Endianness to use when serializing integers to the index file
-Z, --iseed=Z: Seed for the pseudorandom number generator
-c, --icutoff=c: Number of first bases of the reference sequence to index
-x, --indexSettings=x: Whether or not indexing options are to be set
-H, --suppressHeader=H: Suppress header
--do_not_build_index: Flag to specify that provided file is already indexed and to just use 'as is'
"""
import optparse, os, shutil, subprocess, sys, tempfile
# Allow more than just Sanger-encoded quality variants
DEFAULT_ASCII_ENCODING = '--phred33-quals'
GALAXY_FORMAT_TO_QUALITY_SCORE_ENCODING_ARG = { 'fastqsanger':'--phred33-quals', 'fastqillumina':'--phred64-quals', 'fastqsolexa':'--solexa-quals' }
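# e.g. a 'fastqillumina' dataset maps to '--phred64-quals'; formats not listed
# above fall back to DEFAULT_ASCII_ENCODING ('--phred33-quals').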
#FIXME: Integer quality scores are supported only when the '--integer-quals' argument is specified to bowtie; this is not currently able to be set in the tool/wrapper/config
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-t', '--threads', dest='threads', help='The number of threads to run' )
parser.add_option( '-o', '--output', dest='output', help='The output file' )
parser.add_option( '', '--output_unmapped_reads', dest='output_unmapped_reads', help='File name for unmapped reads (single-end)' )
parser.add_option( '', '--output_unmapped_reads_l', dest='output_unmapped_reads_l', help='File name for unmapped reads (left, paired-end)' )
parser.add_option( '', '--output_unmapped_reads_r', dest='output_unmapped_reads_r', help='File name for unmapped reads (right, paired-end)' )
parser.add_option( '', '--output_suppressed_reads', dest='output_suppressed_reads', help='File name for suppressed reads because of max setting (single-end)' )
parser.add_option( '', '--output_suppressed_reads_l', dest='output_suppressed_reads_l', help='File name for suppressed reads because of max setting (left, paired-end)' )
parser.add_option( '', '--output_suppressed_reads_r', dest='output_suppressed_reads_r', help='File name for suppressed reads because of max setting (right, paired-end)' )
parser.add_option( '-4', '--dataType', dest='dataType', help='The type of data (SOLiD or Solexa)' )
parser.add_option( '-i', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format' )
parser.add_option( '-I', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format' )
parser.add_option( '-2', '--paired', dest='paired', help='Whether the data is single- or paired-end' )
parser.add_option( '-g', '--genomeSource', dest='genomeSource', help='The type of reference provided' )
parser.add_option( '-r', '--ref', dest='ref', help='The reference genome to use or index' )
parser.add_option( '-s', '--skip', dest='skip', help='Skip the first n reads' )
parser.add_option( '-a', '--alignLimit', dest='alignLimit', help='Only align the first n reads' )
parser.add_option( '-T', '--trimH', dest='trimH', help='Trim n bases from high-quality (left) end of each read before alignment' )
parser.add_option( '-L', '--trimL', dest='trimL', help='Trim n bases from low-quality (right) end of each read before alignment' )
parser.add_option( '-m', '--mismatchSeed', dest='mismatchSeed', help='Maximum number of mismatches permitted in the seed' )
parser.add_option( '-M', '--mismatchQual', dest='mismatchQual', help='Maximum permitted total of quality values at mismatched read positions' )
parser.add_option( '-l', '--seedLen', dest='seedLen', help='Seed length' )
parser.add_option( '-n', '--rounding', dest='rounding', help='Whether or not to round to the nearest 10 and saturate at 30' )
parser.add_option( '-P', '--maqSoapAlign', dest='maqSoapAlign', help='Choose MAQ- or SOAP-like alignment policy' )
parser.add_option( '-w', '--tryHard', dest='tryHard', help='Whether or not to try as hard as possible to find valid alignments when they exist' )
parser.add_option( '-v', '--valAlign', dest='valAlign', help='Report up to n valid alignments per read' )
parser.add_option( '-V', '--allValAligns', dest='allValAligns', help='Whether or not to report all valid alignments per read' )
parser.add_option( '-G', '--suppressAlign', dest='suppressAlign', help='Suppress all alignments for a read if more than n reportable alignments exist' )
parser.add_option( '-b', '--best', dest='best', help="Whether or not to make Bowtie guarantee that reported singleton alignments are 'best' in terms of stratum and in terms of the quality values at the mismatched positions" )
parser.add_option( '-B', '--maxBacktracks', dest='maxBacktracks', help='Maximum number of backtracks permitted when aligning a read' )
parser.add_option( '-R', '--strata', dest='strata', help='Whether or not to report only those alignments that fall in the best stratum if many valid alignments exist and are reportable' )
parser.add_option( '-j', '--minInsert', dest='minInsert', help='Minimum insert size for valid paired-end alignments' )
parser.add_option( '-J', '--maxInsert', dest='maxInsert', help='Maximum insert size for valid paired-end alignments' )
parser.add_option( '-O', '--mateOrient', dest='mateOrient', help='The upstream/downstream mate orientation for valid paired-end alignment against the forward reference strand' )
parser.add_option( '-A', '--maxAlignAttempt', dest='maxAlignAttempt', help='Maximum number of attempts Bowtie will make to match an alignment for one mate with an alignment for the opposite mate' )
parser.add_option( '-f', '--forwardAlign', dest='forwardAlign', help='Whether or not to attempt to align the forward reference strand' )
parser.add_option( '-E', '--reverseAlign', dest='reverseAlign', help='Whether or not to attempt to align the reverse-complement reference strand' )
parser.add_option( '-F', '--offrate', dest='offrate', help='Override the offrate of the index to n' )
parser.add_option( '-S', '--seed', dest='seed', help='Seed for pseudo-random number generator' )
parser.add_option( '-8', '--snpphred', dest='snpphred', help='SNP penalty on Phred scale' )
parser.add_option( '-6', '--snpfrac', dest='snpfrac', help='Fraction of sites expected to be SNP sites' )
parser.add_option( '-7', '--keepends', dest='keepends', help='Keep extreme-end nucleotides and qualities' )
parser.add_option( '-C', '--params', dest='params', help='Whether to use default or specified parameters' )
parser.add_option( '-u', '--iautoB', dest='iautoB', help='Automatic or specified behavior' )
parser.add_option( '-K', '--ipacked', dest='ipacked', help='Whether or not to use a packed representation for DNA strings' )
parser.add_option( '-Q', '--ibmax', dest='ibmax', help='Maximum number of suffixes allowed in a block' )
parser.add_option( '-Y', '--ibmaxdivn', dest='ibmaxdivn', help='Maximum number of suffixes allowed in a block as a fraction of the length of the reference' )
parser.add_option( '-D', '--idcv', dest='idcv', help='The period for the difference-cover sample' )
parser.add_option( '-U', '--inodc', dest='inodc', help='Whether or not to disable the use of the difference-cover sample' )
parser.add_option( '-y', '--inoref', dest='inoref', help='Whether or not to build the part of the reference index used only in paired-end alignment' )
parser.add_option( '-z', '--ioffrate', dest='ioffrate', help='How many rows get marked during annotation of some or all of the Burrows-Wheeler rows' )
parser.add_option( '-W', '--iftab', dest='iftab', help='The size of the lookup table used to calculate an initial Burrows-Wheeler range with respect to the first n characters of the query' )
parser.add_option( '-X', '--intoa', dest='intoa', help='Whether or not to convert Ns in the reference sequence to As' )
parser.add_option( '-N', '--iendian', dest='iendian', help='Endianness to use when serializing integers to the index file' )
parser.add_option( '-Z', '--iseed', dest='iseed', help='Seed for the pseudorandom number generator' )
parser.add_option( '-c', '--icutoff', dest='icutoff', help='Number of first bases of the reference sequence to index' )
parser.add_option( '-x', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set' )
parser.add_option( '-H', '--suppressHeader', dest='suppressHeader', help='Suppress header' )
parser.add_option( '--galaxy_input_format', dest='galaxy_input_format', default="fastqsanger", help='galaxy input format' )
parser.add_option( '--do_not_build_index', dest='do_not_build_index', action="store_true", default=False, help='Flag to specify that provided file is already indexed, use as is' )
(options, args) = parser.parse_args()
stdout = ''
# make temp directory for placement of indices and copy reference file there if necessary
tmp_index_dir = tempfile.mkdtemp()
# get type of data (solid or solexa)
if options.dataType == 'solid':
colorspace = '-C'
else:
colorspace = ''
# index if necessary
if options.genomeSource == 'history' and not options.do_not_build_index:
# set up commands
if options.index_settings =='indexPreSet':
indexing_cmds = '%s' % colorspace
else:
try:
if options.iautoB and options.iautoB == 'set':
iautoB = '--noauto'
else:
iautoB = ''
if options.ipacked and options.ipacked == 'packed':
ipacked = '--packed'
else:
ipacked = ''
if options.ibmax and int( options.ibmax ) >= 1:
ibmax = '--bmax %s' % options.ibmax
else:
ibmax = ''
if options.ibmaxdivn and int( options.ibmaxdivn ) >= 0:
ibmaxdivn = '--bmaxdivn %s' % options.ibmaxdivn
else:
ibmaxdivn = ''
if options.idcv and int( options.idcv ) > 0:
idcv = '--dcv %s' % options.idcv
else:
idcv = ''
if options.inodc and options.inodc == 'nodc':
inodc = '--nodc'
else:
inodc = ''
if options.inoref and options.inoref == 'noref':
inoref = '--noref'
else:
inoref = ''
if options.iftab and int( options.iftab ) >= 0:
iftab = '--ftabchars %s' % options.iftab
else:
iftab = ''
if options.intoa and options.intoa == 'yes':
intoa = '--ntoa'
else:
intoa = ''
if options.iendian and options.iendian == 'big':
iendian = '--big'
else:
iendian = '--little'
if options.iseed and int( options.iseed ) > 0:
iseed = '--seed %s' % options.iseed
else:
iseed = ''
if options.icutoff and int( options.icutoff ) > 0:
icutoff = '--cutoff %s' % options.icutoff
else:
icutoff = ''
indexing_cmds = '%s %s %s %s %s %s %s --offrate %s %s %s %s %s %s %s' % \
( iautoB, ipacked, ibmax, ibmaxdivn, idcv, inodc,
inoref, options.ioffrate, iftab, intoa, iendian,
iseed, icutoff, colorspace )
except ValueError, e:
# clean up temp dir
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stop_err( "Something is wrong with the indexing parameters and the indexing and alignment could not be run. Make sure you don't have any non-numeric values where they should be numeric.\n" + str( e ) )
ref_file = tempfile.NamedTemporaryFile( dir=tmp_index_dir )
ref_file_name = ref_file.name
ref_file.close()
os.symlink( options.ref, ref_file_name )
cmd1 = 'bowtie-build %s -f %s %s' % ( indexing_cmds, ref_file_name, ref_file_name )
try:
tmp = tempfile.NamedTemporaryFile( dir=tmp_index_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd1, shell=True, cwd=tmp_index_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
# clean up temp dir
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stop_err( 'Error indexing reference sequence\n' + str( e ) )
stdout += 'File indexed. '
else:
ref_file_name = options.ref
# set up aligning and generate aligning command options
# automatically set threads in both cases
tmp_suppressed_file_name = None
tmp_unmapped_file_name = None
if options.suppressHeader == 'true':
suppressHeader = '--sam-nohead'
else:
suppressHeader = ''
if options.maxInsert and int( options.maxInsert ) > 0:
maxInsert = '-X %s' % options.maxInsert
else:
maxInsert = ''
if options.mateOrient:
mateOrient = '--%s' % options.mateOrient
else:
mateOrient = ''
quality_score_encoding = GALAXY_FORMAT_TO_QUALITY_SCORE_ENCODING_ARG.get( options.galaxy_input_format, DEFAULT_ASCII_ENCODING )
if options.params == 'preSet':
aligning_cmds = '-q %s %s -p %s -S %s %s %s ' % \
( maxInsert, mateOrient, options.threads, suppressHeader, colorspace, quality_score_encoding )
else:
try:
if options.skip and int( options.skip ) > 0:
skip = '-s %s' % options.skip
else:
skip = ''
if options.alignLimit and int( options.alignLimit ) >= 0:
alignLimit = '-u %s' % options.alignLimit
else:
alignLimit = ''
if options.trimH and int( options.trimH ) > 0:
trimH = '-5 %s' % options.trimH
else:
trimH = ''
if options.trimL and int( options.trimL ) > 0:
trimL = '-3 %s' % options.trimL
else:
trimL = ''
if options.maqSoapAlign != '-1' and int( options.maqSoapAlign ) >= 0:
maqSoapAlign = '-v %s' % options.maqSoapAlign
else:
maqSoapAlign = ''
if options.mismatchSeed and (options.mismatchSeed == '0' or options.mismatchSeed == '1' \
or options.mismatchSeed == '2' or options.mismatchSeed == '3'):
mismatchSeed = '-n %s' % options.mismatchSeed
else:
mismatchSeed = ''
if options.mismatchQual and int( options.mismatchQual ) >= 0:
mismatchQual = '-e %s' % options.mismatchQual
else:
mismatchQual = ''
if options.seedLen and int( options.seedLen ) >= 5:
seedLen = '-l %s' % options.seedLen
else:
seedLen = ''
if options.rounding == 'noRound':
rounding = '--nomaqround'
else:
rounding = ''
if options.minInsert and int( options.minInsert ) > 0:
minInsert = '-I %s' % options.minInsert
else:
minInsert = ''
if options.maxAlignAttempt and int( options.maxAlignAttempt ) >= 0:
maxAlignAttempt = '--pairtries %s' % options.maxAlignAttempt
else:
maxAlignAttempt = ''
if options.forwardAlign == 'noForward':
forwardAlign = '--nofw'
else:
forwardAlign = ''
if options.reverseAlign == 'noReverse':
reverseAlign = '--norc'
else:
reverseAlign = ''
if options.maxBacktracks and int( options.maxBacktracks ) > 0 and \
( options.mismatchSeed == '2' or options.mismatchSeed == '3' ):
maxBacktracks = '--maxbts %s' % options.maxBacktracks
else:
maxBacktracks = ''
if options.tryHard == 'doTryHard':
tryHard = '-y'
else:
tryHard = ''
if options.valAlign and int( options.valAlign ) >= 0:
valAlign = '-k %s' % options.valAlign
else:
valAlign = ''
if options.allValAligns == 'doAllValAligns':
allValAligns = '-a'
else:
allValAligns = ''
if options.suppressAlign and int( options.suppressAlign ) >= 0:
suppressAlign = '-m %s' % options.suppressAlign
else:
suppressAlign = ''
if options.best == 'doBest':
best = '--best'
else:
best = ''
if options.strata == 'doStrata':
strata = '--strata'
else:
strata = ''
if options.offrate and int( options.offrate ) >= 0:
offrate = '-o %s' % options.offrate
else:
offrate = ''
if options.seed and int( options.seed ) >= 0:
seed = '--seed %s' % options.seed
else:
seed = ''
if options.paired == 'paired':
if options.output_unmapped_reads_l and options.output_unmapped_reads_r:
tmp_unmapped_file = tempfile.NamedTemporaryFile( dir=tmp_index_dir, suffix='.fastq' )
tmp_unmapped_file_name = tmp_unmapped_file.name
tmp_unmapped_file.close()
output_unmapped_reads = '--un %s' % tmp_unmapped_file_name
else:
output_unmapped_reads = ''
if options.output_suppressed_reads:
tmp_suppressed_file = tempfile.NamedTemporaryFile( dir=tmp_index_dir, suffix='.fastq' )
tmp_suppressed_file_name = tmp_suppressed_file.name
tmp_suppressed_file.close()
output_suppressed_reads = '--max %s' % tmp_suppressed_file_name
else:
output_suppressed_reads = ''
else:
if options.output_unmapped_reads:
output_unmapped_reads = '--un %s' % options.output_unmapped_reads
else:
output_unmapped_reads = ''
if options.output_suppressed_reads:
output_suppressed_reads = '--max %s' % options.output_suppressed_reads
else:
output_suppressed_reads = ''
snpfrac = ''
if options.snpphred and int( options.snpphred ) >= 0:
snpphred = '--snpphred %s' % options.snpphred
else:
snpphred = ''
if options.snpfrac and float( options.snpfrac ) >= 0:
snpfrac = '--snpfrac %s' % options.snpfrac
if options.keepends and options.keepends == 'doKeepends':
keepends = '--col-keepends'
else:
keepends = ''
aligning_cmds = '-q %s %s -p %s -S %s %s %s %s %s %s %s %s %s %s %s %s ' \
'%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s ' % \
( maxInsert, mateOrient, options.threads, suppressHeader,
colorspace, skip, alignLimit, trimH, trimL, maqSoapAlign,
mismatchSeed, mismatchQual, seedLen, rounding, minInsert,
maxAlignAttempt, forwardAlign, reverseAlign, maxBacktracks,
tryHard, valAlign, allValAligns, suppressAlign, best,
strata, offrate, seed, snpphred, snpfrac, keepends,
output_unmapped_reads, output_suppressed_reads,
quality_score_encoding )
except ValueError, e:
# clean up temp dir
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stop_err( 'Something is wrong with the alignment parameters and the alignment could not be run\n' + str( e ) )
try:
# have to nest try-except in try-finally to handle 2.4
try:
# prepare actual mapping commands
if options.paired == 'paired':
cmd2 = 'bowtie %s %s -1 %s -2 %s > %s' % ( aligning_cmds, ref_file_name, options.input1, options.input2, options.output )
else:
cmd2 = 'bowtie %s %s %s > %s' % ( aligning_cmds, ref_file_name, options.input1, options.output )
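# For example (all paths and values illustrative), a paired-end invocation
# might expand to:
#   bowtie -q -X 250 --fr -p 4 -S --phred33-quals /tmp/tmpXXXX/ref \
#       -1 reads_1.fastq -2 reads_2.fastq > out.sam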
# align
tmp = tempfile.NamedTemporaryFile( dir=tmp_index_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=cmd2, shell=True, cwd=tmp_index_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
# get suppressed and unmapped reads output files in place if appropriate
if options.paired == 'paired' and tmp_suppressed_file_name and \
options.output_suppressed_reads_l and options.output_suppressed_reads_r:
try:
left = tmp_suppressed_file_name.replace( '.fastq', '_1.fastq' )
right = tmp_suppressed_file_name.replace( '.fastq', '_2.fastq' )
shutil.move( left, options.output_suppressed_reads_l )
shutil.move( right, options.output_suppressed_reads_r )
except Exception, e:
sys.stdout.write( 'Error producing the suppressed output file.\n' )
if options.paired == 'paired' and tmp_unmapped_file_name and \
options.output_unmapped_reads_l and options.output_unmapped_reads_r:
try:
left = tmp_unmapped_file_name.replace( '.fastq', '_1.fastq' )
right = tmp_unmapped_file_name.replace( '.fastq', '_2.fastq' )
shutil.move( left, options.output_unmapped_reads_l )
shutil.move( right, options.output_unmapped_reads_r )
except Exception, e:
sys.stdout.write( 'Error producing the unmapped output file.\n' )
# check that there are results in the output file
if os.path.getsize( options.output ) == 0:
raise Exception, 'The output file is empty, there may be an error with your input file or settings.'
except Exception, e:
stop_err( 'Error aligning sequence. ' + str( e ) )
finally:
# clean up temp dir
if os.path.exists( tmp_index_dir ):
shutil.rmtree( tmp_index_dir )
stdout += 'Sequence file aligned.\n'
sys.stdout.write( stdout )
if __name__=="__main__": __main__()
|
|
"""Views for the ``django-user-media`` app."""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db.models import ObjectDoesNotExist
from django.http import Http404, HttpResponse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, DeleteView, FormView, UpdateView
from django_libs.views_mixins import AjaxResponseMixin
from easy_thumbnails.files import get_thumbnailer
from simplejson import dumps
from user_media.forms import UserMediaImageForm, UserMediaImageSingleUploadForm
from user_media.models import UserMediaImage
class UserMediaImageViewMixin(object):
"""
Mixin for views that deal with `UserMediaImage` objects.
When using this mixin please make sure that you call `_add_next_and_user()`
in your `dispatch()` method.
"""
model = UserMediaImage
def _add_next_and_user(self, request):
self.next = request.POST.get('next', '') or request.GET.get('next', '')
self.user = request.user
def get_context_data(self, **kwargs):
"""
Adds `next` to the context.
This makes sure that the `next` parameter doesn't get lost if the
form was submitted invalid.
"""
ctx = super(UserMediaImageViewMixin, self).get_context_data(**kwargs)
ctx.update({
'action': self.action,
'next': self.next,
})
return ctx
def get_success_url(self):
"""
Returns the success URL.
This is either the given `next` URL parameter or the content object's
`get_absolute_url` method's return value.
"""
if self.next:
return self.next
if self.object and self.object.content_object:
return self.object.content_object.get_absolute_url()
raise Exception(
'No content object given. Please provide ``next`` in your POST'
' data')
class CreateImageView(AjaxResponseMixin, UserMediaImageViewMixin, CreateView):
action = 'create'
form_class = UserMediaImageForm
ajax_template_prefix = 'partials/ajax_'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class and performs security checks."""
self._add_next_and_user(request)
self.content_object = None
self.content_type = None
self.object_id = kwargs.get('object_id', None)
if kwargs.get('content_type'):
# Check if the user forged the URL and posted a non-existent
# content type
try:
self.content_type = ContentType.objects.get(
model=kwargs.get('content_type'))
except ContentType.DoesNotExist:
raise Http404
if self.content_type:
# Check if the user forged the URL and tries to append the image to
# an object that does not exist
try:
self.content_object = \
self.content_type.get_object_for_this_type(
pk=self.object_id)
except ObjectDoesNotExist:
raise Http404
if self.content_object and hasattr(self.content_object, 'user'):
# Check if the user forged the URL and tries to append the image to
# an object that does not belong to them
if not self.content_object.user == self.user:
raise Http404
return super(CreateImageView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(CreateImageView, self).get_context_data(**kwargs)
ctx.update({
'content_type': self.content_type,
'object_id': self.object_id,
})
return ctx
def get_form_kwargs(self):
kwargs = super(CreateImageView, self).get_form_kwargs()
kwargs.update({
'user': self.user,
'content_type': self.content_type,
'object_id': self.object_id,
})
return kwargs
class DeleteImageView(AjaxResponseMixin, UserMediaImageViewMixin, DeleteView):
"""Deletes an `UserMediaImage` object."""
action = 'delete'
model = UserMediaImage
ajax_template_prefix = 'partials/ajax_'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class."""
self._add_next_and_user(request)
return super(DeleteImageView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(DeleteImageView, self).get_context_data(**kwargs)
ctx.update({
'image_pk': self.object.pk,
})
return ctx
def get_queryset(self):
"""
Make sure that a user can only delete their own images,
even if they forge the request URL.
"""
queryset = super(DeleteImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
class UpdateImageView(AjaxResponseMixin, UserMediaImageViewMixin, UpdateView):
"""Updates an existing `UserMediaImage` object."""
action = 'update'
model = UserMediaImage
form_class = UserMediaImageForm
ajax_template_prefix = 'partials/ajax_'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
"""Adds useful objects to the class."""
self._add_next_and_user(request)
return super(UpdateImageView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(UpdateImageView, self).get_context_data(**kwargs)
ctx.update({
'content_type': self.object.content_type,
'object_id': self.object.object_id,
'image_pk': self.object.pk,
})
return ctx
def get_form_kwargs(self):
kwargs = super(UpdateImageView, self).get_form_kwargs()
kwargs.update({
'user': self.user,
'content_type': self.object.content_type,
'object_id': self.object.object_id,
})
return kwargs
def get_queryset(self):
"""
Make sure that a user can only edit their own images,
even if they forge the request URL.
"""
queryset = super(UpdateImageView, self).get_queryset()
queryset = queryset.filter(user=self.user)
return queryset
class AJAXMultipleImageUploadView(CreateView):
"""Ajax view to handle the multiple image upload."""
model = UserMediaImage
form_class = UserMediaImageForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.obj_id = kwargs.get('obj_id', None)
self.user = request.user
if not request.is_ajax():
# Since we use a jquery modal and a jquery upload we should only
# allow ajax calls
raise Http404
# Check if the user posted a non-existent content type
try:
self.c_type = ContentType.objects.get(model=kwargs.get('c_type'))
except ContentType.DoesNotExist:
raise Http404
# Check if the content object exists
try:
self.content_object = self.c_type.get_object_for_this_type(
pk=self.obj_id)
except ObjectDoesNotExist:
raise Http404
# Check for permissions
# Add a single user to the content object or prepare a user_can_edit
# function.
if (not hasattr(self.content_object, 'user')
or not self.content_object.user == self.user):
if (not hasattr(self.content_object, 'user_can_edit')
or not self.content_object.user_can_edit(self.user)):
raise Http404
return super(AJAXMultipleImageUploadView, self).dispatch(
request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(AJAXMultipleImageUploadView, self).get_form_kwargs()
# Prepare context for UserMediaImage form
kwargs.update({
'user': self.user,
'content_type': self.c_type,
'object_id': self.obj_id,
})
return kwargs
def form_valid(self, form):
# Check if maximum amount of pictures has been reached
try:
max_pictures = int(self.request.POST.get('maximum'))
except (TypeError, ValueError):
max_pictures = getattr(settings, 'USER_MEDIA_UPLOAD_MAXIMUM', 3)
stored_images = self.user.usermediaimage_set.filter(
object_id=self.obj_id, content_type=self.c_type)
if stored_images.count() >= max_pictures:
return HttpResponse(_('Maximum amount limit exceeded.'))
# Save the UserMediaImage
self.object = form.save()
f = self.request.FILES.get('image')
# Generate and get the thumbnail of the uploaded image
thumbnailer = get_thumbnailer(self.object.image.name)
thumb = thumbnailer.get_thumbnail({
'crop': True, 'upscale': True,
'size': self.object.small_size(as_string=False),
})
# Prepare context for the list item html
context_data = {
'image': self.object,
'mode': 'multiple',
}
context = RequestContext(self.request, context_data)
# Prepare the json response
data = {'files': [{
'name': f.name,
'url': self.object.image.url,
'thumbnail_url': thumb.url,
'list_item_html': render_to_string(
'user_media/partials/image.html', context),
}]}
response = HttpResponse(dumps(data), mimetype='application/json')
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class AJAXSingleImageUploadView(FormView):
"""Ajax view to handle the single image upload."""
form_class = UserMediaImageSingleUploadForm
template_name = 'user_media/partials/image.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.is_ajax() or not request.method == 'POST':
raise Http404
self.user = request.user
# Check if the user posted a non-existent content type
try:
self.c_type = ContentType.objects.get(model=kwargs.get('c_type'))
except ContentType.DoesNotExist:
raise Http404
# Check if the content object exists
try:
self.content_object = self.c_type.get_object_for_this_type(
pk=kwargs.get('obj_id'))
except ObjectDoesNotExist:
raise Http404
# Check if content_object has the requested image field
if hasattr(self.content_object, kwargs.get('field')):
self.field_name = kwargs.get('field')
else:
raise Http404
# Check for permissions
if (not hasattr(self.content_object, 'user')
or not self.content_object.user == self.user):
if (not hasattr(self.content_object, 'user_can_edit')
or not self.content_object.user_can_edit(self.user)):
raise Http404
return super(AJAXSingleImageUploadView, self).dispatch(
request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(AJAXSingleImageUploadView, self).get_form_kwargs()
kwargs.update({
'instance': self.content_object,
'image_field': self.field_name,
})
return kwargs
def form_valid(self, form):
# Save the image
self.content_object = form.save()
f = self.request.FILES.get(self.field_name)
image = getattr(self.content_object, self.field_name)
size = getattr(settings, 'USER_MEDIA_THUMB_SIZE_LARGE', (150, 150))
# Generate and get the thumbnail of the uploaded image
thumbnailer = get_thumbnailer(image.name)
thumb = thumbnailer.get_thumbnail({
'crop': True, 'upscale': True,
'size': size,
})
# Prepare context for the list item html
context_data = {
'image': image,
'mode': 'single',
'size': (self.request.POST.get('size')
or u'{}x{}'.format(size[0], size[1])),
}
context = RequestContext(self.request, context_data)
# Prepare the json response
data = {'files': [{
'name': f.name,
'url': image.url,
'thumbnail_url': thumb.url,
'list_item_html': render_to_string(self.template_name, context),
}]}
response = HttpResponse(dumps(data), mimetype='application/json')
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class AJAXImageCropView(UserMediaImageViewMixin, UpdateView):
"""Ajax view to update an image's crop attributes."""
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.is_ajax() or not request.method == 'POST':
raise Http404
self.user = request.user
self.kwargs = kwargs
self.object = self.get_object()
if not self.object.user == self.user:
raise Http404
for field in ['x', 'x2', 'y', 'y2', 'w', 'h']:
# Save the Jcrop values to the image
setattr(self.object, 'thumb_' + field, request.POST.get(field))
self.object.save()
box = (
int(self.object.thumb_x),
int(self.object.thumb_y),
int(self.object.thumb_x2),
int(self.object.thumb_y2),
)
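# The box is (x1, y1, x2, y2) in the PIL convention (left, upper, right,
# lower); easy_thumbnails is assumed here to honour it via the 'box'
# thumbnail option, cropping to the rectangle selected with Jcrop.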
thumbnailer = get_thumbnailer(self.object.image.name)
thumb = thumbnailer.get_thumbnail({
'box': box,
'size': self.object.small_size(as_string=False),
})
return HttpResponse(thumb.url)
|
|
from __future__ import print_function, division
from sympy.core import sympify
from sympy.core.add import Add
from sympy.core.function import Lambda, Function, ArgumentIndexError
from sympy.core.cache import cacheit
from sympy.core.numbers import Integer
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Wild, Dummy
from sympy.core.mul import Mul
from sympy.core.logic import fuzzy_not
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.ntheory import multiplicity, perfect_power
from sympy.core.compatibility import range
# NOTE IMPORTANT
# The series expansion code in this file is an important part of the gruntz
# algorithm for determining limits. _eval_nseries has to return a generalized
# power series with coefficients in C(log(x), log).
# In more detail, the result of _eval_nseries(self, x, n) must be
# c_0*x**e_0 + ... (finitely many terms)
# where e_i are numbers (not necessarily integers) and c_i involve only
# numbers, the function log, and log(x). [This also means it must not contain
# log(x*(1+p)); this *has* to be expanded to log(x)+log(1+p) if x.is_positive and
# p.is_positive.]
class ExpBase(Function):
unbranched = True
def inverse(self, argindex=1):
"""
Returns the inverse function of ``exp(x)``.
"""
return log
def as_numer_denom(self):
"""
Returns this with a positive exponent as a 2-tuple (a fraction).
Examples
========
>>> from sympy.functions import exp
>>> from sympy.abc import x
>>> exp(-x).as_numer_denom()
(1, exp(x))
>>> exp(x).as_numer_denom()
(exp(x), 1)
"""
# this should be the same as Pow.as_numer_denom wrt
# exponent handling
exp = self.exp
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
if neg_exp:
return S.One, self.func(-exp)
return self, S.One
@property
def exp(self):
"""
Returns the exponent of the function.
"""
return self.args[0]
def as_base_exp(self):
"""
Returns the 2-tuple (base, exponent).
"""
return self.func(1), Mul(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_infinite:
if arg.is_negative:
return True
if arg.is_positive:
return False
if arg.is_finite:
return True
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if s.exp is S.Zero:
return True
elif s.exp.is_rational and fuzzy_not(s.exp.is_zero):
return False
else:
return s.is_rational
def _eval_is_zero(self):
return (self.args[0] is S.NegativeInfinity)
def _eval_power(self, other):
"""exp(arg)**e -> exp(arg*e) if assumptions allow it.
"""
b, e = self.as_base_exp()
return Pow._eval_power(Pow(b, e, evaluate=False), other)
def _eval_expand_power_exp(self, **hints):
arg = self.args[0]
if arg.is_Add and arg.is_commutative:
expr = 1
for x in arg.args:
expr *= self.func(x)
return expr
return self.func(arg)
class exp_polar(ExpBase):
r"""
Represent a 'polar number' (see g-function Sphinx documentation).
``exp_polar`` represents the function
`Exp: \mathbb{C} \rightarrow \mathcal{S}`, sending the complex number
`z = a + bi` to the polar number `r = exp(a), \theta = b`. It is one of
the main functions to construct polar numbers.
>>> from sympy import exp_polar, pi, I, exp
The main difference is that polar numbers don't "wrap around" at `2 \pi`:
>>> exp(2*pi*I)
1
>>> exp_polar(2*pi*I)
exp_polar(2*I*pi)
apart from that they behave mostly like classical complex numbers:
>>> exp_polar(2)*exp_polar(3)
exp_polar(5)
See also
========
sympy.simplify.simplify.powsimp
sympy.functions.elementary.complexes.polar_lift
sympy.functions.elementary.complexes.periodic_argument
sympy.functions.elementary.complexes.principal_branch
"""
is_polar = True
is_comparable = False # cannot be evalf'd
def _eval_Abs(self):
from sympy import expand_mul
return sqrt( expand_mul(self * self.conjugate()) )
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
from sympy import im, pi, re
i = im(self.args[0])
try:
bad = (i <= -pi or i > pi)
except TypeError:
bad = True
if bad:
return self # cannot evalf for this argument
res = exp(self.args[0])._eval_evalf(prec)
if i > 0 and im(res) < 0:
# i ~ pi, but exp(I*i) evaluated to argument slightly bigger than pi
return re(res)
return res
def _eval_power(self, other):
return self.func(self.args[0]*other)
def _eval_is_real(self):
if self.args[0].is_real:
return True
def as_base_exp(self):
# XXX exp_polar(0) is special!
if self.args[0] == 0:
return self, S(1)
return ExpBase.as_base_exp(self)
class exp(ExpBase):
"""
The exponential function, :math:`e^x`.
See Also
========
log
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return self
else:
raise ArgumentIndexError(self, argindex)
def _eval_refine(self, assumptions):
from sympy.assumptions import ask, Q
arg = self.args[0]
if arg.is_Mul:
Ioo = S.ImaginaryUnit*S.Infinity
if arg in [Ioo, -Ioo]:
return S.NaN
coeff = arg.as_coefficient(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff)):
if ask(Q.even(coeff)):
return S.One
elif ask(Q.odd(coeff)):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half)):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half)):
return S.ImaginaryUnit
@classmethod
def eval(cls, arg):
from sympy.assumptions import ask, Q
from sympy.calculus import AccumBounds
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Zero:
return S.One
elif arg is S.One:
return S.Exp1
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.func is log:
return arg.args[0]
elif isinstance(arg, AccumBounds):
return AccumBounds(exp(arg.min), exp(arg.max))
elif arg.is_Mul:
if arg.is_number or arg.is_Symbol:
coeff = arg.coeff(S.Pi*S.ImaginaryUnit)
if coeff:
if ask(Q.integer(2*coeff)):
if ask(Q.even(coeff)):
return S.One
elif ask(Q.odd(coeff)):
return S.NegativeOne
elif ask(Q.even(coeff + S.Half)):
return -S.ImaginaryUnit
elif ask(Q.odd(coeff + S.Half)):
return S.ImaginaryUnit
# Warning: code in risch.py will be very sensitive to changes
# in this (see DifferentialExtension).
# look for a single log factor
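            # e.g. (illustrative) exp(3*log(x)) evaluates to x**3 via this branch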
coeff, terms = arg.as_coeff_Mul()
# but it can't be multiplied by oo
if coeff in [S.NegativeInfinity, S.Infinity]:
return None
coeffs, log_term = [coeff], None
for term in Mul.make_args(terms):
if term.func is log:
if log_term is None:
log_term = term.args[0]
else:
return None
elif term.is_comparable:
coeffs.append(term)
else:
return None
return log_term**Mul(*coeffs) if log_term else None
elif arg.is_Add:
out = []
add = []
for a in arg.args:
if a is S.One:
add.append(a)
continue
newa = cls(a)
if newa.func is cls:
add.append(a)
else:
out.append(newa)
if out:
return Mul(*out)*cls(Add(*add), evaluate=False)
elif arg.is_Matrix:
return arg.exp()
@property
def base(self):
"""
Returns the base of the exponential function.
"""
return S.Exp1
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""
Calculates the next term in the Taylor series expansion.
"""
if n < 0:
return S.Zero
if n == 0:
return S.One
x = sympify(x)
if previous_terms:
p = previous_terms[-1]
if p is not None:
return p * x / n
return x**n/factorial(n)
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a 2-tuple representing a complex number.
Examples
========
>>> from sympy import I
>>> from sympy.abc import x
>>> from sympy.functions import exp
>>> exp(x).as_real_imag()
(exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x)))
>>> exp(1).as_real_imag()
(E, 0)
>>> exp(I).as_real_imag()
(cos(1), sin(1))
>>> exp(1+I).as_real_imag()
(E*cos(1), E*sin(1))
See Also
========
sympy.functions.elementary.complexes.re
sympy.functions.elementary.complexes.im
"""
import sympy
re, im = self.args[0].as_real_imag()
if deep:
re = re.expand(deep, **hints)
im = im.expand(deep, **hints)
cos, sin = sympy.cos(im), sympy.sin(im)
return (exp(re)*cos, exp(re)*sin)
def _eval_subs(self, old, new):
# keep processing of power-like args centralized in Pow
if old.is_Pow: # handle (exp(3*log(x))).subs(x**2, z) -> z**(3/2)
old = exp(old.exp*log(old.base))
elif old is S.Exp1 and new.is_Function:
old = exp
if old.func is exp or old is S.Exp1:
f = lambda a: Pow(*a.as_base_exp(), evaluate=False) if (
a.is_Pow or a.func is exp) else a
return Pow._eval_subs(f(self), f(old), new)
if old is exp and not new.is_Function:
return new**self.exp._subs(old, new)
return Function._eval_subs(self, old, new)
def _eval_is_real(self):
if self.args[0].is_real:
return True
elif self.args[0].is_imaginary:
arg2 = -S(2) * S.ImaginaryUnit * self.args[0] / S.Pi
return arg2.is_even
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if fuzzy_not(self.exp.is_zero):
if self.exp.is_algebraic:
return False
elif (self.exp/S.Pi).is_rational:
return False
else:
return s.is_algebraic
def _eval_is_positive(self):
if self.args[0].is_real:
            return self.args[0] is not S.NegativeInfinity
elif self.args[0].is_imaginary:
arg2 = -S.ImaginaryUnit * self.args[0] / S.Pi
return arg2.is_even
def _eval_nseries(self, x, n, logx):
# NOTE Please see the comment at the beginning of this file, labelled
# IMPORTANT.
from sympy import limit, oo, Order, powsimp
arg = self.args[0]
arg_series = arg._eval_nseries(x, n=n, logx=logx)
if arg_series.is_Order:
return 1 + arg_series
arg0 = limit(arg_series.removeO(), x, 0)
if arg0 in [-oo, oo]:
return self
t = Dummy("t")
exp_series = exp(t)._taylor(t, n)
o = exp_series.getO()
exp_series = exp_series.removeO()
r = exp(arg0)*exp_series.subs(t, arg_series - arg0)
r += Order(o.expr.subs(t, (arg_series - arg0)), x)
r = r.expand()
return powsimp(r, deep=True, combine='exp')
def _taylor(self, x, n):
from sympy import Order
l = []
g = None
for i in range(n):
g = self.taylor_term(i, self.args[0], g)
g = g.nseries(x, n=n)
l.append(g)
return Add(*l) + Order(x**n, x)
def _eval_as_leading_term(self, x):
from sympy import Order
arg = self.args[0]
if arg.is_Add:
return Mul(*[exp(f).as_leading_term(x) for f in arg.args])
arg = self.args[0].as_leading_term(x)
if Order(1, x).contains(arg):
return S.One
return exp(arg)
def _eval_rewrite_as_sin(self, arg):
from sympy import sin
I = S.ImaginaryUnit
return sin(I*arg + S.Pi/2) - I*sin(I*arg)
def _eval_rewrite_as_cos(self, arg):
from sympy import cos
I = S.ImaginaryUnit
return cos(I*arg) + I*cos(I*arg + S.Pi/2)
def _eval_rewrite_as_tanh(self, arg):
from sympy import tanh
return (1 + tanh(arg/2))/(1 - tanh(arg/2))
class log(Function):
"""
The natural logarithm function `\ln(x)` or `\log(x)`.
Logarithms are taken with the natural base, `e`. To get
a logarithm of a different base ``b``, use ``log(x, b)``,
which is essentially short-hand for ``log(x)/log(b)``.
See Also
========
exp
"""
def fdiff(self, argindex=1):
"""
Returns the first derivative of the function.
"""
if argindex == 1:
return 1/self.args[0]
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""
Returns `e^x`, the inverse function of `\log(x)`.
"""
return exp
@classmethod
def eval(cls, arg, base=None):
from sympy import unpolarify
from sympy.calculus import AccumBounds
arg = sympify(arg)
if base is not None:
base = sympify(base)
if base == 1:
if arg == 1:
return S.NaN
else:
return S.ComplexInfinity
try:
# handle extraction of powers of the base now
# or else expand_log in Mul would have to handle this
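                # e.g. (illustrative) log(8, 2) evaluates to 3 here, since
                # multiplicity(2, 8) == 3 and log(8 // 8) == 0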
n = multiplicity(base, arg)
if n:
den = base**n
if den.is_Integer:
return n + log(arg // den) / log(base)
else:
return n + log(arg / den) / log(base)
else:
return log(arg)/log(base)
except ValueError:
pass
if base is not S.Exp1:
return cls(arg)/cls(base)
else:
return cls(arg)
if arg.is_Number:
if arg is S.Zero:
return S.ComplexInfinity
elif arg is S.One:
return S.Zero
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Infinity
elif arg is S.NaN:
return S.NaN
elif arg.is_Rational:
if arg.q != 1:
return cls(arg.p) - cls(arg.q)
if arg.func is exp and arg.args[0].is_real:
return arg.args[0]
elif arg.func is exp_polar:
return unpolarify(arg.exp)
elif isinstance(arg, AccumBounds):
if arg.min.is_positive:
return AccumBounds(log(arg.min), log(arg.max))
else:
return
if arg.is_number:
if arg.is_negative:
return S.Pi * S.ImaginaryUnit + cls(-arg)
elif arg is S.ComplexInfinity:
return S.ComplexInfinity
elif arg is S.Exp1:
return S.One
# don't autoexpand Pow or Mul (see the issue 3351):
if not arg.is_Add:
coeff = arg.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if coeff is S.Infinity:
return S.Infinity
elif coeff is S.NegativeInfinity:
return S.Infinity
elif coeff.is_Rational:
if coeff.is_nonnegative:
return S.Pi * S.ImaginaryUnit * S.Half + cls(coeff)
else:
return -S.Pi * S.ImaginaryUnit * S.Half + cls(-coeff)
def as_base_exp(self):
"""
Returns this function in the form (base, exponent).
"""
return self, S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms): # of log(1+x)
"""
Returns the next term in the Taylor series expansion of `\log(1+x)`.
"""
from sympy import powsimp
if n < 0:
return S.Zero
x = sympify(x)
if n == 0:
return x
if previous_terms:
p = previous_terms[-1]
if p is not None:
return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
def _eval_expand_log(self, deep=True, **hints):
from sympy import unpolarify, expand_log
from sympy.concrete import Sum, Product
force = hints.get('force', False)
if (len(self.args) == 2):
return expand_log(self.func(*self.args), deep=deep, force=force)
arg = self.args[0]
if arg.is_Integer:
# remove perfect powers
p = perfect_power(int(arg))
if p is not False:
return p[1]*self.func(p[0])
elif arg.is_Mul:
expr = []
nonpos = []
for x in arg.args:
if force or x.is_positive or x.is_polar:
a = self.func(x)
if isinstance(a, log):
expr.append(self.func(x)._eval_expand_log(**hints))
else:
expr.append(a)
elif x.is_negative:
a = self.func(-x)
expr.append(a)
nonpos.append(S.NegativeOne)
else:
nonpos.append(x)
return Add(*expr) + log(Mul(*nonpos))
elif arg.is_Pow or isinstance(arg, exp):
if force or (arg.exp.is_real and arg.base.is_positive) or \
arg.base.is_polar:
b = arg.base
e = arg.exp
a = self.func(b)
if isinstance(a, log):
return unpolarify(e) * a._eval_expand_log(**hints)
else:
return unpolarify(e) * a
elif isinstance(arg, Product):
if arg.function.is_positive:
return Sum(log(arg.function), *arg.limits)
return self.func(arg)
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import expand_log, simplify
if (len(self.args) == 2):
return simplify(self.func(*self.args), ratio=ratio, measure=measure)
expr = self.func(simplify(self.args[0], ratio=ratio, measure=measure))
expr = expand_log(expr, deep=True)
return min([expr, self], key=measure)
def as_real_imag(self, deep=True, **hints):
"""
Returns this function as a complex coordinate.
Examples
========
>>> from sympy import I
>>> from sympy.abc import x
>>> from sympy.functions import log
>>> log(x).as_real_imag()
(log(Abs(x)), arg(x))
>>> log(I).as_real_imag()
(0, pi/2)
>>> log(1 + I).as_real_imag()
(log(sqrt(2)), pi/4)
>>> log(I*x).as_real_imag()
(log(Abs(x)), arg(I*x))
"""
from sympy import Abs, arg
if deep:
abs = Abs(self.args[0].expand(deep, **hints))
arg = arg(self.args[0].expand(deep, **hints))
else:
abs = Abs(self.args[0])
arg = arg(self.args[0])
if hints.get('log', False): # Expand the log
hints['complex'] = False
return (log(abs).expand(deep, **hints), arg)
else:
return (log(abs), arg)
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
if s.args[0].is_rational and fuzzy_not((self.args[0] - 1).is_zero):
return False
else:
return s.is_rational
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if (self.args[0] - 1).is_zero:
return True
elif fuzzy_not((self.args[0] - 1).is_zero):
if self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
def _eval_is_real(self):
return self.args[0].is_positive
def _eval_is_finite(self):
arg = self.args[0]
if arg.is_zero:
return False
return arg.is_finite
def _eval_is_positive(self):
return (self.args[0] - 1).is_positive
def _eval_is_zero(self):
return (self.args[0] - 1).is_zero
def _eval_is_nonnegative(self):
return (self.args[0] - 1).is_nonnegative
def _eval_nseries(self, x, n, logx):
# NOTE Please see the comment at the beginning of this file, labelled
# IMPORTANT.
from sympy import cancel, Order
if not logx:
logx = log(x)
if self.args[0] == x:
return logx
arg = self.args[0]
k, l = Wild("k"), Wild("l")
r = arg.match(k*x**l)
if r is not None:
k, l = r[k], r[l]
if l != 0 and not l.has(x) and not k.has(x):
r = log(k) + l*logx # XXX true regardless of assumptions?
return r
# TODO new and probably slow
s = self.args[0].nseries(x, n=n, logx=logx)
while s.is_Order:
n += 1
s = self.args[0].nseries(x, n=n, logx=logx)
a, b = s.leadterm(x)
p = cancel(s/(a*x**b) - 1)
g = None
l = []
for i in range(n + 2):
g = log.taylor_term(i, p, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
return log(a) + b*logx + Add(*l) + Order(p**n, x)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if arg is S.One:
return (self.args[0] - 1).as_leading_term(x)
return self.func(arg)
class LambertW(Function):
"""
The Lambert W function `W(z)` is defined as the inverse
function of `w \exp(w)` [1]_.
In other words, the value of `W(z)` is such that `z = W(z) \exp(W(z))`
for any complex number `z`. The Lambert W function is a multivalued
function with infinitely many branches `W_k(z)`, indexed by
`k \in \mathbb{Z}`. Each branch gives a different solution `w`
of the equation `z = w \exp(w)`.
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
Examples
========
>>> from sympy import LambertW
>>> LambertW(1.2)
0.635564016364870
>>> LambertW(1.2, -1).n()
-1.34747534407696 - 4.41624341514535*I
>>> LambertW(-1).is_real
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Lambert_W_function
"""
@classmethod
def eval(cls, x, k=None):
if k is S.Zero:
return cls(x)
elif k is None:
k = S.Zero
if k is S.Zero:
if x is S.Zero:
return S.Zero
if x is S.Exp1:
return S.One
if x == -1/S.Exp1:
return S.NegativeOne
if x == -log(2)/2:
return -log(2)
if x is S.Infinity:
return S.Infinity
if fuzzy_not(k.is_zero):
if x is S.Zero:
return S.NegativeInfinity
if k is S.NegativeOne:
if x == -S.Pi/2:
return -S.ImaginaryUnit*S.Pi/2
elif x == -1/S.Exp1:
return S.NegativeOne
elif x == -2*exp(-2):
return -Integer(2)
def fdiff(self, argindex=1):
"""
Return the first derivative of this function.
"""
x = self.args[0]
if len(self.args) == 1:
if argindex == 1:
return LambertW(x)/(x*(1 + LambertW(x)))
else:
k = self.args[1]
if argindex == 1:
return LambertW(x, k)/(x*(1 + LambertW(x, k)))
raise ArgumentIndexError(self, argindex)
def _eval_is_real(self):
x = self.args[0]
if len(self.args) == 1:
k = S.Zero
else:
k = self.args[1]
if k.is_zero:
if (x + 1/S.Exp1).is_positive:
return True
elif (x + 1/S.Exp1).is_nonpositive:
return False
elif (k + 1).is_zero:
if x.is_negative and (x + 1/S.Exp1).is_positive:
return True
elif x.is_nonpositive or (x + 1/S.Exp1).is_nonnegative:
return False
elif fuzzy_not(k.is_zero) and fuzzy_not((k + 1).is_zero):
if x.is_real:
return False
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if fuzzy_not(self.args[0].is_zero) and self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
from sympy.core.function import _coeff_isneg
|
|
# Copyright 2015-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import sys
import tempfile
import time
import pexpect
from devlib.exception import HostError, TargetTransientError
from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.instrument import (Instrument, InstrumentChannel, MeasurementsCsv,
Measurement, CONTINUOUS, INSTANTANEOUS)
from devlib.platform import Platform
from devlib.utils.csvutil import csvreader, csvwriter
from devlib.utils.serial_port import open_serial_connection
class VersatileExpressPlatform(Platform):
def __init__(self, name, # pylint: disable=too-many-locals
core_names=None,
core_clusters=None,
big_core=None,
model=None,
modules=None,
# serial settings
serial_port='/dev/ttyS0',
baudrate=115200,
# VExpress MicroSD mount point
vemsd_mount=None,
# supported: dtr, reboottxt
hard_reset_method=None,
# supported: uefi, uefi-shell, u-boot, bootmon
bootloader=None,
# supported: vemsd
flash_method='vemsd',
image=None,
fdt=None,
initrd=None,
bootargs=None,
uefi_entry=None, # only used if bootloader is "uefi"
ready_timeout=60,
):
super(VersatileExpressPlatform, self).__init__(name,
core_names,
core_clusters,
big_core,
model,
modules)
self.serial_port = serial_port
self.baudrate = baudrate
self.vemsd_mount = vemsd_mount
self.image = image
self.fdt = fdt
self.initrd = initrd
self.bootargs = bootargs
self.uefi_entry = uefi_entry
self.ready_timeout = ready_timeout
self.bootloader = None
self.hard_reset_method = None
self._set_bootloader(bootloader)
self._set_hard_reset_method(hard_reset_method)
self._set_flash_method(flash_method)
def init_target_connection(self, target):
if target.os == 'android':
self._init_android_target(target)
else:
self._init_linux_target(target)
def _init_android_target(self, target):
if target.connection_settings.get('device') is None:
addr = self._get_target_ip_address(target)
target.connection_settings['device'] = addr + ':5555'
def _init_linux_target(self, target):
if target.connection_settings.get('host') is None:
addr = self._get_target_ip_address(target)
target.connection_settings['host'] = addr
# pylint: disable=no-member
def _get_target_ip_address(self, target):
with open_serial_connection(port=self.serial_port,
baudrate=self.baudrate,
timeout=30,
init_dtr=0) as tty:
            tty.sendline('su')  # this is, apparently, required to query network device
# info by name on recent Juno builds...
self.logger.debug('Waiting for the shell prompt.')
tty.expect(target.shell_prompt)
self.logger.debug('Waiting for IP address...')
wait_start_time = time.time()
try:
while True:
tty.sendline('ip addr list eth0')
time.sleep(1)
try:
                        tty.expect(r'inet ([1-9]\d*\.\d+\.\d+\.\d+)', timeout=10)
return tty.match.group(1).decode('utf-8')
except pexpect.TIMEOUT:
pass # We have our own timeout -- see below.
if (time.time() - wait_start_time) > self.ready_timeout:
raise TargetTransientError('Could not acquire IP address.')
finally:
tty.sendline('exit') # exit shell created by "su" call at the start
def _set_hard_reset_method(self, hard_reset_method):
if hard_reset_method == 'dtr':
self.modules.append({'vexpress-dtr': {'port': self.serial_port,
'baudrate': self.baudrate,
}})
elif hard_reset_method == 'reboottxt':
self.modules.append({'vexpress-reboottxt': {'port': self.serial_port,
'baudrate': self.baudrate,
'path': self.vemsd_mount,
}})
else:
            raise ValueError('Invalid hard_reset_method: {}'.format(hard_reset_method))
def _set_bootloader(self, bootloader):
self.bootloader = bootloader
if self.bootloader == 'uefi':
self.modules.append({'vexpress-uefi': {'port': self.serial_port,
'baudrate': self.baudrate,
'image': self.image,
'fdt': self.fdt,
'initrd': self.initrd,
'bootargs': self.bootargs,
}})
elif self.bootloader == 'uefi-shell':
self.modules.append({'vexpress-uefi-shell': {'port': self.serial_port,
'baudrate': self.baudrate,
'image': self.image,
'bootargs': self.bootargs,
}})
elif self.bootloader == 'u-boot':
uboot_env = None
if self.bootargs:
uboot_env = {'bootargs': self.bootargs}
self.modules.append({'vexpress-u-boot': {'port': self.serial_port,
'baudrate': self.baudrate,
'env': uboot_env,
}})
elif self.bootloader == 'bootmon':
self.modules.append({'vexpress-bootmon': {'port': self.serial_port,
'baudrate': self.baudrate,
'image': self.image,
'fdt': self.fdt,
'initrd': self.initrd,
'bootargs': self.bootargs,
}})
else:
            raise ValueError('Invalid bootloader: {}'.format(bootloader))
def _set_flash_method(self, flash_method):
if flash_method == 'vemsd':
self.modules.append({'vexpress-vemsd': {'vemsd_mount': self.vemsd_mount}})
else:
            raise ValueError('Invalid flash_method: {}'.format(flash_method))
class Juno(VersatileExpressPlatform):
def __init__(self,
vemsd_mount='/media/JUNO',
baudrate=115200,
bootloader='u-boot',
hard_reset_method='dtr',
**kwargs
):
super(Juno, self).__init__('juno',
vemsd_mount=vemsd_mount,
baudrate=baudrate,
bootloader=bootloader,
hard_reset_method=hard_reset_method,
**kwargs)
class TC2(VersatileExpressPlatform):
def __init__(self,
vemsd_mount='/media/VEMSD',
baudrate=38400,
bootloader='bootmon',
hard_reset_method='reboottxt',
**kwargs
):
super(TC2, self).__init__('tc2',
vemsd_mount=vemsd_mount,
baudrate=baudrate,
bootloader=bootloader,
hard_reset_method=hard_reset_method,
**kwargs)
class JunoEnergyInstrument(Instrument):
binname = 'readenergy'
mode = CONTINUOUS | INSTANTANEOUS
_channels = [
InstrumentChannel('sys', 'current'),
InstrumentChannel('a57', 'current'),
InstrumentChannel('a53', 'current'),
InstrumentChannel('gpu', 'current'),
InstrumentChannel('sys', 'voltage'),
InstrumentChannel('a57', 'voltage'),
InstrumentChannel('a53', 'voltage'),
InstrumentChannel('gpu', 'voltage'),
InstrumentChannel('sys', 'power'),
InstrumentChannel('a57', 'power'),
InstrumentChannel('a53', 'power'),
InstrumentChannel('gpu', 'power'),
InstrumentChannel('sys', 'energy'),
InstrumentChannel('a57', 'energy'),
InstrumentChannel('a53', 'energy'),
InstrumentChannel('gpu', 'energy'),
]
def __init__(self, target):
super(JunoEnergyInstrument, self).__init__(target)
self.on_target_file = None
self.command = None
self.binary = self.target.bin(self.binname)
for chan in self._channels:
self.channels[chan.name] = chan
self.on_target_file = self.target.tempfile('energy', '.csv')
self.sample_rate_hz = 10 # DEFAULT_PERIOD is 100[ms] in readenergy.c
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
self.command2 = '{}'.format(self.binary)
def setup(self): # pylint: disable=arguments-differ
self.binary = self.target.install(os.path.join(PACKAGE_BIN_DIRECTORY,
self.target.abi, self.binname))
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
self.command2 = '{}'.format(self.binary)
def reset(self, sites=None, kinds=None, channels=None):
super(JunoEnergyInstrument, self).reset(sites, kinds, channels)
self.target.killall(self.binname, as_root=True)
def start(self):
self.target.kick_off(self.command, as_root=True)
def stop(self):
self.target.killall(self.binname, signal='TERM', as_root=True)
# pylint: disable=arguments-differ
def get_data(self, output_file):
temp_file = tempfile.mktemp()
self.target.pull(self.on_target_file, temp_file)
self.target.remove(self.on_target_file)
with csvreader(temp_file) as reader:
headings = next(reader)
# Figure out which columns from the collected csv we actually want
select_columns = []
for chan in self.active_channels:
try:
select_columns.append(headings.index(chan.name))
except ValueError:
raise HostError('Channel "{}" is not in {}'.format(chan.name, temp_file))
with csvwriter(output_file) as writer:
write_headings = ['{}_{}'.format(c.site, c.kind)
for c in self.active_channels]
writer.writerow(write_headings)
for row in reader:
write_row = [row[c] for c in select_columns]
writer.writerow(write_row)
        return MeasurementsCsv(output_file, self.active_channels, sample_rate_hz=self.sample_rate_hz)
def take_measurement(self):
result = []
output = self.target.execute(self.command2).split()
with csvreader(output) as reader:
headings = next(reader)
values = next(reader)
for chan in self.active_channels:
value = values[headings.index(chan.name)]
result.append(Measurement(value, chan))
return result
|
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
from astropy.utils import data
from distutils.version import LooseVersion
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
try:
# Support the Python 3.6 PathLike ABC where possible
from os import PathLike
path_like = (str, PathLike)
except ImportError:
path_like = (str,)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = f'_update_{notification}'
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.currentThread()
single_thread = (threading.activeCount() == 1 and
curr_thread.getName() == 'MainThread')
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until {} is '
'complete!'.format(func.__name__),
AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIter if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.str_)):
ns = np.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize / 4))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.bytes_)):
raise TypeError('string operation on non-string array')
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace('\ufffd', '?')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty calling np.char.decode on it returns an empty float64
            # array, so handle the empty case explicitly here
dt = s.dtype.str.replace('S', 'U')
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
    On Python 3 this also returns True if the given object is a higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because `open()` returns an `io.BufferedReader` by default.
This is bad, because `io.BufferedReader` doesn't support random access,
which we need in some cases. We must call open with buffering=0 to get
a raw random-access file reader.
"""
return open(filename, mode, buffering=0)
def fileobj_name(f):
"""
Returns the 'name' of file-like object f, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, str):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if f is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, str):
return True
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, 'fileobj_mode'):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, 'mode'):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return 'rb'
elif mode == gzip.WRITE:
return 'wb'
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
return mode
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
# copy is needed because np.frombuffer returns a read-only view of the
# underlying buffer
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : `~numpy.ndarray`
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase):
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised,
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order='C'):
fileobj.write(item.tostring())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tostring())
else:
for item in arr.flat:
fileobj.write(item.tostring())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218
return array.view(dtype)
else:
return array.astype(dtype)
def _unsigned_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_unsigned(dtype):
return dtype.kind == 'u' and dtype.itemsize >= 2
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(input, strlen):
"""
Split a long string into parts where each part is no longer
than ``strlen`` and no word is cut into two pieces. But if
there is one single word which is longer than ``strlen``, then
it will be split in the middle of the word.
"""
words = []
nblanks = input.count(' ')
nmax = max(nblanks, len(input) // strlen + 1)
arr = np.frombuffer((input + ' ').encode('utf8'), dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
for idx in range(nmax):
try:
loc = np.nonzero(blank_loc >= strlen + offset)[0][0]
offset = blank_loc[loc - 1] + 1
if loc == 0:
offset = -1
except Exception:
offset = len(input)
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = xoffset + strlen
# collect the pieces in a list
words.append(input[xoffset:offset])
if len(input) == offset:
break
xoffset = offset
return words
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
    If the array has an mmap.mmap at the base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ''
if not isinstance(hdulist, list):
hdulist = [hdulist, ]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = ("Not enough space on disk: requested {}, "
"available {}. ".format(hdulist_size, free_space))
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(
f'io/fits/tests/data/{filename}', 'astropy')
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
    # The following implementation converts the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
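    # Illustrative example (hypothetical data): given
    #     a = np.array([b'ab  ', b'c   '], dtype='S4')
    # _rstrip_inplace(a) replaces the trailing blanks with NULs in place, so the
    # elements then read as b'ab' and b'c' without allocating a full-size mask.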
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = "({},){}u{}".format(dt.itemsize // bpc, dt.byteorder, bpc)
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
|
|
#!/usr/bin/env python2.7
# NOTE THIS NEEDS 2.6 as parser breaks with 2.5 :-)
import warnings
warnings.simplefilter("ignore",DeprecationWarning)
import os, sys, re, urllib2, string, socket
import htmlentitydefs
import mechanize
import html5lib
from html5lib import treebuilders
import lxml.html, lxml.etree
from lxml.cssselect import CSSSelector
socket.setdefaulttimeout(15)
class ParseException(Exception):
pass
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
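# Illustrative examples (assuming the usual entity tables):
#   unescape("a &lt; b") -> "a < b"
#   unescape("&#65;")    -> "A"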
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text).encode('utf-8')
#
# Strip off any institutional proxies we find
#
def canon_url(url):
# print "xxxxx url = %s" % url
m = re.match(r'http://[^/]*sciencedirect.com[^/]*/(science(\?_ob|/article).*$)', url)
if not m:
raise ParseException, "bad source url"
return "http://www.sciencedirect.com/" + m.group(1)
#
# Make up crossref metadata URL (just need the DOI)
#
def crossref_xml_url(doi):
url = "http://doi.crossref.org/openurl/?id=doi:" + doi
url += "&noredirect=true"
# see http://www.crossref.org/help/Content/05_Interfacing_with_the_CrossRef_system/Using_the_Open_URL_Resolver.htm
# key is either "username:password" or "<email>"
key_file = os.environ.get("HOME") + "/.crossref-key"
if os.path.exists(key_file):
f = open(key_file)
key = f.read().strip()
f.close()
url += "&pid=" + key
url += "&format=unixref"
return url
#
# Try, by foul trickery, to get an abstract
# We're looking for HTML like this:
# <div class="articleText" style="display: inline;">
# <h3 class="h3">Abstract</h3>
# <p>An instrumented indentation technique...
#
def scrape_abstract(page):
root = lxml.html.fromstring(page)
#root = lxml.html.fromstring(html_data)
#links_lxml_res = root.cssselect("a.detailsViewLink")
#links_lxml = [link.get("href") for link in links_lxml_res]
#links_lxml = list(set(links_lxml))
abs = []
for div in root.cssselect("div.articleText"):
for h3 in div.cssselect("h3.h3"):
if h3.text and string.lower(h3.text) in ('abstract','summary'):
for p in div.cssselect("p"):
abs.append(p.xpath("string()"))
if len(abs) == 0:
for div in root.cssselect('div.svAbstract'):
for p in div.cssselect("p"):
abs.append(p.xpath("string()"))
if len(abs) == 0:
for div in root.cssselect('#articleContent'):
for p in div.cssselect("div.articleText_indent"):
abs.append(p.xpath("string()"))
abstract = ' '.join(abs)
abstract = re.sub('\n+',' ',abstract)
abstract = re.sub('\s+',' ',abstract)
# print "1================================================================="
# print abstract
# print "2================================================================="
return unescape(abstract)
#
# Just try to fetch the metadata from crossref
#
def handle(url):
cUrl = canon_url(url)
#print "%s => %s" % (url, cUrl)
cookies = mechanize.CookieJar()
browser = mechanize.Browser()
browser.addheaders = [("User-Agent", "Mozilla/5.0 (compatible; citeulike/1.0)"),
("From", "plugins@citeulike.org")]
#browser.add_handler(PrettifyHandler())
browser.set_handle_robots(False)
browser.set_debug_http(False)
browser.set_debug_redirects(False)
browser.open(cUrl)
response = browser.response()
page = response.get_data()
#
# Elsevier insist on user selecting a "preferred source" when the article is
# available. This is normally stored in a cookie.
# If we get directed to the Elsevier "linking hub", find the 1st SD link on the
# and follow that.
# Yeah, I know - rubbish.
#
huburl = browser.geturl()
doi = None
m = re.search(r'linkinghub.elsevier.com/', huburl)
if m:
root = lxml.html.fromstring(page)
inputs = root.cssselect("input")
hrefs = [link.get("value") for link in inputs]
for href in hrefs:
n = re.search('sciencedirect.com',href)
if n:
browser.open(href)
response = browser.response()
page = response.get_data()
break
m = re.search(r'<a(?: id="[^"]+")? href="http://dx.doi.org/([^"]+)"', page)
    # this page might require a login. Luckily there seems to be a
# link "View Abstract" which can take us to a page we can read
if not m and not doi:
root = lxml.html.fromstring(page)
links = root.cssselect("a")
for href in [e.get("href") for e in links]:
if href:
m = re.search(r'http://dx.doi.org/([^"]+)', href)
if m:
break
if False:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup"))
# print page
soup = parser.parse(page)
link = soup.find(text=re.compile(r"view abstract", re.I))
if link:
href = link.parent['href']
browser.open(href)
response = browser.response()
page = response.get_data()
m = re.search(r'<a(?: id="[^"]+")? href="http://dx.doi.org/([^"]+)"', page)
if m:
doi = m.group(1)
else:
root = lxml.html.fromstring(page)
doi_nodes = root.cssselect("#doi")
for n in [e.text for e in doi_nodes]:
if n:
doi = re.sub(r'doi:','',n)
break
if not doi:
m = re.search(r'/doi/(10\.\d\d\d\d)_([^/]+)/', page)
if m:
doi = "%s/%s" % (m.group(1), m.group(2))
if not doi:
m = re.search(r'\'(10\.\d\d\d\d)/([^\']+)\'', page)
if m:
doi = "%s/%s" % (m.group(1), m.group(2))
if not doi:
raise ParseException, "Cannot find DOI in page"
# if not re.search(r'^10[.](1016|1006|1053)/',doi):
# raise ParseException, "Cannot find an Elsevier DOI (10.1006, 10.1016, 10.1053) DOI"
xml_url = crossref_xml_url(doi)
browser.open(xml_url)
response = browser.response()
xml_page = response.get_data()
xml_page = xml_page.decode('utf-8')
# Get rid of extraneous "stars" \u2606. Sometimes at end of title (hopefully
# they're never meant to be "real" elsewhere...)
xml_page = xml_page.replace(u'\u2606',' ')
m = re.search("not found in CrossRef", xml_page)
if m:
raise ParseException, "Unable to locate that DOI (%s) in crossref" % doi
yield "begin_tsv"
yield "use_crossref\t1"
yield "linkout\tDOI\t\t%s\t\t" % doi
abstract = scrape_abstract(page)
# try:
# abstract = scrape_abstract(page)
# except:
# abstract = ''
if abstract:
yield "abstract\t%s" % (abstract)
yield "end_tsv"
yield "status\tok"
if __name__ == "__main__":
url = sys.stdin.readline().strip()
try:
for line in handle(url):
print line.encode("utf-8")
except Exception, e:
import traceback
line = traceback.tb_lineno(sys.exc_info()[2])
print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to bugs@citeulike.org quoting error code %d." % line])
raise
|
|
from django.template import Context, loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail, mail_admins
from django.contrib import messages
from dash.cluster.models import ClusterTemplate, ClusterInstance, UserProfile
from dash.cluster.models import Ec2InstanceType, Disk, AwsCredential
from dash.cluster.forms import ClusterTemplateForm, DiskForm, AwsCredentialForm
from dash.cluster.forms import ClusterUserCreationForm
import dash.ec2utils as ec2utils
from djangotasks.models import Task
import datetime
def register(request):
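"""Render the registration page for the current request's user."""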
user = request.user
return render_to_response('registration/register.html', {
'user': user,
} )
@login_required
def create(request):
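"""On POST, create a ClusterTemplate and its Disk, queue the initial snapshot task and notify the admins; on GET, render the creation form."""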
user = request.user
user_profile = user.get_profile()
if request.method == 'POST':
cluster_form = ClusterTemplateForm(request.POST)
disk_form = DiskForm(request.POST)
if cluster_form.is_valid() and disk_form.is_valid():
# Creating the Cluster Template and Related Activities
if 'is_demo' in request.POST:
is_demo = request.POST['is_demo']
else:
is_demo = False
instance_type = Ec2InstanceType.objects.get(id = request.POST['type_of_nodes'])
cluster_template = ClusterTemplate(
user_profile = user_profile,
name = request.POST['name'],
number_of_nodes = request.POST['number_of_nodes'],
type_of_nodes = instance_type,
creation_time = datetime.datetime.now(),
is_demo = is_demo,
status = 'disk-creating',
user_clustertemplate_id = user_profile.next_user_clustertemplate_id
)
cluster_template.save()
user_profile.next_user_clustertemplate_id += 1
user_profile.save()
disk = Disk(
size = request.POST['size'],
cluster_template = cluster_template,
)
disk.save()
task = Task.objects.task_for_object(
Disk,
disk.id,
'make_initial_snapshot'
)
Task.objects.run_task(task.id)
mail_admins(
"User %s created a cluster" % user.username,
"User %s created a cluster: %s (DEMO: %s)" % (
user.username,
cluster_template.name,
cluster_template.is_demo,
)
)
return HttpResponseRedirect('/')
else:
cluster_form = ClusterTemplateForm( initial = {'is_demo': True} )
disk_form = DiskForm()
return render_to_response(
'create.html',
{
'cluster_form': cluster_form,
'disk_form': disk_form,
'user': user,
},
context_instance=RequestContext(request)
)
@login_required
def launch(request, user_clustertemplate_id):
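"""Launch a ClusterInstance for the given template unless it is already starting, stopping or running."""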
user = request.user
if request.method == 'GET':
# FIXME: Switch to POST
# Clusters should only launch if its a POST request
user_profile = user.get_profile()
cluster_template = get_object_or_404(ClusterTemplate,
user_profile__exact = user_profile,
user_clustertemplate_id__exact = user_clustertemplate_id,
)
if cluster_template.status in ('starting', 'stopping', 'running'):
error_message = 'This cluster is already running. Only one cluster may run at a time. Create a new cluster and launch it if necessary.'
return render_to_response('launch_error.html', {
'user': user,
'cluster_template': cluster_template,
'error_message': error_message,
} )
else:
cluster_template.status = 'starting'
cluster_template.save()
cluster_instance = ClusterInstance(
cluster_template = cluster_template,
launch_time = datetime.datetime.now()
)
cluster_instance.save()
task = Task.objects.task_for_object( ClusterInstance, cluster_instance.id, 'launch' )
Task.objects.run_task(task.id)
mail_admins(
"User %s launched a cluster" % user.username,
"User %s launched a cluster: %s (DEMO: %s)" % (
user.username,
cluster_template.name,
cluster_template.is_demo,
)
)
return render_to_response(
'launch.html',
{
'user': user,
'cluster_template': cluster_template,
'cluster_instance': cluster_instance,
},
context_instance=RequestContext(request)
)
@login_required
def terminate(request, user_clustertemplate_id):
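"""Terminate the running instance of the given cluster template and queue its 'terminate' task."""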
user = request.user
user_profile = user.get_profile()
cluster_template = get_object_or_404(ClusterTemplate,
user_profile__exact = user_profile,
user_clustertemplate_id__exact = user_clustertemplate_id,
)
for instance in cluster_template.clusterinstance_set.all():
if instance.is_running:
cluster_instance = instance
cluster_instance.termination_time = datetime.datetime.now()
cluster_instance.save()
cluster_template.status = 'stopping'
cluster_template.save()
task = Task.objects.task_for_object(
ClusterInstance,
cluster_instance.id,
'terminate'
)
Task.objects.run_task(task.id)
mail_admins(
"User %s terminated a cluster" % user.username,
"User %s terminated a cluster" % user.username
)
return render_to_response(
'terminate.html',
{
'user': user,
'cluster_instance': cluster_instance
},
context_instance=RequestContext(request)
)
@login_required
def archive(request, user_clustertemplate_id):
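"""Mark the given cluster template as archived and return to the cluster listing."""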
user = request.user
if request.method == 'GET':
# FIXME: Switch to POST
# Clusters should only be archived if its a POST request
user_profile = user.get_profile()
cluster_template = get_object_or_404(ClusterTemplate,
user_profile__exact = user_profile,
user_clustertemplate_id__exact = user_clustertemplate_id,
)
cluster_template.archived = True
cluster_template.save()
messages.success(request, 'Cluster Archived.')
return redirect('/cluster/')
@login_required
def unarchive(request, user_clustertemplate_id):
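"""Clear the archived flag on the given cluster template and return to the archived listing."""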
user = request.user
if request.method == 'GET':
# FIXME: Switch to POST
# Clusters should only be unarchived if its a POST request
user_profile = user.get_profile()
cluster_template = get_object_or_404(ClusterTemplate,
user_profile__exact = user_profile,
user_clustertemplate_id__exact = user_clustertemplate_id,
)
cluster_template.archived = False
cluster_template.save()
messages.success(request, 'Cluster Removed From Archive.')
return redirect(archived)
@login_required
def archived(request):
"""The Archived Cluster Listing"""
user = request.user
user_profile = user.get_profile()
cluster_templates = list(ClusterTemplate.objects.filter(
user_profile = user_profile,
archived = True,
))
return render_to_response('archived.html', {
'user': user,
'cluster_templates': cluster_templates,
},
context_instance=RequestContext(request)
)
@login_required
def account(request):
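"""Render the account overview page for the current user."""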
user = request.user
return render_to_response(
'account.html',
{
'user': user,
})
@login_required
def ssh_key(request):
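"""Serve the user's stored AWS SSH key as a plain-text attachment."""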
user = request.user
ssh_key = user.userprofile_set.all()[0].awscredential.ssh_key
response = HttpResponse(ssh_key, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=crunch_io_master.rsa'
return response
@login_required
def history(request, user_clustertemplate_id):
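"""Render the history page for one of the user's cluster templates."""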
user = request.user
user_profile = user.get_profile()
cluster_template = get_object_or_404(ClusterTemplate,
user_profile__exact = user_profile,
user_clustertemplate_id__exact = user_clustertemplate_id,
)
return render_to_response('history.html', {
'user': user,
'cluster_template': cluster_template,
} )
@login_required
def dash(request):
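"""Render the main dashboard with the user's unarchived cluster templates and all cluster instances."""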
user = request.user
user_profile = user.get_profile()
cluster_templates = list(ClusterTemplate.objects.filter(
user_profile = user_profile,
archived = False,
))
cluster_instances = list(ClusterInstance.objects.all())
return render_to_response('dash.html', {
'user': user,
'cluster_templates': cluster_templates,
'cluster_instances': cluster_instances,
'running_states': ['starting', 'stopping', 'running'],
},
context_instance=RequestContext(request)
)
def account_create(request):
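"""Handle signup: create an inactive User with profile and AWS credentials, notify the admins and show a confirmation page; on GET, render the signup form."""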
if request.method == 'POST':
user_form = ClusterUserCreationForm(request.POST)
aws_credential_form = AwsCredentialForm(request.POST)
if user_form.is_valid() and aws_credential_form.is_valid():
user_data = user_form.cleaned_data
aws_data = aws_credential_form.cleaned_data
ssh_key_needed = False
ssh_key = ec2utils.create_ssh_key(
aws_data['aws_key_id'],
aws_data['aws_secret_key'],
'crunch-master'
)
if ssh_key == None:
ssh_key = 'Please paste your SSH key here'
ssh_key_needed = True
user = User(
username = user_data['username'],
email = user_data['email_address'],
)
user.set_password(user_data['password1'])
user.is_active = False
user.save()
user_profile = UserProfile(
user = user,
credit = '0.00',
next_user_clustertemplate_id = 1,
)
user_profile.save()
aws_credentials = AwsCredential(
user_profile = user_profile,
aws_user_id = aws_data['aws_user_id'],
ssh_key = ssh_key,
aws_key_id = aws_data['aws_key_id'],
aws_secret_key = aws_data['aws_secret_key'],
)
aws_credentials.save()
mail_admins(
"User Account %s Created for %s" % (
user_data['username'],
user_data['email_address']
),
"""The following user account has been created:
username: %s
email: %s
ssh_key_needed: %s
""" % (
user_data['username'],
user_data['email_address'],
ssh_key_needed,
)
)
return render_to_response(
'account_created.html',
{
'ssh_key_needed': ssh_key_needed,
'email': user_data['email_address'],
},
context_instance=RequestContext(request)
)
else:
user_form = ClusterUserCreationForm()
aws_credential_form = AwsCredentialForm()
return render_to_response(
'account_create.html',
{
'user_form': user_form,
'aws_credential_form': aws_credential_form,
},
context_instance=RequestContext(request)
)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
tfd = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.cached_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.cached_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.cached_session():
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.cached_session():
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.cached_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
# unknown values, ie, Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testNameScopeWorksCorrectly(self):
x = tfd.Normal(loc=0., scale=1., name="x")
x_duplicate = tfd.Normal(loc=0., scale=1., name="x")
with ops.name_scope("y") as name:
y = tfd.Bernoulli(logits=0., name=name)
x_sample = x.sample(name="custom_sample")
x_sample_duplicate = x.sample(name="custom_sample")
x_log_prob = x.log_prob(0., name="custom_log_prob")
x_duplicate_sample = x_duplicate.sample(name="custom_sample")
self.assertEqual(x.name, "x/")
self.assertEqual(x_duplicate.name, "x_1/")
self.assertEqual(y.name, "y/")
self.assertTrue(x_sample.name.startswith("x/custom_sample"))
self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))
self.assertTrue(x_log_prob.name.startswith("x/custom_log_prob"))
self.assertTrue(x_duplicate_sample.name.startswith(
"x_1/custom_sample"))
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("tfp.distributions.Normal("
"\"Normal/\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)"), # Got the dtype right.
str(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("tfp.distributions.Chi2("
"\"silly/\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)"),
str(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("tfp.distributions.Exponential(\"Exponential/\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)"),
str(exp))
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN/\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)"),
str(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
def testReprWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("<tfp.distributions.Normal"
" 'Normal/'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>"), # Got the dtype right.
repr(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("<tfp.distributions.Chi2"
" 'silly/'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>"),
repr(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("<tfp.distributions.Exponential"
" 'Exponential/'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>"),
repr(exp))
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN/'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>"),
repr(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
if __name__ == "__main__":
test.main()
|
|
"""Test Home Assistant package util methods."""
import asyncio
import logging
import os
import sys
from subprocess import PIPE
from unittest.mock import MagicMock, call, patch
import pkg_resources
import pytest
import homeassistant.util.package as package
RESOURCE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'resources'))
TEST_EXIST_REQ = 'pip>=7.0.0'
TEST_NEW_REQ = 'pyhelloworld3==1.0.0'
TEST_ZIP_REQ = 'file://{}#{}' \
.format(os.path.join(RESOURCE_DIR, 'pyhelloworld3.zip'), TEST_NEW_REQ)
@pytest.fixture
def mock_sys():
"""Mock sys."""
with patch('homeassistant.util.package.sys', spec=object) as sys_mock:
sys_mock.executable = 'python3'
yield sys_mock
@pytest.fixture
def mock_exists():
"""Mock check_package_exists."""
with patch('homeassistant.util.package.check_package_exists') as mock:
mock.return_value = False
yield mock
@pytest.fixture
def deps_dir():
"""Return path to deps directory."""
return os.path.abspath('/deps_dir')
@pytest.fixture
def lib_dir(deps_dir):
"""Return path to lib directory."""
return os.path.join(deps_dir, 'lib_dir')
@pytest.fixture
def mock_popen(lib_dir):
"""Return a Popen mock."""
with patch('homeassistant.util.package.Popen') as popen_mock:
popen_mock.return_value.communicate.return_value = (
bytes(lib_dir, 'utf-8'), b'error')
popen_mock.return_value.returncode = 0
yield popen_mock
@pytest.fixture
def mock_env_copy():
"""Mock os.environ.copy."""
with patch('homeassistant.util.package.os.environ.copy') as env_copy:
env_copy.return_value = {}
yield env_copy
@pytest.fixture
def mock_venv():
"""Mock homeassistant.util.package.running_under_virtualenv."""
with patch('homeassistant.util.package.running_under_virtualenv') as mock:
mock.return_value = True
yield mock
@asyncio.coroutine
def mock_async_subprocess():
"""Return an async Popen mock."""
async_popen = MagicMock()
@asyncio.coroutine
def communicate(input=None):
"""Communicate mock."""
stdout = bytes('/deps_dir/lib_dir', 'utf-8')
return (stdout, None)
async_popen.communicate = communicate
return async_popen
def test_install_existing_package(mock_exists, mock_popen):
"""Test an install attempt on an existing package."""
mock_exists.return_value = True
assert package.install_package(TEST_EXIST_REQ)
assert mock_exists.call_count == 1
assert mock_exists.call_args == call(TEST_EXIST_REQ)
assert mock_popen.return_value.communicate.call_count == 0
def test_install(mock_sys, mock_exists, mock_popen, mock_env_copy, mock_venv):
"""Test an install attempt on a package that doesn't exist."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ, False)
assert mock_exists.call_count == 1
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_upgrade(
mock_sys, mock_exists, mock_popen, mock_env_copy, mock_venv):
"""Test an upgrade attempt on a package."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ)
assert mock_exists.call_count == 1
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--upgrade'
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target(
mock_sys, mock_exists, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target."""
target = 'target_folder'
env = mock_env_copy()
env['PYTHONUSERBASE'] = os.path.abspath(target)
mock_venv.return_value = False
mock_sys.platform = 'linux'
args = [
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--user', '--prefix=']
assert package.install_package(TEST_NEW_REQ, False, target=target)
assert mock_exists.call_count == 1
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target_venv(
mock_sys, mock_exists, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target in a virtual environment."""
target = 'target_folder'
with pytest.raises(AssertionError):
package.install_package(TEST_NEW_REQ, False, target=target)
def test_install_error(caplog, mock_sys, mock_exists, mock_popen, mock_venv):
"""Test an install that fails and logs an error."""
caplog.set_level(logging.WARNING)
mock_popen.return_value.returncode = 1
assert not package.install_package(TEST_NEW_REQ)
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == 'ERROR'
def test_install_constraint(
mock_sys, mock_exists, mock_popen, mock_env_copy, mock_venv):
"""Test install with constraint file on not installed package."""
env = mock_env_copy()
constraints = 'constraints_file.txt'
assert package.install_package(
TEST_NEW_REQ, False, constraints=constraints)
assert mock_exists.call_count == 1
assert mock_popen.call_count == 1
assert (
mock_popen.call_args ==
call([
mock_sys.executable, '-m', 'pip', 'install', '--quiet',
TEST_NEW_REQ, '--constraint', constraints
], stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
)
assert mock_popen.return_value.communicate.call_count == 1
def test_check_package_global():
"""Test for an installed package."""
installed_package = list(pkg_resources.working_set)[0].project_name
assert package.check_package_exists(installed_package)
def test_check_package_zip():
"""Test for an installed zip package."""
assert not package.check_package_exists(TEST_ZIP_REQ)
def test_get_user_site(deps_dir, lib_dir, mock_popen, mock_env_copy):
"""Test get user site directory."""
env = mock_env_copy()
env['PYTHONUSERBASE'] = os.path.abspath(deps_dir)
args = [sys.executable, '-m', 'site', '--user-site']
ret = package.get_user_site(deps_dir)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
assert ret == lib_dir
@asyncio.coroutine
def test_async_get_user_site(hass, mock_env_copy):
"""Test async get user site directory."""
deps_dir = '/deps_dir'
env = mock_env_copy()
env['PYTHONUSERBASE'] = os.path.abspath(deps_dir)
args = [sys.executable, '-m', 'site', '--user-site']
with patch('homeassistant.util.package.asyncio.create_subprocess_exec',
return_value=mock_async_subprocess()) as popen_mock:
ret = yield from package.async_get_user_site(deps_dir, hass.loop)
assert popen_mock.call_count == 1
assert popen_mock.call_args == call(
*args, loop=hass.loop, stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.DEVNULL,
env=env)
assert ret == os.path.join(deps_dir, 'lib_dir')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
import subprocess
from commands.system_commands import *
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
COMMANDS_AVAILABLES = ["get_hostname", "get_kernel_version"]
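# Only these request names are handed to SystemCommands by
# _handle_command_complete; anything else is silently ignored.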
class ClientBot(sleekxmpp.ClientXMPP):
"""
A ClientBot that provides adhoc commands.
"""
def __init__(self, jid, password):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
# listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
# We add the command after session_start has fired
# to ensure that the correct full JID is used.
# If using a component, may also pass jid keyword parameter.
self['xep_0050'].add_command(node='client',
name='API',
handler=self._handle_command)
def _handle_command(self, iq, session):
"""
Respond to the initial request for an API request.
Arguments:
iq -- The iq stanza containing the command request.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
form = self['xep_0004'].makeForm('form', 'API')
form['instructions'] = 'Send an API request to a JID'
form.addField(var='request',
ftype='text-single',
label='Your request')
session['payload'] = form
session['next'] = self._handle_command_complete
session['has_next'] = False
# Other useful session values:
# session['to'] -- The JID that received the
# command request.
# session['from'] -- The JID that sent the
# command request.
# session['has_next'] = True -- There are more steps to complete
# session['allow_complete'] = True -- Allow user to finish immediately
# and possibly skip steps
# session['cancel'] = handler -- Assign a handler for if the user
# cancels the command.
# session['notes'] = [ -- Add informative notes about the
# ('info', 'Info message'), command's results.
# ('warning', 'Warning message'),
# ('error', 'Error message')]
return session
def _handle_command_complete(self, payload, session):
"""
Process a command result from the user.
Arguments:
payload -- Either a single item, such as a form, or a list
of items or forms if more than one form was
provided to the user. The payload may be any
stanza, such as jabber:x:oob for out of band
data, or jabber:x:data for typical data forms.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
# In this case (as is typical), the payload is a form
form = payload
request = form['values']['request']
if request in COMMANDS_AVAILABLES:
sc = SystemCommands(request)
sc.execute_command()
self.send_message(mto=session['from'],
mbody="Your request %s has been executed!" % request,
mtype='chat')
# Having no return statement is the same as unsetting the 'payload'
# and 'next' session values and returning the session.
# Unless it is the final step, always return the session dictionary.
session['payload'] = None
session['next'] = None
return session
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
# Setup the CommandBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = ClientBot(opts.jid, opts.password)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0050') # Adhoc Commands
xmpp.register_plugin('xep_0199', {'keepalive': True, 'frequency':15})
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Additional tests specific for query parameters
#
from __future__ import absolute_import
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
import pytest
from impala.dbapi.interface import _bind_parameters, ProgrammingError
@pytest.mark.dbapi_query_parameters
class ImpalaDBAPIQueryParameters(unittest.TestCase):
def dt(self, expected, query, params):
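"""Helper: bind params into query and assert the result equals expected."""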
result = _bind_parameters(query, params)
self.assertEqual(expected, result)
def test_pyformat(self):
# technically these tests shouldn't need the full sql query
# syntax, but it makes it easier to show how the formats are
# used
self.dt("select * from test where int = 1",
"select * from test where int = %(int)s",
{'int': 1})
self.dt("select * from test where str = 'foo'",
"select * from test where str = %(str)s",
{'str': "foo"})
self.dt("select * from test where flt = 0.123",
"select * from test where flt = %(flt)s",
{'flt': 0.123})
self.dt("select * from test where nul = NULL",
"select * from test where nul = %(nul)s",
{'nul': None})
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = %(int)s and str = " +
"%(str)s and flt = %(flt)s and nul = %(nul)s",
{'int': 1, 'str': "foo", 'flt': 0.123, 'nul': None})
# Make sure parameters are not replaced twice
self.dt("select * from test where a=':b' and b=':c' and c=':a'",
"select * from test where a=%(a)s and b=%(b)s and c=%(c)s",
{'a': ":b", 'b': ":c", 'c': ":a"})
# Unused parameters should be fine
self.dt("select * from test where a=1",
"select * from test where a=1",
{'unused': 3})
# But nonexistent should not
with self.assertRaises(KeyError):
self.dt("select * from test where int = 1",
"select * from test where int = %(nosuchkeyword)s",
{'wrong': 1})
def test_named(self):
self.dt("select * from test where int = 1",
"select * from test where int = :int",
{'int': 1})
self.dt("select * from test where str = 'foo'",
"select * from test where str = :str",
{'str': "foo"})
self.dt("select * from test where flt = 0.123",
"select * from test where flt = :flt",
{'flt': 0.123})
self.dt("select * from test where nul = NULL",
"select * from test where nul = :nul",
{'nul': None})
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = :int and str = " +
":str and flt = :flt and nul = :nul",
{'int': 1, 'str': "foo", 'flt': 0.123, 'nul': None})
# Characters around keywords
self.dt("select * from test where int=(1) and str='foo' and " +
"flt=0.123 and nul=NULL",
"select * from test where int=(:int) and str=" +
":str and flt=:flt and nul=:nul",
{'int': 1, 'str': "foo", 'flt': 0.123, 'nul': None})
# Partially overlapping names
self.dt("select * from test where a=1 and b=2 and c=3",
"select * from test where a=:f and b=:fo and c=:foo",
{'f': 1, 'fo': 2, 'foo': 3})
self.dt("select * from test where a=1 and b=2 and c=3",
"select * from test where a=:foo and b=:fo and c=:f",
{'foo': 1, 'fo': 2, 'f': 3})
# Make sure parameters are not replaced twice
self.dt("select * from test where a=':b' and b=':c' and c=':a'",
"select * from test where a=:a and b=:b and c=:c",
{'a': ":b", 'b': ":c", 'c': ":a"})
with self.assertRaises(KeyError):
self.dt("select * from test where int = 1",
"select * from test where int = :nosuchkeyword",
{'wrong': 1})
def test_numeric(self):
self.dt("select * from test where int = 1",
"select * from test where int = :1",
[1])
self.dt("select * from test where str = 'foo'",
"select * from test where str = :1",
["foo"])
self.dt("select * from test where flt = 0.123",
"select * from test where flt = :1",
[0.123])
self.dt("select * from test where nul = NULL",
"select * from test where nul = :1",
[None])
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = :1 and str = " +
":2 and flt = :3 and nul = :4",
[1, "foo", 0.123, None])
# reverse list
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = :4 and str = " +
":3 and flt = :2 and nul = :1",
[None, 0.123, "foo", 1])
# characters around them
self.dt("select * from test where int=1 and str='foo' and " +
"flt=(0.123) and nul=NULL",
"select * from test where int=:1 and str=" +
":2 and flt=(:3) and nul=:4",
[1, "foo", 0.123, None])
# tuple instead of list
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = :1 and str = " +
":2 and flt = :3 and nul = :4",
(1, "foo", 0.123, None))
# more than 9
self.dt("select * from test where a=1 and b=2 and c=3 and d=4 "+
"and e=5 and f=6 and g=7 and h=8 and i=9 and j=10",
"select * from test where a=:1 and b=:2 and c=:3 and d=:4 "+
"and e=:5 and f=:6 and g=:7 and h=:8 and i=:9 and j=:10",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
self.dt("select * from test where a='a' and b='b' and c='c' "+
"and d='d' and e='e' and f='f' and g='g' and h='h' "+
"and i='i' and j='j' and k='k'",
"select * from test where a=:1 and b=:2 and c=:3 and "+
"d=:4 and e=:5 and f=:6 and g=:7 and h=:8 and i=:9 and "+
"j=:10 and k=:11",
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k'])
def test_qmark(self):
self.dt("select * from test where int = 1",
"select * from test where int = ?",
[1])
self.dt("select * from test where str = 'foo'",
"select * from test where str = ?",
["foo"])
self.dt("select * from test where flt = 0.123",
"select * from test where flt = ?",
[0.123])
self.dt("select * from test where nul = NULL",
"select * from test where nul = ?",
[None])
self.dt("select * from test where int = 1 and str = 'foo' and " +
"flt = 0.123 and nul = NULL",
"select * from test where int = ? and str = " +
"? and flt = ? and nul = ?",
[1, "foo", 0.123, None])
# no spaces around =
# characters around them
self.dt("select * from test where int=1 and str='foo' and " +
"flt=(0.123) and nul=NULL",
"select * from test where int=? and str=" +
"? and flt=(?) and nul=?",
[1, "foo", 0.123, None])
# tuple instead of list
self.dt("select * from test where int=1 and str='foo' and " +
"flt=0.123 and nul=NULL",
"select * from test where int=? and str=" +
"? and flt=? and nul=?",
(1, "foo", 0.123, None))
# bad number of bindings
for q in [
("select * from test where int = ?", []),
("select * from test where int = ? or int = ?", [1]),
("select * from test where int = ?", [1, 2]),
("select * from test where int = ? or int = ? or int = ?",
[1, 2, 3, 4]),
]:
with self.assertRaises(ProgrammingError):
self.dt("should have raised exception", q[0], q[1])
def test_bad_argument_type(self):
self.assertRaises(ProgrammingError, _bind_parameters,
"select * from test", 1)
self.assertRaises(ProgrammingError, _bind_parameters,
"select * from test", "a")
|
|
#!/usr/bin/env python
"""
release command tests
"""
import os
import unittest
import tempfile
import mock
from cirrus.release import new_release
from cirrus.release import upload_release
from cirrus.release import build_release
from cirrus.release import cleanup_release
from cirrus.release import artifact_name
from cirrus.configuration import Configuration
from cirrus._2to3 import to_str
from pluggage.errors import FactoryError
from .harnesses import CirrusConfigurationHarness, write_cirrus_conf
class ReleaseNewCommandTest(unittest.TestCase):
"""
Test Case for new_release function
"""
def setUp(self):
"""set up test files"""
self.dir = to_str(tempfile.mkdtemp())
self.config = os.path.join(self.dir, 'cirrus.conf')
write_cirrus_conf(self.config,
**{
'package': {'name': 'cirrus_unittest', 'version': '1.2.3'},
'gitflow': {'develop_branch': 'develop', 'release_branch_prefix': 'release/'},
}
)
self.harness = CirrusConfigurationHarness('cirrus.release.load_configuration', self.config)
self.harness.setUp()
self.harness_utils = CirrusConfigurationHarness('cirrus.release_utils.load_configuration', self.config)
self.harness_utils.setUp()
self.patch_pull = mock.patch('cirrus.release.checkout_and_pull')
self.patch_branch = mock.patch('cirrus.release.branch')
self.patch_commit = mock.patch('cirrus.release.commit_files_optional_push')
self.mock_pull = self.patch_pull.start()
self.mock_branch = self.patch_branch.start()
self.mock_commit = self.patch_commit.start()
def tearDown(self):
self.patch_pull.stop()
self.patch_branch.stop()
self.patch_commit.stop()
self.harness.tearDown()
self.harness_utils.tearDown()
if os.path.exists(self.dir):
os.system('rm -rf {0}'.format(self.dir))
@mock.patch('cirrus.release.has_unstaged_changes')
def test_new_release(self, mock_unstaged):
"""
_test_new_release_
"""
mock_unstaged.return_value = False
opts = mock.Mock()
opts.micro = True
opts.major = False
opts.minor = False
opts.nightly = False
opts.bump = None
opts.skip_existing = False
# should create a new micro release, editing
# the cirrus config in the test dir
new_release(opts)
# verify new version
new_conf = Configuration(self.config)
new_conf.load()
self.assertEqual(new_conf.package_version(), '1.2.4')
self.failUnless(self.mock_pull.called)
self.assertEqual(self.mock_pull.call_args[0][1], 'develop')
self.failUnless(self.mock_branch.called)
self.assertEqual(self.mock_branch.call_args[0][1], 'release/1.2.4')
self.failUnless(self.mock_commit.called)
self.assertEqual(self.mock_commit.call_args[0][2], False)
self.assertEqual(self.mock_commit.call_args[0][3], 'cirrus.conf')
@mock.patch('cirrus.release.has_unstaged_changes')
@mock.patch('cirrus.release.unmerged_releases')
def test_new_release_skip_existing(self, mock_unmerged, mock_unstaged):
"""
_test_new_release_skip_existing_
"""
mock_unstaged.return_value = False
mock_unmerged.return_value = ['1.2.4']
opts = mock.Mock()
opts.micro = True
opts.major = False
opts.minor = False
opts.nightly = False
opts.bump = None
opts.skip_existing = True
# should create a new micro release, editing
# the cirrus config in the test dir
new_release(opts)
# verify new version
new_conf = Configuration(self.config)
new_conf.load()
self.assertEqual(new_conf.package_version(), '1.2.5')
self.failUnless(self.mock_pull.called)
self.assertEqual(self.mock_pull.call_args[0][1], 'develop')
self.failUnless(self.mock_branch.called)
self.assertEqual(self.mock_branch.call_args[0][1], 'release/1.2.5')
self.failUnless(self.mock_commit.called)
self.assertEqual(self.mock_commit.call_args[0][2], False)
self.assertEqual(self.mock_commit.call_args[0][3], 'cirrus.conf')
@mock.patch('cirrus.release.has_unstaged_changes')
@mock.patch('cirrus.release_utils.datetime')
def test_new_nightly_release(self, mock_dt, mock_unstaged):
"""
_test_new_nightly_release_
"""
mock_ts = mock.Mock()
mock_ts.strftime = mock.Mock(return_value="TIMESTAMP")
mock_now = mock.Mock(return_value=mock_ts)
mock_dt.datetime=mock.Mock()
mock_dt.datetime.now = mock_now
mock_unstaged.return_value = False
opts = mock.Mock()
opts.micro = False
opts.major = False
opts.minor = False
opts.nightly = True
opts.bump = None
opts.skip_existing = False
# should create a new nightly release, editing
# the cirrus config in the test dir
new_release(opts)
# verify new version
new_conf = Configuration(self.config)
new_conf.load()
self.assertEqual(new_conf.package_version(), '1.2.3-nightly-TIMESTAMP')
self.failUnless(self.mock_pull.called)
self.assertEqual(self.mock_pull.call_args[0][1], 'develop')
self.failUnless(self.mock_branch.called)
self.assertEqual(self.mock_branch.call_args[0][1], 'release/1.2.3-nightly-TIMESTAMP')
self.failUnless(self.mock_commit.called)
self.assertEqual(self.mock_commit.call_args[0][2], False)
self.assertEqual(self.mock_commit.call_args[0][3], 'cirrus.conf')
@mock.patch('cirrus.release.has_unstaged_changes')
@mock.patch('cirrus.release.bump_package')
def test_new_release_bump(self, mock_bump, mock_unstaged):
"""
_test_new_release_bump_
"""
mock_unstaged.return_value = False
opts = mock.Mock()
opts.micro = True
opts.major = False
opts.minor = False
opts.nightly = False
opts.skip_existing = False
opts.bump = [['womp', '1.2.3'], ['wibble', '3.4.5']]
# should create a new micro release, editing
# the cirrus config in the test dir
new_release(opts)
# verify new version
new_conf = Configuration(self.config)
new_conf.load()
self.assertEqual(new_conf.package_version(), '1.2.4')
self.failUnless(self.mock_pull.called)
self.assertEqual(self.mock_pull.call_args[0][1], 'develop')
self.failUnless(self.mock_branch.called)
self.assertEqual(self.mock_branch.call_args[0][1], 'release/1.2.4')
self.failUnless(self.mock_commit.called)
self.assertEqual(self.mock_commit.call_args[0][2], False)
self.assertEqual(self.mock_commit.call_args[0][3], 'cirrus.conf')
self.assertEqual(mock_bump.call_count, 2)
@mock.patch('cirrus.release.has_unstaged_changes')
def test_new_release_unstaged(self, mock_unstaged):
"""
test new release fails on unstaged changes
"""
mock_unstaged.return_value = True
opts = mock.Mock()
opts.micro = True
opts.major = False
opts.minor = False
opts.nightly = False
opts.bump = None
opts.skip_existing = False
self.assertRaises(RuntimeError, new_release, opts)
class ReleaseCleanupCommandTest(unittest.TestCase):
"""
Test Case for cleanup function
"""
def setUp(self):
"""set up test files"""
self.dir = tempfile.mkdtemp()
self.config = os.path.join(self.dir, 'cirrus.conf')
write_cirrus_conf(self.config,
**{
'package': {'name': 'cirrus_unittest', 'version': '1.2.3'},
'gitflow': {'develop_branch': 'develop', 'release_branch_prefix': 'release/'},
}
)
self.harness = CirrusConfigurationHarness('cirrus.release.load_configuration', self.config)
self.harness.setUp()
self.patch_ghc = mock.patch('cirrus.release.GitHubContext')
self.mock_ghc = self.patch_ghc.start()
self.mock_ctx = mock.Mock()
self.mock_instance = mock.Mock()
self.mock_instance.delete_branch = mock.Mock()
self.mock_ctx.__enter__ = mock.Mock(return_value=self.mock_instance)
self.mock_ctx.__exit__ = mock.Mock()
self.mock_ghc.return_value=self.mock_ctx
def tearDown(self):
self.patch_ghc.stop()
self.harness.tearDown()
if os.path.exists(self.dir):
os.system('rm -rf {0}'.format(self.dir))
def test_cleanup_command(self):
"""test cleanup"""
opts = mock.Mock()
opts.no_remote = False
opts.version = None
cleanup_release(opts)
self.failUnless(self.mock_ghc.called)
self.failUnless(self.mock_instance.delete_branch.called)
self.mock_instance.delete_branch.assert_has_calls([mock.call('release/1.2.3', True)])
self.mock_instance.reset_mock()
opts.no_remote = True
opts.version = '4.5.6'
cleanup_release(opts)
self.failUnless(self.mock_instance.delete_branch.called)
self.mock_instance.delete_branch.assert_has_calls([mock.call('release/4.5.6', False)])
self.mock_instance.reset_mock()
opts.no_remote = False
opts.version = 'release/7.8.9'
cleanup_release(opts)
self.failUnless(self.mock_instance.delete_branch.called)
self.mock_instance.delete_branch.assert_has_calls([mock.call('release/7.8.9', True)])
class ReleaseBuildCommandTest(unittest.TestCase):
"""
test case for cirrus release build command
"""
def setUp(self):
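"""set up test files"""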
self.dir = tempfile.mkdtemp()
self.config = os.path.join(self.dir, 'cirrus.conf')
write_cirrus_conf(self.config,
**{
'package': {'name': 'cirrus_unittest', 'version': '1.2.3'},
'gitflow': {'develop_branch': 'develop', 'release_branch_prefix': 'release/'}
}
)
self.harness = CirrusConfigurationHarness('cirrus.release.load_configuration', self.config)
self.harness.setUp()
self.patch_local = mock.patch('cirrus.release.local')
self.mock_local = self.patch_local.start()
def tearDown(self):
self.harness.tearDown()
self.patch_local.stop()
def test_build_command_raises(self):
"""should raise when build artifact is not present"""
opts = mock.Mock()
self.assertRaises(RuntimeError, build_release, opts)
def test_build_command(self):
"""test calling build; needs os.path.exists mocks since we aren't actually building"""
with mock.patch('cirrus.release.os') as mock_os:
mock_os.path = mock.Mock()
mock_os.path.exists = mock.Mock()
mock_os.path.exists.return_value = True
mock_os.path.join = mock.Mock()
mock_os.path.join.return_value = 'build_artifact'
opts = mock.Mock()
result = build_release(opts)
self.assertEqual(result, 'build_artifact')
self.failUnless(mock_os.path.exists.called)
self.assertEqual(mock_os.path.exists.call_args[0][0], 'build_artifact')
self.failUnless(self.mock_local.called)
self.assertEqual(self.mock_local.call_args[0][0], 'python setup.py sdist')
class ReleaseUploadTest(unittest.TestCase):
"""unittest coverage for upload command using plugins"""
def setUp(self):
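"""set up test files"""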
self.dir = tempfile.mkdtemp()
self.config = os.path.join(self.dir, 'cirrus.conf')
write_cirrus_conf(self.config,
**{
'package' :{'name': 'cirrus_unittest', 'version': '1.2.3'},
'github': {'develop_branch': 'develop', 'release_branch_prefix': 'release/'},
'pypi': {
'pypi_upload_path': '/opt/pypi',
'pypi_url': 'pypi.cloudant.com',
'pypi_username': 'steve',
'pypi_ssh_key': 'steves_creds'
}
}
)
self.harness = CirrusConfigurationHarness('cirrus.release.load_configuration', self.config)
self.harness.setUp()
self.artifact_name = artifact_name(self.harness.config)
def tearDown(self):
self.harness.tearDown()
def test_missing_build_artifact(self):
"""test throws if build artifact not found"""
opts = mock.Mock()
self.assertRaises(RuntimeError, upload_release, opts)
@mock.patch('cirrus.release.os.path.exists')
@mock.patch('cirrus.release.get_plugin')
def test_upload_plugin(self, mock_plugin, mock_exists):
"""test call with well behaved plugin"""
plugin = mock.Mock()
plugin.upload = mock.Mock()
mock_exists.return_value = True
mock_plugin.return_value = plugin
opts = mock.Mock()
opts.plugin = 'pypi'
opts.test = False
upload_release(opts)
self.failUnless(plugin.upload.called)
plugin.upload.assert_has_calls(
[mock.call(opts, self.artifact_name)]
)
@mock.patch('cirrus.release.os.path.exists')
@mock.patch('cirrus.release.get_plugin')
def test_upload_plugin_test_mode(self, mock_plugin, mock_exists):
plugin = mock.Mock()
plugin.upload = mock.Mock()
mock_exists.return_value = True
mock_plugin.return_value = plugin
opts = mock.Mock()
opts.plugin = 'pypi'
opts.test = True
upload_release(opts)
self.failUnless(not plugin.upload.called)
@mock.patch('cirrus.release.os.path.exists')
def test_upload_bad_plugin(self, mock_exists):
"""test with missing plugin"""
mock_exists.return_value = True
opts = mock.Mock()
opts.plugin = 'womp'
opts.test = True
self.assertRaises(FactoryError, upload_release, opts)
if __name__ == '__main__':
unittest.main()
|
|
'''
Created on Jul 28, 2012
@author: rafaelolaechea
'''
from xml_parser_helper import load_xml_model
from xml.etree import ElementTree
import subprocess
import re
import collections
_namespaces = {'c1': 'http://clafer.org/ir',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
class SPL_ClaferAnalyzer(object):
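"""Wraps the Clafer XML IR of a software product line feature model and provides helpers for querying features, their attributes and constraint values."""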
def __init__(self, feature_model_filename, load_from_xml_file=True):
if load_from_xml_file == True:
self.xml_model = load_xml_model(feature_model_filename)
else:
#Run clafer on "feature_model_xml_filename"
subprocess.check_output(["clafer", "--mode=xml", feature_model_filename], stderr=subprocess.STDOUT)
self.xml_model = load_xml_model(feature_model_filename[:-4] + ".xml")
self.SPL = self.get_top_level_SPL_model()
self.SPL_concrete = self.get_concrete_SPL_configuration()
self._non_functional_properties = None
self._FeatureTypes = None
self._xml_element_from_uniqueID ={}
self._parentToChild = {}
self._childToParent = {}
self._initialize_childParentMappings()
assert self.SPL != None
def get_top_level_SPL_model(self):
"""
Assume SPL feature model is the last top level abstract clafer.
"""
top_level_spl_model = None
assert len (self.xml_model.findall('./c1:Declaration', _namespaces)) > 0
for top_level_clafer in self.xml_model.findall('./c1:Declaration', _namespaces):
if top_level_clafer.find('c1:IsAbstract', _namespaces)!= None and \
top_level_clafer.find('c1:IsAbstract', _namespaces).text == 'true' :
top_level_spl_model = top_level_clafer
return top_level_spl_model
def get_concrete_SPL_configuration(self):
"""
Assume SPL concrete instance, is the last top level concrete model.
"""
top_level_concrete_spl_model = None
assert len (self.xml_model.findall('./c1:Declaration', _namespaces)) > 0
for top_level_clafer in self.xml_model.findall('./c1:Declaration', _namespaces):
if top_level_clafer.find('c1:IsAbstract', _namespaces)!= None and \
top_level_clafer.find('c1:IsAbstract', _namespaces).text == 'false' :
top_level_concrete_spl_model = top_level_clafer
return top_level_concrete_spl_model
def get_ConcreteLevelConstraints(self):
"""
Returns a set of constraints for the partially configured SPL.
"""
def get_clafer_Id(self, element):
return element.find('c1:Id',_namespaces).text
def get_clafer_UniqueId(self, element):
return element.find('c1:UniqueId',_namespaces).text
def get_abstract_top_level_clafers(self):
abstract_top_level_clafers = []
for top_level_clafer in self.xml_model.findall('./c1:Declaration', _namespaces):
if top_level_clafer.find('c1:IsAbstract', _namespaces)!= None and \
top_level_clafer.find('c1:IsAbstract', _namespaces).text == 'true' :
abstract_top_level_clafers.append(top_level_clafer)
return abstract_top_level_clafers
@property
def FeatureTypes(self):
if self._FeatureTypes == None:
self._FeatureTypes = self.get_abstract_top_level_clafers()[:-1]
#print "Returing _FeatureTypes= %s " % (str(self._FeatureTypes),)
return self._FeatureTypes
def getFeatureAttributes(self, FeatureType):
ret_attributes = []
for nonfunctional_property in FeatureType.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces):
ret_attributes.append(self.get_clafer_UniqueId(nonfunctional_property))
return ret_attributes
@property
def non_functional_properties(self):
if self._non_functional_properties == None:
self._non_functional_properties = {}
for FeatureType in self.FeatureTypes:
for nonfunctional_property in FeatureType.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces):
self._non_functional_properties[self.get_clafer_Id(nonfunctional_property)] = self.get_clafer_UniqueId(nonfunctional_property)
return self._non_functional_properties
def get_non_functional_properties_listing(self):
return self.non_functional_properties.keys()
def get_parentToChildMapping(self):
return self._parentToChild
def get_non_functional_property_unique_id(self, non_functional_property):
return self.non_functional_properties.get(non_functional_property)
def extract_integer(self, element):
"""
Extracts an integer from the second argument of a constraint of the form [this.property = <number>] or [this.property = - <number>].
I.e. the element must be either -<number> or just <number>.
"""
extracted_integer = 10
if element.find("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Operation", _namespaces)!=None and \
element.find("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Operation", _namespaces).text =='-':
# we have this.footprint = - <number>
extracted_integer = '-' + element.find("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp/c1:IntLiteral", _namespaces).text
else:
# we have just <number>
extracted_integer = element.find("c1:Exp/c1:IntLiteral", _namespaces).text
return extracted_integer
def get_property_value(self, element, property):
property_val = 0
for constraint in element.findall("./c1:Declaration[@xsi:type='cl:IConstraint']", _namespaces):
constraint_operation = constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Operation", _namespaces)
constraint_arguments = constraint.findall("c1:ParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument", _namespaces)
if constraint_operation != None and constraint_operation.text == '=' and len(constraint_arguments)==2:
first_argument = constraint_arguments[0]
second_argument = constraint_arguments[1]
first_argument_sub_arguments = first_argument.findall("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument", _namespaces)
first_argument_sub_operation = first_argument.findall("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Operation", _namespaces)
if len(first_argument_sub_arguments) == 2 and \
len(first_argument_sub_operation)>0 and first_argument_sub_operation[0] != None and first_argument_sub_operation[0].text == '.' and \
\
first_argument_sub_arguments[0].find("c1:Exp[@xsi:type='cl:IClaferId']/c1:Id", _namespaces) != None and \
first_argument_sub_arguments[0].find("c1:Exp[@xsi:type='cl:IClaferId']/c1:Id", _namespaces).text == 'this' and \
\
first_argument_sub_arguments[1].find("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp[@xsi:type='cl:IClaferId']/c1:Id", _namespaces) != None and \
first_argument_sub_arguments[1].find("c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp[@xsi:type='cl:IClaferId']/c1:Id", _namespaces).text == self.get_non_functional_property_unique_id(property) and \
second_argument.find("c1:Type[@xsi:type='cl:IInteger']", _namespaces)!= None:
property_val = self.extract_integer(second_argument)
return str(property_val)
def get_max_value_property(self):
"""
Returns an upper bound for the total value of any nonfunctional property in the Software Product Line Feature Model (the sum of all positive property values across all features).
"""
max_integer = 0
for feature in self.get_features_as_xml_elements():
for nonfunctional_property in self.get_non_functional_properties_listing():
nonfunctional_property_value = self.get_property_value(feature, nonfunctional_property)
max_integer = max_integer + max(int(nonfunctional_property_value), 0)
return max_integer
def get_concrete_instance_as_xml_element(self):
top_level_clafers = self.xml_model.findall("./c1:Declaration[@xsi:type='cl:IClafer']", _namespaces)
concrete_top_level_clafers = [clafer for clafer in top_level_clafers if \
clafer.find('c1:IsAbstract', _namespaces) !=None and \
clafer.find('c1:IsAbstract', _namespaces).text == 'false']
assert len (concrete_top_level_clafers) > 0
return concrete_top_level_clafers[0]
def get_xml_elmenet_from_uniqueId(self, uniqueID):
if self._xml_element_from_uniqueID.get(uniqueID) == None:
# print"Finding clafer with id %s of " % uniqueID
# iterate_over = [claferDecl for claferDecl in self.xml_model.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces) \
# if claferDecl.find('./c1:UniqueId', _namespaces) != None ]
# for x in iterate_over:
# print x.find('./c1:UniqueId', _namespaces).text
# print "Clafer id's available were those."
# print "getting %s " % uniqueID
# print "Found %s " % str(claferDecl for claferDecl in self.xml_model.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces))
xml_element = [claferDecl for claferDecl in self.xml_model.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces) \
if claferDecl.find('./c1:UniqueId', _namespaces) != None and \
claferDecl.find('./c1:UniqueId', _namespaces).text == uniqueID ][0]
self._xml_element_from_uniqueID[uniqueID] = xml_element #c
return self._xml_element_from_uniqueID.get(uniqueID)
def get_supers(self, clafer):
"""
Returns all transitive super clafers (inheritance ancestors) of a clafer, as a list of unique ids.
"""
#print "Getting supers of %s " % self.get_clafer_UniqueId(clafer)
ret_supers = []
element_super = clafer.find('./c1:Supers/c1:Super/c1:Exp/c1:Id', _namespaces)
element_super_uniqueId = element_super.text
ret_supers.append(element_super_uniqueId)
while(element_super_uniqueId != 'clafer'):
element_super_xml = self.get_xml_elmenet_from_uniqueId(element_super_uniqueId)
element_super = element_super_xml.find('./c1:Supers/c1:Super/c1:Exp/c1:Id', _namespaces)
element_super_uniqueId = element_super.text
ret_supers.append(element_super_uniqueId)
return ret_supers
def is_product_level_attribute(self, UniqueId):
"""
Returns true if UniqueId represents a product-level quality attribute such as total_footprint, total_performance, etc.
"""
return self.get_clafer_Id(self.get_xml_elmenet_from_uniqueId(UniqueId)) in \
["total_%s" % nonfunctional_property for nonfunctional_property in self.get_non_functional_properties_listing()]
def get_features_as_xml_elements(self, feature_type_unique_id=None):
"""
Returns a list of all features that inherit from feature_type_unique_id, or all features if feature_type_unique_id = None.
"""
#print "Called get_features_as_xml_elements"
#print "Returning list of features, which aren't in %s " % (str(["total_%s" % nonfunctional_property for nonfunctional_property in self.get_non_functional_properties_listing()]),)
#raw_features = [ self.get_clafer_Id(feature) for feature in self.SPL.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces) ]
#print "Start raw_features"
#print raw_features
#print "End raw_features"
ret_list_features = [feature for feature in self.SPL.findall(".//c1:Declaration[@xsi:type='cl:IClafer']", _namespaces) if \
self.get_clafer_Id(feature) not in ["total_%s" % nonfunctional_property for nonfunctional_property in self.get_non_functional_properties_listing()]]
# print "Start ret_list_features"
# print [ self.get_clafer_Id(feature) for feature in ret_list_features]
# print "End ret_list_features"
if feature_type_unique_id != None:
ret_list_features = [ feature for feature in ret_list_features if feature_type_unique_id in self.get_supers(feature) ]
return ret_list_features
def _initialize_childParentMappings(self):
breadthFirstSearchQueue = collections.deque()
breadthFirstSearchQueue.append(self.SPL)
while(len(breadthFirstSearchQueue) > 0):
parent = breadthFirstSearchQueue.popleft()
parent_id = self.get_clafer_UniqueId(parent)
self._parentToChild[parent_id] = []
for childfeature in parent.findall("./c1:Declaration[@xsi:type='cl:IClafer']", _namespaces):
childfeature_id = self.get_clafer_UniqueId(childfeature)
self._parentToChild.get(parent_id).append(childfeature_id)
# Only one parent per child, so no need to use list for this mapping.
self._childToParent[childfeature_id] = parent_id
breadthFirstSearchQueue.append(childfeature)
def get_children(self, feature_id):
"""
Returns a list of children's unique ids.
"""
return self._parentToChild.get(feature_id)
def get_parent(self, feature_id):
"""
Returns the unique id of parent.
"""
return self._childToParent.get(feature_id)
def get_group_cardinality_info(self, feature):
"""
Returns a tuple (isKeywordCardinality, intervalMin, intervalMax) representing the group cardinality of the group formed by the children of the given feature.
"""
isKeywordCardinality = feature.find('.//c1:GroupCard/c1:IsKeyword', _namespaces).text == 'true'
intervalMin = int(feature.find('.//c1:GroupCard/c1:Interval/c1:Min/c1:IntLiteral', _namespaces).text)
intervalMax = int(feature.find('.//c1:GroupCard/c1:Interval/c1:Max/c1:IntLiteral', _namespaces).text)
return (isKeywordCardinality, intervalMin, intervalMax)
def get_cardinailty_info(self, feature):
"""
Returns a tuple (Min, Max) representing the cardinality of the feature (e.g. a mandatory feature has min=1).
"""
intervalMin = int(feature.find('.//c1:Card/c1:Min/c1:IntLiteral', _namespaces).text)
intervalMax = int(feature.find('.//c1:Card/c1:Max/c1:IntLiteral', _namespaces).text)
return (intervalMin, intervalMax)
def get_exclusive_ors(self):
"""
Get the set of features that have an exclusive-or group for their children.
For example in:
xor AbstractIterator
ForwardIterator
backwardIterator
would return [AbstractIterator_UniqueId]
"""
xor_features = []
for feature in self.get_features_as_xml_elements():
(isKeywordCardinality, intervalMin, intervalMax) = self.get_group_cardinality_info(feature)
if isKeywordCardinality==True and intervalMin==1 and intervalMax==1:
xor_features.append(self.get_clafer_UniqueId(feature))
return xor_features
def get_ors(self):
"""
Get the set of features that have an "or" group for their children.
For example in:
or AbstractIterator
ForwardIterator
backwardIterator
would return [AbstractIterator]
"""
or_features = []
for feature in self.get_features_as_xml_elements():
(isKeywordCardinality, intervalMin, intervalMax) = self.get_group_cardinality_info(feature)
if isKeywordCardinality==True and intervalMin==1 and intervalMax==-1:
or_features.append(self.get_clafer_UniqueId(feature))
return or_features
def get_mandatory_features(self):
"""
Returns a list of features that any configuration must have.
"""
top_level_uniqueid = self.get_clafer_UniqueId(self.SPL)
BFSQueue = collections.deque()
BFSQueue.append(top_level_uniqueid)
mandatory_features = []
mandatory_features.append(top_level_uniqueid)
while(len(BFSQueue) > 0):
parent_uniqueid = BFSQueue.popleft()
for child_id in self.get_children(parent_uniqueid):
child_xml = self.get_xml_elmenet_from_uniqueId(child_id)
(intervalMin, intervalMax) = self.get_cardinailty_info(child_xml)
if (intervalMin > 0):
mandatory_features.append(child_id)
BFSQueue.append(child_id)
return [feature for feature in mandatory_features if \
self.get_clafer_Id((self.get_xml_elmenet_from_uniqueId(feature))) not in \
["total_%s" % nonfunctional_property for nonfunctional_property in self.get_non_functional_properties_listing()]]
def get_set_extra_integers_from_feature_model(self):
"""
Returns a set of all integers that are not referenced in the feature model, but that might be
needed to represent the quality properties of a configuration of the feature model.
"""
from collections import Counter
bag_integers_in_spl_model = Counter()
for clafer_features in self.get_features_as_xml_elements():
# Eg add the integer to the bag.
for nonfunctional_property in self.get_non_functional_properties_listing():
bag_integers_in_spl_model.update([int(self.get_property_value(clafer_features, nonfunctional_property))])
set_integers_derived_from_spl_model = set()
for feature_number in bag_integers_in_spl_model.elements(): # expand the bag (e.g BAG = {1, 1 , 1, 2} expands to 1,1,1,2 .
tmp_numbers_to_add = set()
for existing_numbers in set_integers_derived_from_spl_model:
tmp_numbers_to_add.add(existing_numbers + feature_number)
tmp_numbers_to_add.add(feature_number)
# For each number of the bag x,
# set_integers_derived_from_spl_model += x + each element of set_integers_derived_from_spl_model .
set_integers_derived_from_spl_model.update(tmp_numbers_to_add)
return set_integers_derived_from_spl_model.difference(set(bag_integers_in_spl_model))
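# Worked example (a minimal sketch, not part of the original source): if the bag of
# property values collected from the model is {2, 3}, the loop accumulates all
# sub-multiset sums {2, 3, 5}; subtracting the original values {2, 3} leaves {5},
# i.e. the extra integers that a configuration's totals may need but that never
# appear verbatim in the feature model.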
def convert_ClaferUniqueId_to_ClaferId(self, UniqueIdLabel):
"""
Converts a clafer unique id to a clafer id.
Also used when translating an Alloy answer back to clafer.
"""
regex_remove_pre = re.compile(r'c\d+_')
match = regex_remove_pre.search(UniqueIdLabel)
return UniqueIdLabel.replace(match.group(0), '')
def has_SomeQuantifierAsCrossTree(self, constraint):
return constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:Quantifier[@xsi:type='cl:ISome']", _namespaces) != None
def has_NoQuantifierAsCrossTree(self, constraint):
return constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:Quantifier[@xsi:type='cl:INo']", _namespaces) != None
def is_cross_tree_constraint(self, element, constraint):
element_id = self.get_clafer_UniqueId(element)
if element_id == "c49_InMemory":
print "is_cross_tree_constraint: %s" % element_id
has_SomeQuantifier = False
has_NoQuantifier = False
isNotAttributeAssignment = constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']", _namespaces) == None
if isNotAttributeAssignment == True:
if element_id == "c49_InMemory":
print "Yes isNotAttributeAssignment for c49_InMemory"
has_SomeQuantifier = self.has_SomeQuantifierAsCrossTree(constraint)
has_NoQuantifier = self.has_NoQuantifierAsCrossTree(constraint)
else:
if element_id == "c49_InMemory":
print "Not isNotAttributeAssignment for c49_InMemory"
return has_SomeQuantifier or has_NoQuantifier
# print constraint
# for x in constraint.iter():
# print x.tag
# print constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:Quantifier[@xsi:type='cl:ISome']", _namespaces)
# print dir(constraint)
def extract_implied_feature(self, element, constraint):
"""
Given a constraint, returns the unique id of the feature implied by the constraint,
as a tuple: (uniqueId, 1) if uniqueId is implied, or (uniqueId, 0) if !uniqueId is implied.
Precondition: self.is_cross_tree_constraint(element, constraint) == True
"""
#print constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:BodyParentExp", _namespaces)
arguments = constraint.findall("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:BodyParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument", _namespaces)
#print len(arguments)
assert(len(arguments) >= 2) # Should have two arguments, this.parent and someFeature.
ArgumentThis = arguments[0]
ArgumentImpliedFeature = arguments [1]
ImpliedElement = ArgumentImpliedFeature.find("./c1:Exp/c1:Id", _namespaces).text
ImpliedElementValue = 1
if self.has_SomeQuantifierAsCrossTree(constraint):
ImpliedElementValue = 1
elif self.has_NoQuantifierAsCrossTree(constraint):
ImpliedElementValue = 0
return (ImpliedElement, ImpliedElementValue)
#self.printAllChildrenTags(ArgumentImpliedFeature.find("./c1:Exp", _namespaces))
#self.printAllChildrenTags(constraint.find("c1:ParentExp/c1:Exp[@xsi:type='cl:IDeclarationParentExp']/c1:BodyParentExp/Exp", _namespaces))
def get_goals_unique_id(self):
"""
Returns a list of goals as uniqueIDs
"""
ret = []
goal_declarations = self.xml_model.findall("./c1:Declaration[@xsi:type='cl:IGoal']", _namespaces)
for goal_declaration in goal_declarations:
goals = goal_declaration.findall("./c1:ParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Argument/c1:Exp[@xsi:type='cl:IClaferId']", _namespaces)
for goal in goals:
if self.get_clafer_Id(goal) != "ref":
ret.append( self.get_clafer_Id(goal) )
return ret
def get_goals_as_tuple_xml_is_maximize(self):
"""
Returns a list of goals as tuple [(XMLDeclarationGoal, IsMax[true/false])]
"""
goals = self.xml_model.findall("./c1:Declaration[@xsi:type='cl:IGoal']", _namespaces)
return [ (goal, goal.find("./c1:ParentExp/c1:Exp[@xsi:type='cl:IFunctionExp']/c1:Operation", _namespaces).text=="max") for goal in goals ]
def get_crosstree_constraints(self):
"""
Returns a list of cross-tree constraints as a list of tuples
((FeatureA, 0/1), (FeatureB, 0/1))
to represent FeatureA of value 0/1 implies FeatureB of value 0/1.
"""
# Method Stub: return [(("c83_Measurement", 1), ("c47_AbstractSort", 1))]
print "get_corsstree_constraints"
list_implications = []
for element in [x for x in self.get_features_as_xml_elements() if not self.is_product_level_attribute(self.get_clafer_UniqueId(x))]:
element_id = self.get_clafer_UniqueId(element)
might_have_crosstree_constraint = False
for constraint in element.findall("./c1:Declaration[@xsi:type='cl:IConstraint']", _namespaces):
if self.is_cross_tree_constraint(element, constraint):
print "Adding Cross Tree for element_id %s " % element_id
list_implications.append(((element_id, 1), self.extract_implied_feature(element, constraint)))
might_have_crosstree_constraint = True
if might_have_crosstree_constraint == True:
print self.get_clafer_Id(element)
return list_implications
def printAllChildrenTags(self, element):
for x in element.iter():
print x.tag,
print ""
|
|
from gevent import monkey; monkey.patch_all()
import gevent_openssl; gevent_openssl.monkey_patch()
import re
import random
import string
import pytz
import email
import email.header
import json
import time
import traceback
import smtplib
from email.mime.text import MIMEText
from imapclient import IMAPClient
from datetime import datetime, timedelta
from operator import itemgetter
import gevent
from gevent import Greenlet
from gevent.queue import Queue
from gevent.lock import Semaphore
from flask import Flask, render_template, Response, request
from flask_sockets import Sockets
from bs4 import BeautifulSoup
app = Flask(__name__)
sockets = Sockets(app)
spawn = gevent.Greenlet.spawn
CONFIG = json.load(file("config.json"))
URL_REGEX = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?")
def remove_tags(garbage):
return BeautifulSoup(garbage, "lxml").get_text()
class Authenticator(object):
def __init__(self):
self._lock = Semaphore()
def verify(self, token):
with self._lock:
gevent.sleep(1)
try:
return self.verify_impl(token)
except:
traceback.print_exc()
return False
def verify_impl(self, token):
return False
class AuthenticatorDebugDoNotUse(Authenticator):
def verify_impl(self, token):
return True
class AuthenticatorOTP(Authenticator):
def __init__(self, state_file):
Authenticator.__init__(self)
self._state_file = state_file
def verify_impl(self, token):
with file(self._state_file) as inp:
lines = [
line.strip() for line in inp.readlines()
if len(line.strip()) >= 8
]
if not lines:
return False # otps depleted
if token != lines[0]:
return False
with file(self._state_file, "wb") as outp:
outp.write("\n".join(lines[1:]))
return True
class AuthenticatorYUBI(Authenticator):
def __init__(self, client_id, secret_key, yubikey_id):
Authenticator.__init__(self)
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
from yubico_client import Yubico
self._client = Yubico(client_id, secret_key)
if len(yubikey_id) != 12:
raise ValueError("invalid yubikey_id")
self._yubikey_id = yubikey_id
def verify_impl(self, token):
if token[:12] != self._yubikey_id:
return False
return self._client.verify(token, timeout=5)
Authenticator = {
"yubi": AuthenticatorYUBI,
"otp": AuthenticatorOTP,
"debug-do-not-use": AuthenticatorDebugDoNotUse,
}[CONFIG['authenticator']['name']](**CONFIG['authenticator'].get('args', {}))
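# The lookup above instantiates the configured backend with keyword arguments taken
# from config.json. An illustrative (not shipped) authenticator section, with key
# names inferred from the constructors above:
#
#   "authenticator": {
#       "name": "otp",
#       "args": {"state_file": "otp_tokens.txt"}
#   }
#
# or, for YubiKey validation:
#
#   "authenticator": {
#       "name": "yubi",
#       "args": {"client_id": "...", "secret_key": "...", "yubikey_id": "cccccccccccc"}
#   }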
class Actor(gevent.Greenlet):
def __init__(self, *args, **kwargs):
self._inbox = Queue()
self._running = True
Greenlet.__init__(self)
self.start()
self._args = args
self._kwargs = kwargs
def quit(self):
self.stop()
def stop(self):
self._running = False
def _run(self):
self.setup(*self._args, **self._kwargs)
while self._running:
event, args, kwargs = self._inbox.get()
# print type(self).__name__, '<-', event, args
try:
getattr(self, event)(*args, **kwargs)
except:
traceback.print_exc()
self.stop()
def send(self, event, *args, **kwargs):
self._inbox.put((event, args, kwargs))
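# Minimal usage sketch (illustrative, not part of the original source). Subclasses
# implement setup() plus one method per event; events are dispatched by name from
# the greenlet's queue:
#
#   class Printer(Actor):
#       def setup(self):
#           pass
#       def show(self, text):
#           print text
#
#   p = Printer()            # starts the greenlet and runs setup()
#   p.send('show', 'hello')  # queued; the actor greenlet calls self.show('hello')
#   p.send('quit')           # quit() -> stop(), ending the event loop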
class IMAP(Actor):
def setup(self, client, hostname, username, password):
self._imap = IMAPClient(hostname, use_uid=True, ssl=True)
# self._imap.debug = True
self._imap.login(username, password)
self._client = client
self._folder = None
def get_messages(self, folder, query):
# XXX: error handling
if folder != self._folder:
self._imap.select_folder(folder, readonly=True)
self._folder = folder
ids = self._imap.search(query)
messages = self._imap.fetch(ids, ['FLAGS', 'RFC822'])
def parse_message(raw):
b = email.message_from_string(raw)
body = "<no body>"
if b.is_multipart():
for part in b.walk():
ctype = part.get_content_type()
cdispo = str(part.get('Content-Disposition'))
if ctype == 'text/plain' and 'attachment' not in cdispo:
charset = part.get_content_charset() or 'utf-8'
body = part.get_payload(decode=True).decode(charset, errors='replace')
break
else:
charset = b.get_content_charset() or 'utf-8'
body = b.get_payload(decode=True).decode(charset, errors='replace')
body = remove_tags(body)
replace_links = any(
re.search(regex, body, re.M|re.S)
for regex in CONFIG['mask_links']
)
if replace_links:
body = URL_REGEX.sub("<link masked>", body)
date = b['date']
try:
timestamp = email.utils.mktime_tz(email.utils.parsedate_tz(date))
date = datetime.fromtimestamp(timestamp, pytz.utc).strftime("%c UTC")
except:
traceback.print_exc()
timestamp = time.time()
def dh(value):
return unicode(email.header.make_header(email.header.decode_header(value)))
return {
'from': dh(b['from']),
'to': dh(b['to']),
'subject': dh(b['subject']),
'msg_id': b['message-id'],
'date': date,
'body': body,
'sort_key': timestamp,
'unread': '\\Seen' not in message['FLAGS'],
}
mails = []
for id, message in messages.iteritems():
mails.append(parse_message(message['RFC822']))
mails.sort(key=itemgetter('sort_key'), reverse=True)
self._client.send('mails', dict(
folder = folder,
mails = mails,
))
def quit(self):
self._imap.logout()
def external_api(fn):
fn.external = True
return fn
class Client(Actor):
def setup(self, socket):
self._socket = socket
self._imap = None
self._killer_task = None
self.send_to_client('connected')
def send_to_client(self, event, **data):
self._socket.send('send_client', json.dumps(dict(
event = event,
data = data,
)))
def send_error_and_close(self, message):
self.send_to_client('error', message=message)
gevent.sleep(0.5)
self._socket.send('close')
def mails(self, update):
self.send_to_client('mails', **update)
def quit(self):
if self._imap:
self._imap.send('quit')
self._killer_task.kill()
self.stop()
def killer_task(self):
max_time = CONFIG['max_session_duration']
gevent.sleep(max_time - 30)
self.send_to_client('expire_warn')
gevent.sleep(30)
self.send_error_and_close('Session expired')
@external_api
def authenticate(self, token):
if self._imap:
self.send_error_and_close('Already authenticated')
return
if not Authenticator.verify(token):
self.send_error_and_close('Invalid token')
return
self._imap = IMAP(self,
CONFIG['auth']['imap']['hostname'],
CONFIG['auth']['imap']['username'],
CONFIG['auth']['imap']['password'],
)
self._killer_task = spawn(self.killer_task)
self.send_to_client('authenticated', expires=CONFIG['max_session_duration'])
self.send_to_client('folders', folders=sorted(CONFIG['folders']))
@external_api
def get_messages(self, folder):
if not self._imap:
self.send_error_and_close('Not authenticated')
return
if folder not in CONFIG['folders']:
self.send_error_and_close('Invalid folder')
return
folder_settings = CONFIG['folders'][folder]
def parse(tok):
if isinstance(tok, basestring):
return tok
elif isinstance(tok, (int, long)):
return (datetime.now() - timedelta(days=tok)).date()
query = [parse(tok) for tok in folder_settings['query']]
self._imap.send('get_messages', folder_settings['folder'], query)
@external_api
def send_mail(self, to, subject, replyto, body):
if not self._imap:
self.send_error_and_close('Not authenticated')
return
server = smtplib.SMTP_SSL(
CONFIG['auth']['smtp']['hostname'],
CONFIG['auth']['smtp']['port'],
)
# server.set_debuglevel(1)
# server.starttls()
server.login(
CONFIG['auth']['smtp']['username'],
CONFIG['auth']['smtp']['password'],
)
msg = MIMEText(body, 'plain', 'utf-8')
msg['Subject'] = subject
msg['From'] = CONFIG['auth']['smtp']['from']
msg['To'] = to
msg['In-Reply-To'] = replyto
server.sendmail(
CONFIG['auth']['smtp']['from'],
[to], msg.as_string()
)
server.quit()
# XXX: This should also put a copy of the sent mail into the SENT folder?
self.send_to_client('mail_sent')
def client_message(self, message):
try:
parsed = json.loads(message)
event = parsed['event']
data = parsed.get('data', {})
fn = getattr(self, event)
if not hasattr(fn, 'external'):
raise NotImplementedError('not externally callable')
fn(**data)
except:
traceback.print_exc()
self.send_error_and_close('Invalid message')
class Socket(Actor):
def setup(self, ws):
self._ws = ws
def ping(self):
self.send_client('')
def close(self):
self._ws.close()
def send_client(self, message):
self._ws.send(message)
@sockets.route('/ws')
def session_socket(ws):
socket = Socket(ws)
client = Client(socket)
def pinger():
while 1:
gevent.sleep(30)
socket.send('ping')
pinger_task = spawn(pinger)
while not ws.closed:
message = ws.receive()
if not message:
break
client.send('client_message', message)
pinger_task.kill()
client.send('quit')
socket.send('quit')
print "done"
@app.route('/')
def index():
nonce = ''.join(random.sample(
string.lowercase+string.digits, 16
))
r = Response(render_template("otm.jinja",
nonce=nonce
))
r.headers['Content-Security-Policy'] = ';'.join((
"default-src 'none'",
"style-src 'nonce-%s'" % nonce,
"script-src 'nonce-%s'" % nonce,
"connect-src %s://%s/ws" % (
"wss" if request.is_secure else "ws",
request.host,
),
))
r.headers['X-Frame-Options'] = 'DENY'
return r
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from werkzeug.contrib.fixers import ProxyFix
app = ProxyFix(app)
server = pywsgi.WSGIServer(('127.0.0.1', 8080), app, handler_class=WebSocketHandler)
server.serve_forever()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/startup.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
#
# CLEAN ROOM MODULE
#
# This module is classified as a "Clean Room" module and is subject to
# restrictions on what it may import.
#
# See: https://king-phisher.readthedocs.io/en/latest/development/modules.html#clean-room-modules
#
################################################################################
import collections
import gc
import io
import logging
import os
import select
import shlex
import shutil
import subprocess
import sys
from king_phisher import its
from king_phisher import version
ProcessResults = collections.namedtuple('ProcessResults', ('stdout', 'stderr', 'status'))
"""
A named tuple for holding the results of an executed external process.
.. py:attribute:: stdout
A string containing the data the process wrote to stdout.
.. py:attribute:: stderr
A string containing the data the process wrote to stderr.
.. py:attribute:: status
An integer representing the process's exit code.
"""
def _multistream(input, *outputs, size=None):
transfered = 0
if select.select([input], [], [], 0)[0]:
chunk = input.read(size)
for output in outputs:
output.write(chunk)
output.flush()
transfered += len(chunk)
return transfered
def _run_pipenv(args, **kwargs):
"""
Execute Pipenv with the supplied arguments and return the
:py:class:`~.ProcessResults`. If the exit status is non-zero, then the
stdout buffer from the Pipenv execution will be written to stderr.
:param tuple args: The arguments for Pipenv.
:param str cwd: An optional current working directory to use for the
process.
:return: The results of the execution.
:rtype: :py:class:`~.ProcessResults`
"""
path = which('pipenv')
if path is None:
raise RuntimeError('pipenv could not be found')
args = (path,) + tuple(args)
results = run_process(args, **kwargs)
if results.status:
sys.stderr.write('pipenv encountered the following error:\n')
sys.stderr.write(results.stdout)
sys.stderr.flush()
return results
def pipenv_entry(parser, entry_point):
"""
Run through startup logic for a Pipenv script (see Pipenv: `Custom Script
Shortcuts`_ for more information). This sets up a basic stream logging
configuration, establishes the Pipenv environment and finally calls the
actual entry point using :py:func:`os.execve`.
.. note::
Due to the use of :py:func:`os.execve`, this function does not return.
.. note::
Due to the use of :py:func:`os.execve` and ``os.EX_*`` exit codes, this
function is not available on Windows.
:param parser: The argument parser to use. Arguments are added to it and
extracted before passing the remainder to the entry point.
:param str entry_point: The name of the entry point using Pipenv.
.. _Custom Script Shortcuts: https://pipenv.readthedocs.io/en/latest/advanced/#custom-script-shortcuts
"""
if its.on_windows:
# this is because of the os.exec call and os.EX_* status codes
raise RuntimeError('pipenv_entry is incompatible with windows')
env_group = parser.add_argument_group('environment wrapper options')
env_action = env_group.add_mutually_exclusive_group()
env_action.add_argument('--env-install', dest='pipenv_install', default=False, action='store_true', help='install pipenv environment and exit')
env_action.add_argument('--env-update', dest='pipenv_update', default=False, action='store_true', help='update pipenv requirements and exit')
if its.on_windows:
env_group.set_defaults(pipenv_verbose=False)
else:
env_group.add_argument('--env-verbose', dest='pipenv_verbose', default=False, action='store_true', help='display pipenv output')
argp_add_default_args(parser)
arguments, _ = parser.parse_known_args()
sys_argv = sys.argv
sys_argv.pop(0)
if sys.version_info < (3, 4):
print('[-] the Python version is too old (minimum required is 3.4)')
return os.EX_SOFTWARE
# initialize basic stream logging
logger = logging.getLogger('KingPhisher.wrapper')
logger.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(arguments.loglvl if arguments.loglvl else 'WARNING')
console_log_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
logger.addHandler(console_log_handler)
target_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
logger.debug("target directory: {}".format(target_directory))
os.environ['PIPENV_VENV_IN_PROJECT'] = os.environ.get('PIPENV_VENV_IN_PROJECT', 'True')
os.environ['PIPENV_PIPFILE'] = os.environ.get('PIPENV_PIPFILE', os.path.join(target_directory, 'Pipfile'))
python_path = os.environ.get('PYTHONPATH')
python_path = [] if python_path is None else python_path.split(os.pathsep)
python_path.append(target_directory)
os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
logger.info('checking for the pipenv environment')
if which('pipenv') is None:
logger.exception('pipenv not found, run tools/install.sh --update')
return os.EX_UNAVAILABLE
pipenv_path = which('pipenv')
logger.debug("pipenv path: {0!r}".format(pipenv_path))
pipenv_args = ['--site-packages', '--three']
if arguments.pipenv_verbose and logger.isEnabledFor(logging.DEBUG):
pipenv_args.append('--verbose')
if arguments.pipenv_install or not os.path.isdir(os.path.join(target_directory, '.venv')):
if arguments.pipenv_install:
logger.info('installing the pipenv environment')
else:
logger.warning('no pre-existing pipenv environment was found, installing it now')
results = _run_pipenv(pipenv_args + ['install'], cwd=target_directory, tee=arguments.pipenv_verbose)
if results.status:
logger.error('failed to install the pipenv environment')
logger.info('removing the incomplete .venv directory')
try:
shutil.rmtree(os.path.join(target_directory, '.venv'))
except OSError:
logger.error('failed to remove the incomplete .venv directory', exc_info=True)
return results.status
if arguments.pipenv_install:
return os.EX_OK
if arguments.pipenv_update:
logger.info('updating the pipenv environment')
results = _run_pipenv(pipenv_args + ['update'], cwd=target_directory, tee=arguments.pipenv_verbose)
if results.status:
logger.error('failed to update the pipenv environment')
return results.status
logger.info('the pipenv environment has been updated')
return os.EX_OK
logger.debug('pipenv Pipfile: {}'.format(os.environ['PIPENV_PIPFILE']))
# the blank arg being passed is required for pipenv
passing_argv = [' ', 'run', entry_point] + sys_argv
os.execve(pipenv_path, passing_argv, os.environ)
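# Illustrative caller (a minimal sketch; the real King Phisher launch scripts and
# entry point names may differ):
#
#   import argparse
#   from king_phisher import startup
#
#   parser = argparse.ArgumentParser(prog='KingPhisher')
#   startup.pipenv_entry(parser, 'king-phisher-client')  # does not return (os.execve)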
def run_process(process_args, cwd=None, tee=False, encoding='utf-8'):
"""
Run a subprocess, wait for it to complete and return a
:py:class:`~.ProcessResults` object. This function differs from
:py:func:`.start_process` in the type it returns and the fact that it always
waits for the subprocess to finish before returning.
.. versionchanged:: 1.15.0
Added the *tee* parameter.
:param tuple process_args: The arguments for the processes including the binary.
:param str cwd: An optional current working directory to use for the process.
:param bool tee: Whether or not to display the console output while the process is running.
:param str encoding: The encoding to use for strings.
:return: The results of the process including the status code and any text
printed to stdout or stderr.
:rtype: :py:class:`~.ProcessResults`
"""
process_handle = start_process(process_args, wait=False, cwd=cwd)
if tee:
if its.on_windows:
# this is because select() does not support file descriptors
raise RuntimeError('tee mode is not supported on Windows')
stdout = io.BytesIO()
stderr = io.BytesIO()
while process_handle.poll() is None:
_multistream(process_handle.stdout, stdout, sys.stdout.buffer, size=1)
_multistream(process_handle.stderr, stderr, sys.stderr.buffer, size=1)
_multistream(process_handle.stdout, stdout, sys.stdout.buffer)
_multistream(process_handle.stderr, stderr, sys.stderr.buffer)
stdout = stdout.getvalue()
stderr = stderr.getvalue()
else:
process_handle.wait()
stdout = process_handle.stdout.read()
stderr = process_handle.stderr.read()
results = ProcessResults(
stdout.decode(encoding),
stderr.decode(encoding),
process_handle.returncode
)
return results
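# Illustrative usage (not part of the original module):
#
#   results = run_process(('echo', 'hello'))
#   results.status   # 0 on success
#   results.stdout   # 'hello\n'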
def start_process(process_args, wait=True, cwd=None):
"""
Start a subprocess and optionally wait for it to finish. If not **wait**, a
handle to the subprocess is returned instead of ``True`` when it exits
successfully. This function differs from :py:func:`.run_process` in that it
optionally waits for the subprocess to finish, and can return a handle to
it.
:param tuple process_args: The arguments for the processes including the binary.
:param bool wait: Whether or not to wait for the subprocess to finish before returning.
:param str cwd: The optional current working directory.
:return: If **wait** is set to True, a boolean indicating success is returned, else a handle to the subprocess is returned.
"""
cwd = cwd or os.getcwd()
if isinstance(process_args, str):
process_args = shlex.split(process_args)
close_fds = True
startupinfo = None
preexec_fn = None if wait else getattr(os, 'setsid', None)
if sys.platform.startswith('win'):
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
logger = logging.getLogger('KingPhisher.ExternalProcess')
logger.debug('starting external process: ' + ' '.join(process_args))
proc_h = subprocess.Popen(
process_args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec_fn,
close_fds=close_fds,
cwd=cwd,
startupinfo=startupinfo
)
if not wait:
return proc_h
return proc_h.wait() == 0
def which(program):
"""
Examine the ``PATH`` environment variable to determine the location for the
specified program. If it can not be found None is returned. This is
fundamentally similar to the Unix utility of the same name.
:param str program: The name of the program to search for.
:return: The absolute path to the program if found.
:rtype: str
"""
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
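# Illustrative usage (not part of the original module):
#
#   which('python3')   # e.g. '/usr/bin/python3', or None when not found on PATH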
def argp_add_default_args(parser, default_root=''):
"""
Add standard arguments to a new :py:class:`argparse.ArgumentParser`
instance. Used to add the utility's argparse options to the wrapper for
display.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
:param str default_root: The default root logger to specify.
"""
parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
log_group = parser.add_argument_group('logging options')
log_group.add_argument('-L', '--log', dest='loglvl', type=str.upper, choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL'), help='set the logging level')
log_group.add_argument('--logger', default=default_root, help='specify the root logger')
gc_group = parser.add_argument_group('garbage collector options')
gc_group.add_argument('--gc-debug-leak', action='store_const', const=gc.DEBUG_LEAK, default=0, help='set the DEBUG_LEAK flag')
gc_group.add_argument('--gc-debug-stats', action='store_const', const=gc.DEBUG_STATS, default=0, help='set the DEBUG_STATS flag')
return parser
def argp_add_client(parser):
"""
Add client-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kpc_group = parser.add_argument_group('client specific options')
kpc_group.add_argument('-c', '--config', dest='config_file', required=False, help='specify a configuration file to use')
kpc_group.add_argument('--no-plugins', dest='use_plugins', default=True, action='store_false', help='disable all plugins')
kpc_group.add_argument('--no-style', dest='use_style', default=True, action='store_false', help='disable interface styling')
return parser
def argp_add_server(parser):
"""
Add server-specific arguments to a new :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser to add arguments to.
:type parser: :py:class:`argparse.ArgumentParser`
"""
kps_group = parser.add_argument_group('server specific options')
kps_group.add_argument('-f', '--foreground', dest='foreground', action='store_true', default=False, help='run in the foreground (do not fork)')
kps_group.add_argument('--update-geoip-db', dest='update_geoip_db', action='store_true', default=False, help='update the geoip database and exit')
kps_group.add_argument('--verify-config', dest='verify_config', action='store_true', default=False, help='verify the configuration and exit')
kps_group.add_argument('config_file', action='store', help='configuration file to use')
return parser
|
|
# Copyright (c) 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import exceptions as c_exc
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import n1kv_client
_resource_metadata = {'network-segment': ['publishName', 'description', 'id',
'tenantId', 'mode', 'segmentType',
'networkSegmentPool', 'vlan',
'vxlan', 'bridgeDomain'],
'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
'vmnetwork': ['name', 'networkSegmentId',
'networkSegment', 'portProfile',
'portProfileId', 'tenantId',
'portId', 'macAddress',
'portType', 'ipAddress', 'subnetId'],
'subnet': ['addressRangeStart', 'addressRangeEnd',
'ipAddressSubnet', 'description', 'gateway',
'dhcp', 'dnsServersList', 'networkAddress',
'netSegmentName', 'id', 'tenantId']}
class TestClient(n1kv_client.Client):
"""Base class for test client"""
def __init__(self, **kwargs):
self.broken = False
self.inject_params = False
self.total_profiles = 1
self.shared_net = False
self.upd_shared_net = False
super(TestClient, self).__init__()
def _get_total_profiles(self):
"""Return total number of profiles"""
return self.total_profiles
def _do_request(self, method, action, body=None, headers=None,
vsm_ip=None):
"""Handle outgoing requestes based on type"""
if self.broken:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
if self.inject_params and body:
body['invalidKey'] = 'catchMeIfYouCan'
if method == 'POST' and self.shared_net:
return _validate_shared_net_resource(action, body)
# For update to shared network cases, set to shared after net create
elif (method == 'POST' and self.upd_shared_net and
'/network-segment/' in action):
ret = _validate_resource(action, body)
self.shared_net = True
return ret
elif method == 'POST':
return _validate_resource(action, body)
elif method == 'GET':
if 'virtual-port-profile' in action:
return _policy_profile_generator(
self._get_total_profiles())
else:
raise c_exc.VSMError(reason='VSM:Internal Server Error')
class TestClientInvalidRequest(TestClient):
"""Test invalid request cases"""
def __init__(self, **kwargs):
super(TestClientInvalidRequest, self).__init__()
self.inject_params = True
class TestClientInvalidResponse(TestClient):
"""Test invalid response cases"""
def __init__(self, **kwargs):
super(TestClientInvalidResponse, self).__init__()
self.broken = True
class TestClientSharedNetwork(TestClient):
"""Test shared network cases"""
def __init__(self, **kwargs):
super(TestClientSharedNetwork, self).__init__()
self.shared_net = True
class TestClientUpdateSharedNetwork(TestClient):
"""Test cases where network is updated to be shared"""
def __init__(self, **kwargs):
super(TestClientUpdateSharedNetwork, self).__init__()
self.upd_shared_net = True
class TestClientVSMRetry(n1kv_client.Client):
"""Base class for test client"""
def __init__(self, **kwargs):
super(TestClientVSMRetry, self).__init__()
self.pool.spawn = self._fake_pool_spawn
def _fake_pool_spawn(self):
pass
def _validate_resource(action, body=None):
"""Validate expected keys are present in outgoing request"""
if body:
body_set = set(body.keys())
else:
return
if 'vm-network' in action and 'port' not in action:
vmnetwork_set = set(_resource_metadata['vmnetwork'])
if body_set - vmnetwork_set:
raise c_exc.VSMError(reason='Invalid Request')
elif '/network-segment/' in action:
network_set = set(_resource_metadata['network-segment'])
if body_set - network_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'port' in action:
port_set = set(_resource_metadata['port'])
if body_set - port_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'subnet' in action:
subnet_set = set(_resource_metadata['subnet'])
if body_set - subnet_set:
raise c_exc.VSMError(reason='Invalid Request')
else:
return
def _validate_shared_net_resource(action, body=None):
"""Validate keys in outgoing shared network request"""
if body:
body_set = set(body.keys())
else:
return
if 'vm-network' in action and 'port' not in action:
vmnetwork_set = set(_resource_metadata['vmnetwork'])
if body_set - vmnetwork_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'port' in action:
port_set = set(_resource_metadata['port'])
if body_set - port_set:
raise c_exc.VSMError(reason='Invalid Request')
elif 'subnet' in action:
subnet_set = set(_resource_metadata['subnet'])
if body_set - subnet_set:
raise c_exc.VSMError(reason='Invalid Request')
elif '/network-segment/' in action:
network_set = set(_resource_metadata['network-segment'])
if body_set - network_set:
raise c_exc.VSMError(reason='Invalid Request')
if body['tenantId'] != '0':
raise c_exc.VSMError(reason='Invalid Shared Network Handling')
else:
return
def _policy_profile_generator(total_profiles):
"""
Generate policy profile response and return a dictionary.
:param total_profiles: integer representing total number of profiles to
return
"""
profiles = {}
for num in range(1, total_profiles + 1):
name = "pp-%s" % num
profile_id = "00000000-0000-0000-0000-00000000000%s" % num
profiles[name] = {"properties": {"name": name, "id": profile_id}}
return profiles
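# Example return value for total_profiles=2 (illustrative):
#
#   {
#       "pp-1": {"properties": {"name": "pp-1", "id": "00000000-0000-0000-0000-000000000001"}},
#       "pp-2": {"properties": {"name": "pp-2", "id": "00000000-0000-0000-0000-000000000002"}},
#   }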
def _policy_profile_generator_xml(total_profiles):
"""
Generate policy profile response in XML format.
:param total_profiles: integer representing total number of profiles to
return
"""
xml = ["""<?xml version="1.0" encoding="utf-8"?>
<set name="virtual_port_profile_set">"""]
template = (
'<instance name="%(num)d"'
' url="/api/n1k/virtual-port-profile/%(num)s">'
'<properties>'
'<id>00000000-0000-0000-0000-00000000000%(num)s</id>'
'<name>pp-%(num)s</name>'
'</properties>'
'</instance>'
)
xml.extend(template % {'num': n} for n in range(1, total_profiles + 1))
xml.append("</set>")
return ''.join(xml)
|
|
from abc import ABCMeta, abstractmethod, abstractstaticmethod
from collections import namedtuple
from functools import partial
from enum import Enum
from collections import OrderedDict, UserList
from struct import Struct
try:
from . util import to_hex, crc32
except SystemError:
from util import to_hex, crc32
import sys
import logging
from io import IOBase, BytesIO
log = logging.getLogger(__name__)
"""
---types---
"""
def encoded_combinator_number(data):
"""
Converts a string representation of a combinator into a bytes representation of its number.
::
# vector#1cb5c415 {t:Type} # [ t ] = Vector t
encoded_combinator_number('vector type:t # [ t ] = Vector') -> b'\x15\xc4\xb5\x1c'
"""
if isinstance(data, str):
data = data.encode()
return crc32(data).to_bytes(4, 'little')
class TLObject(metaclass=ABCMeta):
__slots__ = ()
def to_bytes(self):
return b''.join(self.to_buffers())
def hex_components(self):
return ' '.join([to_hex(data) for data in self.to_buffers()])
@abstractmethod
def to_buffers(self):
"""A list of bytes() (one item for each component of the combinator)"""
raise NotImplementedError()
_VectorBase = namedtuple('Vector', ('t', 'num', 'items'))
class TLType:
constructors = {}
def __new__(cls, *args, **kwargs):
raise SyntaxError('TLType is not to be created standalone')
@classmethod
def from_stream(cls, stream):
"""Boxed type combinator loading"""
con_num = stream.read(4)
con = cls.constructors.get(con_num)
if con is None:
if cls is TLType:
raise ValueError('constructor with number {} does not exists'.format(to_hex(con_num)))
else:
raise ValueError('{} does not have combinator with number {}'.format(cls, to_hex(con_num)))
return con.from_stream(stream)
@classmethod
def add_constuctor(cls, constructor_cls):
if TLType.constructors.get(constructor_cls.number) is not None:
raise ValueError('duplicate constructor with number: {}'.format(constructor_cls.number))
TLType.constructors[constructor_cls.number] = constructor_cls
cls.constructors[constructor_cls.number] = constructor_cls
_P_Q_inner_dataBase = namedtuple('P_Q_inner_data', ['pq', 'p', 'q', 'nonce', 'server_nonce', 'new_nonce'])
P_Q_inner_data = type('P_Q_inner_data', (TLType,), dict(constructors={}))
"""
---constructors---
"""
class TLCombinator(TLObject):
def hex_components(self, boxed=False):
result = ['{}:{}'.format(self.name, to_hex(self.number, 4))] if boxed else ['{}: '.format(self.name)]
for arg in self:
result += ['{}:{}'.format(arg.name, to_hex(b''.join(arg.to_buffers()), 4))]
return " ".join(result)
@abstractstaticmethod
def from_stream(stream, boxed):
raise NotImplementedError()
def to_boxed_bytes(self):
return b''.join([self.number, self.to_bytes()])
def to_hex(self, width=4, boxed=False):
if boxed:
return to_hex(self.to_boxed_bytes(), width)
return to_hex(self.to_bytes(), width)
def to_buffers(self):
raise NotImplementedError()
class TLConstructor(TLCombinator):
def to_buffers(self, boxed=False):
result = [self.number] if boxed else []
for arg in self:
result += arg.to_buffers()
return result
@classmethod
def from_stream(cls, stream):
args = []
for p in cls.param_types:
arg = p.from_stream(stream)
args.append(arg)
return cls.__new__(cls, *args)
def create_constructor(name, number, params, param_types, result_type):
def con__new__(cls, *args, **kwargs):
return super(cls._cls, cls).__new__(cls, *args, **kwargs)
params = namedtuple(name, params)
class_bases = (params, TLConstructor, result_type,)
class_body = dict(
__new__=con__new__,
name=name,
number=number.to_bytes(4, 'little'),
params=params,
param_types=params(*list(param_types)),
result_type=result_type
)
new_type = type(name, class_bases, class_body)
setattr(new_type, '_cls', new_type)
result_type.add_constuctor(new_type)
return new_type
class _IntBase(int):
def __new__(cls, value):
return cls.from_int(value)
@classmethod
def from_int(cls, value):
result = int.__new__(cls, value)
if result.bit_length() > cls._bit_length:
raise ValueError('{:d} cannot fit into a {}bit Integer'.format(result, cls._bit_length))
return result
@classmethod
def from_bytes(cls, data):
value = int.from_bytes(data, 'little')
return cls.from_int(value)
@classmethod
def from_stream(cls, stream):
data = stream.read(cls._byte_length)
if len(data) < cls._byte_length:
raise StreamReadError('{} requires {:d} bytes, only read {:d}'.format(cls, cls._byte_length, len(data)))
return cls.from_bytes(data)
@classmethod
def _to_bytes(cls, value):
return int.to_bytes(value, cls._byte_length, 'little')
def to_bytes(self):
return int.to_bytes(self, self._byte_length, 'little')
def to_buffers(self):
return [self.to_bytes()]
class Int(_IntBase, TLType):
constructors = {}
_bit_length = 32
_byte_length = 4
class int_c(Int, TLConstructor):
"""
int ? = Int
"""
__slots__ = ()
number = encoded_combinator_number('int ? = Int')
name = 'int'
Int.add_constuctor(int_c)
class Long(_IntBase, TLType):
constructors = {}
_bit_length = 64
_byte_length = 8
class long_c(Long, TLConstructor):
"""
long ? = Long
"""
__slots__ = ()
number = encoded_combinator_number('long ? = Long')
name = 'long'
Long.add_constuctor(long_c)
int64_c = long_c # utility alias
class Double(float, TLType):  # float-based so double_c's float.__new__ and struct '<d' packing work
constructors = {}
def __new__(cls, value):
if isinstance(value, bytes):
return double_c.from_bytes(value)
return double_c.from_float(float(value))
class double_c(Double, TLConstructor):
"""
double ? = Double
"""
__slots__ = ()
number = encoded_combinator_number('double ? = Double')
name = 'double'
_struct = Struct('<d')
def to_buffers(self):
return [self.to_bytes()]
def to_bytes(self):
return self._struct.pack(self)
@classmethod
def from_bytes(cls, data):
value = cls._struct.unpack(data)[0]
return float.__new__(cls, value)
@classmethod
def from_float(cls, value):
return float.__new__(cls, value)
@classmethod
def from_stream(cls, stream):
return cls.from_bytes(stream.read(8))
Double.add_constuctor(double_c)
class Vector(UserList, TLType):
constructors = {}
_vector_types = {}
def __new__(cls, *args, **kwargs):
if cls is Vector or cls is vector_c:
vector_item_cls = args[0] if args else kwargs.get('vector_item_cls')
if not issubclass(vector_item_cls, TLType):
raise TypeError('vector_item_cls must be a subclass of TLType')
key = (cls, vector_item_cls,)
vector_cls = cls._vector_types.get(key)
if vector_cls is None:
name = '{}_{}'.format(cls.__name__, vector_item_cls.__name__)
Vector._vector_types[key] = type(name, (cls,), {'_item_cls_':vector_item_cls})
vector_cls = Vector._vector_types.get(key)
return vector_cls
else:
return object.__new__(cls)
def __init__(self, initlist=None):
super().__init__(map(self._item_cls_, initlist))
def insert(self, index, item):
return super().insert(index, self._item_cls_(item))
def append(self, item):
return super().append(self._item_cls_(item))
def extend(self, iterable):
return super().extend(map(self._item_cls_, iterable))
def __setitem__(self, index, item):
return super().__setitem__(index, self._item_cls_(item))
def __add__(self, iterable):
return super().__add__(map(self._item_cls_, iterable))
def __iadd__(self, iterable):
return super().__iadd__(map(self._item_cls_, iterable))
@classmethod
def _from_stream(cls, stream):
num = int.from_bytes(stream.read(4), 'little')
items = []
for i in iter(range(num)):
items.append(cls._item_cls_.from_stream(stream))
return cls(items)
@classmethod
def from_stream(cls, stream):
"""Boxed type combinator loading"""
con_num = stream.read(4)
con = cls.constructors.get(con_num)
if con is None:
raise ValueError('{} does not have combinator with number {}'.format(cls, to_hex(con_num)))
return cls._from_stream(stream)
def _to_bytes(self):
count = len(self).to_bytes(4, 'little')
items = [i.to_bytes() for i in self.data]
return b''.join([count] + items)
def to_bytes(self, boxed=True):
return vector_c.number + self._to_bytes()
class vector_c(Vector, TLConstructor):
number = int(0x1cb5c415).to_bytes(4, 'little')
name = 'vector'
def to_buffers(self):
return [item.to_bytes() for item in self.data]
@classmethod
def from_stream(cls, stream):
return cls._from_stream(stream)
def to_bytes(self):
return self._to_bytes()
Vector.add_constuctor(vector_c)
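# Usage sketch (illustrative, not part of the original source): Vector is
# parameterised by calling it with an item constructor, which returns a new class;
# instances of that class behave like lists whose items are coerced to the item type.
#
#   LongVector = Vector(long_c)
#   v = LongVector([5, 6])
#   v.to_bytes()   # b'\x15\xc4\xb5\x1c' + 4-byte count + two 8-byte little-endian longs
#   Vector(long_c).from_stream(stream)   # boxed read, as used by resPQ_c below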
class Int128(_IntBase, TLType):
constructors = {}
_bit_length = 128
_byte_length = 16
class int128_c(Int128, TLConstructor):
"""
int128 4*[ int ] = Int128
"""
number = encoded_combinator_number('int 4*[ int ] = Int128')
name = 'int128'
Int128.add_constuctor(int128_c)
class Int256(_IntBase, TLType):
constructors = {}
_bit_length = 256
_byte_length = 32
class int256_c(Int256, TLConstructor):
"""
int256 8*[ int ] = Int256
"""
number = encoded_combinator_number('int 8*[ int ] = Int256')
name = 'int256'
Int256.add_constuctor(int256_c)
class String(bytes, TLType):
constructors = {}
def __new__(cls, data):
return cls.from_bytes(data)
@classmethod
def from_stream(cls, stream):
str_len = stream.read(1)[0]
count = 1
if str_len == 254:
str_len = int.from_bytes(stream.read(3), 'little')
count += 3
data = stream.read(str_len)
count += str_len
# get rid of the padded bytes
stream.read((4 - (count % 4)) % 4)
return cls.from_bytes(data)
@classmethod
def from_int(cls, value, byteorder='little', length=None):
if length is None:
length = value.bit_length() // 8 + 1
return cls.from_bytes(value.to_bytes(length, byteorder))
@classmethod
def from_bytes(cls, data):
return bytes.__new__(cls, data)
@classmethod
def from_str(cls, string):
return cls.from_bytes(string.encode('utf-8'))
def to_bytes(self):
str_len = len(self)
pfx = bytes([str_len]) if str_len < 254 else bytes([254]) + str_len.to_bytes(3, 'little')
padding = bytes((4 - (len(pfx) + len(self)) % 4) % 4)
return b''.join([pfx, self, padding])
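# Illustrative encodings (derived from the logic above, not part of the original
# source): String(b'abc').to_bytes() == b'\x03abc' (already a multiple of 4, no
# padding); String(b'hello').to_bytes() == b'\x05hello\x00\x00'; strings of 254
# bytes or more use the 0xfe prefix followed by a 3-byte little-endian length.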
def to_buffers(self):
return [self.to_bytes()]
def to_int(self, byteorder='little'):
return int.from_bytes(self, byteorder)
class string_c(String, TLConstructor):
number = encoded_combinator_number('string ? = String')
name = 'string'
class bytes_c(string_c):
name = 'bytes'
class ResPQ(namedtuple('ResPQ', ['nonce', 'server_nonce', 'pq', 'server_public_key_fingerprints']), TLType):
constructors = {}
class resPQ_c(TLConstructor, ResPQ):
"""
resPQ#05162463 nonce:int128 server_nonce:int128 pq:bytes server_public_key_fingerprints:Vector<long> = ResPQ;
"""
number = b'\x63\x24\x16\x05'
name = 'resPQ_c'
@classmethod
def from_stream(cls, stream):
return tuple.__new__(cls, [
int128_c.from_stream(stream),
int128_c.from_stream(stream),
bytes_c.from_stream(stream),
Vector(long_c).from_stream(stream)
])
ResPQ.add_constuctor(resPQ_c)
class P_Q_inner_data(TLType):
constructors = {}
p_q_inner_data_c = create_constructor(
name='p_q_inner_data_c', number=0x83c95aec,
params=['pq', 'p', 'q', 'nonce', 'server_nonce', 'new_nonce'],
param_types=[bytes_c, bytes_c, bytes_c, int128_c, int128_c, int256_c],
result_type=P_Q_inner_data)
class Server_DH_Params(TLType, namedtuple('Server_DH_Params',
['nonce', 'server_nonce', 'new_nonce_hash', 'encrypted_answer'])):
constructors = {}
def __new__(cls, nonce, server_nonce, new_nonce_hash, encrypted_answer):
raise SyntaxError('Do not call Server_DH_Params directly')
class server_DH_params_fail_c(TLConstructor, Server_DH_Params):
"""
server_DH_params_fail#79cb045d nonce:int128 server_nonce:int128 new_nonce_hash:int128 = Server_DH_Params;
"""
number = b'\x5d\x04\xcb\x79'
name = 'server_DH_params_fail_c'
def __new__(cls, nonce, server_nonce, new_nonce_hash):
return tuple.__new__(cls, [int128_c(nonce), int128_c(server_nonce), int128_c(new_nonce_hash), None])
@classmethod
def from_stream(cls, stream):
return tuple.__new__(cls, [
int128_c.from_stream(stream),
int128_c.from_stream(stream),
int128_c.from_stream(stream),
None
])
class server_DH_params_ok_c(TLConstructor, Server_DH_Params):
"""
server_DH_params_ok#d0e8075c nonce:int128 server_nonce:int128 encrypted_answer:bytes = Server_DH_Params;
"""
number = b'\x5c\x07\xe8\xd0'
name = 'server_DH_params_ok_c'
def __new__(cls, nonce, server_nonce, encrypted_answer):
return tuple.__new__(cls, [int128_c(nonce), int128_c(server_nonce), None, bytes_c(encrypted_answer)])
@classmethod
def from_stream(cls, stream):
return tuple.__new__(cls, [
int128_c.from_stream(stream),
int128_c.from_stream(stream),
None,
bytes_c.from_stream(stream)
])
Server_DH_Params.add_constuctor(server_DH_params_fail_c)
Server_DH_Params.add_constuctor(server_DH_params_ok_c)
class Server_DH_inner_data(TLType):
constructors = {}
server_DH_inner_data_c = create_constructor(
name='server_DH_inner_data_c', number=0xb5890dba,
params=['nonce', 'server_nonce', 'g', 'dh_prime', 'g_a', 'server_time'],
param_types=[int128_c, int128_c, int_c, bytes_c, bytes_c, int_c],
result_type=Server_DH_inner_data)
class Client_DH_Inner_Data(TLType):
constructors = {}
client_DH_inner_data_c = create_constructor(
name='client_DH_inner_data_c', number=0x6643b654,
params=['nonce', 'server_nonce', 'retry_id', 'g_b'],
param_types=[int128_c, int128_c, long_c, bytes_c],
result_type=Client_DH_Inner_Data)
class Set_client_DH_params_answer(TLType):
constructors = {}
dh_gen_ok_c = create_constructor(
name='dh_gen_ok_c', number=0x3bcbf734,
params=['nonce', 'server_nonce', 'new_nonce_hash1'],
param_types=[int128_c, int128_c, int128_c],
result_type=Set_client_DH_params_answer)
dh_gen_retry_c = create_constructor(
name='dh_gen_retry_c', number=0x46dc1fb9,
params=['nonce', 'server_nonce', 'new_nonce_hash2'],
param_types=[int128_c, int128_c, int128_c],
result_type=Set_client_DH_params_answer)
dh_gen_fail_c = create_constructor(
name='dh_gen_fail_c', number=0xa69dae02,
params=['nonce', 'server_nonce', 'new_nonce_hash3'],
param_types=[int128_c, int128_c, int128_c],
result_type=Set_client_DH_params_answer)
class RpcResult(TLType):
constructors = {}
rpc_result_c = create_constructor(
name='rpc_result_c', number=0xf35c6d01,
params=['req_msg_id', 'result'],
param_types=[long_c, TLType],
result_type=RpcResult)
class RpcError(TLType):
constructors = {}
rpc_error_c = create_constructor(
name='rpc_error_c', number=0x2144ca19,
params=['error_code', 'error_message'],
param_types=[int_c, string_c],
result_type=RpcError)
class RpcDropAnswer(TLType):
constructors = {}
rpc_answer_unknown_c = create_constructor(
name='rpc_answer_unknown_c', number=0x5e2ad36e,
params=[],
param_types=[],
result_type=RpcDropAnswer)
rpc_answer_dropped_running_c = create_constructor(
name='rpc_answer_dropped_running_c', number=0xcd78e586,
params=[],
param_types=[],
result_type=RpcDropAnswer)
rpc_answer_dropped_c = create_constructor(
name='rpc_answer_dropped_c', number=0xa43ad8b7,
params=['msg_id', 'seq_no', 'bytes'],
param_types=[long_c, int_c, int_c],
result_type=RpcDropAnswer)
class FutureSalt(TLType):
constructors = {}
future_salt_c = create_constructor(
name='future_salt_c', number=0x0949d9dc,
params=['valid_since', 'valid_until', 'salt'],
param_types=[int_c, int_c, long_c],
result_type=FutureSalt)
class FutureSalts(TLType):
constructors = {}
future_salts_c = create_constructor(
name='future_salts_c', number=0xae500895,
params=['req_msg_id', 'now', 'salts'],
param_types=[long_c, int_c, vector_c(future_salt_c)],
result_type=FutureSalts)
class Pong(TLType):
constructors = {}
pong_c = create_constructor(
name='pong_c', number=0x347773c5,
params=['msg_id', 'ping_id'],
param_types=[long_c, long_c],
result_type=Pong)
class DestroySessionRes(TLType):
constructors = {}
destroy_session_ok_c = create_constructor(
name='destroy_session_ok_c', number=0xe22045fc,
params=['session_id'],
param_types=[long_c],
result_type=DestroySessionRes)
destroy_session_none_c = create_constructor(
name='destroy_session_none_c', number=0x62d350c9,
params=['session_id'],
param_types=[long_c],
result_type=DestroySessionRes)
class NewSession(TLType):
constructors = {}
new_session_created_c = create_constructor(
name='new_session_created_c', number=0x9ec20908,
params=['first_msg_id', 'unique_id', 'server_salt'],
param_types=[long_c, long_c, long_c],
result_type=NewSession)
class Message(TLType):
constructors = {}
message_c = create_constructor(
name='message_c', number=crc32('message msg_id:long seqno:int bytes:int body:Object = Message'.encode()),
params=['msg_id', 'seqno', 'bytes', 'body'],
param_types=[long_c, int_c, int_c, TLType],
result_type=Message)
class MessageContainer(TLType):
constructors = {}
msg_container_c = create_constructor(
name='msg_container_c', number=0x73f1f8dc,
params=['messages'],
param_types=[vector_c(message_c)],
result_type=MessageContainer)
class MessageCopy(TLType):
constructors = {}
msg_copy_c = create_constructor(
name='msg_copy_c', number=0xe06046b2,
params=['orig_message'],
param_types=[Message],
result_type=MessageCopy)
gzip_packed_c = create_constructor(
name='gzip_packed_c', number=0x3072cfa1,
params=['packed_data'],
param_types=[bytes_c],
result_type=TLType)
class MsgsAck(TLType):
constructors = {}
msgs_ack_c = create_constructor(
name='msgs_ack_c', number=0x62d6b459,
params=['msg_ids'],
param_types=[Vector(long_c)],
result_type=MsgsAck)
class BadMsgNotification(TLType):
constructors = {}
bad_msg_notification_c = create_constructor(
name='bad_msg_notification_c', number=0xa7eff811,
params=['bad_msg_id', 'bad_msg_seqno', 'error_code'],
param_types=[long_c, int_c, int_c],
result_type=BadMsgNotification)
bad_server_salt_c = create_constructor(
name='bad_server_salt_c', number=0xedab447b,
params=['bad_msg_id', 'bad_msg_seqno', 'error_code', 'new_server_salt'],
param_types=[long_c, int_c, int_c, long_c],
result_type=BadMsgNotification)
class MsgResendReq(TLType):
constructors = {}
msg_resend_req_c = create_constructor(
name='msg_resend_req_c', number=0x7d861a08,
params=['msg_ids'],
param_types=[Vector(long_c)],
result_type=MsgResendReq)
class MsgsStateReq(TLType):
constructors = {}
msgs_state_req_c = create_constructor(
name='msgs_state_req_c', number=0xda69fb52,
params=['msg_ids'],
param_types=[Vector(long_c)],
result_type=MsgsStateReq)
class MsgsStateInfo(TLType):
constructors = {}
msgs_state_info_c = create_constructor(
name='msgs_state_info_c', number=0x04deb57d,
params=['req_msg_id', 'info'],
param_types=[long_c, bytes_c],
result_type=MsgsStateInfo)
class MsgsAllInfo(TLType):
constructors = {}
msgs_all_info_c = create_constructor(
name='msgs_all_info_c', number=0x8cc0d131,
params=['msg_ids', 'info'],
param_types=[Vector(long_c), bytes_c],
result_type=MsgsAllInfo)
class MsgDetailedInfo(TLType):
constructors = {}
msg_detailed_info_c = create_constructor(
name='msg_detailed_info_c', number=0x276d3ec6,
params=['msg_id', 'answer_msg_id', 'bytes', 'status'],
param_types=[long_c, long_c, int_c, int_c],
result_type=MsgDetailedInfo)
"""
---functions---
"""
class TLFunction(TLCombinator):
def to_buffers(self):
result = [self.number]
for arg in self:
result += arg.to_buffers()
return result
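# Sketch of how a function call is serialized by to_buffers() (assuming every
# argument is a TL instance providing to_buffers()): the 4-byte little-endian
# combinator number comes first, followed by each argument's buffers in order.
# For example, req_pq(nonce) with an int128_c nonce would serialize roughly as
# [b'\x78\x97\x46\x60', <16-byte nonce>].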
class req_pq(namedtuple('req_pq', ['nonce']), TLFunction):
"""req_pq#60469778 nonce:int128 = ResPQ"""
number = int(0x60469778).to_bytes(4, byteorder='little')
    name = 'req_pq'
...
class req_DH_params(namedtuple('req_DH_params',
['nonce', 'server_nonce', 'p', 'q', 'public_key_fingerprint', 'encrypted_data']), TLFunction):
"""
req_DH_params#d712e4be nonce:int128 server_nonce:int128 p:string q:string public_key_fingerprint:long encrypted_data:string = Server_DH_Params
"""
number = int(0xd712e4be).to_bytes(4, byteorder='little')
name = 'req_DH_params'
...
class set_client_DH_params(namedtuple('set_client_DH_params', ['nonce', 'server_nonce', 'encrypted_data']), TLFunction):
"""
set_client_DH_params#f5045f1f nonce:int128 server_nonce:int128 encrypted_data:bytes = Set_client_DH_params_answer
"""
number = int(0xf5045f1f).to_bytes(4, byteorder='little')
name = 'set_client_DH_params'
...
"""
--main api testing--
"""
"""
nearestDc#8e1a1775 country:string this_dc:int nearest_dc:int = NearestDc;
help.getNearestDc#1fb33026 = NearestDc;
"""
class NearestDc(TLType):
constructors = {}
nearestDC_c = create_constructor(
name='nearestDC', number=0x8e1a1775,
params=['country', 'this_dc', 'nearest_dc'],
param_types=[string_c, int_c, int_c],
result_type=NearestDc)
class help_getNearestDc(namedtuple('getNearestDc', []), TLFunction):
number = int(0x1fb33026).to_bytes(4, 'little')
name = 'getNearestDc'
result_type = NearestDc
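# Usage sketch (illustrative): help_getNearestDc takes no parameters, so
# help_getNearestDc().to_buffers() is just [b'\x26\x30\xb3\x1f'] -- the
# combinator number -- and the expected answer is a nearestDC constructor
# carrying country/this_dc/nearest_dc.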
|
|
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import threading
import signal
import sys
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.messaging.states as states
import dcm.agent.messaging.types as message_types
import dcm.agent.messaging.utils as utils
import dcm.agent.events.state_machine as state_machine
import dcm.agent.utils as agent_util
import dcm.eventlog.tracer as tracer
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class ReplyRPC(object):
MISSING_VALUE_STRING = "DEADBEEF"
def __init__(self,
reply_listener,
agent_id,
connection,
request_id,
request_document,
db,
timeout=1.0,
reply_doc=None,
start_state=states.ReplyStates.REQUESTING):
self._agent_id = agent_id
self._request_id = request_id
self._request_document = request_document
self._cancel_callback = None
self._cancel_callback_args = None
self._cancel_callback_kwargs = None
self._reply_message_timer = None
self._reply_listener = reply_listener
self._timeout = timeout
self._conn = connection
self._resend_reply_cnt = 0
self._resend_reply_cnt_threshold = 5
self._lock = threading.RLock()
self._response_doc = reply_doc
self._sm = state_machine.StateMachine(start_state)
self._setup_states()
self._db = db
def get_request_id(self):
return self._request_id
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def get_message_payload(self):
return self._request_document["payload"]
def shutdown(self):
with tracer.RequestTracer(self._request_id):
try:
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_listener.message_done(self)
except Exception as ex:
_g_logger.warn("Error shutting down the request", ex)
def kill(self):
with tracer.RequestTracer(self._request_id):
if self._reply_message_timer:
try:
self._reply_message_timer.cancel()
except Exception as ex:
_g_logger.info("an exception occurred when trying to "
"cancel the timer: " + str(ex))
@agent_util.class_method_sync
def ack(self,
cancel_callback, cancel_callback_args, cancel_callback_kwargs):
"""
Indicate to the messaging system that you have successfully received
this message and stored it for processing.
"""
with tracer.RequestTracer(self._request_id):
self._cancel_callback = cancel_callback
self._cancel_callback_args = cancel_callback_args
if self._cancel_callback_args is None:
self._cancel_callback_args = []
self._cancel_callback_args.insert(0, self)
self._cancel_callback_kwargs = cancel_callback_kwargs
self._sm.event_occurred(states.ReplyEvents.USER_ACCEPTS_REQUEST,
message={})
@agent_util.class_method_sync
def nak(self, response_document):
"""
        This function is called to outright reject the message. The user
is signifying that this message will not be processed at all.
A call to this function signifies that this object will no longer be
referenced by the user.
"""
with tracer.RequestTracer(self._request_id):
self._sm.event_occurred(states.ReplyEvents.USER_REJECTS_REQUEST,
message=response_document)
@agent_util.class_method_sync
def reply(self, response_document):
"""
Send a reply to this request. This signifies that the user is
done with this object.
"""
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply() has been called")
self._sm.event_occurred(states.ReplyEvents.USER_REPLIES,
message=response_document)
@agent_util.class_method_sync
def reply_timeout(self, message_timer):
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply timeout occurred, resending.")
            self._sm.event_occurred(states.ReplyEvents.TIMEOUT,
                                    message_timer=message_timer)
@agent_util.class_method_sync
def incoming_message(self, json_doc):
with tracer.RequestTracer(self._request_id):
type_to_event = {
message_types.MessageTypes.ACK:
states.ReplyEvents.REPLY_ACK_RECEIVED,
message_types.MessageTypes.NACK:
states.ReplyEvents.REPLY_NACK_RECEIVED,
message_types.MessageTypes.CANCEL:
states.ReplyEvents.CANCEL_RECEIVED,
message_types.MessageTypes.STATUS:
states.ReplyEvents.STATUS_RECEIVED,
message_types.MessageTypes.REQUEST:
states.ReplyEvents.REQUEST_RECEIVED
}
if 'type' not in json_doc:
raise exceptions.MissingMessageParameterException('type')
if json_doc['type'] not in type_to_event:
raise exceptions.InvalidMessageParameterValueException(
'type', json_doc['type'])
# this next call drives the state machine
self._sm.event_occurred(type_to_event[json_doc['type']],
message=json_doc)
def _send_reply_message(self, message_timer):
self._reply_message_timer = message_timer
message_timer.send(self._conn)
###################################################################
# state machine event handlers
    # every method that starts with _sm_ is called under the same lock.
###################################################################
def _sm_initial_request_received(self, **kwargs):
"""
This is the initial request, we simply set this to the requesting
state.
"""
pass
def _sm_requesting_retransmission_received(self, **kwargs):
"""
After receiving an initial request we receive a retransmission of it.
The user has not yet acked the message but they have been notified
that the message exists. In this case we do nothing but wait for
the user to ack the message
"""
pass
def _sm_requesting_cancel_received(self, **kwargs):
"""
A cancel message flows over the wire after the request is received
but before it is acknowledged. Here we will tell the user about the
cancel. It is important that the cancel notification comes after
the message received notification.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_requesting_user_accepts(self, **kwargs):
"""
The user decided to accept the message. Here we will send the ack
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_accepts",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_requesting_user_replies(self, **kwargs):
"""
The user decides to reply before acknowledging the message. Therefore
        we just send the reply and it acts as both the ack and the reply.
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "user_replies",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_requesting_user_rejects(self, **kwargs):
"""
The user decides to reject the incoming request so we must send
a nack to the remote side.
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_rejects",
'error_message': "The agent rejected the request.",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_acked_request_received(self, **kwargs):
"""
In this case a retransmission of the request comes in after the user
acknowledged the message. Here we resend the ack.
"""
# reply using the latest message id
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_acked_cancel_received(self, **kwargs):
"""
A cancel is received from the remote end. We simply notify the user
of the request and allow the user to act upon it.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_acked_reply(self, **kwargs):
"""
This is the standard case. A user has accepted the message and is
now replying to it. We send the reply.
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_acked_re_reply(self, **kwargs):
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_request_retrans(self, **kwargs):
"""
After replying to a message we receive a retransmission of the
original request. This can happen if the remote end never receives
an ack and the reply message is either lost or delayed. Here we
retransmit the reply.
"""
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "request_retrans",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_cancel_received(self, **kwargs):
"""
This occurs when a cancel is received after a reply is sent. It can
happen if the remote end sends a cancel before the reply is received.
Because we have already finished with this request we simply ignore
this message.
"""
pass
def _sm_reply_ack_received(self, **kwargs):
"""
This is the standard case. A reply is sent and the ack to that
reply is received. At this point we know that the RPC was
successful.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_ACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Messaging complete. State event transition: "
+ str(self._sm.get_event_list()))
def _sm_reply_nack_received(self, **kwargs):
"""
        The reply was nacked. This is probably the result of a
        retransmission that was not needed.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_NACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Reply NACKed, messaging complete. State event "
"transition: " + str(self._sm.get_event_list()))
def _sm_reply_ack_timeout(self, **kwargs):
"""
        This happens when an ack has still not been received after a given
        amount of time. We thus must re-send the reply.
"""
message_timer = kwargs['message_timer']
# The time out did occur before the message could be acked so we must
# resend it
_g_logger.info("Resending reply")
self._resend_reply_cnt += 1
if self._resend_reply_cnt > self._resend_reply_cnt_threshold:
# TODO punt at some point ?
pass
self._send_reply_message(message_timer)
def _sm_nacked_request_received(self, **kwargs):
"""
This happens when a request is received after it has been nacked.
This will occur if the first nack is lost or delayed. We retransmit
the nack
"""
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'error_message': "The agent already rejected this request",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_cancel_waiting_ack(self, **kwargs):
"""
If a cancel is received while in the requesting state we must make sure
that the user does not get the cancel callback until after they have
acked the message. This handler occurs when the user calls ack()
after a cancel has arrived. Here we just register a cancel callback
and let the user react to it how they will.
"""
dcm_events.register_user_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_send_status(self):
status_doc = {'type': message_types.MessageTypes.STATUS,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "status send",
'agent_id': self._agent_id,
'state': self._sm._current_state,
'reply': self._response_doc}
self._conn.send(status_doc)
def _sm_reinflated_reply_ack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_ACK state")
def _sm_reinflated_reply_nack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_NACK state")
def _reinflate_done(self):
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
def _sm_reply_ack_re_acked(self, message=None):
"""
This is called when a re-inflated state had already been reply acked,
and is now acked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_ack_now_nacked(self, message=None):
"""
This is called whenever a re-inflated command reaches a terminal state
that was
"""
self._reinflate_done()
def _sm_reply_nack_re_nacked(self, message=None):
"""
This is called when a re-inflated state had already been reply nacked,
and is now nacked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_nack_now_acked(self, message=None):
"""
This is called whenever a re-inflated command reaches acked state but
it was previously nacked
"""
self._reinflate_done()
def _sm_ack_reply_nack_received(self, message=None):
_g_logger.warn("A NACK was received when in the ACK state "
+ str(message))
# this will be cleaned up when the command replies, which it is
# required to do
def _sm_replied_nacked_reply(self, message=None):
"""
This is called when a request was received but the ACK for that
request received a NACK. However the command finished running
and a reply was sent back. Here we cancel the message and log the
event
"""
_g_logger.warn("A command that was already finished ended "
+ str(message))
self.shutdown()
def _setup_states(self):
self._sm.add_transition(states.ReplyStates.NEW,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_initial_request_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_cancel_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_requesting_user_accepts)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
None)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_cancel_waiting_ack)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_request_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_cancel_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.ACKED,
self._sm_send_status)
        # if the AM receives an ACK but has never heard of the request ID
        # it will send a nack. this should not happen in a normal course
        # of events. At this point we should just kill the request and
        # log a scary message. We also need to kill anything running for
        # that request.
        # This will happen when the agent manager quits on a request before
        # the agent sends the ack. when the AM receives the ack it has already
        # canceled the request and thus NACKs the ACK
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_ack_reply_nack_received)
        # note: eventually we will want the reply retrans logic to just punt
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_cancel_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY,
self._sm_reply_ack_timeout)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY,
self._sm_acked_re_reply)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.NACKED,
self._sm_nacked_request_received)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.NACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_re_acked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_now_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_ACKED,
None)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY_ACKED,
None)
# this transition should only occur when the AM makes a mistake
# or messages are received out of order.
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_ACKED,
self._sm_reinflated_reply_ack)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_re_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_now_acked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_NACKED,
None)
# this will happen when the plugin finishes and thus replies
# to a request that had its ACK NACKed. In this case we
# just cancel the messaging and log a message
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY_NACKED,
self._sm_replied_nacked_reply)
# this next state should only occur when a message is out
# of order or the agent manager made a mistake
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_NACKED,
self._sm_reinflated_reply_ack)
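    # A rough sketch (not in the original source) of the normal, happy-path
    # flow through the transitions registered above:
    #   NEW --REQUEST_RECEIVED--> REQUESTING --USER_ACCEPTS_REQUEST--> ACKED
    #       --USER_REPLIES--> REPLY --REPLY_ACK_RECEIVED--> REPLY_ACKED
    # Retransmissions, cancels, NACKs, and DB re-inflation branch off of this
    # path and are handled by the individual _sm_* handlers.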
class RequestListener(object):
def __init__(self, conf, sender_connection, dispatcher,
db, id_system=None):
self._conn = sender_connection
self._dispatcher = dispatcher
self._requests = {}
self._messages_processed = 0
self._reply_observers = []
self._timeout = conf.messaging_retransmission_timeout
self._shutdown = False
self._conf = conf
self._db = db
self._id_system = id_system
self._lock = threading.RLock()
self._db.starting_agent()
def get_reply_observers(self):
# get the whole list so that the user can add and remove themselves.
        # This sort of thing should be done only with carefully written code
        # using carefully written observers that do very lightweight
        # nonblocking operations.
return self._reply_observers
def _call_reply_observers(self, func_name, argument):
for o in self._reply_observers:
try:
func = getattr(o, func_name)
func(argument)
except:
_g_logger.exception("A bad observer threw an exception.")
                # don't let a misbehaving observer ruin everything
pass
def _process_doc(self, incoming_doc):
if incoming_doc is None:
return
with tracer.RequestTracer(incoming_doc['request_id']):
self._call_reply_observers("incoming_message", incoming_doc)
_g_logger.debug("New message type %s" % incoming_doc['type'])
# if the agent is misbehaving the AM might tell it to kill itself.
# cold.
if incoming_doc["type"] == message_types.MessageTypes.HEMLOCK:
_g_logger.error("HEMLOCK: DCM told me to kill myself.")
os.killpg(0, signal.SIGKILL)
sys.exit(10)
            # if it is an alert message, short circuit
if incoming_doc["type"] == message_types.MessageTypes.ALERT_ACK:
if self._id_system:
self._id_system.incoming_message(incoming_doc)
return
request_id = incoming_doc["request_id"]
# is this request already in memory?
if request_id in self._requests:
_g_logger.debug("The message was found in the requests.")
# send it through, state machine will deal with it
req = self._requests[request_id]
req.incoming_message(incoming_doc)
return
# if the request id has already been seen by the database
db_record = self._db.lookup_req(request_id)
if db_record:
_g_logger.info("Inflating the record from the DB."
+ request_id)
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout,
reply_doc=db_record.reply_doc,
start_state=db_record.state)
# this will probably be used in the near future so get it
# on the memory list
self._requests[request_id] = req
req.incoming_message(incoming_doc)
return
if incoming_doc["type"] == message_types.MessageTypes.REQUEST:
if len(list(self._requests.keys())) >=\
self._conf.messaging_max_at_once > -1:
# short circuit the case where the agent is too busy
dcm_logger.log_to_dcm_console_overloaded(
msg="The new request was rejected because the agent has too many outstanding requests.")
nack_doc = {
'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message': ("The agent can only handle %d "
"commands at once"
% self._conf.messaging_max_at_once)}
self._conn.send(nack_doc)
return
_g_logger.debug("A new request has come in.")
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout)
self._call_reply_observers("new_message", req)
# only add the message if processing was successful
self._requests[request_id] = req
try:
self._dispatcher.incoming_request(req)
except Exception:
_g_logger.exception("The dispatcher could not handle a "
"message.")
del self._requests[request_id]
dcm_logger.log_to_dcm_console_messaging_error(
msg="The dispatcher could not handle the message.")
raise
else:
# if we have never heard of the ID and this is not a new
# request we return a courtesy error
_g_logger.debug("Unknown message ID sending a NACK")
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message':
"%s is an unknown ID" % request_id}
self._conn.send(nack_doc)
def _validate_doc(self, incoming_doc):
pass
def _send_bad_message_reply(self, incoming_doc, message):
_g_logger.debug("Sending the bad message %s" % message)
        # we want to send a NACK for the message, however it may be malformed
        # and missing message_id or request_id. In this case we send
        # placeholder values for those fields, indicating that *a* message
        # was bad. There will be almost no way for the sender to know which
        # one.
try:
request_id = incoming_doc['request_id']
except KeyError:
request_id = ReplyRPC.MISSING_VALUE_STRING
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'error_message': message,
'agent_id': self._conf.agent_id}
self._conn.send(nack_doc)
def message_done(self, reply_message):
self._lock.acquire()
try:
request_id = reply_message.get_request_id()
del self._requests[request_id]
_g_logger.debug("The message %s has completed and is being "
"removed" % request_id)
self._messages_processed += 1
finally:
self._lock.release()
self._call_reply_observers("message_done", reply_message)
def get_messages_processed(self):
return self._messages_processed
def is_busy(self):
return len(self._requests) != 0
def shutdown(self):
"""
Stop accepting new requests but allow for outstanding messages to
complete.
"""
self._shutdown = True
for req in list(self._requests.values()):
req.kill()
def wait_for_all_nicely(self):
# XXX TODO how long should this block? do we need this?
# looks like just for tests
while self._requests:
dcm_events.poll()
def reply(self, request_id, reply_doc):
reply_req = self._requests[request_id]
reply_req.reply(reply_doc)
def incoming_parent_q_message(self, incoming_doc):
_g_logger.debug("Received message %s" % str(incoming_doc))
try:
self._validate_doc(incoming_doc)
self._process_doc(incoming_doc)
except Exception as ex:
_g_logger.exception(
"Error processing the message: %s" % str(incoming_doc))
self._send_bad_message_reply(incoming_doc, str(ex))
class ReplyObserverInterface(object):
@agent_util.not_implemented_decorator
def new_message(self, reply):
pass
@agent_util.not_implemented_decorator
def message_done(self, reply):
pass
@agent_util.not_implemented_decorator
def incoming_message(self, incoming_doc):
pass
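# Illustrative sketch (not part of the original module): an observer object
# implementing the three methods above can be appended to the list returned by
# RequestListener.get_reply_observers(), e.g.
#
#     class LoggingReplyObserver(ReplyObserverInterface):
#         def new_message(self, reply):
#             _g_logger.debug("observed new message %s", reply.get_request_id())
#         def message_done(self, reply):
#             _g_logger.debug("observed message done %s", reply.get_request_id())
#         def incoming_message(self, incoming_doc):
#             _g_logger.debug("observed incoming doc type %s",
#                             incoming_doc.get('type'))
#
#     request_listener.get_reply_observers().append(LoggingReplyObserver())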
|
|
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import click
from tower_cli.cli import types
from tower_cli.utils import grammar
_field_counter = 0
class BaseField(object):
def __init__(self):
# Track the creation history of each field, for sorting reasons.
global _field_counter
self.number = _field_counter
_field_counter += 1
def __lt__(self, other):
return self.number < other.number
def __gt__(self, other):
return self.number > other.number
class Field(BaseField):
"""A class representing flags on a given field on a model.
This class tracks whether a field is unique, filterable, read-only, etc.
"""
def __init__(self, key=None, type=six.text_type, default=None,
display=True, filterable=True, help_text=None,
is_option=True, password=False, read_only=False,
required=True, show_default=False, unique=False,
multiple=False, no_lookup=False, col_width=None):
# Init the name to blank.
# What's going on here: This is set by the ResourceMeta metaclass
# when the **resource** is instantiated.
# Essentially, in any normal situation, it's safe to expect it
# to be set and non-empty.
self.name = ''
# Save properties of this field.
self.key = key
self.type = type
self.display = display
self.default = default
self.help_text = help_text
self.is_option = is_option
self.filterable = filterable
self.password = password
self.read_only = read_only
self.required = required
self.show_default = show_default
self.unique = unique
self.multiple = multiple
self.no_lookup = no_lookup
self.col_width = col_width
# If this is a password, display is always off.
if self.password:
self.display = False
super(Field, self).__init__()
def __repr__(self):
return '<Field: %s (%s)>' % (self.name, ', '.join(self.flags))
@property
def flags(self):
try:
flags_list = [self.type.__name__.replace('unicode', 'str')]
except AttributeError:
flags_list = [type(self.type).__name__.replace('unicode', 'str')]
if self.read_only:
flags_list.append('read-only')
if self.unique:
flags_list.append('unique')
if not self.filterable:
flags_list.append('not filterable')
if not self.required:
flags_list.append('not required')
return flags_list
@property
def help(self):
"""Return the help text that was passed to the constructor, or a
sensible default if none was provided.
"""
if self.help_text:
return self.help_text
return 'The %s field.' % self.name
@property
def option(self):
"""Return the field name as a bash option string
(e.g. "--field-name").
"""
return '--' + self.name.replace('_', '-')
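    # Illustrative example (not from the original source): a resource declaring
    #     name = Field(unique=True)
    #     description = Field(required=False, display=False)
    # would expose CLI options "--name" and "--description" via the `option`
    # property above, with `flags` reporting "unique" and "not required"
    # respectively.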
class ManyToManyField(BaseField):
"""
A class that contains utilities for the ResourceMeta metaclass
to construct two methods for association and disassociation of the field
    :param other_name: tower-cli resource name for the related resource.
    :param res_name: tower-cli resource name for the primary resource.
        If not given on initialization, it is set later by the ResourceMeta
        metaclass (inferred from the resource's endpoint).
    :param relationship: The API related name for the relationship, e.g. the
        "admins" relationship from org->users.
    :param method_name: The CLI alias for the relationship used in method names.
"""
def __init__(self, other_name, res_name=None,
relationship=None, method_name=None):
# If not defined here, the following fields may be set by the
# resource metaclass:
# res_name - inferred from the endpoint of the resource
# relationship - set to the variable name of the field
# Example:
# class Foo:
# endpoint = '/foos/'
# friends = ManyToManyField('bar')
# in that case, "foo" and "friends" become res_name and relationship
self.other_name = other_name
self.res_name = res_name
self.relationship = relationship
self.method_name = None
self._set_method_names(method_name, relationship)
super(ManyToManyField, self).__init__()
def __repr__(self):
return '<ManyToManyField: %s (%s-%s)>' % (
self.relationship, self.res_name, self.other_name
)
def configure_model(self, attrs, field_name):
'''
        Hook for the ResourceMeta metaclass to call when initializing the
        model class. Saves fields obtained from resource class backlinks.
'''
self.relationship = field_name
self._set_method_names(relationship=field_name)
if self.res_name is None:
self.res_name = grammar.singularize(attrs.get('endpoint', 'unknown').strip('/'))
def _set_method_names(self, method_name=None, relationship=None):
if self.method_name is not None:
return # provided in __init__, do not let metaclass override
suffix = ''
if method_name is not None:
self.method_name = method_name
if method_name != '':
suffix = '_{}'.format(method_name)
elif relationship is not None:
suffix = '_{}'.format(grammar.singularize(relationship))
else:
return
self.associate_method_name = 'associate{}'.format(suffix)
self.disassociate_method_name = 'disassociate{}'.format(suffix)
@property
def associate_method(self):
return self._produce_method()
@property
def disassociate_method(self):
return self._produce_method(disassociate=True)
def _produce_raw_method(self):
'''
Returns a callable which becomes the associate or disassociate
method for the related field.
Method can be overridden to add additional functionality, but
`_produce_method` may also need to be subclassed to decorate
it appropriately.
'''
def method(res_self, **kwargs):
obj_pk = kwargs.get(method._res_name)
other_obj_pk = kwargs.get(method._other_name)
internal_method = getattr(res_self, method._internal_name)
return internal_method(method._relationship, obj_pk, other_obj_pk)
return method
def _produce_method(self, disassociate=False):
method = self._produce_raw_method()
# Apply options for user to specify the 2 resources to associate
method = click.option(
'--{}'.format(self.other_name.replace('_', '-')),
type=types.Related(self.other_name),
required=True
)(method)
method = click.option(
'--{}'.format(self.res_name.replace('_', '-')),
type=types.Related(self.res_name),
required=True
)(method)
        # This does the same thing as @resources.command, but without importing it.
method._cli_command = True
method._cli_command_attrs = dict(use_fields_as_options=False)
# Define field-specific parameters that control functionality
method._relationship = self.relationship
method._res_name = self.res_name
method._other_name = self.other_name
if disassociate:
method._internal_name = '_disassoc'
method.__doc__ = self._produce_doc(action='disassociate')
else:
method._internal_name = '_assoc'
method.__doc__ = self._produce_doc()
return method
def _produce_doc(self, action='associate'):
doc_relation = self.method_name if self.method_name else grammar.singularize(self.relationship)
return """{title_action} {status_article} {status} with this {res_name}.
=====API DOCS=====
{title_action} {status_article} {status} with this {res_name}.
:param {res_name}: Primary key or name of the {res_name} to {action} to.
:type {res_name}: str
:param {other_name}: Primary key or name of the {other_name} to be {action}d.
:type {other_name}: str
:returns: Dictionary of only one key "changed", which indicates whether the {action} succeeded.
:rtype: dict
=====API DOCS=====
""".format(
action=action,
title_action=action.title(),
status_article=grammar.article(doc_relation),
status=doc_relation,
res_name=self.res_name,
other_name=self.other_name,
)
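# Illustrative sketch (assumptions: a Resource subclass with this endpoint and
# the usual ResourceMeta metaclass): given
#
#     class Organization(models.Resource):
#         endpoint = '/organizations/'
#         users = ManyToManyField('user')
#
# configure_model() infers res_name='organization' and relationship='users',
# and _set_method_names() then exposes `associate_user` / `disassociate_user`
# commands that take --organization and --user options and delegate to the
# resource's _assoc/_disassoc helpers.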
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from webdriver_pages import settings
from webdriver_pages.settings import Behaviors, ContentTypes
class PrefsTest(pyauto.PyUITest):
"""TestCase for Preferences."""
INFOBAR_TYPE = 'rph_infobar'
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to dump prefs... ')
self.pprint(self.GetPrefsInfo().Prefs())
def testSessionRestore(self):
"""Test session restore preference."""
url1 = 'http://www.google.com/'
url2 = 'http://news.google.com/'
self.NavigateToURL(url1)
self.AppendTab(pyauto.GURL(url2))
num_tabs = self.GetTabCount()
# Set pref to restore session on startup.
self.SetPrefs(pyauto.kRestoreOnStartup, 1)
logging.debug('Setting %s to 1' % pyauto.kRestoreOnStartup)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 1)
self.assertEqual(num_tabs, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testNavigationStateOnSessionRestore(self):
"""Verify navigation state is preserved on session restore."""
urls = ('http://www.google.com/',
'http://news.google.com/',
'http://dev.chromium.org/',)
for url in urls:
self.NavigateToURL(url)
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[-2])
self.SetPrefs(pyauto.kRestoreOnStartup, 1) # set pref to restore session
self.RestartBrowser(clear_profile=False)
# Verify that navigation state (forward/back state) is restored.
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[0])
for i in (-2, -1):
      self.TabGoForward()
self.assertEqual(self.GetActiveTabURL().spec(), urls[i])
def testSessionRestoreURLs(self):
"""Verify restore URLs preference."""
url1 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
url2 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
# Set pref to restore given URLs on startup
self.SetPrefs(pyauto.kRestoreOnStartup, 4) # 4 is for restoring URLs
self.SetPrefs(pyauto.kURLsToRestoreOnStartup, [url1, url2])
self.RestartBrowser(clear_profile=False)
# Verify
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 4)
self.assertEqual(2, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testHomepagePrefs(self):
"""Verify homepage prefs."""
# "Use the New Tab page"
self.SetPrefs(pyauto.kHomePageIsNewTabPage, True)
logging.debug('Setting %s to 1' % pyauto.kHomePageIsNewTabPage)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage),
True)
# "Open this page"
url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
self.SetPrefs(pyauto.kHomePage, url)
self.SetPrefs(pyauto.kHomePageIsNewTabPage, False)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePage), url)
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage))
# TODO(nirnimesh): Actually verify that homepage loads.
# This requires telling pyauto *not* to set about:blank as homepage.
def testGeolocationPref(self):
"""Verify geolocation pref.
Checks for the geolocation infobar.
"""
# GetBrowserInfo() call seems to fail later on in this test. Call it early.
# crbug.com/89000
branding = self.GetBrowserInfo()['properties']['branding']
url = self.GetFileURLForPath(os.path.join( # triggers geolocation
self.DataDir(), 'geolocation', 'geolocation_on_load.html'))
self.assertEqual(3, # default state
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.NavigateToURL(url)
self.assertTrue(self.WaitForInfobarCount(1))
self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
# Disable geolocation
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 2)
self.assertEqual(2,
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.ReloadTab()
# Fails on Win7/Vista Chromium bots. crbug.com/89000
if (self.IsWin7() or self.IsWinVista()) and branding == 'Chromium':
return
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
def testUnderTheHoodPref(self):
"""Verify the security preferences for Under the Hood.
The setting is enabled by default."""
pref_list = [pyauto.kNetworkPredictionEnabled, pyauto.kSafeBrowsingEnabled,
pyauto.kAlternateErrorPagesEnabled,
pyauto.kSearchSuggestEnabled]
for pref in pref_list:
# Verify the default value
self.assertEqual(self.GetPrefsInfo().Prefs(pref), True)
self.SetPrefs(pref, False)
self.RestartBrowser(clear_profile=False)
for pref in pref_list:
self.assertEqual(self.GetPrefsInfo().Prefs(pref), False)
def testHaveLocalStatePrefs(self):
"""Verify that we have some Local State prefs."""
self.assertTrue(self.GetLocalStatePrefsInfo())
def testAllowSelectedGeoTracking(self):
"""Verify hostname pattern and behavior for allowed tracking."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('accept', infobar_index=0) # Allow tracking.
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Allow the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 1}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testDismissedInfobarSavesNoEntry(self):
"""Verify dismissing infobar does not save an exception entry."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetFileURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertEqual(
{}, self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testGeolocationBlockedWhenTrackingDenied(self):
"""Verify geolocations is blocked when tracking is denied.
The test verifies the blocked hostname pattern entry on the Geolocations
exceptions page.
"""
# Ask for permission when site wants to track.
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('cancel', infobar_index=0) # Deny tracking.
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Block the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 2}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def _CheckForVisibleImage(self, tab_index=0, windex=0):
"""Checks whether or not an image is visible on the webpage.
Args:
tab_index: Tab index. Defaults to 0 (first tab).
windex: Window index. Defaults to 0 (first window).
Returns:
True if image is loaded, otherwise returns False if image is not loaded.
"""
    # Checks whether an image is loaded by checking the area (width
    # and height) of the image. If the area is non-zero then the image is
    # visible. If the area is zero then the image is not loaded. For a
    # blocked image, Chrome zeros the |naturalWidth| and |naturalHeight|.
script = """
for (i=0; i < document.images.length; i++) {
if ((document.images[i].naturalWidth != 0) &&
(document.images[i].naturalHeight != 0)) {
window.domAutomationController.send(true);
}
}
window.domAutomationController.send(false);
"""
return self.ExecuteJavascript(script, windex=windex, tab_index=tab_index)
def testImagesNotBlockedInIncognito(self):
"""Verify images are not blocked in Incognito mode."""
url = self.GetHttpURLForDataPath('settings', 'image_page.html')
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 1, 0)
self.assertTrue(self._CheckForVisibleImage(windex=1),
msg='No visible images found in Incognito mode.')
def testBlockImagesForHostname(self):
"""Verify images blocked for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.BLOCK)
# Add an exception BLOCK for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertFalse(self._CheckForVisibleImage(),
msg='At least one visible image found.')
def testAllowImagesForHostname(self):
"""Verify images allowed for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.ALLOW)
# Add an exception ALLOW for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertTrue(self._CheckForVisibleImage(),
msg='No visible images found.')
def testProtocolHandlerRegisteredCorrectly(self):
"""Verify sites that ask to be default handlers registers correctly."""
url = self.GetHttpURLForDataPath('settings', 'protocol_handler.html')
self.NavigateToURL(url)
# Returns a dictionary with the custom handler.
asked_handler_dict = self._driver.execute_script(
'return registerCustomHandler()')
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self._driver.find_element_by_id('test_protocol').click()
self.assertTrue(
self._driver.execute_script(
'return doesQueryConformsToProtocol("%s", "%s")'
% (asked_handler_dict['query_key'],
asked_handler_dict['query_value'])),
msg='Protocol did not register correctly.')
if __name__ == '__main__':
pyauto_functional.Main()
|
|
import base64
import binascii
from datetime import timedelta
from functools import cached_property
import django.core.exceptions
from django.conf import settings
from django.contrib.auth import user_logged_in
from django.contrib.auth.hashers import is_password_usable
from django.core.cache import cache
from django.core.mail import EmailMessage
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import get_template
from rest_framework import generics, mixins, status, viewsets
from rest_framework.authentication import get_authorization_header
from rest_framework.exceptions import (NotAcceptable, NotFound, PermissionDenied, ValidationError)
from rest_framework.permissions import IsAuthenticated, SAFE_METHODS
from rest_framework.renderers import JSONRenderer, StaticHTMLRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.views import APIView
import desecapi.authentication as auth
from desecapi import metrics, models, permissions, serializers
from desecapi.exceptions import ConcurrencyException
from desecapi.pdns import get_serials
from desecapi.pdns_change_tracker import PDNSChangeTracker
from desecapi.renderers import PlainTextRenderer
def generate_confirmation_link(request, action_serializer, viewname, **kwargs):
action = action_serializer.Meta.model(**kwargs)
action_data = action_serializer(action).data
confirmation_link = reverse(viewname, request=request, args=[action_data['code']])
return confirmation_link, action_serializer.validity_period
class EmptyPayloadMixin:
def initialize_request(self, request, *args, **kwargs):
# noinspection PyUnresolvedReferences
request = super().initialize_request(request, *args, **kwargs)
try:
no_data = request.stream is None
except:
no_data = True
if no_data:
# In this case, data and files are both empty, so we can set request.data=None (instead of the default {}).
# This allows distinguishing missing payload from empty dict payload.
# See https://github.com/encode/django-rest-framework/pull/7195
request._full_data = None
return request
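    # Sketch of how a downstream view might use this distinction (illustrative):
    #     if request.data is None:
    #         ...  # the client sent no payload at all
    #     else:
    #         ...  # a payload was sent, possibly the empty object {}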
class IdempotentDestroyMixin:
def destroy(self, request, *args, **kwargs):
try:
# noinspection PyUnresolvedReferences
super().destroy(request, *args, **kwargs)
except Http404:
pass
return Response(status=status.HTTP_204_NO_CONTENT)
class TokenViewSet(IdempotentDestroyMixin, viewsets.ModelViewSet):
serializer_class = serializers.TokenSerializer
permission_classes = (IsAuthenticated, permissions.HasManageTokensPermission,)
throttle_scope = 'account_management_passive'
def get_queryset(self):
return self.request.user.token_set.all()
def get_serializer(self, *args, **kwargs):
# When creating a new token, return the plaintext representation
if self.action == 'create':
kwargs.setdefault('include_plain', True)
return super().get_serializer(*args, **kwargs)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class DomainViewSet(IdempotentDestroyMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
serializer_class = serializers.DomainSerializer
lookup_field = 'name'
lookup_value_regex = r'[^/]+'
@property
def permission_classes(self):
ret = [IsAuthenticated, permissions.IsOwner]
if self.action == 'create':
ret.append(permissions.WithinDomainLimit)
if self.request.method not in SAFE_METHODS:
ret.append(permissions.TokenNoDomainPolicy)
return ret
@property
def throttle_scope(self):
return 'dns_api_read' if self.request.method in SAFE_METHODS else 'dns_api_write_domains'
@property
def pagination_class(self):
        # Turn off pagination when filtering for a covered qname: pagination would re-order by `created` (not what
        # we want here) after taking a slice (which is forbidden anyway), and we don't need pagination here anyway.
if 'owns_qname' in self.request.query_params:
return None
else:
return api_settings.DEFAULT_PAGINATION_CLASS
def get_queryset(self):
qs = self.request.user.domains
owns_qname = self.request.query_params.get('owns_qname')
if owns_qname is not None:
qs = qs.filter_qname(owns_qname).order_by('-name_length')[:1]
return qs
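    # Illustrative request (hypothetical names, and assuming filter_qname matches ancestor domains):
    # `GET /api/v1/domains/?owns_qname=www.dev.example.com` returns at most one domain -- the user's
    # longest domain covering the queried name, e.g. `dev.example.com` rather than `example.com`.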
def get_serializer(self, *args, **kwargs):
include_keys = (self.action in ['create', 'retrieve'])
return super().get_serializer(*args, include_keys=include_keys, **kwargs)
def perform_create(self, serializer):
with PDNSChangeTracker():
domain = serializer.save(owner=self.request.user)
# TODO this line raises if the local public suffix is not in our database!
PDNSChangeTracker.track(lambda: self.auto_delegate(domain))
@staticmethod
def auto_delegate(domain: models.Domain):
if domain.is_locally_registrable:
parent_domain = models.Domain.objects.get(name=domain.parent_domain_name)
parent_domain.update_delegation(domain)
def perform_destroy(self, instance: models.Domain):
with PDNSChangeTracker():
instance.delete()
if instance.is_locally_registrable:
parent_domain = models.Domain.objects.get(name=instance.parent_domain_name)
with PDNSChangeTracker():
parent_domain.update_delegation(instance)
class TokenPoliciesRoot(APIView):
permission_classes = [
IsAuthenticated,
permissions.HasManageTokensPermission | permissions.AuthTokenCorrespondsToViewToken,
]
def get(self, request, *args, **kwargs):
return Response({'domain': reverse('token_domain_policies-list', request=request, kwargs=kwargs)})
class TokenDomainPolicyViewSet(IdempotentDestroyMixin, viewsets.ModelViewSet):
lookup_field = 'domain__name'
lookup_value_regex = DomainViewSet.lookup_value_regex
pagination_class = None
serializer_class = serializers.TokenDomainPolicySerializer
throttle_scope = 'account_management_passive'
@property
def permission_classes(self):
ret = [IsAuthenticated]
if self.request.method in SAFE_METHODS:
ret.append(permissions.HasManageTokensPermission | permissions.AuthTokenCorrespondsToViewToken)
else:
ret.append(permissions.HasManageTokensPermission)
return ret
def dispatch(self, request, *args, **kwargs):
# map default policy onto domain_id IS NULL
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
try:
if kwargs[lookup_url_kwarg] == 'default':
kwargs[lookup_url_kwarg] = None
except KeyError:
pass
return super().dispatch(request, *args, **kwargs)
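    # Illustrative mapping (URL layout assumed from the route names above): requesting
    # `.../policies/domain/default/` therefore looks up the policy row whose domain is NULL,
    # i.e. the token's default policy, rather than a domain literally named "default".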
def get_queryset(self):
return models.TokenDomainPolicy.objects.filter(token_id=self.kwargs['token_id'], token__user=self.request.user)
def perform_destroy(self, instance):
try:
super().perform_destroy(instance)
except django.core.exceptions.ValidationError as exc:
raise ValidationError(exc.message_dict, code='precedence')
class SerialListView(APIView):
permission_classes = (permissions.IsVPNClient,)
throttle_classes = [] # don't break slaves when they ask too often (our cached responses are cheap)
def get(self, request, *args, **kwargs):
key = 'desecapi.views.serials'
serials = cache.get(key)
if serials is None:
serials = get_serials()
cache.get_or_set(key, serials, timeout=15)
return Response(serials)
class RRsetView:
serializer_class = serializers.RRsetSerializer
permission_classes = (IsAuthenticated, permissions.IsDomainOwner, permissions.TokenHasDomainRRsetsPermission,)
@property
def domain(self):
try:
return self.request.user.domains.get(name=self.kwargs['name'])
except models.Domain.DoesNotExist:
raise Http404
@property
def throttle_scope(self):
return 'dns_api_read' if self.request.method in SAFE_METHODS else 'dns_api_write_rrsets'
@property
def throttle_scope_bucket(self):
# Note: bucket should remain constant even when domain is recreated
return None if self.request.method in SAFE_METHODS else self.kwargs['name']
def get_queryset(self):
return self.domain.rrset_set
def get_serializer_context(self):
# noinspection PyUnresolvedReferences
return {**super().get_serializer_context(), 'domain': self.domain}
def perform_update(self, serializer):
with PDNSChangeTracker():
super().perform_update(serializer)
class RRsetDetail(RRsetView, IdempotentDestroyMixin, generics.RetrieveUpdateDestroyAPIView):
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
filter_kwargs = {k: self.kwargs[k] for k in ['subname', 'type']}
obj = generics.get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
def update(self, request, *args, **kwargs):
response = super().update(request, *args, **kwargs)
if response.data is None:
response.status_code = 204
return response
def perform_destroy(self, instance):
with PDNSChangeTracker():
super().perform_destroy(instance)
class RRsetList(RRsetView, EmptyPayloadMixin, generics.ListCreateAPIView, generics.UpdateAPIView):
def get_queryset(self):
rrsets = super().get_queryset()
for filter_field in ('subname', 'type'):
value = self.request.query_params.get(filter_field)
if value is not None:
# TODO consider moving this
if filter_field == 'type' and value in models.RR_SET_TYPES_AUTOMATIC:
raise PermissionDenied("You cannot tinker with the %s RRset." % value)
rrsets = rrsets.filter(**{'%s__exact' % filter_field: value})
return rrsets.all() # without .all(), cache is sometimes inconsistent with actual state in bulk tests. (Why?)
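    # Illustrative queries (hypothetical zone name): `GET .../domains/example.com/rrsets/?subname=www&type=A`
    # narrows the listing to the `www` A records, while passing a type listed in
    # models.RR_SET_TYPES_AUTOMATIC is rejected with 403, since those RRsets are server-managed.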
def get_object(self):
# For this view, the object we're operating on is the queryset that one can also GET. Serializing a queryset
# is fine as per https://www.django-rest-framework.org/api-guide/serializers/#serializing-multiple-objects.
        # We skip checking object permissions here to avoid evaluating the queryset. The user can access all of
        # their RRsets anyway.
return self.filter_queryset(self.get_queryset())
def get_serializer(self, *args, **kwargs):
kwargs = kwargs.copy()
if 'many' not in kwargs:
if self.request.method in ['POST']:
kwargs['many'] = isinstance(kwargs.get('data'), list)
elif self.request.method in ['PATCH', 'PUT']:
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)
def perform_create(self, serializer):
with PDNSChangeTracker():
super().perform_create(serializer)
class Root(APIView):
def get(self, request, *args, **kwargs):
if self.request.user.is_authenticated:
routes = {
'account': {
'show': reverse('account', request=request),
'delete': reverse('account-delete', request=request),
'change-email': reverse('account-change-email', request=request),
'reset-password': reverse('account-reset-password', request=request),
},
'logout': reverse('logout', request=request),
'tokens': reverse('token-list', request=request),
'domains': reverse('domain-list', request=request),
}
else:
routes = {
'register': reverse('register', request=request),
'login': reverse('login', request=request),
'reset-password': reverse('account-reset-password', request=request),
}
return Response(routes)
class DynDNS12UpdateView(generics.GenericAPIView):
authentication_classes = (auth.TokenAuthentication, auth.BasicTokenAuthentication, auth.URLParamAuthentication,)
permission_classes = (permissions.TokenHasDomainDynDNSPermission,)
renderer_classes = [PlainTextRenderer]
serializer_class = serializers.RRsetSerializer
throttle_scope = 'dyndns'
@property
def throttle_scope_bucket(self):
return self.domain.name
def _find_ip(self, params, version):
if version == 4:
look_for = '.'
elif version == 6:
look_for = ':'
else:
            raise ValueError(f'Unsupported IP version: {version}')
# Check URL parameters
for p in params:
if p in self.request.query_params:
if not len(self.request.query_params[p]):
return None
if look_for in self.request.query_params[p]:
return self.request.query_params[p]
# Check remote IP address
client_ip = self.request.META.get('REMOTE_ADDR')
if look_for in client_ip:
return client_ip
# give up
return None
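    # Worked example (URL and values illustrative): for `GET /dyndns/update?myipv4=203.0.113.7&myipv6=`,
    # _find_ip(['myip', 'myipv4', 'ip'], version=4) returns '203.0.113.7' (it contains '.'), while
    # _find_ip(['myipv6', 'ipv6', 'myip', 'ip'], version=6) returns None: the empty `myipv6` parameter
    # requests deletion of the AAAA record and prevents falling back to REMOTE_ADDR.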
@cached_property
def qname(self):
# hostname parameter
try:
if self.request.query_params['hostname'] != 'YES':
return self.request.query_params['hostname'].lower()
except KeyError:
pass
# host_id parameter
try:
return self.request.query_params['host_id'].lower()
except KeyError:
pass
# http basic auth username
try:
domain_name = base64.b64decode(
get_authorization_header(self.request).decode().split(' ')[1].encode()).decode().split(':')[0]
if domain_name and '@' not in domain_name:
return domain_name.lower()
except (binascii.Error, IndexError, UnicodeDecodeError):
pass
# username parameter
try:
return self.request.query_params['username'].lower()
except KeyError:
pass
# only domain associated with this user account
try:
return self.request.user.domains.get().name
except models.Domain.MultipleObjectsReturned:
raise ValidationError(detail={
"detail": "Request does not properly specify domain for update.",
"code": "domain-unspecified"
})
except models.Domain.DoesNotExist:
metrics.get('desecapi_dynDNS12_domain_not_found').inc()
raise NotFound('nohost')
@cached_property
def domain(self):
try:
return models.Domain.objects.filter_qname(self.qname, owner=self.request.user).order_by('-name_length')[0]
except (IndexError, ValueError):
raise NotFound('nohost')
@property
def subname(self):
return self.qname.rpartition(f'.{self.domain.name}')[0]
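    # Example (hypothetical names): qname 'www.home.example.dedyn.io' with domain 'example.dedyn.io'
    # yields subname 'www.home'; if qname equals the domain name itself, rpartition finds no separator
    # and subname is '' (the zone apex).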
def get_serializer_context(self):
return {**super().get_serializer_context(), 'domain': self.domain, 'minimum_ttl': 60}
def get_queryset(self):
return self.domain.rrset_set.filter(subname=self.subname, type__in=['A', 'AAAA'])
def get(self, request, *args, **kwargs):
instances = self.get_queryset().all()
ipv4 = self._find_ip(['myip', 'myipv4', 'ip'], version=4)
ipv6 = self._find_ip(['myipv6', 'ipv6', 'myip', 'ip'], version=6)
data = [
{'type': 'A', 'subname': self.subname, 'ttl': 60, 'records': [ipv4] if ipv4 else []},
{'type': 'AAAA', 'subname': self.subname, 'ttl': 60, 'records': [ipv6] if ipv6 else []},
]
serializer = self.get_serializer(instances, data=data, many=True, partial=True)
try:
serializer.is_valid(raise_exception=True)
except ValidationError as e:
if any(
any(
getattr(non_field_error, 'code', '') == 'unique'
for non_field_error
in err.get('non_field_errors', [])
)
for err in e.detail
):
raise ConcurrencyException from e
raise e
with PDNSChangeTracker():
serializer.save()
return Response('good', content_type='text/plain')
class DonationList(generics.CreateAPIView):
serializer_class = serializers.DonationSerializer
def perform_create(self, serializer):
instance = serializer.save()
context = {
'donation': instance,
'creditoridentifier': settings.SEPA['CREDITOR_ID'],
'creditorname': settings.SEPA['CREDITOR_NAME'],
}
# internal desec notification
content_tmpl = get_template('emails/donation/desec-content.txt')
subject_tmpl = get_template('emails/donation/desec-subject.txt')
attachment_tmpl = get_template('emails/donation/desec-attachment-jameica.txt')
from_tmpl = get_template('emails/from.txt')
email = EmailMessage(subject_tmpl.render(context),
content_tmpl.render(context),
from_tmpl.render(context),
['donation@desec.io'],
attachments=[
('jameica-directdebit.xml',
attachment_tmpl.render(context),
'text/xml')
])
email.send()
# donor notification
if instance.email:
content_tmpl = get_template('emails/donation/donor-content.txt')
subject_tmpl = get_template('emails/donation/donor-subject.txt')
footer_tmpl = get_template('emails/footer.txt')
email = EmailMessage(subject_tmpl.render(context),
content_tmpl.render(context) + footer_tmpl.render(),
from_tmpl.render(context),
[instance.email])
email.send()
class AccountCreateView(generics.CreateAPIView):
serializer_class = serializers.RegisterAccountSerializer
throttle_scope = 'account_management_active'
def create(self, request, *args, **kwargs):
        # Create the user and trigger email verification.
# Alternative would be to create user once email is verified, but this could be abused for bulk email.
serializer = self.get_serializer(data=request.data)
activation_required = settings.USER_ACTIVATION_REQUIRED
try:
serializer.is_valid(raise_exception=True)
except ValidationError as e:
# Hide existing users
email_detail = e.detail.pop('email', [])
email_detail = [detail for detail in email_detail if detail.code != 'unique']
if email_detail:
e.detail['email'] = email_detail
if e.detail:
raise e
else:
# create user
user = serializer.save(is_active=(not activation_required))
# send email if needed
domain = serializer.validated_data.get('domain')
if domain or activation_required:
link, validity_period = generate_confirmation_link(request,
serializers.AuthenticatedActivateUserActionSerializer,
'confirm-activate-account', user=user, domain=domain)
user.send_email('activate-with-domain' if domain else 'activate', context={
'confirmation_link': link,
'link_expiration_hours': validity_period // timedelta(hours=1),
'domain': domain,
})
# This request is unauthenticated, so don't expose whether we did anything.
message = 'Welcome! Please check your mailbox.' if activation_required else 'Welcome!'
return Response(data={'detail': message}, status=status.HTTP_202_ACCEPTED)
class AccountView(generics.RetrieveAPIView):
permission_classes = (IsAuthenticated, permissions.TokenNoDomainPolicy,)
serializer_class = serializers.UserSerializer
throttle_scope = 'account_management_passive'
def get_object(self):
return self.request.user
class AccountDeleteView(APIView):
authentication_classes = (auth.EmailPasswordPayloadAuthentication,)
permission_classes = (IsAuthenticated,)
response_still_has_domains = Response(
data={'detail': 'To delete your user account, first delete all of your domains.'},
status=status.HTTP_409_CONFLICT,
)
throttle_scope = 'account_management_active'
def post(self, request, *args, **kwargs):
if self.request.user.domains.exists():
return self.response_still_has_domains
link, validity_period = generate_confirmation_link(request,
serializers.AuthenticatedDeleteUserActionSerializer,
'confirm-delete-account', user=self.request.user)
request.user.send_email('delete-user', context={
'confirmation_link': link,
'link_expiration_hours': validity_period // timedelta(hours=1),
})
return Response(data={'detail': 'Please check your mailbox for further account deletion instructions.'},
status=status.HTTP_202_ACCEPTED)
class AccountLoginView(generics.GenericAPIView):
authentication_classes = (auth.EmailPasswordPayloadAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = serializers.TokenSerializer
throttle_scope = 'account_management_passive'
def post(self, request, *args, **kwargs):
user = self.request.user
token = models.Token.objects.create(user=user, name="login", perm_manage_tokens=True,
max_age=timedelta(days=7), max_unused_period=timedelta(hours=1))
user_logged_in.send(sender=user.__class__, request=self.request, user=user)
data = self.get_serializer(token, include_plain=True).data
return Response(data)
class AccountLogoutView(APIView, mixins.DestroyModelMixin):
authentication_classes = (auth.TokenAuthentication,)
permission_classes = (IsAuthenticated,)
throttle_classes = [] # always allow people to log out
def get_object(self):
# self.request.auth contains the hashed key as it is stored in the database
return models.Token.objects.get(key=self.request.auth)
def post(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
class AccountChangeEmailView(generics.GenericAPIView):
authentication_classes = (auth.EmailPasswordPayloadAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = serializers.ChangeEmailSerializer
throttle_scope = 'account_management_active'
def post(self, request, *args, **kwargs):
# Check password and extract email
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
new_email = serializer.validated_data['new_email']
link, validity_period = generate_confirmation_link(request,
serializers.AuthenticatedChangeEmailUserActionSerializer,
'confirm-change-email', user=request.user, new_email=new_email)
request.user.send_email('change-email', recipient=new_email, context={
'confirmation_link': link,
'link_expiration_hours': validity_period // timedelta(hours=1),
'old_email': request.user.email,
'new_email': new_email,
})
# At this point, we know that we are talking to the user, so we can tell that we sent an email.
return Response(data={'detail': 'Please check your mailbox to confirm email address change.'},
status=status.HTTP_202_ACCEPTED)
class AccountResetPasswordView(generics.GenericAPIView):
serializer_class = serializers.ResetPasswordSerializer
throttle_scope = 'account_management_active'
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
email = serializer.validated_data['email']
user = models.User.objects.get(email=email, is_active=True)
except models.User.DoesNotExist:
pass
else:
self.send_reset_token(user, request)
# This request is unauthenticated, so don't expose whether we did anything.
return Response(data={'detail': 'Please check your mailbox for further password reset instructions. '
'If you did not receive an email, please contact support.'},
status=status.HTTP_202_ACCEPTED)
@staticmethod
def send_reset_token(user, request):
link, validity_period = generate_confirmation_link(request,
serializers.AuthenticatedResetPasswordUserActionSerializer,
'confirm-reset-password', user=user)
user.send_email('reset-password', context={
'confirmation_link': link,
'link_expiration_hours': validity_period // timedelta(hours=1),
})
class AuthenticatedActionView(generics.GenericAPIView):
"""
Abstract class. Deserializes the given payload according the serializers specified by the view extending
this class. If the `serializer.is_valid`, `act` is called on the action object.
Summary of the behavior depending on HTTP method and Accept: header:
GET POST other method
Accept: text/html forward to `self.html_url` if any perform action 405 Method Not Allowed
else HTTP 406 Not Acceptable perform action 405 Method Not Allowed
"""
authenticated_action = None
html_url = None # Redirect GET requests to this webapp GUI URL
http_method_names = ['get', 'post'] # GET is for redirect only
renderer_classes = [JSONRenderer, StaticHTMLRenderer]
@property
def authentication_classes(self):
# This prevents both code evaluation and user-specific throttling when we only want a redirect
return () if self.request.method in SAFE_METHODS else (auth.AuthenticatedBasicUserActionAuthentication,)
@property
def throttle_scope(self):
return 'account_management_passive' if self.request.method in SAFE_METHODS else 'account_management_active'
def get_serializer_context(self):
return {
**super().get_serializer_context(),
'code': self.kwargs['code'],
'validity_period': self.get_serializer_class().validity_period,
}
def get(self, request, *args, **kwargs):
# Redirect browsers to frontend if available
is_redirect = (request.accepted_renderer.format == 'html') and self.html_url is not None
if is_redirect:
# Careful: This can generally lead to an open redirect if values contain slashes!
# However, it cannot happen for Django view kwargs.
return redirect(self.html_url.format(**kwargs))
else:
raise NotAcceptable
def post(self, request, *args, **kwargs):
super().perform_authentication(request)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
self.authenticated_action = serializer.Meta.model(**serializer.validated_data)
except ValueError: # this happens when state cannot be verified
ex = ValidationError('This action cannot be carried out because another operation has been performed, '
'invalidating this one. (Are you trying to perform this action twice?)')
ex.status_code = status.HTTP_409_CONFLICT
raise ex
self.authenticated_action.act()
return self.finalize()
def finalize(self):
raise NotImplementedError
class AuthenticatedActivateUserActionView(AuthenticatedActionView):
html_url = '/confirm/activate-account/{code}/'
serializer_class = serializers.AuthenticatedActivateUserActionSerializer
def finalize(self):
if not self.authenticated_action.domain:
return self._finalize_without_domain()
else:
domain = self._create_domain()
return self._finalize_with_domain(domain)
def _create_domain(self):
serializer = serializers.DomainSerializer(
data={'name': self.authenticated_action.domain},
context=self.get_serializer_context()
)
try:
serializer.is_valid(raise_exception=True)
except ValidationError as e: # e.g. domain name unavailable
self.authenticated_action.user.delete()
reasons = ', '.join([detail.code for detail in e.detail.get('name', [])])
raise ValidationError(
f'The requested domain {self.authenticated_action.domain} could not be registered (reason: {reasons}). '
f'Please start over and sign up again.'
)
# TODO the following line is subject to race condition and can fail, as for the domain name, we have that
# time-of-check != time-of-action
return PDNSChangeTracker.track(lambda: serializer.save(owner=self.authenticated_action.user))
def _finalize_without_domain(self):
if not is_password_usable(self.authenticated_action.user.password):
AccountResetPasswordView.send_reset_token(self.authenticated_action.user, self.request)
return Response({
'detail': 'Success! We sent you instructions on how to set your password.'
})
return Response({
'detail': 'Success! Your account has been activated, and you can now log in.'
})
def _finalize_with_domain(self, domain):
if domain.is_locally_registrable:
# TODO the following line raises Domain.DoesNotExist under unknown conditions
PDNSChangeTracker.track(lambda: DomainViewSet.auto_delegate(domain))
token = models.Token.objects.create(user=domain.owner, name='dyndns')
return Response({
'detail': 'Success! Here is the password ("token") to configure your router (or any other dynDNS '
'client). This password is different from your account password for security reasons.',
'domain': serializers.DomainSerializer(domain).data,
**serializers.TokenSerializer(token, include_plain=True).data,
})
else:
return Response({
'detail': 'Success! Please check the docs for the next steps, https://desec.readthedocs.io/.',
'domain': serializers.DomainSerializer(domain, include_keys=True).data,
})
class AuthenticatedChangeEmailUserActionView(AuthenticatedActionView):
html_url = '/confirm/change-email/{code}/'
serializer_class = serializers.AuthenticatedChangeEmailUserActionSerializer
def finalize(self):
return Response({
'detail': f'Success! Your email address has been changed to {self.authenticated_action.user.email}.'
})
class AuthenticatedResetPasswordUserActionView(AuthenticatedActionView):
html_url = '/confirm/reset-password/{code}/'
serializer_class = serializers.AuthenticatedResetPasswordUserActionSerializer
def finalize(self):
return Response({'detail': 'Success! Your password has been changed.'})
class AuthenticatedDeleteUserActionView(AuthenticatedActionView):
html_url = '/confirm/delete-account/{code}/'
serializer_class = serializers.AuthenticatedDeleteUserActionSerializer
def post(self, request, *args, **kwargs):
if self.request.user.domains.exists():
return AccountDeleteView.response_still_has_domains
return super().post(request, *args, **kwargs)
def finalize(self):
return Response({'detail': 'All your data has been deleted. Bye bye, see you soon! <3'})
class AuthenticatedRenewDomainBasicUserActionView(AuthenticatedActionView):
html_url = '/confirm/renew-domain/{code}/'
serializer_class = serializers.AuthenticatedRenewDomainBasicUserActionSerializer
def finalize(self):
return Response({'detail': f'We recorded that your domain {self.authenticated_action.domain} is still in use.'})
class CaptchaView(generics.CreateAPIView):
serializer_class = serializers.CaptchaSerializer
throttle_scope = 'account_management_passive'
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking quantile related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.proto.quantiles_pb2 import QuantileConfig
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver
class QuantileBucketsOpTest(test_util.TensorFlowTestCase):
def _gen_config(self, eps, num_quantiles):
config = QuantileConfig()
config.eps = eps
config.num_quantiles = num_quantiles
return config.SerializeToString()
def testBasicQuantileBuckets(self):
"""Sets up the quantile summary op test as follows.
    Create a batch of 6 examples having dense and sparse features. SparseM is
    a sparse multi-dimensional (multivalent) feature.
    The data looks like this:
    | Instance | instance weights | Dense 0 | Sparse 0 | SparseM |
    | 0        | 10               | 1       |          |   |     |
    | 1        | 1                | 2       | 2        | 2 |     |
    | 2        | 1                | 3       | 3        | 3 |     |
    | 3        | 1                | 4       | 4        |   | 4   |
    | 4        | 1                | 4       | 5        |   | 5   |
    | 5        | 1                | 5       | 6        |   | 6   |
"""
dense_float_tensor_0 = constant_op.constant(
[1, 2, 3, 4, 4, 5], dtype=dtypes.float32)
sparse_indices_0 = constant_op.constant(
[[1, 0], [2, 0], [3, 0], [4, 0], [5, 0]], dtype=dtypes.int64)
sparse_values_0 = constant_op.constant(
[2, 3, 4, 5, 6], dtype=dtypes.float32)
sparse_shape_0 = constant_op.constant([6, 1], dtype=dtypes.int64)
# Multi-dimensional feature that should have the same quantiles as Sparse 0.
sparse_indices_m = constant_op.constant(
[[1, 1], [2, 0], [3, 1], [4, 1], [5, 1]], dtype=dtypes.int64)
sparse_values_m = constant_op.constant(
[2, 3, 4, 5, 6], dtype=dtypes.float32)
sparse_shape_m = constant_op.constant([6, 2], dtype=dtypes.int64)
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32)
with self.cached_session():
config = self._gen_config(0.33, 3)
dense_buckets, sparse_buckets = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [sparse_indices_0, sparse_indices_m],
[sparse_values_0, sparse_values_m], [sparse_shape_0, sparse_shape_m],
example_weights=example_weights,
dense_config=[config],
sparse_config=[config, config])
self.assertAllEqual([1, 3, 5], dense_buckets[0].eval())
self.assertAllEqual([2, 4, 6.], sparse_buckets[0].eval())
# Multidimensional sparse.
self.assertAllEqual([2, 4, 6.], sparse_buckets[1].eval())
def testStreamingQuantileBucketsWithVaryingBatch(self):
"""Sets up the quantile summary op test as follows.
    Creates batches of examples with a different number of inputs in each batch.
The input values are dense in the range [1 ... N]
The data looks like this:
| Batch | Start | InputList
| 1 | 1 | [1]
| 2 | 2 | [2, 3]
| 3 | 4 | [4, 5, 6]
| 4 | 7 | [7, 8, 9, 10]
| 5 | 11 | [11, 12, 13, 14, 15]
| 6 | 16 | [16, 17, 18, 19, 20, 21]
"""
num_quantiles = 3
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1")
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
for i in range(1, 23):
# start = 1, 2, 4, 7, 11, 16 ... (see comment above)
start = int((i * (i-1) / 2) + 1)
sess.run(update,
{input_column: range(start, start+i),
weights: [1] * i})
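    # Worked example of the batch layout above (added for illustration): for i = 4, start = 4*3/2 + 1 = 7,
    # so that batch feeds range(7, 11) = [7, 8, 9, 10]; the last batch (i = 22) ends at 22*21/2 + 22 = 253,
    # which matches the largest bucket boundary asserted below.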
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertEqual(num_quantiles + 1, len(buckets))
self.assertAllEqual([1, 86., 170., 253.], buckets)
def testStreamingQuantileBucketsLowPrecisionInput(self):
"""Tests inputs that simulate low precision float16 values."""
num_quantiles = 3
# set generate_quantiles to True since the test will generate fewer
# boundaries otherwise.
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1", generate_quantiles=True)
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
      # This input is generated by integers in the range [2030, 2060]
      # but represented with float16 precision. Integers <= 2048 are
      # exactly representable, whereas numbers > 2048 are rounded; hence
      # numbers > 2048 are repeated. For precision loss / rounding, see:
      # https://en.wikipedia.org/wiki/Half-precision_floating-point_format.
      #
      # The intent of the test is not the handling of float16 values, but to
      # validate that the right number of buckets is returned, in cases where
      # the input may contain repeated values.
inputs = [
2030.0, 2031.0, 2032.0, 2033.0, 2034.0, 2035.0, 2036.0, 2037.0,
2038.0, 2039.0, 2040.0, 2041.0, 2042.0, 2043.0, 2044.0, 2045.0,
2046.0, 2047.0, 2048.0, 2048.0, 2050.0, 2052.0, 2052.0, 2052.0,
2054.0, 2056.0, 2056.0, 2056.0, 2058.0, 2060.0
]
sess.run(update,
{input_column: inputs,
weights: [1] * len(inputs)})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertEqual(num_quantiles + 1, len(buckets))
self.assertAllEqual([2030, 2040, 2050, 2060], buckets)
def _testStreamingQuantileBucketsHelper(
self, inputs, num_quantiles=3, expected_buckets=None):
"""Helper to test quantile buckets on different inputs."""
# set generate_quantiles to True since the test will generate fewer
# boundaries otherwise.
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=num_quantiles,
epsilon=0.001, name="q1", generate_quantiles=True)
resources.initialize_resources(resources.shared_resources()).run()
input_column = array_ops.placeholder(dtypes.float32)
weights = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=input_column,
example_weights=weights)
with self.cached_session() as sess:
sess.run(update,
{input_column: inputs,
weights: [1] * len(inputs)})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run(
[buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
# By default, use 3 quantiles, 4 boundaries for simplicity.
self.assertEqual(num_quantiles + 1, len(buckets))
if expected_buckets:
self.assertAllEqual(buckets, expected_buckets)
def testStreamingQuantileBucketsRepeatedSingleValue(self):
inputs = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self._testStreamingQuantileBucketsHelper(inputs)
  def testStreamingQuantileBucketsRepeatedTwoValues(self):
inputs = [1, 1, 1, 2, 2, 2, 2, 2, 1, 1]
self._testStreamingQuantileBucketsHelper(inputs)
  def testStreamingQuantileBucketsRepeatedTwoValuesUnbalanced(self):
inputs = [7, 7, 7, 2, 7, 7, 2, 2, 7, 7]
self._testStreamingQuantileBucketsHelper(inputs)
  def testStreamingQuantileBucketsFewerInputsThanBuckets(self):
inputs = [5]
self._testStreamingQuantileBucketsHelper(inputs)
def testStreamingQuantileBucketsEqualDistributionInSequence(self):
# Input pattern is of the form [1, 1, 1, 2, 2, 2, 3, 3, 3, ...]
ones = 100 * [1]
inputs = []
for i in range(1, 101):
inputs += [i * k for k in ones]
# Expect 100 equally spaced buckets.
expected_buckets = range(1, 101)
self._testStreamingQuantileBucketsHelper(
inputs, num_quantiles=99, expected_buckets=expected_buckets)
def testStreamingQuantileBucketsEqualDistributionInterleaved(self):
# Input pattern is of the form [1, 2, 3, 1, 2, 3, 1, 2, 3, ...]
sequence = range(1, 101)
inputs = []
for _ in range(1, 101):
inputs += sequence
# Expect 100 equally spaced buckets.
expected_buckets = range(1, 101)
self._testStreamingQuantileBucketsHelper(
inputs, num_quantiles=99, expected_buckets=expected_buckets)
def testStreamingQuantileBuckets(self):
"""Sets up the quantile summary op test as follows.
    100 batches of data are added to the accumulator. The batches are of the form:
[0 1 .. 99]
[100 101 .. 200]
...
[9900 9901 .. 9999]
All the batches have 1 for all the example weights.
"""
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q1")
resources.initialize_resources(resources.shared_resources()).run()
weight_placeholder = array_ops.placeholder(dtypes.float32)
dense_placeholder = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=dense_placeholder,
example_weights=weight_placeholder)
with self.cached_session() as sess:
for i in range(100):
dense_float = np.linspace(
i * 100, (i + 1) * 100 - 1, num=100).reshape(-1, 1)
sess.run(update, {
dense_placeholder: dense_float,
weight_placeholder: np.ones(shape=(100, 1), dtype=np.float32)
})
with self.cached_session() as sess:
sess.run(accumulator.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([0, 3335., 6671., 9999.], buckets)
def testStreamingQuantileBucketsTwoLevel(self):
"""Sets up the quantile summary op test as follows.
    100 batches of data are added to the accumulator. The batches are of the form:
[0 1 .. 99]
[100 101 .. 200]
...
[9900 9901 .. 9999]
All the batches have 1 for all the example weights.
"""
with self.cached_session() as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q1")
accumulator_2 = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.01, name="q2")
resources.initialize_resources(resources.shared_resources()).run()
weight_placeholder = array_ops.placeholder(dtypes.float32)
dense_placeholder = array_ops.placeholder(dtypes.float32)
update = accumulator.add_summary(
stamp_token=0,
column=dense_placeholder,
example_weights=weight_placeholder)
with self.cached_session() as sess:
for i in range(100):
dense_float = np.linspace(
i * 100, (i + 1) * 100 - 1, num=100).reshape(-1, 1)
sess.run(update, {
dense_placeholder: dense_float,
weight_placeholder: np.ones(shape=(100, 1), dtype=np.float32)
})
with self.cached_session() as sess:
summary = sess.run(
accumulator.flush_summary(stamp_token=0, next_stamp_token=1))
sess.run(
accumulator_2.add_prebuilt_summary(
stamp_token=0, summary=constant_op.constant(summary)))
sess.run(accumulator_2.flush(stamp_token=0, next_stamp_token=1))
are_ready_flush, buckets = (accumulator_2.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([0, 3337., 6677., 9999.], buckets)
def testSaveRestoreBeforeFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
sparse_indices_0 = constant_op.constant(
[[1, 0], [2, 1], [3, 0], [4, 2], [5, 0]], dtype=dtypes.int64)
sparse_values_0 = constant_op.constant(
[2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtypes.float32)
sparse_shape_0 = constant_op.constant([6, 3], dtype=dtypes.int64)
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32, shape=[6, 1])
update = accumulator.add_summary(
stamp_token=0,
column=sparse_tensor.SparseTensor(sparse_indices_0, sparse_values_0,
sparse_shape_0),
example_weights=example_weights)
update.run()
save.save(sess, save_path)
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([2, 4, 6.], buckets)
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
are_ready_noflush = accumulator.get_buckets(stamp_token=0)[0]
with ops.control_dependencies([are_ready_noflush]):
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = accumulator.get_buckets(stamp_token=1)
buckets, are_ready_flush, are_ready_noflush = (sess.run(
[buckets, are_ready_flush, are_ready_noflush]))
self.assertFalse(are_ready_noflush)
self.assertTrue(are_ready_flush)
self.assertAllEqual([2, 4, 6.], buckets)
def testSaveRestoreAfterFlush(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
resources.initialize_resources(resources.shared_resources()).run()
example_weights = constant_op.constant(
[10, 1, 1, 1, 1, 1], dtype=dtypes.float32, shape=[6, 1])
dense_float_tensor_0 = constant_op.constant(
[1, 2, 3, 4, 4, 5], dtype=dtypes.float32, shape=[6, 1])
update = accumulator.add_summary(
stamp_token=0,
column=dense_float_tensor_0,
example_weights=example_weights)
update.run()
reset = accumulator.flush(stamp_token=0, next_stamp_token=1)
with ops.control_dependencies([reset]):
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([1, 3, 5], buckets)
save.save(sess, save_path)
with self.session(graph=ops.Graph()) as sess:
accumulator = quantile_ops.QuantileAccumulator(
init_stamp_token=0, num_quantiles=3, epsilon=0.33, name="q0")
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
are_ready_flush, buckets = (accumulator.get_buckets(stamp_token=1))
buckets, are_ready_flush = (sess.run([buckets, are_ready_flush]))
self.assertEqual(True, are_ready_flush)
self.assertAllEqual([1, 3, 5], buckets)
def testFixedUniform(self):
"""Sets up the quantile summary op test as follows.
    Creates an array dividing the range [0, 1] into 1<<16 equally spaced
    elements, each with a weight of 1.0.
"""
dense_float_tensor_0 = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
example_weights = constant_op.constant(
[1] * (int(math.pow(2, 16)) + 1), dtype=dtypes.float32)
config = self._gen_config(0.1, 10)
with self.cached_session():
dense_buckets, _ = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [], [], [],
example_weights=example_weights,
dense_config=[config],
sparse_config=[])
self.assertAllClose(
[0] + [(i + 1.0) / 10 for i in range(0, 10)],
dense_buckets[0].eval(),
atol=0.1)
def testFixedNonUniform(self):
"""Sets up the quantile summary op test as follows.
    Creates an array dividing the range [0, 1] into 1<<16 equally spaced
    elements, each with a weight equal to its value.
"""
dense_float_tensor_0 = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
example_weights = constant_op.constant(
[(1.0 * i) / math.pow(2.0, 16)
for i in range(0, int(math.pow(2, 16)) + 1)])
config = self._gen_config(0.1, 10)
with self.cached_session():
dense_buckets, _ = quantile_ops.quantile_buckets(
[dense_float_tensor_0], [], [], [],
example_weights=example_weights,
dense_config=[config],
sparse_config=[])
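      # Explanatory note (not in the original test): with weight proportional to the value x on [0, 1],
      # the cumulative weight up to x grows like x^2/2, so the boundary holding a fraction p of the total
      # weight sits at x = sqrt(p); hence the expected boundaries sqrt((i + 1) / 10) below.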
self.assertAllClose(
[0] + [math.sqrt((i + 1.0) / 10) for i in range(0, 10)],
dense_buckets[0].eval(),
atol=0.1)
class QuantilesOpTest(test_util.TensorFlowTestCase):
def setUp(self):
"""Sets up the quantile op tests.
    Create a batch of 4 examples having 2 dense and 4 sparse features.
    The fourth sparse feature is multivalent (3-dimensional).
The data looks like this
    | Instance | Dense 0 | Dense 1 | Sparse 0 | Sparse 1 | Sparse 2 | SparseM
    | 0        | -0.1    | -1      | -2       | 0.1      |          | _ ,1,_
    | 1        | 0.4     | -15     | 5.5      |          | 2        | 2 ,_,_
    | 2        | 3.2     | 18      | 16       | 3        |          | __,_,_
    | 3        | 190     | 1000    | 17.5     | -3       | 4        | 1 ,8,1
Quantiles are:
Dense 0: (-inf,0.4], (0.4,5], (5, 190]
Dense 1: (-inf, -9], (-9,15], (15, 1000)
Sparse 0: (-inf, 5], (5,16], (16, 100]
Sparse 1: (-inf, 2], (2, 5]
Sparse 2: (-inf, 100]
SparseM: (-inf, 1], (1,2], (2,1000]
"""
super(QuantilesOpTest, self).setUp()
self._dense_float_tensor_0 = constant_op.constant(
[[-0.1], [0.4], [3.2], [190]], dtype=dtypes.float32)
self._dense_float_tensor_1 = constant_op.constant(
[[-1], [-15], [18], [1000]], dtype=dtypes.float32)
# Sparse feature 0
self._sparse_indices_0 = constant_op.constant(
[[0, 0], [1, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_0 = constant_op.constant([-2, 5.5, 16, 17.5])
self._sparse_shape_0 = constant_op.constant([4, 1])
    # Sparse feature 1
self._sparse_indices_1 = constant_op.constant(
[[0, 0], [2, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_1 = constant_op.constant([0.1, 3, -3])
self._sparse_shape_1 = constant_op.constant([4, 1])
    # Sparse feature 2
self._sparse_indices_2 = constant_op.constant(
[[1, 0], [3, 0]], dtype=dtypes.int64)
self._sparse_values_2 = constant_op.constant([2, 4], dtype=dtypes.float32)
self._sparse_shape_2 = constant_op.constant([4, 1])
    # Sparse feature M
self._sparse_indices_m = constant_op.constant(
[[0, 1], [1, 0], [3, 0], [3, 1], [3, 2]], dtype=dtypes.int64)
self._sparse_values_m = constant_op.constant(
[1, 2, 1, 8, 1], dtype=dtypes.float32)
self._sparse_shape_m = constant_op.constant([4, 1])
# Quantiles
self._dense_thresholds_0 = [0.4, 5, 190]
self._dense_thresholds_1 = [-9, 15, 1000]
self._sparse_thresholds_0 = [5, 16, 100]
self._sparse_thresholds_1 = [2, 5]
self._sparse_thresholds_2 = [100]
self._sparse_thresholds_m = [1, 2, 1000]
def testDenseFeaturesOnly(self):
with self.cached_session():
dense_quantiles, _ = quantile_ops.quantiles(
[self._dense_float_tensor_0, self._dense_float_tensor_1], [],
[self._dense_thresholds_0, self._dense_thresholds_1], [], [])
# Dense feature 0
self.assertAllEqual([[0, 0], [0, 0], [1, 0], [2, 0]],
dense_quantiles[0].eval())
# Dense feature 1
self.assertAllEqual([[1, 0], [0, 0], [2, 0], [2, 0]],
dense_quantiles[1].eval())
def testSparseFeaturesOnly(self):
with self.cached_session():
_, sparse_quantiles = quantile_ops.quantiles([], [
self._sparse_values_0, self._sparse_values_1, self._sparse_values_2,
self._sparse_values_m
], [], [
self._sparse_thresholds_0, self._sparse_thresholds_1,
self._sparse_thresholds_2, self._sparse_thresholds_m
], [
self._sparse_indices_0, self._sparse_indices_1,
self._sparse_indices_2, self._sparse_indices_m
])
self.assertAllEqual(4, len(sparse_quantiles))
# Sparse feature 0
self.assertAllEqual([[0, 0], [1, 0], [1, 0], [2, 0]],
sparse_quantiles[0].eval())
# Sparse feature 1
self.assertAllEqual([[0, 0], [1, 0], [0, 0]], sparse_quantiles[1].eval())
# Sparse feature 2
self.assertAllEqual([[0, 0], [0, 0]], sparse_quantiles[2].eval())
# Multidimensional feature.
self.assertAllEqual([[0, 1], [1, 0], [0, 0], [2, 1], [0, 2]],
sparse_quantiles[3].eval())
def testDenseAndSparseFeatures(self):
with self.cached_session():
dense_quantiles, sparse_quantiles = quantile_ops.quantiles(
[self._dense_float_tensor_0, self._dense_float_tensor_1], [
self._sparse_values_0, self._sparse_values_1,
self._sparse_values_2, self._sparse_values_m
], [self._dense_thresholds_0, self._dense_thresholds_1], [
self._sparse_thresholds_0, self._sparse_thresholds_1,
self._sparse_thresholds_2, self._sparse_thresholds_m
], [
self._sparse_indices_0, self._sparse_indices_1,
self._sparse_indices_2, self._sparse_indices_m
])
# Dense feature 0
self.assertAllEqual([[0, 0], [0, 0], [1, 0], [2, 0]],
dense_quantiles[0].eval())
# Dense feature 1
self.assertAllEqual([[1, 0], [0, 0], [2, 0], [2, 0]],
dense_quantiles[1].eval())
# Sparse feature 0
self.assertAllEqual([[0, 0], [1, 0], [1, 0], [2, 0]],
sparse_quantiles[0].eval())
# Sparse feature 1
self.assertAllEqual([[0, 0], [1, 0], [0, 0]], sparse_quantiles[1].eval())
# Sparse feature 2
self.assertAllEqual([[0, 0], [0, 0]], sparse_quantiles[2].eval())
# Multidimensional feature.
self.assertAllEqual([[0, 1], [1, 0], [0, 0], [2, 1], [0, 2]],
sparse_quantiles[3].eval())
def testBucketizeWithInputBoundaries(self):
with self.cached_session():
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=[3])
self.assertAllEqual([0, 0, 1, 1, 1], buckets.eval())
def testBucketizeWithInputBoundaries2(self):
with self.cached_session():
boundaries = constant_op.constant([3], dtype=dtypes.float32)
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=boundaries)
self.assertAllEqual([0, 0, 1, 1, 1], buckets.eval())
def testBucketizeWithInputBoundaries3(self):
with self.cached_session():
b = array_ops.placeholder(dtypes.float32)
buckets = quantile_ops.bucketize_with_input_boundaries(
input=[1, 2, 3, 4, 5],
boundaries=b)
self.assertAllEqual([0, 1, 1, 2, 2],
buckets.eval(feed_dict={b: [2, 4]}))
if __name__ == "__main__":
googletest.main()
|
|
"""Cement core controller module."""
import re
import textwrap
import argparse
from ..core import exc, interface, handler
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
def controller_validator(klass, obj):
"""
Validates a handler implementation against the IController interface.
"""
members = [
'_setup',
'_dispatch',
]
meta = [
'label',
'interface',
'config_section',
'config_defaults',
'stacked_on',
'stacked_type',
]
interface.validate(IController, obj, members, meta=meta)
# also check _meta.arguments values
errmsg = "Controller arguments must be a list of tuples. I.e. " + \
"[ (['-f', '--foo'], dict(action='store')), ]"
if obj._meta.arguments is not None:
if type(obj._meta.arguments) is not list:
raise exc.InterfaceError(errmsg)
for item in obj._meta.arguments:
if type(item) is not tuple:
raise exc.InterfaceError(errmsg)
if type(item[0]) is not list:
raise exc.InterfaceError(errmsg)
if type(item[1]) is not dict:
raise exc.InterfaceError(errmsg)
if not obj._meta.label == 'base' and obj._meta.stacked_on is None:
errmsg = "Controller `%s` is not stacked anywhere!" % \
obj.__class__.__name__
raise exc.InterfaceError(errmsg)
if not obj._meta.label == 'base' and \
obj._meta.stacked_type not in ['nested', 'embedded']:
raise exc.InterfaceError(
"Controller '%s' " % obj._meta.label +
"has an unknown stacked type of '%s'." %
obj._meta.stacked_type
)
class IController(interface.Interface):
"""
This class defines the Controller Handler Interface. Classes that
implement this handler must provide the methods and attributes defined
below.
Implementations do *not* subclass from interfaces.
Usage:
.. code-block:: python
from cement.core import controller
class MyBaseController(controller.CementBaseController):
class Meta:
interface = controller.IController
...
"""
# pylint: disable=W0232, C0111, R0903
class IMeta:
"""Interface meta-data."""
#: The string identifier of the interface.
label = 'controller'
#: The interface validator function.
validator = controller_validator
# Must be provided by the implementation
Meta = interface.Attribute('Handler meta-data')
def _setup(app_obj):
"""
        The _setup function is called after application initialization and after it
        is determined that this controller was requested via command line
        arguments. Meaning, a controller's _setup() function is only called
        right before its _dispatch() function is called to execute a command.
Must 'setup' the handler object making it ready for the framework
or the application to make further calls to it.
:param app_obj: The application object.
:returns: ``None``
"""
def _dispatch(self):
"""
Reads the application object's data to dispatch a command from this
controller. For example, reading self.app.pargs to determine what
command was passed, and then executing that command function.
Note that Cement does *not* parse arguments when calling _dispatch()
on a controller, as it expects the controller to handle parsing
arguments (I.e. self.app.args.parse()).
:returns: Returns the result of the executed controller function,
or ``None`` if no controller function is called.
"""
class expose(object):
"""
Used to expose controller functions to be listed as commands, and to
decorate the function with Meta data for the argument parser.
:param help: Help text to display for that command.
:type help: str
:param hide: Whether the command should be visible.
:type hide: boolean
:param aliases: Aliases to this command.
:param aliases_only: Whether to only display the aliases (not the label).
This is useful for situations where you have obscure function names
    which you do not want displayed. Effectively, if there are aliases and
`aliases_only` is True, then aliases[0] will appear as the actual
command/function label.
:type aliases: ``list``
Usage:
.. code-block:: python
from cement.core.controller import CementBaseController, expose
class MyAppBaseController(CementBaseController):
class Meta:
label = 'base'
@expose(hide=True, aliases=['run'])
def default(self):
print("In MyAppBaseController.default()")
@expose()
def my_command(self):
print("In MyAppBaseController.my_command()")
"""
# pylint: disable=W0622
def __init__(self, help='', hide=False, aliases=[], aliases_only=False):
self.hide = hide
self.help = help
self.aliases = aliases
self.aliases_only = aliases_only
def __call__(self, func):
metadict = {}
metadict['label'] = re.sub('_', '-', func.__name__)
metadict['func_name'] = func.__name__
metadict['exposed'] = True
metadict['hide'] = self.hide
metadict['help'] = self.help
metadict['aliases'] = self.aliases
metadict['aliases_only'] = self.aliases_only
metadict['controller'] = None # added by the controller
func.__cement_meta__ = metadict
return func
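# Illustrative effect of the decorator above (added for clarity): given
#
#     @expose(help='greet the user', aliases=['hi'])
#     def say_hello(self):
#         ...
#
# the wrapped function gains `say_hello.__cement_meta__ == {'label': 'say-hello',
# 'func_name': 'say_hello', 'exposed': True, 'hide': False, 'help': 'greet the user',
# 'aliases': ['hi'], 'aliases_only': False, 'controller': None}`, which _collect() later
# picks up when building a controller's dispatch map.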
# pylint: disable=R0921
class CementBaseController(handler.CementBaseHandler):
"""
This is an implementation of the
`IControllerHandler <#cement.core.controller.IController>`_ interface, but
as a base class that application controllers `should` subclass from.
Registering it directly as a handler is useless.
    NOTE: This handler **requires** that the application's 'arg_handler' be
argparse. If using an alternative argument handler you will need to
write your own controller base class.
    NOTE: This is the initial default implementation of CementBaseController. In
the future it will be replaced by CementBaseController2, therefore using
CementBaseController2 is recommended for new development.
Usage:
.. code-block:: python
from cement.core.controller import CementBaseController
class MyAppBaseController(CementBaseController):
class Meta:
label = 'base'
description = 'MyApp is awesome'
config_defaults = dict()
arguments = []
epilog = "This is the text at the bottom of --help."
# ...
class MyStackedController(CementBaseController):
class Meta:
label = 'second_controller'
aliases = ['sec', 'secondary']
stacked_on = 'base'
stacked_type = 'embedded'
# ...
"""
class Meta:
"""
Controller meta-data (can be passed as keyword arguments to the parent
class).
"""
interface = IController
"""The interface this class implements."""
label = None
"""The string identifier for the controller."""
aliases = []
"""
A list of aliases for the controller. Will be treated like
command/function aliases for non-stacked controllers. For example:
``myapp <controller_label> --help`` is the same as
``myapp <controller_alias> --help``.
"""
aliases_only = False
"""
When set to True, the controller label will not be displayed at
command line, only the aliases will. Effectively, aliases[0] will
        appear as the label. This feature is useful for the situation where
you might want two controllers to have the same label when stacked
on top of separate controllers. For example, 'myapp users list' and
'myapp servers list' where 'list' is a stacked controller, not a
function.
"""
description = None
"""The description shown at the top of '--help'. Default: None"""
config_section = None
"""
A config [section] to merge config_defaults into. Cement will default
to controller.<label> if None is set.
"""
config_defaults = {}
"""
Configuration defaults (type: dict) that are merged into the
        application's config object for the config_section mentioned above.
"""
arguments = []
"""
Arguments to pass to the argument_handler. The format is a list
        of tuples whose items are a (list, dict). Meaning:
``[ ( ['-f', '--foo'], dict(dest='foo', help='foo option') ), ]``
        This is equivalent to manually adding each argument to the argument
        parser as in the following example:
        ``parser.add_argument('-f', '--foo', help='foo option', dest='foo')``
"""
stacked_on = 'base'
"""
A label of another controller to 'stack' commands/arguments on top of.
"""
stacked_type = 'embedded'
"""
Whether to `embed` commands and arguments within the parent controller
or to simply `nest` the controller under the parent controller (making
it a sub-sub-command). Must be one of `['embedded', 'nested']` only
if `stacked_on` is not `None`.
"""
hide = False
"""Whether or not to hide the controller entirely."""
epilog = None
"""
The text that is displayed at the bottom when '--help' is passed.
"""
usage = None
"""
The text that is displayed at the top when '--help' is passed.
Although the default is `None`, Cement will set this to a generic
usage based on the `prog`, `controller` name, etc if nothing else is
passed.
"""
argument_formatter = argparse.RawDescriptionHelpFormatter
"""
The argument formatter class to use to display --help output.
"""
default_func = 'default'
"""
Function to call if no sub-command is passed. Note that this can
        **not** start with an ``_`` due to backward compatibility constraints
in how Cement discovers and maps commands.
"""
def __init__(self, *args, **kw):
super(CementBaseController, self).__init__(*args, **kw)
self.app = None
self._commands = {} # used to store collected commands
self._visible_commands = [] # used to sort visible command labels
self._arguments = [] # used to store collected arguments
self._dispatch_map = {} # used to map commands/aliases to controller
self._dispatch_command = None # set during _parse_args()
def _setup(self, app_obj):
"""
        See `IController._setup() <#cement.core.controller.IController._setup>`_.
"""
super(CementBaseController, self)._setup(app_obj)
if getattr(self._meta, 'description', None) is None:
self._meta.description = "%s Controller" % \
self._meta.label.capitalize()
self.app = app_obj
def _collect(self):
LOG.debug("collecting arguments/commands for %s" % self)
arguments = []
commands = []
# process my arguments and commands first
arguments = list(self._meta.arguments)
for member in dir(self.__class__):
if member.startswith('_'):
continue
try:
func = getattr(self.__class__, member).__cement_meta__
except AttributeError:
continue
else:
func['controller'] = self
commands.append(func)
# process stacked controllers second for commands and args
for contr in handler.list('controller'):
# don't include self here
if contr == self.__class__:
continue
contr = contr()
contr._setup(self.app)
if contr._meta.stacked_on == self._meta.label:
if contr._meta.stacked_type == 'embedded':
contr_arguments, contr_commands = contr._collect()
for arg in contr_arguments:
arguments.append(arg)
for func in contr_commands:
commands.append(func)
elif contr._meta.stacked_type == 'nested':
metadict = {}
metadict['label'] = re.sub('_', '-', contr._meta.label)
metadict['func_name'] = '_dispatch'
metadict['exposed'] = True
metadict['hide'] = contr._meta.hide
metadict['help'] = contr._meta.description
metadict['aliases'] = contr._meta.aliases
metadict['aliases_only'] = contr._meta.aliases_only
metadict['controller'] = contr
commands.append(metadict)
return (arguments, commands)
def _process_arguments(self):
for _arg, _kw in self._arguments:
try:
self.app.args.add_argument(*_arg, **_kw)
except argparse.ArgumentError as e:
raise exc.FrameworkError(e.__str__())
def _process_commands(self):
self._dispatch_map = {}
self._visible_commands = []
for cmd in self._commands:
# process command labels
if cmd['label'] in self._dispatch_map.keys():
raise exc.FrameworkError(
"Duplicate command named '%s' " % cmd['label'] +
"found in controller '%s'" % cmd['controller']
)
self._dispatch_map[cmd['label']] = cmd
if not cmd['hide']:
self._visible_commands.append(cmd['label'])
# process command aliases
for alias in cmd['aliases']:
if alias in self._dispatch_map.keys():
raise exc.FrameworkError(
"The alias '%s' of the " % alias +
"'%s' controller collides " % cmd['controller'] +
"with a command or alias of the same name."
)
self._dispatch_map[alias] = cmd
self._visible_commands.sort()
def _get_dispatch_command(self):
default_func = self._meta.default_func
default_func_key = re.sub('_', '-', self._meta.default_func)
if (len(self.app.argv) <= 0) or (self.app.argv[0].startswith('-')):
# if no command is passed, then use default
if default_func_key in self._dispatch_map.keys():
self._dispatch_command = self._dispatch_map[default_func_key]
elif self.app.argv[0] in self._dispatch_map.keys():
self._dispatch_command = self._dispatch_map[self.app.argv[0]]
self.app.argv.pop(0)
else:
# check for default again (will get here if command line has
# positional arguments that don't start with a -)
if default_func_key in self._dispatch_map.keys():
self._dispatch_command = self._dispatch_map[default_func_key]
def _parse_args(self):
self.app.args.description = self._help_text
self.app.args.usage = self._usage_text
self.app.args.formatter_class = self._meta.argument_formatter
self.app._parse_args()
def _dispatch(self):
"""
Takes the remaining arguments from self.app.argv, parses for a
command to dispatch and, if one is found, dispatches it.
"""
if hasattr(self._meta, 'epilog'):
if self._meta.epilog is not None:
self.app.args.epilog = self._meta.epilog
self._arguments, self._commands = self._collect()
self._process_commands()
self._get_dispatch_command()
if self._dispatch_command:
if self._dispatch_command['func_name'] == '_dispatch':
func = getattr(self._dispatch_command['controller'],
'_dispatch')
return func()
else:
self._process_arguments()
self._parse_args()
func = getattr(self._dispatch_command['controller'],
self._dispatch_command['func_name'])
return func()
else:
self._process_arguments()
self._parse_args()
@property
def _usage_text(self):
"""Returns the usage text displayed when ``--help`` is passed."""
if self._meta.usage is not None:
return self._meta.usage
txt = "%s (sub-commands ...) [options ...] {arguments ...}" % \
self.app.args.prog
return txt
@property
def _help_text(self):
"""Returns the help text displayed when '--help' is passed."""
cmd_txt = ''
for label in self._visible_commands:
cmd = self._dispatch_map[label]
if len(cmd['aliases']) > 0 and cmd['aliases_only']:
if len(cmd['aliases']) > 1:
first = cmd['aliases'].pop(0)
cmd_txt = cmd_txt + " %s (aliases: %s)\n" % \
(first, ', '.join(cmd['aliases']))
else:
cmd_txt = cmd_txt + " %s\n" % cmd['aliases'][0]
elif len(cmd['aliases']) > 0:
cmd_txt = cmd_txt + " %s (aliases: %s)\n" % \
(label, ', '.join(cmd['aliases']))
else:
cmd_txt = cmd_txt + " %s\n" % label
if cmd['help']:
cmd_txt = cmd_txt + " %s\n\n" % cmd['help']
else:
cmd_txt = cmd_txt + "\n"
if len(cmd_txt) > 0:
txt = '''%s
commands:
%s
''' % (self._meta.description, cmd_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt)
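# Illustrative sketch (not part of the original module): a minimal controller
# subclass showing how ``Meta.label``, ``Meta.aliases``, ``Meta.stacked_on`` and
# ``Meta.arguments`` are typically filled in. The class name and option are
# hypothetical; command functions would additionally be decorated with Cement's
# ``expose`` decorator before they show up in the dispatch map.
class _ExampleController(CementBaseController):
    class Meta:
        label = 'example'
        aliases = ['ex']
        description = "Example controller (illustrative only)"
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['-f', '--foo'], dict(dest='foo', help='foo option')),
        ]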
|
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import common_db_mixin
from neutron.db import models_v2
from neutron.ipam import utils as ipam_utils
LOG = logging.getLogger(__name__)
class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
"""Stores getters and helper methods for db_base_plugin_v2
All private getters and simple helpers like _make_*_dict were moved from
db_base_plugin_v2.
More complicated logic and public methods are left in db_base_plugin_v2.
The main purpose of this class is to make getters accessible for IPAM
backends.
"""
@staticmethod
def _generate_mac():
return utils.get_random_mac(cfg.CONF.base_mac.split(':'))
@staticmethod
def _delete_ip_allocation(context, network_id, subnet_id, ip_address):
# Delete the IP address from the IPAllocation table
LOG.debug("Delete allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s)",
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id})
context.session.query(models_v2.IPAllocation).filter_by(
network_id=network_id,
ip_address=ip_address,
subnet_id=subnet_id).delete()
@staticmethod
def _store_ip_allocation(context, ip_address, network_id, subnet_id,
port_id):
LOG.debug("Allocated IP %(ip_address)s "
"(%(network_id)s/%(subnet_id)s/%(port_id)s)",
{'ip_address': ip_address,
'network_id': network_id,
'subnet_id': subnet_id,
'port_id': port_id})
allocated = models_v2.IPAllocation(
network_id=network_id,
port_id=port_id,
ip_address=ip_address,
subnet_id=subnet_id
)
context.session.add(allocated)
@classmethod
def _check_gateway_in_subnet(cls, cidr, gateway):
"""Validate that the gateway is on the subnet."""
ip = netaddr.IPAddress(gateway)
if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()):
return ipam_utils.check_subnet_ip(cidr, gateway)
return True
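        # Illustrative note (not part of the original module): IPv4 gateways and
        # non-link-local IPv6 gateways must fall inside the subnet CIDR; an IPv6
        # link-local gateway (e.g. fe80::1) is accepted regardless of the CIDR.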
def _make_subnet_dict(self, subnet, fields=None):
res = {'id': subnet['id'],
'name': subnet['name'],
'tenant_id': subnet['tenant_id'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': subnet['cidr'],
'subnetpool_id': subnet.get('subnetpool_id'),
'allocation_pools': [{'start': pool['first_ip'],
'end': pool['last_ip']}
for pool in subnet['allocation_pools']],
'gateway_ip': subnet['gateway_ip'],
'enable_dhcp': subnet['enable_dhcp'],
'ipv6_ra_mode': subnet['ipv6_ra_mode'],
'ipv6_address_mode': subnet['ipv6_address_mode'],
'dns_nameservers': [dns['address']
for dns in subnet['dns_nameservers']],
'host_routes': [{'destination': route['destination'],
'nexthop': route['nexthop']}
for route in subnet['routes']],
'shared': subnet['shared']
}
# Call auxiliary extend functions, if any
self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet)
return self._fields(res, fields)
def _make_subnetpool_dict(self, subnetpool, fields=None):
default_prefixlen = str(subnetpool['default_prefixlen'])
min_prefixlen = str(subnetpool['min_prefixlen'])
max_prefixlen = str(subnetpool['max_prefixlen'])
res = {'id': subnetpool['id'],
'name': subnetpool['name'],
'tenant_id': subnetpool['tenant_id'],
'default_prefixlen': default_prefixlen,
'min_prefixlen': min_prefixlen,
'max_prefixlen': max_prefixlen,
'shared': subnetpool['shared'],
'prefixes': [prefix['cidr']
for prefix in subnetpool['prefixes']],
'ip_version': subnetpool['ip_version'],
'default_quota': subnetpool['default_quota']}
return self._fields(res, fields)
def _make_port_dict(self, port, fields=None,
process_extensions=True):
res = {"id": port["id"],
'name': port['name'],
"network_id": port["network_id"],
'tenant_id': port['tenant_id'],
"mac_address": port["mac_address"],
"admin_state_up": port["admin_state_up"],
"status": port["status"],
"fixed_ips": [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in port["fixed_ips"]],
"device_id": port["device_id"],
"device_owner": port["device_owner"]}
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.PORTS, res, port)
return self._fields(res, fields)
def _get_network(self, context, id):
try:
network = self._get_by_id(context, models_v2.Network, id)
except exc.NoResultFound:
raise n_exc.NetworkNotFound(net_id=id)
return network
def _get_subnet(self, context, id):
try:
subnet = self._get_by_id(context, models_v2.Subnet, id)
except exc.NoResultFound:
raise n_exc.SubnetNotFound(subnet_id=id)
return subnet
def _get_subnetpool(self, context, id):
try:
return self._get_by_id(context, models_v2.SubnetPool, id)
except exc.NoResultFound:
raise n_exc.SubnetPoolNotFound(subnetpool_id=id)
def _get_all_subnetpools(self, context):
# NOTE(tidwellr): see note in _get_all_subnets()
return context.session.query(models_v2.SubnetPool).all()
def _get_port(self, context, id):
try:
port = self._get_by_id(context, models_v2.Port, id)
except exc.NoResultFound:
raise n_exc.PortNotFound(port_id=id)
return port
def _get_dns_by_subnet(self, context, subnet_id):
dns_qry = context.session.query(models_v2.DNSNameServer)
return dns_qry.filter_by(subnet_id=subnet_id).all()
def _get_route_by_subnet(self, context, subnet_id):
route_qry = context.session.query(models_v2.SubnetRoute)
return route_qry.filter_by(subnet_id=subnet_id).all()
def _get_router_gw_ports_by_network(self, context, network_id):
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(network_id=network_id,
device_owner=constants.DEVICE_OWNER_ROUTER_GW).all()
def _get_subnets_by_network(self, context, network_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(network_id=network_id).all()
def _get_subnets_by_subnetpool(self, context, subnetpool_id):
subnet_qry = context.session.query(models_v2.Subnet)
return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all()
def _get_all_subnets(self, context):
# NOTE(salvatore-orlando): This query might end up putting
# a lot of stress on the db. Consider adding a cache layer
return context.session.query(models_v2.Subnet).all()
def _make_network_dict(self, network, fields=None,
process_extensions=True):
res = {'id': network['id'],
'name': network['name'],
'tenant_id': network['tenant_id'],
'admin_state_up': network['admin_state_up'],
'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU),
'status': network['status'],
'shared': network['shared'],
'subnets': [subnet['id']
for subnet in network['subnets']]}
# TODO(pritesh): Move vlan_transparent to the extension module.
# vlan_transparent here is only added if the vlantransparent
# extension is enabled.
if ('vlan_transparent' in network and network['vlan_transparent'] !=
attributes.ATTR_NOT_SPECIFIED):
res['vlan_transparent'] = network['vlan_transparent']
# Call auxiliary extend functions, if any
if process_extensions:
self._apply_dict_extend_functions(
attributes.NETWORKS, res, network)
return self._fields(res, fields)
def _make_subnet_args(self, context, shared, detail,
subnet, subnetpool_id=None):
args = {'tenant_id': detail.tenant_id,
'id': detail.subnet_id,
'name': subnet['name'],
'network_id': subnet['network_id'],
'ip_version': subnet['ip_version'],
'cidr': str(detail.subnet_cidr),
'subnetpool_id': subnetpool_id,
'enable_dhcp': subnet['enable_dhcp'],
'gateway_ip': self._gateway_ip_str(subnet, detail.subnet_cidr),
'shared': shared}
if subnet['ip_version'] == 6 and subnet['enable_dhcp']:
if attributes.is_attr_set(subnet['ipv6_ra_mode']):
args['ipv6_ra_mode'] = subnet['ipv6_ra_mode']
if attributes.is_attr_set(subnet['ipv6_address_mode']):
args['ipv6_address_mode'] = subnet['ipv6_address_mode']
return args
def _gateway_ip_str(self, subnet, cidr_net):
if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED:
return str(cidr_net.network + 1)
return subnet.get('gateway_ip')
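    # Illustrative sketch (not part of the original module): when no gateway_ip
    # is supplied, _gateway_ip_str() falls back to the first usable address of
    # the subnet, i.e. the network address plus one.
    #
    #     >>> import netaddr
    #     >>> cidr_net = netaddr.IPNetwork('10.0.0.0/24')
    #     >>> str(cidr_net.network + 1)
    #     '10.0.0.1'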
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from tensorflow.python.util.compat import collections_abc
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = {}
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections_abc.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
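# Illustrative sketch (not part of the original module): how nested parameter
# names of the form ``<component>__<parameter>`` are routed by set_params().
# The Inner/Outer estimators below are hypothetical.
def _example_nested_set_params():
  class Inner(_BaseEstimator):

    def __init__(self):
      self.alpha = 1.0

  class Outer(_BaseEstimator):

    def __init__(self):
      self.inner = Inner()

  est = Outer()
  est.set_params(inner__alpha=0.5)  # forwarded to est.inner.set_params(alpha=0.5)
  return est.get_params(deep=True)  # contains both 'inner' and 'inner__alpha'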
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
USE OF THIS EXCEPTION IS DEPRECATED.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
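# Illustrative sketch (not part of the original module): _train_test_split()
# defaults to a 75% train fraction and returns (train, test) pairs for each
# input array, mirroring sklearn's train_test_split.
#
#     >>> X = np.arange(20).reshape(10, 2)
#     >>> y = np.arange(10)
#     >>> x_tr, x_te, y_tr, y_te = _train_test_split(X, y, random_state=0)
#     >>> x_tr.shape, x_te.shape
#     ((7, 2), (3, 2))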
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.model_selection import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
|
|
"""
This module (along with a few functions in :mod:`.helper_functions`) contains
everything that is needed to calculate instantons in one field dimension.
The primary class is :class:`SingleFieldInstanton`, which can calculate the
instanton solution in any number of spatial dimensions using the overshoot /
undershoot method. Additional classes inherit common functionality from this
one, and can be used to calculate the bubble wall profile with constant
friction (:class:`WallWithConstFriction`) instead of radius-dependent friction,
or to calculate the instanton in the presence of gravity (*not yet
implemented*).
.. todo::
Create and document a *CDL_Instanton* class for tunneling with gravity.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import optimize, integrate, special, interpolate
from collections import namedtuple
from . import helper_functions
from .helper_functions import rkqs, IntegrationError, clampVal
from .helper_functions import cubicInterpFunction
import sys
if sys.version_info >= (3,0):
xrange = range
class PotentialError(Exception):
"""
Used when the potential does not have the expected characteristics.
The error messages should be tuples, with the second item being one of
``("no barrier", "stable, not metastable")``.
"""
pass
class SingleFieldInstanton:
"""
This class will calculate properties of an instanton with a single scalar
field, without gravity, using the overshoot/undershoot method.
Most users will probably be primarily interested in the functions
:func:`findProfile` and :func:`findAction`.
Note
----
When the bubble is thin-walled (due to nearly degenerate minima), an
approximate solution is found to the equations of motion and integration
starts close to the wall itself (instead of always starting at the center
of the bubble). This way the overshoot/undershoot method runs just as fast
for extremely thin-walled bubbles as it does for thick-walled bubbles.
Parameters
----------
phi_absMin : float
The field value at the stable vacuum to which the instanton
tunnels. Nowhere in the code is it *required* that there actually be a
minimum at `phi_absMin`, but the :func:`findProfile` function will only
use initial conditions between `phi_absMin` and `phi_metaMin`, and the
code is optimized for thin-walled bubbles when the center of the
instanton is close to `phi_absMin`.
phi_metaMin : float
The field value in the metastable vacuum.
V : callable
The potential function. It should take as its single parameter the field
value `phi`.
dV, d2V : callable, optional
The potential's first and second derivatives. If not None, these
override the methods :func:`dV` and :func:`d2V`.
phi_eps : float, optional
A small value used to calculate derivatives (if not overridden by
the user) and in the function :func:`dV_from_absMin`. The input should
be unitless; it is later rescaled by ``abs(phi_absMin - phi_metaMin)``.
alpha : int or float, optional
The coefficient for the friction term in the ODE. This is also
the number of spacetime dimensions minus 1.
phi_bar : float, optional
The field value at the edge of the barrier. If `None`, it is found by
:func:`findBarrierLocation`.
rscale : float, optional
The approximate radial scale of the instanton. If `None` it is found by
:func:`findRScale`.
Raises
------
PotentialError
when the barrier is non-existent or when the presumably stable minimum
has a higher energy than the metastable minimum.
Examples
--------
Thick and thin-walled bubbles:
.. plot::
:include-source:
from cosmoTransitions.tunneling1D import SingleFieldInstanton
import matplotlib.pyplot as plt
# Thin-walled
def V1(phi): return 0.25*phi**4 - 0.49*phi**3 + 0.235 * phi**2
def dV1(phi): return phi*(phi-.47)*(phi-1)
profile = SingleFieldInstanton(1.0, 0.0, V1, dV1).findProfile()
plt.plot(profile.R, profile.Phi)
# Thick-walled
def V2(phi): return 0.25*phi**4 - 0.4*phi**3 + 0.1 * phi**2
def dV2(phi): return phi*(phi-.2)*(phi-1)
profile = SingleFieldInstanton(1.0, 0.0, V2, dV2).findProfile()
plt.plot(profile.R, profile.Phi)
plt.xlabel(r"Radius $r$")
plt.ylabel(r"Field $\phi$")
plt.show()
"""
def __init__(self, phi_absMin, phi_metaMin, V,
dV=None, d2V=None, phi_eps=1e-3, alpha=2,
phi_bar=None, rscale=None):
self.phi_absMin, self.phi_metaMin = phi_absMin, phi_metaMin
self.V = V
if V(phi_metaMin) <= V(phi_absMin):
raise PotentialError("V(phi_metaMin) <= V(phi_absMin); "
"tunneling cannot occur.", "stable, not metastable")
if dV is not None:
self.dV = dV
if d2V is not None:
self.d2V = d2V
if phi_bar is None:
self.phi_bar = self.findBarrierLocation()
else:
self.phi_bar = phi_bar
if rscale is None:
self.rscale = self.findRScale()
else:
self.rscale = rscale
self.alpha = alpha
self.phi_eps = phi_eps * abs(phi_absMin - phi_metaMin)
def dV(self, phi):
R"""
Calculates `dV/dphi` using finite differences.
The finite difference is given by `self.phi_eps`, and the derivative
is calculated to fourth order.
"""
eps = self.phi_eps
V = self.V
return (V(phi-2*eps) - 8*V(phi-eps) + 8*V(phi+eps) - V(phi+2*eps)
) / (12.*eps)
def dV_from_absMin(self, delta_phi):
R"""
Calculates `dV/dphi` at ``phi = phi_absMin + delta_phi``.
It is sometimes helpful to find `dV/dphi` extremely close to the
minimum. In this case, floating-point error can be significant. To get
increased accuracy, this function expands about the minimum in
a Taylor series and uses that for nearby values. That is,
:math:`V'(\phi) \approx V''(\phi_{\rm absMin})(\phi-\phi_{\rm absMin})`.
For values that are farther away, it instead uses :func:`dV`.
It blends the two methods so that there are no numerical
discontinuities.
This uses `self.phi_eps` to determine whether the field is considered
nearby or not.
"""
phi = self.phi_absMin + delta_phi
dV = self.dV(phi)
# If phi is very close to phi_absMin, it should be safer to assume
# that dV is zero exactly at phi_absMin and instead calculate dV from
# d2V.
if self.phi_eps > 0:
dV_ = self.d2V(phi) * delta_phi
# blend the two together so that there are no discontinuities
blend_factor = np.exp(-(delta_phi/self.phi_eps)**2)
dV = dV_*blend_factor + dV*(1-blend_factor)
return dV
def d2V(self, phi):
R"""
Calculates `d^2V/dphi^2` using finite differences.
The finite difference is given by `self.phi_eps`, and the derivative
is calculated to fourth order.
"""
eps = self.phi_eps
V = self.V
return (-V(phi-2*eps) + 16*V(phi-eps) - 30*V(phi)
+ 16*V(phi+eps) - V(phi+2*eps)) / (12.*eps*eps)
def findBarrierLocation(self):
R"""
Find the edge of the potential barrier.
Returns
-------
phi_barrier : float
The value such that `V(phi_barrier) = V(phi_metaMin)`
"""
phi_tol = abs(self.phi_metaMin - self.phi_absMin) * 1e-12
V_phimeta = self.V(self.phi_metaMin)
phi1 = self.phi_metaMin
phi2 = self.phi_absMin
phi0 = 0.5 * (phi1+phi2)
# Do a very simple binary search to narrow down on the right answer.
while abs(phi1-phi2) > phi_tol:
V0 = self.V(phi0)
if V0 > V_phimeta:
phi1 = phi0
else:
phi2 = phi0
phi0 = 0.5 * (phi1+phi2)
return phi0
def findRScale(self):
R"""
Find the characteristic length scale for tunneling over the potential
barrier.
The characteristic length scale should formally be given by the period
of oscillations about the top of the potential barrier. However, it is
perfectly acceptable for the potential barrier to have a flat top, in
which case a naive calculation of the length scale would be infinite.
Instead, this function finds the top of the barrier along with a cubic
function that has a maximum at the barrier top and a minimum at the
metastable minimum. The returned length scale is then the period of
oscillations about this cubic maximum.
Raises
------
PotentialError
when the barrier is non-existent.
"""
"""
NOT USED:
We could also do a sanity check in case the barrier goes to zero.
A second way of finding the scale is to see how long it would take
the field to roll from one minimum to the other if the potential were
purely linear and there were no friction.
Parameters
----------
second_check : float
If bigger than zero, do the sanity check. Return value is then the
larger of the first scale and the second scale times
`second_check`.
"""
phi_tol = abs(self.phi_bar - self.phi_metaMin) * 1e-6
x1 = min(self.phi_bar, self.phi_metaMin)
x2 = max(self.phi_bar, self.phi_metaMin)
phi_bar_top = optimize.fminbound(
lambda x: -self.V(x), x1, x2, xtol=phi_tol)
if phi_bar_top + phi_tol > x2 or phi_bar_top - phi_tol < x1:
raise PotentialError(
"Minimization is placing the top of the "
"potential barrier outside of the interval defined by "
"phi_bar and phi_metaMin. Assume that the barrier does not exist.",
"no barrier")
Vtop = self.V(phi_bar_top) - self.V(self.phi_metaMin)
xtop = phi_bar_top - self.phi_metaMin
# Cubic function given by (ignoring linear and constant terms):
# f(x) = C [(-1/3)x^3 + (1/2)x^2 xtop]
# C = 6 Vtop / xtop^3
# f''(xtop) = - C xtop
# d2V = -6*Vtop / xtop**2
# rscale = 1 / sqrt(d2V)
if Vtop <= 0:
raise PotentialError("Barrier height is not positive, "
"does not exist.", "no barrier")
rscale1 = abs(xtop) / np.sqrt(abs(6*Vtop))
return rscale1
# The following would calculate it a separate way, but this goes
# to infinity when delta_V goes to zero, so it's a bad way of doing it
delta_phi = abs(self.phi_absMin - self.phi_metaMin)
delta_V = abs(self.V(self.phi_absMin) - self.V(self.phi_metaMin))
rscale2 = np.sqrt(2*delta_phi**2 / (delta_V+1e-100))
return max(rscale1, rscale2)
_exactSolution_rval = namedtuple("exactSolution_rval", "phi dphi")
def exactSolution(self, r, phi0, dV, d2V):
R"""
Find `phi(r)` given `phi(r=0)`, assuming a quadratic potential.
Parameters
----------
r : float
The radius at which the solution should be calculated.
phi0 : float
The field at `r=0`.
dV, d2V : float
The potential's first and second derivatives evaluated at `phi0`.
Returns
-------
phi, dphi : float
The field and its derivative evaluated at `r`.
Notes
-----
If the potential at the point :math:`\phi_0` is a simple quadratic, the
solution to the instanton equation of motion can be determined exactly.
The non-singular solution to
.. math::
\frac{d^2\phi}{dr^2} + \frac{\alpha}{r}\frac{d\phi}{dr} =
V'(\phi_0) + V''(\phi_0) (\phi-\phi_0)
is
.. math::
\phi(r)-\phi_0 = \frac{V'}{V''}\left[
\Gamma(\nu+1)\left(\frac{\beta r}{2}\right)^{-\nu} I_\nu(\beta r) - 1
\right]
where :math:`\nu = \frac{\alpha-1}{2}`, :math:`I_\nu` is the modified
Bessel function, and :math:`\beta^2 = V''(\phi_0) > 0`. If instead
:math:`-\beta^2 = V''(\phi_0) < 0`, the solution is the same but with
:math:`I_\nu \rightarrow J_\nu`.
"""
beta = np.sqrt(abs(d2V))
beta_r = beta*r
nu = 0.5 * (self.alpha - 1)
gamma = special.gamma # Gamma function
iv, jv = special.iv, special.jv # (modified) Bessel function
if beta_r < 1e-2:
# Use a small-r approximation for the Bessel function.
s = +1 if d2V > 0 else -1
phi = 0.0
dphi = 0.0
for k in xrange(1,4):
_ = (0.5*beta_r)**(2*k-2) * s**k / (gamma(k+1)*gamma(k+1+nu))
phi += _
dphi += _ * (2*k)
phi *= 0.25 * gamma(nu+1) * r**2 * dV * s
dphi *= 0.25 * gamma(nu+1) * r * dV * s
phi += phi0
elif d2V > 0:
import warnings
# If beta_r is very large, this will trigger overflow and divide-by-zero
# warnings in iv(). It will return np.inf though, which is
# what we want. Just ignore the warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
phi = (gamma(nu+1)*(0.5*beta_r)**-nu * iv(nu, beta_r)-1) * dV/d2V
dphi = -nu*((0.5*beta_r)**-nu / r) * iv(nu, beta_r)
dphi += (0.5*beta_r)**-nu * 0.5*beta \
* (iv(nu-1, beta_r)+iv(nu+1, beta_r))
dphi *= gamma(nu+1) * dV/d2V
phi += phi0
else:
phi = (gamma(nu+1)*(0.5*beta_r)**-nu * jv(nu, beta_r) - 1) * dV/d2V
dphi = -nu*((0.5*beta_r)**-nu / r) * jv(nu, beta_r)
dphi += (0.5*beta_r)**-nu * 0.5*beta \
* (jv(nu-1, beta_r)-jv(nu+1, beta_r))
dphi *= gamma(nu+1) * dV/d2V
phi += phi0
return self._exactSolution_rval(phi, dphi)
_initialConditions_rval = namedtuple(
"initialConditions_rval", "r0 phi dphi")
def initialConditions(self, delta_phi0, rmin, delta_phi_cutoff):
R"""
Finds the initial conditions for integration.
The instanton equations of motion are singular at `r=0`, so we
need to start the integration at some larger radius. This
function finds the value `r0` such that `phi(r0) = phi_cutoff`.
If there is no such value, it returns the initial conditions at `rmin`.
Parameters
----------
delta_phi0 : float
`delta_phi0 = phi(r=0) - phi_absMin`
rmin : float
The smallest acceptable radius at which to start integration.
delta_phi_cutoff : float
The desired value for `phi(r0)`.
`delta_phi_cutoff = phi(r0) - phi_absMin`.
Returns
-------
r0, phi, dphi : float
The initial radius and the field and its derivative at that radius.
Notes
-----
The field values are calculated using :func:`exactSolution`.
"""
phi0 = self.phi_absMin + delta_phi0
dV = self.dV_from_absMin(delta_phi0)
d2V = self.d2V(phi0)
phi_r0, dphi_r0 = self.exactSolution(rmin, phi0, dV, d2V)
if abs(phi_r0 - self.phi_absMin) > abs(delta_phi_cutoff):
# The initial conditions at rmin work. Stop here.
return self._initialConditions_rval(rmin, phi_r0, dphi_r0)
if np.sign(dphi_r0) != np.sign(delta_phi0):
# The field is evolving in the wrong direction.
# Increasing r0 won't increase |delta_phi_r0|.
return self._initialConditions_rval(rmin, phi_r0, dphi_r0)
# Find the smallest r0 such that delta_phi_r0 > delta_phi_cutoff
r = rmin
while np.isfinite(r):
rlast = r
r *= 10
phi, dphi = self.exactSolution(r, phi0, dV, d2V)
if abs(phi - self.phi_absMin) > abs(delta_phi_cutoff):
break
# Now find where phi - self.phi_absMin = delta_phi_cutoff exactly
def deltaPhiDiff(r_):
p = self.exactSolution(r_, phi0, dV, d2V)[0]
return abs(p - self.phi_absMin) - abs(delta_phi_cutoff)
r0 = optimize.brentq(deltaPhiDiff, rlast, r, disp=False)
phi_r0, dphi_r0 = self.exactSolution(r0, phi0, dV, d2V)
return self._initialConditions_rval(r0, phi_r0, dphi_r0)
def equationOfMotion(self, y, r):
"""
Used to integrate the bubble wall.
"""
return np.array([y[1], self.dV(y[0])-self.alpha*y[1]/r])
_integrateProfile_rval = namedtuple(
"integrateProfile_rval", "r y convergence_type")
def integrateProfile(self, r0, y0, dr0,
epsfrac, epsabs, drmin, rmax, *eqn_args):
R"""
Integrate the bubble wall equation:
.. math::
\frac{d^2\phi}{dr^2} + \frac{\alpha}{r}\frac{d\phi}{dr} =
\frac{dV}{d\phi}.
The integration will stop when it either overshoots or undershoots
the false vacuum minimum, or when it converges upon the false vacuum
minimum.
Parameters
----------
r0 : float
The starting radius for the integration.
y0 : array_like
The starting values [phi(r0), dphi(r0)].
dr0 : float
The starting integration stepsize.
epsfrac, epsabs : float
The error tolerances used for integration. This is fed into
:func:`helper_functions.rkqs` and is used to test for convergence.
drmin : float
The minimum allowed value of `dr` before raising an error.
rmax : float
The maximum allowed value of `r-r0` before raising an error.
eqn_args : tuple
Extra arguments to pass to :func:`equationOfMotion`. Useful for
subclasses.
Returns
-------
r : float
The final radius.
y : array_like
The final field values [phi, dphi]
convergence_type : str
Either 'overshoot', 'undershoot', or 'converged'.
Raises
------
helper_functions.IntegrationError
"""
dr = dr0
# dY is the ODE that we use
def dY(y,r,args=eqn_args):
return self.equationOfMotion(y,r,*args)
dydr0 = dY(y0, r0)
ysign = np.sign(y0[0]-self.phi_metaMin)
# positive means we're heading down, negative means heading up.
rmax += r0
convergence_type = None
while True:
dy, dr, drnext = rkqs(y0, dydr0, r0, dY, dr, epsfrac, epsabs)
r1 = r0 + dr
y1 = y0 + dy
dydr1 = dY(y1,r1)
# Check for completion
if (r1 > rmax):
raise IntegrationError("r > rmax")
elif (dr < drmin):
raise IntegrationError("dr < drmin")
elif (abs(y1 - np.array([self.phi_metaMin,0])) < 3*epsabs).all():
r,y = r1,y1
convergence_type = "converged"
break
elif y1[1]*ysign > 0 or (y1[0]-self.phi_metaMin)*ysign < 0:
f = cubicInterpFunction(y0, dr*dydr0, y1, dr*dydr1)
if(y1[1]*ysign > 0):
# Extrapolate to where dphi(r) = 0
x = optimize.brentq(lambda x: f(x)[1], 0, 1)
convergence_type = "undershoot"
else:
# Extrapolate to where phi(r) = phi_metaMin
x = optimize.brentq(lambda x: f(x)[0]-self.phi_metaMin, 0,1)
convergence_type = "overshoot"
r = r0 + dr*x
y = f(x)
break
# Advance the integration variables
r0,y0,dydr0 = r1,y1,dydr1
dr = drnext
# Check convergence for a second time.
# The extrapolation in overshoot/undershoot might have gotten us within
# the acceptable error.
if (abs(y - np.array([self.phi_metaMin,0])) < 3*epsabs).all():
convergence_type = "converged"
return self._integrateProfile_rval(r, y, convergence_type)
profile_rval = namedtuple("Profile1D", "R Phi dPhi Rerr")
def integrateAndSaveProfile(self, R, y0, dr,
epsfrac, epsabs, drmin, *eqn_args):
"""
Integrate the bubble profile, saving the output in an array.
Parameters
----------
R: array_like
The array of points at which we want to save the profile.
y0 : array_like
The starting values [phi(r0), dphi(r0)].
dr : float
Starting stepsize.
epsfrac, epsabs : float
The error tolerances used for integration. This
is fed into :func:`helper_functions.rkqs`.
drmin : float
The smallest allowed stepsize.
eqn_args : tuple
Extra arguments to pass to :func:`equationOfMotion`. Useful for
subclasses.
Returns
-------
R, Phi, dPhi : array_like
Radii and field values which make up the bubble profile.
Rerr : float or None
The first value of `r` at which ``dr < drmin``, or `None` if
``dr >= drmin`` always.
Notes
-----
Subclasses can use this function without overriding it even if the
subclass uses more fields/values in its equation of motion (i.e.,
``len(y0) > 2``). This is accomplished by setting the class variable
`profile_rval` to a different named tuple type with more than four
inputs. The first three should always be *R, Phi, dPhi*, and the last
one should be *Rerr*, but additional values can be stuck in between.
"""
N = len(R)
R, r0 = np.array(R), R[0]
Yout = np.zeros((N,len(y0)))
Yout[0] = y0
# dY is the ODE that we use
def dY(y,r,args=eqn_args):
return self.equationOfMotion(y,r,*args)
dydr0 = dY(y0, r0)
Rerr = None
i = 1
while i < N:
dy, dr, drnext = rkqs(y0, dydr0, r0, dY, dr, epsfrac, epsabs)
if (dr >= drmin):
r1 = r0 + dr
y1 = y0 + dy
else:
y1 = y0 + dy*drmin/dr
dr = drnext = drmin
r1 = r0 + dr
if Rerr is None: Rerr = r1  # record the first radius at which dr fell below drmin
dydr1 = dY(y1,r1)
# Fill the arrays, if necessary
if (r0 < R[i] <= r1):
f = cubicInterpFunction(y0, dr*dydr0, y1, dr*dydr1)
while (i < N and r0 < R[i] <= r1):
x = (R[i]-r0)/dr
Yout[i] = f(x)
i += 1
# Advance the integration variables
r0,y0,dydr0 = r1,y1,dydr1
dr = drnext
rval = (R,)+tuple(Yout.T)+eqn_args+(Rerr,)
return self.profile_rval(*rval)
def findProfile(self, xguess=None, xtol=1e-4, phitol=1e-4,
thinCutoff=.01, npoints=500, rmin=1e-4, rmax=1e4,
max_interior_pts=None):
R"""
Calculate the bubble profile by iteratively over/undershooting.
This will call :func:`integrateProfile` many times, trying to find
the correct initial condition `phi(r=0)` such that the field ends up
in the metastable vacuum at infinity. Once the correct initial
condition is found, it calls :func:`integrateAndSaveProfile` to find
the profile along the length of the wall.
Parameters
----------
xguess : float, optional
The initial guess for `x`. If `None`, `xguess` is set such
that ``phi_guess = self.phi_bar``.
xtol : float, optional
Target accuracy in `x`.
phitol : float, optional
Fractional error tolerance in integration.
thinCutoff : float, optional
Equal to `delta_phi_cutoff / (phi_metaMin - phi_absMin)`, where
`delta_phi_cutoff` is used in :func:`initialConditions`.
npoints : int
Number of points to return in the profile.
rmin : float
Relative to ``self.rscale``. Sets the smallest starting
radius, the starting stepsize, and the smallest allowed stepsize
(``0.01*rmin``).
rmax : float
Relative to ``self.rscale``. Sets the maximum allowed integration
distance.
max_interior_pts : int
Maximum number of points to place between ``r=0`` and the start of
integration. If None, ``max_interior_pts=npoints/2``. If zero, no
points are added to the bubble interior.
Returns
-------
R, Phi, dPhi : array_like
Radii and field values which make up the bubble profile. Note that
`R[0]` can be much bigger than zero for thin-walled bubbles.
Rerr : float or None
The first value of `r` at which ``dr < drmin``, or `None` if
``dr >= drmin`` always.
Notes
-----
For very thin-walled bubbles, the initial value of `phi` can be
extremely close to the stable minimum and small variations in `phi`
can cause large variations in the integration. Rather than varying
`phi(r=0)` directly, it is easier to vary a parameter `x` defined by
.. math::
\phi(r=0) = \phi_{\rm absMin}
+ e^{-x}(\phi_{\rm metaMin}-\phi_{\rm absMin})
This way, `phi = phi_metaMin` when `x` is zero and
`phi = phi_absMin` when `x` is infinity.
"""
# Set x parameters
xmin = xtol*10
xmax = np.inf
if xguess is not None:
x = xguess
else:
x = -np.log(abs((self.phi_bar-self.phi_absMin) /
(self.phi_metaMin-self.phi_absMin)))
xincrease = 5.0
# The relative amount to increase x by if there is no upper bound.
# --
# Set r parameters
rmin *= self.rscale
dr0 = rmin
drmin = 0.01*rmin
rmax *= self.rscale
# --
# Set the phi parameters
delta_phi = self.phi_metaMin - self.phi_absMin
epsabs = abs(np.array([delta_phi, delta_phi/self.rscale])*phitol)
epsfrac = np.array([1,1]) * phitol
delta_phi_cutoff = thinCutoff * delta_phi
# The sign for delta_phi_cutoff doesn't matter
# --
integration_args = (dr0, epsfrac, epsabs, drmin, rmax)
rf = None
while True:
delta_phi0 = np.exp(-x)*delta_phi
# r0, phi0, dphi0 = self.initialConditions(x, rmin, thinCutoff)
r0_, phi0, dphi0 = self.initialConditions(
delta_phi0, rmin, delta_phi_cutoff)
if not np.isfinite(r0_) or not np.isfinite(x):
# Use the last finite values instead
# (assuming there are such values)
assert rf is not None, "Failed to retrieve initial "\
"conditions on the first try."
break
r0 = r0_
y0 = np.array([phi0, dphi0])
rf, yf, ctype = self.integrateProfile(r0, y0, *integration_args)
# Check for overshoot, undershoot
if ctype == "converged":
break
elif ctype == "undershoot": # x is too low
xmin = x
x = x*xincrease if xmax == np.inf else .5*(xmin+xmax)
elif ctype == "overshoot": # x is too high
xmax = x
x = .5*(xmin+xmax)
# Check if we've reached xtol
if (xmax-xmin) < xtol:
break
# Integrate a second time, this time getting the points along the way
R = np.linspace(r0, rf, npoints)
profile = self.integrateAndSaveProfile(R, y0, dr0,
epsfrac, epsabs, drmin)
# Make points interior to the bubble.
if max_interior_pts is None:
max_interior_pts = len(R) // 2
if max_interior_pts > 0:
dx0 = R[1]-R[0]
if R[0] / dx0 <= max_interior_pts:
n = int(np.ceil(R[0]/dx0))
R_int = np.linspace(0, R[0], n+1)[:-1]
else:
n = max_interior_pts
# R[0] = dx0 * (n + a*n*(n+1)/2)
a = (R[0]/dx0 - n) * 2/(n*(n+1))
N = np.arange(1,n+1)[::-1]
R_int = R[0] - dx0*(N + 0.5*a*N*(N+1))
R_int[0] = 0.0 # enforce this exactly
Phi_int = np.empty_like(R_int)
dPhi_int = np.empty_like(R_int)
Phi_int[0] = self.phi_absMin + delta_phi0
dPhi_int[0] = 0.0
dV = self.dV_from_absMin(delta_phi0)
d2V = self.d2V(Phi_int[0])
for i in xrange(1,len(R_int)):
Phi_int[i], dPhi_int[i] = self.exactSolution(
R_int[i], Phi_int[0], dV, d2V)
R = np.append(R_int, profile.R)
Phi = np.append(Phi_int, profile.Phi)
dPhi = np.append(dPhi_int, profile.dPhi)
profile = self.profile_rval(R,Phi,dPhi, profile.Rerr)
return profile
def findAction(self, profile):
R"""
Calculate the Euclidean action for the instanton:
.. math::
S = \int \left[\tfrac{1}{2}(d\phi/dr)^2 + V(\phi) - V(\phi_{\rm metaMin})\right] r^\alpha \, dr \, d\Omega_\alpha
Arguments
---------
profile
Output from :func:`findProfile()`.
Returns
-------
float
The Euclidean action.
"""
r, phi, dphi = profile.R, profile.Phi, profile.dPhi
# Find the area of an n-sphere (alpha=n):
d = self.alpha+1 # Number of dimensions in the integration
area = r**self.alpha * 2*np.pi**(d*.5)/special.gamma(d*.5)
# And integrate the profile
integrand = 0.5 * dphi**2 + self.V(phi) - self.V(self.phi_metaMin)
integrand *= area
S = integrate.simps(integrand, r)
# Find the bulk term in the bubble interior
volume = r[0]**d * np.pi**(d*.5)/special.gamma(d*.5 + 1)
S += volume * (self.V(phi[0]) - self.V(self.phi_metaMin))
return S
def evenlySpacedPhi(self, phi, dphi, npoints=100, k=1, fixAbs=True):
"""
This method takes `phi` and `dphi` as input, which will probably
come from the output of :func:`findProfile`, and returns a different
set of arrays `phi2` and `dphi2` such that `phi2` is evenly spaced
(rather than `r` being evenly spaced).
Parameters
----------
phi, dphi : array_like
npoints : int
The number of points to output.
k : int
The degree of spline fitting. ``k=1`` means linear interpolation.
fixAbs : bool
If true, make phi go all the way to `phi_absMin`.
"""
if fixAbs is True:
phi = np.append(self.phi_absMin, np.append(phi, self.phi_metaMin))
dphi = np.append(0.0, np.append(dphi, 0.0))
else:
phi = np.append(phi, self.phi_metaMin)
dphi = np.append(dphi, 0.0)
# Make sure that phi is increasing everywhere
# (this is uglier than it ought to be)
i = helper_functions.monotonicIndices(phi)
# Now do the interpolation
tck = interpolate.splrep(phi[i], dphi[i], k=k)
if fixAbs:
p = np.linspace(self.phi_absMin, self.phi_metaMin, npoints)
else:
p = np.linspace(phi[i][0], self.phi_metaMin, npoints)
dp = interpolate.splev(p, tck)
return p, dp
class WallWithConstFriction(SingleFieldInstanton):
"""
This class solves a modified version of the instanton equations of motion
with a *constant* friction term.
This may be useful if one wants to estimate the shape of a bubble wall
moving through a plasma. It will, however, be a rough estimate since a real
friction force would most likely be field-dependent.
"""
def findRScale(self):
R"""
Find the characteristic length scale for tunneling over the potential
barrier.
Since for this class the tunneling solution always goes between the two
minima, we want to take the overall shape between the two (not just
at the top of the barrier) to set the radial scale. This finds the scale
by fitting a simple quadratic to the potential.
Raises
------
PotentialError
when the barrier is non-existent.
"""
pA = self.phi_absMin
pB = 0.5 * (self.phi_bar + self.phi_metaMin)
pC = self.phi_metaMin
yA = self.V(pA)
yB = self.V(pB)
yC = self.V(pC)
# Let lmda be the quadratic coefficient that will fit these 3 points
lmda = 2*((yA-yB)/(pA-pB) - (yB-yC)/(pB-pC)) / (pC-pA)
if lmda <= 0.0:
raise PotentialError("Cannot fit the potential to a negative "
"quadratic.", "no barrier")
omega = np.sqrt(lmda) # frequency of oscillations
return np.pi / omega
def initialConditions(self, F, phi0_rel=1e-3):
R"""
Get the initial conditions for integration.
Parameters
----------
F : float
Magnitude of the friction term.
phi0_rel : float
The initial value for the field, relative to the two minima
with 0.0 being at `phi_absMin` and 1.0 being at `phi_metaMin`
(should be close to 0.0).
Returns
-------
r0, phi, dphi : float
The initial radius and the field and its derivative at that radius.
Notes
-----
Approximate the equation of motion near the minimum as
.. math::
\phi'' + F \phi' = (\phi-\phi_{absMin}) \frac{d^2V}{d\phi^2}
which has solution
.. math::
\phi(r) = (\phi_0-\phi_{absMin}) e^{kr} + \phi_{absMin}
where :math:`k = (\sqrt{F^2 + 4 V''} - F) / 2`.
"""
k = 0.5 * (np.sqrt(F*F+4*self.d2V(self.phi_absMin)) - F)
r0 = 0.0
phi0 = self.phi_absMin + phi0_rel * (self.phi_metaMin-self.phi_absMin)
dphi0 = k * (phi0 - self.phi_absMin)
return self._initialConditions_rval(r0, phi0, dphi0)
def equationOfMotion(self, y, r, F):
"""
Used to integrate the bubble wall.
"""
return np.array([y[1], self.dV(y[0])-F*y[1]])
profile_rval = namedtuple("Profile1D", "R Phi dPhi F Rerr")
def findProfile(self, Fguess=None, Ftol=1e-4, phitol=1e-4,
npoints=500, rmin=1e-4, rmax=1e4, phi0_rel=1e-3):
R"""
Calculate the bubble profile by iteratively over/undershooting.
Parameters
----------
Fguess : float, optional
The initial guess for `F`. If `None`, `Fguess` is calculated from
`self.rscale`.
Ftol : float, optional
Target accuracy in `F`, relative to `Fguess`.
phitol : float, optional
Fractional error tolerance in integration.
npoints : int
Number of points to return in the profile.
rmin : float
Relative to ``self.rscale``. Sets the smallest starting
radius, the starting stepsize, and the smallest allowed stepsize
(``0.01*rmin``).
rmax : float
Relative to ``self.rscale``. Sets the maximum allowed integration
distance.
phi0_rel : float
Passed to :func:`initialConditions`.
Returns
-------
R, Phi, dPhi : array_like
Radii and field values which make up the bubble profile. Note that
`R[0]` can be much bigger than zero for thin-walled bubbles.
Rerr : float or None
The first value of `r` at which ``dr < drmin``, or `None` if
``dr >= drmin`` always.
"""
# Set r parameters
rmin *= self.rscale
dr0 = rmin
drmin = 0.01*rmin
rmax *= self.rscale
# --
# Set the phi parameters
delta_phi = self.phi_metaMin - self.phi_absMin
epsabs = abs(np.array([delta_phi, delta_phi/self.rscale])*phitol)
epsfrac = np.array([1,1]) * phitol
# --
# Set F parameters
Fmin = 0.0
Fmax = np.inf
if Fguess is not None:
F = Fguess
else:
# Find F from conservation of energy
# (total work done to slow down the field)
Delta_V = self.V(self.phi_metaMin) - self.V(self.phi_absMin)
F = Delta_V * self.rscale / delta_phi**2
Ftol *= F
Fincrease = 5.0
# The relative amount to increase F by if there is no upper bound.
# --
integration_args = [dr0, epsfrac, epsabs, drmin, rmax, F]
rf = None
while True:
r0, phi0, dphi0 = self.initialConditions(F, phi0_rel)
y0 = np.array([phi0, dphi0])
integration_args[-1] = F
rf, yf, ctype = self.integrateProfile(r0, y0, *integration_args)
# Check for overshoot, undershoot
if ctype == "converged":
break
elif ctype == "undershoot": # F is too high
Fmax = F
F = F/Fincrease if Fmin == 0.0 else .5*(Fmin+Fmax)
elif ctype == "overshoot": # F is too low
Fmin = F
F = F*Fincrease if Fmax == np.inf else .5*(Fmin+Fmax)
# Check if we've reached Ftol
if (Fmax-Fmin) < Ftol:
break
# Integrate a second time, this time getting the points along the way
R = np.linspace(r0, rf, npoints)
profile = self.integrateAndSaveProfile(R, y0, dr0,
epsfrac, epsabs, drmin, F)
return profile
def findAction(self, profile):
"""
Always returns `np.inf`.
"""
return np.inf
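# Illustrative sketch (not part of the original module): computing a profile
# and action with the thin-walled potential from the SingleFieldInstanton
# docstring. The numerical value of the action depends on ``alpha``.
#
#     >>> def V(phi): return 0.25*phi**4 - 0.49*phi**3 + 0.235*phi**2
#     >>> def dV(phi): return phi*(phi - 0.47)*(phi - 1.0)
#     >>> inst = SingleFieldInstanton(1.0, 0.0, V, dV)
#     >>> profile = inst.findProfile()
#     >>> action = inst.findAction(profile)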
|
|
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# This program implements a topology like the one below:
# pem: physical endpoint manager, implemented as a bpf program
#
# vm1 <--------+ +----> bridge1 <----+
# V V V
# pem router
# ^ ^ ^
# vm2 <--------+ +----> bridge2 <----+
#
# The vm1, vm2 and router are implemented as namespaces.
# The bridge is implemented with limited functionality in bpf program.
#
# vm1 and vm2 are in different subnets. For vm1 to communicate with vm2,
# the packet has to travel from vm1 to pem, bridge1, router, bridge2, pem, and
# then on to vm2.
#
# When this test is run with verbose mode (ctest -R <test_name> -V),
# the following printout is observed on my local box:
#
# ......
# 8: ARPING 100.1.1.254 from 100.1.1.1 eth0
# 8: Unicast reply from 100.1.1.254 [76:62:B5:5C:8C:6F] 0.533ms
# 8: Sent 1 probes (1 broadcast(s))
# 8: Received 1 response(s)
# 8: ARPING 200.1.1.254 from 200.1.1.1 eth0
# 8: Unicast reply from 200.1.1.254 [F2:F0:B4:ED:7B:1B] 0.524ms
# 8: Sent 1 probes (1 broadcast(s))
# 8: Received 1 response(s)
# 8: PING 200.1.1.1 (200.1.1.1) 56(84) bytes of data.
# 8: 64 bytes from 200.1.1.1: icmp_req=1 ttl=63 time=0.074 ms
# 8: 64 bytes from 200.1.1.1: icmp_req=2 ttl=63 time=0.061 ms
# 8:
# 8: --- 200.1.1.1 ping statistics ---
# 8: 2 packets transmitted, 2 received, 0% packet loss, time 999ms
# 8: rtt min/avg/max/mdev = 0.061/0.067/0.074/0.010 ms
# 8: [ ID] Interval Transfer Bandwidth
# 8: [ 5] 0.0- 1.0 sec 4.00 GBytes 34.3 Gbits/sec
# 8: Starting netserver with host 'IN(6)ADDR_ANY' port '12865' and family AF_UNSPEC
# 8: MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo
# 8: Recv Send Send
# 8: Socket Socket Message Elapsed
# 8: Size Size Size Time Throughput
# 8: bytes bytes bytes secs. 10^6bits/sec
# 8:
# 8: 87380 16384 65160 1.00 41991.68
# 8: MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo : first burst 0
# 8: Local /Remote
# 8: Socket Size Request Resp. Elapsed Trans.
# 8: Send Recv Size Size Time Rate
# 8: bytes Bytes bytes bytes secs. per sec
# 8:
# 8: 16384 87380 1 1 1.00 48645.53
# 8: 16384 87380
# 8: .
# 8: ----------------------------------------------------------------------
# 8: Ran 1 test in 11.296s
# 8:
# 8: OK
from ctypes import c_uint
from netaddr import IPAddress, EUI
from bcc import BPF
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
from utils import NSPopenWithCheck
import sys
from time import sleep
from unittest import main, TestCase
from simulation import Simulation
arg1 = sys.argv.pop(1)
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
sim = Simulation(ipdb)
class TestBPFSocket(TestCase):
def set_default_const(self):
self.ns1 = "ns1"
self.ns2 = "ns2"
self.ns_router = "ns_router"
self.vm1_ip = "100.1.1.1"
self.vm2_ip = "200.1.1.1"
self.vm1_rtr_ip = "100.1.1.254"
self.vm2_rtr_ip = "200.1.1.254"
self.vm1_rtr_mask = "100.1.1.0/24"
self.vm2_rtr_mask = "200.1.1.0/24"
def get_table(self, b):
self.jump = b.get_table("jump")
self.pem_dest = b.get_table("pem_dest")
self.pem_port = b.get_table("pem_port")
self.pem_ifindex = b.get_table("pem_ifindex")
self.pem_stats = b.get_table("pem_stats")
self.br1_dest = b.get_table("br1_dest")
self.br1_mac = b.get_table("br1_mac")
self.br1_rtr = b.get_table("br1_rtr")
self.br2_dest = b.get_table("br2_dest")
self.br2_mac = b.get_table("br2_mac")
self.br2_rtr = b.get_table("br2_rtr")
def connect_ports(self, prog_id_pem, prog_id_br, curr_pem_pid, curr_br_pid,
br_dest_map, br_mac_map, ifindex, vm_mac, vm_ip):
self.pem_dest[c_uint(curr_pem_pid)] = self.pem_dest.Leaf(prog_id_br, curr_br_pid)
br_dest_map[c_uint(curr_br_pid)] = br_dest_map.Leaf(prog_id_pem, curr_pem_pid)
self.pem_port[c_uint(curr_pem_pid)] = c_uint(ifindex)
self.pem_ifindex[c_uint(ifindex)] = c_uint(curr_pem_pid)
mac_addr = br_mac_map.Key(int(EUI(vm_mac)))
br_mac_map[mac_addr] = c_uint(curr_br_pid)
def config_maps(self):
# program id
prog_id_pem = 1
prog_id_br1 = 2
prog_id_br2 = 3
# initial port id and table pointers
curr_pem_pid = 0
curr_br1_pid = 0
curr_br2_pid = 0
# configure jump table
self.jump[c_uint(prog_id_pem)] = c_uint(self.pem_fn.fd)
self.jump[c_uint(prog_id_br1)] = c_uint(self.br1_fn.fd)
self.jump[c_uint(prog_id_br2)] = c_uint(self.br2_fn.fd)
# connect pem and br1
curr_pem_pid = curr_pem_pid + 1
curr_br1_pid = curr_br1_pid + 1
self.connect_ports(prog_id_pem, prog_id_br1, curr_pem_pid, curr_br1_pid,
self.br1_dest, self.br1_mac,
self.ns1_eth_out.index, self.vm1_mac, self.vm1_ip)
# connect pem and br2
curr_pem_pid = curr_pem_pid + 1
curr_br2_pid = curr_br2_pid + 1
self.connect_ports(prog_id_pem, prog_id_br2, curr_pem_pid, curr_br2_pid,
self.br2_dest, self.br2_mac,
self.ns2_eth_out.index, self.vm2_mac, self.vm2_ip)
# connect <br1, rtr> and <br2, rtr>
self.br1_rtr[c_uint(0)] = c_uint(self.nsrtr_eth0_out.index)
self.br2_rtr[c_uint(0)] = c_uint(self.nsrtr_eth1_out.index)
def test_brb(self):
try:
b = BPF(src_file=arg1, debug=0)
self.pem_fn = b.load_func("pem", BPF.SCHED_CLS)
self.br1_fn = b.load_func("br1", BPF.SCHED_CLS)
self.br2_fn = b.load_func("br2", BPF.SCHED_CLS)
self.get_table(b)
# set up the topology
self.set_default_const()
(ns1_ipdb, self.ns1_eth_out, _) = sim._create_ns(self.ns1, ipaddr=self.vm1_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
(ns2_ipdb, self.ns2_eth_out, _) = sim._create_ns(self.ns2, ipaddr=self.vm2_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
ns1_ipdb.routes.add({'dst': self.vm2_rtr_mask, 'gateway': self.vm1_rtr_ip}).commit()
ns2_ipdb.routes.add({'dst': self.vm1_rtr_mask, 'gateway': self.vm2_rtr_ip}).commit()
self.vm1_mac = ns1_ipdb.interfaces['eth0'].address
self.vm2_mac = ns2_ipdb.interfaces['eth0'].address
(_, self.nsrtr_eth0_out, _) = sim._create_ns(self.ns_router, ipaddr=self.vm1_rtr_ip+'/24',
fn=self.br1_fn, action='drop',
disable_ipv6=True)
(rt_ipdb, self.nsrtr_eth1_out, _) = sim._ns_add_ifc(self.ns_router, "eth1", "ns_router2",
ipaddr=self.vm2_rtr_ip+'/24',
fn=self.br2_fn, action='drop',
disable_ipv6=True)
nsp = NSPopen(rt_ipdb.nl.netns, ["sysctl", "-w", "net.ipv4.ip_forward=1"])
nsp.wait(); nsp.release()
# configure maps
self.config_maps()
# our bridge is not smart enough, so send an arping for router MAC learning to
# prevent the router from sending out ARP requests
nsp = NSPopen(ns1_ipdb.nl.netns,
["arping", "-w", "1", "-c", "1", "-I", "eth0", self.vm1_rtr_ip])
nsp.wait(); nsp.release()
nsp = NSPopen(ns2_ipdb.nl.netns,
["arping", "-w", "1", "-c", "1", "-I", "eth0", self.vm2_rtr_ip])
nsp.wait(); nsp.release()
# ping
nsp = NSPopen(ns1_ipdb.nl.netns, ["ping", self.vm2_ip, "-c", "2"])
nsp.wait(); nsp.release()
# pem_stats only counts pem->bridge traffic; each VM contributes 4 packets
# (arping, ARP request, and 2 ICMP requests), so 8 packets total should be counted
self.assertEqual(self.pem_stats[c_uint(0)].value, 8)
nsp_server = NSPopenWithCheck(ns2_ipdb.nl.netns, ["iperf", "-s", "-xSC"])
sleep(1)
nsp = NSPopen(ns1_ipdb.nl.netns, ["iperf", "-c", self.vm2_ip, "-t", "1", "-xSC"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
nsp_server = NSPopenWithCheck(ns2_ipdb.nl.netns, ["netserver", "-D"])
sleep(1)
nsp = NSPopenWithCheck(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "--", "-m", "65160"])
nsp.wait(); nsp.release()
nsp = NSPopen(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "-t", "TCP_RR"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
finally:
sim.release()
ipdb.release()
if __name__ == "__main__":
main()
|
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest
import mock
class Test_UDFResource(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import UDFResource
return UDFResource
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
udf = self._make_one('resourceUri', 'gs://some_bucket/some_file')
self.assertEqual(udf.udf_type, 'resourceUri')
self.assertEqual(udf.value, 'gs://some_bucket/some_file')
def test___eq__(self):
udf = self._make_one('resourceUri', 'gs://some_bucket/some_file')
self.assertEqual(udf, udf)
self.assertNotEqual(udf, object())
wrong_val = self._make_one(
'resourceUri', 'gs://some_bucket/other_file')
self.assertNotEqual(udf, wrong_val)
wrong_type = self._make_one('inlineCode', udf.value)
self.assertNotEqual(udf, wrong_type)
class Test__AbstractQueryParameter(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import _AbstractQueryParameter
return _AbstractQueryParameter
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_from_api_virtual(self):
klass = self._get_target_class()
with self.assertRaises(NotImplementedError):
klass.from_api_repr({})
def test_to_api_virtual(self):
param = self._make_one()
with self.assertRaises(NotImplementedError):
param.to_api_repr()
class Test_ScalarQueryParameter(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import ScalarQueryParameter
return ScalarQueryParameter
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
param = self._make_one(name='foo', type_='INT64', value=123)
self.assertEqual(param.name, 'foo')
self.assertEqual(param.type_, 'INT64')
self.assertEqual(param.value, 123)
def test___eq__(self):
param = self._make_one(name='foo', type_='INT64', value=123)
self.assertEqual(param, param)
self.assertNotEqual(param, object())
alias = self._make_one(name='bar', type_='INT64', value=123)
self.assertNotEqual(param, alias)
wrong_type = self._make_one(name='foo', type_='FLOAT64', value=123.0)
self.assertNotEqual(param, wrong_type)
wrong_val = self._make_one(name='foo', type_='INT64', value=234)
self.assertNotEqual(param, wrong_val)
def test_positional(self):
klass = self._get_target_class()
param = klass.positional(type_='INT64', value=123)
self.assertEqual(param.name, None)
self.assertEqual(param.type_, 'INT64')
self.assertEqual(param.value, 123)
def test_from_api_repr_w_name(self):
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': 123,
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, 'foo')
self.assertEqual(param.type_, 'INT64')
self.assertEqual(param.value, 123)
def test_from_api_repr_wo_name(self):
RESOURCE = {
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': '123',
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, None)
self.assertEqual(param.type_, 'INT64')
self.assertEqual(param.value, 123)
def test_to_api_repr_w_name(self):
EXPECTED = {
'name': 'foo',
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': '123',
},
}
param = self._make_one(name='foo', type_='INT64', value=123)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_wo_name(self):
EXPECTED = {
'parameterType': {
'type': 'INT64',
},
'parameterValue': {
'value': '123',
},
}
klass = self._get_target_class()
param = klass.positional(type_='INT64', value=123)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_float(self):
EXPECTED = {
'parameterType': {
'type': 'FLOAT64',
},
'parameterValue': {
'value': 12.345,
},
}
klass = self._get_target_class()
param = klass.positional(type_='FLOAT64', value=12.345)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_numeric(self):
EXPECTED = {
'parameterType': {
'type': 'NUMERIC',
},
'parameterValue': {
'value': '123456789.123456789',
},
}
klass = self._get_target_class()
param = klass.positional(type_='NUMERIC',
value='123456789.123456789')
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_bool(self):
EXPECTED = {
'parameterType': {
'type': 'BOOL',
},
'parameterValue': {
'value': 'false',
},
}
klass = self._get_target_class()
param = klass.positional(type_='BOOL', value=False)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_timestamp_datetime(self):
from google.cloud._helpers import UTC
STAMP = '2016-12-20 15:58:27.339328+00:00'
when = datetime.datetime(2016, 12, 20, 15, 58, 27, 339328, tzinfo=UTC)
EXPECTED = {
'parameterType': {
'type': 'TIMESTAMP',
},
'parameterValue': {
'value': STAMP,
},
}
klass = self._get_target_class()
param = klass.positional(type_='TIMESTAMP', value=when)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_timestamp_micros(self):
from google.cloud._helpers import _microseconds_from_datetime
now = datetime.datetime.utcnow()
seconds = _microseconds_from_datetime(now) / 1.0e6
EXPECTED = {
'parameterType': {
'type': 'TIMESTAMP',
},
'parameterValue': {
'value': seconds,
},
}
klass = self._get_target_class()
param = klass.positional(type_='TIMESTAMP', value=seconds)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_datetime_datetime(self):
from google.cloud._helpers import _datetime_to_rfc3339
now = datetime.datetime.utcnow()
EXPECTED = {
'parameterType': {
'type': 'DATETIME',
},
'parameterValue': {
'value': _datetime_to_rfc3339(now)[:-1], # strip trailing 'Z'
},
}
klass = self._get_target_class()
param = klass.positional(type_='DATETIME', value=now)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_datetime_string(self):
from google.cloud._helpers import _datetime_to_rfc3339
now = datetime.datetime.utcnow()
now_str = _datetime_to_rfc3339(now)
EXPECTED = {
'parameterType': {
'type': 'DATETIME',
},
'parameterValue': {
'value': now_str,
},
}
klass = self._get_target_class()
param = klass.positional(type_='DATETIME', value=now_str)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_date_date(self):
today = datetime.date.today()
EXPECTED = {
'parameterType': {
'type': 'DATE',
},
'parameterValue': {
'value': today.isoformat(),
},
}
klass = self._get_target_class()
param = klass.positional(type_='DATE', value=today)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_date_string(self):
today = datetime.date.today()
today_str = today.isoformat()
EXPECTED = {
'parameterType': {
'type': 'DATE',
},
'parameterValue': {
'value': today_str,
},
}
klass = self._get_target_class()
param = klass.positional(type_='DATE', value=today_str)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_unknown_type(self):
EXPECTED = {
'parameterType': {
'type': 'UNKNOWN',
},
'parameterValue': {
'value': 'unknown',
},
}
klass = self._get_target_class()
param = klass.positional(type_='UNKNOWN', value='unknown')
self.assertEqual(param.to_api_repr(), EXPECTED)
def test___eq___wrong_type(self):
field = self._make_one('test', 'STRING', 'value')
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___eq___name_mismatch(self):
field = self._make_one('test', 'STRING', 'value')
other = self._make_one('other', 'STRING', 'value')
self.assertNotEqual(field, other)
def test___eq___field_type_mismatch(self):
field = self._make_one('test', 'STRING', None)
other = self._make_one('test', 'INT64', None)
self.assertNotEqual(field, other)
def test___eq___value_mismatch(self):
field = self._make_one('test', 'STRING', 'hello')
other = self._make_one('test', 'STRING', 'world')
self.assertNotEqual(field, other)
def test___eq___hit(self):
field = self._make_one('test', 'STRING', 'gotcha')
other = self._make_one('test', 'STRING', 'gotcha')
self.assertEqual(field, other)
def test___ne___wrong_type(self):
field = self._make_one('toast', 'INT64', 13)
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___ne___same_value(self):
field1 = self._make_one('test', 'INT64', 12)
field2 = self._make_one('test', 'INT64', 12)
# unittest ``assertEqual`` uses ``==`` not ``!=``.
comparison_val = (field1 != field2)
self.assertFalse(comparison_val)
def test___ne___different_values(self):
field1 = self._make_one('test', 'INT64', 11)
field2 = self._make_one('test', 'INT64', 12)
self.assertNotEqual(field1, field2)
def test___repr__(self):
field1 = self._make_one('field1', 'STRING', 'value')
expected = "ScalarQueryParameter('field1', 'STRING', 'value')"
self.assertEqual(repr(field1), expected)
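# Module-level helper: build a ScalarQueryParameter for use as a sub-parameter
# in the struct and array parameter tests below.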
def _make_subparam(name, type_, value):
from google.cloud.bigquery.query import ScalarQueryParameter
return ScalarQueryParameter(name, type_, value)
class Test_ArrayQueryParameter(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import ArrayQueryParameter
return ArrayQueryParameter
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
param = self._make_one(name='foo', array_type='INT64', values=[1, 2])
self.assertEqual(param.name, 'foo')
self.assertEqual(param.array_type, 'INT64')
self.assertEqual(param.values, [1, 2])
def test___eq__(self):
param = self._make_one(name='foo', array_type='INT64', values=[123])
self.assertEqual(param, param)
self.assertNotEqual(param, object())
alias = self._make_one(name='bar', array_type='INT64', values=[123])
self.assertNotEqual(param, alias)
wrong_type = self._make_one(
name='foo', array_type='FLOAT64', values=[123.0])
self.assertNotEqual(param, wrong_type)
wrong_val = self._make_one(
name='foo', array_type='INT64', values=[234])
self.assertNotEqual(param, wrong_val)
def test_positional(self):
klass = self._get_target_class()
param = klass.positional(array_type='INT64', values=[1, 2])
self.assertEqual(param.name, None)
self.assertEqual(param.array_type, 'INT64')
self.assertEqual(param.values, [1, 2])
def test_from_api_repr_w_name(self):
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'INT64',
},
},
'parameterValue': {
'arrayValues': [
{
'value': '1',
},
{
'value': '2'
},
],
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, 'foo')
self.assertEqual(param.array_type, 'INT64')
self.assertEqual(param.values, [1, 2])
def test_from_api_repr_wo_name(self):
RESOURCE = {
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'INT64',
},
},
'parameterValue': {
'arrayValues': [
{
'value': '1',
},
{
'value': '2'
},
],
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, None)
self.assertEqual(param.array_type, 'INT64')
self.assertEqual(param.values, [1, 2])
def test_from_api_repr_w_struct_type(self):
from google.cloud.bigquery.query import StructQueryParameter
RESOURCE = {
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'STRUCT',
'structTypes': [
{
'name': 'name',
'type': {'type': 'STRING'},
},
{
'name': 'age',
'type': {'type': 'INT64'},
},
],
},
},
'parameterValue': {
'arrayValues': [
{
'structValues': {
'name': {'value': 'Phred Phlyntstone'},
'age': {'value': '32'},
},
},
{
'structValues': {
'name': {
'value': 'Bharney Rhubbyl',
},
'age': {'value': '31'},
},
},
],
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
phred = StructQueryParameter.positional(
_make_subparam('name', 'STRING', 'Phred Phlyntstone'),
_make_subparam('age', 'INT64', 32))
bharney = StructQueryParameter.positional(
_make_subparam('name', 'STRING', 'Bharney Rhubbyl'),
_make_subparam('age', 'INT64', 31))
self.assertEqual(param.array_type, 'STRUCT')
self.assertEqual(param.values, [phred, bharney])
def test_to_api_repr_w_name(self):
EXPECTED = {
'name': 'foo',
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'INT64',
},
},
'parameterValue': {
'arrayValues': [
{
'value': '1',
},
{
'value': '2'
},
],
},
}
param = self._make_one(name='foo', array_type='INT64', values=[1, 2])
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_wo_name(self):
EXPECTED = {
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'INT64',
},
},
'parameterValue': {
'arrayValues': [
{
'value': '1',
},
{
'value': '2'
},
],
},
}
klass = self._get_target_class()
param = klass.positional(array_type='INT64', values=[1, 2])
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_unknown_type(self):
EXPECTED = {
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'UNKNOWN',
},
},
'parameterValue': {
'arrayValues': [
{
'value': 'unknown',
}
],
},
}
klass = self._get_target_class()
param = klass.positional(array_type='UNKNOWN', values=['unknown'])
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_record_type(self):
from google.cloud.bigquery.query import StructQueryParameter
EXPECTED = {
'parameterType': {
'type': 'ARRAY',
'arrayType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'foo', 'type': {'type': 'STRING'}},
{'name': 'bar', 'type': {'type': 'INT64'}},
],
},
},
'parameterValue': {
'arrayValues': [{
'structValues': {
'foo': {'value': 'Foo'},
'bar': {'value': '123'},
}
}]
},
}
one = _make_subparam('foo', 'STRING', 'Foo')
another = _make_subparam('bar', 'INT64', 123)
struct = StructQueryParameter.positional(one, another)
klass = self._get_target_class()
param = klass.positional(array_type='RECORD', values=[struct])
self.assertEqual(param.to_api_repr(), EXPECTED)
def test___eq___wrong_type(self):
field = self._make_one('test', 'STRING', ['value'])
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___eq___name_mismatch(self):
field = self._make_one('field', 'STRING', ['value'])
other = self._make_one('other', 'STRING', ['value'])
self.assertNotEqual(field, other)
def test___eq___field_type_mismatch(self):
field = self._make_one('test', 'STRING', [])
other = self._make_one('test', 'INT64', [])
self.assertNotEqual(field, other)
def test___eq___value_mismatch(self):
field = self._make_one('test', 'STRING', ['hello'])
other = self._make_one('test', 'STRING', ['hello', 'world'])
self.assertNotEqual(field, other)
def test___eq___hit(self):
field = self._make_one('test', 'STRING', ['gotcha'])
other = self._make_one('test', 'STRING', ['gotcha'])
self.assertEqual(field, other)
def test___ne___wrong_type(self):
field = self._make_one('toast', 'INT64', [13])
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___ne___same_value(self):
field1 = self._make_one('test', 'INT64', [12])
field2 = self._make_one('test', 'INT64', [12])
# unittest ``assertEqual`` uses ``==`` not ``!=``.
comparison_val = (field1 != field2)
self.assertFalse(comparison_val)
def test___ne___different_values(self):
field1 = self._make_one('test', 'INT64', [11])
field2 = self._make_one('test', 'INT64', [12])
self.assertNotEqual(field1, field2)
def test___repr__(self):
field1 = self._make_one('field1', 'STRING', ['value'])
expected = "ArrayQueryParameter('field1', 'STRING', ['value'])"
self.assertEqual(repr(field1), expected)
class Test_StructQueryParameter(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import StructQueryParameter
return StructQueryParameter
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
sub_1 = _make_subparam('bar', 'INT64', 123)
sub_2 = _make_subparam('baz', 'STRING', 'abc')
param = self._make_one('foo', sub_1, sub_2)
self.assertEqual(param.name, 'foo')
self.assertEqual(param.struct_types, {'bar': 'INT64', 'baz': 'STRING'})
self.assertEqual(param.struct_values, {'bar': 123, 'baz': 'abc'})
def test___eq__(self):
sub_1 = _make_subparam('bar', 'INT64', 123)
sub_2 = _make_subparam('baz', 'STRING', 'abc')
sub_3 = _make_subparam('baz', 'STRING', 'def')
sub_1_float = _make_subparam('bar', 'FLOAT64', 123.0)
param = self._make_one('foo', sub_1, sub_2)
self.assertEqual(param, param)
self.assertNotEqual(param, object())
alias = self._make_one('bar', sub_1, sub_2)
self.assertNotEqual(param, alias)
wrong_type = self._make_one('foo', sub_1_float, sub_2)
self.assertNotEqual(param, wrong_type)
wrong_val = self._make_one('foo', sub_2, sub_3)
self.assertNotEqual(param, wrong_val)
def test_positional(self):
sub_1 = _make_subparam('bar', 'INT64', 123)
sub_2 = _make_subparam('baz', 'STRING', 'abc')
klass = self._get_target_class()
param = klass.positional(sub_1, sub_2)
self.assertEqual(param.name, None)
self.assertEqual(param.struct_types, {'bar': 'INT64', 'baz': 'STRING'})
self.assertEqual(param.struct_values, {'bar': 123, 'baz': 'abc'})
def test_from_api_repr_w_name(self):
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'INT64'}},
{'name': 'baz', 'type': {'type': 'STRING'}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 123},
'baz': {'value': 'abc'},
},
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, 'foo')
self.assertEqual(param.struct_types, {'bar': 'INT64', 'baz': 'STRING'})
self.assertEqual(param.struct_values, {'bar': 123, 'baz': 'abc'})
def test_from_api_repr_wo_name(self):
RESOURCE = {
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'INT64'}},
{'name': 'baz', 'type': {'type': 'STRING'}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 123},
'baz': {'value': 'abc'},
},
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(param.name, None)
self.assertEqual(param.struct_types, {'bar': 'INT64', 'baz': 'STRING'})
self.assertEqual(param.struct_values, {'bar': 123, 'baz': 'abc'})
def test_from_api_repr_w_nested_array(self):
from google.cloud.bigquery.query import ArrayQueryParameter
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'STRING'}},
{'name': 'baz', 'type': {
'type': 'ARRAY',
'arrayType': {'type': 'INT64'},
}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 'abc'},
'baz': {'arrayValues': [
{'value': '123'},
{'value': '456'},
]},
},
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
self.assertEqual(
param,
self._make_one(
'foo',
_make_subparam('bar', 'STRING', 'abc'),
ArrayQueryParameter('baz', 'INT64', [123, 456])))
def test_from_api_repr_w_nested_struct(self):
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'STRING'}},
{'name': 'baz', 'type': {
'type': 'STRUCT',
'structTypes': [
{'name': 'qux', 'type': {'type': 'INT64'}},
{'name': 'spam', 'type': {'type': 'BOOL'}},
],
}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 'abc'},
'baz': {'structValues': {
'qux': {'value': '123'},
'spam': {'value': 'true'},
}},
},
},
}
klass = self._get_target_class()
param = klass.from_api_repr(RESOURCE)
expected = self._make_one(
'foo',
_make_subparam('bar', 'STRING', 'abc'),
self._make_one(
'baz',
_make_subparam('qux', 'INT64', 123),
_make_subparam('spam', 'BOOL', True)))
self.assertEqual(param.name, 'foo')
self.assertEqual(param.struct_types, expected.struct_types)
self.assertEqual(param.struct_values, expected.struct_values)
def test_to_api_repr_w_name(self):
EXPECTED = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'INT64'}},
{'name': 'baz', 'type': {'type': 'STRING'}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': '123'},
'baz': {'value': 'abc'},
},
},
}
sub_1 = _make_subparam('bar', 'INT64', 123)
sub_2 = _make_subparam('baz', 'STRING', 'abc')
param = self._make_one('foo', sub_1, sub_2)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_wo_name(self):
EXPECTED = {
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'INT64'}},
{'name': 'baz', 'type': {'type': 'STRING'}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': '123'},
'baz': {'value': 'abc'},
},
},
}
sub_1 = _make_subparam('bar', 'INT64', 123)
sub_2 = _make_subparam('baz', 'STRING', 'abc')
klass = self._get_target_class()
param = klass.positional(sub_1, sub_2)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_nested_array(self):
from google.cloud.bigquery.query import ArrayQueryParameter
EXPECTED = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'STRING'}},
{'name': 'baz', 'type': {
'type': 'ARRAY',
'arrayType': {'type': 'INT64'},
}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 'abc'},
'baz': {'arrayValues': [
{'value': '123'},
{'value': '456'},
]},
},
},
}
scalar = _make_subparam('bar', 'STRING', 'abc')
array = ArrayQueryParameter('baz', 'INT64', [123, 456])
param = self._make_one('foo', scalar, array)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test_to_api_repr_w_nested_struct(self):
EXPECTED = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'bar', 'type': {'type': 'STRING'}},
{'name': 'baz', 'type': {
'type': 'STRUCT',
'structTypes': [
{'name': 'qux', 'type': {'type': 'INT64'}},
{'name': 'spam', 'type': {'type': 'BOOL'}},
],
}},
],
},
'parameterValue': {
'structValues': {
'bar': {'value': 'abc'},
'baz': {'structValues': {
'qux': {'value': '123'},
'spam': {'value': 'true'},
}},
},
},
}
scalar_1 = _make_subparam('bar', 'STRING', 'abc')
scalar_2 = _make_subparam('qux', 'INT64', 123)
scalar_3 = _make_subparam('spam', 'BOOL', True)
sub = self._make_one('baz', scalar_2, scalar_3)
param = self._make_one('foo', scalar_1, sub)
self.assertEqual(param.to_api_repr(), EXPECTED)
def test___eq___wrong_type(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', 'abc'))
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___eq___name_mismatch(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', 'abc'))
other = self._make_one(
'other ', _make_subparam('bar', 'STRING', 'abc'))
self.assertNotEqual(field, other)
def test___eq___field_type_mismatch(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', None))
other = self._make_one(
'test', _make_subparam('bar', 'INT64', None))
self.assertNotEqual(field, other)
def test___eq___value_mismatch(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', 'hello'))
other = self._make_one(
'test', _make_subparam('bar', 'STRING', 'world'))
self.assertNotEqual(field, other)
def test___eq___hit(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', 'gotcha'))
other = self._make_one(
'test', _make_subparam('bar', 'STRING', 'gotcha'))
self.assertEqual(field, other)
def test___ne___wrong_type(self):
field = self._make_one(
'test', _make_subparam('bar', 'STRING', 'hello'))
other = object()
self.assertNotEqual(field, other)
self.assertEqual(field, mock.ANY)
def test___ne___same_value(self):
field1 = self._make_one(
'test', _make_subparam('bar', 'STRING', 'hello'))
field2 = self._make_one(
'test', _make_subparam('bar', 'STRING', 'hello'))
# unittest ``assertEqual`` uses ``==`` not ``!=``.
comparison_val = (field1 != field2)
self.assertFalse(comparison_val)
def test___ne___different_values(self):
field1 = self._make_one(
'test', _make_subparam('bar', 'STRING', 'hello'))
field2 = self._make_one(
'test', _make_subparam('bar', 'STRING', 'world'))
self.assertNotEqual(field1, field2)
def test___repr__(self):
field1 = self._make_one(
'test', _make_subparam('field1', 'STRING', 'hello'))
got = repr(field1)
self.assertIn('StructQueryParameter', got)
self.assertIn("'field1', 'STRING'", got)
self.assertIn("'field1': 'hello'", got)
class Test_QueryResults(unittest.TestCase):
PROJECT = 'project'
JOB_ID = 'test-synchronous-query'
TOKEN = 'TOKEN'
@staticmethod
def _get_target_class():
from google.cloud.bigquery.query import _QueryResults
return _QueryResults
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_resource(self):
return {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
}
def _verifySchema(self, query, resource):
from google.cloud.bigquery.schema import SchemaField
if 'schema' in resource:
fields = resource['schema']['fields']
self.assertEqual(len(query.schema), len(fields))
for found, expected in zip(query.schema, fields):
self.assertIsInstance(found, SchemaField)
self.assertEqual(found.name, expected['name'])
self.assertEqual(found.field_type, expected['type'])
self.assertEqual(found.mode, expected['mode'])
self.assertEqual(found.description,
expected.get('description'))
self.assertEqual(found.fields, expected.get('fields', ()))
else:
self.assertEqual(query.schema, ())
def test_ctor_defaults(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.cache_hit)
self.assertIsNone(query.complete)
self.assertIsNone(query.errors)
self.assertIsNone(query.page_token)
self.assertEqual(query.project, self.PROJECT)
self.assertEqual(query.rows, [])
self.assertEqual(query.schema, ())
self.assertIsNone(query.total_rows)
self.assertIsNone(query.total_bytes_processed)
def test_cache_hit_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.cache_hit)
def test_cache_hit_present(self):
resource = self._make_resource()
resource['cacheHit'] = True
query = self._make_one(resource)
self.assertTrue(query.cache_hit)
def test_complete_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.complete)
def test_complete_present(self):
resource = self._make_resource()
resource['jobComplete'] = True
query = self._make_one(resource)
self.assertTrue(query.complete)
def test_errors_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.errors)
def test_errors_present(self):
ERRORS = [
{'reason': 'testing'},
]
resource = self._make_resource()
resource['errors'] = ERRORS
query = self._make_one(resource)
self.assertEqual(query.errors, ERRORS)
def test_job_id_missing(self):
with self.assertRaises(ValueError):
self._make_one({})
def test_job_id_broken_job_reference(self):
resource = {'jobReference': {'bogus': 'BOGUS'}}
with self.assertRaises(ValueError):
self._make_one(resource)
def test_job_id_present(self):
resource = self._make_resource()
resource['jobReference']['jobId'] = 'custom-job'
query = self._make_one(resource)
self.assertEqual(query.job_id, 'custom-job')
def test_page_token_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.page_token)
def test_page_token_present(self):
resource = self._make_resource()
resource['pageToken'] = 'TOKEN'
query = self._make_one(resource)
self.assertEqual(query.page_token, 'TOKEN')
def test_total_rows_present_integer(self):
resource = self._make_resource()
resource['totalRows'] = 42
query = self._make_one(resource)
self.assertEqual(query.total_rows, 42)
def test_total_rows_present_string(self):
resource = self._make_resource()
resource['totalRows'] = '42'
query = self._make_one(resource)
self.assertEqual(query.total_rows, 42)
def test_total_bytes_processed_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.total_bytes_processed)
def test_total_bytes_processed_present_integer(self):
resource = self._make_resource()
resource['totalBytesProcessed'] = 123456
query = self._make_one(resource)
self.assertEqual(query.total_bytes_processed, 123456)
def test_total_bytes_processed_present_string(self):
resource = self._make_resource()
resource['totalBytesProcessed'] = '123456'
query = self._make_one(resource)
self.assertEqual(query.total_bytes_processed, 123456)
def test_num_dml_affected_rows_missing(self):
query = self._make_one(self._make_resource())
self.assertIsNone(query.num_dml_affected_rows)
def test_num_dml_affected_rows_present_integer(self):
resource = self._make_resource()
resource['numDmlAffectedRows'] = 123456
query = self._make_one(resource)
self.assertEqual(query.num_dml_affected_rows, 123456)
def test_num_dml_affected_rows_present_string(self):
resource = self._make_resource()
resource['numDmlAffectedRows'] = '123456'
query = self._make_one(resource)
self.assertEqual(query.num_dml_affected_rows, 123456)
def test_schema(self):
query = self._make_one(self._make_resource())
self._verifySchema(query, self._make_resource())
resource = self._make_resource()
resource['schema'] = {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'},
],
}
query._set_properties(resource)
self._verifySchema(query, resource)
class Test__query_param_from_api_repr(unittest.TestCase):
@staticmethod
def _call_fut(resource):
from google.cloud.bigquery.query import _query_param_from_api_repr
return _query_param_from_api_repr(resource)
def test_w_scalar(self):
from google.cloud.bigquery.query import ScalarQueryParameter
RESOURCE = {
'name': 'foo',
'parameterType': {'type': 'INT64'},
'parameterValue': {'value': '123'},
}
parameter = self._call_fut(RESOURCE)
self.assertIsInstance(parameter, ScalarQueryParameter)
self.assertEqual(parameter.name, 'foo')
self.assertEqual(parameter.type_, 'INT64')
self.assertEqual(parameter.value, 123)
def test_w_scalar_timestamp(self):
from google.cloud._helpers import UTC
from google.cloud.bigquery.query import ScalarQueryParameter
RESOURCE = {
'name': 'zoned',
'parameterType': {'type': 'TIMESTAMP'},
'parameterValue': {'value': '2012-03-04 05:06:07+00:00'},
}
parameter = self._call_fut(RESOURCE)
self.assertIsInstance(parameter, ScalarQueryParameter)
self.assertEqual(parameter.name, 'zoned')
self.assertEqual(parameter.type_, 'TIMESTAMP')
self.assertEqual(
parameter.value,
datetime.datetime(2012, 3, 4, 5, 6, 7, tzinfo=UTC))
def test_w_scalar_timestamp_micros(self):
from google.cloud._helpers import UTC
from google.cloud.bigquery.query import ScalarQueryParameter
RESOURCE = {
'name': 'zoned',
'parameterType': {'type': 'TIMESTAMP'},
'parameterValue': {'value': '2012-03-04 05:06:07.250000+00:00'},
}
parameter = self._call_fut(RESOURCE)
self.assertIsInstance(parameter, ScalarQueryParameter)
self.assertEqual(parameter.name, 'zoned')
self.assertEqual(parameter.type_, 'TIMESTAMP')
self.assertEqual(
parameter.value,
datetime.datetime(2012, 3, 4, 5, 6, 7, 250000, tzinfo=UTC))
def test_w_array(self):
from google.cloud.bigquery.query import ArrayQueryParameter
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'ARRAY',
'arrayType': {'type': 'INT64'},
},
'parameterValue': {
'arrayValues': [
{'value': '123'},
]},
}
parameter = self._call_fut(RESOURCE)
self.assertIsInstance(parameter, ArrayQueryParameter)
self.assertEqual(parameter.name, 'foo')
self.assertEqual(parameter.array_type, 'INT64')
self.assertEqual(parameter.values, [123])
def test_w_struct(self):
from google.cloud.bigquery.query import StructQueryParameter
RESOURCE = {
'name': 'foo',
'parameterType': {
'type': 'STRUCT',
'structTypes': [
{'name': 'foo', 'type': {'type': 'STRING'}},
{'name': 'bar', 'type': {'type': 'INT64'}},
],
},
'parameterValue': {
'structValues': {
'foo': {'value': 'Foo'},
'bar': {'value': '123'},
}
},
}
parameter = self._call_fut(RESOURCE)
self.assertIsInstance(parameter, StructQueryParameter)
self.assertEqual(parameter.name, 'foo')
self.assertEqual(
parameter.struct_types, {'foo': 'STRING', 'bar': 'INT64'})
self.assertEqual(parameter.struct_values, {'foo': 'Foo', 'bar': 123})
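# Illustrative sketch (not part of the test suite): the three parameter kinds
# exercised above, serialized with to_api_repr(). Constructor signatures and the
# output shape mirror the fixtures in these tests; the names and values here are
# made up for illustration only.
def _example_query_params():  # pragma: no cover
    from google.cloud.bigquery.query import (
        ArrayQueryParameter, ScalarQueryParameter, StructQueryParameter)
    scalar = ScalarQueryParameter('min_age', 'INT64', 18)
    array = ArrayQueryParameter('ids', 'INT64', [1, 2, 3])
    struct = StructQueryParameter(
        'person',
        ScalarQueryParameter('name', 'STRING', 'Phred'),
        ScalarQueryParameter('age', 'INT64', 32))
    # Each call returns the REST-style dict with 'parameterType'/'parameterValue' keys.
    return [param.to_api_repr() for param in (scalar, array, struct)]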
|
|
"""
python script that generates mesh for Howorka geometry
"""
import numpy
from importlib import import_module
try:
    from importlib import reload  # Python 3: reload is no longer a builtin
except ImportError:
    pass  # Python 2: reload is a builtin
import nanopores.py4gmsh.basic
import nanopores.py4gmsh.extra
from nanopores.py4gmsh import *
from .params_geo import *
def get_geo(**params):
"""
writes a 2D geo file for an axisymmetric geometry from Wei et al.,
'Stochastic sensing ...'
_________
| |
| |
| _____|
| \____|
| |
| |
|________|
"""
basic = reload(nanopores.py4gmsh.basic)
extra = reload(nanopores.py4gmsh.extra)
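# Pull caller-supplied parameters into module scope, overriding the defaults
# imported from params_geo.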
globals().update(params)
Rx = R
Ry = Rz
# define additional geo variables
# mesh generation should also work for no SAM layer
if lsam is None or lsam < tolc:
sam = None
else:
sam = True
l0 = lau +lsin +lsam
angle2 = angle/2.0
tan = numpy.tan(angle2*numpy.pi/180)
sin = numpy.sin(angle2*numpy.pi/180)
cos = numpy.cos(angle2*numpy.pi/180)
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = r0 + lsam/cos + rlau
X_Fluid_up = numpy.array([
[0, Ry, 0],
[Rx, Ry, 0],
[Rx, l0/2, 0],
])
X_Fluid_low = numpy.array([
[Rx, -l0/2, 0],
[Rx, -Ry, 0],
[0, -Ry, 0],
])
X_Fluid_ctr = numpy.array([
[0, -l0/2, 0],
[0, -l0/6, 0],
[0, +l0/6, 0],
[0, +l0/2, 0],
])
X_SAM_ctr = numpy.array([
[r1, l0/2, 0],
[(2*r1+r0)/3, l0/6, 0],
[(r1+2*r0)/3, -l0/6, 0],
[r0, -l0/2, 0],
])
X_SiN = numpy.array([
[Rx, -(l0/2-lsin), 0],
[rsin +tan*lsin, -l0/2 +lsin, 0],
[rsin, -l0/2, 0],
])
p_SAM = [Point(x, lcCenter) for x in X_SAM_ctr]
p_SiN = [Point(x, lcOuter) for x in X_SiN]
p_Fluid_up = [Point(x, lcOuter) for x in X_Fluid_up]
p_Fluid_low = [Point(x, lcOuter) for x in X_Fluid_low]
p_Fluid_ctr = [Point(x, lcCenter) for x in X_Fluid_ctr]
if sam is None:
p_Au = []
else:
X_Au = numpy.array([
[Rx, -lsam +l0/2, 0],
[rsam -tan*(lsam-l0), -lsam + l0/2, 0],
[rsam, -l0/2, 0],
])
p_Au = [Point(X_Au[0], lcOuter)]
p_Au.extend([Point(x, lcCenter) for x in X_Au[1:]])
# Group all fluid points into list
p_Fluid = p_Fluid_up + p_SAM
if sam is not None:
p_Fluid.append(p_Au[-1])
p_Fluid.append(p_SiN[-1])
p_Fluid.extend(p_Fluid_low + p_Fluid_ctr)
geo_cs_str = "no crosssectional surface"
cs_pop_i = None
insert_ind = 0
Comment(' integrate crosssectional lines in fluid surface ')
z_CrossS = [X_Fluid_ctr[k][1] for k in reversed(range(len(X_Fluid_ctr)))]
e_CrossS = [Line(p_Fluid_ctr[k], p_SAM[len(p_SAM)-1-k]) for k in reversed(range(len(p_Fluid_ctr)))]
if x0 is not None:
X_Molecule = numpy.array([[0, x0[2] -rMolecule, 0],
[0, x0[2], 0],
[0, x0[2] +rMolecule, 0]])
p_Molecule = [Point(x, lcMolecule) for x in X_Molecule]
geo_cs_list = ["top", "center top", "center bottom", "bottom"]
# Determine position of molecule for correct geometry
for i in range(len(z_CrossS)):
if abs(x0[2] - z_CrossS[i]) < rMolecule:
cs_pop_i = i
if x0[2] < z_CrossS[i] - rMolecule:
insert_ind = i+1
if cs_pop_i is not None:
geo_cs_str = geo_cs_list[cs_pop_i]
e_CrossS.pop(cs_pop_i)
p_Fluid.pop(len(p_Fluid)-1-cs_pop_i)
if x0 is not None:
p_Fluid.insert(len(p_Fluid)-insert_ind, p_Molecule[0])
p_Fluid.insert(len(p_Fluid)-insert_ind, p_Molecule[2])
c_Molecule = Circle(p_Molecule)
e_Molecule = Line(p_Molecule[2], p_Molecule[0])
ll_Molecule = LineLoop([c_Molecule, e_Molecule])
s_Molecule = PlaneSurface(ll_Molecule)
# Create Line Loops from the points sitting on the line
Comment(' Connect all Fluid points ')
e_Fluid = [Line(p_Fluid[k], p_Fluid[k+1]) for k in range(len(p_Fluid)-1)]
e_Fluid.append(Line(p_Fluid[-1], p_Fluid[0]))
if x0 is not None:
e_Fluid.pop(len(e_Fluid) - insert_ind -2)
e_Fluid.insert(len(e_Fluid) - insert_ind -1, c_Molecule)
ll_Fluid = LineLoop(e_Fluid)
Comment(' Connect all outer Membrane points ')
slicem = slice(len(p_Fluid_up) -1, p_Fluid.index(p_SiN[-1]) +1)
e_Membrane = e_Fluid[slicem]
if sam is not None:
p_list_mem = [p_Fluid_low[0], p_SiN[0], p_Au[0], p_Fluid_up[-1]]
else:
p_list_mem = [p_Fluid_low[0], p_SiN[0], p_Fluid_up[-1]]
e_Membrane.extend([Line(p_list_mem[k], p_list_mem[k+1]) \
for k in range(len(p_list_mem)-1)])
ll_Membrane = LineLoop(e_Membrane)
s_Fluid = PlaneSurface(ll_Fluid)
s_Membrane = PlaneSurface(ll_Membrane)
Comment(' Integrate crossections into fluid surface')
raw_code(['Line{%s} In Surface{%s};' \
%(e_CrossS[k], s_Fluid) for k in range(len(e_CrossS))])
Comment(' Integrate Membrane material interfaces into membrane surface ')
if sam is not None:
e_Au = [Line(p_Au[k], p_Au[k+1]) for k in range(len(p_Au)-1)]
raw_code(['Line{%s} In Surface{%s};' \
%(e_Au[k], s_Membrane) for k in range(len(e_Au))])
e_SiN = [Line(p_SiN[k], p_SiN[k+1]) for k in range(len(p_SiN)-1)]
raw_code(['Line{%s} In Surface{%s};' \
%(e_SiN[k], s_Membrane) for k in range(len(e_SiN))])
# Define Fields for varying mesh size
field_list = []
if membraneblayer:
blayer = BoundaryLayer(edges_list=e_Membrane[1:5], hfar=lcOuter,
hwall_n=lcCenter*0.2, hwall_t=lcCenter*0.5,
thickness=1, ratio=2)
field_list.append(blayer)
if boxfields:
box1 = BoxField(lcCenter, lcOuter, -1e-15, r1*1.3, \
-l0/2 -r0, l0/2 +r0)
field_list.append(box1)
if lowerblayer:
n_edge_l = 9 if not sam else 10
blayer_l = BoundaryLayer(
edges_list=e_Fluid[n_edge_l:n_edge_l+1],
hfar=lcOuter, hwall_n=lcCenter*0.5, hwall_t=lcCenter*1.0,
thickness=1, ratio=2)
field_list.append(blayer_l)
if x0 is not None and moleculeblayer:
blayer_mol = BoundaryLayer(
edges_list=[c_Molecule,], hfar=lcOuter,
hwall_n=lcCenter*0.1, hwall_t=lcCenter*0.5,
thickness=1, ratio=2)
field_list.append(blayer_mol)
#raw_code(['bfield = newf;'])
#raw_code(['Field[bfield] = Min;'])
#raw_code(['Field[bfield].FieldsList = {%s};' %(','.join(field_list))])
#raw_code(['Background Field = bfield;'])
# to disable question dialogs
raw_code(['General.ExpertMode = 1;'])
# Don't extend the elements sizes from the boundary inside the domain
#raw_code(['Mesh.CharacteristicLengthExtendFromBoundary = 0;'])
# 2D algorithm (1=MeshAdapt, 2=Automatic, 5=Delaunay, 6=Frontal, 7=bamg, 8=delquad)
# only delaunay is compatible with attractor fields
raw_code(['Mesh.Algorithm = 5;'])
geo_dict = {"gmsh mesh generating sript": __name__,
"xMolecule": x0,
"Number of crosssections": len(e_CrossS),
"Total number of crossections": 4,
"molecule crosses": geo_cs_str,
"popped crossection index": cs_pop_i,
"cs_pop_i": cs_pop_i,
"Typical length scale": lc,
"geo_code": get_code(),
}
return geo_dict
# -----
if __name__ == '__main__':
print(get_geo())
print('\n - This is the sample code for the geo file')
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Corey Goldberg
#
# This file is part of: sauceclient
# https://github.com/cgoldberg/sauceclient
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import random
import unittest
import sauceclient
# set these to run tests
SAUCE_USERNAME = ''
SAUCE_ACCESS_KEY = ''
TEST_JOB_ID = '' # any valid job
def assert_is_utf8(content, test_obj):
if sauceclient.is_py2:
test_obj.assertIsInstance(content, unicode)
else:
try:
content.encode('utf-8')
except UnicodeEncodeError:
test_obj.fail('content is not valid UTF-8')
class TestSauceClient(unittest.TestCase):
def setUp(self):
self.sc = sauceclient.SauceClient(
SAUCE_USERNAME,
SAUCE_ACCESS_KEY,
)
def test_has_instances(self):
self.assertIsInstance(self.sc.information, sauceclient.Information)
self.assertIsInstance(self.sc.jobs, sauceclient.Jobs)
self.assertIsInstance(self.sc.provisioning, sauceclient.Provisioning)
self.assertIsInstance(self.sc.usage, sauceclient.Usage)
def test_headers(self):
headers = self.sc.headers
self.assertIsInstance(headers, dict)
self.assertIn('Authorization', headers)
self.assertIn('Content-Type', headers)
self.assertIn('Basic', headers['Authorization'])
self.assertEqual('application/json', headers['Content-Type'])
def test_request_get(self):
url = 'rest/v1/info/status'
json_data = self.sc.request('GET', url)
self.assertIsInstance(json_data, str)
class TestJobs(unittest.TestCase):
def setUp(self):
self.sc = sauceclient.SauceClient(
SAUCE_USERNAME,
SAUCE_ACCESS_KEY,
)
def test_get_job_ids(self):
job_ids = self.sc.jobs.get_job_ids()
self.assertIsInstance(job_ids, list)
job_id = random.choice(job_ids)
assert_is_utf8(job_id, self)
self.assertTrue(job_id.isalnum())
def test_get_jobs(self):
jobs = self.sc.jobs.get_jobs()
self.assertIsInstance(jobs, list)
job = random.choice(jobs)
self.assertIn('id', job)
assert_is_utf8(job['id'], self)
self.assertEqual(job['owner'], self.sc.sauce_username)
def test_get_job_attributes(self):
job_attributes = self.sc.jobs.get_job_attributes(TEST_JOB_ID)
self.assertIsInstance(job_attributes, dict)
self.assertIn('id', job_attributes)
self.assertIn('status', job_attributes)
self.assertIn('commands_not_successful', job_attributes)
self.assertIn('name', job_attributes)
self.assertIn('video_url', job_attributes)
self.assertIn('tags', job_attributes)
self.assertIn('start_time', job_attributes)
self.assertIn('log_url', job_attributes)
self.assertIn('creation_time', job_attributes)
self.assertIn('custom-data', job_attributes)
self.assertIn('browser_version', job_attributes)
self.assertIn('end_time', job_attributes)
self.assertIn('passed', job_attributes)
self.assertIn('owner', job_attributes)
self.assertIn('browser', job_attributes)
self.assertIn('os', job_attributes)
self.assertIn('public', job_attributes)
self.assertIn('breakpointed', job_attributes)
self.assertIn('build', job_attributes)
self.assertEqual(job_attributes['id'], TEST_JOB_ID)
self.assertIn(job_attributes['owner'], self.sc.sauce_username)
def test_update_job(self):
job_attributes = self.sc.jobs.update_job(TEST_JOB_ID)
self.assertIsInstance(job_attributes, dict)
self.assertIn('id', job_attributes)
self.assertEqual(job_attributes['id'], TEST_JOB_ID)
class TestProvisioning(unittest.TestCase):
def setUp(self):
self.sc = sauceclient.SauceClient(
SAUCE_USERNAME,
SAUCE_ACCESS_KEY,
)
def test_get_account_details(self):
account_details = self.sc.provisioning.get_account_details()
self.assertIsInstance(account_details, dict)
self.assertIn('id', account_details)
self.assertIn('minutes', account_details)
self.assertIn('access_key', account_details)
self.assertIn('subscribed', account_details)
self.assertEqual(account_details['id'], self.sc.sauce_username)
def test_get_account_limits(self):
account_limits = self.sc.provisioning.get_account_limits()
self.assertIsInstance(account_limits, dict)
self.assertIn('concurrency', account_limits)
self.assertTrue(account_limits['concurrency'] > 0)
class TestInformation(unittest.TestCase):
def setUp(self):
self.sc = sauceclient.SauceClient()
def test_get_status(self):
status = self.sc.information.get_status()
self.assertIsInstance(status, dict)
self.assertIn('service_operational', status)
self.assertIn('status_message', status)
self.assertIn('wait_time', status)
assert_is_utf8(status['status_message'], self)
self.assertTrue(status['service_operational'])
def test_get_status_with_auth(self):
sc = sauceclient.SauceClient(
SAUCE_USERNAME,
SAUCE_ACCESS_KEY,
)
status = sc.information.get_status()
self.assertIsInstance(status, dict)
self.assertIn('service_operational', status)
self.assertIn('status_message', status)
self.assertIn('wait_time', status)
self.assertTrue(status['service_operational'])
def test_get_browsers(self):
browsers = self.sc.information.get_browsers()
self.assertIsInstance(browsers, list)
self.assertTrue(len(browsers) > 0)
browser = random.choice(browsers)
self.assertIn('automation_backend', browser)
self.assertIn('long_name', browser)
self.assertIn('long_version', browser)
self.assertIn('os', browser)
self.assertIn('preferred_version', browser)
self.assertIn('selenium_name', browser)
self.assertIn('short_version', browser)
assert_is_utf8(browser['selenium_name'], self)
def test_get_count(self):
count = self.sc.information.get_count()
self.assertIsInstance(count, int)
self.assertTrue(count > 20000000)
class TestUsage(unittest.TestCase):
def setUp(self):
self.sc = sauceclient.SauceClient(
SAUCE_USERNAME,
SAUCE_ACCESS_KEY,
)
def test_get_current_activity(self):
activity = self.sc.usage.get_current_activity()
self.assertIsInstance(activity, dict)
self.assertIn('subaccounts', activity)
self.assertIn(SAUCE_USERNAME, activity['subaccounts'])
subaccount_activity = activity['subaccounts'][self.sc.sauce_username]
self.assertIn('all', subaccount_activity)
self.assertIsInstance(subaccount_activity['all'], int)
self.assertIn('in progress', subaccount_activity)
self.assertIsInstance(subaccount_activity['in progress'], int)
self.assertIn('queued', subaccount_activity)
self.assertIsInstance(subaccount_activity['queued'], int)
self.assertIn('totals', activity)
self.assertIn('all', activity['totals'])
self.assertIsInstance(activity['totals']['all'], int)
self.assertIn('in progress', activity['totals'])
self.assertIsInstance(activity['totals']['in progress'], int)
self.assertIn('queued', activity['totals'])
self.assertIsInstance(activity['totals']['queued'], int)
def test_get_historical_usage(self):
historical_usage = self.sc.usage.get_historical_usage()
self.assertIn('usage', historical_usage)
self.assertIn('username', historical_usage)
self.assertEqual(historical_usage['username'], self.sc.sauce_username)
self.assertIsInstance(historical_usage['usage'], list)
if __name__ == '__main__':
if not all((SAUCE_USERNAME, SAUCE_ACCESS_KEY, TEST_JOB_ID)):
raise SystemExit('Set your credentials (username/access-key) and a TEST_JOB_ID')
unittest.main(verbosity=2)
|
|
#
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#!/usr/bin/env python
usage = '''
Write extra flags to outfile for DM based on the bot name:
$ python dm_flags.py outfile Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug
Or run self-tests:
$ python dm_flags.py test
'''
import inspect
import json
import os
import sys
def lineno():
caller = inspect.stack()[1] # Up one level to our caller.
return inspect.getframeinfo(caller[0]).lineno
cov_start = lineno()+1 # We care about coverage starting just past this def.
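# Everything between cov_start and cov_end is the code that self_test() below
# requires the sample bot names to exercise, so keep new bot-specific logic
# inside this bracket.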
def get_args(bot):
args = []
# 32-bit desktop bots tend to run out of memory, because they have relatively
# far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
if '-x86-' in bot and 'NexusPlayer' not in bot:
args.extend('--threads 4'.split(' '))
# These are the canonical configs that we would ideally run on all bots. We
# may opt out or substitute some below for specific bots
configs = ['565', '8888', 'gpu', 'gpusrgb', 'pdf']
# Add in either msaa4 or msaa16 to the canonical set of configs to run
if 'Android' in bot or 'iOS' in bot:
configs.append('msaa4')
else:
configs.append('msaa16')
# The S4 crashes and the NP produces a long error stream when we run with
# MSAA. The Tegra2 and Tegra3 just don't support it. There is no record of
# why we're not running msaa on iOS; it probably started with the gpu config
# and we just haven't tried.
if ('GalaxyS4' in bot or
'NexusPlayer' in bot or
'Tegra3' in bot or
'iOS' in bot):
configs = [x for x in configs if 'msaa' not in x]
# Runs out of memory on Android bots and Daisy. Everyone else seems fine.
if 'Android' in bot or 'Daisy' in bot:
configs.remove('pdf')
if '-GCE-' in bot:
configs.extend(['f16', 'srgb']) # Gamma-correct formats.
configs.extend(['sp-8888', '2ndpic-8888']) # Test niche uses of SkPicture.
if '-TSAN' not in bot:
if ('TegraK1' in bot or
'GTX550Ti' in bot or
'GTX660' in bot or
'GT610' in bot):
if 'Android' in bot:
configs.append('nvprdit4')
else:
configs.append('nvprdit16')
# We want to test the OpenGL config not the GLES config on the X1
if 'TegraX1' in bot:
configs = [x.replace('gpu', 'gl') for x in configs]
configs = [x.replace('msaa', 'glmsaa') for x in configs]
# NP is running out of RAM when we run all these modes. skia:3255
if 'NexusPlayer' not in bot:
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pic'])
if 'ANGLE' in bot:
configs.append('angle')
# We want to run gpudft on at least the Mali 400
if 'GalaxyS3' in bot:
configs.append('gpudft')
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# Vulkan bot *only* runs the vk config.
if 'Vulkan' in bot:
configs = ['vk']
args.append('--config')
args.extend(configs)
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image'.split(' '))
if 'GalaxyS' in bot:
args.extend(('--threads', '0'))
blacklist = []
# TODO: ???
blacklist.extend('f16 _ _ dstreadshuffle'.split(' '))
blacklist.extend('f16 image _ _'.split(' '))
blacklist.extend('srgb image _ _'.split(' '))
blacklist.extend('gpusrgb image _ _'.split(' '))
# Certain GMs on the Win7 gpu and pdf configs never finish and keep the test
# running forever
if 'Win7' in bot:
blacklist.extend('msaa16 gm _ colorwheelnative'.split(' '))
blacklist.extend('pdf gm _ fontmgr_iter_factory'.split(' '))
if 'Valgrind' in bot:
# These take 18+ hours to run.
blacklist.extend('pdf gm _ fontmgr_iter'.split(' '))
blacklist.extend('pdf _ _ PANO_20121023_214540.jpg'.split(' '))
blacklist.extend('pdf skp _ worldjournal'.split(' '))
blacklist.extend('pdf skp _ desk_baidu.skp'.split(' '))
blacklist.extend('pdf skp _ desk_wikipedia.skp'.split(' '))
if 'iOS' in bot:
blacklist.extend('gpu skp _ _ msaa skp _ _'.split(' '))
blacklist.extend('msaa16 gm _ tilemodesProcess'.split(' '))
if 'Mac' in bot or 'iOS' in bot:
# CG fails on questionable bmps
blacklist.extend('_ image gen_platf rgba32abf.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24prof.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24lprof.bmp'.split(' '))
blacklist.extend('_ image gen_platf 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 32bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 24bpp-pixeldata-cropped.bmp'.split(' '))
# CG has unpredictable behavior on this questionable gif
# It's probably using uninitialized memory
blacklist.extend('_ image gen_platf frame_larger_than_image.gif'.split(' '))
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist.extend('_ image gen_platf rle8-height-negative.bmp'.split(' '))
blacklist.extend('_ image gen_platf rle4-height-negative.bmp'.split(' '))
blacklist.extend('_ image gen_platf pal8os2v2.bmp'.split(' '))
blacklist.extend('_ image gen_platf pal8os2v2-16.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgba32abf.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24prof.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24lprof.bmp'.split(' '))
blacklist.extend('_ image gen_platf 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 32bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 24bpp-pixeldata-cropped.bmp'.split(' '))
# skia:4095
for test in ['not_native32_bitmap_config',
'bleed_image',
'bleed_alpha_image',
'bleed_alpha_image_shader',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter',
'verylargebitmap', # Windows only.
'verylarge_picture_image']: # Windows only.
blacklist.extend(['serialize-8888', 'gm', '_', test])
# skia:4769
for test in ['drawfilter']:
blacklist.extend([ 'sp-8888', 'gm', '_', test])
blacklist.extend([ 'pic-8888', 'gm', '_', test])
blacklist.extend(['2ndpic-8888', 'gm', '_', test])
# skia:4703
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist.extend([ 'sp-8888', 'gm', '_', test])
blacklist.extend([ 'pic-8888', 'gm', '_', test])
blacklist.extend([ '2ndpic-8888', 'gm', '_', test])
blacklist.extend(['serialize-8888', 'gm', '_', test])
# Extensions for RAW images
r = ["arw", "cr2", "dng", "nef", "nrw", "orf", "raf", "rw2", "pef", "srw",
"ARW", "CR2", "DNG", "NEF", "NRW", "ORF", "RAF", "RW2", "PEF", "SRW"]
# skbug.com/4888
# Blacklist RAW images (and a few large PNGs) on GPU bots
# until we can resolve failures
if 'GPU' in bot:
blacklist.extend('_ image _ interlaced1.png'.split(' '))
blacklist.extend('_ image _ interlaced2.png'.split(' '))
blacklist.extend('_ image _ interlaced3.png'.split(' '))
for raw_ext in r:
blacklist.extend(('_ image _ .%s' % raw_ext).split(' '))
# Large image that overwhelms older Mac bots
if 'MacMini4.1-GPU' in bot:
blacklist.extend('_ image _ abnormal.wbmp'.split(' '))
match = []
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'GalaxyS3' in bot: # skia:1699
match.append('~WritePixels')
if 'AndroidOne' in bot: # skia:4711
match.append('~WritePixels')
if 'NexusPlayer' in bot:
match.append('~ResourceCache')
if 'GalaxyS4' in bot: # skia:4079
match.append('~imagefiltersclipped')
match.append('~imagefilterscropexpand')
match.append('~scaled_tilemodes_npot')
match.append('~bleed_image') # skia:4367
match.append('~ReadPixels') # skia:4368
if 'ANGLE' in bot and 'Debug' in bot:
match.append('~GLPrograms') # skia:4717
if 'MSAN' in bot:
match.extend(['~Once', '~Shared']) # Not sure what's up with these tests.
if blacklist:
args.append('--blacklist')
args.extend(blacklist)
if match:
args.append('--match')
args.extend(match)
# These bots run out of memory running RAW codec tests. Do not run them in
# parallel
if 'NexusPlayer' in bot or 'Nexus5' in bot or 'Nexus9' in bot:
args.append('--noRAW_threading')
return args
cov_end = lineno() # Don't care about code coverage past here.
def self_test():
import coverage # This way the bots don't need coverage.py to be installed.
args = {}
cases = [
'Pretend-iOS-Bot',
'Test-Android-GCC-AndroidOne-GPU-Mali400MP2-Arm7-Release',
'Test-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Debug',
'Test-Android-GCC-GalaxyS4-GPU-SGX544-Arm7-Release',
'Test-Android-GCC-Nexus7-GPU-Tegra3-Arm7-Release',
'Test-Android-GCC-Nexus9-GPU-TegraK1-Arm64-Debug',
'Test-Android-GCC-NexusPlayer-CPU-SSSE3-x86-Release',
'Test-Android-GCC-NVIDIA_Shield-GPU-TegraX1-Arm64-Release',
'Test-Mac-Clang-MacMini4.1-GPU-GeForce320M-x86_64-Release',
'Test-Mac-Clang-MacMini6.2-GPU-HD4000-x86_64-Debug-CommandBuffer',
'Test-Mac10.8-Clang-MacMini4.1-CPU-SSE4-x86_64-Release',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-MSAN',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Valgrind',
'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
'Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-Vulkan',
'Test-Win7-MSVC-ShuttleA-GPU-HD2000-x86-Debug-ANGLE',
]
cov = coverage.coverage()
cov.start()
for case in cases:
args[case] = get_args(case)
cov.stop()
this_file = os.path.basename(__file__)
_, _, not_run, _ = cov.analysis(this_file)
filtered = [line for line in not_run if line > cov_start and line < cov_end]
if filtered:
print 'Lines not covered by test cases: ', filtered
sys.exit(1)
golden = this_file.replace('.py', '.json')
with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
json.dump(args, f, indent=2, sort_keys=True)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
self_test()
sys.exit(0)
if len(sys.argv) != 3:
print usage
sys.exit(1)
with open(sys.argv[1], 'w') as out:
json.dump(get_args(sys.argv[2]), out)
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import uuid
from orquesta import conducting
from orquesta import statuses as wf_statuses
import st2tests
import st2tests.config as tests_config
tests_config.parse_args()
from local_runner import local_shell_command_runner
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as action_constants
from st2common.exceptions import workflow as wf_exc
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as action_service
from st2common.services import workflows as workflow_service
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)
PACKS = [
TEST_PACK_PATH,
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]
RUNNER_RESULT_FAILED = (action_constants.LIVEACTION_STATUS_FAILED, {}, {})
RUNNER_RESULT_SUCCEEDED = (
action_constants.LIVEACTION_STATUS_SUCCEEDED,
{"stdout": "foobar"},
{},
)
@mock.patch.object(
publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
publishers.CUDPublisher,
"publish_create",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_state",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
class WorkflowExecutionRerunTest(st2tests.WorkflowTestCase):
@classmethod
def setUpClass(cls):
super(WorkflowExecutionRerunTest, cls).setUpClass()
# Register runners.
runnersregistrar.register_runners()
# Register test pack(s).
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False, fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
def prep_wf_ex_for_rerun(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db1 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db1, ac_ex_db1 = action_service.create_request(lv_ac_db1)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db1)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db1, st2_ctx)
wf_ex_db = self.prep_wf_ex(wf_ex_db)
# Fail workflow execution.
self.run_workflow_step(
wf_ex_db,
"task1",
0,
expected_ac_ex_db_status=action_constants.LIVEACTION_STATUS_FAILED,
expected_tk_ex_db_status=wf_statuses.FAILED,
)
# Check workflow status.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.FAILED)
self.assertEqual(wf_ex_db.status, wf_statuses.FAILED)
lv_ac_db1 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db1.id))
self.assertEqual(lv_ac_db1.status, action_constants.LIVEACTION_STATUS_FAILED)
ac_ex_db1 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db1.id))
self.assertEqual(ac_ex_db1.status, action_constants.LIVEACTION_STATUS_FAILED)
return wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(side_effect=[RUNNER_RESULT_FAILED, RUNNER_RESULT_SUCCEEDED]),
)
def test_request_rerun(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
# Request workflow execution rerun.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
wf_ex_db = workflow_service.request_rerun(ac_ex_db2, st2_ctx, rerun_options)
wf_ex_db = self.prep_wf_ex(wf_ex_db)
# Check workflow status.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Complete task1.
self.run_workflow_step(wf_ex_db, "task1", 0)
# Check workflow status and make sure it is still running.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
lv_ac_db2 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db2.id))
self.assertEqual(lv_ac_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
ac_ex_db2 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db2.id))
self.assertEqual(ac_ex_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
def test_request_rerun_while_original_is_still_running(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db1 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db1, ac_ex_db1 = action_service.create_request(lv_ac_db1)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db1)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db1, st2_ctx)
wf_ex_db = self.prep_wf_ex(wf_ex_db)
# Check workflow status.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
# Request workflow execution rerun.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
'^Unable to rerun workflow execution ".*" '
"because it is not in a completed state.$"
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(side_effect=[RUNNER_RESULT_FAILED, RUNNER_RESULT_SUCCEEDED]),
)
def test_request_rerun_again_while_prev_rerun_is_still_running(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
# Request workflow execution rerun.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
wf_ex_db = workflow_service.request_rerun(ac_ex_db2, st2_ctx, rerun_options)
wf_ex_db = self.prep_wf_ex(wf_ex_db)
# Check workflow status.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
# Complete task1.
self.run_workflow_step(wf_ex_db, "task1", 0)
# Check workflow status and make sure it is still running.
conductor, wf_ex_db = workflow_service.refresh_conductor(str(wf_ex_db.id))
self.assertEqual(conductor.get_workflow_status(), wf_statuses.RUNNING)
self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
lv_ac_db2 = lv_db_access.LiveAction.get_by_id(str(lv_ac_db2.id))
self.assertEqual(lv_ac_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
ac_ex_db2 = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db2.id))
self.assertEqual(ac_ex_db2.status, action_constants.LIVEACTION_STATUS_RUNNING)
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db3 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db3, ac_ex_db3 = action_service.create_request(lv_ac_db3)
# Request workflow execution rerun again.
st2_ctx = self.mock_st2_context(ac_ex_db3, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
'^Unable to rerun workflow execution ".*" '
"because it is not in a completed state.$"
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db3,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_missing_workflow_execution_id(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
# Request workflow execution rerun without workflow_execution_id.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
"Unable to rerun workflow execution because "
"workflow_execution_id is not provided."
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_nonexistent_workflow_execution(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
# Request workflow execution rerun with bogus workflow_execution_id.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = uuid.uuid4().hex[0:24]
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
'^Unable to rerun workflow execution ".*" ' "because it does not exist.$"
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_workflow_execution_not_abended(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually set workflow execution status to paused.
wf_ex_db.status = wf_statuses.PAUSED
wf_ex_db = wf_db_access.WorkflowExecution.add_or_update(wf_ex_db)
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
        # Request workflow execution rerun while the workflow execution status is paused.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
'^Unable to rerun workflow execution ".*" '
"because it is not in a completed state.$"
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_conductor_status_not_abended(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually set workflow conductor state to paused.
wf_ex_db.state["status"] = wf_statuses.PAUSED
wf_ex_db = wf_db_access.WorkflowExecution.add_or_update(wf_ex_db)
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
        # Request workflow execution rerun while the conductor status is paused.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
"Unable to rerun workflow because it is not in a completed state."
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_bad_task_name(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
        # Request workflow execution rerun with a task name that does not exist.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task5354"]}
expected_error = (
"^Unable to rerun workflow because one or more tasks is not found: .*$"
)
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
@mock.patch.object(
local_shell_command_runner.LocalShellCommandRunner,
"run",
mock.MagicMock(return_value=RUNNER_RESULT_FAILED),
)
def test_request_rerun_with_conductor_status_not_resuming(self):
# Create and return a failed workflow execution.
wf_meta, lv_ac_db1, ac_ex_db1, wf_ex_db = self.prep_wf_ex_for_rerun()
# Manually create the liveaction and action execution objects for the rerun.
lv_ac_db2 = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db2, ac_ex_db2 = action_service.create_request(lv_ac_db2)
        # Request workflow execution rerun.
st2_ctx = self.mock_st2_context(ac_ex_db2, ac_ex_db1.context)
st2_ctx["workflow_execution_id"] = str(wf_ex_db.id)
rerun_options = {"ref": str(ac_ex_db1.id), "tasks": ["task1"]}
expected_error = (
'^Unable to rerun workflow execution ".*" ' "due to an unknown cause."
)
with mock.patch.object(
conducting.WorkflowConductor,
"get_workflow_status",
mock.MagicMock(return_value=wf_statuses.FAILED),
):
self.assertRaisesRegexp(
wf_exc.WorkflowExecutionRerunException,
expected_error,
workflow_service.request_rerun,
ac_ex_db2,
st2_ctx,
rerun_options,
)
|
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module allows adding and deleting of projects to the local manifest."""
from __future__ import print_function
import logging
import platform
import optparse
import os
import sys
import xml.etree.ElementTree as ElementTree
from chromite.lib import cros_build_lib
from chromite.lib import git
class Manifest(object):
"""Class which provides an abstraction for manipulating the local manifest."""
@classmethod
def FromPath(cls, path, empty_if_missing=False):
if os.path.isfile(path):
with open(path) as f:
return cls(f.read())
    elif not empty_if_missing:
      cros_build_lib.Die('Manifest file, %r, not found' % path)
return cls()
def __init__(self, text=None):
self._text = text or '<manifest>\n</manifest>'
self.nodes = ElementTree.fromstring(self._text)
def AddNonWorkonProject(self, name, path, remote=None, revision=None):
"""Add a new nonworkon project element to the manifest tree."""
element = ElementTree.Element('project', name=name, path=path,
remote=remote)
element.attrib['workon'] = 'False'
if revision is not None:
element.attrib['revision'] = revision
self.nodes.append(element)
return element
def GetProject(self, name, path=None):
"""Accessor method for getting a project node from the manifest tree.
Returns:
project element node from ElementTree, otherwise, None
"""
if path is None:
# Use a unique value that can't ever match.
path = object()
for project in self.nodes.findall('project'):
if project.attrib['name'] == name or project.attrib['path'] == path:
return project
return None
def ToString(self):
# Reset the tail for each node, then just do a hacky replace.
project = None
for project in self.nodes.findall('project'):
project.tail = '\n '
if project is not None:
# Tweak the last project to not have the trailing space.
project.tail = '\n'
# Fix manifest tag text and tail.
self.nodes.text = '\n '
self.nodes.tail = '\n'
return ElementTree.tostring(self.nodes)
def GetProjects(self):
return list(self.nodes.findall('project'))
def _AddProjectsToManifestGroups(options, *args):
"""Enable the given manifest groups for the configured repository."""
groups_to_enable = ['name:%s' % x for x in args]
git_config = options.git_config
cmd = ['config', '-f', git_config, '--get', 'manifest.groups']
enabled_groups = git.RunGit('.', cmd, error_code_ok=True).output.split(',')
# Note that ordering actually matters, thus why the following code
# is written this way.
# Per repo behaviour, enforce an appropriate platform group if
# we're converting from a default manifest group to a limited one.
# Finally, note we reprocess the existing groups; this is to allow
# us to cleanup any user screwups, or our own screwups.
requested_groups = (
['minilayout', 'platform-%s' % (platform.system().lower(),)] +
enabled_groups + list(groups_to_enable))
processed_groups = set()
finalized_groups = []
for group in requested_groups:
if group not in processed_groups:
finalized_groups.append(group)
processed_groups.add(group)
cmd = ['config', '-f', git_config, 'manifest.groups',
','.join(finalized_groups)]
git.RunGit('.', cmd)
def _UpgradeMinilayout(options):
"""Convert a repo checkout away from minilayout.xml to default.xml."""
full_tree = Manifest.FromPath(options.default_manifest_path)
local_manifest_exists = os.path.exists(options.local_manifest_path)
new_groups = []
if local_manifest_exists:
local_tree = Manifest.FromPath(options.local_manifest_path)
# Identify which projects need to be transferred across.
projects = local_tree.GetProjects()
new_groups = [x.attrib['name'] for x in projects]
allowed = set(x.attrib['name'] for x in full_tree.GetProjects())
transferred = [x for x in projects if x.attrib['name'] in allowed]
for project in transferred:
# Mangle local_manifest object, removing those projects;
# note we'll still be adding those projects to the default groups,
# including those that didn't intersect the main manifest.
local_tree.nodes.remove(project)
_AddProjectsToManifestGroups(options, *new_groups)
if local_manifest_exists:
    # Rewrite the local_manifest now; if there are no settings left in
    # the local_manifest, wipe it.
if local_tree.nodes.getchildren():
with open(options.local_manifest_path, 'w') as f:
f.write(local_tree.ToString())
else:
os.unlink(options.local_manifest_path)
# Finally, move the symlink.
os.unlink(options.manifest_sym_path)
os.symlink('manifests/default.xml', options.manifest_sym_path)
logging.info("Converted the checkout to manifest groups based minilayout.")
def main(argv):
parser = optparse.OptionParser(usage='usage: %prog add [options] <name> '
'<--workon | <path> --remote <remote> >')
parser.add_option('-w', '--workon', action='store_true', dest='workon',
default=False, help='Is this a workon package?')
parser.add_option('-r', '--remote', dest='remote',
default=None)
parser.add_option('-v', '--revision', dest='revision',
default=None,
help="Use to override the manifest defined default "
"revision used for a given project.")
parser.add_option('--upgrade-minilayout', default=False, action='store_true',
help="Upgrade a minilayout checkout into a full.xml "
"checkout utilizing manifest groups.")
(options, args) = parser.parse_args(argv)
repo_dir = git.FindRepoDir(os.getcwd())
if not repo_dir:
parser.error("This script must be invoked from within a repository "
"checkout.")
options.git_config = os.path.join(repo_dir, 'manifests.git', 'config')
options.repo_dir = repo_dir
options.local_manifest_path = os.path.join(repo_dir, 'local_manifest.xml')
# This constant is used only when we're doing an upgrade away from
# minilayout.xml to default.xml.
options.default_manifest_path = os.path.join(repo_dir, 'manifests',
'default.xml')
options.manifest_sym_path = os.path.join(repo_dir, 'manifest.xml')
active_manifest = os.path.basename(os.readlink(options.manifest_sym_path))
upgrade_required = active_manifest == 'minilayout.xml'
if options.upgrade_minilayout:
if args:
parser.error("--upgrade-minilayout takes no arguments.")
if not upgrade_required:
print("This repository checkout isn't using minilayout.xml; "
"nothing to do")
else:
_UpgradeMinilayout(options)
return 0
elif upgrade_required:
logging.warn(
"Your repository checkout is using the old minilayout.xml workflow; "
"auto-upgrading it.")
cros_build_lib.RunCommand(
[sys.argv[0], '--upgrade-minilayout'], cwd=os.getcwd(), print_cmd=False)
if not args:
parser.error("No command specified.")
elif args[0] != 'add':
parser.error("Only supported subcommand is add right now.")
elif options.workon:
if len(args) != 2:
parser.error(
"Argument count is wrong for --workon; must be add <project>")
name, path = args[1], None
else:
if options.remote is None:
parser.error('Adding non-workon projects requires a remote.')
elif len(args) != 3:
parser.error(
"Argument count is wrong for non-workon mode; "
"must be add <project> <path> --remote <remote-arg>")
name, path = args[1:]
revision = options.revision
if revision is not None:
if (not git.IsRefsTags(revision) and
not git.IsSHA1(revision)):
revision = git.StripRefsHeads(revision, False)
main_manifest = Manifest.FromPath(options.manifest_sym_path,
empty_if_missing=False)
  local_manifest = Manifest.FromPath(options.local_manifest_path,
                                     empty_if_missing=True)
main_element = main_manifest.GetProject(name, path=path)
if options.workon:
if main_element is None:
parser.error('No project named %r in the default manifest.' % name)
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
elif main_element is not None:
if options.remote is not None:
      # The project already exists in the main manifest, so it likely wasn't
      # meant to be added as a non-workon project; treat it as workon instead.
print("Project already exists in manifest. Using that as workon project.")
_AddProjectsToManifestGroups(options, main_element.attrib['name'])
else:
# Conflict will occur; complain.
parser.error("Requested project name=%r path=%r will conflict with "
"your current manifest %s" % (name, path, active_manifest))
elif local_manifest.GetProject(name, path=path) is not None:
parser.error("Requested project name=%r path=%r conflicts with "
"your local_manifest.xml" % (name, path))
else:
element = local_manifest.AddNonWorkonProject(name=name, path=path,
remote=options.remote,
revision=revision)
_AddProjectsToManifestGroups(options, element.attrib['name'])
with open(options.local_manifest_path, 'w') as f:
f.write(local_manifest.ToString())
return 0
|
|
# Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
from numpy.testing import assert_allclose
from plda import plda
from plda.plda.model import (
get_space_walk,
transform_D_to_X,
transform_X_to_U,
transform_U_to_U_model,
transform_U_model_to_U,
transform_U_to_X,
transform_X_to_D
)
from sklearn.decomposition import PCA
def gen_invertible_matrix(dim, scale):
arr = np.random.random((dim, dim)) * scale
return np.matmul(arr, arr.T)
def test_get_space_walk():
spaces = ['U_model', 'U', 'X', 'D']
actual = list(get_space_walk('U_model', 'D'))
expected = list(zip(spaces[:-1], spaces[1:]))
assert actual == expected
actual = list(get_space_walk('U_model', 'X'))
expected = list(zip(spaces[:-2], spaces[1:-1]))
assert actual == expected
actual = list(get_space_walk('U_model', 'U'))
expected = [(spaces[0], spaces[1])]
assert actual == expected
actual = list(get_space_walk('U', 'X'))
expected = [(spaces[1], spaces[2])]
assert actual == expected
actual = list(get_space_walk('U', 'D'))
expected = [(spaces[1], spaces[2]), (spaces[2], spaces[3])]
assert actual == expected
actual = list(get_space_walk('X', 'D'))
expected = [(spaces[2], spaces[3])]
assert actual == expected
actual = list(get_space_walk('D', 'X'))
expected = [(spaces[3], spaces[2])]
assert actual == expected
actual = list(get_space_walk('D', 'U'))
expected = [(spaces[3], spaces[2]), (spaces[2], spaces[1])]
assert actual == expected
actual = list(get_space_walk('D', 'U_model'))
expected = list(zip(spaces[::-1][:-1], spaces[::-1][1:]))
assert actual == expected
actual = list(get_space_walk('X', 'U'))
expected = [(spaces[2], spaces[1])]
assert actual == expected
actual = list(get_space_walk('X', 'U_model'))
expected = [(spaces[2], spaces[1]), (spaces[1], spaces[0])]
assert actual == expected
actual = list(get_space_walk('U', 'U_model'))
expected = [(spaces[1], spaces[0])]
assert actual == expected
def test_transform_D_to_X():
np.random.seed(1234)
n = 100
dim = 5
data = np.random.random((n, dim))
expected = data
actual = transform_D_to_X(expected, None)
assert_allclose(actual, expected)
pca = PCA(n_components=2)
pca.fit(data)
expected = pca.transform(data)
actual = transform_D_to_X(data, pca)
assert_allclose(actual, expected)
def test_transform_X_to_U():
np.random.seed(1234)
n = 100
dim = 5
expected = np.random.random((n, dim))
A = gen_invertible_matrix(dim, 10)
m = np.random.random(dim)
data = np.matmul(expected, A.T) + m
actual = transform_X_to_U(data, np.linalg.inv(A), m)
assert_allclose(actual, expected)
def test_transform_U_to_U_model():
np.random.seed(1234)
n = 100
target_dim = 50
subspace_dim = 10
dims = np.arange(target_dim)
np.random.shuffle(dims)
dims = dims[:subspace_dim]
data = np.random.random((n, target_dim))
expected = data[:, dims]
actual = transform_U_to_U_model(data, dims)
assert_allclose(actual, expected)
def test_transform_U_model_to_U():
np.random.seed(1234)
target_dim = 100
subspace_dim = 50
n = 100
relevant_U_dims = np.arange(target_dim)
np.random.shuffle(relevant_U_dims)
relevant_U_dims = relevant_U_dims[:subspace_dim]
data = np.random.random((n, subspace_dim))
expected = np.zeros((n, target_dim))
expected[:, relevant_U_dims] = data
actual = transform_U_model_to_U(data, relevant_U_dims, target_dim)
assert_allclose(actual, expected)
def test_transform_U_to_X():
np.random.seed(1234)
n = 100
dim = 5
A = gen_invertible_matrix(dim, 10)
m = np.random.random(dim)
expected = np.random.random((n, dim))
data = np.matmul(expected - m, np.linalg.inv(A).T)
actual = transform_U_to_X(data, A, m)
assert_allclose(actual, expected)
def test_transform_X_to_D():
np.random.seed(1234)
n = 100
target_dim = 5
subspace_dim = 2
data = np.random.random((n, subspace_dim))
expected = np.zeros((n, target_dim))
expected[:, :subspace_dim] = data
actual = transform_X_to_D(expected, None)
assert_allclose(actual, expected)
pca = PCA(n_components=subspace_dim)
pca.fit(expected)
actual = transform_X_to_D(pca.transform(expected), pca)
assert_allclose(actual, expected)
|
|
from sympy import (
Symbol, gamma, I, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, lowergamma, exp, erf, exp_polar, harmonic, zeta,conjugate)
from sympy.core.function import ArgumentIndexError
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
test_numerically as tn)
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
w = Symbol('w', real=True)
def test_gamma():
assert gamma(nan) == nan
assert gamma(oo) == oo
assert gamma(-100) == zoo
assert gamma(0) == zoo
assert gamma(1) == 1
assert gamma(2) == 1
assert gamma(3) == 2
assert gamma(102) == factorial(101)
assert gamma(Rational(1, 2)) == sqrt(pi)
assert gamma(Rational(3, 2)) == Rational(1, 2)*sqrt(pi)
assert gamma(Rational(5, 2)) == Rational(3, 4)*sqrt(pi)
assert gamma(Rational(7, 2)) == Rational(15, 8)*sqrt(pi)
assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
assert gamma(Rational(-3, 2)) == Rational(4, 3)*sqrt(pi)
assert gamma(Rational(-5, 2)) == -Rational(8, 15)*sqrt(pi)
assert gamma(Rational(-15, 2)) == Rational(256, 2027025)*sqrt(pi)
assert gamma(Rational(
-11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
assert gamma(Rational(
-10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
assert gamma(Rational(
14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
assert gamma(Rational(
17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
assert gamma(Rational(
19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
assert conjugate(gamma(x)) == gamma(conjugate(x))
assert gamma(w).is_real is True
assert expand_func(gamma(x + Rational(3, 2))) == \
(x + Rational(1, 2))*gamma(x + Rational(1, 2))
assert expand_func(gamma(x - Rational(1, 2))) == \
gamma(Rational(1, 2) + x)/(x - Rational(1, 2))
# Test a bug:
assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
assert gamma(3*exp_polar(I*pi)/4).is_nonpositive is True
def test_gamma_series():
assert gamma(x + 1).series(x, 0, 3) == \
1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
assert gamma(x).series(x, -1, 3) == \
-1/x + EulerGamma - 1 + x*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma) \
+ x**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 -
polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O(x**3)
def tn_branch(s, func):
from sympy import I, pi, exp_polar
from random import uniform
c = uniform(1, 5)
expr = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
eps = 1e-15
expr2 = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
def test_lowergamma():
from sympy import meijerg, exp_polar, I, expint
assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
assert td(lowergamma(randcplx(), y), y)
assert td(lowergamma(x, randcplx()), x)
assert lowergamma(x, y).diff(x) == \
gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
- meijerg([], [1, 1], [0, 0, x], [], y)
assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
assert not lowergamma(S.Half - 3, x).has(lowergamma)
assert not lowergamma(S.Half + 3, x).has(lowergamma)
assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
assert tn(lowergamma(S.Half + 3, x, evaluate=False),
lowergamma(S.Half + 3, x), x)
assert tn(lowergamma(S.Half - 3, x, evaluate=False),
lowergamma(S.Half - 3, x), x)
assert tn_branch(-3, lowergamma)
assert tn_branch(-4, lowergamma)
assert tn_branch(S(1)/3, lowergamma)
assert tn_branch(pi, lowergamma)
assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
assert lowergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
assert conjugate(lowergamma(x, y)) == lowergamma(conjugate(x), conjugate(y))
assert conjugate(lowergamma(x, 0)) == conjugate(lowergamma(x, 0))
assert conjugate(lowergamma(x, -oo)) == conjugate(lowergamma(x, -oo))
assert lowergamma(
x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
k = Symbol('k', integer=True)
assert lowergamma(
k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
k = Symbol('k', integer=True, positive=False)
assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
def test_uppergamma():
from sympy import meijerg, exp_polar, I, expint
assert uppergamma(4, 0) == 6
assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
assert td(uppergamma(randcplx(), y), y)
assert uppergamma(x, y).diff(x) == \
uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
assert td(uppergamma(x, randcplx()), x)
assert uppergamma(S.Half, x) == sqrt(pi)*(1 - erf(sqrt(x)))
assert not uppergamma(S.Half - 3, x).has(uppergamma)
assert not uppergamma(S.Half + 3, x).has(uppergamma)
assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
assert tn(uppergamma(S.Half + 3, x, evaluate=False),
uppergamma(S.Half + 3, x), x)
assert tn(uppergamma(S.Half - 3, x, evaluate=False),
uppergamma(S.Half - 3, x), x)
assert tn_branch(-3, uppergamma)
assert tn_branch(-4, uppergamma)
assert tn_branch(S(1)/3, uppergamma)
assert tn_branch(pi, uppergamma)
assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
assert uppergamma(y, exp_polar(5*pi*I)*x) == \
exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
gamma(y)*(1 - exp(4*pi*I*y))
assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
assert uppergamma(-2, x) == expint(3, x)/x**2
assert conjugate(uppergamma(x, y)) == uppergamma(conjugate(x), conjugate(y))
assert conjugate(uppergamma(x, 0)) == gamma(conjugate(x))
assert conjugate(uppergamma(x, -oo)) == conjugate(uppergamma(x, -oo))
assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
def test_polygamma():
from sympy import I
assert polygamma(n, nan) == nan
assert polygamma(0, oo) == oo
assert polygamma(0, -oo) == oo
assert polygamma(0, I*oo) == oo
assert polygamma(0, -I*oo) == oo
assert polygamma(1, oo) == 0
assert polygamma(5, oo) == 0
assert polygamma(0, -9) == zoo
assert polygamma(0, -9) == zoo
assert polygamma(0, -1) == zoo
assert polygamma(0, 0) == zoo
assert polygamma(0, 1) == -EulerGamma
assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
assert polygamma(1, 1) == pi**2/6
assert polygamma(1, 2) == pi**2/6 - 1
assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
assert polygamma(3, 1) == pi**4 / 15
assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
assert polygamma(5, 1) == 8 * pi**6 / 63
def t(m, n):
x = S(m)/n
r = polygamma(0, x)
if r.has(polygamma):
return False
return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
assert t(1, 2)
assert t(3, 2)
assert t(-1, 2)
assert t(1, 4)
assert t(-3, 4)
assert t(1, 3)
assert t(4, 3)
assert t(3, 4)
assert t(2, 3)
assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
ni = Symbol("n", integer=True)
assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
+ zeta(ni + 1))*factorial(ni)
# Polygamma of non-negative integer order is unbranched:
from sympy import exp_polar
k = Symbol('n', integer=True, nonnegative=True)
assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
# but negative integers are branched!
k = Symbol('n', integer=True)
assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
# Polygamma of order -1 is loggamma:
assert polygamma(-1, x) == loggamma(x)
# But smaller orders are iterated integrals and don't have a special name
assert polygamma(-2, x).func is polygamma
# Test a bug
assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
def test_polygamma_expand_func():
assert polygamma(0, x).expand(func=True) == polygamma(0, x)
assert polygamma(0, 2*x).expand(func=True) == \
polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
assert polygamma(1, 2*x).expand(func=True) == \
polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
assert polygamma(2, x).expand(func=True) == \
polygamma(2, x)
assert polygamma(0, -1 + x).expand(func=True) == \
polygamma(0, x) - 1/(x - 1)
assert polygamma(0, 1 + x).expand(func=True) == \
1/x + polygamma(0, x )
assert polygamma(0, 2 + x).expand(func=True) == \
1/x + 1/(1 + x) + polygamma(0, x)
assert polygamma(0, 3 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
assert polygamma(0, 4 + x).expand(func=True) == \
polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
assert polygamma(1, 1 + x).expand(func=True) == \
polygamma(1, x) - 1/x**2
assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
1/(2 + x)**2 - 1/(3 + x)**2
assert polygamma(0, x + y).expand(func=True) == \
polygamma(0, x + y)
assert polygamma(1, x + y).expand(func=True) == \
polygamma(1, x + y)
assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
polygamma(3, y + 4*x) - 6/(y + 4*x)**4
e = polygamma(3, 4*x + y + S(3)/2)
assert e.expand(func=True) == e
e = polygamma(3, x + y + S(3)/4)
assert e.expand(func=True, basic=False) == e
def test_loggamma():
raises(TypeError, lambda: loggamma(2, 3))
raises(ArgumentIndexError, lambda: loggamma(x).fdiff(2))
assert loggamma(x).diff(x) == polygamma(0, x)
s1 = loggamma(1/(x + sin(x)) + cos(x)).nseries(x, n=4)
s2 = (-log(2*x) - 1)/(2*x) - log(x/pi)/2 + (4 - log(2*x))*x/24 + O(x**2) + \
log(x)*x**2/2
assert (s1 - s2).expand(force=True).removeO() == 0
s1 = loggamma(1/x).series(x)
s2 = (1/x - S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
x/12 - x**3/360 + x**5/1260 + O(x**7)
assert ((s1 - s2).expand(force=True)).removeO() == 0
assert loggamma(x).rewrite('intractable') == log(gamma(x))
s1 = loggamma(x).series(x)
assert s1 == -log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + \
pi**4*x**4/360 + x**5*polygamma(4, 1)/120 + O(x**6)
assert s1 == loggamma(x).rewrite('intractable').series(x)
assert conjugate(loggamma(x)) == loggamma(conjugate(x))
assert conjugate(loggamma(0)) == conjugate(loggamma(0))
assert conjugate(loggamma(1)) == loggamma(conjugate(1))
assert conjugate(loggamma(-oo)) == conjugate(loggamma(-oo))
assert loggamma(x).is_real is None
y, z = Symbol('y', real=True), Symbol('z', imaginary=True)
assert loggamma(y).is_real
assert loggamma(z).is_real is False
def tN(N, M):
assert loggamma(1/x)._eval_nseries(x, n=N).getn() == M
tN(0, 0)
tN(1, 1)
tN(2, 3)
tN(3, 3)
tN(4, 5)
tN(5, 5)
def test_polygamma_expansion():
    # A. & S., pp. 259 and 260
assert polygamma(0, 1/x).nseries(x, n=3) == \
-log(x) - x/2 - x**2/12 + O(x**4)
assert polygamma(1, 1/x).series(x, n=5) == \
x + x**2/2 + x**3/6 + O(x**5)
assert polygamma(3, 1/x).nseries(x, n=11) == \
2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import calendar
import logging
import requests
import xmltodict
import pytz
import dateutil.parser
import module.collector.config as config
import module.common.const as const
import module.common.util as util
from datetime import datetime
from module.common.topologydb import *
from module.common.md import post_md
from module.common.tn_md import create_monitoring_data_xml,DBUploader
# constants
COL_NAME = 'monitoring-data(tn)'
POST_URI = config.post_uri + "/" + const.TYPE_MON_TN
# logger.
logger = logging.getLogger(const.MODULE_NAME_COL)
# set data uploader script file path.
db_upld = DBUploader(log_name=const.MODULE_NAME_COL,
db_name=config.mon_data_tn_db,
db_host=config.db_addr,db_port=config.db_port,
db_user=config.db_user,db_pass=config.db_pass)
# set interval.
interval = 0
for module in config.module_list:
if module['class-name'].split('.')[-1] == 'MonitoringDataTN':
interval = module['interval']
break
class MonitoringDataTN():
def __check_timestamp(self,timestamp,now_time):
        # Replace timestamp with now_time if timestamp is older than (now_time - interval).
if int(timestamp) < int(now_time) - interval:
return str(now_time)
return str(timestamp)
def __get_nsi_monitoring_data(self):
logger.debug("get monitoring-data from NSI.")
# HTTP GET monitoring-data from NSI
res = requests.get(config.nsi_uri, timeout=const.HTTP_TIME_OUT)
# get Xml root element in the Dictionary type.
all_nsi_md = dict()
xd_root = xmltodict.parse(res.text)
# get <reservationIDMaps>
if not xd_root.has_key(const.NST_TAG_RESERVE_MAPS):
logger.warn('tag <{0}> is not specified.'.format(const.NST_TAG_RESERVE_MAPS))
return None
xd_reservationIDMaps = xd_root[const.NST_TAG_RESERVE_MAPS]
# get <updateTime>
if not xd_reservationIDMaps.has_key(const.NSI_TAG_UPDATE_TIME):
logger.warn('tag <{0}> is not specified.'.format(const.NSI_TAG_UPDATE_TIME))
return None
# convert the updateTime to UNIXTIME(UTC)
dt_update_time = dateutil.parser.parse(xd_reservationIDMaps[const.NSI_TAG_UPDATE_TIME]).astimezone(pytz.timezone('UTC'))
update_time = calendar.timegm(dt_update_time.timetuple())
logger.debug('update time.({0}->{1})'.format(xd_reservationIDMaps[const.NSI_TAG_UPDATE_TIME],update_time))
# get <reservationIDMap>
if not xd_reservationIDMaps.has_key(const.NST_TAG_RESERVE_MAP):
logger.warn('tag <{0}> is not specified.'.format(const.NST_TAG_RESERVE_MAP))
return None
for xd_reservationIDMap in util.to_array(xd_reservationIDMaps[const.NST_TAG_RESERVE_MAP]):
# get <resourceSet>
if not xd_reservationIDMap.has_key(const.NST_TAG_RESOURCE_SET):
logger.warn('tag <{0}> is not specified.'.format(const.NST_TAG_RESOURCE_SET))
continue
for xd_resourceSet in util.to_array(xd_reservationIDMap[const.NST_TAG_RESOURCE_SET]):
# get <networkResource>
if not xd_resourceSet.has_key(const.NST_TAG_NW_RESOURCE):
logger.warn('tag <{0}> is not specified.'.format(const.NST_TAG_NW_RESOURCE))
continue
xd_networkResource = xd_resourceSet[const.NST_TAG_NW_RESOURCE]
# check <globalReservationId>
if not xd_networkResource.has_key(const.NSI_TAG_LINK_ID):
logger.warn('tag <{0}> is not specified.'.format(const.NSI_TAG_LINK_ID))
continue
logger.debug(xd_networkResource[const.NSI_TAG_LINK_ID])
# check <dataPlaneState>
if not xd_networkResource.has_key(const.NSI_TAG_STATE):
logger.warn('tag <{0}> is not specified.'.format(const.NSI_TAG_STATE))
continue
md_dict = {'timestamp':update_time}
# # get <provisionState>
# if xd_networkResource[const.NSI_TAG_STATE] == 'PROVISIONED':
# md_dict['status'] = const.MD_STATUS_UP
# else:
# md_dict['status'] = const.MD_STATUS_DOWN
# get <dataPlaneState>isAct=false, ver=0, isConsistent=true</dataPlaneState>
# create status dict.
                # e.g. state_list=[[isAct,false], [ver,0], [isConsistent,true]]
state_list = [status_value_list.split("=") for status_value_list in xd_networkResource[const.NSI_TAG_STATE].split(",")]
logger.debug(state_list)
                # e.g. state_dict={isAct:'false', ver:'0', isConsistent:'true'}
state_dict = dict(state_list)
logger.debug(state_dict)
if not state_dict.has_key(const.NSI_ELM_STATE):
logger.warn('element[{0}] is not found.'.format(const.NSI_ELM_STATE))
continue
if state_dict[const.NSI_ELM_STATE].lower() == 'true':
md_dict['status'] = const.MD_STATUS_UP
else:
md_dict['status'] = const.MD_STATUS_DOWN
all_nsi_md[xd_networkResource[const.NSI_TAG_LINK_ID]] = md_dict
logger.debug(all_nsi_md)
return all_nsi_md
def __get_monitoring_data(self,all_nsi_md,link_name,now_time):
logger.debug("get monitoring-data.")
# find NSI-monitoring-data dict.
if not all_nsi_md.has_key(link_name):
return None
md_dict = all_nsi_md[link_name]
val_list = list()
val_dict = dict()
val_dict['status'] = md_dict['status']
val_dict['timestamp'] = self.__check_timestamp(md_dict['timestamp'],now_time)
val_list.append(val_dict)
res_dict = {'link_name':link_name,'val_list':val_list}
return res_dict
def main(self):
# get now time.(UTC:0)
now_time = calendar.timegm(datetime.utcnow().timetuple())
try:
print(COL_NAME + ' -start-')
logger.info(COL_NAME + ' -start-')
# open topology database connection.
tpldb_setup()
# get all of the monitoring-data from NSI.
all_nsi_md = self.__get_nsi_monitoring_data()
# get all of the TN-link from DB.
link_list = get_all_tn_link()
all_md_list = []
for link in link_list:
# get monitoring-data from NSI.
link_name = link.link_name
md_dict = self.__get_monitoring_data(all_nsi_md,link_name,now_time)
if not md_dict:
                    logger.debug('no monitoring-data.(link={0})'.format(link_name))
continue
md_dict['network_type'] = link.network.type
md_dict['network_name'] = link.network_name
md_dict['link_type'] = link.type
### md_dict={network_type:slice,network_name:xxx,
### link_type:tn,link_name:xxx,val_list:list(val_dict[param_name:value])}
logger.debug(md_dict)
all_md_list.append(md_dict)
if not all_md_list:
                logger.debug('no monitoring-data for any TN-link.')
return
# parse monitoring-data-list to monitoring-data-xml.
md_xml = create_monitoring_data_xml(logger,all_md_list)
if not md_xml:
logger.debug('monitoring-data-xml is null.')
return
logger.debug(md_xml)
# upload monitoring-data to DB.
logger.debug('upload monitoring-data to DB.')
if not db_upld.upload_monitoring_data_all(md_xml):
                logger.debug('upload of monitoring-data to DB failed.')
return
# post the monitoring-data to the master-monitoring-server.
logger.debug('post the monitoring-data to the master-monitoring-server.')
res_flg,res = post_md(POST_URI,md_xml,'yes')
if res_flg is False:
logger.error('post monitoring-data error.(post_uri={0})'.format(POST_URI))
if res:
logger.debug("HTTP Response({0}):{1}".format(res.status_code,res.text))
except Exception:
logger.exception(COL_NAME)
print(COL_NAME + ' -exception-')
finally:
# close topology database connection.
tpldb_close()
logger.info(COL_NAME + ' -end-')
print(COL_NAME + ' -end-')
return
|
|
#!/usr/bin/env python3
import numpy as np
import numpy.core.umath_tests as ut
import json
import collections
import itertools
from . import cromermann_params
def round_to_infinity(a):
"""
    Signed ceiling function that rounds away from zero: positive values are
    rounded up to the next integer and negative values down to the next
    (more negative) integer.
"""
return np.copysign(np.ceil(np.fabs(a)), a)
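# Worked example for round_to_infinity (added for clarity, not part of the
# original module); exact reprs may differ slightly between numpy versions:
#
#     >>> round_to_infinity(1.2)
#     2.0
#     >>> round_to_infinity(-1.2)
#     -2.0
#     >>> round_to_infinity(np.array([0.1, -0.1, 2.0]))
#     array([ 1., -1.,  2.])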
def memoize(f):
"""Memoization decorator for functions"""
cache = {}
def decorated_function(*args):
if args in cache:
return cache[args]
else:
res = f(*args)
cache[args] = res
return res
return decorated_function
def rotation_matrix(theta, axis = None):
"""
Returns the transformation matrix M that rotates a vector v around
    the given axis (anticlockwise in 2D if no axis is supplied) by an angle
    theta. The rotation can be applied by calling np.dot(M, v).
"""
if axis is None:
return np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
axis = np.asarray(axis, dtype=np.float)
theta = np.asarray(theta)
axis /= np.linalg.norm(axis)
a = np.cos(theta / 2)
b, c, d = -axis * np.sin(theta / 2)
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa + bb - cc - dd, 2*(bc + ad), 2*(bd - ac)],
[2*(bc - ad), aa + cc - bb - dd, 2*(cd + ab)],
[2*(bd + ac), 2*(cd - ab), aa + dd - bb - cc]])
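# Hedged usage sketch for rotation_matrix (illustrative only, not from the
# original source): rotating the unit x-vector by 90 degrees anticlockwise
# gives the unit y-vector, both in 2D and about the z-axis in 3D.
#
#     M2 = rotation_matrix(np.pi / 2)                  # 2D, no axis given
#     np.dot(M2, [1, 0])                               # ~[0, 1]
#     M3 = rotation_matrix(np.pi / 2, axis=[0, 0, 1])
#     np.dot(M3, [1, 0, 0])                            # ~[0, 1, 0]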
def align_a_to_b(a, b):
"""
Returns the rotation matrix M required to align the first input vector a
with the second vector b, i.e. np.dot(M, a) || b
"""
axis = np.cross(a, b)
if np.linalg.norm(axis) == 0:
# a and b are already (anti)parallel
theta = {-1:np.pi, 1:0}[np.sign(np.dot(a, b))]
# need to choose axis perpendicular to a
c = np.array([1, 0, 0])
axis = np.cross(a, c)
if np.linalg.norm(axis) == 0:
c = np.array([0, 1, 0])
axis = np.cross(a, c)
else:
theta = np.arccos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))
return rotation_matrix(theta, axis)
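# Hedged usage sketch for align_a_to_b (illustrative only): the returned
# matrix rotates the first vector onto the direction of the second, so
# np.dot(M, a) is parallel to b.
#
#     M = align_a_to_b([1, 0, 0], [0, 0, 1])
#     np.dot(M, [1, 0, 0])                             # ~[0, 0, 1]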
class Lattice:
"""
Crystal lattice objects that consist of a 3D Bravais lattice combined with
an atomic basis. Lattices are defined by the lengths of the three primitive
vectors: a, b, c and the angles between them: alpha, beta, gamma.
    The lattice vectors are stored in a transformation matrix lv, and the
    corresponding reciprocal lattice matrix rlv is calculated from it.
The intensity associated with each reciprocal lattice point is calculated
using the supplied atomic basis.
The orientation of the crystal is stored in an orientation matrix, which can be
changed using a general rotation or by specifying miller indices to align with
a direct space vector.
"""
def __init__(self, a, b, c, alpha, beta, gamma, basis,
rl_scaling_factor = 0.5, orientation = None, radians = True,
**kwargs):
"""
a, b, c, alpha, beta, gamma define a general Bravais lattice in 3D.
The angles alpha, beta, gamma are given in radians by default.
if c is 0 or None, a 2D lattice and corresponding reciprocal lattice
will be created instead. alpha and beta will be ignored and gamma used
as the angle between the two lattice vectors.
basis is a Basis object storing the positions of all atoms within a
unit cell.
rl_scaling_factor is used to scale radii associated with
intensity-weighted reciprocal lattice points.
"""
self.a, self.b, self.c = a, b, c
self.radians = radians
if not radians:
alpha, beta, gamma = np.radians([alpha, beta, gamma])
self.alpha, self.beta, self.gamma = alpha, beta, gamma
self.lv = self.lattice_vectors(a, b, c, alpha, beta, gamma)
self.rlv = self.reciprocal_lattice(self.lv)
self.lvi = np.linalg.inv(self.lv)
self.rlvi = np.linalg.inv(self.rlv)
self.basis = basis
self.rl_scaling_factor = rl_scaling_factor
self.dimension = len(self.lv)
self.orientation = np.identity(self.dimension)
self.generated_lattice = None
if orientation:
self.apply_transform(np.asarray(orientation))
self.generate_lattice()
# nearest neighbour generation
n = list(itertools.product([0, 1], repeat = self.dimension))
self.neighbours2 = np.expand_dims(np.array(n), axis = 1)
@staticmethod
def lattice_vectors(a, b, c, alpha, beta, gamma):
"""
Return the matrix of lattice vectors [a_1 a_2 a_3] for a general
3D lattice defined by the parameters a, b, c, alpha, beta, gamma.
"""
if not c:
# calculate 2D transformation matrix
return np.array([[a, b*np.cos(gamma)],
[0, b*np.sin(gamma)]]).T
# Convert to lattice vectors in orthonormal coordinate system
aby = np.array([alpha, beta, gamma])
cosa, cosb, cosy = np.cos(aby)
sina, sinb, siny = np.sin(aby)
cos2a, cos2b, sin2y = cosa*cosa, cosb*cosb, siny*siny
return np.array([[a, b*cosy, c*cosb],
[0, b*siny, c*(cosa - cosb * cosy)/siny],
[0, 0, c*np.sqrt(1 - (cos2a + cos2b - 2*cosa*cosb*cosy)/sin2y)]]).T
@staticmethod
def reciprocal_lattice(lattice_vectors):
"""
Return the reciprocal lattice transformation matrix associated with a
direct lattice vector transformation matrix.
"""
return 2*np.pi*np.linalg.inv(lattice_vectors.T)
def intensity(self, G):
"""
Returns the intensity associated with the reciprocal lattice point G.
The value is returned from a stored value calculated when the lattice
is generated.
"""
G = np.asarray(G)
initial_shape = G.shape
G = G.reshape((np.prod(initial_shape[:-1]), initial_shape[-1]))
idx = tuple((G - self.sphere_bounds[0]).T.reshape(self.dimension, -1))
return self.intensities[idx].reshape(initial_shape[:-1])
def intensity_bulk(self, G):
"""
Return the intensity associated with reciprocal lattice vectors G
defined in reciprocal lattice coordinates. Intensity is calculated
using the atomic form factors defined in the atomic basis.
"""
if self.basis is None:
print('Atomic basis not defined for lattice')
return
        # Bragg's law: scattering-vector magnitude k = |G| / (4*pi) = sin(theta)/lambda
k = np.linalg.norm(np.dot(G, self.rlv), axis = -1) / (4 * np.pi)
f_m = memoize(lambda f : f(k))
f_k = [f_m(f) for f in self.basis.basis_functions]
f_k = np.stack(f_k, axis = -1)
proj = 2*np.pi*np.dot(G, self.basis.basis_axes)
F = np.stack((
ut.inner1d(np.cos(proj), f_k),
ut.inner1d(np.sin(proj), f_k),
)) / self.basis.n
# return sqrt of intensities
res = np.linalg.norm(F, axis = 0)
return res
def sphere_bound(self, r = 20):
"""
Return an estimate for the bounds of reciprocal lattice indices within
a radius r (in units of 1/A) from the origin.
"""
s = round_to_infinity(r / np.linalg.norm(self.rlv, axis = 1)).astype(int)
return -s, s
def generate_lattice(self, bound = 20, recalculate = False):
"""
Generates the intensity-weighted reciprocal lattice within a given bound
around the origin in reciprocal space and associated radii.
"""
if not recalculate and self.generated_lattice is not None:
return self.generated_lattice
self.sphere_bounds = self.sphere_bound(bound)
pts = np.stack(np.meshgrid(*(list(range(i, j+1))
for i, j in zip(*self.sphere_bounds))), axis = -1)
rlv = np.dot(pts, self.rlv)
I = self.intensity_bulk(pts)
# cached intensities for quick indexing
self.intensities = I
idx = np.unravel_index(np.argmax(I), I.shape)
self.max_intensity = I[idx]
self.reciprocal_lattice_radii = self.reciprocal_lattice_sphere_radius_bulk(I)
picker = np.linalg.norm(rlv, axis = -1) < bound
picker *= (I >= 1/self.basis.n)
pts = pts[picker]
rlv = rlv[picker]
I = I[picker]
R_i = self.reciprocal_lattice_radii[picker]
self.generated_lattice = [pts, rlv, I, R_i]
return self.generated_lattice
def reciprocal_lattice_sphere_radius(self, G):
"""
Returns the radius associated with the reciprocal lattice point G.
The value is returned from a stored value calculated when the lattice
is generated.
"""
G = np.asarray(G)
initial_shape = G.shape
idx = tuple((G - self.sphere_bounds[0]).T.reshape(self.dimension, -1))
return self.reciprocal_lattice_radii[idx].reshape(initial_shape[:-1])
def reciprocal_lattice_sphere_radius_bulk(self, intensity):
"""Return the radius associated with a given reciprocal lattice intensity"""
return self.rl_scaling_factor * (intensity / self.max_intensity)
def closest_reciprocal_lattice_point(self, pt):
"""
Returns the reciprocal lattice points closest to arbitrary vectors in
reciprocal space.
"""
base_coords = np.floor(np.dot(pt, self.rlvi)).astype(int)
lattice_coords = self.neighbours2 + base_coords
rectangular_coords = np.dot(lattice_coords, self.rlv)
dists = np.linalg.norm(rectangular_coords - pt, axis = -1)
idx = np.argmin(dists, axis=0)
return (np.choose(idx, dists),
np.choose(idx[:, np.newaxis], lattice_coords),
np.choose(idx[:, np.newaxis], rectangular_coords))
def d_spacing(self, G):
"""Calculates the d-spacing associated with a reciprocal lattice point G"""
d = 2*np.pi/np.linalg.norm(np.dot(G, self.rlv), axis = -1)
return d
def direction_vector(self, *indices):
"""
Return the real coordinate vector corresponding to the
given crystal direction index
"""
indices = np.asarray(indices)
return np.dot(indices, self.lv)
def plane_vector(self, *indices):
"""
Return the reciprocal coordinate vector corresponding to the
given crystal plane index.
"""
indices = np.asarray(indices)
return np.dot(indices, self.rlv)
def apply_transform(self, M):
"""
Applies a general transform M to the entire lattice and all values
that depend on the lattice orientation.
"""
self.lv = np.dot(M, self.lv.T).T
self.rlv = np.dot(M, self.rlv.T).T
self.lvi = np.dot(M, self.lvi)
self.rlvi = np.dot(M, self.rlvi)
# recalculate generated lattice points
if self.generated_lattice:
self.generated_lattice[1] = np.dot(self.generated_lattice[0], self.rlv)
self.orientation = np.dot(M, self.orientation)
def align_lattice(self, v, *indices, direction = False):
"""
Aligns the lattice plane index with the direct coordinate vector v.
If direction is True, aligns the lattice direction index instead.
"""
fv = [self.plane_vector, self.direction_vector][direction]
M = align_a_to_b(fv(*indices), v)
self.apply_transform(M)
def rotate_lattice(self, angle, axis, radians = True):
"""A simple lattice rotation through an angle around a specified axis."""
if not radians:
angle = np.radians(angle)
M = rotation_matrix(angle, axis)
self.apply_transform(M)
def export(self):
"""
Returns a dictionary of values defining the current lattice object in
its current state.
"""
if not self.radians:
alpha, beta, gamma = np.degrees([self.alpha, self.beta, self.gamma])
else:
alpha, beta, gamma = self.alpha, self.beta, self.gamma
d = collections.OrderedDict([
('radians',self.radians),
('a',self.a),
('b',self.b),
('c',self.c),
('alpha',alpha),
('beta',beta),
('gamma',gamma),
('basis',self.basis.export()),
('orientation',self.orientation.tolist()),
('rl_scaling_factor',self.rl_scaling_factor)
])
return d
def export_json(self, fname):
"""
Saves details of the lattice object state to a specified file name
in json format.
"""
with open(fname, 'w') as f:
json.dump(self.export(), f, indent = 2)
@classmethod
def load(cls, lattice_dic):
"""
Return a Lattice object created from a dictionary of values defining a
lattice object.
"""
b = Basis.load(lattice_dic['basis'])
return cls(**{**lattice_dic, 'basis':b})
@classmethod
def load_json(cls, fname):
"""
Return a Lattice object created from parameters defined in a .json file.
"""
with open(fname, 'r') as f:
return cls.load(json.load(f))
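# Illustrative sketch (not part of the original module): round-trip a lattice's
# state through the JSON export/load helpers above.  ``lat`` is assumed to be an
# existing Lattice instance; ``fname`` is a hypothetical file name.
def _example_lattice_roundtrip(lat, fname="lattice_state.json"):
    lat.export_json(fname)
    return Lattice.load_json(fname)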
class Basis:
"""
Atomic basis objects that store the positions of atoms in a unit cell
using fractional coordinates of the cell's primitive vectors and the
element type.
Atomic form factors are calculated using the Cromer-Mann parameterisation.
"""
def __init__(self, *atoms, d = 3):
"""
Create an atomic basis from the atoms provided as arguments.
Atoms take the form (elem, ri) where elem is a string of the element
or ion's chemical symbol, e.g. "N" and ri is the fractional coordinates
of the atom in the unit cell.
"""
self.atoms = []
self.elems = []
self.d = d
self.basis_axes = np.array([]).reshape(self.d, 0)
self.basis_functions = []
for atom in atoms:
self.add_atom(*atom)
# normalization constant
self.n = sum(f(0) for f in self.basis_functions)
def add_atom(self, elem, ri, u = 0):
"""Adds an atom to the current Basis."""
ri = np.asarray(ri)
if len(ri) != self.d and not self.atoms:
# change dimension of basis to match initially added atom
self.d = len(ri)
self.basis_axes = np.array([]).reshape(self.d, 0)
self.atoms.append((elem, ri))
self.elems.append(elem)
self.basis_axes = np.hstack((self.basis_axes, ri.reshape(self.d, 1)))
self.basis_functions.append(self.atomic_form_factor(elem, u = u))
# update normalization
self.n = sum(f(0) for f in self.basis_functions)
@staticmethod
def atomic_form_factor(elem, u = 0):
"""
Returns a function to calculate the non-dispersive part of the atomic
scattering factor using the 9 parameter Cromer-Mann parameterization.
"""
cmparams = cromermann_params.params[elem]
const_shift = cmparams[8]
multipliers = np.asarray([const_shift] + cmparams[0:4], dtype = float)
powers = -np.asarray([0] + cmparams[4:8], dtype = float) - u
def f(k):
res = np.dot(np.exp(powers * np.square(k)[..., np.newaxis]), multipliers)
if np.isscalar(k):
return res.item()
return res
return f
def export(self):
"""Returns a dictionary of atoms in the current basis object."""
d = collections.defaultdict(list)
for e, ri in self.atoms:
d[e].append(ri.tolist())
return d
def export_json(self, fname):
"""Saves the basis of atoms to a specified file name in json format."""
with open(fname, 'w') as f:
json.dump(self.export(), f)
@classmethod
def load(cls, atom_dic):
"""Return a Basis object from a dictionary of atoms with their coordinates."""
return cls(*((e, ri) for e in atom_dic for ri in atom_dic[e]))
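# A minimal usage sketch (not part of the original module).  The element symbols
# below are assumptions: they only work if cromermann_params.params has entries
# for them.
def _example_basis_usage():
    basis = Basis(("Na", [0.0, 0.0, 0.0]), ("Cl", [0.5, 0.5, 0.5]))
    f_na = Basis.atomic_form_factor("Na")
    # form factor evaluated on a few scattering vector magnitudes
    return f_na(np.linspace(0.0, 1.0, 5)), dict(basis.export())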
|
|
"""Dependency injector dict provider unit tests."""
import sys
import unittest
from dependency_injector import providers
class DictTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Dict()))
def test_provided_instance_provider(self):
provider = providers.Dict()
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_init_with_non_string_keys(self):
a1 = object()
a2 = object()
provider = providers.Dict({a1: 'i1', a2: 'i2'})
dict1 = provider()
dict2 = provider()
self.assertEqual(dict1, {a1: 'i1', a2: 'i2'})
self.assertEqual(dict2, {a1: 'i1', a2: 'i2'})
self.assertIsNot(dict1, dict2)
def test_init_with_string_and_non_string_keys(self):
a1 = object()
provider = providers.Dict({a1: 'i1'}, a2='i2')
dict1 = provider()
dict2 = provider()
self.assertEqual(dict1, {a1: 'i1', 'a2': 'i2'})
self.assertEqual(dict2, {a1: 'i1', 'a2': 'i2'})
self.assertIsNot(dict1, dict2)
def test_call_with_init_keyword_args(self):
provider = providers.Dict(a1='i1', a2='i2')
dict1 = provider()
dict2 = provider()
self.assertEqual(dict1, {'a1': 'i1', 'a2': 'i2'})
self.assertEqual(dict2, {'a1': 'i1', 'a2': 'i2'})
self.assertIsNot(dict1, dict2)
def test_call_with_context_keyword_args(self):
provider = providers.Dict(a1='i1', a2='i2')
self.assertEqual(
provider(a3='i3', a4='i4'),
{'a1': 'i1', 'a2': 'i2', 'a3': 'i3', 'a4': 'i4'},
)
def test_call_with_provider(self):
provider = providers.Dict(
a1=providers.Factory(str, 'i1'),
a2=providers.Factory(str, 'i2'),
)
self.assertEqual(provider(), {'a1': 'i1', 'a2': 'i2'})
def test_fluent_interface(self):
provider = providers.Dict() \
.add_kwargs(a1='i1', a2='i2')
self.assertEqual(provider(), {'a1': 'i1', 'a2': 'i2'})
def test_add_kwargs(self):
provider = providers.Dict() \
.add_kwargs(a1='i1') \
.add_kwargs(a2='i2')
self.assertEqual(provider.kwargs, {'a1': 'i1', 'a2': 'i2'})
def test_add_kwargs_non_string_keys(self):
a1 = object()
a2 = object()
provider = providers.Dict() \
.add_kwargs({a1: 'i1'}) \
.add_kwargs({a2: 'i2'})
self.assertEqual(provider.kwargs, {a1: 'i1', a2: 'i2'})
def test_add_kwargs_string_and_non_string_keys(self):
a2 = object()
provider = providers.Dict() \
.add_kwargs(a1='i1') \
.add_kwargs({a2: 'i2'})
self.assertEqual(provider.kwargs, {'a1': 'i1', a2: 'i2'})
def test_set_kwargs(self):
provider = providers.Dict() \
.add_kwargs(a1='i1', a2='i2') \
.set_kwargs(a3='i3', a4='i4')
self.assertEqual(provider.kwargs, {'a3': 'i3', 'a4': 'i4'})
def test_set_kwargs_non_string_keys(self):
a3 = object()
a4 = object()
provider = providers.Dict() \
.add_kwargs(a1='i1', a2='i2') \
.set_kwargs({a3: 'i3', a4: 'i4'})
self.assertEqual(provider.kwargs, {a3: 'i3', a4: 'i4'})
def test_set_kwargs_string_and_non_string_keys(self):
a3 = object()
provider = providers.Dict() \
.add_kwargs(a1='i1', a2='i2') \
.set_kwargs({a3: 'i3'}, a4='i4')
self.assertEqual(provider.kwargs, {a3: 'i3', 'a4': 'i4'})
def test_clear_kwargs(self):
provider = providers.Dict() \
.add_kwargs(a1='i1', a2='i2') \
.clear_kwargs()
self.assertEqual(provider.kwargs, {})
def test_call_overridden(self):
provider = providers.Dict(a1='i1', a2='i2')
overriding_provider1 = providers.Dict(a2='i2', a3='i3')
overriding_provider2 = providers.Dict(a3='i3', a4='i4')
provider.override(overriding_provider1)
provider.override(overriding_provider2)
instance1 = provider()
instance2 = provider()
self.assertIsNot(instance1, instance2)
self.assertEqual(instance1, {'a3': 'i3', 'a4': 'i4'})
self.assertEqual(instance2, {'a3': 'i3', 'a4': 'i4'})
def test_deepcopy(self):
provider = providers.Dict(a1='i1', a2='i2')
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.kwargs, provider_copy.kwargs)
self.assertIsInstance(provider, providers.Dict)
def test_deepcopy_from_memo(self):
provider = providers.Dict(a1='i1', a2='i2')
provider_copy_memo = providers.Dict(a1='i1', a2='i2')
provider_copy = providers.deepcopy(
provider,
memo={id(provider): provider_copy_memo},
)
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_kwargs(self):
provider = providers.Dict()
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_kwargs(d1=dependent_provider1, d2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs['d1']
dependent_provider_copy2 = provider_copy.kwargs['d2']
self.assertNotEqual(provider.kwargs, provider_copy.kwargs)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_kwargs_non_string_keys(self):
a1 = object()
a2 = object()
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider = providers.Dict({a1: dependent_provider1, a2: dependent_provider2})
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs[a1]
dependent_provider_copy2 = provider_copy.kwargs[a2]
self.assertNotEqual(provider.kwargs, provider_copy.kwargs)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_overridden(self):
provider = providers.Dict()
object_provider = providers.Object(object())
provider.override(object_provider)
provider_copy = providers.deepcopy(provider)
object_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.kwargs, provider_copy.kwargs)
self.assertIsInstance(provider, providers.Dict)
self.assertIsNot(object_provider, object_provider_copy)
self.assertIsInstance(object_provider_copy, providers.Object)
def test_deepcopy_with_sys_streams(self):
provider = providers.Dict()
provider.add_kwargs(stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider_copy, providers.Dict)
self.assertIs(provider.kwargs['stdin'], sys.stdin)
self.assertIs(provider.kwargs['stdout'], sys.stdout)
self.assertIs(provider.kwargs['stderr'], sys.stderr)
def test_repr(self):
provider = providers.Dict(a1=1, a2=2)
self.assertEqual(repr(provider),
'<dependency_injector.providers.'
'Dict({0}) at {1}>'.format(
repr(provider.kwargs),
hex(id(provider))))
|
|
# ~~APISearch:Feature->API:Feature~~
from portality.api.common import Api
from portality import util
from portality.core import app
from portality.lib import dates
from portality import models
import esprit
import re, json, uuid, os
from copy import deepcopy
from flask import url_for
from portality.ui.messages import Messages
from portality.bll.doaj import DOAJ
class DiscoveryException(Exception):
pass
class SearchResult(object):
def __init__(self, raw=None):
self.data = raw if raw is not None else {}
def query_substitute(query, substitutions):
if len(list(substitutions.keys())) == 0:
return query
# apply the regex escapes to the substitutions, so we know they
# are ready to be matched
escsubs = {}
for k, v in substitutions.items():
escsubs[k.replace(":", "\\:")] = v
# define a function which takes the match group and returns the
# substitution if there is one
def rep(match):
for k, v in escsubs.items():
if k == match.group(1):
return v
return match.group(1)
# define the regular expressions for splitting and then extracting
# the field to be substituted
split_rx = r"([^\\]:)"
field_rx = r'([^\s\+\-\(\)"]+?):$'
# split the query around any unescaped colons
bits = re.split(split_rx, query)
# stitch back together the split sections and the separators
segs = ([bits[i] + bits[i+1] for i in range(0, len(bits), 2) if i+1 < len(bits)] + [bits[len(bits) - 1]]) if len(bits) % 2 == 1 else []
# substitute the fields as required
subs = []
for seg in segs:
if seg.endswith(":"):
subs.append(re.sub(field_rx, rep, seg))
else:
subs.append(seg)
return ":".join(subs)
def allowed(query, wildcards=False, fuzzy=False):
if not wildcards:
rx = "(.+[^\\\\][\?\*]+.*)"
if re.search(rx, query):
return False
if not fuzzy:
# this covers both fuzzy searching and proximity searching
rx = "(.+[^\\\\]~[0-9]{0,1}[\.]{0,1}[0-9]{0,1})"
if re.search(rx, query):
return False
return True
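# Illustrative behaviour of allowed() (not part of the original module): an
# unescaped wildcard such as "cell*" or a fuzzy/proximity marker such as
# "cell~2" is rejected by default, while a plain term such as "cell" passes.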
def escape(query):
# just escapes all instances of "/" in the query with "\\/"
# and all instances of ":" with "\\:"
# Functions which do the replacements
def slasher(m):
data = m.group(0)[0] + "\\/"
return data
def colon_escaper(q):
# we need to escape every colon that is not after keyword and is not already escaped
# colons after keywords are: first one and every first after AND or OR
parts = q.split(":")
for i in range(1, len(parts)-1):
if not parts[i].endswith('\\') and ' AND ' not in parts[i] and ' OR ' not in parts[i]:
parts[i] = parts[i] + "\\"
query = ":".join(parts)
return query
# the regular expression which looks for an unescaped /
slash_rx = r"[^\\]/"
# because the regex matches two characters, neighbouring /s will not both
# get replaced at the same time because re.sub looks at "non overlapping matches".
# This means "//" will not be properly escaped. So, we run the re.subn
# function repeatedly until the number of replacements drops to 0
count = 1
while count > 0:
query, count = re.subn(slash_rx, slasher, query)
query = colon_escaper(query)
return query
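# Illustrative sketch (not part of the original module): forward slashes are
# escaped for the Elasticsearch query string syntax, e.g. a DOI-like term.
def _example_escape():
    return escape("10.1371/journal")  # -> "10.1371\\/journal"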
# ~~->Swagger:Feature~~
# ~~->API:Documentation~~
DISCOVERY_API_SWAG = {
'application': json.loads(util.load_file(os.path.join(app.config['BASE_FILE_PATH'], 'api', 'current', 'discovery_api_application_swag.json'))),
'journal': json.loads(util.load_file(os.path.join(app.config['BASE_FILE_PATH'], 'api', 'current', 'discovery_api_journal_swag.json'))),
'article': json.loads(util.load_file(os.path.join(app.config['BASE_FILE_PATH'], 'api', 'current', 'discovery_api_article_swag.json')))
}
max_page_size = str(app.config.get("DISCOVERY_MAX_PAGE_SIZE", 100))
class DiscoveryApi(Api):
@staticmethod
def get_application_swag():
description = DISCOVERY_API_SWAG['application']["parameters"][3]["description"]
DISCOVERY_API_SWAG['application']["parameters"][3]["description"] = \
''.join([description, " The page size limit is ", max_page_size])
return deepcopy(DISCOVERY_API_SWAG['application'])
@staticmethod
def get_journal_swag():
description = DISCOVERY_API_SWAG['journal']["parameters"][2]["description"]
DISCOVERY_API_SWAG['journal']["parameters"][2]["description"] = \
''.join([description, " The page size limit is ", max_page_size])
return deepcopy(DISCOVERY_API_SWAG['journal'])
@staticmethod
def get_article_swag():
description = DISCOVERY_API_SWAG['article']["parameters"][2]["description"]
DISCOVERY_API_SWAG['article']["parameters"][2]["description"] = \
''.join([description, " The page size limit is ", max_page_size])
return deepcopy(DISCOVERY_API_SWAG['article'])
@classmethod
def _sanitise(cls, q, page, page_size, sort, search_subs, sort_subs, bulk):
if q is not None:
if not allowed(q):
raise DiscoveryException("Query contains disallowed Lucene features")
q = query_substitute(q, search_subs)
q = escape(q)
# sanitise the page size information
if page < 1:
page = 1
if bulk:
max_page_size = app.config.get("DISCOVERY_BULK_PAGE_SIZE", 1000)
else:
max_page_size = app.config.get("DISCOVERY_MAX_PAGE_SIZE", 100)
if page_size > max_page_size:
page_size = max_page_size
elif page_size < 1:
page_size = 10
# calculate the position of the from cursor in the document set
fro = (page - 1) * page_size
# If fro is greater than the max allowed, throw error
# using bulk to provide an override when needed
max_records = app.config.get("DISCOVERY_MAX_RECORDS_SIZE", 1000)
if fro >= max_records:
message = Messages.PREVENT_DEEP_PAGING_IN_API.format(
max_records=max_records,
data_dump_url=app.config.get("BASE_URL") + url_for("doaj.public_data_dump"),
oai_journal_url=app.config.get("BASE_URL") + url_for("oaipmh.oaipmh"),
oai_article_url=app.config.get("BASE_URL") + url_for("oaipmh.oaipmh", specified="article")
)
raise DiscoveryException(message)
# interpret the sort field into the form required by the query
sortby = None
sortdir = None
if sort is not None:
if ":" in sort:
bits = sort.split(":")
if len(bits) != 2:
raise DiscoveryException("Malformed sort parameter")
sortby = bits[0]
if sortby in sort_subs:
sortby = sort_subs[sortby]
if bits[1] in ["asc", "desc"]:
sortdir = bits[1]
else:
raise DiscoveryException("Sort direction must be 'asc' or 'desc'")
else:
sortby = sort
if sortby in sort_subs:
sortby = sort_subs[sortby]
return q, page, fro, page_size, sortby, sortdir
@classmethod
def _make_query(cls, q, page, page_size, sort, index_type, bulk):
if index_type == 'article':
search_subs = app.config.get("DISCOVERY_ARTICLE_SEARCH_SUBS", {})
sort_subs = app.config.get("DISCOVERY_ARTICLE_SORT_SUBS", {})
elif index_type == 'journal':
search_subs = app.config.get("DISCOVERY_JOURNAL_SEARCH_SUBS", {})
sort_subs = app.config.get("DISCOVERY_JOURNAL_SORT_SUBS", {})
else:
search_subs = app.config.get("DISCOVERY_APPLICATION_SEARCH_SUBS", {})
sort_subs = app.config.get("DISCOVERY_APPLICATION_SORT_SUBS", {})
# sanitise and prep the inputs
q, page, fro, page_size, sortby, sortdir = cls._sanitise(q, page, page_size, sort, search_subs, sort_subs, bulk)
search_query = SearchQuery(q, fro, page_size, sortby, sortdir)
raw_query = search_query.query()
return raw_query, page, page_size
@staticmethod
def _calc_pagination(total, page_size, requested_page):
"""
Calculate pagination for API results like # of pages and the last page.
Modified from https://github.com/Pylons/paginate/blob/master/paginate/__init__.py#L260 ,
a pagination library. (__init__.py, Page.__init__)
"""
FIRST_PAGE = 1
if total == 0:
return 1, None, None, 1
page_count = ((total - 1) // page_size) + 1
last_page = FIRST_PAGE + page_count - 1
# Links to previous and next page
if requested_page > FIRST_PAGE:
previous_page = requested_page - 1
else:
previous_page = None
if requested_page < last_page:
next_page = requested_page + 1
else:
next_page = None
return page_count, previous_page, next_page, last_page
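# Worked example (illustrative): total=95, page_size=10, requested_page=3
# gives page_count=10, previous_page=2, next_page=4, last_page=10, while
# total=0 gives (1, None, None, 1).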
@classmethod
def _make_response(cls, endpoint, res, q, page, page_size, sort, obs):
total = res.get("hits", {}).get("total", {}).get('value', 0)
page_count, previous_page, next_page, last_page = cls._calc_pagination(total, page_size, page)
# build the response object
result = {
"total": total,
"page": page,
"pageSize": page_size,
"timestamp": dates.now_with_microseconds(),
"query": q,
"results": obs
}
if previous_page is not None:
result["prev"] = app.config['BASE_URL'] + url_for(app.config['API_CURRENT_BLUEPRINT_NAME'] + '.' + endpoint, search_query=q, page=previous_page, pageSize=page_size, sort=sort)
if next_page is not None:
result["next"] = app.config['BASE_URL'] + url_for(app.config['API_CURRENT_BLUEPRINT_NAME'] + '.' + endpoint, search_query=q, page=next_page, pageSize=page_size, sort=sort)
if last_page is not None:
result["last"] = app.config['BASE_URL'] + url_for(app.config['API_CURRENT_BLUEPRINT_NAME'] + '.' + endpoint, search_query=q, page=last_page, pageSize=page_size, sort=sort)
if sort is not None:
result["sort"] = sort
return SearchResult(result)
@classmethod
def search(cls, index_type, account, q, page, page_size, sort=None):
if index_type not in ['article', 'journal', 'application']:
raise DiscoveryException("There was an error executing your query for {0}. Unknown type.)".format(index_type))
if index_type == 'article':
endpoint = 'search_articles'
klass = models.Article # ~~->Article:Model~~
elif index_type == 'journal':
endpoint = 'search_journals'
klass = models.Journal # ~~->Journal:Model~~
else:
endpoint = 'search_applications'
klass = models.Suggestion #~~->Application:Model~~
raw_query, page, page_size = cls._make_query(q, page, page_size, sort, index_type, False)
# execute the query against the articles
# ~~->Query:Service~~
query_service = DOAJ.queryService()
try:
res = query_service.search('api_query', index_type, raw_query, account, None)
except Exception as e:
magic = uuid.uuid1()
msg = e.error if hasattr(e, "error") else e.message if hasattr(e, "message") else str(e)
app.logger.error(u"Error executing discovery query search for {i}: {x} (ref: {y})".format(i=index_type, x=msg, y=magic))
raise DiscoveryException("There was an error executing your query (ref: {y})".format(y=magic))
obs = [klass(**raw) for raw in esprit.raw.unpack_json_result(res)]
return cls._make_response(endpoint, res, q, page, page_size, sort, obs)
@classmethod
def scroll(cls, index_type, account, q, page_size, sort=None, scan=False):
if index_type not in ['article', 'journal', 'application']:
raise DiscoveryException("There was an error executing your query for {0}. Unknown type.)".format(index_type))
page = 1 # Not used in scroll
raw_query, page, page_size = cls._make_query(q, page, page_size, sort, index_type, True)
# execute the query against the articles
query_service = DOAJ.queryService()
for result in query_service.scroll('api_query', index_type, raw_query, account, page_size, scan=scan):
yield result
class SearchQuery(object):
"""
~~->Search:Query~~
~~Search:Query->Elasticsearch:Technology~~
"""
def __init__(self, qs, fro, psize, sortby=None, sortdir=None):
self.qs = qs
self.fro = fro
self.psize = psize
self.sortby = sortby
self.sortdir = sortdir if sortdir is not None else "asc"
def query(self):
q = {
"track_total_hits" : True,
"from": self.fro,
"size": self.psize
}
if self.qs is not None:
q["query"] = {
"query_string": {
"query": self.qs,
"default_operator": "AND"
}
}
else:
q["query"] = {"match_all": {}}
if self.sortby is not None:
q["sort"] = [{self.sortby: {"order": self.sortdir, "mode": "min"}}]
return q
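# Illustrative sketch (not part of the original module): the raw Elasticsearch
# body produced for a fielded query sorted by a hypothetical date field.
def _example_search_query():
    # Result: {"track_total_hits": True, "from": 0, "size": 10,
    #          "query": {"query_string": {"query": "bibjson.title:cell",
    #                                     "default_operator": "AND"}},
    #          "sort": [{"created_date": {"order": "desc", "mode": "min"}}]}
    return SearchQuery("bibjson.title:cell", fro=0, psize=10,
                       sortby="created_date", sortdir="desc").query()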
|
|
"""Tests for the module 'graph_ingestion'."""
from unittest import mock
from f8a_jobs.graph_ingestion import (ingest_epv_into_graph,
ingest_epv,
ingest_selective_epv_into_graph,
ingest_selective_epv,
ingest_epv_internal,
ingest_selective_epv_internal,
trigger_workerflow,
trigger_workerflow_internal)
from f8a_jobs import graph_ingestion
data_v1 = {
'body': {
"ecosystem": "npm",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
],
"force": False,
"force_graph_sync": True,
"recursive_limit": 0
}
}
data_v2 = {
'body': {
"ecosystem": "npm",
"packages": [{
"pkg": "pkg1",
"ver": "ver1"
}],
"force": False,
"force_graph_sync": True,
"recursive_limit": 0
}
}
data_v3 = {
'body': {
"ecosystem": "nuget",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
],
"force": False,
"force_graph_sync": True,
"recursive_limit": 0
}
}
data_v4 = {
'body': {
"ecosystem": "golang",
"packages": [{
"package": "pkg1"
}
],
"flow_name": "flow_name",
"task_names": [
"TASK_1",
"TASK_2",
"TASK_3",
"TASK_4"
]
}
}
data_v5 = {
'body': {
"ecosystem": "npm",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
],
"force": False,
"force_graph_sync": True,
"recursive_limit": 0,
"flow_name": 'flow_name'
}
}
data_v6 = {
'body': {
"ecosystem": "golang",
"packages": [{
"package": "pkg1",
"url": "https://github.com/",
"version": "ver1"
}
],
"flow_name": "flow_name",
"task_names": [
"TASK_1",
"TASK_2",
"TASK_3",
"TASK_4"
]
}
}
data_v7 = {
"ecosystem": "nuget",
"packages": [{
"package": "pkg1"
}
],
"source": "git-refresh",
"task_names": [
"TASK_1",
"TASK_2",
"TASK_3",
"TASK_4"
]
}
data_v8 = {
'body': {
"ecosystem": "golang",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
]
}
}
data_v9 = {
'body': {
"ecosystem": "npm",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
],
"source": "api"
}
}
data_v10 = {
'body': {
"ecosystem": "npm",
"packages": [{
"package": "pkg1",
"version": "ver1"
}
],
"flow_name": None
}
}
data_v11 = {
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow",
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
}
}
data_v12 = {
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "test",
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
}
}
data_v13 = {
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow",
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
}
}
class Dispacher:
"""Dispatcher class returned by Selinon.run_flow."""
id = "dummy_dispacher_id"
class DispacherError:
"""DispatcherError class returned by Selinon.run_flow."""
dummy_id = None
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=Dispacher())
def test_ingest_epv_into_graph(_mock):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v1)
expected = ({
'ecosystem': 'npm',
'force': False,
'force_graph_sync': True,
'packages': [{
'dispacher_id': 'dummy_dispacher_id',
'package': 'pkg1',
'version': 'ver1'}],
'recursive_limit': 0
}, 201)
assert result == expected
def test_ingest_epv_into_graph4():
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v10)
expected = ({"message": "Failed to initiate worker flow."}, 500)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=Dispacher())
def test_ingest_epv_into_graph5(_mock):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v5)
expected = ({
'ecosystem': 'npm',
'force': False,
'force_graph_sync': True,
'packages': [{
'dispacher_id': 'dummy_dispacher_id',
'package': 'pkg1',
'version': 'ver1'}],
'recursive_limit': 0,
'flow_name': 'flow_name'
}, 201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion.run_flow_selective', return_value=Dispacher())
def test_ingest_selective_epv_into_graph(_mock):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_selective_epv_into_graph(data_v4)
expected = ({
'ecosystem': 'golang',
'flow_name': 'flow_name',
'packages': [{
'dispacher_id': 'dummy_dispacher_id',
'package': 'pkg1'}],
'task_names': ['TASK_1', 'TASK_2', 'TASK_3', 'TASK_4']},
201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion.run_flow_selective', return_value=Dispacher())
def test_ingest_selective_epv_into_graph2(_mock):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_selective_epv_into_graph(data_v6)
expected = ({
'ecosystem': 'golang',
'flow_name': 'flow_name',
'packages': [{
'dispacher_id': 'dummy_dispacher_id',
'package': 'pkg1',
'url': 'https://github.com/',
'version': 'ver1'}],
'task_names': ['TASK_1', 'TASK_2', 'TASK_3', 'TASK_4']},
201)
assert result == expected
def test_ingest_selective_epv():
"""Tests for 'ingest_epv'."""
result = ingest_selective_epv(body=data_v7)
expected = ({
'message': 'Failed to initiate worker flow.'
}, 500)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion.GithubUtils.is_pseudo_version', return_value=True)
@mock.patch('f8a_jobs.graph_ingestion.is_pkg_public', return_value=True)
def test_ingest_epv_into_graph6(_mock, _mock1):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v8)
expected = ({'ecosystem': 'golang',
'packages': [{
'error_message': 'Golang pseudo version is not supported.',
'package': 'pkg1',
'version': 'ver1'}]},
201)
assert result == expected
def test_ingest_selective_epv_internal():
"""Tests for 'ingest_epv_internal'."""
result = ingest_selective_epv_internal(body=data_v7)
expected = ({
'message': 'Failed to initiate worker flow.'
}, 500)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._DISABLE_UNKNOWN_PACKAGE_FLOW', True)
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=Dispacher())
def test_ingest_epv_into_graph7(_mock1):
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v9)
expected = ({'ecosystem': 'npm',
'message': 'Unknown package ingestion is disabled.',
'packages': [{
'package': 'pkg1',
'version': 'ver1'}],
'source': 'api'},
201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', False)
def test_ingest_epv_into_graph8():
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_into_graph(data_v9)
expected = ({
'ecosystem': 'npm',
'message': 'Worker flows are disabled.',
'packages': [{
'package': 'pkg1',
'version': 'ver1'}],
'source': 'api'},
201)
assert result == expected
def test_ingest_selective_epv_into_graph3():
"""Tests for 'ingest_selective_epv_into_graph'."""
graph_ingestion._INVOKE_API_WORKERS = False
result = ingest_selective_epv_into_graph(data_v6)
expected = ({
'ecosystem': 'golang',
'flow_name': 'flow_name',
'message': 'Worker flows are disabled.',
'packages': [{
'dispacher_id': 'dummy_dispacher_id',
'package': 'pkg1',
'url': 'https://github.com/',
'version': 'ver1'}],
'task_names': ['TASK_1', 'TASK_2', 'TASK_3', 'TASK_4']},
201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', False)
def test_ingest_epv_internal():
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv_internal(body=data_v9)
expected = ({
'body': {
'ecosystem': 'npm',
'message': 'Worker flows are disabled.',
'packages': [{
'package': 'pkg1',
'version': 'ver1'}],
'source': 'api'},
'message': 'Worker flows are disabled.'},
201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', False)
def test_ingest_epv():
"""Tests for 'ingest_epv_into_graph'."""
result = ingest_epv(body=data_v9)
expected = ({
'body': {
'ecosystem': 'npm',
'message': 'Worker flows are disabled.',
'packages': [{
'package': 'pkg1',
'version': 'ver1'}],
'source': 'api'},
'message': 'Worker flows are disabled.'},
201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', True)
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=Dispacher())
def test_trigger_workerflow_1(_mock):
"""Tests for 'trigger_workerflow'."""
result = trigger_workerflow(body=data_v11)
expected = ({
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
},
"dispacher_id": "dummy_dispacher_id",
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow"
}, 201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', False)
def test_trigger_workerflow_2():
"""Tests for 'trigger_workerflow'."""
result = trigger_workerflow(body=data_v13)
expected = ({
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
},
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow",
'message': "Worker flows are disabled."
}, 201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', True)
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=DispacherError())
def test_trigger_workerflow_3(_mock):
"""Tests for 'trigger_workflow'."""
result = trigger_workerflow(body=data_v12)
expected = ({
'message': 'Failed to initiate worker flow.'
}, 500)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', True)
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=Dispacher())
def test_trigger_workerflow_internal_1(_mock):
"""Tests for 'trigger_workerflow_internal'."""
result = trigger_workerflow_internal(body=data_v11)
expected = ({
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
},
"dispacher_id": "dummy_dispacher_id",
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow"
}, 201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', False)
def test_trigger_workerflow_internal_2():
"""Tests for 'trigger_workerflow_internal'."""
result = trigger_workerflow_internal(body=data_v13)
expected = ({
"data": {
"api_name": "component_analyses_post",
"manifest_hash": "sadasdsfsdf4545dsfdsfdfdgffds",
"ecosystem": "pypi",
"packages_list": {
'name': "ejs",
'given_name': "ejs",
'version': "1.0.0"
},
"user_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"user_agent": "unit-test",
"source": "unit-test",
"telemetry_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7"
},
"external_request_id": "ccddf6b7-34a7-4927-a273-146b17b6b1f7",
"flowname": "componentApiFlow",
'message': "Worker flows are disabled."
}, 201)
assert result == expected
@mock.patch('f8a_jobs.graph_ingestion._INVOKE_API_WORKERS', True)
@mock.patch('f8a_jobs.graph_ingestion.run_flow', return_value=DispacherError())
def test_trigger_workerflow_internal_3(_mock):
"""Tests for 'trigger_workflow_internal'."""
result = trigger_workerflow_internal(body=data_v12)
expected = ({
'message': 'Failed to initiate worker flow.'
}, 500)
assert result == expected
|
|
"""
:math:`IC_TC_P` Colour Encoding
===============================
Defines the :math:`IC_TC_P` colour encoding related transformations:
- :func:`colour.RGB_to_ICtCp`
- :func:`colour.ICtCp_to_RGB`
- :func:`colour.XYZ_to_ICtCp`
- :func:`colour.ICtCp_to_XYZ`
References
----------
- :cite:`Dolby2016a` : Dolby. (2016). WHAT IS ICtCp? - INTRODUCTION.
https://www.dolby.com/us/en/technologies/dolby-vision/ICtCp-white-paper.pdf
- :cite:`InternationalTelecommunicationUnion2018` : International
Telecommunication Union. (2018). Recommendation ITU-R BT.2100-2 - Image
parameter values for high dynamic range television for use in production
and international programme exchange.
https://www.itu.int/dms_pubrec/itu-r/rec/bt/\
R-REC-BT.2100-2-201807-I!!PDF-E.pdf
- :cite:`Lu2016c` : Lu, T., Pu, F., Yin, P., Chen, T., Husak, W., Pytlarz,
J., Atkins, R., Froehlich, J., & Su, G.-M. (2016). ITP Colour Space and Its
Compression Performance for High Dynamic Range and Wide Colour Gamut Video
Distribution. ZTE Communications, 14(1), 32-38.
"""
from __future__ import annotations
import numpy as np
from colour.algebra import vector_dot
from colour.colorimetry import CCS_ILLUMINANTS
from colour.hints import ArrayLike, Floating, Literal, NDArray, Union
from colour.models.rgb import RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_RGB
from colour.models.rgb.transfer_functions import (
eotf_ST2084,
eotf_inverse_ST2084,
oetf_HLG_BT2100,
oetf_inverse_HLG_BT2100,
)
from colour.utilities import (
as_float_array,
domain_range_scale,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"MATRIX_ICTCP_RGB_TO_LMS",
"MATRIX_ICTCP_LMS_TO_RGB",
"MATRIX_ICTCP_LMS_P_TO_ICTCP",
"MATRIX_ICTCP_ICTCP_TO_LMS_P",
"MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2",
"MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2",
"RGB_to_ICtCp",
"ICtCp_to_RGB",
"XYZ_to_ICtCp",
"ICtCp_to_XYZ",
]
MATRIX_ICTCP_RGB_TO_LMS: NDArray = (
np.array(
[
[1688, 2146, 262],
[683, 2951, 462],
[99, 309, 3688],
]
)
/ 4096
)
"""*ITU-R BT.2020* colourspace to normalised cone responses matrix."""
MATRIX_ICTCP_LMS_TO_RGB: NDArray = np.linalg.inv(MATRIX_ICTCP_RGB_TO_LMS)
"""
:math:`IC_TC_P` colourspace normalised cone responses to *ITU-R BT.2020*
colourspace matrix.
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP: NDArray = (
np.array(
[
[2048, 2048, 0],
[6610, -13613, 7003],
[17933, -17390, -543],
]
)
/ 4096
)
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix.
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P: NDArray = np.linalg.inv(
MATRIX_ICTCP_LMS_P_TO_ICTCP
)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix.
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2: NDArray = (
np.array(
[
[2048, 2048, 0],
[3625, -7465, 3840],
[9500, -9212, -288],
]
)
/ 4096
)
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix as given in *ITU-R BT.2100-2*.
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2: NDArray = np.linalg.inv(
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2
)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix as given in *ITU-R BT.2100-2*.
"""
def RGB_to_ICtCp(
RGB: ArrayLike,
method: Union[
Literal[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
str,
] = "Dolby 2016",
L_p: Floating = 10000,
) -> NDArray:
"""
Convert from *ITU-R BT.2020* colourspace to :math:`IC_TC_P` colour
encoding.
Parameters
----------
RGB
*ITU-R BT.2020* colourspace array.
method
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
:class:`numpy.ndarray`
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF) is
[0.0001, 10000].
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> RGB = np.array([0.45620519, 0.03081071, 0.04091952])
>>> RGB_to_ICtCp(RGB) # doctest: +ELLIPSIS
array([ 0.0735136..., 0.0047525..., 0.0935159...])
>>> RGB_to_ICtCp(RGB, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.6256789..., -0.0198449..., 0.3591125...])
"""
RGB = as_float_array(RGB)
method = validate_method(
method,
[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
)
is_hlg_method = "hlg" in method
is_BT2100_2_method = "2100-2" in method
LMS = vector_dot(MATRIX_ICTCP_RGB_TO_LMS, RGB)
with domain_range_scale("ignore"):
LMS_p = (
oetf_HLG_BT2100(LMS)
if is_hlg_method
else eotf_inverse_ST2084(LMS, L_p)
)
ICtCp = (
vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2, LMS_p)
if (is_hlg_method and is_BT2100_2_method)
else vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP, LMS_p)
)
return ICtCp
def ICtCp_to_RGB(
ICtCp: ArrayLike,
method: Union[
Literal[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
str,
] = "Dolby 2016",
L_p: Floating = 10000,
) -> NDArray:
"""
Convert from :math:`IC_TC_P` colour encoding to *ITU-R BT.2020*
colourspace.
Parameters
----------
ICtCp
:math:`IC_TC_P` colour encoding array.
method
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
:class:`numpy.ndarray`
*ITU-R BT.2020* colourspace array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
>>> ICtCp_to_RGB(ICtCp) # doctest: +ELLIPSIS
array([ 0.4562052..., 0.0308107..., 0.0409195...])
>>> ICtCp = np.array([0.62567899, -0.01984490, 0.35911259])
>>> ICtCp_to_RGB(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.4562052..., 0.0308107..., 0.0409195...])
"""
ICtCp = as_float_array(ICtCp)
method = validate_method(
method,
[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
)
is_hlg_method = "hlg" in method
is_BT2100_2_method = "2100-2" in method
LMS_p = (
vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2, ICtCp)
if (is_hlg_method and is_BT2100_2_method)
else vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P, ICtCp)
)
with domain_range_scale("ignore"):
LMS = (
oetf_inverse_HLG_BT2100(LMS_p)
if is_hlg_method
else eotf_ST2084(LMS_p, L_p)
)
RGB = vector_dot(MATRIX_ICTCP_LMS_TO_RGB, LMS)
return RGB
def XYZ_to_ICtCp(
XYZ: ArrayLike,
illuminant=CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"],
chromatic_adaptation_transform: Union[
Literal[
"Bianco 2010",
"Bianco PC 2010",
"Bradford",
"CAT02 Brill 2008",
"CAT02",
"CAT16",
"CMCCAT2000",
"CMCCAT97",
"Fairchild",
"Sharp",
"Von Kries",
"XYZ Scaling",
],
str,
] = "CAT02",
method: Union[
Literal[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
str,
] = "Dolby 2016",
L_p: Floating = 10000,
) -> NDArray:
"""
Convert from *CIE XYZ* tristimulus values to :math:`IC_TC_P` colour
encoding.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values.
illuminant
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform
*Chromatic adaptation* transform.
method
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
:class:`numpy.ndarray`
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF) is
[0.0001, 10000].
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_to_ICtCp(XYZ) # doctest: +ELLIPSIS
array([ 0.0685809..., -0.0028384..., 0.0602098...])
>>> XYZ_to_ICtCp(XYZ, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.5924279..., -0.0374073..., 0.2512267...])
"""
BT2020 = RGB_COLOURSPACES["ITU-R BT.2020"]
RGB = XYZ_to_RGB(
XYZ,
illuminant,
BT2020.whitepoint,
BT2020.matrix_XYZ_to_RGB,
chromatic_adaptation_transform,
)
return RGB_to_ICtCp(RGB, method, L_p)
def ICtCp_to_XYZ(
ICtCp: ArrayLike,
illuminant=CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"],
chromatic_adaptation_transform: Union[
Literal[
"Bianco 2010",
"Bianco PC 2010",
"Bradford",
"CAT02 Brill 2008",
"CAT02",
"CAT16",
"CMCCAT2000",
"CMCCAT97",
"Fairchild",
"Sharp",
"Von Kries",
"XYZ Scaling",
],
str,
] = "CAT02",
method: Union[
Literal[
"Dolby 2016",
"ITU-R BT.2100-1 HLG",
"ITU-R BT.2100-1 PQ",
"ITU-R BT.2100-2 HLG",
"ITU-R BT.2100-2 PQ",
],
str,
] = "Dolby 2016",
L_p: Floating = 10000,
) -> NDArray:
"""
Convert from :math:`IC_TC_P` colour encoding to *CIE XYZ* tristimulus
values.
Parameters
----------
ICtCp
:math:`IC_TC_P` colour encoding array.
illuminant
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform
*Chromatic adaptation* transform.
method
Computation method. *Recommendation ITU-R BT.2100* defines multiple
variants of the :math:`IC_TC_P` colour encoding:
- *ITU-R BT.2100-1*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and the :math:`IC_TC_P` matrix
from :cite:`Dolby2016a`: *ITU-R BT.2100-1 HLG* method.
- *ITU-R BT.2100-2*
- *SMPTE ST 2084:2014* inverse electro-optical transfer
function (EOTF) and the :math:`IC_TC_P` matrix from
:cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
*ITU-R BT.2100-2 PQ* methods.
- *Recommendation ITU-R BT.2100* *Reference HLG* opto-electrical
transfer function (OETF) and a custom :math:`IC_TC_P`
matrix from :cite:`InternationalTelecommunicationUnion2018`:
*ITU-R BT.2100-2 HLG* method.
L_p
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ* tristimulus values.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are aliases
for the *Dolby 2016* method.
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
>>> ICtCp_to_XYZ(ICtCp) # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
>>> ICtCp = np.array([0.59242792, -0.03740730, 0.25122675])
>>> ICtCp_to_XYZ(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
"""
RGB = ICtCp_to_RGB(ICtCp, method, L_p)
BT2020 = RGB_COLOURSPACES["ITU-R BT.2020"]
XYZ = RGB_to_XYZ(
RGB,
BT2020.whitepoint,
illuminant,
BT2020.matrix_RGB_to_XYZ,
chromatic_adaptation_transform,
)
return XYZ
|
|
import pytest
from datetime import datetime, date
import numpy as np
from pandas import Timestamp, Period, Index
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro, Day
from pandas.compat.numpy import np_datetime64_compat
converter = pytest.importorskip('pandas.plotting._converter')
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(object):
def setup_method(self, method):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(date(2012, 1, 1), None, None)
assert rs == xp
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
assert rs == xp
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np_datetime64_compat('2012-01-01'), None, None)
assert rs == xp
rs = self.dtc.convert(np_datetime64_compat(
'2012-01-01 00:00:00+0000'), None, None)
assert rs == xp
rs = self.dtc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]), None, None)
assert rs[0] == xp
# we have a tz-aware date (constructed so that when we convert to UTC it
# is the same as our sample)
ts = (Timestamp('2012-01-01')
.tz_localize('UTC')
.tz_convert('US/Eastern')
)
rs = self.dtc.convert(ts, None, None)
assert rs == xp
rs = self.dtc.convert(ts.to_pydatetime(), None, None)
assert rs == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]), None, None)
assert rs[1] == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(),
None, None)
assert rs[1] == xp
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, decimals)
def test_conversion_outofbounds_datetime(self):
# 2579
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
def test_time_formatter(self):
self.tc(90000)
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
def test_convert_nested(self):
inner = [Timestamp('2017-01-01'), Timestamp('2017-01-02')]
data = [inner, inner]
result = self.dtc.convert(data, None, None)
expected = [self.dtc.convert(x, None, None) for x in data]
assert result == expected
class TestPeriodConverter(object):
def setup_method(self, method):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
assert r1 == r2
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
assert rs == xp
rs = self.pc.convert('2012-1-1', None, self.axis)
assert rs == xp
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
assert rs == xp
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
assert rs == xp
# FIXME
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01'), None, self.axis)
# assert rs == xp
#
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# None, self.axis)
# assert rs == xp
#
# rs = self.pc.convert(np.array([
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# np_datetime64_compat('2012-01-02 00:00:00+0000')]),
# None, self.axis)
# assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
assert rs == xp
def test_convert_nested(self):
data = ['2012-1-1', '2012-1-2']
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
|
|
#!/usr/bin/env python3
# Copyright 2020 The gf-layers Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import sys
from pathlib import Path
from typing import List, Any
doc = """
Generates the layer manifest JSON files and linker scripts from the C++ source
code in each VkLayer_* directory.
The C++ files must include commented layer properties like this:
"VkLayer_GF_frame_counter", // layerName
VK_MAKE_VERSION(1U, 1U, 130U), // specVersion
1, // implementationVersion
"Frame counter layer.", // description
And exports like this:
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
VkLayer_GF_frame_counterNegotiateLoaderLayerInterfaceVersion(
"""
def not_none(x):
assert x is not None
return x
def log(s: str):
print(s, file=sys.stderr, flush=True)
class LayerProperties:
def __init__(self):
self.layer_name = None
self.spec_version = None
self.implementation_version = None
self.description = None
def generate_manifests(layer_dir: Path, layer_properties: LayerProperties, check_only: bool) -> bool:
result = True
src_files = list(layer_dir.glob("src/*.cc"))
for src_file in src_files:
with src_file.open("r", encoding="utf-8", errors="ignore") as f:
for line in f:
if "// layerName" in line:
layer_properties.layer_name = re.search(r'"([^"]*)",', line).group(1)
elif "// specVersion" in line:
match = re.search(r'VK_MAKE_VERSION\((\d+)U, (\d+)U, (\d+)U\)', line)
layer_properties.spec_version = f"{match.group(1)}.{match.group(2)}.{match.group(3)}"
elif "// implementationVersion" in line:
layer_properties.implementation_version = re.search(r' (\d+),', line).group(1)
elif "// description" in line:
layer_properties.description = re.search(r'"([^"]*)"', line).group(1)
# Write out the manifest file for each platform.
for platform in ["unix", "darwin", "windows"]:
if platform == "unix":
library_path = f"./lib{not_none(layer_properties.layer_name)}.so"
elif platform == "darwin":
library_path = f"./lib{not_none(layer_properties.layer_name)}.dylib"
elif platform == "windows":
# JSON output is similar to: ".\\aaa.dll"
# I.e. the backslash must be escaped in JSON.
# We don't need to double-escape below because we use a raw string literal.
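# Illustration (an added note): the raw f-string below yields a Python string
# containing two literal backslashes, e.g. '.\\VkLayer_GF_frame_counter.dll',
# which a JSON parser decodes to the single-backslash path
# '.\VkLayer_GF_frame_counter.dll'.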
library_path = fr".\\{not_none(layer_properties.layer_name)}.dll"
else:
assert False, "Unsupported platform"
manifest_file = layer_dir / f"{not_none(layer_properties.layer_name)}_{platform}.json"
# Braces have to be escaped.
manifest_contents = f'''{{
"file_format_version" : "1.1.2",
"layers": [
{{
"name": "{not_none(layer_properties.layer_name)}",
"type": "GLOBAL",
"library_path": "{library_path}",
"api_version" : "{not_none(layer_properties.spec_version)}",
"implementation_version" : "{not_none(layer_properties.implementation_version)}",
"description" : "{not_none(layer_properties.description)}",
"functions": {{
"vkGetDeviceProcAddr": "{not_none(layer_properties.layer_name)}GetDeviceProcAddr",
"vkGetInstanceProcAddr": "{not_none(layer_properties.layer_name)}GetInstanceProcAddr",
"vkNegotiateLoaderLayerInterfaceVersion": "{not_none(layer_properties.layer_name)}NegotiateLoaderLayerInterfaceVersion"
}}
}}
],
"enable_environment": {{
"{not_none(layer_properties.layer_name)}_V{not_none(layer_properties.implementation_version)}_ENABLE": "1"
}},
"disable_environment": {{
"{not_none(layer_properties.layer_name)}_V{not_none(layer_properties.implementation_version)}_DISABLE": "1"
}}
}}
'''
# End of manifest_contents.
log(f"Processing: {str(manifest_file)}")
if check_only:
if ((not manifest_file.is_file()) or
manifest_file.read_text(encoding="utf-8", errors="ignore") != manifest_contents):
log("Error: manifest file needs to be re-generated")
result = False
else:
manifest_file.write_text(manifest_contents, encoding="utf-8", errors="ignore")
return result
ANDROID_EXPORTS = [
"vkEnumerateInstanceLayerProperties",
"vkEnumerateDeviceLayerProperties",
"vkEnumerateInstanceExtensionProperties",
"vkEnumerateDeviceExtensionProperties",
]
def generate_linker_scripts(layer_dir: Path, layer_name: str, check_only: bool) -> bool:
result = True
# Collect the exported functions by scanning all source files.
non_android_exported_functions: List[str] = []
src_files = list(layer_dir.glob("src/*.cc"))
for src_file in src_files:
contents = src_file.read_text(encoding="utf-8", errors="ignore")
function_names = re.findall(
r"VK_LAYER_EXPORT(?:\s|\n)+VKAPI_ATTR(?:\s|\n)+\w+(?:\s|\n)+VKAPI_CALL(?:\s|\n)+(\w+)\(",
contents,
)
for function_name in function_names:
if function_name not in ANDROID_EXPORTS:
non_android_exported_functions.append(function_name)
# Write out the linker scripts.
# .def (used by MSVC)
def_path = layer_dir / f"{layer_name}.def"
log(f"Processing: {str(def_path)}")
def_contents = f"; Linker script for MSVC.\n"
def_contents += f"; Generated file; do not edit.\n"
def_contents += f"LIBRARY {layer_name}\n"
def_contents += "EXPORTS\n"
for function in non_android_exported_functions:
def_contents += f" {function}\n"
if check_only:
if not def_path.is_file() or def_path.read_text(encoding="utf-8", errors="ignore") != def_contents:
log("Error: file needs to be re-generated")
result = False
else:
def_path.write_text(def_contents, encoding="utf-8", errors="ignore")
# .exports (used for Apple targets)
exports_path = layer_dir / f"{layer_name}.exports"
log(f"Processing: {str(exports_path)}")
exports_contents = "# Linker script for Apple.\n"
exports_contents += f"# Generated file; do not edit.\n"
for function in non_android_exported_functions:
exports_contents += f"_{function}\n"
if check_only:
if not exports_path.is_file() or exports_path.read_text(encoding="utf-8", errors="ignore") != exports_contents:
log("Error: file needs to be re-generated")
result = False
else:
exports_path.write_text(exports_contents, encoding="utf-8", errors="ignore")
# .lds (used for Linux targets)
lds_path = layer_dir / f"{layer_name}.lds"
log(f"Processing: {str(lds_path)}")
lds_contents = "# Linker script for Linux.\n"
lds_contents += f"# Generated file; do not edit.\n"
lds_contents += "{\n"
lds_contents += "global:\n"
for function in non_android_exported_functions:
lds_contents += f" {function};\n"
lds_contents += "local:\n"
lds_contents += " *;\n"
lds_contents += "};\n"
if check_only:
if not lds_path.is_file() or lds_path.read_text(encoding="utf-8", errors="ignore") != lds_contents:
log("Error: file needs to be re-generated")
result = False
else:
lds_path.write_text(lds_contents, encoding="utf-8", errors="ignore")
# _android.lds (used for Android targets)
lds_android_path = layer_dir / f"{layer_name}_android.lds"
log(f"Processing: {str(lds_android_path)}")
lds_android_contents = "# Linker script for Android.\n"
lds_android_contents += f"# Generated file; do not edit.\n"
lds_android_contents += "{\n"
lds_android_contents += "global:\n"
for function in non_android_exported_functions:
lds_android_contents += f" {function};\n"
# Just for Android:
lds_android_contents += " # Introspection functions must be exported on Android.\n"
for function in ANDROID_EXPORTS:
lds_android_contents += f" {function};\n"
lds_android_contents += "local:\n"
lds_android_contents += " *;\n"
lds_android_contents += "};\n"
if check_only:
if (not lds_android_path.is_file()
or lds_android_path.read_text(encoding="utf-8", errors="ignore") != lds_android_contents):
log("Error: file needs to be re-generated")
result = False
else:
lds_android_path.write_text(lds_android_contents, encoding="utf-8", errors="ignore")
return result
def main(args) -> None:
raw_help_formatter: Any = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(
description=doc,
formatter_class=raw_help_formatter,
)
parser.add_argument(
"--check_only",
help="Don't change any files; just check that they would not change. If the files would have changed then "
"exits with error status 2.",
action="store_true",
)
parsed_args = parser.parse_args(args[1:])
check_only: bool = parsed_args.check_only
os.chdir(os.environ["GF_LAYERS_REPO_ROOT"])
layer_dirs = [d for d in Path().glob("src/VkLayer_*/") if d.is_dir()]
result = True
# For each layer:
for layer_dir in layer_dirs:
layer_properties = LayerProperties()
result &= generate_manifests(layer_dir, layer_properties, check_only)
result &= generate_linker_scripts(layer_dir, layer_name=layer_properties.layer_name, check_only=check_only)
if not result:
raise AssertionError("Checks failed. See above.")
if __name__ == "__main__":
main(sys.argv)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
Interface to the Xapian indexing engine for the Translate Toolkit
Xapian v1.0 or higher is supported.
If you are interested in writing an interface for Xapian 0.x, then
you should check out the following::
svn export -r 7235 https://translate.svn.sourceforge.net/svnroot/translate/src/branches/translate-search-indexer-generic-merging/translate/search/indexer/
It is not completely working, but it should give you a good start.
"""
# xapian module versions before 1.0.13 hang apache under mod_python
import sys
import re
# detect if running under apache
if 'apache' in sys.modules or '_apache' in sys.modules or 'mod_wsgi' in sys.modules:
def _str2version(version):
return [int(i) for i in version.split('.')]
import subprocess
# even checking the xapian version leads to a deadlock under apache, so we must figure out the version from the command line
try:
command = subprocess.Popen(['xapian-check', '--version'], stdout=subprocess.PIPE)
stdout, stderr = command.communicate()
if _str2version(re.match(r'.*([0-9]+\.[0-9]+\.[0-9]+).*', stdout).groups()[0]) < [1, 0, 13]:
raise ImportError("Running under apache, can't load xapain")
except:
# FIXME: report if the xapian-check command is missing?
raise ImportError("Running under apache, can't load xapian")
import CommonIndexer
import xapian
import os
import time
import logging
def is_available():
return xapian.major_version() > 0
# in xapian there is a length restriction for term strings
# see http://osdir.com/ml/search.xapian.general/2006-11/msg00210.html
# a maximum length of around 240 is described there - but we need less anyway
_MAX_TERM_LENGTH = 128
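# Illustration (an assumption, not part of the original module): over-long
# terms are clipped by the _truncate_term_length() helper defined near the
# bottom of this file, e.g.
#
#   _truncate_term_length("x" * 300)            # -> first 127 characters
#   _truncate_term_length("x" * 300, taken=20)  # -> first 107 characters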
class XapianDatabase(CommonIndexer.CommonDatabase):
"""Interface to the `Xapian indexer <http://xapian.org>`_."""
QUERY_TYPE = xapian.Query
INDEX_DIRECTORY_NAME = "xapian"
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""Initialize or open a Xapian database.
:raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different indexing engine)
:raise OSError: the database failed to initialize
:param basedir: the parent directory of the database
:type basedir: str
:param analyzer: Bitwise combination of possible analyzer flags
to be used as the default analyzer for this
database. Leave it empty to use the system default
analyzer (self.ANALYZER_DEFAULT).
See self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
:type analyzer: int
:param create_allowed: create the database, if necessary; default: True
:type create_allowed: bool
"""
# call the __init__ function of our parent
super(XapianDatabase, self).__init__(basedir, analyzer=analyzer,
create_allowed=create_allowed)
self.reader = None
self.writer = None
if os.path.exists(self.location):
# try to open an existing database
try:
self.reader = xapian.Database(self.location)
except xapian.DatabaseOpeningError, err_msg:
raise ValueError("Indexer: failed to open xapian database " \
+ "(%s) - maybe it is not a xapian database: %s" \
% (self.location, str(err_msg)))
else:
# create a new database
if not create_allowed:
raise OSError("Indexer: skipping database creation")
try:
# create the parent directory if it does not exist
parent_path = os.path.dirname(self.location)
if not os.path.isdir(parent_path):
# recursively create all directories up to parent_path
os.makedirs(parent_path)
except IOError, err_msg:
raise OSError("Indexer: failed to create the parent " \
+ "directory (%s) of the indexing database: %s" \
% (parent_path, str(err_msg)))
try:
self.writer = xapian.WritableDatabase(self.location,
xapian.DB_CREATE_OR_OPEN)
self.flush()
except xapian.DatabaseOpeningError, err_msg:
raise OSError("Indexer: failed to open or create a xapian " \
+ "database (%s): %s" % (self.location, str(err_msg)))
def __del__(self):
self.reader = None
self._writer_close()
def flush(self, optimize=False):
"""force to write the current changes to disk immediately
:param optimize: ignored for xapian
:type optimize: bool
"""
# write changes to disk (only if database is read-write)
if self._writer_is_open():
self._writer_close()
self._index_refresh()
def make_query(self, *args, **kwargs):
try:
return super(XapianDatabase, self).make_query(*args, **kwargs)
except xapian.DatabaseModifiedError:
self._index_refresh()
return super(XapianDatabase, self).make_query(*args, **kwargs)
def _create_query_for_query(self, query):
"""generate a query based on an existing query object
basically this function should just create a copy of the original
:param query: the original query object
:type query: xapian.Query
:return: the resulting query object
:rtype: xapian.Query
"""
# create a copy of the original query
return xapian.Query(query)
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""generate a query for a plain term of a string query
basically this function parses the string and returns the resulting
query
:param text: the query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: xapian.Query
"""
qp = xapian.QueryParser()
qp.set_database(self.reader)
if require_all:
qp.set_default_op(xapian.Query.OP_AND)
else:
qp.set_default_op(xapian.Query.OP_OR)
if analyzer is None:
analyzer = self.analyzer
if analyzer & self.ANALYZER_PARTIAL > 0:
match_flags = xapian.QueryParser.FLAG_PARTIAL
return qp.parse_query(text, match_flags)
elif analyzer == self.ANALYZER_EXACT:
# exact matching -> use the string as a single term
return xapian.Query(text)
else:
# everything else (not partial and not exact)
match_flags = 0
return qp.parse_query(text, match_flags)
def _create_query_for_field(self, field, value, analyzer=None):
"""generate a field query
this functions creates a field->value query
:param field: the fieldname to be used
:type field: str
:param value: the wanted value of the field
:type value: str
:param analyzer: Define query options (partial matching, exact
matching, tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: the resulting query object
:rtype: xapian.Query
"""
if analyzer is None:
analyzer = self.analyzer
if analyzer == self.ANALYZER_EXACT:
# exact matching -> keep special characters
return xapian.Query("%s%s" % (field.upper(), value))
# other queries need a parser object
qp = xapian.QueryParser()
qp.set_database(self.reader)
if (analyzer & self.ANALYZER_PARTIAL > 0):
# partial matching
match_flags = xapian.QueryParser.FLAG_PARTIAL
return qp.parse_query(value, match_flags, field.upper())
else:
# everything else (not partial and not exact)
match_flags = 0
return qp.parse_query(value, match_flags, field.upper())
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: xapian.Query
"""
if require_all:
query_op = xapian.Query.OP_AND
else:
query_op = xapian.Query.OP_OR
return xapian.Query(query_op, queries)
def _create_empty_document(self):
"""create an empty document to be filled and added to the index later
:return: the new document object
:rtype: xapian.Document
"""
return xapian.Document()
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
:param document: the document to be changed
:type document: xapian.Document
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
if tokenize:
term_gen = xapian.TermGenerator()
term_gen.set_document(document)
term_gen.index_text(term)
else:
document.add_term(_truncate_term_length(term))
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
:param document: the document to be changed
:type document: xapian.Document
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
if tokenize:
term_gen = xapian.TermGenerator()
term_gen.set_document(document)
term_gen.index_text(term, 1, field.upper())
else:
document.add_term(_truncate_term_length("%s%s" % \
(field.upper(), term)))
def _add_document_to_index(self, document):
"""add a prepared document to the index database
:param document: the document to be added
:type document: xapian.Document
"""
# open the database for writing
self._writer_open()
self.writer.add_document(document)
def begin_transaction(self):
"""Begin a transaction.
Xapian supports transactions to group multiple database modifications.
This avoids intermediate flushing and therefore increases performance.
"""
self._writer_open()
self.writer.begin_transaction()
def cancel_transaction(self):
"""cancel an ongoing transaction
no changes since the last execution of 'begin_transaction' are written
"""
self.writer.cancel_transaction()
self._writer_close()
def commit_transaction(self):
"""Submit the changes of an ongoing transaction.
All changes since the last execution of 'begin_transaction'
are written.
"""
self.writer.commit_transaction()
self._writer_close()
def get_query_result(self, query):
"""Return an object containing the results of a query.
:param query: a pre-compiled xapian query
:type query: xapian.Query
:return: an object that allows access to the results
:rtype: XapianIndexer.CommonEnquire
"""
enquire = xapian.Enquire(self.reader)
enquire.set_query(query)
return XapianEnquire(enquire)
def delete_document_by_id(self, docid):
"""Delete a specified document.
:param docid: the document ID to be deleted
:type docid: int
"""
# open the database for writing
self._writer_open()
try:
self.writer.delete_document(docid)
return True
except xapian.DocNotFoundError:
return False
def search(self, query, fieldnames):
"""Return a list of the contents of specified fields for all matches
of a query.
:param query: the query to be issued
:type query: xapian.Query
:param fieldnames: the name(s) of a field of the document content
:type fieldnames: string | list of strings
:return: a list of dicts containing the specified field(s)
:rtype: list of dicts
"""
result = []
if isinstance(fieldnames, basestring):
fieldnames = [fieldnames]
try:
self._walk_matches(query, _extract_fieldvalues,
(result, fieldnames))
except xapian.DatabaseModifiedError:
self._index_refresh()
self._walk_matches(query, _extract_fieldvalues,
(result, fieldnames))
return result
def _delete_stale_lock(self):
if not self._writer_is_open():
lockfile = os.path.join(self.location, 'flintlock')
if (os.path.exists(lockfile) and
(time.time() - os.path.getmtime(lockfile)) / 60 > 15):
logging.warning("Stale lock found in %s, removing.",
self.location)
os.remove(lockfile)
def _writer_open(self):
"""Open write access for the indexing database and acquire an
exclusive lock.
"""
if not self._writer_is_open():
self._delete_stale_lock()
try:
self.writer = xapian.WritableDatabase(self.location, xapian.DB_OPEN)
except xapian.DatabaseOpeningError, err_msg:
raise ValueError("Indexer: failed to open xapian database " \
+ "(%s) - maybe it is not a xapian database: %s" \
% (self.location, str(err_msg)))
def _writer_close(self):
"""close indexing write access and remove database lock"""
if self._writer_is_open():
self.writer.flush()
self.writer = None
def _writer_is_open(self):
"""check if the indexing write access is currently open"""
return hasattr(self, "writer") and not self.writer is None
def _index_refresh(self):
"""re-read the indexer database"""
try:
if self.reader is None:
self.reader = xapian.Database(self.location)
else:
self.reader.reopen()
except xapian.DatabaseOpeningError, err_msg:
raise ValueError("Indexer: failed to open xapian database " \
+ "(%s) - maybe it is not a xapian database: %s" \
% (self.location, str(err_msg)))
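# Illustrative usage sketch (an assumption, not part of the original module):
# indexing a single field term and querying it back with the methods above,
# assuming a writable "/tmp/translate-index" directory.
#
#   db = XapianDatabase("/tmp/translate-index", create_allowed=True)
#   doc = db._create_empty_document()
#   db._add_field_term(doc, "source", "hello world")
#   db._add_document_to_index(doc)
#   db.flush()
#   query = db._create_query_for_field("source", "hello")
#   print(db.search(query, ["source"]))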
class XapianEnquire(CommonIndexer.CommonEnquire):
"""interface to the xapian object for storing sets of matches
"""
def get_matches(self, start, number):
"""Return a specified number of qualified matches of a previous query.
:param start: index of the first match to return (starting from zero)
:type start: int
:param number: the number of matching entries to return
:type number: int
:return: a set of matching entries and some statistics
:rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
matches = self.enquire.get_mset(start, number)
result = []
for match in matches:
elem = {}
elem["rank"] = match.rank
elem["docid"] = match.docid
elem["percent"] = match.percent
elem["document"] = match.document
result.append(elem)
return (matches.size(), matches.get_matches_estimated(), result)
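# Illustration (an assumption, values are made up): get_matches(0, 10) returns
# a tuple like
#
#   (2, 2, [{"rank": 0, "percent": 98, "document": <xapian.Document>, "docid": 1},
#           {"rank": 1, "percent": 72, "document": <xapian.Document>, "docid": 5}])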
def _truncate_term_length(term, taken=0):
"""truncate the length of a term string length to the maximum allowed
for xapian terms
:param term: the value of the term, that should be truncated
:type term: str
:param taken: since a term consists of the name of the term and its
actual value, this additional parameter can be used to reduce the
maximum count of possible characters
:type taken: int
:return: the truncated string
:rtype: str
"""
if len(term) > _MAX_TERM_LENGTH - taken:
return term[0:_MAX_TERM_LENGTH - taken - 1]
else:
return term
def _extract_fieldvalues(match, (result, fieldnames)):
"""Add a dict of field values to a list.
Usually this function should be used together with :func:`_walk_matches`
for traversing a list of matches.
:param match: a single match object
:type match: xapian.MSet
:param result: the resulting dict will be added to this list
:type result: list of dict
:param fieldnames: the names of the fields to be added to the dict
:type fieldnames: list of str
"""
# prepare empty dict
item_fields = {}
# fill the dict
for term in match["document"].termlist():
for fname in fieldnames:
if ((fname is None) and re.match("[^A-Z]", term.term)):
value = term.term
elif re.match("%s[^A-Z]" % str(fname).upper(), term.term):
value = term.term[len(fname):]
else:
continue
# we found a matching field/term
if fname in item_fields:
item_fields[fname].append(value)
else:
item_fields[fname] = [value]
result.append(item_fields)
|