gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \
as storops
from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils as ut_utils
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils
from cinder.volume import volume_types
class FakeDriver(object):
    """Minimal driver stand-in used to exercise the
    ``require_consistent_group_snapshot_enabled`` decorator in vnx_utils.
    """

    @vnx_utils.require_consistent_group_snapshot_enabled
    def fake_group_method(self, context, group_or_snap):
        # Returns True so tests can assert the wrapped method was reached
        # (i.e. the decorator allowed the call through).
        return True
class TestUtils(test.TestCase):
    """Unit tests for the VNX driver helper functions in ``vnx_utils``."""

    def setUp(self):
        super(TestUtils, self).setUp()
        # Shrink the polling timeout so the wait_until tests finish quickly;
        # the original value is restored in tearDown.
        self.origin_timeout = common.DEFAULT_TIMEOUT
        common.DEFAULT_TIMEOUT = 0.05

    def tearDown(self):
        super(TestUtils, self).tearDown()
        common.DEFAULT_TIMEOUT = self.origin_timeout

    def test_wait_until(self):
        mock_testmethod = mock.Mock(return_value=True)
        vnx_utils.wait_until(mock_testmethod, interval=0)
        mock_testmethod.assert_has_calls([mock.call()])

    def test_wait_until_with_exception(self):
        mock_testmethod = mock.Mock(
            side_effect=storops_ex.VNXAttachSnapError('Unknown error'))
        mock_testmethod.__name__ = 'test_method'
        # The reraise arbiter makes VNXAttachSnapError propagate immediately
        # instead of being retried until the timeout expires.
        self.assertRaises(storops_ex.VNXAttachSnapError,
                          vnx_utils.wait_until,
                          mock_testmethod,
                          timeout=1,
                          interval=0,
                          reraise_arbiter=(
                              lambda ex: not isinstance(
                                  ex, storops_ex.VNXCreateLunError)))
        mock_testmethod.assert_has_calls([mock.call()])

    def test_wait_until_with_params(self):
        mock_testmethod = mock.Mock(return_value=True)
        vnx_utils.wait_until(mock_testmethod,
                             param1=1,
                             param2='test')
        # Keyword arguments must be forwarded unchanged to the polled method.
        # NOTE: the original test repeated this identical assertion twice;
        # the duplicate added no coverage and has been removed.
        mock_testmethod.assert_has_calls(
            [mock.call(param1=1, param2='test')])

    @res_mock.mock_driver_input
    def test_retype_need_migration_when_host_changed(self, driver_in):
        volume = driver_in['volume']
        another_host = driver_in['host']
        re = vnx_utils.retype_need_migration(
            volume, None, None, another_host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_need_migration_for_smp_volume(self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        re = vnx_utils.retype_need_migration(
            volume, None, None, host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_need_migration_when_provision_changed(
            self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        # thin -> deduplicated cannot be changed in place, so a migration
        # is required.
        old_spec = common.ExtraSpecs({'provisioning:type': 'thin'})
        new_spec = common.ExtraSpecs({'provisioning:type': 'deduplicated'})
        re = vnx_utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_not_need_migration_when_provision_changed(
            self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        # thick -> compressed is achieved by enabling compression on the
        # existing LUN, not by migrating it.
        old_spec = common.ExtraSpecs({'provisioning:type': 'thick'})
        new_spec = common.ExtraSpecs({'provisioning:type': 'compressed'})
        re = vnx_utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertFalse(re)

    @res_mock.mock_driver_input
    def test_retype_not_need_migration(self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        # Only the tiering policy changes; provision is untouched, so no
        # migration is expected.
        old_spec = common.ExtraSpecs({'storagetype:tiering': 'auto'})
        new_spec = common.ExtraSpecs(
            {'storagetype:tiering': 'starthighthenauto'})
        re = vnx_utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertFalse(re)

    def test_retype_need_change_tier(self):
        re = vnx_utils.retype_need_change_tier(
            storops.VNXTieringEnum.AUTO, storops.VNXTieringEnum.HIGH_AUTO)
        self.assertTrue(re)

    def test_retype_need_turn_on_compression(self):
        # Both thin and thick LUNs require compression to be turned on when
        # retyped to the compressed provision type.
        re = vnx_utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.THIN,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertTrue(re)
        re = vnx_utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.THICK,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertTrue(re)

    def test_retype_not_need_turn_on_compression(self):
        # Deduplicated LUNs are not compressed in place.
        # NOTE: the original test ran this identical check twice
        # (DEDUPED -> COMPRESSED both times); the duplicate has been removed.
        re = vnx_utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.DEDUPED,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertFalse(re)

    @res_mock.mock_driver_input
    def test_get_base_lun_name(self, mocked):
        volume = mocked['volume']
        self.assertEqual(
            'test',
            vnx_utils.get_base_lun_name(volume))

    def test_convert_to_tgt_list_and_itor_tgt_map(self):
        zone_mapping = {
            'san_1': {'initiator_port_wwn_list':
                      ['wwn1_1'],
                      'target_port_wwn_list':
                      ['wwnt_1', 'wwnt_2']},
            'san_2': {'initiator_port_wwn_list':
                      ['wwn2_1', 'wwn2_2'],
                      'target_port_wwn_list':
                      ['wwnt_1', 'wwnt_3']},
        }
        tgt_wwns, itor_tgt_map = (
            vnx_utils.convert_to_tgt_list_and_itor_tgt_map(zone_mapping))
        # Ordering of the returned target list is not guaranteed, so compare
        # as a set.
        self.assertEqual({'wwnt_1', 'wwnt_2', 'wwnt_3'}, set(tgt_wwns))
        self.assertEqual({'wwn1_1': ['wwnt_1', 'wwnt_2'],
                          'wwn2_1': ['wwnt_1', 'wwnt_3'],
                          'wwn2_2': ['wwnt_1', 'wwnt_3']},
                         itor_tgt_map)

    @ut_utils.patch_group_specs('<is> True')
    @res_mock.mock_driver_input
    def test_require_consistent_group_snapshot_enabled(self, input):
        driver = FakeDriver()
        is_called = driver.fake_group_method('context', input['group'])
        self.assertTrue(is_called)

    @res_mock.mock_driver_input
    def test_is_image_cache_volume_false(self, mocked):
        volume = mocked['volume']
        volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0'
        self.assertFalse(vnx_utils.is_image_cache_volume(volume))
        # A malformed UUID suffix must not be treated as an image cache
        # volume either.
        volume.display_name = 'volume-ca86b9a0-d0d5-c62274056cc0'
        self.assertFalse(vnx_utils.is_image_cache_volume(volume))

    @res_mock.mock_driver_input
    def test_is_image_cache_volume_true(self, mocked):
        volume = mocked['volume']
        volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0'
        self.assertTrue(vnx_utils.is_image_cache_volume(volume))

    @res_mock.mock_driver_input
    def test_calc_migrate_and_provision_image_cache(self, mocked):
        volume = mocked['volume']
        volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0'
        self.assertTrue(vnx_utils.is_image_cache_volume(volume))
        async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume)
        # Image cache volumes are always created thin and never migrated
        # asynchronously.
        self.assertFalse(async_migrate)
        self.assertEqual(provision.name, 'THIN')

    @res_mock.mock_driver_input
    def test_calc_migrate_and_provision(self, mocked):
        volume = mocked['volume']
        volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0'
        async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume)
        self.assertEqual(vnx_utils.is_async_migrate_enabled(volume),
                         async_migrate)
        self.assertEqual(provision.name, 'THICK')

    @ut_utils.patch_extra_specs({})
    @res_mock.mock_driver_input
    def test_get_backend_qos_specs(self, cinder_input):
        volume = cinder_input['volume']
        # No qos specs at all -> None.
        with mock.patch.object(volume_types, 'get_volume_type_qos_specs',
                               return_value={'qos_specs': None}):
            r = vnx_utils.get_backend_qos_specs(volume)
            self.assertIsNone(r)
        # Front-end qos specs do not apply to the backend -> None.
        with mock.patch.object(volume_types, 'get_volume_type_qos_specs',
                               return_value={
                                   'qos_specs': {'consumer': 'frontend'}}):
            r = vnx_utils.get_backend_qos_specs(volume)
            self.assertIsNone(r)
        # Back-end qos specs are returned with their limits intact.
        with mock.patch.object(volume_types, 'get_volume_type_qos_specs',
                               return_value={
                                   'qos_specs': {
                                       'id': 'test',
                                       'consumer': 'back-end',
                                       'specs': {common.QOS_MAX_BWS: 100,
                                                 common.QOS_MAX_IOPS: 10}}}):
            r = vnx_utils.get_backend_qos_specs(volume)
            self.assertIsNotNone(r)
            self.assertEqual(100, r[common.QOS_MAX_BWS])
            self.assertEqual(10, r[common.QOS_MAX_IOPS])

    @ut_utils.patch_group_specs({
        'consistent_group_replication_enabled': '<is> True'})
    @ut_utils.patch_extra_specs({
        'replication_enabled': '<is> False'})
    @res_mock.mock_driver_input
    def test_check_type_matched_invalid(self, mocked):
        volume = mocked['volume']
        volume.group = mocked['group']
        self.assertRaises(exception.InvalidInput,
                          vnx_utils.check_type_matched,
                          volume)

    @ut_utils.patch_group_specs({
        'consistent_group_replication_enabled': '<is> True'})
    @res_mock.mock_driver_input
    def test_check_rep_status_matched_disabled(self, mocked):
        group = mocked['group']
        self.assertRaises(exception.InvalidInput,
                          vnx_utils.check_rep_status_matched,
                          group)
| |
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from httpd import HttpdCollector
import httplib
################################################################################
class TestHTTPResponse(httplib.HTTPResponse):
    """Stub HTTP response whose ``read``/``getheaders`` are patched per-test
    with fixture data via ``mock.patch.object``.
    """

    def __init__(self):
        # Deliberately skip HTTPResponse.__init__, which requires a real
        # socket object.
        pass

    def read(self):
        # Placeholder; overridden with a Mock returning fixture content in
        # the individual tests.
        pass
class TestHttpdCollector(CollectorTestCase):
    """Tests for HttpdCollector against mocked httplib server-status pages."""

    def setUp(self, config=None):
        # setUp is re-invoked from individual tests to rebuild the collector
        # with a test-specific config.
        if config is None:
            config = get_collector_config('HttpdCollector', {
                'interval': '10',
                'url': 'http://www.example.com:80/server-status?auto'
            })
        else:
            config = get_collector_config('HttpdCollector', config)
        self.collector = HttpdCollector(config, None)
        # Route every HTTP request through the stub response object so no
        # network traffic occurs.
        self.HTTPResponse = TestHTTPResponse()
        httplib.HTTPConnection.request = Mock(return_value=True)
        httplib.HTTPConnection.getresponse = Mock(
            return_value=self.HTTPResponse)

    def test_import(self):
        self.assertTrue(HttpdCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_synthetic_data(self, publish_mock):
        self.setUp()
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-fake-1').getvalue()))
        patch_headers = patch.object(
            TestHTTPResponse,
            'getheaders',
            Mock(return_value={}))
        patch_headers.start()
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        # The first collection only primes the rate counters, so nothing is
        # published yet.
        self.assertPublishedMany(publish_mock, {})
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-fake-2').getvalue()))
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        patch_headers.stop()
        # The second collection publishes deltas/gauges from the fixtures.
        self.assertPublishedMany(publish_mock, {
            'TotalAccesses': 100,
            'ReqPerSec': 10,
            'BytesPerSec': 20480,
            'BytesPerReq': 204.8,
            'BusyWorkers': 6,
            'IdleWorkers': 4,
            'WritingWorkers': 1,
            'KeepaliveWorkers': 2,
            'ReadingWorkers': 3,
            'DnsWorkers': 0,
            'ClosingWorkers': 0,
            'LoggingWorkers': 0,
            'FinishingWorkers': 0,
            'CleanupWorkers': 0,
            'AccessesPerSec': 0,
            'StartingFinishingWorkers': 0,
            'StandbyWorkers': 5,
            'CPULoad': 0.5,
        })

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        self.setUp()
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-1').getvalue()))
        patch_headers = patch.object(
            TestHTTPResponse,
            'getheaders',
            Mock(return_value={}))
        patch_headers.start()
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        # First pass primes the counters; no metrics expected.
        self.assertPublishedMany(publish_mock, {})
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-2').getvalue()))
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        patch_headers.stop()
        metrics = {
            'TotalAccesses': 8314,
            'ReqPerSec': 0,
            'BytesPerSec': 165,
            'BytesPerReq': 5418.55,
            'BusyWorkers': 9,
            'IdleWorkers': 0,
            'WritingWorkers': 1,
            'KeepaliveWorkers': 7,
            'ReadingWorkers': 1,
            'DnsWorkers': 0,
            'ClosingWorkers': 0,
            'LoggingWorkers': 0,
            'FinishingWorkers': 0,
            'CleanupWorkers': 0,
            'AccessesPerSec': 0,
        }
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_should_work_with_multiple_servers(self, publish_mock):
        # Two nicknamed URLs; metrics should be published under each prefix.
        self.setUp(config={
            'urls': [
                'nickname1 http://localhost:8080/server-status?auto',
                'nickname2 http://localhost:8080/server-status?auto',
            ],
        })
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-1').getvalue()))
        patch_headers = patch.object(
            TestHTTPResponse,
            'getheaders',
            Mock(return_value={}))
        patch_headers.start()
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-2').getvalue()))
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        patch_headers.stop()
        metrics = {
            'nickname1.TotalAccesses': 8314,
            'nickname1.ReqPerSec': 0,
            'nickname1.BytesPerSec': 165,
            'nickname1.BytesPerReq': 5418.55,
            'nickname1.BusyWorkers': 9,
            'nickname1.IdleWorkers': 0,
            'nickname1.WritingWorkers': 1,
            'nickname1.KeepaliveWorkers': 7,
            'nickname1.ReadingWorkers': 1,
            'nickname1.DnsWorkers': 0,
            'nickname1.ClosingWorkers': 0,
            'nickname1.LoggingWorkers': 0,
            'nickname1.FinishingWorkers': 0,
            'nickname1.CleanupWorkers': 0,
            'nickname1.AccessesPerSec': 0,
            'nickname2.TotalAccesses': 8314,
            'nickname2.ReqPerSec': 0,
            'nickname2.BytesPerSec': 165,
            'nickname2.BytesPerReq': 5418.55,
            'nickname2.BusyWorkers': 9,
            'nickname2.IdleWorkers': 0,
            'nickname2.WritingWorkers': 1,
            'nickname2.KeepaliveWorkers': 7,
            'nickname2.ReadingWorkers': 1,
            'nickname2.DnsWorkers': 0,
            'nickname2.ClosingWorkers': 0,
            'nickname2.LoggingWorkers': 0,
            'nickname2.FinishingWorkers': 0,
            'nickname2.CleanupWorkers': 0,
            'nickname2.AccessesPerSec': 0,
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_issue_456(self, publish_mock):
        self.setUp(config={
            'urls': 'vhost http://localhost/server-status?auto',
        })
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-3').getvalue()))
        patch_headers = patch.object(
            TestHTTPResponse,
            'getheaders',
            Mock(return_value={}))
        patch_headers.start()
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_read = patch.object(
            TestHTTPResponse,
            'read',
            Mock(return_value=self.getFixture(
                'server-status-live-4').getvalue()))
        patch_read.start()
        self.collector.collect()
        patch_read.stop()
        patch_headers.stop()
        metrics = {
            'vhost.TotalAccesses': 329,
            'vhost.ReqPerSec': 0.156966,
            'vhost.BytesPerSec': 2417.83,
            'vhost.BytesPerReq': 15403.6,
            'vhost.BusyWorkers': 1,
            'vhost.IdleWorkers': 17,
            'vhost.WritingWorkers': 1,
            'vhost.KeepaliveWorkers': 0,
            'vhost.ReadingWorkers': 0,
            'vhost.DnsWorkers': 0,
            'vhost.ClosingWorkers': 0,
            'vhost.LoggingWorkers': 0,
            'vhost.FinishingWorkers': 0,
            'vhost.CleanupWorkers': 0,
            'vhost.AccessesPerSec': 0,
        }
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_issue_533(self, publish_mock):
        # A trailing comma in the urls string must be tolerated.
        self.setUp(config={
            'urls': 'localhost http://localhost:80/server-status?auto,',
        })
        expected_urls = {'localhost': 'http://localhost:80/server-status?auto'}
        self.assertEqual(self.collector.urls, expected_urls)

    @patch.object(Collector, 'publish')
    def test_url_with_port(self, publish_mock):
        self.setUp(config={
            'urls': 'localhost http://localhost:80/server-status?auto',
        })
        expected_urls = {'localhost': 'http://localhost:80/server-status?auto'}
        self.assertEqual(self.collector.urls, expected_urls)

    @patch.object(Collector, 'publish')
    def test_url_without_port(self, publish_mock):
        self.setUp(config={
            'urls': 'localhost http://localhost/server-status?auto',
        })
        expected_urls = {'localhost': 'http://localhost/server-status?auto'}
        self.assertEqual(self.collector.urls, expected_urls)

    @patch.object(Collector, 'publish')
    def test_url_without_nickname(self, publish_mock):
        # With no nickname the URL is stored under the empty-string key.
        self.setUp(config={
            'urls': 'http://localhost/server-status?auto',
        })
        expected_urls = {'': 'http://localhost/server-status?auto'}
        self.assertEqual(self.collector.urls, expected_urls)

    @patch.object(Collector, 'publish')
    def test_issue_538(self, publish_mock):
        # Extra top-level config keys must not break URL parsing.
        self.setUp(config={
            'enabled': True,
            'path_suffix': "",
            'ttl_multiplier': 2,
            'measure_collector_time': False,
            'byte_unit': 'byte',
            'urls': 'localhost http://localhost:80/server-status?auto',
        })
        expected_urls = {'localhost': 'http://localhost:80/server-status?auto'}
        self.assertEqual(self.collector.urls, expected_urls)
################################################################################
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic RNN Cores for TensorFlow snt.
This file contains the definitions of the simplest building blocks for Recurrent
Neural Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from sonnet.python.modules import basic
from sonnet.python.modules import rnn_core
from sonnet.python.modules import util
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest
def _get_flat_core_sizes(cores):
  """Obtains the list flattened output sizes of a list of cores.

  Args:
    cores: list of cores to get the shapes from.

  Returns:
    List of lists that, for each core, contains the list of its output
    dimensions.
  """
  # One inner list per core: each nested output size converted to a plain
  # Python list of dimensions.
  return [[tensor_shape.as_shape(size).as_list()
           for size in nest.flatten(core.output_size)]
          for core in cores]
def _get_shape_without_batch_dimension(tensor_nest):
  """Converts Tensor nest to a TensorShape nest, removing batch dimension."""

  def _drop_batch(tensor):
    # Indexing a single example strips the leading batch dimension from the
    # static shape.
    return tensor[0].get_shape()

  return nest.map_structure(_drop_batch, tensor_nest)
class VanillaRNN(rnn_core.RNNCore):
  """Basic fully connected vanilla RNN core."""

  # Keys accepted in the initializers/partitioners/regularizers dicts.
  IN_TO_HIDDEN = "in_to_hidden"
  HIDDEN_TO_HIDDEN = "hidden_to_hidden"
  POSSIBLE_INITIALIZER_KEYS = {IN_TO_HIDDEN, HIDDEN_TO_HIDDEN}

  def __init__(self, hidden_size, activation=tf.tanh, initializers=None,
               partitioners=None, regularizers=None, name="vanilla_rnn"):
    """Construct a Basic RNN core.

    Args:
      hidden_size: hidden size dimensionality.
      activation: activation function to use.
      initializers: optional dict containing ops to initialize the weights.
        This dictionary may contain the keys 'in_to_hidden' and/or
        'hidden_to_hidden'.
      partitioners: optional dict containing ops to partition the weights.
        This dictionary may contain the keys 'in_to_hidden' and/or
        'hidden_to_hidden'.
      regularizers: optional dict containing ops to regularize the weights.
        This dictionary may contain the keys 'in_to_hidden' and/or
        'hidden_to_hidden'.
      name: name of the module.

    Raises:
      KeyError: if `initializers` contains any keys other than 'in_to_hidden'
        or 'hidden_to_hidden'.
      KeyError: if `partitioners` contains any keys other than 'in_to_hidden'
        or 'hidden_to_hidden'.
      KeyError: if `regularizers` contains any keys other than 'in_to_hidden'
        or 'hidden_to_hidden'.
      TypeError: If any of the given initializers are not callable.
      TypeError: If any of the given partitioners are not callable.
      TypeError: If any of the given regularizers are not callable.
    """
    super(VanillaRNN, self).__init__(name=name)
    self._hidden_size = hidden_size
    self._activation = activation
    # The util.check_* helpers validate keys and callability, raising
    # KeyError/TypeError as documented above.
    self._initializers = util.check_initializers(
        initializers, self.POSSIBLE_INITIALIZER_KEYS)
    self._partitioners = util.check_partitioners(
        partitioners, self.POSSIBLE_INITIALIZER_KEYS)
    self._regularizers = util.check_regularizers(
        regularizers, self.POSSIBLE_INITIALIZER_KEYS)

  def _build(self, input_, prev_state):
    """Connects the VanillaRNN module into the graph.

    If this is not the first time the module has been connected to the graph,
    the Tensors provided as input_ and state must have the same final
    dimension, in order for the existing variables to be the correct size for
    their corresponding multiplications. The batch size may differ for each
    connection.

    Args:
      input_: a 2D Tensor of size [batch_size, input_size].
      prev_state: a 2D Tensor of size [batch_size, hidden_size].

    Returns:
      output: a 2D Tensor of size [batch_size, hidden_size].
      next_state: a Tensor of size [batch_size, hidden_size].

    Raises:
      ValueError: if connecting the module into the graph any time after the
        first time, and the inferred size of the inputs does not match previous
        invocations.
    """
    # The Linear submodules are created inside _build; Sonnet's variable
    # sharing ensures the same variables are reused on later connections.
    self._in_to_hidden_linear = basic.Linear(
        self._hidden_size, name="in_to_hidden",
        initializers=self._initializers.get("in_to_hidden"),
        partitioners=self._partitioners.get("in_to_hidden"),
        regularizers=self._regularizers.get("in_to_hidden"))
    self._hidden_to_hidden_linear = basic.Linear(
        self._hidden_size, name="hidden_to_hidden",
        initializers=self._initializers.get("hidden_to_hidden"),
        partitioners=self._partitioners.get("hidden_to_hidden"),
        regularizers=self._regularizers.get("hidden_to_hidden"))
    in_to_hidden = self._in_to_hidden_linear(input_)
    hidden_to_hidden = self._hidden_to_hidden_linear(prev_state)
    output = self._activation(in_to_hidden + hidden_to_hidden)

    # For VanillaRNN, the next state of the RNN is the same as the output
    return output, output

  @property
  def in_to_hidden_linear(self):
    # Raises if the module has never been connected, since the submodule is
    # only created in _build.
    self._ensure_is_connected()
    return self._in_to_hidden_linear

  @property
  def hidden_to_hidden_linear(self):
    self._ensure_is_connected()
    return self._hidden_to_hidden_linear

  @property
  def in_to_hidden_variables(self):
    self._ensure_is_connected()
    return self._in_to_hidden_linear.get_variables()

  @property
  def hidden_to_hidden_variables(self):
    self._ensure_is_connected()
    return self._hidden_to_hidden_linear.get_variables()

  @property
  def state_size(self):
    return tf.TensorShape([self._hidden_size])

  @property
  def output_size(self):
    return tf.TensorShape([self._hidden_size])
class DeepRNN(rnn_core.RNNCore):
"""RNN core that passes data through a number of internal modules or ops.
This module is constructed by passing an iterable of externally constructed
modules or ops. The DeepRNN takes `(input, prev_state)` as input and passes
the input through each internal module in the order they were presented,
using elements from `prev_state` as necessary for internal recurrent cores.
The output is `(output, next_state)` in common with other RNN cores.
By default, skip connections from the input to all internal modules and from
each intermediate output to the final output are used.
E.g.:
```python
lstm1 = snt.LSTM(hidden_size=256)
lstm2 = snt.LSTM(hidden_size=256)
deep_rnn = snt.DeepRNN([lstm1, lstm2])
output, next_state = deep_rnn(input, prev_state)
```
The computation set up inside the DeepRNN has the same effect as:
```python
prev_state1, prev_state2 = prev_state
lstm1_output, next_state1 = lstm1(input, prev_state1)
lstm2_output, next_state2 = lstm(
tf.concat([input, lstm1_output], 1), prev_state2)
next_state = (next_state1, next_state2)
output = tf.concat([lstm1_output, lstm2_output], 1)
```
Every internal module receives the preceding module's output and the entire
core's input. The output is created by concatenating each internal module's
output. In the case of internal recurrent elements, corresponding elements
of the state are used such that `state[i]` is passed to the `i`'th internal
recurrent element. Note that the state of a `DeepRNN` is always a tuple, which
will contain the same number of elements as there are internal recurrent
cores. If no internal modules are recurrent, the state of the DeepRNN as a
whole is the empty tuple. Wrapping non-recurrent modules into a DeepRNN can
be useful to produce something API compatible with a "real" recurrent module,
simplifying code that handles the cores.
Without skip connections the previous example would become the following
(note the only difference is the addition of `skip_connections=False`):
```python
# ... declare other modules as above
deep_rnn = snt.DeepRNN([lin, tanh, lstm], skip_connections=False)
output, next_state = deep_rnn(input, prev_state)
```
which is equivalent to:
```python
lin_output = lin(input)
tanh_output = tanh(lin_output)
lstm_output, lstm_next_state = lstm(tanh_output, prev_state[0])
next_state = (lstm_next_state,)
output = lstm_output
```
Note: when using skip connections, all the cores should be recurrent.
"""
def __init__(self, cores, skip_connections=True,
concat_final_output_if_skip=True, name="deep_rnn"):
"""Construct a Deep RNN core.
Args:
cores: iterable of modules or ops.
skip_connections: a boolean that indicates whether to use skip
connections. This means that the input is fed to all the layers, after
being concatenated with the output of the previous layer. The output
of the module will be the concatenation of all the outputs of the
internal modules.
concat_final_output_if_skip: A boolean that indicates whether the outputs
of intermediate layers should be concatenated into the timestep-wise
output of the core. By default this is True. If this is set to False,
then the core output is that of the final layer, i.e. that of
`cores[-1]`.
name: name of the module.
Raises:
ValueError: if `cores` is not an iterable, or if `skip_connections` is
True and not all the modules are recurrent.
"""
super(DeepRNN, self).__init__(name=name)
if not isinstance(cores, collections.Iterable):
raise ValueError("Cores should be an iterable object.")
self._cores = tuple(cores)
self._skip_connections = skip_connections
self._concat_final_output_if_skip = concat_final_output_if_skip
self._is_recurrent_list = [isinstance(core, rnn_core.RNNCore)
for core in self._cores]
if self._skip_connections:
tf.logging.warning(
"The `skip_connections` argument will be deprecated. Please use "
"snt.SkipConnectionCore instead."
)
if not all(self._is_recurrent_list):
raise ValueError("skip_connections are enabled but not all cores are "
"`snt.RNNCore`s, which is not supported. The following"
" cores were specified: {}.".format(self._cores))
self._check_cores_output_sizes()
self._num_recurrent = sum(self._is_recurrent_list)
def _check_cores_output_sizes(self):
"""Checks the output_sizes of the cores of the DeepRNN module.
Raises:
ValueError: if the outputs of the cores cannot be concatenated along their
first dimension.
"""
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
first_core_list = core_sizes[0][1:]
for i, core_list in enumerate(core_sizes[1:]):
if core_list[1:] != first_core_list:
raise ValueError("The outputs of the provided cores are not able "
"to be concatenated along the first feature "
"dimension. Core 0 has size %s, whereas Core %d "
"has size %s" % (first_core_list, i, core_list))
  def _build(self, inputs, prev_state):
    """Connects the DeepRNN module into the graph.

    If this is not the first time the module has been connected to the graph,
    the Tensors provided as input_ and state must have the same final
    dimension, in order for the existing variables to be the correct size for
    their corresponding multiplications. The batch size may differ for each
    connection.

    Args:
      inputs: a nested tuple of Tensors of arbitrary dimensionality, with at
        least an initial batch dimension.
      prev_state: a tuple of `prev_state`s that corresponds to the state
        of each one of the cores of the `DeepCore`.

    Returns:
      output: a nested tuple of Tensors of arbitrary dimensionality, with at
        least an initial batch dimension.
      next_state: a tuple of `next_state`s that corresponds to the updated
        state of each one of the cores of the `DeepCore`.

    Raises:
      ValueError: if connecting the module into the graph any time after the
        first time, and the inferred size of the inputs does not match previous
        invocations. This may happen if one connects a module any time after
        the first time that does not have the configuration of skip
        connections as the first time.
    """
    current_input = inputs
    next_states = []
    outputs = []
    # Index into prev_state; advanced only when a recurrent core is seen.
    recurrent_idx = 0
    for i, core in enumerate(self._cores):
      if self._skip_connections and i > 0:
        # Concatenate the original module input onto the previous core's
        # output, element-wise across the input nest.
        flat_input = (nest.flatten(inputs), nest.flatten(current_input))
        flat_input = [tf.concat(input_, 1) for input_ in zip(*flat_input)]
        current_input = nest.pack_sequence_as(structure=inputs,
                                              flat_sequence=flat_input)

      # Determine if this core in the stack is recurrent or not and call
      # accordingly.
      if self._is_recurrent_list[i]:
        current_input, next_state = core(current_input,
                                         prev_state[recurrent_idx])
        next_states.append(next_state)
        recurrent_idx += 1
      else:
        current_input = core(current_input)

      if self._skip_connections:
        outputs.append(current_input)

    if self._skip_connections and self._concat_final_output_if_skip:
      # Concatenate every core's output along the feature dimension.
      flat_outputs = tuple(nest.flatten(output) for output in outputs)
      flat_outputs = [tf.concat(output, 1) for output in zip(*flat_outputs)]
      output = nest.pack_sequence_as(structure=outputs[0],
                                     flat_sequence=flat_outputs)
    else:
      # Without concatenation the module output is just the last core's.
      output = current_input

    return output, tuple(next_states)
  def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
                    trainable_initializers=None, trainable_regularizers=None,
                    name=None):
    """Builds the default start state for a DeepRNN.

    Args:
      batch_size: An int, float or scalar Tensor representing the batch size.
      dtype: The data type to use for the state.
      trainable: Boolean that indicates whether to learn the initial state.
      trainable_initializers: An initializer function or nested structure of
        functions with same structure as the `state_size` property of the
        core, to be used as initializers of the initial state variable.
      trainable_regularizers: Optional regularizer function or nested structure
        of functions with the same structure as the `state_size` property of
        the core, to be used as regularizers of the initial state variable. A
        regularizer should be a function that takes a single `Tensor` as an
        input and returns a scalar `Tensor` output, e.g. the L1 and L2
        regularizers in `tf.contrib.layers`.
      name: Optional string used to prefix the initial state variable names,
        in the case of a trainable initial state. If not provided, defaults to
        the name of the module.

    Returns:
      A tensor or nested tuple of tensors with same structure and shape as the
      `state_size` property of the core.

    Raises:
      ValueError: if the number of passed initializers is not the same as the
        number of recurrent cores.
    """
    initial_state = []
    if trainable_initializers is None:
      trainable_initializers = [None] * self._num_recurrent
    if trainable_regularizers is None:
      trainable_regularizers = [None] * self._num_recurrent

    # One initializer/regularizer must be supplied per recurrent core.
    num_initializers = len(trainable_initializers)
    if num_initializers != self._num_recurrent:
      raise ValueError("The number of initializers and recurrent cores should "
                       "be the same. Received %d initializers for %d specified "
                       "recurrent cores."
                       % (num_initializers, self._num_recurrent))

    with tf.name_scope(self._initial_state_scope(name)):
      # recurrent_idx tracks the position among recurrent cores only.
      recurrent_idx = 0
      for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
        if is_recurrent:
          core_initial_state = core.initial_state(
              batch_size, dtype=dtype, trainable=trainable,
              trainable_initializers=trainable_initializers[recurrent_idx],
              trainable_regularizers=trainable_regularizers[recurrent_idx])
          initial_state.append(core_initial_state)
          recurrent_idx += 1
    return tuple(initial_state)
@property
def state_size(self):
sizes = []
for is_recurrent, core in zip(self._is_recurrent_list, self._cores):
if is_recurrent:
sizes.append(core.state_size)
return tuple(sizes)
  @property
  def output_size(self):
    """Returns the output size(s) reported for this DeepRNN.

    When skip connections are enabled and the final output is concatenated,
    the per-core output shapes are summed along their leading feature
    dimension. Otherwise the final core's `output_size` is used, falling back
    (with warnings) to the last connected output or to the last core in the
    stack that reports an `output_size`.
    """
    if self._skip_connections and self._concat_final_output_if_skip:
      output_size = []
      # Walk the flattened per-core sizes in lockstep, so each `core_sizes`
      # holds the matching shape entry from every core.
      for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
        added_core_size = core_sizes[0]
        # Sum the leading dimension across cores — concatenation stacks the
        # cores' features there. Assumes shapes agree on remaining dims;
        # TODO(review) confirm against _get_flat_core_sizes.
        added_core_size[0] = sum([size[0] for size in core_sizes])
        output_size.append(tf.TensorShape(added_core_size))
      return nest.pack_sequence_as(structure=self._cores[0].output_size,
                                   flat_sequence=output_size)
    else:
      # Assumes that an element of cores which does not have the output_size
      # property does not affect the output shape. Then the 'last' core in the
      # sequence with output_size information should be the output_size of the
      # DeepRNN. This heuristic is error prone, but we would lose a lot of
      # flexibility if we tried to enforce that the final core must have an
      # output_size field (e.g. it would be impossible to add a TF nonlinearity
      # as the final "core"), but we should at least print a warning if this
      # is the case.
      final_core = self._cores[-1]
      if hasattr(final_core, "output_size"):
        # This is definitely the correct value, so no warning needed.
        return final_core.output_size
      # If we have connected the module at least once, we can get the output
      # size of whatever was actually produced. The indexing of [-1] gets us
      # the most recent connection, and [0] gets us the first element of the
      # output tuple as opposed to the recurrent state.
      if self._connected_subgraphs:
        last_connected_output_size = _get_shape_without_batch_dimension(
            self._connected_subgraphs[-1].outputs[0])
        tf.logging.warning(
            "Final core does not contain .output_size, but the "
            "DeepRNN has been connected into the graph, so inferred output "
            "size as %s", last_connected_output_size)
        return last_connected_output_size
      # If all else fails, iterate backwards through cores and return the
      # first one which has an output_size field. This can be incorrect in
      # various ways, so warn loudly.
      try:
        guessed_output_size = next(core.output_size
                                   for core in reversed(self._cores)
                                   if hasattr(core, "output_size"))
      except StopIteration:
        raise ValueError("None of the 'cores' have output_size information.")
      tf.logging.warning(
          "Trying to infer output_size of DeepRNN, but the final core %s does "
          "not have the .output_size field. The guessed output_size is %s "
          "but this may not be correct. If you see shape errors following this "
          "warning, you must change the cores used in the DeepRNN so that "
          "the final core used has a correct .output_size property.",
          final_core, guessed_output_size)
      return guessed_output_size
class ModelRNN(rnn_core.RNNCore):
  """RNNCore that ignores its inputs and steps a model over its state."""

  def __init__(self, model, name="model_rnn"):
    """Builds a ModelRNN.

    Args:
      model: callable mapping the previous state to the next state.
      name: name of the module.

    Raises:
      TypeError: if `model` is not callable, or if it is itself an RNNCore.
      AttributeError: if `model` exposes no `output_size` attribute.
    """
    super(ModelRNN, self).__init__(name=name)
    if not callable(model):
      raise TypeError("Model must be callable.")
    if isinstance(model, rnn_core.RNNCore):
      raise TypeError("Model should not be an RNNCore.")
    try:
      self._output_size = model.output_size
    except AttributeError:
      raise AttributeError("Model should have an output_size attribute.")
    self._model = model

  def _build(self, inputs, prev_state):
    """Connects the ModelRNN module into the graph.

    The batch size may differ between connections, but the trailing state
    dimension must stay consistent so existing variables remain valid.

    Args:
      inputs: Tensor input to the ModelRNN (ignored).
      prev_state: Tensor of size `model.output_size`.

    Returns:
      A `(output, next_state)` pair, both of size `model.output_size`.
    """
    next_state = self._model(prev_state)
    # By definition the output of a ModelRNN is its freshly computed state.
    return next_state, next_state

  @property
  def state_size(self):
    return self._output_size

  @property
  def output_size(self):
    return self._output_size
| |
import re
import os
from cumulusci.tasks.release_notes.github_api import GithubApiMixin
class BaseChangeNotesParser(object):
    """Abstract base for change-note parsers.

    Subclasses implement ``parse`` and ``_render``; ``render`` wraps the
    rendered body under the parser's title as a Markdown h1 heading.
    """

    def __init__(self, title):
        self.title = title
        self.content = []

    def parse(self):
        raise NotImplementedError()

    def render(self):
        body = self._render()
        return '# {}\r\n\r\n{}'.format(self.title, body)

    def _render(self):
        raise NotImplementedError()
class ChangeNotesLinesParser(BaseChangeNotesParser):
    """Collects the lines of a titled section out of each change note.

    A section starts at a line equal to ``# <title>`` (case-insensitive)
    and ends at the next top-level heading. Lines under an ``## <h2>``
    sub-heading are grouped separately in ``self.h2``.
    """

    def __init__(self, release_notes_generator, title):
        super(ChangeNotesLinesParser, self).__init__(title)
        self.release_notes_generator = release_notes_generator
        self.title = title
        self._in_section = False
        self.h2 = {}  # dict of h2 sections - key=header, value is list of lines
        self.h2_title = None  # has value when in h2 section

    def parse(self, change_note):
        """Accumulate this section's lines from ``change_note``."""
        change_note = self._process_change_note(change_note)
        for line in change_note.splitlines():
            line = self._process_line(line)
            # Look for the starting line of the section
            if self._is_start_line(line):
                self._in_section = True
                continue
            # Look for h2
            if line.startswith('## '):
                # Strip trailing closing hashes, e.g. "## Foo ##" -> "Foo"
                # (raw string avoids the invalid '\s' escape on Python 3)
                self.h2_title = re.sub(r'\s+#+$', '', line[3:]).lstrip()
                continue
            # Add all content once in the section
            if self._in_section:
                # End when the end of section is found
                if self._is_end_line(line):
                    # If the line starts the section again, continue
                    if self._is_start_line(line):
                        continue
                    self._in_section = False
                    self.h2_title = None
                    continue
                # Skip excluded lines
                if self._is_excluded_line(line):
                    continue
                self._add_line(line)
        self._in_section = False

    def _process_change_note(self, change_note):
        # subclasses override this if some manipulation is needed
        return change_note

    def _process_line(self, line):
        """Decode bytes to unicode (Python 2 only) and strip trailing space."""
        try:
            line = unicode(line, 'utf-8')
        except NameError:
            # BUG FIX: on Python 3 ``unicode`` does not exist, which
            # previously raised an uncaught NameError; str is already text.
            pass
        except TypeError:
            # Python 2: ``line`` was already a unicode object.
            pass
        return line.rstrip()

    def _is_excluded_line(self, line):
        """Truthy for lines to skip (currently only blank lines)."""
        if not line:
            return True

    def _is_start_line(self, line):
        """Truthy when ``line`` is this section's h1 heading."""
        if self.title:
            return line.upper() == '# {}'.format(self.title.upper())

    def _is_end_line(self, line):
        # Also treat any new top level heading as end of section
        if line.startswith('# '):
            return True

    def _add_line(self, line):
        """File the line under the active h2 section, or the main content."""
        line = self._add_link(line)
        if self.h2_title:
            if self.h2_title not in self.h2:
                self.h2[self.h2_title] = []
            self.h2[self.h2_title].append(line)
            return
        self.content.append(line)

    def _add_link(self, line):
        # Hook for subclasses to decorate a line (e.g. link back to a PR).
        return line

    def render(self):
        """Return the rendered section, or None if nothing was collected."""
        if not self.content:
            return None
        content = []
        content.append(self._render_header())
        content.append(self._render_content())
        if self.h2:
            content.append(self._render_h2())
        return u'\r\n'.join(content)

    def _render_header(self):
        return u'# {}\r\n'.format(self.title)

    def _render_content(self):
        return u'\r\n'.join(self.content)

    def _render_h2(self):
        content = []
        for h2_title in self.h2.keys():
            content.append(u'\r\n## {}\r\n'.format(h2_title))
            content.append(u'\r\n'.join(self.h2[h2_title]))
        return u'\r\n'.join(content)
class GithubLinesParser(ChangeNotesLinesParser):
    """Lines parser whose change notes are GitHub pull request dicts."""

    def __init__(self, release_notes_generator, title):
        super(GithubLinesParser, self).__init__(release_notes_generator, title)
        self.pr_number = None
        self.pr_url = None

    def _process_change_note(self, pull_request):
        """Remember the source PR's number/URL, then parse its body text."""
        self.pr_number, self.pr_url = (pull_request['number'],
                                       pull_request['html_url'])
        return pull_request['body']
class GithubLinkingLinesParser(GithubLinesParser):
    """GithubLinesParser that appends a link to the source PR to each line."""

    def _add_link(self, line):
        pr_link = ' [[PR{}]({})]'.format(self.pr_number, self.pr_url)
        return line + pr_link
class IssuesParser(ChangeNotesLinesParser):
    """Parses bare issue numbers (e.g. ``#123``) out of a section.

    ``self.content`` holds the collected issue numbers as ints; rendering
    sorts them and re-prefixes each with ``#``.
    """

    def __init__(self, release_notes_generator, title,
                 issue_regex=None):
        super(IssuesParser, self).__init__(
            release_notes_generator,
            title,
        )
        # Callers may override how an issue reference is recognized.
        if issue_regex:
            self.issue_regex = issue_regex
        else:
            self.issue_regex = self._get_default_regex()

    def _add_line(self, line):
        # find issue numbers per line
        issue_numbers = re.findall(self.issue_regex, line, flags=re.IGNORECASE)
        for issue_number in issue_numbers:
            self.content.append(int(issue_number))

    def _get_default_regex(self):
        # BUG FIX: raw string — '\d' in a plain literal is an invalid
        # escape sequence on Python 3 (DeprecationWarning/SyntaxWarning).
        return r'#(\d+)'

    def _render_content(self):
        issues = []
        for issue in sorted(self.content):
            issues.append('#{}'.format(issue))
        return u'\r\n'.join(issues)
class ParserGithubApiMixin(GithubApiMixin):
    """Wires GithubApiMixin to a parser's release notes generator."""

    @property
    def current_tag(self):
        """Tag currently being processed by the generator."""
        generator = self.release_notes_generator
        return generator.current_tag

    @property
    def github_info(self):
        # By default, look for github config info in the release_notes
        # property. Subclasses can override this if needed
        generator = self.release_notes_generator
        return generator.github_info
class GithubIssuesParser(IssuesParser, ParserGithubApiMixin):
    """IssuesParser that resolves issue titles through the GitHub API.

    Only issue references preceded by a closing keyword (fixes/closes/
    resolves...) are collected, and each is remembered together with the
    pull request it came from so the PR can optionally be linked.
    """

    def __init__(self, release_notes_generator, title, issue_regex=None,
                 link_pr=False):
        super(GithubIssuesParser, self).__init__(
            release_notes_generator,
            title,
            issue_regex,
        )
        self.link_pr = link_pr
        self.pr_number = None
        self.pr_url = None

    def _add_line(self, line):
        """Collect each referenced issue along with its source PR."""
        for issue_number in re.findall(self.issue_regex, line,
                                       flags=re.IGNORECASE):
            self.content.append({
                'issue_number': int(issue_number),
                'pr_number': self.pr_number,
                'pr_url': self.pr_url,
            })

    def _get_default_regex(self):
        """Match issue refs preceded by a GitHub closing keyword."""
        keywords = (
            'close', 'closes', 'closed',
            'fix', 'fixes', 'fixed',
            'resolve', 'resolves', 'resolved',
        )
        return r'(?:{})\s#(\d+)'.format('|'.join(keywords))

    def _render_content(self):
        rendered = []
        for item in sorted(self.content, key=lambda k: k['issue_number']):
            issue_info = self._get_issue_info(item['issue_number'])
            txt = '#{}: {}'.format(item['issue_number'], issue_info['title'])
            if self.link_pr:
                txt += ' [[PR{}]({})]'.format(
                    item['pr_number'],
                    item['pr_url'],
                )
            rendered.append(txt)
        return u'\r\n'.join(rendered)

    def _get_issue_info(self, issue_number):
        return self.call_api('/issues/{}'.format(issue_number))

    def _process_change_note(self, pull_request):
        """Remember the source PR's number/URL, then parse its body text."""
        self.pr_number = pull_request['number']
        self.pr_url = pull_request['html_url']
        return pull_request['body']
class CommentingGithubIssuesParser(GithubIssuesParser):
    """GithubIssuesParser that also comments on each fixed issue.

    Before fetching an issue's info, posts a "(beta/production) release"
    comment on it — unless an equivalent comment already exists.
    """

    message_prod = 'Included in production release'
    message_beta = 'Included in beta release'

    def _get_issue_info(self, issue_number):
        self._add_issue_comment(issue_number)
        return super(CommentingGithubIssuesParser, self)._get_issue_info(
            issue_number)

    def _add_issue_comment(self, issue_number):
        # Ensure all issues have a comment on which release they were fixed
        gh_issue_comments = self.call_api(
            '/issues/{}/comments'.format(issue_number))
        current_tag_info = self.current_tag_info
        is_prod = current_tag_info['is_prod']
        is_beta = current_tag_info['is_beta']
        has_comment = False
        for comment in gh_issue_comments:
            if is_prod:
                if comment['body'].startswith(self.message_prod):
                    has_comment = True
            elif is_beta:
                if comment['body'].startswith(self.message_beta):
                    has_comment = True
        if has_comment:
            return
        data = {}
        if is_prod:
            data['body'] = '{} {}'.format(
                self.message_prod,
                current_tag_info['version_number'],
            )
        elif is_beta:
            data['body'] = '{} {}'.format(
                self.message_beta,
                current_tag_info['version_number'],
            )
        if data:
            self.call_api(
                '/issues/{}/comments'.format(issue_number),
                data=data,
            )
| |
# -*- coding: utf-8 -*-
import _version
from setuptools.command.build_py import build_py
from setuptools.command.install import install
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
import fnmatch
import glob
import shutil
import os
import subprocess
import sys
try:
import sphinx
from sphinx.setup_command import BuildDoc
except ImportError:
sphinx = None
# Canonical project name, used for packaging metadata and path walks below.
PROJECT = "spectrocrunch"
########################################
## Disable hardlinks when not working ##
########################################
# Some filesystems advertise os.link but fail when it is used (e.g. shared
# folders in VMs — TODO confirm the motivating environment). Probe with a
# throwaway link; if it fails, delete os.link so setuptools falls back to
# copying files instead of hardlinking them.
if hasattr(os, "link"):
    # NOTE(review): this name shadows the stdlib ``tempfile`` module at
    # module scope; harmless here since ``tempfile`` is never imported.
    tempfile = __file__ + ".tmp"
    try:
        os.link(__file__, tempfile)
    except OSError as e:
        del os.link
    finally:
        # Clean up the probe link whether or not it was created.
        if os.path.exists(tempfile):
            os.remove(tempfile)
###########################
## Get setup information ##
###########################
def get_version():
    """Return the project's strict version string from ``_version``."""
    version = _version.strictversion
    return version
def get_devstatus():
    """Map the release level from ``_version`` onto a Trove classifier."""
    level_to_status = {"dev": 2, "alpha": 3, "beta": 4, "rc": 5, "final": 6}
    status_names = {
        1: "Planning",
        2: "Pre-Alpha",
        3: "Alpha",
        4: "Beta",
        5: "Production/Stable",
        6: "Mature",
        7: "Inactive",
    }
    status = level_to_status[_version.version_info.releaselevel]
    return "Development Status :: %d - %s" % (status, status_names[status])
def get_readme():
    """Return the contents of README.rst next to this setup script."""
    here = os.path.dirname(os.path.abspath(__file__))
    readme_path = os.path.join(here, "README.rst")
    with open(readme_path, "r") as fp:
        return fp.read()
#####################
## Command classes ##
#####################
# Command name -> command class mapping, passed to setup(cmdclass=...).
# Populated incrementally as each command class is defined below.
cmdclass = {}
class DisabledCommand(Command):
    """Setuptools command stub that refuses to run.

    Subclasses override ``_MSG`` to explain why the command is unavailable.
    """

    user_options = []
    _MSG = "Command is disabled."

    def initialize_options(self):
        """Nothing to initialize."""

    def finalize_options(self):
        """Nothing to finalize."""

    def run(self):
        raise RuntimeError(self._MSG)
#######################
## "version" command ##
#######################
class VersionOfAllPackages(Command):
    """Print the project version derived from ``_version``."""

    description = "Get project version"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        message = "This version of {} is {}".format(PROJECT, _version.version)
        print(message)


cmdclass["version"] = VersionOfAllPackages
########################
## "build_py" command ##
########################
class BuildWithVersion(build_py):
    """build_py variant that bundles the top-level ``_version.py`` module
    into each top-level package so version info ships with the build."""

    description = "build with version info"

    def find_package_modules(self, package, package_dir):
        modules = build_py.find_package_modules(self, package, package_dir)
        # Only inject _version into top-level packages (no dot in the name).
        is_top_level = "." not in package
        if is_top_level:
            modules.append((package, "_version", "_version.py"))
        return modules


cmdclass["build_py"] = BuildWithVersion
#########################
## "build_doc" command ##
#########################
# Provide a working doc builder when sphinx is importable, otherwise a stub
# that fails loudly with an explanation.
if sphinx is not None:
    class BuildDocCommand(BuildDoc):
        # Builds both HTML and LaTeX documentation trees under build_dir.
        description = "Build documentation from source"
        def run(self):
            # Make sure the python path is pointing to the newly built
            # code so that the documentation is built on this and not a
            # previously installed version
            build = self.get_finalized_command("build")
            sys.path.insert(0, os.path.abspath(build.build_lib))
            # Run the sphinx build once per output format.
            for builder in ["html", "latex"]:
                self.builder = builder
                self.builder_target_dir = os.path.join(self.build_dir, builder)
                self.mkpath(self.builder_target_dir)
                BuildDoc.run(self)
            # Restore sys.path to its pre-build state.
            sys.path.pop(0)
else:
    class BuildDocCommand(DisabledCommand):
        _MSG = "Sphinx is required to build or test the documentation."
cmdclass["build_doc"] = BuildDocCommand
#####################
## "clean" command ##
#####################
class CleanCommand(Command):
    """Remove build artifacts and compiled files from the project tree."""

    description = "Clean build and compiled files"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Build output directories (errors ignored when absent).
        shutil.rmtree("./build", True)
        shutil.rmtree("./dist", True)
        # Egg metadata directories in the project root.
        for dirname in glob.glob("*.egg-info"):
            shutil.rmtree(dirname, True)
        # Compiled bytecode anywhere inside the package tree.
        for root, dirnames, filenames in os.walk(PROJECT):
            for filename in fnmatch.filter(filenames, "*.pyc"):
                os.remove(os.path.join(root, filename))


cmdclass["clean"] = CleanCommand
#####################
## "name" command ##
#####################
class NameCommand(Command):
    """Print the project name to stdout."""

    user_options = []

    def initialize_options(self):
        """Nothing to initialize."""

    def finalize_options(self):
        """Nothing to finalize."""

    def run(self):
        print(PROJECT)


cmdclass["name"] = NameCommand
#######################
## Trove classifiers ##
#######################
# Trove classifiers advertised on PyPI; the development status entry is
# derived from the release level in _version.
classifiers = [
    get_devstatus(),
    "Environment :: Console",
    ## 'Environment :: MacOS X',
    ## 'Environment :: Win32 (MS Windows)',
    ## 'Environment :: X11 Applications :: Qt',
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Natural Language :: English",
    ## 'Operating System :: Microsoft :: Windows',
    "Operating System :: POSIX :: Linux",
    ## 'Operating System :: MacOS :: MacOS X',
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Topic :: Documentation :: Sphinx",
    "Topic :: Scientific/Engineering :: Physics",
    "Topic :: Software Development :: Libraries :: Python Modules",
]
##################
## Requirements ##
##################
# Runtime dependencies. BUG FIX: "future" was previously listed twice
# (duplicate entry removed; the set of requirements is unchanged).
install_requires = [
    "setuptools",
    "numpy",
    "future",
    "scipy",
    "h5py",
    "fabio",
    "silx",
    "pyparsing",
    "shapely",
    "matplotlib",
    "uncertainties",
    "pint",
    "pandas",
    "scikit-image",
    "xlsxwriter",
    "xlrd",
    "openpyxl",
    "python-dateutil",
    "jsonpickle",
    "testfixtures",
    "cvxopt",
]
# Optional dependency groups, installable as spectrocrunch[<group>].
extras_require = {
    "physics": ["xraylib", "cctbx", "fdmnes", "PyTMM"],
    "elastix": ["SimpleITK"],
}
# Needed at build/installation time (setup_requires is honored by setuptools).
setup_requires = ["setuptools", "testfixtures"]
###################
## Package setup ##
###################
# Main packaging entry point; all metadata and custom commands assembled above.
setup(
    name=PROJECT,
    version=get_version(),
    url="https://github.com/woutdenolf/spectrocrunch",
    author="Wout De Nolf",
    author_email="woutdenolf@users.sf.net",
    classifiers=classifiers,
    description="Spectroscopic imaging library (XRF/XAS)",
    long_description=get_readme(),
    install_requires=install_requires,
    extras_require=extras_require,
    setup_requires=setup_requires,
    packages=find_packages(),
    # Ship any data file one directory below the resources package.
    package_data={"spectrocrunch.resources": ["*/*.*"]},
    license="MIT",
    cmdclass=cmdclass,
    test_suite="{}.tests.test_all.test_suite".format(PROJECT),
)
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Quark Pluggable IPAM
"""
import functools
import itertools
import random
import time
import uuid
import netaddr
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from oslo.config import cfg
from oslo.db import exception as db_exception
from oslo.utils import timeutils
from oslo_concurrency import lockutils
from oslo_log import log as logging
from quark.db import api as db_api
from quark.db import ip_types
from quark.db import models
from quark import exceptions as q_exc
from quark import utils
# Module-level logger and global config handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Tunables for IPAM retry behavior, registered under the [QUARK] group.
# NOTE(review): ``_`` is assumed to be the gettext translation hook that
# neutron installs as a builtin — confirm before running standalone.
quark_opts = [
    cfg.IntOpt('v6_allocation_attempts',
               default=10,
               help=_('Number of times to retry generating v6 addresses'
                      ' before failure. Also implicitly controls how many'
                      ' v6 addresses we assign to any port, as the random'
                      ' values generated will be the same every time.')),
    cfg.IntOpt("mac_address_retry_max",
               default=20,
               help=_("Number of times to attempt to allocate a new MAC"
                      " address before giving up.")),
    cfg.IntOpt("ip_address_retry_max",
               default=20,
               help=_("Number of times to attempt to allocate a new IP"
                      " address before giving up.")),
    cfg.BoolOpt("ipam_use_synchronization",
                default=False,
                help=_("Configures whether or not to use the experimental"
                       " semaphore logic around IPAM")),
    cfg.BoolOpt("ipam_select_subnet_v6_locking",
                default=True,
                help=_("Controls whether or not SELECT ... FOR UPDATE is used"
                       " when retrieving v6 subnets explicitly."))
]
CONF.register_opts(quark_opts, "QUARK")
# NOTE(mdietz): equivalent to the following line, but converting
#               v6 addresses in netaddr is very slow.
#               netaddr.IPAddress("::0200:0:0:0").value
MAGIC_INT = 144115188075855872
def no_synchronization(*args, **kwargs):
    """Signature-compatible stand-in for ``lockutils.synchronized``.

    Accepts and ignores the same arguments, returning a decorator that
    calls the wrapped function directly with no locking at all.
    """
    def wrap(f):
        @functools.wraps(f)
        def inner(*call_args, **call_kwargs):
            return f(*call_args, **call_kwargs)
        return inner
    return wrap
def named(sema):
    """Return the semaphore name qualified with this module's name."""
    qualified = "%s.%s" % (__name__, sema)
    return qualified
# Choose the locking decorator once at import time: real synchronization
# when the experimental flag is set, otherwise a no-op with the same
# call signature so decorated methods are unaffected.
if CONF.QUARK.ipam_use_synchronization:
    synchronized = lockutils.synchronized
else:
    synchronized = no_synchronization
def rfc2462_ip(mac, cidr):
    """Derive a stateless-autoconfig (RFC 2462) v6 address inside ``cidr``.

    The interface identifier is the EUI-64 expansion of ``mac``; the xor
    with MAGIC_INT flips the universal/local bit (see module constant).
    Returns the address as a plain integer.
    """
    network_base = netaddr.IPNetwork(cidr).value
    interface_id = netaddr.EUI(mac).eui64().value
    return (network_base + interface_id) ^ MAGIC_INT
def rfc3041_ip(port_id, cidr):
    """Yield an endless stream of privacy (RFC 3041) v6 addresses in ``cidr``.

    The PRNG is seeded from the port UUID, so the candidate sequence is
    deterministic per port. Addresses are yielded as plain integers.
    """
    random.seed(int(uuid.UUID(port_id)))
    network_base = netaddr.IPNetwork(cidr).value
    while True:
        candidate = network_base + random.getrandbits(64)
        yield candidate ^ MAGIC_INT
def generate_v6(mac, port_id, cidr):
    """Yield candidate v6 addresses for a port within ``cidr``.

    The RFC 2462 (MAC-derived) address comes first when a MAC is known,
    followed by an endless RFC 3041 stream. NOTE(mdietz): RM10879 - when no
    MAC is available (e.g. IPs created via the ip_addresses controller) we
    simply skip the RFC 2462 candidate instead of failing.
    """
    if mac is not None:
        yield rfc2462_ip(mac, cidr)
    for candidate in rfc3041_ip(port_id, cidr):
        yield candidate
def ipam_logged(fx):
    """Decorator threading a fresh QuarkIPAMLog through ``fx``.

    The log is injected as the ``ipam_log`` keyword argument and is always
    closed (emitting its summary) once the call completes, even on error.
    """
    def wrap(self, *args, **kwargs):
        ipam_log = QuarkIPAMLog()
        kwargs['ipam_log'] = ipam_log
        try:
            return fx(self, *args, **kwargs)
        finally:
            ipam_log.end()
    return wrap
class QuarkIPAMLog(object):
    """Aggregates per-attempt timing/success entries for one IPAM operation.

    Entries are created via make_entry() and summarized to the debug log
    when end() is called.
    """

    def __init__(self):
        self.entries = {}  # fx_name -> list of QuarkIPAMLogEntry
        self.success = True

    def make_entry(self, fx_name):
        """Create, record and return a new entry for the named function."""
        if fx_name not in self.entries:
            self.entries[fx_name] = []
        entry = QuarkIPAMLogEntry(self, fx_name)
        self.entries[fx_name].append(entry)
        return entry

    def _output(self, status, time_total, fails, successes):
        # BUG FIX: the ``status`` parameter was previously ignored and
        # ``self.success`` re-read. Use the parameter — the only caller
        # passes self.success, so behavior is unchanged.
        status_msg = "SUCCESS" if status else "FAILED"
        LOG.debug("STATUS:%s TIME:%f ATTEMPTS:%d PASS:%d FAIL:%d" %
                  (status_msg, time_total, fails + successes, successes,
                   fails))

    def end(self):
        """Summarize all recorded entries to the debug log."""
        total = 0
        fails = 0
        successes = 0
        for fx, entries in self.entries.items():
            for entry in entries:
                total += entry.get_time()
                if entry.success:
                    successes += 1
                else:
                    fails += 1
        self._output(self.success, total, fails, successes)

    def failed(self):
        """Mark the whole operation as unsuccessful."""
        self.success = False
class QuarkIPAMLogEntry(object):
    """Timing/success record for a single IPAM attempt."""

    def __init__(self, log, name):
        self.log = log
        self.name = name
        self.success = True
        self.start_time = time.time()

    def failed(self):
        """Mark this attempt as unsuccessful."""
        self.success = False

    def end(self):
        """Record the completion time of this attempt."""
        self.end_time = time.time()

    def get_time(self):
        """Return elapsed seconds, or 0 if end() was never called."""
        end_time = getattr(self, 'end_time', None)
        if end_time is None:
            return 0
        return end_time - self.start_time
class QuarkIpam(object):
    @synchronized(named("allocate_mac_address"))
    def allocate_mac_address(self, context, net_id, port_id, reuse_after,
                             mac_address=None,
                             use_forbidden_mac_range=False):
        """Allocate a MAC address for a port, in three steps.

        1. Try to reclaim a previously deallocated MAC (older than
           ``reuse_after``); 2. otherwise find a MAC range with free space;
        3. then create a brand-new MAC row inside that range. Each step is
        retried up to CONF.QUARK.mac_address_retry_max times.

        Raises:
            exceptions.MacAddressGenerationFailure: when every attempt fails.
        """
        if mac_address:
            # Normalize the requested MAC to its integer representation.
            mac_address = netaddr.EUI(mac_address).value
        kwargs = {"network_id": net_id, "port_id": port_id,
                  "mac_address": mac_address,
                  "use_forbidden_mac_range": use_forbidden_mac_range}
        LOG.info(("Attempting to allocate a new MAC address "
                  "[{0}]").format(utils.pretty_kwargs(**kwargs)))
        for retry in xrange(CONF.QUARK.mac_address_retry_max):
            LOG.info("Attemping to reallocate deallocated MAC (step 1 of 3),"
                     " attempt {0} of {1}".format(
                         retry + 1, CONF.QUARK.mac_address_retry_max))
            try:
                with context.session.begin():
                    transaction = db_api.transaction_create(context)
                    update_kwargs = {
                        "deallocated": False,
                        "deallocated_at": None,
                        "transaction_id": transaction.id
                    }
                    filter_kwargs = {
                        "reuse_after": reuse_after,
                        "deallocated": True,
                        "address": mac_address
                    }
                    elevated = context.elevated()
                    result = db_api.mac_address_reallocate(
                        elevated, update_kwargs, **filter_kwargs)
                    if not result:
                        # Nothing reallocatable exists; fall through to the
                        # create path rather than retrying.
                        break
                    reallocated_mac = db_api.mac_address_reallocate_find(
                        elevated, transaction.id)
                    if reallocated_mac:
                        dealloc = netaddr.EUI(reallocated_mac["address"])
                        LOG.info("Found a suitable deallocated MAC {0}".format(
                            str(dealloc)))
                        LOG.info("MAC assignment for port ID {0} completed "
                                 "with address {1}".format(port_id, dealloc))
                        return reallocated_mac
            except Exception:
                LOG.exception("Error in mac reallocate...")
                continue
        LOG.info("Couldn't find a suitable deallocated MAC, attempting "
                 "to create a new one")
        # This could fail if a large chunk of MACs were chosen explicitly,
        # but under concurrent load enough MAC creates should iterate without
        # any given thread exhausting its retry count.
        for retry in xrange(CONF.QUARK.mac_address_retry_max):
            LOG.info("Attemping to find a range to create a new MAC in "
                     "(step 2 of 3), attempt {0} of {1}".format(
                         retry + 1, CONF.QUARK.mac_address_retry_max))
            next_address = None
            with context.session.begin():
                try:
                    fn = db_api.mac_address_range_find_allocation_counts
                    mac_range = \
                        fn(context, address=mac_address,
                           use_forbidden_mac_range=use_forbidden_mac_range)
                    if not mac_range:
                        LOG.info("No MAC ranges could be found given "
                                 "the criteria")
                        break
                    rng, addr_count = mac_range
                    LOG.info("Found a MAC range {0}".format(rng["cidr"]))
                    last = rng["last_address"]
                    first = rng["first_address"]
                    if (last - first + 1) <= addr_count:
                        # Somehow, the range got filled up without us
                        # knowing, so set the next_auto_assign to be -1
                        # so we never try to create new ones
                        # in this range
                        db_api.mac_range_update_set_full(context, rng)
                        LOG.info("MAC range {0} is full".format(rng["cidr"]))
                        continue
                    if mac_address:
                        next_address = mac_address
                    else:
                        # Advance (or close) the range's auto-assign cursor
                        # before we attempt to create the row below.
                        next_address = rng["next_auto_assign_mac"]
                        if next_address + 1 > rng["last_address"]:
                            db_api.mac_range_update_set_full(context, rng)
                        else:
                            db_api.mac_range_update_next_auto_assign_mac(
                                context, rng)
                        context.session.refresh(rng)
                except Exception:
                    LOG.exception("Error in updating mac range")
                    continue
            # Based on the above, this should only fail if a MAC was
            # was explicitly chosen at some point. As such, fall through
            # here and get in line for a new MAC address to try
            try:
                mac_readable = str(netaddr.EUI(next_address))
                LOG.info("Attempting to create new MAC {0} "
                         "(step 3 of 3)".format(mac_readable))
                with context.session.begin():
                    address = db_api.mac_address_create(
                        context, address=next_address,
                        mac_address_range_id=rng["id"])
                    LOG.info("MAC assignment for port ID {0} completed with "
                             "address {1}".format(port_id, mac_readable))
                    return address
            except Exception:
                LOG.info("Failed to create new MAC {0}".format(mac_readable))
                LOG.exception("Error in creating mac. MAC possibly duplicate")
                continue
        raise exceptions.MacAddressGenerationFailure(net_id=net_id)
    @synchronized(named("reallocate_ip"))
    def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
                                 version=None, ip_address=None,
                                 segment_id=None, subnets=None, **kwargs):
        """Try to reclaim a previously deallocated IP instead of creating one.

        Returns a single-element list containing the reclaimed address
        model on success, or an empty list when nothing suitable exists
        (explicit v6 requests always defer to the create path).
        """
        version = version or [4, 6]
        elevated = context.elevated()
        LOG.info("Attempting to reallocate an IP (step 1 of 3) - [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, port_id=port_id,
                                version=version, segment_id=segment_id,
                                subnets=subnets)))
        if version == 6:
            # Defers to the create case. The reason why is we'd have to look
            # up subnets here to correctly generate the v6. If we split them
            # up into reallocate and create, we'd be looking up the same
            # subnets twice, which is a waste of time.
            # TODO(mdietz): after reviewing this code, this block annoyingly
            #               doesn't trigger in the ANY case, since we end up
            #               using a list of [4, 6]. It works as expected most
            #               of the time, but we can anticipate that isolated
            #               networks will end up using sequential assignment.
            #               Probably want to rework this logic to compensate
            #               at some point. Considering they all come from the
            #               same MAC address pool, nothing bad will happen,
            #               just worth noticing and fixing.
            LOG.info("Identified as v6 case, deferring to IP create path")
            return []
        sub_ids = []
        if subnets:
            sub_ids = subnets
        elif segment_id:
            subnets = db_api.subnet_find(elevated,
                                         network_id=net_id,
                                         segment_id=segment_id)
            sub_ids = [s["id"] for s in subnets]
            if not sub_ids:
                # A segment was requested but has no subnets: nothing can
                # ever be allocated, so fail fast.
                LOG.info("No subnets matching segment_id {0} could be "
                         "found".format(segment_id))
                raise exceptions.IpAddressGenerationFailure(
                    net_id=net_id)
        # Filter criteria for reallocatable addresses.
        ip_kwargs = {
            "network_id": net_id,
            "reuse_after": reuse_after,
            "deallocated": True,
            "ip_address": ip_address,
            "version": version,
        }
        if ip_address:
            # An explicitly requested IP may also match a never-deallocated
            # row, so drop the deallocated filter.
            del ip_kwargs["deallocated"]
        if sub_ids:
            ip_kwargs["subnet_id"] = sub_ids
        ipam_log = kwargs.get('ipam_log', None)
        for retry in xrange(CONF.QUARK.ip_address_retry_max):
            attempt = None
            if ipam_log:
                attempt = ipam_log.make_entry("attempt_to_reallocate_ip")
            LOG.info("Attempt {0} of {1}".format(
                retry + 1, CONF.QUARK.ip_address_retry_max))
            try:
                with context.session.begin():
                    transaction = db_api.transaction_create(context)
                    m = models.IPAddress
                    update_kwargs = {
                        m.transaction_id: transaction.id,
                        m.address_type: kwargs.get("address_type",
                                                   ip_types.FIXED),
                        m.deallocated: False,
                        m.deallocated_at: None,
                        m.used_by_tenant_id: context.tenant_id,
                        m.allocated_at: timeutils.utcnow(),
                    }
                    result = db_api.ip_address_reallocate(
                        elevated, update_kwargs, **ip_kwargs)
                    if not result:
                        LOG.info("Couldn't update any reallocatable addresses "
                                 "given the criteria")
                        if attempt:
                            attempt.failed()
                        # No candidates at all: give up on reallocation.
                        break
                    updated_address = db_api.ip_address_reallocate_find(
                        elevated, transaction.id)
                    if not updated_address:
                        if attempt:
                            attempt.failed()
                        continue
                    LOG.info("Address {0} is reallocated".format(
                        updated_address["address_readable"]))
                    return [updated_address]
            except Exception:
                if attempt:
                    attempt.failed()
                LOG.exception("Error in reallocate ip...")
            finally:
                if attempt:
                    attempt.end()
        return []
def is_strategy_satisfied(self, ip_addresses, allocate_complete=False):
return ip_addresses
    def _allocate_from_subnet(self, context, net_id, subnet,
                              port_id, reuse_after, ip_address=None, **kwargs):
        """Create a brand-new IP address row inside the given subnet.

        Uses the explicitly requested ``ip_address`` when provided,
        otherwise the subnet's auto-assign cursor. Raises a retryable
        failure when the candidate violates the subnet's IP policy or the
        insert conflicts with a concurrent allocation.
        """
        LOG.info("Creating a new address in subnet {0} - [{1}]".format(
            subnet["_cidr"], utils.pretty_kwargs(network_id=net_id,
                                                 subnet=subnet,
                                                 port_id=port_id,
                                                 ip_address=ip_address)))
        ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
        next_ip = ip_address
        if not next_ip:
            # NOTE(review): the cursor is offset by -1 here — presumably
            # next_auto_assign_ip is stored one past the next free address;
            # confirm against the subnet model. -1 marks a full subnet.
            if subnet["next_auto_assign_ip"] != -1:
                next_ip = netaddr.IPAddress(subnet["next_auto_assign_ip"] - 1)
            else:
                next_ip = netaddr.IPAddress(subnet["last_ip"])
            if subnet["ip_version"] == 4:
                next_ip = next_ip.ipv4()
        LOG.info("Next IP is {0}".format(str(next_ip)))
        # Policy only applies to auto-assigned addresses, not explicit ones.
        if ip_policy_cidrs and next_ip in ip_policy_cidrs and not ip_address:
            LOG.info("Next IP {0} violates policy".format(str(next_ip)))
            raise q_exc.IPAddressPolicyRetryableFailure(ip_addr=next_ip,
                                                        net_id=net_id)
        try:
            with context.session.begin():
                address = db_api.ip_address_create(
                    context, address=next_ip, subnet_id=subnet["id"],
                    deallocated=0, version=subnet["ip_version"],
                    network_id=net_id,
                    port_id=port_id,
                    address_type=kwargs.get('address_type', ip_types.FIXED))
                address["deallocated"] = 0
        except Exception:
            # NOTE(mdietz): Our version of sqlalchemy incorrectly raises None
            #               here when there's an IP conflict
            if ip_address:
                # The caller asked for this exact IP; report it as taken.
                raise exceptions.IpAddressInUse(ip_address=next_ip,
                                                net_id=net_id)
            raise q_exc.IPAddressRetryableFailure(ip_addr=next_ip,
                                                  net_id=net_id)
        return address
    def _allocate_from_v6_subnet(self, context, net_id, subnet,
                                 port_id, reuse_after, ip_address=None,
                                 **kwargs):
        """This attempts to allocate v6 addresses as per RFC2462 and RFC3041.

        To accommodate this, we effectively treat all v6 assignment as a
        first time allocation utilizing the MAC address of the VIF. Because
        we recycle MACs, we will eventually attempt to recreate a previously
        generated v6 address. Instead of failing, we've opted to handle
        reallocating that address in this method.

        This should provide a performance boost over attempting to check
        each and every subnet in the existing reallocate logic, as we'd
        have to iterate over each and every subnet returned
        """
        LOG.info("Attempting to allocate a v6 address - [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, subnet=subnet,
                                port_id=port_id, ip_address=ip_address)))
        if ip_address:
            # An explicit IP request bypasses generation entirely.
            LOG.info("IP %s explicitly requested, deferring to standard "
                     "allocation" % ip_address)
            return self._allocate_from_subnet(context, net_id=net_id,
                                              subnet=subnet, port_id=port_id,
                                              reuse_after=reuse_after,
                                              ip_address=ip_address, **kwargs)
        else:
            mac = kwargs.get("mac_address")
            if mac:
                # mac_address arrives as a model/dict; use its raw address.
                mac = kwargs["mac_address"].get("address")
            ip_policy_cidrs = models.IPPolicy.get_ip_policy_cidrs(subnet)
            for tries, ip_address in enumerate(
                    generate_v6(mac, port_id, subnet["cidr"])):
                LOG.info("Attempt {0} of {1}".format(
                    tries + 1, CONF.QUARK.v6_allocation_attempts))
                if tries > CONF.QUARK.v6_allocation_attempts - 1:
                    LOG.info("Exceeded v6 allocation attempts, bailing")
                    raise exceptions.IpAddressGenerationFailure(
                        net_id=net_id)
                ip_address = netaddr.IPAddress(ip_address).ipv6()
                LOG.info("Generated a new v6 address {0}".format(
                    str(ip_address)))
                # NOTE(mdietz): treating the IPSet as a boolean caused netaddr
                #               to attempt to enumerate the entire set!
                if (ip_policy_cidrs is not None and
                        ip_address in ip_policy_cidrs):
                    LOG.info("Address {0} excluded by policy".format(
                        str(ip_address)))
                    continue
                # TODO(mdietz): replace this with a compare-and-swap loop
                with context.session.begin():
                    address = db_api.ip_address_find(
                        context, network_id=net_id, ip_address=ip_address,
                        scope=db_api.ONE, reuse_after=reuse_after,
                        deallocated=True, subnet_id=subnet["id"],
                        lock_mode=True)
                    if address:
                        # A previous MAC recycle generated this address;
                        # claim the deallocated row instead of inserting.
                        LOG.info("Address {0} exists, claiming".format(
                            str(ip_address)))
                        return db_api.ip_address_update(
                            context, address, deallocated=False,
                            deallocated_at=None,
                            used_by_tenant_id=context.tenant_id,
                            allocated_at=timeutils.utcnow(),
                            address_type=kwargs.get('address_type',
                                                    ip_types.FIXED))
                # This triggers when the IP is allocated to another tenant,
                # either because we missed it due to our filters above, or
                # in an extremely unlikely race between the find and here.
                try:
                    with context.session.begin():
                        return db_api.ip_address_create(
                            context, address=ip_address,
                            subnet_id=subnet["id"],
                            version=subnet["ip_version"], network_id=net_id,
                            address_type=kwargs.get('address_type',
                                                    ip_types.FIXED))
                except db_exception.DBDuplicateEntry:
                    # Loop around and generate the next candidate address.
                    LOG.info("{0} exists but was already "
                             "allocated".format(str(ip_address)))
                    LOG.debug("Duplicate entry found when inserting subnet_id"
                              " %s ip_address %s", subnet["id"], ip_address)
def _allocate_ips_from_subnets(self, context, new_addresses, net_id,
subnets, port_id, reuse_after,
ip_address=None, **kwargs):
LOG.info("Allocating IP(s) from chosen subnet(s) (step 3 of 3) - "
"[{0}]".format(utils.pretty_kwargs(
network_id=net_id, port_id=port_id,
new_addresses=new_addresses, ip_address=ip_address)))
subnets = subnets or []
for subnet in subnets:
if not subnet:
continue
LOG.info("Attempting to allocate from {0} - {1}".format(
subnet["id"], subnet["_cidr"]))
address = None
if int(subnet["ip_version"]) == 4:
address = self._allocate_from_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
else:
address = self._allocate_from_v6_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
if address:
LOG.info("Created IP {0}".format(
address["address_readable"]))
new_addresses.append(address)
return new_addresses
def _notify_new_addresses(self, context, new_addresses):
for addr in new_addresses:
payload = dict(used_by_tenant_id=addr["used_by_tenant_id"],
ip_block_id=addr["subnet_id"],
ip_address=addr["address_readable"],
device_ids=[p["device_id"] for p in addr["ports"]],
created_at=addr["created_at"])
n_rpc.get_notifier("network").info(context,
"ip_block.address.create",
payload)
@ipam_logged
def allocate_ip_address(self, context, new_addresses, net_id, port_id,
reuse_after, segment_id=None, version=None,
ip_addresses=None, subnets=None, **kwargs):
elevated = context.elevated()
subnets = subnets or []
ip_addresses = ip_addresses or []
ipam_log = kwargs.get('ipam_log', None)
LOG.info("Starting a new IP address(es) allocation. Strategy "
"is {0} - [{1}]".format(
self.get_name(),
utils.pretty_kwargs(network_id=net_id, port_id=port_id,
new_addresses=new_addresses,
ip_addresses=ip_addresses,
subnets=subnets,
segment_id=segment_id,
version=version)))
def _try_reallocate_ip_address(ipam_log, ip_addr=None):
new_addresses.extend(self.attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, version=None,
ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
**kwargs))
def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
for retry in xrange(CONF.QUARK.ip_address_retry_max):
attempt = None
if ipam_log:
attempt = ipam_log.make_entry("_try_allocate_ip_address")
LOG.info("Allocating new IP attempt {0} of {1}".format(
retry + 1, CONF.QUARK.ip_address_retry_max))
if not sub:
subnets = self._choose_available_subnet(
elevated, net_id, version, segment_id=segment_id,
ip_address=ip_addr, reallocated_ips=new_addresses)
else:
subnets = [self.select_subnet(context, net_id,
ip_addr, segment_id,
subnet_ids=[sub])]
LOG.info("Subnet selection returned {0} viable subnet(s) - "
"IDs: {1}".format(len(subnets),
", ".join([str(s["id"])
for s in subnets if s])))
try:
self._allocate_ips_from_subnets(context, new_addresses,
net_id, subnets,
port_id, reuse_after,
ip_addr, **kwargs)
except q_exc.IPAddressRetryableFailure:
LOG.exception("Error in allocating IP")
if attempt:
LOG.debug("ATTEMPT FAILED")
attempt.failed()
remaining = CONF.QUARK.ip_address_retry_max - retry - 1
if remaining > 0:
LOG.info("{0} retries remain, retrying...".format(
remaining))
else:
LOG.info("No retries remaing, bailing")
continue
finally:
if attempt:
attempt.end()
break
ip_addresses = [netaddr.IPAddress(ip_address)
for ip_address in ip_addresses]
if ip_addresses:
for ip_address in ip_addresses:
_try_reallocate_ip_address(ipam_log, ip_address)
else:
_try_reallocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses):
return
else:
LOG.info("Reallocated addresses {0} but still need more addresses "
"to satisfy strategy {1}. Falling back to creating "
"IPs".format(new_addresses, self.get_name()))
if ip_addresses or subnets:
for ip_address, subnet in itertools.izip_longest(ip_addresses,
subnets):
_try_allocate_ip_address(ipam_log, ip_address, subnet)
else:
_try_allocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
self._notify_new_addresses(context, new_addresses)
LOG.info("IPAM for port ID {0} completed with addresses "
"{1}".format(port_id,
[a["address_readable"]
for a in new_addresses]))
return
ipam_log.failed()
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
def deallocate_ip_address(self, context, address):
address["deallocated"] = 1
address["address_type"] = None
payload = dict(used_by_tenant_id=address["used_by_tenant_id"],
ip_block_id=address["subnet_id"],
ip_address=address["address_readable"],
device_ids=[p["device_id"] for p in address["ports"]],
created_at=address["created_at"],
deleted_at=timeutils.utcnow())
n_rpc.get_notifier("network").info(context,
"ip_block.address.delete",
payload)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
ips_removed = []
for addr in port["ip_addresses"]:
if "ip_address" in kwargs:
ip = kwargs["ip_address"]
if ip != netaddr.IPAddress(int(addr["address"])):
continue
# Note: only deallocate ip if this is the
# only port mapped
if len(addr["ports"]) == 1:
self.deallocate_ip_address(context, addr)
ips_removed.append(addr)
port["ip_addresses"] = list(
set(port["ip_addresses"]) - set(ips_removed))
# NCP-1509(roaet):
# - started using admin_context due to tenant not claiming when realloc
def deallocate_mac_address(self, context, address):
admin_context = context.elevated()
mac = db_api.mac_address_find(admin_context, address=address,
scope=db_api.ONE)
if not mac:
raise exceptions.NotFound(
message="No MAC address %s found" % netaddr.EUI(address))
if mac["mac_address_range"]["do_not_use"]:
db_api.mac_address_delete(admin_context, mac)
else:
db_api.mac_address_update(admin_context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())
    def _select_subnet(self, context, net_id, ip_address, segment_id,
                       subnet_ids, **filters):
        """Yield (subnet, ips_in_subnet) candidates ordered most-full first.

        Generator over the subnets matching the search criteria; yields
        nothing (after a log message) when no subnet matches.
        """
        # NCP-1480: Don't need to lock V6 subnets, since we don't use
        # next_auto_assign_ip for them. We already uniquely identified
        # the V6 we're going to get by generating a MAC in a previous step.
        # Also note that this only works under BOTH or BOTH_REQUIRED. ANY
        # does not pass an ip_version
        lock_subnets = True
        if (not CONF.QUARK.ipam_select_subnet_v6_locking and
                "ip_version" in filters and
                int(filters["ip_version"]) == 6):
            lock_subnets = False
        select_api = db_api.subnet_find_ordered_by_most_full
        # TODO(mdietz): Add configurable, alternate subnet selection here
        subnets = select_api(context, net_id, lock_subnets=lock_subnets,
                             segment_id=segment_id, scope=db_api.ALL,
                             subnet_id=subnet_ids, **filters)
        if not subnets:
            LOG.info("No subnets found given the search criteria!")
            return
        # TODO(mdietz): Making this into an iterator because we want to move
        #               to selecting 1 subnet at a time and paginating rather
        #               than the bulk fetch. Without locks, we need to
        #               minimize looking at stale data to save ourselves
        #               some retries. Getting then 1 at a time will
        #               facilitate this.
        for subnet, ips_in_subnet in subnets:
            yield subnet, ips_in_subnet
    def _should_mark_subnet_full(self, context, subnet, ipnet, ip_address,
                                 ips_in_subnet):
        """Return True when ``subnet`` should be flagged as full.

        Full means either next_auto_assign_ip has walked outside the
        subnet's [first_ip, last_ip] range, or the number of used IPs plus
        the IP-policy exclusion size exhausts the CIDR.
        """
        ip = subnet["next_auto_assign_ip"]
        # NOTE(mdietz): When atomically updated, this probably
        # doesn't need the lower bounds check but
        # I'm not comfortable removing it yet.
        # NOTE(review): `A and B or C` parses as `(A and B) or C`, so the
        # upper-bound check also applies to non-v4 subnets here — confirm
        # that is intended (v6 subnets don't use next_auto_assign_ip per
        # NCP-1480).
        if (subnet["ip_version"] == 4 and ip < subnet["first_ip"] or
                ip > subnet["last_ip"]):
            return True
        ip_policy = None
        if not ip_address:
            # Policies don't prevent explicit assignment, so we only
            # need to check if we're allocating a new IP
            ip_policy = subnet.get("ip_policy")
        policy_size = ip_policy["size"] if ip_policy else 0
        # The "- 1" keeps one address in reserve; subnet is full once the
        # CIDR can no longer cover used IPs plus policy exclusions.
        if ipnet.size > (ips_in_subnet + policy_size - 1):
            return False
        return True
def _ip_in_subnet(self, subnet, subnet_ids, ipnet, ip_address):
if ip_address:
requested_ip = netaddr.IPAddress(ip_address)
if ipnet.version == 4 and requested_ip.version != 4:
requested_ip = requested_ip.ipv4()
if requested_ip not in ipnet:
if subnet_ids is not None:
LOG.info("Requested IP {0} not in subnet {1}, "
"retrying".format(str(requested_ip),
str(ipnet)))
raise q_exc.IPAddressNotInSubnet(
ip_addr=ip_address, subnet_id=subnet["id"])
return False
return True
    # RM6180(roaet):
    # - removed session.begin due to deadlocks
    # - fix off-by-one error and overflow
    @synchronized(named("select_subnet"))
    def select_subnet(self, context, net_id, ip_address, segment_id,
                      subnet_ids=None, **filters):
        """Pick one viable subnet for allocation (step 2 of 3).

        Walks the candidate subnets most-full first, skipping subnets the
        requested IP doesn't fit and marking exhausted subnets full along
        the way. Returns the chosen subnet model, or None when nothing is
        viable.
        """
        LOG.info("Selecting subnet(s) - (Step 2 of 3) [{0}]".format(
            utils.pretty_kwargs(network_id=net_id, ip_address=ip_address,
                                segment_id=segment_id, subnet_ids=subnet_ids,
                                ip_version=filters.get("ip_version"))))
        # TODO(mdietz): Invert the iterator and the session, should only be
        #               one subnet per attempt. We should also only be fetching
        #               the subnet and usage when we need to. Otherwise
        #               we're locking every subnet for a segment, and once
        #               we stop locking, we're looking at stale data.
        with context.session.begin():
            for subnet, ips_in_subnet in self._select_subnet(context, net_id,
                                                             ip_address,
                                                             segment_id,
                                                             subnet_ids,
                                                             **filters):
                ipnet = netaddr.IPNetwork(subnet["cidr"])
                LOG.info("Trying subnet ID: {0} - CIDR: {1}".format(
                    subnet["id"], subnet["_cidr"]))
                # Skip subnets that can't contain the requested address.
                if not self._ip_in_subnet(subnet, subnet_ids, ipnet,
                                          ip_address):
                    continue
                if self._should_mark_subnet_full(context, subnet, ipnet,
                                                 ip_address, ips_in_subnet):
                    LOG.info("Marking subnet {0} as full".format(subnet["id"]))
                    updated = db_api.subnet_update_set_full(context, subnet)
                    # Ensure the session is aware of the changes to the subnet
                    if updated:
                        context.session.refresh(subnet)
                    continue
                if not ip_address and subnet["ip_version"] == 4:
                    # Bump next_auto_assign_ip atomically; failure means a
                    # concurrent worker filled the subnet under us.
                    auto_inc = db_api.subnet_update_next_auto_assign_ip
                    updated = auto_inc(context, subnet)
                    if updated:
                        context.session.refresh(subnet)
                    else:
                        # This means the subnet was marked full
                        # while we were checking out policies.
                        # Fall out and go back to the outer retry
                        # loop.
                        return
                LOG.info("Subnet {0} - {1} {2} looks viable, "
                         "returning".format(subnet["id"], subnet["_cidr"],
                                            subnet["next_auto_assign_ip"]))
                return subnet
class QuarkIpamANY(QuarkIpam):
    """IPAM strategy that accepts a single subnet of any IP version."""
    @classmethod
    def get_name(cls):
        """Return the registry name of this strategy.

        Fix: first parameter of a classmethod renamed self -> cls.
        """
        return "ANY"
    def _choose_available_subnet(self, context, net_id, version=None,
                                 segment_id=None, ip_address=None,
                                 reallocated_ips=None):
        """Pick one viable subnet, optionally filtered by IP version.

        Raises IpAddressGenerationFailure when none is available.
        """
        filters = {}
        if version:
            filters["ip_version"] = version
        subnet = self.select_subnet(context, net_id, ip_address, segment_id,
                                    **filters)
        if subnet:
            return [subnet]
        raise exceptions.IpAddressGenerationFailure(net_id=net_id)
class QuarkIpamBOTH(QuarkIpam):
    """Strategy that tries to give a port both a v4 and a v6 address.

    Satisfied when both versions are covered, or when one version is
    covered and allocation has completed (best effort on the other).
    """
    @classmethod
    def get_name(cls):
        # Fix: first parameter of a classmethod renamed self -> cls.
        return "BOTH"
    def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
        """Return True when the allocated versions satisfy BOTH semantics.

        Fix: guard list.remove() so duplicate versions in reallocated_ips
        (e.g. two v4 addresses) no longer raise ValueError.
        """
        needed = [4, 6]
        for ip in reallocated_ips:
            if ip is not None and ip["version"] in needed:
                needed.remove(ip["version"])
        if not needed:
            return True
        # Having only one of the two versions is acceptable once the
        # allocation pass has fully completed.
        return len(needed) == 1 and allocate_complete
    def attempt_to_reallocate_ip(self, context, net_id, port_id,
                                 reuse_after, version=None,
                                 ip_address=None, segment_id=None,
                                 subnets=None, **kwargs):
        """Try to reallocate one address of each version (4 then 6)."""
        both_versions = []
        for ver in (4, 6):
            address = super(QuarkIpamBOTH, self).attempt_to_reallocate_ip(
                context, net_id, port_id, reuse_after, ver, ip_address,
                segment_id, subnets=subnets, **kwargs)
            both_versions.extend(address)
        return both_versions
    def _choose_available_subnet(self, context, net_id, version=None,
                                 segment_id=None, ip_address=None,
                                 reallocated_ips=None):
        """Pick one subnet for each version not already reallocated.

        Raises IpAddressGenerationFailure only when nothing at all was
        found (no reallocations and no subnets).
        """
        both_subnet_versions = []
        need_versions = [4, 6]
        for i in reallocated_ips:
            if i["version"] in need_versions:
                need_versions.remove(i["version"])
        filters = {}
        for ver in need_versions:
            filters["ip_version"] = ver
            sub = self.select_subnet(context, net_id, ip_address, segment_id,
                                     **filters)
            if sub:
                both_subnet_versions.append(sub)
        if not reallocated_ips and not both_subnet_versions:
            raise exceptions.IpAddressGenerationFailure(net_id=net_id)
        return both_subnet_versions
class QuarkIpamBOTHREQ(QuarkIpamBOTH):
    """Like BOTH, but both v4 AND v6 must be satisfied or allocation fails."""
    @classmethod
    def get_name(cls):
        # Fix: first parameter of a classmethod renamed self -> cls.
        return "BOTH_REQUIRED"
    def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
        """Return True only when both IP versions have been allocated.

        Fix: guard list.remove() so duplicate versions in reallocated_ips
        no longer raise ValueError.
        """
        needed = [4, 6]
        for ip in reallocated_ips:
            if ip is not None and ip["version"] in needed:
                needed.remove(ip["version"])
        return not needed
    def _choose_available_subnet(self, context, net_id, version=None,
                                 segment_id=None, ip_address=None,
                                 reallocated_ips=None):
        """Delegate to BOTH, then require coverage of both versions."""
        subnets = super(QuarkIpamBOTHREQ, self)._choose_available_subnet(
            context, net_id, version, segment_id, ip_address, reallocated_ips)
        if len(reallocated_ips) + len(subnets) < 2:
            raise exceptions.IpAddressGenerationFailure(net_id=net_id)
        return subnets
class IpamRegistry(object):
    """Registry mapping strategy names to singleton strategy instances."""
    def __init__(self):
        self.strategies = {
            QuarkIpamANY.get_name(): QuarkIpamANY(),
            QuarkIpamBOTH.get_name(): QuarkIpamBOTH(),
            QuarkIpamBOTHREQ.get_name(): QuarkIpamBOTHREQ()}
    def is_valid_strategy(self, strategy_name):
        """Return True when ``strategy_name`` is registered."""
        return strategy_name in self.strategies
    def get_strategy(self, strategy_name):
        """Return the named strategy, falling back to the configured default.

        Fix: LOG.warn() is deprecated in favor of warning(); pass lazy
        %-args instead of pre-formatting the message.
        """
        if self.is_valid_strategy(strategy_name):
            return self.strategies[strategy_name]
        fallback = CONF.QUARK.default_ipam_strategy
        LOG.warning("IPAM strategy %s not found, "
                    "using default %s", strategy_name, fallback)
        return self.strategies[fallback]
IPAM_REGISTRY = IpamRegistry()
| |
"""Brython templating engine.
Templates in HTML pages can include:
- Python code blocks:
<tr b-code="for item in items">
...
</tr>
- Python expressions:
{message}
- tag attributes:
    <option value="{name}" selected="{name==expected}">
- inclusion of subtemplates:
<div b-include="menu.html"></div>
Usage in Brython scripts:
from browser.template import Template
Template(element).render(message="ok")
replaces an element with template code by its rendering using the
key/values in kw.
Elements rendered by the template engine have an attribute "data" set to an
object with attributes set to the keyword arguments of render().
Callback functions
------------------
<button b-on="click:increment">Increment</button>
The tag attribute "b-on" is converted so that a click on the button is
handled by the function "increment". This function takes two arguments:
def increment(event, element):
element.data.counter += 1
where "event" is the event object.
To make the function available in the element, pass the list of callback
functions as the second argument of Template():
Template(element, [increment]).render(counter=0)
After a handler function is run, if element.data has changed, the element is
rendered again, with the new value of element.data.
"""
import tb as traceback
from browser import document, html
# HTML elements that don't need a closing tag
# Cf. http://w3c.github.io/html/syntax.html#void-elements
void_elements = ["AREA", "BASE", "BR", "COL", "EMBED", "HR", "IMG", "INPUT",
"LINK", "META", "PARAM", "SOURCE", "TRACK", "WBR"]
def copy(obj):
    """Copy template data: dicts recursively, containers shallowly.

    Dict values are copied recursively; lists, tuples and sets get a new
    container whose members are shared with the original. Anything else is
    returned unchanged.
    """
    if isinstance(obj, dict):
        return {key: copy(value) for key, value in obj.items()}
    if isinstance(obj, (list, tuple)):
        return obj[:]
    if isinstance(obj, set):
        return {member for member in obj}
    return obj
class ElementData:
    """Expose template element data as attributes instead of dict keys."""
    def __init__(self, **kw):
        """Store the keyword arguments passed to Template.render().

        The set self.__keys__ tracks which attribute names belong to the
        data (as opposed to internal bookkeeping).
        """
        self.__keys__ = set()
        for name, val in kw.items():
            object.__setattr__(self, name, val)
            self.__keys__.add(name)
    def __setattr__(self, attr, value):
        """Record attributes set later (e.g. by a callback) in __keys__.

        The "data" attribute of the Template element is set to this
        instance, so callback mutations flow through here.
        """
        object.__setattr__(self, attr, value)
        if attr != "__keys__":
            self.__keys__.add(attr)
    def to_dict(self):
        """Return a plain dict of the tracked key / value pairs."""
        result = {}
        for name in self.__keys__:
            result[name] = getattr(self, name)
        return result
    def clone(self):
        """Snapshot current data so post-callback changes can be detected;
        the element is re-rendered only if the data actually changed."""
        return copy(self.to_dict())
class TemplateError(Exception):
    """Raised when a template is malformed (e.g. a bad b-on binding)."""
    pass
class Template:
    """Compile a DOM element's template markup into Python code, then
    execute that code in render() to produce and install the HTML."""
    def __init__(self, element, callbacks=[]):
        # NOTE(review): mutable default argument; harmless here because
        # callbacks is only stored and iterated, never mutated.
        if isinstance(element, str):
            # Accept an element id and resolve it to the DOM element.
            element = document[element]
        self.element = element
        # Maps generated-Python line numbers to source elements so render()
        # can report which element caused an error.
        self.line_mapping = {}
        self.line_num = 1
        self.indent = 0
        self.python = ""
        self.parse(element)
        self.callbacks = callbacks
    def add(self, content, elt):
        """Append generated Python code, remembering which element it
        came from for error reporting."""
        self.python += content
        self.line_mapping[self.line_num] = elt
        if content.endswith("\n"):
            self.line_num += 1
    def add_indent(self, content, elt):
        """Like add(), but prefixed with the current block indentation."""
        self.add("  " * self.indent + content, elt)
    def write(self, content):
        """Exposed to the generated code as __write__; accumulates HTML."""
        self.html += str(content) + "\n"
    def parse(self, elt):
        """Parse the element recursively to generate the Python code that
        will itself generate the HTML code to render the template.
        """
        # Flag to indicate if the element has an attribute b-code that
        # starts a Python block (for loop, if / elif / else...).
        is_block = False
        if elt.nodeType == 3:
            # Text node.
            if elt.text.strip():
                text = elt.text.replace('"', """)
                text = text.replace("\n", "\\n")
                text = '"' + text + '"'
                # If the text has single braces, render it as an f-string.
                nb_braces = elt.text.count("{")
                if nb_braces:
                    nb_double_braces = elt.text.count("{{")
                    if nb_double_braces != nb_braces:
                        # At least one single {expr}: emit an f-string,
                        # folding the text onto one line.
                        lines = [line for line in elt.text.split("\n")
                                 if line.strip()]
                        text = 'f"""' + " ".join(lines) + '"""'
                self.add_indent ("__write__(" + text + ")\n", elt)
        elif hasattr(elt, "tagName"):
            start_tag = "__write__('<" + elt.tagName
            block = None
            # If an attribute value has a "{", it is considered dynamic and
            # will be rendered as an f-string. Otherwise is is static.
            static_attrs = []
            dynamic_attrs = []
            for item in elt.attributes:
                if item.name == "b-code":
                    # Code block. The trailing ":" is optional.
                    block = item.value.rstrip(":") + ":"
                elif item.name == "b-include":
                    # Replace by content of the file at address item.value
                    elt.html = open(item.value).read()
                else:
                    value = item.value.replace("\n", "")
                    if "{" in value:
                        # Rendered at run time via __render_attr__.
                        dynamic_attrs.append("'" + item.name + "', f'" +
                                             value.replace("'", "\\'") + "'")
                    else:
                        static_attrs.append(item.name + '="' + value +'"')
            if block:
                # Emit the Python block header and indent its body.
                self.add_indent(block + "\n", elt)
                self.indent += 1
                is_block = True
            self.add_indent(start_tag, elt)
            if static_attrs or dynamic_attrs:
                self.add(" ", elt)
            for attr in static_attrs:
                self.add_indent(attr + " ", elt)
            if dynamic_attrs:
                # Close the literal string, then emit one __render_attr__
                # call per dynamic attribute, then the closing ">".
                self.add("')\n", elt)
                for attr in dynamic_attrs:
                    self.add_indent("__render_attr__(" + attr + ")\n", elt)
                self.add_indent("__write__('>')\n", elt)
            else:
                self.add_indent(">')\n", elt)
        for child in elt.childNodes:
            self.parse(child)
        if hasattr(elt, "tagName") and elt.tagName not in void_elements:
            self.add_indent("__write__('</" + elt.tagName + ">')\n", elt)
        if is_block:
            self.indent -= 1
    def on(self, element, event, callback):
        """Bind ``callback`` for ``event`` on ``element``; re-render the
        template afterwards if the callback changed element data."""
        def func(evt):
            cache = self.data.clone()
            callback(evt, self)
            new_data = self.data.to_dict()
            if new_data != cache:
                self.render(**new_data)
        element.bind(event, func)
    def render_attr(self, name, value):
        """Function called when executing the Python code to generate the HTML
        code for a dynamic attribute.
        If the value is a boolean (eg for the "selected" attribute of an
        OPTION tag), generate the attribute name if the value is True and
        nothing otherwise.
        If the value is of another type, add its string representation.

        Note: the comparison is against the strings "True"/"False" because
        the f-string rendering has already stringified the expression.
        """
        if value == "False":
            return
        elif value == "True":
            self.html += " " + name
        else:
            self.html += " " + name + '="' + str(value) + '"'
    def render(self, **ns):
        """Returns the HTML code for the template, with the key / values in
        the keyword argument ns.
        """
        # Set attribute "data" to an instance of class ElementData.
        self.data = ElementData(**ns)
        # Add names "__write__" and "__render_attr__" to namespace.
        ns.update({"__write__": self.write,
                   "__render_attr__": self.render_attr})
        self.html = ""
        # Executing the Python code will store HTML code in self.html.
        try:
            exec(self.python, ns)
        except Exception as exc:
            # Map the failing generated-code line back to the source
            # element and report, then abort rendering.
            msg = traceback.format_exc()
            if isinstance(exc, SyntaxError):
                line_no = exc.args[2]
            else:
                # NOTE(review): relies on Brython exposing exc.traceback;
                # CPython uses exc.__traceback__ — confirm if porting.
                tb = exc.traceback
                while tb is not None:
                    line_no = tb.tb_lineno
                    tb = tb.tb_next
            elt = self.line_mapping[line_no]
            print("Error rendering the element:", elt.nodeType)
            if elt.nodeType == 3: # text
                print(elt.textContent)
            else:
                try:
                    print(elt.outerHTML)
                except AttributeError:
                    print('no outerHTML for', elt)
                    print(elt.html)
            print(f"{exc.__class__.__name__}: {exc}")
            return
        # Replace element content by generated html.
        # Since we reset outerHTML (this is necessary because the element may
        # have dynamic attributes), we must reset the reference to the element
        # because self.element would still point to the previous version (cf.
        # https://developer.mozilla.org/en-US/docs/Web/API/Element/outerHTML,
        # section Notes).
        if self.element.nodeType != 9:
            rank = self.element.index()
            parent = self.element.parent
            self.element.outerHTML = self.html
            self.element = parent.childNodes[rank]
        else:
            # If the template is the document, only reset (inner)html
            self.element.html = self.html
        # Bindings.
        self.element.unbind()
        callbacks = {}
        for callback in self.callbacks:
            callbacks[callback.__name__] = callback
        # Bindings are specified with the attribute b-on. Its value has the
        # form "event1:callback1;event2:callback2".
        for element in self.element.select("*[b-on]"):
            bindings = element.getAttribute("b-on")
            bindings = bindings.split(";")
            for binding in bindings:
                parts = binding.split(":")
                if not len(parts) == 2:
                    raise TemplateError(f"wrong binding: {binding}")
                event, func_name = [x.strip() for x in parts]
                if not func_name in callbacks:
                    print(element.outerHTML)
                    raise TemplateError(f"unknown callback: {func_name}")
                self.on(element, event, callbacks[func_name])
| |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2019
import unittest
import unittest.mock
import sys
import os
import time
import requests
import uuid
import json
import glob
from streamsx.topology.topology import Topology
from streamsx.topology.context import submit, ConfigParams, JobConfig
from streamsx.rest_primitives import Instance
import streamsx.scripts.streamtool as streamtool
import streamsx.ec
from contextlib import contextmanager
from io import StringIO
# Tests streamtool submitjob script.
# Requires environment setup for a ICP4D Streams instance.
import test_sc
@unittest.skipUnless(test_sc.cpd_setup(), "requires Streams REST API setup")
class TestSubmitJob(unittest.TestCase):
    """Integration tests for the ``streamtool submitjob`` command against a
    live ICP4D Streams instance. Each test builds (or reuses) a bundle,
    submits it with various options and checks the resulting job."""
    def _submitjob(self, args, sab=None):
        # Build the streamtool argument list; when no bundle path is given,
        # compile a minimal one-source topology and submit that.
        args.insert(0, "--disable-ssl-verify")
        args.insert(1, "submitjob")
        if sab:
            args.insert(2, sab)
        else:
            topo = Topology()
            topo.source([1])
            cfg = {}
            cfg[ConfigParams.SSL_VERIFY] = False
            src = submit("BUNDLE", topo, cfg)
            sab_path = src["bundlePath"]
            args.insert(2, sab_path)
            self.files_to_remove.append(sab_path)
        rc, val = streamtool.run_cmd(args=args)
        return rc, val
    def setUp(self):
        self.instance = Instance.of_endpoint(verify=False).id
        self.username = os.environ["STREAMS_USERNAME"]
        self.stringLength = 10
        # Jobs / files created by a test are registered here so tearDown
        # can clean them up.
        self.jobs_to_cancel = []
        self.files_to_remove = []
        # Unique-per-test job name.
        self.name = "TEST__" + uuid.uuid4().hex.upper()[0 : self.stringLength]
    def tearDown(self):
        for job in self.jobs_to_cancel:
            job.cancel(force=True)
        # Also sweep up any job-config overlay files the submit produced.
        self.files_to_remove.extend(glob.glob("./test_st_submitjob.*.json"))
        for file in self.files_to_remove:
            if os.path.exists(file):
                os.remove(file)
    # Check --jobname option
    def test_submitjob_name(self):
        rc, job = self._submitjob(args=["--jobname", self.name])
        self.jobs_to_cancel.extend([job])
        self.assertEqual(rc, 0)
        self.assertEqual(job.name, self.name)
    # Check --jobgroup option
    def test_submitjob_group(self):
        # JobGroup name doesn't exist, error should be printed to stderr
        output, error, rc = self.get_output(
            lambda: self._submitjob(args=["--jobgroup", str(self.name)])
        )
        error = error.splitlines()
        if not any(
            "500 Server Error: Internal Server Error for url" in s for s in error
        ):
            self.fail("jobgroup doesn't already exist, should throw 500 error")
        # Check no jobGroup specified results in default jobGroup
        rc, job = self._submitjob([])
        self.jobs_to_cancel.extend([job])
        # jobGroup is a resource path; the group name is its last segment.
        jobgroup = job.jobGroup.split("/")[-1]
        self.assertEqual(rc, 0)
        self.assertEqual(jobgroup, "default")
    # Check --jobConfig option
    def test_submitjob_config(self):
        jc = JobConfig(job_name=self.name)
        my_file = "jobconfig.json"
        with open(my_file, "w") as f:
            json.dump(jc.as_overlays(), f)
        rc, job = self._submitjob(args=["--jobConfig", my_file])
        self.jobs_to_cancel.extend([job])
        self.files_to_remove.append(my_file)
        self.assertEqual(rc, 0)
        self.assertEqual(job.name, self.name)
    # Check --outfile option
    def test_submitjob_outfile(self):
        my_file = self.name + ".txt"
        rc, job = self._submitjob(args=["--outfile", my_file])
        self.jobs_to_cancel.extend([job])
        self.files_to_remove.append(my_file)
        self.assertEqual(rc, 0)
        # The outfile should contain the submitted job's id.
        with open(my_file, "r") as f:
            job_ids = [line.rstrip() for line in f if not line.isspace()]
            self.assertEqual(job_ids[0], job.id)
    # Check -P option w/ simple key1=value1 submission parameters
    def test_submitjob_submission_parameters_simple(self):
        operator1 = "test1"
        operator2 = "test2"
        topo = Topology()
        lower = topo.create_submission_parameter("key1")
        upper = topo.create_submission_parameter("key2")
        s = topo.source([1])
        # Test_metrics surfaces each parameter value as a metric name,
        # which the assertions below poll for.
        s.for_each(Test_metrics(lower), name=operator1)
        s.for_each(Test_metrics(upper), name=operator2)
        cfg = {}
        cfg[ConfigParams.SSL_VERIFY] = False
        src = submit("BUNDLE", topo, cfg)
        sab_path = src["bundlePath"]
        # Submit the job
        args = ["--jobname", str(self.name), "-P", "key1=val1", "-P", "key2=val2"]
        rc, my_job = self._submitjob(args, sab=sab_path)
        self.files_to_remove.append(sab_path)
        self.jobs_to_cancel.extend([my_job])
        test1 = my_job.get_operators(operator1)[0]
        test2 = my_job.get_operators(operator2)[0]
        m1, m2 = None, None
        # Poll up to ~100s for the metrics to appear.
        for _ in range(100):
            if m1 and m2:
                break
            time.sleep(1)
            m1 = test1.get_metrics("val1")
            m2 = test2.get_metrics("val2")
        self.assertEqual(rc, 0)
        if not (m1 and m2):
            self.fail("Submission parameters failed to be created")
    # Check -P option w/ randomly generated key/value submission parameters
    def test_submitjob_submission_parameters_complex(self):
        paramList1, paramList2 = self.generateRandom()
        operator1 = "test1"
        operator2 = "test2"
        topo = Topology()
        lower = topo.create_submission_parameter(paramList1[0][0])
        upper = topo.create_submission_parameter(paramList1[1][0])
        s = topo.source([1])
        s.for_each(Test_metrics(lower), name=operator1)
        s.for_each(Test_metrics(upper), name=operator2)
        cfg = {}
        cfg[ConfigParams.SSL_VERIFY] = False
        src = submit("BUNDLE", topo, cfg)
        sab_path = src["bundlePath"]
        # Submit the job
        args = ["--jobname", str(self.name)]
        for prop in paramList2:
            args.extend(["-P", prop])
        rc, my_job = self._submitjob(args, sab=sab_path)
        self.files_to_remove.append(sab_path)
        self.jobs_to_cancel.extend([my_job])
        test1 = my_job.get_operators(operator1)[0]
        test2 = my_job.get_operators(operator2)[0]
        m1, m2 = None, None
        # Poll up to ~100s for the metrics to appear.
        for _ in range(100):
            if m1 and m2:
                break
            time.sleep(1)
            m1 = test1.get_metrics(paramList1[0][1])
            m2 = test2.get_metrics(paramList1[1][1])
        self.assertEqual(rc, 0)
        if not (m1 and m2):
            self.fail("Submission parameters failed to be created")
    def get_output(self, my_function):
        """ Helper function that gets the output from executing my_function

        Arguments:
            my_function {} -- The function to be executed

        Returns:
            stdout [String] -- Output of my_function

            stderr [String] -- Errors and exceptions from executing my_function

            rc [int] -- 0 indicates success, 1 indicates error or failure
        """
        rc = None
        with captured_output() as (out, err):
            rc, val = my_function()
        stdout = out.getvalue().strip()
        stderr = err.getvalue().strip()
        return stdout, stderr, rc
    def generateRandom(self, num=2):
        """ Helper function that generates random key-value pairs of the form <KEY>=<VALUE> and returns them in a list

        Returns:
            (propList1, propList2) [Tuple] -- A tuple containing 2 lists, the first is of form [(<KEY>, <VALUE>)...],
            second list is of form [ <KEY>=<VALUE>, ..... ]
        """
        propList1 = []
        propList2 = []
        for _ in range(num):
            key = "KEY_" + uuid.uuid4().hex.upper()[0 : self.stringLength]
            value = "VALUE_" + uuid.uuid4().hex.upper()[0 : self.stringLength]
            propList2.append(key + "=" + value)
            propList1.append((key, value))
        return (propList1, propList2)
@contextmanager
def captured_output():
    """Temporarily redirect stdout/stderr to StringIO buffers.

    Yields the two buffers so callers can inspect captured output;
    always restores the real streams on exit.
    """
    saved = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved
class Test_metrics(object):
    """Callable sink used with Stream.for_each: at operator start-up it
    creates a custom metric named after the resolved submission-parameter
    value, which the tests poll for via get_metrics()."""
    def __init__(self, val):
        # val is a submission-parameter accessor; calling it at runtime
        # (in __enter__) yields the submitted value.
        self.val = val
    def __enter__(self):
        # Metric name == parameter value; initialValue is arbitrary.
        self.m1 = streamsx.ec.CustomMetric(self, name=self.val(), initialValue=37)
    def __exit__(self, a, b, c):
        pass
    def __call__(self, tuple_):
        # Tuples are discarded; only the metric side effect matters.
        return
| |
# -*- coding: utf-8 -*-
import base64
import copy
import errno
import logging
import socket
from sys import exc_info
import traceback
import types
from ws4py import WS_KEY, WS_VERSION
from ws4py.exc import HandshakeError, StreamClosed
from ws4py.streaming import Stream
from ws4py.messaging import Message
DEFAULT_READING_SIZE = 2
__all__ = ['WebSocket', 'EchoWebSocket']
class WebSocket(object):
""" Represents a websocket endpoint and provides a high level interface to drive the endpoint. """
    def __init__(self, sock, protocols=None, extensions=None, environ=None):
        """ The ``sock`` is an opened connection
        resulting from the websocket handshake.

        If ``protocols`` is provided, it is a list of protocols
        negotiated during the handshake as is ``extensions``.

        If ``environ`` is provided, it is a copy of the WSGI environ
        dictionary from the underlying WSGI server.
        """
        self.stream = Stream(always_mask=False)
        """
        Underlying websocket stream that performs the websocket
        parsing to high level objects. By default this stream
        never masks its messages. Clients using this class should
        set the ``stream.always_mask`` fields to ``True``
        and ``stream.expect_masking`` fields to ``False``.
        """
        self.protocols = protocols
        """
        List of protocols supported by this endpoint.
        Unused for now.
        """
        self.extensions = extensions
        """
        List of extensions supported by this endpoint.
        Unused for now.
        """
        self.sock = sock
        """
        Underlying connection.
        """
        self.client_terminated = False
        """
        Indicates if the client has been marked as terminated.
        """
        self.server_terminated = False
        """
        Indicates if the server has been marked as terminated.
        """
        self.reading_buffer_size = DEFAULT_READING_SIZE
        """
        Current connection reading buffer size.
        """
        # All outgoing frames go through this callable; _cleanup() clears it.
        self.sender = self.sock.sendall
        self.environ = environ
        """
        WSGI environ dictionary.
        """
    def opened(self):
        """
        Called by the server when the upgrade handshake
        has succeeded. Hook for subclasses; default does nothing.
        """
        pass
def close(self, code=1000, reason=''):
"""
Call this method to initiate the websocket connection
closing by sending a close frame to the connected peer.
The ``code`` is the status code representing the
termination's reason.
Once this method is called, the ``server_terminated``
attribute is set. Calling this method several times is
safe as the closing frame will be sent only the first
time.
.. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
"""
if not self.server_terminated:
self.server_terminated = True
self.sender(self.stream.close(code=code, reason=reason).single(mask=self.stream.always_mask))
    def closed(self, code, reason=None):
        """
        Called when the websocket stream and connection are finally closed.
        The provided ``code`` is status set by the other point and
        ``reason`` is a human readable message.
        Hook for subclasses; default does nothing.

        .. seealso:: Defined Status Codes http://tools.ietf.org/html/rfc6455#section-7.4.1
        """
        pass
@property
def terminated(self):
"""
Returns ``True`` if both the client and server have been
marked as terminated.
"""
return self.client_terminated is True and self.server_terminated is True
def close_connection(self):
"""
Shutdowns then closes the underlying connection.
"""
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except:
pass
    def ponged(self, pong):
        """
        Pong message, as a :class:`messaging.PongControlMessage` instance,
        received on the stream. Hook for subclasses; default does nothing.
        """
        pass
    def received_message(self, message):
        """
        Called whenever a complete ``message``, binary or text,
        is received and ready for application's processing.

        The passed message is an instance of :class:`messaging.TextMessage`
        or :class:`messaging.BinaryMessage`.

        .. note:: You should override this method in your subclass.
        """
        pass
def send(self, payload, binary=False):
"""
Sends the given ``payload`` out.
If ``payload`` is some bytes or a bytearray,
then it is sent as a single message not fragmented.
If ``payload`` is a generator, each chunk is sent as part of
fragmented message.
If ``binary`` is set, handles the payload as a binary message.
"""
message_sender = self.stream.binary_message if binary else self.stream.text_message
if isinstance(payload, basestring) or isinstance(payload, bytearray):
self.sender(message_sender(payload).single(mask=self.stream.always_mask))
elif isinstance(payload, Message):
self.sender(payload.single(mask=self.stream.always_mask))
elif type(payload) == types.GeneratorType:
bytes = payload.next()
first = True
for chunk in payload:
self.sender(message_sender(bytes).fragment(first=first, mask=self.stream.always_mask))
bytes = chunk
first = False
self.sender(message_sender(bytes).fragment(last=True, mask=self.stream.always_mask))
else:
raise ValueError("Unsupported type '%s' passed to send()" % type(payload))
def _cleanup(self):
"""
Frees up resources used by the endpoint.
"""
self.sender = None
self.sock = None
self.environ = None
self.stream._cleanup()
self.stream = None
    def run(self):
        """
        Performs the operation of reading from the underlying
        connection in order to feed the stream of bytes.

        We start with a small buffer size so that we can quickly parse
        an incoming frame header; afterwards the stream indicates
        whatever size must be read from the connection since it knows
        the frame payload length.

        Note that we perform some automatic operations:

        * On a closing message, we respond with a closing message and
          finally close the connection.
        * We respond to pings with pong messages.
        * Whenever an error is raised by the stream parsing, we
          initiate the closing of the connection with the appropriate
          error code.

        This method is blocking and should likely be run in a thread.
        """
        self.sock.setblocking(True)
        s = self.stream
        try:
            self.opened()
            # Cache hot attribute lookups as locals for the receive loop.
            sock = self.sock
            fileno = sock.fileno()
            process = self.process
            while not self.terminated:
                bytes = sock.recv(self.reading_buffer_size)
                # process() returns False once the connection must stop
                # (peer closed, parse error, or an empty read).
                if not process(bytes):
                    break
        finally:
            # Whatever happened above, flag both endpoints terminated so
            # ``terminated`` reports the connection as finished.
            self.client_terminated = self.server_terminated = True
            try:
                if not s.closing:
                    # No close frame was exchanged: 1006 is the RFC 6455
                    # code for abnormal closure.
                    self.closed(1006, "Going away")
                else:
                    self.closed(s.closing.code, s.closing.reason)
            finally:
                # Drop local references, then tear down the socket and
                # free the stream's buffers.
                s = sock = fileno = process = None
                self.close_connection()
                self._cleanup()
    def process(self, bytes):
        """Takes some bytes and processes them through the internal
        stream's parser. If a message of any kind is found, performs
        one of these actions:

        * A closing message will initiate the closing handshake
        * Errors will initiate a closing handshake
        * A message will be passed to the ``received_message`` method
        * Pings will see pongs be sent automatically
        * Pongs will be passed to the ``ponged`` method

        The caller should terminate the read loop when this method
        returns ``False``.
        """
        s = self.stream
        # An empty read means the peer went away — unless the parser asked
        # for zero bytes, in which case an empty chunk is legitimate.
        if not bytes and self.reading_buffer_size > 0:
            return False
        # Feed the parser; its send() result is the number of bytes to read
        # next, falling back to the default chunk size when it yields a
        # falsy value.
        self.reading_buffer_size = s.parser.send(bytes) or DEFAULT_READING_SIZE
        if s.closing is not None:
            # Peer initiated (or completed) the closing handshake: answer
            # with our own close frame unless we already sent one.
            if not self.server_terminated:
                self.close(s.closing.code, s.closing.reason)
            else:
                self.client_terminated = True
            s = None
            return False
        if s.errors:
            # Protocol errors each trigger a close frame with their code.
            for error in s.errors:
                self.close(error.code, error.reason)
            s.errors = []
            s = None
            return False
        if s.has_message:
            self.received_message(s.message)
            # Drop the message buffers immediately to release memory.
            s.message.data = None
            s.message = None
            s = None
            return True
        if s.pings:
            # Answer every pending ping with a pong echoing its payload.
            for ping in s.pings:
                self.sender(s.pong(ping.data))
            s.pings = []
        if s.pongs:
            for pong in s.pongs:
                self.ponged(pong)
            s.pongs = []
        s = None
        return True
class EchoWebSocket(WebSocket):
    def received_message(self, message):
        """
        Echo the received ``message`` straight back to its
        originating endpoint, preserving its binary/text nature.
        """
        data, is_binary = message.data, message.is_binary
        self.send(data, is_binary)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .TProtocol import *
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
  """Binary implementation of the Thrift protocol driver.

  All integers are encoded big-endian via ``struct``; strings are
  written as a 4-byte signed length prefix followed by the raw bytes.
  """

  # NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
  # positive, converting this into a long. If we hardcode the int value
  # instead it'll stay in 32 bit-land.

  # VERSION_MASK = 0xffff0000
  VERSION_MASK = -65536

  # VERSION_1 = 0x80010000
  VERSION_1 = -2147418112

  TYPE_MASK = 0x000000ff

  def __init__(self, trans, strictRead=False, strictWrite=True):
    """Wrap ``trans``.

    strictRead: reject incoming messages lacking the version header.
    strictWrite: emit the version header on outgoing messages.
    """
    TProtocolBase.__init__(self, trans)
    self.strictRead = strictRead
    self.strictWrite = strictWrite

  def writeMessageBegin(self, name, type, seqid):
    """Write a message header (strict form folds VERSION_1 into the type)."""
    if self.strictWrite:
      self.writeI32(TBinaryProtocol.VERSION_1 | type)
      self.writeString(name)
      self.writeI32(seqid)
    else:
      self.writeString(name)
      self.writeByte(type)
      self.writeI32(seqid)

  def writeMessageEnd(self):
    pass

  def writeStructBegin(self, name):
    pass

  def writeStructEnd(self):
    pass

  def writeFieldBegin(self, name, type, id):
    """Write a field header: one type byte plus a 16-bit field id."""
    self.writeByte(type)
    self.writeI16(id)

  def writeFieldEnd(self):
    pass

  def writeFieldStop(self):
    """Mark the end of a struct's fields."""
    self.writeByte(TType.STOP)

  def writeMapBegin(self, ktype, vtype, size):
    """Write a map header: key type, value type, element count."""
    self.writeByte(ktype)
    self.writeByte(vtype)
    self.writeI32(size)

  def writeMapEnd(self):
    pass

  def writeListBegin(self, etype, size):
    """Write a list header: element type and element count."""
    self.writeByte(etype)
    self.writeI32(size)

  def writeListEnd(self):
    pass

  def writeSetBegin(self, etype, size):
    """Write a set header: element type and element count."""
    self.writeByte(etype)
    self.writeI32(size)

  def writeSetEnd(self):
    pass

  def writeBool(self, bool):
    """Encode a boolean as a single 0/1 byte."""
    if bool:
      self.writeByte(1)
    else:
      self.writeByte(0)

  def writeByte(self, byte):
    buff = pack("!b", byte)
    self.trans.write(buff)

  def writeI16(self, i16):
    buff = pack("!h", i16)
    self.trans.write(buff)

  def writeI32(self, i32):
    buff = pack("!i", i32)
    self.trans.write(buff)

  def writeI64(self, i64):
    buff = pack("!q", i64)
    self.trans.write(buff)

  def writeDouble(self, dub):
    buff = pack("!d", dub)
    self.trans.write(buff)

  def writeString(self, str):
    """Write a 4-byte length prefix followed by the raw bytes."""
    self.writeI32(len(str))
    self.trans.write(str)

  def readMessageBegin(self):
    """Read a message header and return ``(name, type, seqid)``.

    Raises TProtocolException on a bad version, or on a missing
    version header when ``strictRead`` is set.
    """
    sz = self.readI32()
    if sz < 0:
      # A negative size is actually VERSION_1 | message-type.
      version = sz & TBinaryProtocol.VERSION_MASK
      if version != TBinaryProtocol.VERSION_1:
        raise TProtocolException(
            type=TProtocolException.BAD_VERSION,
            message='Bad version in readMessageBegin: %d' % (sz))
      type = sz & TBinaryProtocol.TYPE_MASK
      name = self.readString()
      seqid = self.readI32()
    else:
      if self.strictRead:
        raise TProtocolException(type=TProtocolException.BAD_VERSION,
                                 message='No protocol version header')
      name = self.trans.readAll(sz)
      type = self.readByte()
      seqid = self.readI32()
    return (name, type, seqid)

  def readMessageEnd(self):
    pass

  def readStructBegin(self):
    pass

  def readStructEnd(self):
    pass

  def readFieldBegin(self):
    """Read a field header; returns ``(None, type, id)`` (id 0 at STOP)."""
    type = self.readByte()
    if type == TType.STOP:
      return (None, type, 0)
    id = self.readI16()
    return (None, type, id)

  def readFieldEnd(self):
    pass

  def readMapBegin(self):
    """Read a map header; returns ``(ktype, vtype, size)``."""
    ktype = self.readByte()
    vtype = self.readByte()
    size = self.readI32()
    return (ktype, vtype, size)

  def readMapEnd(self):
    pass

  def readListBegin(self):
    """Read a list header; returns ``(etype, size)``."""
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)

  def readListEnd(self):
    pass

  def readSetBegin(self):
    """Read a set header; returns ``(etype, size)``."""
    etype = self.readByte()
    size = self.readI32()
    return (etype, size)

  def readSetEnd(self):
    pass

  def readBool(self):
    """Decode a 0/1 byte into a boolean (any nonzero value is True)."""
    byte = self.readByte()
    if byte == 0:
      return False
    return True

  def readByte(self):
    buff = self.trans.readAll(1)
    val, = unpack('!b', buff)
    return val

  def readI16(self):
    buff = self.trans.readAll(2)
    val, = unpack('!h', buff)
    return val

  def readI32(self):
    buff = self.trans.readAll(4)
    # Fixed: the original wrapped this unpack in a try/except TypeError
    # whose handler re-ran the exact same call, so the handler could never
    # succeed where the first attempt failed; a single call is equivalent.
    val, = unpack('!i', buff)
    return val

  def readI64(self):
    buff = self.trans.readAll(8)
    val, = unpack('!q', buff)
    return val

  def readDouble(self):
    buff = self.trans.readAll(8)
    val, = unpack('!d', buff)
    return val

  def readString(self):
    """Read a 4-byte length prefix, then that many raw bytes."""
    # Renamed the locals: the original shadowed the builtins len/str.
    size = self.readI32()
    return self.trans.readAll(size)
class TBinaryProtocolFactory:
  """Factory that builds TBinaryProtocol instances with fixed strictness."""

  def __init__(self, strictRead=False, strictWrite=True):
    self.strictRead = strictRead
    self.strictWrite = strictWrite

  def getProtocol(self, trans):
    """Return a new TBinaryProtocol wrapping ``trans``."""
    return TBinaryProtocol(trans, self.strictRead, self.strictWrite)
class TBinaryProtocolAccelerated(TBinaryProtocol):
  """C-Accelerated version of TBinaryProtocol.

  This class overrides none of TBinaryProtocol's methods; the generated
  code recognizes the type directly and calls into the C ``fastbinary``
  module to do the encoding, bypassing this object entirely. Inheriting
  from TBinaryProtocol means the normal pure-Python encoding still works
  whenever the fastbinary module is unavailable.
  (TODO(dreiss): Make this happen sanely in more cases.)

  To take advantage of the C module, simply use
  TBinaryProtocolAccelerated instead of TBinaryProtocol.

  NOTE: This code was contributed by an external developer. The internal
  Thrift team has reviewed and tested it, but we cannot guarantee that
  it is production-ready. Please feel free to report bugs and/or success
  stories to the public mailing list.
  """
  pass
class TBinaryProtocolAcceleratedFactory:
  """Factory producing TBinaryProtocolAccelerated instances."""

  def getProtocol(self, trans):
    """Return a new accelerated binary protocol bound to ``trans``."""
    return TBinaryProtocolAccelerated(trans)
| |
"""Script for publishing new versions of Selenium to cloud storage.
When you run this script, it will use OAuth 2.0 to authenticate with
Google Cloud Storage before attempting to upload any files. This script
will fail if the authenticated account does not have write access to the
indicated bucket.
By default, this script will use the adjacent client_secrets.json for
OAuth authentication; this may be changed with the --client_secrets
flag.
Example usage:
python publish_release.py \\
--client_secrets my_secrets.json \\
--project_id foo:bar \\
--bucket releases \\
--publish_version 1.50 \\
--publish path/to/file/one.txt \\
--publish path/to/file/two.txt \\
--acl "public-read"
This will publish
http://releases.storage.googleapis.com/1.50/one.txt
http://releases.storage.googleapis.com/1.50/two.txt
"""
import logging
import mimetypes
from optparse import OptionParser
import os
import sys
try:
import gflags
except ImportError:
print ('Could not import gflags\n'
'Download available at https://code.google.com/p/'
'python-gflags/downloads/\nor run `easy_install python-gflags`')
sys.exit(1)
try:
import httplib2
except ImportError:
print ('Could not import httplib2\n'
'Download available at https://code.google.com/p/httplib2/'
'downloads/\nor run `easy_install httplib2`')
sys.exit(1)
try:
import oauth2client.client as oauthclient
import oauth2client.file as oauthfile
import oauth2client.tools as oauthtools
except ImportError:
print ('Could not import oauth2client\n'
'Download available at https://code.google.com/p/'
'google-api-python-client/downloads\nor run '
'`easy_install oauth2client`')
sys.exit(1)
FLAGS = gflags.FLAGS
# Root-logger verbosity; DEBUG additionally turns on httplib2 wire logging
# (see main()).
gflags.DEFINE_enum(
    'logging_level', 'INFO', ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
    'Set the level of logging detail.')
gflags.DEFINE_string(
    'client_secrets',
    os.path.join(os.path.dirname(__file__), 'client_secrets.json'),
    'The OAuth 2.0 client secrets file to use')
gflags.DEFINE_string(
    'project_id', None, 'The Cloud Storage project id')
gflags.DEFINE_string(
    'bucket', None, 'The bucket to upload to')
gflags.DEFINE_string(
    'publish_version', None, 'The version being published (e.g. 1.23)')
gflags.DEFINE_multistring(
    'publish', [],
    'A file to publish to Cloud Storage; this may be specified multiple times')
gflags.DEFINE_enum(
    'acl', 'private', ['private', 'public-read', 'authenticated-read'],
    'The ACLs to assign to the uploaded files')
# Sent as the x-goog-api-version header on every upload request.
API_VERSION = '2'
# NOTE(review): appears unused in this script — the --client_secrets flag
# default above duplicates it; confirm before removing.
DEFAULT_SECRETS_FILE = os.path.join(os.path.dirname(__file__),
                                    'client_secrets.json')
# OAuth credentials are cached here between runs (see _authenticate()).
OAUTH_CREDENTIALS_FILE = '.credentials.dat'
OAUTH_SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
class Error(Exception):
  """Raised when a Cloud Storage request fails.

  Attributes:
    status: HTTP-style status code of the failure.
    message: Human readable description of the failure.
  """

  def __init__(self, status, message):
    self.status = status
    self.message = message

  def __str__(self):
    return '%r: %r' % (self.status, self.message)
def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):
  """Uploads a file to Google Cloud Storage.

  Args:
    auth_http: An authorized httplib2.Http instance.
    project_id: The project to upload to.
    bucket_name: The bucket to upload to.
    file_path: Path to the file to upload.
    object_name: The name within the bucket to upload to.
    acl: The ACL to assign to the uploaded file.

  Returns:
    The response body of the PUT request.

  Raises:
    Error: If the server cannot be reached or responds with an error
        status.
  """
  # Read as bytes: published artifacts may be binary, and text mode would
  # corrupt them on Windows and skew the Content-Length below.
  with open(file_path, 'rb') as f:
    data = f.read()
  content_type, content_encoding = mimetypes.guess_type(file_path)
  headers = {
    'x-goog-project-id': project_id,
    'x-goog-api-version': API_VERSION,
    'x-goog-acl': acl,
    'Content-Length': '%d' % len(data)
  }
  if content_type:
    headers['Content-Type'] = content_type
  # Fixed: this header was previously guarded on content_type, so the
  # guessed encoding was attached or dropped based on the wrong variable.
  if content_encoding:
    headers['Content-Encoding'] = content_encoding
  try:
    response, content = auth_http.request(
        'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),
        method='PUT',
        headers=headers,
        body=data)
  except httplib2.ServerNotFoundError:
    raise Error(404, 'Server not found.')
  if response.status >= 300:
    raise Error(response.status, response.reason)
  return content
def _authenticate(secrets_file):
  """Runs the OAuth 2.0 installed application flow.

  Credentials are cached in OAUTH_CREDENTIALS_FILE; the interactive
  flow only runs when no valid cached credentials exist.

  Args:
    secrets_file: Path to the OAuth 2.0 client secrets file.

  Returns:
    An authorized httplib2.Http instance.
  """
  flow = oauthclient.flow_from_clientsecrets(
      secrets_file,
      scope=OAUTH_SCOPE,
      # Fixed grammar of the user-facing message ("initialized" ->
      # "initialize").
      message=('Failed to initialize OAuth 2.0 flow with secrets '
               'file: %s' % secrets_file))
  storage = oauthfile.Storage(OAUTH_CREDENTIALS_FILE)
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = oauthtools.run(flow, storage)
  http = httplib2.Http()
  return credentials.authorize(http)
def main(argv):
  """Parses flags, authenticates, and publishes the requested files.

  Args:
    argv: Command line arguments, typically sys.argv.
  """
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError as e:
    # Fixed: the format string contained literal backslash-n sequences
    # ('\\n'), so the usage text printed on one line. Also switched the
    # Python-2-only "except X, e" syntax to "as e".
    logging.error('%s\nUsage: %s ARGS\n%s', e, argv[0], FLAGS)
    sys.exit(1)

  # The default of None keeps an unknown level name from raising
  # AttributeError before the check below can report it.
  numeric_level = getattr(logging, FLAGS.logging_level.upper(), None)
  if not isinstance(numeric_level, int):
    logging.error('Invalid log level: %s', FLAGS.logging_level)
    sys.exit(1)
  logging.basicConfig(level=numeric_level)
  if FLAGS.logging_level == 'DEBUG':
    httplib2.debuglevel = 1

  def die(message):
    # Log the fatal condition and abort with a non-zero exit code.
    logging.fatal(message)
    sys.exit(2)

  if FLAGS.client_secrets is None:
    die('You must specify a client secrets file via --client_secrets')
  if FLAGS.project_id is None:
    die('You must specify a project ID via --project_id')
  if not FLAGS.bucket:
    die('You must specify a bucket via --bucket')
  if FLAGS.publish_version is None:
    die('You must specify a published version identifier via '
        '--publish_version')

  auth_http = _authenticate(FLAGS.client_secrets)
  published = []
  for f in FLAGS.publish:
    object_name = '%s/%s' % (FLAGS.publish_version, os.path.basename(f))
    logging.info('Publishing %s as %s', f, object_name)
    _upload(auth_http, FLAGS.project_id, FLAGS.bucket, f, object_name,
            FLAGS.acl)
    published.append(object_name)

  if published:
    base_url = 'http://%s.storage.googleapis.com/' % FLAGS.bucket
    logging.info('Published:\n  %s' %
                 '\n  '.join([base_url + p for p in published]))
# Script entry point: parse flags and publish the requested files.
if __name__ == '__main__':
  main(sys.argv)
| |
"""Abstract class for RSA."""
from .cryptomath import *
class RSAKey:
    """This is an abstract base class for RSA keys.
    Particular implementations of RSA keys, such as
    L{OpenSSL_RSAKey.OpenSSL_RSAKey},
    L{Python_RSAKey.Python_RSAKey}, and
    L{PyCrypto_RSAKey.PyCrypto_RSAKey},
    inherit from this.
    To create or parse an RSA key, don't use one of these classes
    directly.  Instead, use the factory functions in
    L{tlslite.utils.keyfactory}.
    """
    def __init__(self, n=0, e=0):
        """Create a new RSA key.
        If n and e are passed in, the new key will be initialized.
        @type n: int
        @param n: RSA modulus.
        @type e: int
        @param e: RSA public exponent.
        """
        raise NotImplementedError()
    def __len__(self):
        """Return the length of this key in bits.
        @rtype: int
        """
        return numBits(self.n)
    def hasPrivateKey(self):
        """Return whether or not this key has a private component.
        @rtype: bool
        """
        raise NotImplementedError()
    def hash(self):
        """Return the cryptoID <keyHash> value corresponding to this
        key.
        @rtype: str
        """
        raise NotImplementedError()
    def getSigningAlgorithm(self):
        """Return the cryptoID sigAlgo value corresponding to this key.
        @rtype: str
        """
        return "pkcs1-sha1"
    def hashAndSign(self, bytes):
        """Hash and sign the passed-in bytes.
        This requires the key to have a private component.  It performs
        a PKCS1-SHA1 signature on the passed-in data.
        @type bytes: str or L{array.array} of unsigned bytes
        @param bytes: The value which will be hashed and signed.
        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1-SHA1 signature on the passed-in data.
        """
        if not isinstance(bytes, type("")):
            bytes = bytesToString(bytes)
        # SHA-1 digest is wrapped in the DER DigestInfo prefix before the
        # raw PKCS1 signature operation.
        hashBytes = stringToBytes(sha1(bytes).digest())
        prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
        sigBytes = self.sign(prefixedHashBytes)
        return sigBytes
    def hashAndVerify(self, sigBytes, bytes):
        """Hash and verify the passed-in bytes with the signature.
        This verifies a PKCS1-SHA1 signature on the passed-in data.
        @type sigBytes: L{array.array} of unsigned bytes
        @param sigBytes: A PKCS1-SHA1 signature.
        @type bytes: str or L{array.array} of unsigned bytes
        @param bytes: The value which will be hashed and verified.
        @rtype: bool
        @return: Whether the signature matches the passed-in data.
        """
        if not isinstance(bytes, type("")):
            bytes = bytesToString(bytes)
        # Mirror hashAndSign: same digest + DigestInfo prefix, then verify.
        hashBytes = stringToBytes(sha1(bytes).digest())
        prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
        return self.verify(sigBytes, prefixedHashBytes)
    def sign(self, bytes):
        """Sign the passed-in bytes.
        This requires the key to have a private component.  It performs
        a PKCS1 signature on the passed-in data.
        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be signed.
        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1 signature on the passed-in data.
        """
        if not self.hasPrivateKey():
            raise AssertionError()
        # Block type 1 padding (0xFF fill) is used for signatures.
        paddedBytes = self._addPKCS1Padding(bytes, 1)
        m = bytesToNumber(paddedBytes)
        if m >= self.n:
            raise ValueError()
        c = self._rawPrivateKeyOp(m)
        sigBytes = numberToBytes(c)
        return sigBytes
    def verify(self, sigBytes, bytes):
        """Verify the passed-in bytes with the signature.
        This verifies a PKCS1 signature on the passed-in data.
        @type sigBytes: L{array.array} of unsigned bytes
        @param sigBytes: A PKCS1 signature.
        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be verified.
        @rtype: bool
        @return: Whether the signature matches the passed-in data.
        """
        # Re-pad the expected value and compare against the decrypted
        # signature byte-for-byte.
        paddedBytes = self._addPKCS1Padding(bytes, 1)
        c = bytesToNumber(sigBytes)
        if c >= self.n:
            return False
        m = self._rawPublicKeyOp(c)
        checkBytes = numberToBytes(m)
        return checkBytes == paddedBytes
    def encrypt(self, bytes):
        """Encrypt the passed-in bytes.
        This performs PKCS1 encryption of the passed-in data.
        @type bytes: L{array.array} of unsigned bytes
        @param bytes: The value which will be encrypted.
        @rtype: L{array.array} of unsigned bytes.
        @return: A PKCS1 encryption of the passed-in data.
        """
        # Block type 2 padding (random nonzero fill) is used for encryption.
        paddedBytes = self._addPKCS1Padding(bytes, 2)
        m = bytesToNumber(paddedBytes)
        if m >= self.n:
            raise ValueError()
        c = self._rawPublicKeyOp(m)
        encBytes = numberToBytes(c)
        return encBytes
    def decrypt(self, encBytes):
        """Decrypt the passed-in bytes.
        This requires the key to have a private component.  It performs
        PKCS1 decryption of the passed-in data.
        @type encBytes: L{array.array} of unsigned bytes
        @param encBytes: The value which will be decrypted.
        @rtype: L{array.array} of unsigned bytes or None.
        @return: A PKCS1 decryption of the passed-in data or None if
        the data is not properly formatted.
        """
        if not self.hasPrivateKey():
            raise AssertionError()
        c = bytesToNumber(encBytes)
        if c >= self.n:
            return None
        m = self._rawPrivateKeyOp(c)
        decBytes = numberToBytes(m)
        # The leading zero byte of the PKCS1 block is lost in the integer
        # round-trip, so a well-formed block is one byte shorter than the
        # modulus (see the NOTE in _addPKCS1Padding).
        if (len(decBytes) != numBytes(self.n)-1): #Check first byte
            return None
        if decBytes[0] != 2: #Check second byte
            return None
        for x in range(len(decBytes)-1): #Scan through for zero separator
            if decBytes[x]== 0:
                break
        else:
            return None
        return decBytes[x+1:] #Return everything after the separator
    def _rawPrivateKeyOp(self, m):
        raise NotImplementedError()
    def _rawPublicKeyOp(self, c):
        raise NotImplementedError()
    def acceptsPassword(self):
        """Return True if the write() method accepts a password for use
        in encrypting the private key.
        @rtype: bool
        """
        raise NotImplementedError()
    def write(self, password=None):
        """Return a string containing the key.
        @rtype: str
        @return: A string describing the key, in whichever format (PEM
        or XML) is native to the implementation.
        """
        raise NotImplementedError()
    def writeXMLPublicKey(self, indent=''):
        """Return a string containing the key.
        @rtype: str
        @return: A string describing the public key, in XML format.
        """
        # NOTE(review): relies on Python_RSAKey being in scope; it is not
        # imported in this module — confirm against the package's imports.
        return Python_RSAKey(self.n, self.e).write(indent)
    def generate(bits):
        """Generate a new key with the specified bit length.
        @rtype: L{tlslite.utils.RSAKey.RSAKey}
        """
        raise NotImplementedError()
    generate = staticmethod(generate)
    # **************************************************************************
    # Helper Functions for RSA Keys
    # **************************************************************************
    def _addPKCS1SHA1Prefix(self, bytes):
        # DER-encoded DigestInfo header identifying SHA-1, as required by
        # PKCS#1 v1.5 signatures.
        prefixBytes = createByteArraySequence(\
            [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20])
        prefixedBytes = prefixBytes + bytes
        return prefixedBytes
    def _addPKCS1Padding(self, bytes, blockType):
        padLength = (numBytes(self.n) - (len(bytes)+3))
        if blockType == 1: #Signature padding
            pad = [0xFF] * padLength
        elif blockType == 2: #Encryption padding
            # Pad bytes must be nonzero (zero marks the separator), so draw
            # random bytes and discard zeros until enough remain.
            pad = createByteArraySequence([])
            while len(pad) < padLength:
                padBytes = getRandomBytes(padLength * 2)
                pad = [b for b in padBytes if b != 0]
                pad = pad[:padLength]
        else:
            raise AssertionError()
        #NOTE: To be proper, we should add [0,blockType].  However,
        #the zero is lost when the returned padding is converted
        #to a number, so we don't even bother with it.  Also,
        #adding it would cause a misalignment in verify()
        padding = createByteArraySequence([blockType] + pad + [0])
        paddedBytes = padding + bytes
        return paddedBytes
| |
"""
Module for configuring Windows Firewall using ``netsh``
"""
import re
import salt.utils.platform
import salt.utils.win_lgpo_netsh
from salt.exceptions import CommandExecutionError
# Define the module's virtual name
__virtualname__ = "firewall"
def __virtual__():
    """
    Load this module only on Windows hosts.
    """
    if salt.utils.platform.is_windows():
        return __virtualname__
    return False, "Module win_firewall: module only available on Windows"
def get_config():
    """
    Return the on/off state of every firewall profile.

    Returns:
        dict: Maps each profile name to ``True`` when its state is ON

    Raises:
        CommandExecutionError: If the ``netsh`` call fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.get_config
    """
    cmd = ["netsh", "advfirewall", "show", "allprofiles"]
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    # Parsing is localization-sensitive: a profile section is recognized by
    # its "<name> Profile Settings:" heading and its state by the first
    # subsequent line that begins with "State".
    profiles = {}
    current = None
    for line in result["stdout"].splitlines():
        if not current:
            heading = re.search("(.*) Profile Settings:", line)
            if heading:
                current = heading.group(1)
        elif line.startswith("State"):
            profiles[current] = line.split()[1] == "ON"
            current = None
    return profiles
def disable(profile="allprofiles"):
    """
    Turn off the given firewall profile.

    Args:
        profile (Optional[str]): Profile to disable. Default is
            ``allprofiles``. Valid options are:

            - allprofiles
            - domainprofile
            - privateprofile
            - publicprofile

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the ``netsh`` call fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.disable
    """
    cmd = ["netsh", "advfirewall", "set", profile, "state", "off"]
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    return True
def enable(profile="allprofiles"):
    """
    .. versionadded:: 2015.5.0

    Turn on the given firewall profile.

    Args:
        profile (Optional[str]): Profile to enable. Default is
            ``allprofiles``. Valid options are:

            - allprofiles
            - domainprofile
            - privateprofile
            - publicprofile

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the ``netsh`` call fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.enable
    """
    cmd = ["netsh", "advfirewall", "set", profile, "state", "on"]
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    return True
def get_rule(name="all"):
    """
    .. versionadded:: 2015.5.0

    Show the firewall rules matching ``name``.

    Args:
        name (Optional[str]): Full rule name to match exactly; ``all``
            returns every rule. Default is ``all``

    Returns:
        dict: Maps ``name`` to the raw ``netsh`` output for the match(es)

    Raises:
        CommandExecutionError: If the command fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.get_rule 'MyAppPort'
    """
    cmd = ["netsh", "advfirewall", "firewall", "show", "rule", "name={}".format(name)]
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    return {name: result["stdout"]}
def add_rule(name, localport, protocol="tcp", action="allow", dir="in", remoteip="any"):
    """
    .. versionadded:: 2015.5.0

    Create a new inbound or outbound rule in the firewall policy.

    Args:
        name (str): Unique rule name; cannot be "all". Required.
        localport (int): Port the rule applies to (0-65535, a range, or a
            comma-separated list). Ignored for icmpv4/icmpv6. Required.
        protocol (Optional[str]): Protocol number (0-255) or one of
            ``icmpv4``, ``icmpv6``, ``tcp``, ``udp``, ``any``.
        action (Optional[str]): ``allow``, ``block`` or ``bypass``.
        dir (Optional[str]): Direction, ``in`` or ``out``.
        remoteip (Optional[str]): Remote address specification: ``any``,
            ``localsubnet``, ``dns``, ``dhcp``, ``wins``,
            ``defaultgateway``, a valid IPv4/IPv6 address, subnet, address
            range, or a comma-separated combination of these.

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the command fails

    CLI Example:

    .. code-block:: bash

        salt '*' firewall.add_rule 'test' '8080' 'tcp'
        salt '*' firewall.add_rule 'test' '1' 'icmpv4'
        salt '*' firewall.add_rule 'test_remote_ip' '8000' 'tcp' 'allow' 'in' '192.168.0.1'
    """
    cmd = ["netsh", "advfirewall", "firewall", "add", "rule"]
    cmd.append("name={}".format(name))
    cmd.append("protocol={}".format(protocol))
    cmd.append("dir={}".format(dir))
    cmd.append("action={}".format(action))
    cmd.append("remoteip={}".format(remoteip))
    # ICMP rules take no local port; everything else needs one.
    is_icmp = protocol is not None and (
        "icmpv4" in protocol or "icmpv6" in protocol
    )
    if not is_icmp:
        cmd.append("localport={}".format(localport))
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    return True
def delete_rule(name=None, localport=None, protocol=None, dir=None, remoteip=None):
    """
    .. versionadded:: 2015.8.0

    Remove an existing firewall rule identified by name and optionally by
    port, protocol, direction, and remote IP.

    Args:
        name (str): Name of the rule to delete. When ``all`` is used,
            the additional selectors below are required.
        localport (Optional[str]): Port of the rule. When no protocol is
            given alongside it, the protocol defaults to ``tcp``.
        protocol (Optional[str]): Protocol of the rule.
        dir (Optional[str]): Direction of the rule.
        remoteip (Optional[str]): Remote IP of the rule.

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If the command fails

    CLI Example:

    .. code-block:: bash

        # Delete incoming tcp port 8080 in the rule named 'test'
        salt '*' firewall.delete_rule 'test' '8080' 'tcp' 'in'

        # Delete the incoming tcp port 8000 from 192.168.0.1 in the rule
        # named 'test_remote_ip'
        salt '*' firewall.delete_rule 'test_remote_ip' '8000' 'tcp' 'in' '192.168.0.1'

        # Delete all rules for local port 80:
        salt '*' firewall.delete_rule all 80 tcp

        # Delete a rule called 'allow80':
        salt '*' firewall.delete_rule allow80
    """
    cmd = ["netsh", "advfirewall", "firewall", "delete", "rule"]
    for key, value in (
        ("name", name),
        ("protocol", protocol),
        ("dir", dir),
        ("remoteip", remoteip),
    ):
        if value:
            cmd.append("{}={}".format(key, value))
    is_icmp = protocol is not None and (
        "icmpv4" in protocol or "icmpv6" in protocol
    )
    if not is_icmp and localport:
        if not protocol:
            # netsh requires a protocol alongside localport; default to tcp.
            cmd.append("protocol=tcp")
        cmd.append("localport={}".format(localport))
    result = __salt__["cmd.run_all"](cmd, python_shell=False, ignore_retcode=True)
    if result["retcode"]:
        raise CommandExecutionError(result["stdout"])
    return True
def rule_exists(name):
    """
    .. versionadded:: 2016.11.6

    Check whether a firewall rule with the given name exists in the
    firewall policy.

    Args:
        name (str): The name of the rule

    Returns:
        bool: True if exists, otherwise False

    CLI Example:

    .. code-block:: bash

        # Is there a rule named RemoteDesktop
        salt '*' firewall.rule_exists RemoteDesktop
    """
    try:
        get_rule(name)
    except CommandExecutionError:
        return False
    return True
def get_settings(profile, section, store="local"):
    """
    Get one firewall property of a profile from the given store, as
    returned by ``netsh advfirewall``.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:
        profile (str):
            The firewall profile to query. Valid options are:

            - domain
            - public
            - private

        section (str):
            The property to query within the selected profile. Valid
            options are:

            - firewallpolicy : inbound/outbound behavior
            - logging : firewall logging settings
            - settings : firewall properties
            - state : firewalls state (on | off)

        store (str):
            Either the local firewall policy (``local``, the default) or
            the policy defined by local group policy (``lgpo``).

    Returns:
        dict: A dictionary containing the properties for the specified profile

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Get the inbound/outbound firewall settings for connections on the
        # local domain profile
        salt * win_firewall.get_settings domain firewallpolicy

        # Get the inbound/outbound firewall settings for connections on the
        # domain profile as defined by local group policy
        salt * win_firewall.get_settings domain firewallpolicy lgpo
    """
    query = {"profile": profile, "section": section, "store": store}
    return salt.utils.win_lgpo_netsh.get_settings(**query)
def get_all_settings(domain, store="local"):
    """
    Gets all the properties for the specified profile in the specified store

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:
        domain (str):
            The firewall profile to query (the parameter is named
            ``domain`` but accepts any profile). Valid options are:

            - domain
            - public
            - private

        store (str):
            The store to use. This is either the local firewall policy or
            the policy defined by local group policy. Valid options are:

            - lgpo
            - local

            Default is ``local``

    Returns:
        dict: A dictionary containing the specified settings

    CLI Example:

    .. code-block:: bash

        # Get all firewall settings for connections on the domain profile
        salt * win_firewall.get_all_settings domain

        # Get all firewall settings for connections on the domain profile as
        # defined by local group policy
        salt * win_firewall.get_all_settings domain lgpo
    """
    return salt.utils.win_lgpo_netsh.get_all_settings(profile=domain, store=store)
def get_all_profiles(store="local"):
    """
    Gets all properties for all profiles in the specified store

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        store (str):
            The store to use. This is either the local firewall policy or the
            policy defined by local group policy. Valid options are:

            - lgpo
            - local

            Default is ``local``

    Returns:
        dict: A dictionary containing the specified settings for each profile

    CLI Example:

    .. code-block:: bash

        # Get all firewall settings for all profiles
        salt * firewall.get_all_profiles

        # Get all firewall settings for all profiles as defined by local group
        # policy
        salt * firewall.get_all_profiles lgpo
    """
    return salt.utils.win_lgpo_netsh.get_all_profiles(store=store)
def set_firewall_settings(profile, inbound=None, outbound=None, store="local"):
    """
    Set the inbound and/or outbound connection behavior for a firewall
    profile in the given policy store.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        inbound (str):
            The inbound behavior to apply. Pass ``None`` (the default) to
            leave the current setting unchanged. Valid values are:

            - blockinbound
            - blockinboundalways
            - allowinbound
            - notconfigured

        outbound (str):
            The outbound behavior to apply. Pass ``None`` (the default) to
            leave the current setting unchanged. Valid values are:

            - allowoutbound
            - blockoutbound
            - notconfigured

        store (str):
            Which policy store to write to: ``local`` (the default) for the
            local firewall policy, or ``lgpo`` for local group policy.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Set the inbound setting for the domain profile to block inbound
        # connections
        salt * firewall.set_firewall_settings domain='domain' inbound='blockinbound'

        # Set the outbound setting for the domain profile to allow outbound
        # connections
        salt * firewall.set_firewall_settings domain='domain' outbound='allowoutbound'

        # Set inbound/outbound settings for the domain profile in the group
        # policy to block inbound and allow outbound
        salt * firewall.set_firewall_settings domain='domain' inbound='blockinbound' outbound='allowoutbound' store='lgpo'
    """
    # Parameter validation and the actual netsh/registry work happen in the
    # shared helper, which raises on failure.
    result = salt.utils.win_lgpo_netsh.set_firewall_settings(
        profile=profile,
        inbound=inbound,
        outbound=outbound,
        store=store,
    )
    return result
def set_logging_settings(profile, setting, value, store="local"):
    r"""
    Configure logging settings for the Windows firewall.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        setting (str):
            The logging setting to change. Valid options are:

            - allowedconnections
            - droppedconnections
            - filename
            - maxfilesize

        value (str):
            The value to assign. Which values are valid depends on the
            setting being changed:

            allowedconnections:
                - enable
                - disable
                - notconfigured

            droppedconnections:
                - enable
                - disable
                - notconfigured

            filename:
                - Full path and name of the firewall log file
                - notconfigured

            maxfilesize:
                - 1 - 32767
                - notconfigured

            .. note::
                ``notconfigured`` can only be used when using the lgpo store

        store (str):
            Which policy store to write to: ``local`` (the default) for the
            local firewall policy, or ``lgpo`` for local group policy.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Log allowed connections and set that in local group policy
        salt * firewall.set_logging_settings domain allowedconnections enable lgpo

        # Don't log dropped connections
        salt * firewall.set_logging_settings profile=private setting=droppedconnections value=disable

        # Set the location of the log file
        salt * firewall.set_logging_settings domain filename C:\windows\logs\firewall.log

        # You can also use environment variables
        salt * firewall.set_logging_settings domain filename %systemroot%\system32\LogFiles\Firewall\pfirewall.log

        # Set the max file size of the log to 2048 Kb
        salt * firewall.set_logging_settings domain maxfilesize 2048
    """
    # All validation is delegated to the shared netsh helper.
    return salt.utils.win_lgpo_netsh.set_logging_settings(
        profile=profile,
        setting=setting,
        value=value,
        store=store,
    )
def set_settings(profile, setting, value, store="local"):
    """
    Configure firewall behavior settings for a profile.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        setting (str):
            The firewall setting to configure. Valid options are:

            - localfirewallrules
            - localconsecrules
            - inboundusernotification
            - remotemanagement
            - unicastresponsetomulticast

        value (str):
            The value to apply to the setting. Valid options are:

            - enable
            - disable
            - notconfigured

            .. note::
                ``notconfigured`` can only be used when using the lgpo store

        store (str):
            Which policy store to write to: ``local`` (the default) for the
            local firewall policy, or ``lgpo`` for local group policy.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Merge local rules with those distributed through group policy
        salt * firewall.set_settings domain localfirewallrules enable

        # Allow remote management of Windows Firewall
        salt * firewall.set_settings domain remotemanagement enable
    """
    # The shared helper validates every argument and raises on failure.
    outcome = salt.utils.win_lgpo_netsh.set_settings(
        profile=profile,
        setting=setting,
        value=value,
        store=store,
    )
    return outcome
def set_state(profile, state, store="local"):
    """
    Turn the firewall on or off for the given profile.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        profile (str):
            The firewall profile to configure. Valid options are:

            - domain
            - public
            - private

        state (str):
            The desired firewall state. Valid options are:

            - on
            - off
            - notconfigured

            .. note::
                ``notconfigured`` can only be used when using the lgpo store

        store (str):
            Which policy store to write to: ``local`` (the default) for the
            local firewall policy, or ``lgpo`` for local group policy.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Turn the firewall off when the domain profile is active
        salt * firewall.set_state domain off

        # Turn the firewall on when the public profile is active and set that in
        # the local group policy
        salt * firewall.set_state public on lgpo
    """
    # Delegate to the shared netsh helper, which performs the validation.
    return salt.utils.win_lgpo_netsh.set_state(
        profile=profile,
        state=state,
        store=store,
    )
| |
"""Component to integrate the Home Assistant cloud."""
import logging
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_GOOGLE_ACTIONS_SYNC_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)

# The cloud talks to the production endpoints unless configured otherwise.
DEFAULT_MODE = MODE_PROD

# Names of the admin services registered in async_setup.
SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"

# Per-entity overrides allowed under the Alexa ``entity_config`` key.
ALEXA_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(alexa_const.CONF_DESCRIPTION): cv.string,
        vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# Per-entity overrides allowed under the Google Actions ``entity_config`` key.
GOOGLE_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
    }
)

# Options common to both voice assistants: an entity filter.
ASSISTANT_SCHEMA = vol.Schema(
    {vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)

ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)

GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)

# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
                    [MODE_DEV, MODE_PROD]
                ),
                # The URL/ID overrides below replace the built-in cloud
                # endpoints; they exist primarily for development and testing.
                vol.Optional(CONF_COGNITO_CLIENT_ID): str,
                vol.Optional(CONF_USER_POOL_ID): str,
                vol.Optional(CONF_REGION): str,
                vol.Optional(CONF_RELAYER): str,
                vol.Optional(CONF_GOOGLE_ACTIONS_SYNC_URL): vol.Url(),
                vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
                vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
                vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
                vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
                vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
                vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
                vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
                vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
                vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
                vol.Optional(CONF_VOICE_API_URL): vol.Url(),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
class CloudNotAvailable(HomeAssistantError):
    """Raised when an action requires the cloud but it's not available.

    Raised by the cloudhook and remote-UI helpers in this module when the
    integration is not loaded or the user is not logged in.
    """
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
    """Return True when the cloud integration is loaded and a user is logged in."""
    if DOMAIN not in hass.data:
        return False
    return hass.data[DOMAIN].is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
    """Return True when the logged-in user has a non-expired subscription."""
    if not async_is_logged_in(hass):
        return False
    return not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
    """Create a cloudhook for *webhook_id* and return its public URL.

    Raises CloudNotAvailable when the user is not logged in.
    """
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    created = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
    return created["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
    """Delete the cloudhook associated with *webhook_id*.

    Raises CloudNotAvailable when the cloud integration is not loaded.
    """
    if DOMAIN not in hass.data:
        raise CloudNotAvailable
    cloudhooks = hass.data[DOMAIN].cloudhooks
    await cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
    """Return the https URL of the remote UI.

    Raises CloudNotAvailable when the user is not logged in or no remote
    instance domain has been assigned yet.
    """
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    instance_domain = hass.data[DOMAIN].remote.instance_domain
    if not instance_domain:
        raise CloudNotAvailable
    return "https://" + instance_domain
def is_cloudhook_request(request):
    """Return True when *request* was delivered through a cloudhook.

    Cloudhook requests are represented as MockRequest instances rather than
    real aiohttp requests, which is what this check relies on.

    Async friendly.
    """
    return isinstance(request, MockRequest)
async def async_setup(hass, config):
    """Initialize the Home Assistant cloud.

    Reads the optional cloud section of the YAML config, builds the
    ``hass_nabucasa.Cloud`` instance, ties its start/stop to Home Assistant
    lifecycle events, registers the remote connect/disconnect admin services
    and sets up the HTTP API and account linking.
    """
    # Process configs
    if DOMAIN in config:
        kwargs = dict(config[DOMAIN])
    else:
        # No cloud section configured: run with defaults in production mode.
        kwargs = {CONF_MODE: DEFAULT_MODE}

    # Alexa/Google custom config; fall back to the schema defaults (an
    # all-inclusive entity filter) when the key is absent.
    alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
    google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})

    # Cloud settings
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()

    # Initialize Cloud; the remaining kwargs are endpoint/mode overrides.
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
    cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)

    async def _startup(event):
        """Startup event."""
        await cloud.start()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _startup)

    async def _shutdown(event):
        """Shutdown event."""
        await cloud.stop()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)

    async def _service_handler(service):
        """Handle the remote_connect/remote_disconnect services.

        Persists the chosen state in the preferences so it survives restarts.
        """
        if service.service == SERVICE_REMOTE_CONNECT:
            await cloud.remote.connect()
            await prefs.async_update(remote_enabled=True)
        elif service.service == SERVICE_REMOTE_DISCONNECT:
            await cloud.remote.disconnect()
            await prefs.async_update(remote_enabled=False)

    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
    )
    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
    )

    loaded = False

    async def _on_connect():
        """Load the cloud platforms the first time the cloud IoT connects."""
        nonlocal loaded

        # Prevent multiple discovery on reconnects.
        if loaded:
            return
        loaded = True

        hass.async_create_task(
            hass.helpers.discovery.async_load_platform(
                "binary_sensor", DOMAIN, {}, config
            )
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)
        )

    cloud.iot.register_on_connect(_on_connect)

    await http_api.async_setup(hass)
    account_link.async_setup(hass)

    return True
| |
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn basic level for the instruction-following task.
In this environment, the agent receives a reward for every waypoint it hits
as well as a larger reward for reaching the final goal.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import numpy as np
from streetlearn.engine.python import color
from streetlearn.python.environment import coin_game
from streetlearn.python.environment import thumbnail_helper
# One waypoint along a recorded trajectory: its index, pano ID, location,
# heading, segment length and the natural-language instruction for the leg.
TrajectoryStep = collections.namedtuple(
    'TrajectoryStep',
    'waypoint_index pano lat lng heading_deg length instruction')

# A full trajectory: the ordered waypoint steps plus the final goal step.
Trajectory = collections.namedtuple('Trajectory', 'steps goal')
class InstructionsBase(coin_game.CoinGame):
  """Instruction following game.

  The agent follows a pre-recorded trajectory of waypoints; it receives a
  reward for every waypoint it reaches and a larger reward for reaching the
  final goal.
  """

  def __init__(self, config):
    """Creates an instance of the StreetLearn level.

    Args:
      config: config dict of various settings.
    """
    super(InstructionsBase, self).__init__(config)
    # Minimap colors for the goal, the waypoints and the shortest path.
    self._colors.update({
        'goal': color.Color(*config['color_for_goal']),
        'waypoint': color.Color(*config['color_for_waypoint']),
        'shortest_path': color.Color(*config['color_for_shortest_path']),
    })
    self._reward_at_waypoint = config['reward_at_waypoint']
    self._reward_at_goal = config['reward_at_goal']
    self._instruction_file = config['instruction_file']
    self._num_instructions = config['num_instructions']
    self._max_instructions = config['max_instructions']
    self._thumbnail_helper = thumbnail_helper.ThumbnailHelper()
    # One RGB thumbnail per instruction plus one for the goal, zero-padded up
    # to max_instructions.
    self._thumbnails = np.zeros(
        [self._max_instructions + 1, config['height'], config['width'], 3],
        dtype=np.uint8)
    logging.info('Using %d instructions', self._num_instructions)
    logging.info('Padding to %d instructions', self._max_instructions)
    self._instructions = []
    self._step_counter = 1
    # A waypoint "family" is the waypoint pano plus its immediate neighbors:
    #   _reward_pano_id_list: pano_id -> reward value
    #   _reward_pano_id_to_family: pano_id -> centroid pano_id of its family
    #   _reward_family: centroid pano_id -> set of member pano_ids
    self._reward_pano_id_list = {}
    self._reward_pano_id_to_family = {}
    self._reward_family = {}
    self._pano_id_to_color = {}
    self._goal_pano_id = None
    self._trajectory = None
    self._show_shortest_path = config['show_shortest_path']
    self._calculate_ground_truth = config['calculate_ground_truth']
    # Ground truth direction (for imitation learning agents).
    self._gt_direction = 0
    # Trajectories are loaded lazily on the first reset.
    self._num_trajectories = 0
    self._trajectory_data = []
    self._loaded_trajectories = False

  def _load_trajectories(self):
    """Load the trajectories into memory.

    Each line of the instruction file is a tab-separated record:
    trajectory_index, waypoint_index, lat, lng, heading_deg, length,
    pano_id, instruction.
    """
    logging.info('Loading trajectories from %s', self._instruction_file)
    steps = []
    current_trajectory_index = 0
    with open(self._instruction_file, 'r') as f:
      for line in f:
        tokens = line.strip().split('\t')
        trajectory_index = int(tokens[0])
        waypoint_index = int(tokens[1])
        lat = float(tokens[2])
        lng = float(tokens[3])
        heading_deg = float(tokens[4])
        length = float(tokens[5])
        pano_id = tokens[6]
        instruction = tokens[7]
        step = TrajectoryStep(
            waypoint_index=waypoint_index,
            pano=pano_id,
            lat=lat,
            lng=lng,
            heading_deg=heading_deg,
            length=length,
            instruction=instruction)
        # A new trajectory index marks the end of the previous trajectory.
        if trajectory_index != current_trajectory_index:
          self._add_trajectory(steps)
          steps = []
          current_trajectory_index = trajectory_index
        steps.append(step)
    # Flush the final trajectory in the file.
    self._add_trajectory(steps)
    logging.info('Loaded %d trajectories', self._num_trajectories)
    self._loaded_trajectories = True

  def _add_trajectory(self, steps):
    """Store a trajectory."""
    num_steps = len(steps)
    if num_steps > 0:
      # Separate goal (the last step) from waypoints.
      goal = steps[num_steps-1]
      steps = steps[:(num_steps-1)]
      # Store the trajectory in a hashtable.
      trajectory = Trajectory(steps=steps, goal=goal)
      self._trajectory_data.append(trajectory)
      self._num_trajectories += 1
      if self._num_trajectories % 1000 == 0:
        logging.info('Stored %d trajectories', self._num_trajectories)

  def on_reset(self, streetlearn):
    """Gets called after StreetLearn:reset().

    Selects a random trajectory, extracts the instructions and panos at goal
    and waypoints, computes the shortest paths between each start, each
    waypoint and the goal.

    Args:
      streetlearn: a streetlearn instance.
    Returns:
      A newly populated pano_id_to_color dictionary.
    """
    # Initialise graph of rewards and colors with coins.
    super(InstructionsBase, self).on_reset(streetlearn)
    self._current_step = 0
    self._step_counter = 1
    self._step_by_pano = {}
    self._pano_by_step = {}
    self._reward_pano_id_list = {}
    self._reward_pano_id_to_family = {}
    self._reward_family = {}
    self._pano_id_to_color = {}
    self._num_steps_this_goal = 0
    # Randomly sample a trajectory (loading the file on first use).
    if self._loaded_trajectories == False:
      self._load_trajectories()
    trajectory = self._sample_trajectory(streetlearn)
    # Keep at most _num_instructions steps, dropping the earliest ones.
    start = max(len(trajectory.steps) - self._num_instructions, 0)
    logging.info('Trajectory of length %d (max %d), starting at %d',
                 len(trajectory.steps), self._num_instructions, start)
    num_steps = 0
    start_pano_id = None
    self._instructions = []
    self._thumbnails[:] = 0
    pano_list = []
    for step in trajectory.steps[start:]:
      pano_id = step.pano
      pano_list.append(pano_id)
      # Even if we do not take rewards for waypoints, we store them to keep
      # track of the agent's position along the trajectory.
      if num_steps == 0:
        start_pano_id = pano_id
      if num_steps > 0:
        self._add_reward_to_pano(pano_id, self._reward_at_waypoint,
                                 self._colors['waypoint'], streetlearn)
      self._instructions.append(step.instruction)
      # Fetch the thumbnail for the current step of the trajectory.
      step_thumbnail = self._thumbnail_helper.get_thumbnail(
          streetlearn, pano_id, step.heading_deg)
      if step_thumbnail is not None:
        self._thumbnails[num_steps] = step_thumbnail
      if self._reward_at_waypoint:
        logging.info('Waypoint %d at pano %s, yields reward of %f',
                     num_steps, pano_id, self._reward_at_waypoint)
      else:
        logging.info('Waypoint %d at pano %s', num_steps, pano_id)
      num_steps += 1
    # Set the goal.
    self._goal_pano_id = trajectory.goal.pano
    self._add_reward_to_pano(self._goal_pano_id, self._reward_at_goal,
                             self._colors['goal'], streetlearn)
    pano_list.append(self._goal_pano_id)
    # Store the previously defined coin rewards and colours; coins never
    # override waypoint/goal rewards (see _add_coin_reward_to_pano).
    for pano_id in self._coin_pano_id_set:
      self._add_coin_reward_to_pano(pano_id)
    # Add goal pano thumbnail at the end.
    goal_thumbnail = self._thumbnail_helper.get_thumbnail(
        streetlearn, self._goal_pano_id, trajectory.goal.heading_deg)
    if goal_thumbnail is not None:
      self._thumbnails[num_steps] = goal_thumbnail
    # Move and rotate player into start position.
    streetlearn.engine.SetPosition(start_pano_id)
    # NOTE(review): 'currentpano_id' looks like a typo for 'current_pano_id',
    # which is the attribute name read elsewhere in this class -- confirm.
    streetlearn.currentpano_id = start_pano_id
    streetlearn.engine.RotateObserver(trajectory.steps[start].heading_deg, 0)
    logging.info('From: %s (%f, %f), To: %s', start_pano_id,
                 trajectory.steps[start].lat,
                 trajectory.steps[start].lng, self._goal_pano_id)
    logging.info('Trajectory with %d waypoints (goal included)', num_steps)
    if self._calculate_ground_truth or self._show_shortest_path:
      # Update the shortest path to the goal or first waypoint.
      self._update_shortest_path(streetlearn, start_pano_id)
      if self._show_shortest_path:
        # Use the computed shortest path to color the panos.
        self._color_shortest_path(streetlearn)
    # By default, direction is forward.
    self._gt_direction = 0
    return self._pano_id_to_color

  def _update_shortest_path(self, streetlearn, start_pano_id):
    """Update the target of the shortest paths and color panos along that path.

    Args:
      streetlearn: the streetlearn environment.
      start_pano_id: a string for the current pano ID, for computing the
        optimal path.
    """
    # Target the waypoint/goal that comes after the current step.
    step = self._current_step + 1
    logging.info(self._pano_by_step)
    logging.info('Reached step %d', step)
    if step in self._pano_by_step:
      target_pano_id = self._pano_by_step[step]
      # _shortest_paths is inherited; assumed to return the path and the
      # number of panos it covers -- TODO confirm against the parent class.
      self._shortest_path, num_panos = self._shortest_paths(
          streetlearn, target_pano_id, start_pano_id)
      logging.info('Shortest path from %s to waypoint/goal %s covers %d panos',
                   start_pano_id, target_pano_id, num_panos)

  def _color_shortest_path(self, streetlearn):
    """Color panos along the current shortest path to the current target.

    Args:
      streetlearn: the streetlearn environment.
    """
    for pano_id in self._shortest_path:
      # setdefault: do not overwrite goal/waypoint/coin colors.
      self._pano_id_to_color.setdefault(pano_id, self._colors['shortest_path'])

  @property
  def trajectory(self):
    # The currently active Trajectory (set by _sample_trajectory).
    return self._trajectory

  def _sample_trajectory(self, streetlearn):
    """Sample a trajectory.

    Args:
      streetlearn: Streetlearn instance.
    Returns:
      trajectory object.
    """
    trajectory_index = np.random.randint(len(self._trajectory_data))
    self._trajectory = self._trajectory_data[trajectory_index]
    return self.trajectory

  def _add_reward_to_pano(self, pano_id, reward, color, streetlearn):
    """Add reward to a pano and all its neighbours.

    Args:
      pano_id: centroid pano id.
      reward: Amount of reward to attach to this and neighbouring panos.
      color: Color for the goal in the minimap.
      streetlearn: Streetlearn instance
    """
    # If this already has a reward indirectly through a neighbour, undo that.
    if pano_id in self._reward_pano_id_list:
      if self._reward_pano_id_to_family[pano_id] == pano_id:
        # This was already made a reward field; update reward only.
        for neighbor in self._reward_family[pano_id]:
          # Replace reward and colour.
          self._reward_pano_id_list[neighbor] = reward
          self._pano_id_to_color[neighbor] = color
        return
      else:
        # This was previously an indirect reward field.
        # Remove from other family, then continue with default operation.
        self._reward_family[self._reward_pano_id_to_family[pano_id]].remove(
            pano_id)
        self._reward_pano_id_to_family[pano_id] = None
    # Define family around this id.
    self._add_family(pano_id, streetlearn)
    # Add reward and colour to family and links into family.
    for neighbor in self._reward_family[pano_id]:
      self._reward_pano_id_list[neighbor] = reward
      self._reward_pano_id_to_family[neighbor] = pano_id
      self._pano_id_to_color[neighbor] = color

  def _add_coin_reward_to_pano(self, pano_id):
    """Add coin reward to a pano, but only if that pano has no reward yet.

    Args:
      pano_id: centroid pano id.
    """
    if pano_id not in self._reward_pano_id_list:
      self._reward_pano_id_list[pano_id] = self._reward_per_coin
      # A coin is its own single-member family.
      self._reward_pano_id_to_family[pano_id] = pano_id
      self._reward_family[pano_id] = {pano_id}
      self._pano_id_to_color[pano_id] = self._colors['coin']

  def _add_family(self, pano_id, streetlearn):
    """Add all neighbours of a pano to a list (family) of pano IDs.

    Args:
      pano_id: centroid pano id.
      streetlearn: streetlearn graph for establishing neighbours.
    """
    # If the pano is already part of a reward, do not mess with it.
    if pano_id in self._reward_family:
      return
    # Assign each waypoint with a pano group counter. Used when adding
    # waypoints one by one, in the order of the trajectory.
    if pano_id not in self._step_by_pano:
      logging.info('Added waypoint %d at pano %s', self._step_counter, pano_id)
      self._step_by_pano[pano_id] = self._step_counter
      self._pano_by_step[self._step_counter] = pano_id
      self._step_counter += 1
    # Add the same logic to the immediate neighbours of the pano.
    self._reward_family[pano_id] = set({pano_id})
    pano_metadata = streetlearn.engine.GetMetadata(pano_id)
    for neighbor in pano_metadata.neighbors:
      # Only absorb neighbours that do not already belong to another family.
      if neighbor.id not in self._reward_pano_id_to_family:
        self._reward_pano_id_to_family[neighbor.id] = pano_id
        self._reward_family[pano_id].add(neighbor.id)

  def _check_reward(self, pano_id, streetlearn):
    """Check what reward the current pano yields, based on instructions.

    Args:
      pano_id: centroid pano id.
      streetlearn: streetlearn graph for establishing neighbours.
    Returns:
      The reward for the current step.
    """
    reward = 0
    self._reached_goal = False
    # Check if pano ID is in the list of pano IDs that yield rewards.
    if pano_id in self._reward_pano_id_list:
      reward = self._reward_pano_id_list[pano_id]
      family_id = self._reward_pano_id_to_family[pano_id]
      # If the family_id matches the goal, we have finished the trajectory.
      previous_step = self._current_step
      self._current_step = self._step_by_pano[family_id]
      if family_id == self._goal_pano_id:
        self._reached_goal = True
        logging.info('%d: Completed level', streetlearn.frame_count)
        # It appears the level does not end immediately, so we need to reset
        # the step counter manually at this stage to prevent overflow.
        self._current_step = 0
      else:
        logging.info('%d: Moved from %d to %d', streetlearn.frame_count,
                     previous_step, self._current_step)
        if self._calculate_ground_truth or self._show_shortest_path:
          # Update the shortest path to the goal or next waypoint.
          self._update_shortest_path(streetlearn, pano_id)
          if self._show_shortest_path:
            # Use the computed shortest path to color the panos.
            self._color_shortest_path(streetlearn)
      # Consume the whole family so the reward cannot be collected twice.
      for i in self._reward_family[family_id]:
        del self._reward_pano_id_list[i]
        del self._reward_pano_id_to_family[i]
        del self._pano_id_to_color[i]
      del self._reward_family[family_id]
      # The value of the reward determines if the goal was reached and the
      # episode can now end.
      logging.info('%d: Picked up reward of %f at pano %s.',
                   streetlearn.frame_count, reward, pano_id)
    # Add optional coin rewards.
    if pano_id in self._coin_pano_id_set:
      reward += self._reward_per_coin
      self._coin_pano_id_set.remove(pano_id)
    return reward

  def get_reward(self, streetlearn):
    """Looks at current_pano_id and collects any reward found there.

    Args:
      streetlearn: the streetlearn environment.
    Returns:
      reward: the reward from the last step.
    """
    # Calculate coin, waypoint and goal rewards, determine if end of episode.
    current_pano_id = streetlearn.current_pano_id
    reward = self._check_reward(current_pano_id, streetlearn)
    self._num_steps_this_goal += 1
    return reward

  def get_info(self, streetlearn):
    """Returns current information about the state of the environment.

    Args:
      streetlearn: a StreetLearn instance.
    Returns:
      info: information from the environment at the last step.
    """
    info = super(InstructionsBase, self).get_info(streetlearn)
    info['num_steps_this_goal'] = self._num_steps_this_goal
    info['current_step'] = self._current_step
    info['current_goal_id'] = self._goal_pano_id
    info['distance_to_goal'] = streetlearn.engine.GetPanoDistance(
        streetlearn.current_pano_id, self._goal_pano_id)
    info['reward_current_goal'] = self._reward_at_goal
    if self._calculate_ground_truth:
      # _panos_to_goal is assumed to be populated by the shortest-path
      # computation in the parent class -- TODO confirm.
      current_pano_id = streetlearn.current_pano_id
      next_pano_id = self._panos_to_goal[current_pano_id]
      info['next_pano_id'] = next_pano_id
      if next_pano_id:
        bearing_to_next_pano = streetlearn.engine.GetPanoBearing(
            current_pano_id, next_pano_id) - streetlearn.engine.GetYaw()
      else:
        bearing_to_next_pano = 0
      # Wrap the bearing into [-180, 180).
      info['bearing_to_next_pano'] = (bearing_to_next_pano + 180) % 360 - 180
    return info

  def done(self):
    """Returns a flag indicating the end of the current episode.

    This game ends only at the end of the episode or if the goal is reached.
    """
    if self._reached_goal:
      # Reset the flag so the next episode does not end immediately.
      self._reached_goal = False
      return True
    else:
      return False

  def thumbnails(self):
    """Returns extra observation thumbnails.

    Returns:
      thumbnails: uint8 array of shape (max_instructions + 1, height, width,
        3), one thumbnail per instruction with the goal thumbnail last and
        zero-padding beyond the trajectory length.
    """
    return self._thumbnails

  def instructions(self):
    """Returns instructions.

    Returns:
      instructions: list of instruction strings for the current trajectory.
    """
    return self._instructions

  @property
  def goal_id(self):
    """Returns the id of the goal Pano."""
    return self._goal_pano_id

  def on_step(self, streetlearn):
    """Update the ground truth direction to take and the set of touched panos.

    Args:
      streetlearn: the streetlearn environment.
    """
    super(InstructionsBase, self).on_step(streetlearn)
    if self._calculate_ground_truth:
      # streetlearn.current_pano_id is not always updated.
      current_pano_id = streetlearn.engine.GetPano().id
      # What is the next pano and what is the direction to the pano?
      next_pano_id = self._panos_to_goal[current_pano_id]
      if next_pano_id:
        yaw = streetlearn.engine.GetYaw()
        bearing = streetlearn.engine.GetPanoBearing(
            current_pano_id, next_pano_id) - yaw
        # Wrap the relative bearing into [-180, 180).
        self._gt_direction = (bearing + 180) % 360 - 180
      else:
        self._gt_direction = 0

  def ground_truth_direction(self):
    """Returns the ground truth direction to take.

    Returns:
      ground_truth_direction: Float angle with the ground truth direction
        to be taken for the agent to go towards the goal.
    """
    return self._gt_direction
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from horizon.test.settings import * # noqa
from horizon.utils import secret_key
from openstack_dashboard import exceptions
from openstack_dashboard.static_settings import get_staticfiles_dirs # noqa
STATICFILES_DIRS = get_staticfiles_dirs()

# Filesystem layout: this settings module lives in the test package, one level
# below the openstack_dashboard package root.
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(TEST_DIR, ".."))
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))

# Secret key is generated once and cached on disk so repeated test runs reuse it.
SECRET_KEY = secret_key.generate_or_read_from_file(
    os.path.join(TEST_DIR, '.secret_key_store'))
ROOT_URLCONF = 'openstack_dashboard.test.urls'
TEMPLATE_DIRS = (
    os.path.join(TEST_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
    'openstack_dashboard.context_processors.openstack',
)

# Django apps loaded for the test suite: Django contrib apps, the test runner
# (django_nose), auth/compressor helpers, and the dashboard panels under test.
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'django.contrib.messages',
    'django.contrib.humanize',
    'django_nose',
    'openstack_auth',
    'compressor',
    'horizon',
    'openstack_dashboard',
    'openstack_dashboard.dashboards.project',
    'openstack_dashboard.dashboards.admin',
    'openstack_dashboard.dashboards.identity',
    'openstack_dashboard.dashboards.settings',
    'openstack_dashboard.dashboards.router',
)
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
SITE_BRANDING = 'OpenStack'

# Horizon framework configuration used by the tests (dashboards to register,
# password validation rule, and exception classification).
HORIZON_CONFIG = {
    'dashboards': ('project', 'admin', 'identity', 'settings', 'router',),
    'default_dashboard': 'project',
    "password_validator": {
        "regex": '^.{8,18}$',
        "help_text": "Password must be between 8 and 18 characters."
    },
    'user_home': None,
    'help_url': "http://docs.openstack.org",
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
    'angular_modules': [],
    'js_files': [],
}

# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True

# Two fake regions so region-switching code paths are exercised in tests.
AVAILABLE_REGIONS = [
    ('http://localhost:5000/v2.0', 'local'),
    ('http://remote:5000/v2.0', 'remote'),
]
OPENSTACK_API_VERSIONS = {
    "identity": 3
}
OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'test_domain'

# Capabilities of the (fake) keystone backend used by the identity panels.
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': False,  # Enabled in specific tests only
    # Parameters below (enable_lb, enable_firewall, enable_vpn)
    # control if these panels are displayed or not,
    # i.e. they only affect the navigation menu.
    # These panels are registered even if enable_XXX is False,
    # so we don't need to set them to True in most unit tests
    # to avoid stubbing neutron extension check calls.
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'profile_support': None,
    'enable_distributed_router': False,
    # 'profile_support': 'cisco'
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': True,
}

# Image format choices shown on the create-image form.
OPENSTACK_IMAGE_BACKEND = {
    'image_formats': [
        ('', 'Select format'),
        ('aki', 'AKI - Amazon Kernel Image'),
        ('ami', 'AMI - Amazon Machine Image'),
        ('ari', 'ARI - Amazon Ramdisk Image'),
        ('iso', 'ISO - Optical Disk Image'),
        ('qcow2', 'QCOW2 - QEMU Emulator'),
        ('raw', 'Raw'),
        ('vdi', 'VDI'),
        ('vhd', 'VHD'),
        ('vmdk', 'VMDK')
    ]
}

# Route all client-library loggers to the 'test' handler and stop propagation
# so test output stays quiet; iso8601 is silenced entirely.
LOGGING['loggers'].update(
    {
        'openstack_dashboard': {
            'handlers': ['test'],
            'propagate': False,
        },
        'openstack_auth': {
            'handlers': ['test'],
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['test'],
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['test'],
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['test'],
            'propagate': False,
        },
        'neutronclient': {
            'handlers': ['test'],
            'propagate': False,
        },
        'iso8601': {
            'handlers': ['null'],
            'propagate': False,
        },
    }
)

# Canned security-group rule templates offered in the rule-creation form.
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': 'ALL TCP',
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'http': {
        'name': 'HTTP',
        'ip_protocol': 'tcp',
        'from_port': '80',
        'to_port': '80',
    },
}

# Arguments passed to the nose test runner (coverage limited to this package).
NOSE_ARGS = ['--nocapture',
             '--nologcapture',
             '--cover-package=openstack_dashboard',
             '--cover-inclusive',
             '--all-modules']

POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
POLICY_FILES = {
    'identity': 'keystone_policy.json',
    'compute': 'nova_policy.json'
}

# The openstack_auth.user.Token object isn't JSON-serializable ATM
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'

# Dummy settings consumed by the REST API settings-exposure tests.
REST_API_SETTING_1 = 'foo'
REST_API_SETTING_2 = 'bar'
REST_API_SECURITY = 'SECURITY'
REST_API_REQUIRED_SETTINGS = ['REST_API_SETTING_1']
REST_API_ADDITIONAL_SETTINGS = ['REST_API_SETTING_2']
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for quantizing a Tensorflow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
# Short aliases for the contrib-layers constructors used throughout the tests.
conv2d = layers.conv2d
separable_conv2d = layers.separable_conv2d
class QuantizeTest(test_util.TensorFlowTestCase):
  """Tests that quantize.Quantize rewrites graphs with FakeQuant* nodes."""

  def _RunTestOverParameters(self, test_fn):
    """Runs test_fn once for the training and once for the eval rewrite."""
    params = [True, False]
    for is_training in params:
      test_fn(is_training)

  def testInsertQuantOpFailsWhenOpsNotConnected(self):
    # Bug fix: this wrapper previously contained only `pass`, which silently
    # disabled the check below even though its implementation exists; wire it
    # up through _RunTestOverParameters like every other wrapper in the class.
    self._RunTestOverParameters(self._TestInsertQuantOpFailsWhenOpsNotConnected)

  def _TestInsertQuantOpFailsWhenOpsNotConnected(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      inputs = array_ops.zeros((batch_size, height, width, depth))
      conv = conv2d(inputs, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test')
      relu = nn_ops.relu6(inputs)

    # Inserting a quantization op between two unconnected ops should fail with
    # ValueError.
    with self.assertRaises(ValueError) as err:
      quantize._InsertQuantOp('test', is_training, conv.op, [relu.op],
                              'FailingQuantOp')
    self.assertEqual(
        str(err.exception), 'Some inputs not quantized for ops: [Relu6]')

  def testInsertQuantOpForAddAfterConv2d(self):
    self._RunTestOverParameters(self._TestInsertQuantOpForAddAfterConv2d)

  def _TestInsertQuantOpForAddAfterConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(input1, 32, [5, 5], stride=2, padding='SAME',
                    weights_initializer=self._WeightInit(0.09),
                    activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Scan through all FakeQuant operations, ensuring that the activation
    # isn't in the consumers of the operation. Since activations are folded
    # into the preceding operation during inference, the FakeQuant operation
    # after the activation is all that is needed.
    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())
        self.assertNotIn('test/relu6', [c.name for c in consumers])

  def testInsertQuantOpForAddAfterSeparableConv2d(self):
    self._RunTestOverParameters(
        self._TestInsertQuantOpForAddAfterSeparableConv2d)

  def _TestInsertQuantOpForAddAfterSeparableConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, depth))
      conv = separable_conv2d(input1, None, [5, 5], stride=2,
                              depth_multiplier=1.0, padding='SAME',
                              weights_initializer=self._WeightInit(0.09),
                              activation_fn=None, scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
    # Check if output of bias add is quantized
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        # Scan through all FakeQuant operations, ensuring that the activation
        # identity op isn't in the consumers of the operation.
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())
        self.assertNotIn('test/relu6', [c.name for c in consumers])

  def testInsertQuantOpInSeparableConv2d(self):
    self._RunTestOverParameters(self._TestInsertQuantOpInSeparableConv2d)

  def _TestInsertQuantOpInSeparableConv2d(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, depth))
      conv = separable_conv2d(
          input1,
          3, [5, 5],
          stride=2,
          depth_multiplier=1.0,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test')
      node = math_ops.add(conv, input2, name='test/add')
      node = nn_ops.relu6(node, name='test/relu6')
      update_barrier = control_flow_ops.no_op(name='update_barrier')
      with ops.control_dependencies([update_barrier]):
        array_ops.identity(node, name='control_dependency')

    quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
    # Check if output of bias add is quantized
    quantization_node_name = 'FakeQuantWithMinMaxVars'
    conv_quant = graph.get_operation_by_name('test/test/conv_quant/' +
                                             quantization_node_name)
    self.assertEqual(conv_quant.type, quantization_node_name)

    # Check if weights for both convs inside separable conv are quantized
    pointwise_weight_quant = graph.get_operation_by_name(
        'test/test/weights_quant/' + quantization_node_name)
    self.assertEqual(pointwise_weight_quant.type, quantization_node_name)
    depthwise_weight_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/weights_quant/' + quantization_node_name)
    self.assertEqual(depthwise_weight_quant.type, quantization_node_name)

    # Check if activations after first depthwise conv are quantized.
    depthwise_act_quant = graph.get_operation_by_name(
        'test/test/separable_conv2d/act_quant/' + quantization_node_name)
    self.assertEqual(depthwise_act_quant.type, quantization_node_name)

    for op in graph.get_operations():
      if op.type == quantization_node_name:
        quant_op = graph.get_operation_by_name(op.name)
        # Scan through all FakeQuant operations, ensuring that the activation
        # identity op isn't in the consumers of the operation.
        consumers = []
        for output in quant_op.outputs:
          consumers.extend(output.consumers())
        self.assertNotIn('test/relu6', [c.name for c in consumers])

  def testLayerActivationQuantized(self):
    self._RunTestOverParameters(self._TestLayerActivationQuantized)

  def _TestLayerActivationQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      _ = conv2d(
          input1,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          biases_initializer=None,
          scope='test')
      # Ensure that both weights and output of activations are quantized
      # when we have a conv->relu6 with no bias add
      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      activation_op = graph.get_operation_by_name('test/Relu6')
      conv_op = graph.get_operation_by_name('test/Conv2D')
      self.assertTrue('test/weights_quant/FakeQuantWithMinMaxVars:0' in
                      [tensor_in.name for tensor_in in conv_op.inputs])
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [op.type for op in activation_op.outputs[0].consumers()])

  def testFinalLayerQuantized(self):
    self._RunTestOverParameters(self._TestFinalLayerQuantized)

  def _TestFinalLayerQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      _ = conv2d(
          input1,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test')
      # Ensure that a FakeQuant operation is in the outputs of the BiasAdd.
      bias_add_op = graph.get_operation_by_name('test/BiasAdd')
      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [op.type for op in bias_add_op.outputs[0].consumers()])

  def testPostActivationBypassQuantized(self):
    self._RunTestOverParameters(self._TestPostActivationBypassQuantized)

  def _TestPostActivationBypassQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
      conv = conv2d(
          input1,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          scope='test/test')
      bypass_tensor = math_ops.add(conv, input2, name='test/add')
      # The output of the post_activation bypass will be another layer.
      _ = conv2d(
          bypass_tensor,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          scope='test/unused')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Ensure that the bypass node is preceded by and followed by a
      # FakeQuantWithMinMaxVar operation, since the output of the Add isn't an
      # activation.
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [c.type for c in bypass_tensor.consumers()])
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [i.op.type for i in bypass_tensor.op.inputs])

  def testOverlappingPostActivationBypassQuantized(self):
    self._RunTestOverParameters(
        self._TestOverlappingPostActivationBypassQuantized)

  def _TestOverlappingPostActivationBypassQuantized(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      batch_size, height, width, depth = 5, 128, 128, 3
      conv_input = array_ops.zeros((batch_size, height, width, depth))
      conv1 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=nn_ops.relu6,
          scope='test/test1')

      # The bypass of this conv is the post activation bypass of the previous
      # conv.
      conv2 = conv2d(
          conv_input,
          32, [5, 5],
          stride=2,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=None,
          scope='test/test2')

      bypass_tensor = math_ops.add(conv1, conv2, name='test/add')
      _ = nn_ops.relu6(bypass_tensor, name='test/output')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Ensure that the bypass node is preceded by a FakeQuantWithMinMaxVar
      # operation, and NOT followed by one.
      self.assertTrue('FakeQuantWithMinMaxVars' not in
                      [c.type for c in bypass_tensor.consumers()])
      self.assertTrue('FakeQuantWithMinMaxVars' in
                      [i.op.type for i in bypass_tensor.op.inputs])

      # Ensure that all the convs and activations are quantized.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'test/test1/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test2/weights_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue(
          'test/test1/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertTrue('test/act_quant/FakeQuantWithMinMaxVars' in op_names)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/test1/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)
      self.assertEqual(
          'Relu6',
          graph.get_operation_by_name(
              'test/act_quant/FakeQuantWithMinMaxVars').inputs[0].op.type)

  def testWithNameScope(self):
    self._RunTestOverParameters(self._TestWithNameScope)

  def _TestWithNameScope(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      with graph.name_scope('name_scope'):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        _ = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

    for op in graph.get_operations():
      self.assertTrue(not op.name.startswith('name_scope/name_scope/'),
                      'Broken op: %s' % op.name)

  def testWithNullNameScope(self):
    self._RunTestOverParameters(self._TestWithNullNameScope)

  def _TestWithNullNameScope(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      with graph.name_scope(None):
        batch_size, height, width, depth = 5, 128, 128, 32
        input1 = array_ops.zeros((batch_size, height, width, depth))
        _ = conv2d(
            input1,
            32, [5, 5],
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test')

        quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)
        # Passes if Quantize() does not crash.

  def testWithNonMatchingNameScope(self):
    self._RunTestOverParameters(self._testWithNonMatchingNameScope)

  def _testWithNonMatchingNameScope(self, is_training):
    graph = ops.Graph()
    with graph.as_default():
      with graph.name_scope('name_scope'):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        _ = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test')

    op_names_before_quantize = set([op.name for op in graph.get_operations()])
    quantize.Quantize(
        graph, is_training, weight_bits=8, activation_bits=8,
        scope='NonExisting/')
    op_names_after_quantize = set([op.name for op in graph.get_operations()])

    # No ops should be inserted or removed.
    self.assertEqual(op_names_before_quantize, op_names_after_quantize)

  def testSinglePartitionedVariable(self):
    self._RunTestOverParameters(self._testSinglePartitionedVariable)

  def _testSinglePartitionedVariable(self, is_training):
    # When weights are partitioned into a single partition, the weights
    # variable is followed by an identity -> identity (an additional identity
    # node).
    partitioner = partitioned_variables.fixed_size_partitioner(1)
    graph = ops.Graph()
    with graph.as_default():
      with variable_scope.variable_scope('part', partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
        conv = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test/test')
        node = math_ops.add(conv, input2, name='test/add')
        node = nn_ops.relu6(node, name='test/relu6')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Check that the weight's quant node was added.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)

  def testMultiplePartitionedVariables(self):
    self._RunTestOverParameters(self._testMultiplePartitionedVariables)

  def _testMultiplePartitionedVariables(self, is_training):
    # When weights are partitioned into multiple partitions the weights
    # variable is followed by an identity -> concat -> identity to group the
    # partitions.
    partitioner = partitioned_variables.fixed_size_partitioner(2)
    graph = ops.Graph()
    with graph.as_default():
      with variable_scope.variable_scope('part', partitioner=partitioner):
        batch_size, height, width, depth = 5, 128, 128, 3
        input1 = array_ops.zeros((batch_size, height, width, depth))
        input2 = array_ops.zeros((batch_size, height / 2, width / 2, 32))
        conv = conv2d(
            input1,
            32, [5, 5],
            stride=2,
            padding='SAME',
            weights_initializer=self._WeightInit(0.09),
            activation_fn=None,
            scope='test/test')
        node = math_ops.add(conv, input2, name='test/add')
        node = nn_ops.relu6(node, name='test/relu6')

      quantize.Quantize(graph, is_training, weight_bits=8, activation_bits=8)

      # Check that the weight's quant node was added.
      op_names = [op.name for op in graph.get_operations()]
      self.assertTrue(
          'part/test/test/weights_quant/FakeQuantWithMinMaxVars' in op_names)

  def _WeightInit(self, stddev):
    """Returns truncated normal variable initializer.

    Function is defined purely to shorten the name so that it stops wrapping.

    Args:
      stddev: Standard deviation of normal variable.

    Returns:
      An initializer that initializes with a truncated normal variable.
    """
    return init_ops.truncated_normal_initializer(stddev=stddev)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  googletest.main()
| |
# extension module to the "dpkt" library to parse TableDumpV2 MRT/RIB files
# use: parsing routeviews files and creating an IP to ASN list
# author hadi a
# note: parts of this code are copied/based on the dpkt project
# date 24.11.2009 v 1.1
# note: this module might be sped up by replacing some struct.unpack calls with ord()
import struct
# MRT record type codes for the two TABLE_DUMP formats handled below.
TABLE_DUMP_V1 = 12
TABLE_DUMP_V2 = 13
# Boundary between 16-bit and 32-bit AS numbers (2**16).
# NOTE(review): despite the name, this is the 16-bit limit, not 32-bit —
# owning_asn() below uses it to decide when to render an ASN as "hi.lo".
AS32_SIZE = pow(2,16)
class MRTHeader2:
    """Fixed-size MRT record header: timestamp, type, subtype and length.

    The 12 header bytes are decoded eagerly; whatever follows them is kept
    untouched in ``data``.
    """

    HDR_LEN = 12

    def __init__(self, buf):
        fields = struct.unpack('>IHHI', buf[0:12])
        self.ts, self.type, self.subtype, self.len = fields
        self.data = buf[12:]

    def __str__(self):
        values = (self.ts, self.type, self.subtype, self.len)
        return 'mrt_{ts:%d,type:%d,subtype:%d,len:%d}'% values

    def __repr__(self):
        return str(self)
class TableDumpV1:
    """One TABLE_DUMP (v1) RIB entry: an IPv4 prefix plus its BGP attributes.

    Attributes are parsed lazily (see parse_attrs) because most callers only
    need the AS_PATH.
    """
    def __init__(self, buf):
        # Fixed 22-byte entry header; the attr_len-byte attribute blob follows.
        self.view, self.seq, prefix, self.bitmask, self.status, self.originate_ts, self.peer_ip, self.peer_as, self.attr_len\
            = struct.unpack('>HHIBBIIHH', buf[:22])
        assert self.view == 0 # not necessary but in our data is so
        assert self.status == 1
        # Render the 32-bit prefix integer as a dotted-quad IPv4 string.
        self.cidr = '%d.%d.%d.%d' % (prefix>>24&0xff, prefix>>16&0xff, prefix>>8&0xff, prefix&0xff)
        self.data = buf[22:]
        self.attrs = None  # filled in by parse_attrs()
    def as_path(self):
        """Returns the single AS_PATH attribute; asserts exactly one exists."""
        self.parse_attrs()
        as_path = None
        for x in self.attrs:
            if x.type == Attribute.AS_PATH:
                assert as_path is None # "two aspaths"
                as_path = x.as_path
        assert as_path is not None # no as path?
        return as_path
    def parse_attrs(self):
        """Parses the attribute blob into self.attrs (idempotent)."""
        if self.attrs is not None:
            return
        plen = self.attr_len
        l = []
        # Consume one Attribute at a time until the declared length is used up.
        while plen > 0:
            attr = Attribute(self.data, False)
            self.data = self.data[len(attr):]
            plen -= len(attr)
            l.append(attr)
        #
        assert len(self.data) == 0
        self.attrs = l
    def __str__(self):
        return 'TableDumpV1{seq:%d,cidr:%s,bitmask:%d,attr_len:%d,peer_as:%d,status:%d,originate_ts:%d}' % \
            (self.seq, self.cidr, self.bitmask, self.attr_len, self.peer_as, self.status, self.originate_ts)
    def __repr__(self):
        return str(self)
class TableDumpV2:
    """One TABLE_DUMP_V2 RIB entry: an IPv4 prefix plus its TDEntry list.

    NOTE(review): the byte-indexing below (``ord(buf[5])``) assumes Python 2
    byte strings; on Python 3 indexing bytes already yields ints — confirm
    the intended interpreter before reuse.
    """
    # TABLE_DUMP_V2 subtypes used:
    PEER_INDEX_TABLE = 1
    RIB_IPV4_UNICAST = 2
    # Speedup: we only ever use the path of the first entry, so attribute
    # parsing can be skipped for the rest.
    PARSE_ONLY_FIRST_TDENTRY = True

    def __init__(self, buf):
        """Parses one RIB_IPV4_UNICAST record from buf (must consume it all)."""
        self.seq, self.bitmask = struct.unpack('>IB', buf[0:5])
        # Bug fix: use floor division so the octet count stays an integer on
        # Python 3 as well; on Python 2 '//' behaves identically to '/' here.
        octets = (self.bitmask + 7) // 8
        # The prefix field is variable-length: only the octets covered by the
        # prefix length are present on the wire.
        if octets == 0:
            self.cidr = '0.0.0.0'
            px = 5
        elif octets == 1:
            self.cidr = '%d.0.0.0' % ord(buf[5])
            px = 6
        elif octets == 2:
            self.cidr = '%d.%d.0.0' % tuple(map(ord, buf[5:7]))
            px = 7
        elif octets == 3:
            self.cidr = '%d.%d.%d.0' % tuple(map(ord, buf[5:8]))
            px = 8
        else:
            self.cidr = '%d.%d.%d.%d' % tuple(map(ord, buf[5:9]))
            px = 9
        self.entry_count = struct.unpack('>H', buf[px:px+2])[0]
        buf = buf[px+2:]
        # Parse attributes for the first entry only (see class flag above).
        f_parseattrs = True
        f_parseattrs_2nd = not self.PARSE_ONLY_FIRST_TDENTRY
        self.entries = []
        for ix in range(self.entry_count,0,-1):
            et = TDEntry(buf, f_parseattrs)
            self.entries.append(et)
            buf = et.raw
            et.raw = None  # drop the reference so the tail isn't kept alive
            f_parseattrs = f_parseattrs_2nd
        #
        assert len(buf) == 0

    def __str__(self):
        return 'TableDumpV2{seq:%d,cidr:%s,bitmask:%d,entry_count:%d}' % (self.seq, self.cidr, self.bitmask, self.entry_count)

    def __repr__(self):
        return str(self)
class TDEntry:
    """A single RIB entry inside a TABLE_DUMP_V2 record.

    The unconsumed remainder of the input buffer is left in ``raw`` so the
    caller can continue parsing subsequent entries.
    """
    def __init__(self, buf, f_parseattrs):
        # 8-byte fixed header, then attr_len bytes of BGP attributes.
        self.peer_index, self.originate_ts, self.attr_len = struct.unpack('>HIH', buf[0:8])
        self.data = buf[8 : 8 + self.attr_len]
        self.raw = buf[8+self.attr_len:]
        plen = self.attr_len
        l = []
        # Attribute parsing is optional for speed (see TableDumpV2).
        if f_parseattrs:
            while plen > 0:
                attr = Attribute(self.data, True)
                self.data = self.data[len(attr):]
                plen -= len(attr)
                l.append(attr)
            assert(plen == 0)
        self.attrs = l
    def __str__(self):
        return 'TDEntry{peer_index: %d, originate_ts: %d, attr_len: %d}' % (self.peer_index, self.originate_ts, self.attr_len)
    def __repr__(self):
        return str(self)
    def as_path(self):
        """Returns the single AS_PATH attribute; asserts exactly one exists."""
        as_path = None
        for x in self.attrs:
            if x.type == Attribute.AS_PATH:
                assert as_path is None # "TDEntry.two aspaths"
                as_path = x.as_path
        assert as_path is not None # "TDEntry.no aspaths"
        return as_path
class Attribute:
# attribute types we use
AS_PATH = 2
def _get_o(self):
return (self.flags >> 7) & 0x1
def _set_o(self, o):
self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
optional = property(_get_o, _set_o)
def _get_t(self):
return (self.flags >> 6) & 0x1
def _set_t(self, t):
self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
transitive = property(_get_t, _set_t)
def _get_p(self):
return (self.flags >> 5) & 0x1
def _set_p(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
partial = property(_get_p, _set_p)
def _get_e(self):
return (self.flags >> 4) & 0x1
def _set_e(self, e):
self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
extended_length = property(_get_e, _set_e)
def __init__(self, buf, is32):
self.flags, self.type = struct.unpack('>BB', buf[0:2])
self.data = buf[2:]
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == self.AS_PATH:
self.as_path = self.ASPath32(self.data, is32)
# We do not use the rest of the attributes, so I have not configured them for speed
# stats on usage in TDv2 files(on approx 100.000 thousand):
#ORIGIN 1=24%
#AS_PATH 2=24%
#NEXT_HOP 3=24%
#MULTI_EXIT_DISC 4=10%
#ATOMIC_AGGREGATE 6=1.5%
#AGGREGATOR 7=2.5%
#COMMUNITIES 8=14%
def __len__(self):
attr_len = 2 if self.extended_length else 1
return 2 + attr_len + len(self.data)
def __str__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return 'Attr2{type:%d,flags:%d,len(data):%d}' % (self.type, self.flags, len(self.data))
def __repr__(self):
return str(self)
class ASPath32:
# AS_Path types we use:
AS_SET = 1
AS_SEQUENCE = 2
def __init__(self, buf, is32):
self.segments = []
self.data = buf
l = []
while self.data:
seg = self.ASPathSegment32(self.data, is32)
self.data = self.data[len(seg):]
l.append(seg)
self.data = self.segments = l
def __len__(self):
return sum(map(len, self.data))
def __str__(self):
# return ''.join(map(str, self.data))
return repr(self)
def __repr__(self):
return 'ASPath32{segments:%d,path:%s}' % (len(self.segments), str(self.segments))
def owning_asn(self):
if len(self.segments) == 1:
x = int(self.segments[0].path[-1])
if x > AS32_SIZE or x < 0:
return '%d.%d' % (x>>16, x&0xffff) # correct?
return str(x)
elif len(self.segments) == 2 and self.segments[0].type==self.AS_SEQUENCE and self.segments[1].type==self.AS_SET:
x = str(self.segments[1]) #or self.segments[1].path[-1]?
assert '{' in x
return x
else:
return '!' + str(self.segments)
class ASPathSegment32:
# AS_Path types we use:
AS_SET = 1 # shouldn't have to copy these globals in both classes
AS_SEQUENCE = 2
def __init__(self, buf, is32):
self.type, self.len = struct.unpack('>BB', buf[:2])
self.data = buf[2:]
self.aslen = 4 if is32 else 2
#print self.type
assert self.type==self.AS_SET or self.type==self.AS_SEQUENCE or self.type==3 # 3!
# stats on 100,000: {1: 1196, 2: 3677845}
l = []
for i in range(self.len):
if is32:
AS = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
else:
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l.append(AS)
#
self.data = self.path = l
def __len__(self):
return 2 + self.aslen*len(self.path)
def __str__(self):
#assert self.type==self.AS_SET or self.type==self.AS_SEQUENCE
as_str = '' if self.type==self.AS_SEQUENCE else '{' if self.type==self.AS_SET else '<<'
for AS in self.path:
as_str += str(AS) + ' '
as_str = as_str.strip()
as_str += '' if self.type==self.AS_SEQUENCE else '}' if self.type==self.AS_SET else '>>'
return as_str
def __repr__(self):
return str(self)
| |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import os
from unittest import TestCase
from numpy import (
arange,
array,
int64,
float64,
full,
nan,
transpose,
zeros,
)
from numpy.testing import assert_almost_equal, assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Timestamp,
Timedelta,
NaT,
date_range,
)
from testfixtures import TempDirectory
from zipline.data.minute_bars import (
BcolzMinuteBarWriter,
BcolzMinuteBarReader,
BcolzMinuteOverlappingData,
US_EQUITIES_MINUTES_PER_DAY,
BcolzMinuteWriterColumnMismatch
)
from zipline.finance.trading import TradingEnvironment
# Calendar is set to cover several half days, to check a case where half
# days would be read out of order in cases of windows which spanned over
# multiple half days.
# Bounds of the trading-calendar slice the tests operate over.
TEST_CALENDAR_START = Timestamp('2014-06-02', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-12-31', tz='UTC')
class BcolzMinuteBarTestCase(TestCase):
@classmethod
def setUpClass(cls):
    """Slices the trading calendar to the test window, once per class."""
    cls.env = TradingEnvironment()
    all_market_opens = cls.env.open_and_closes.market_open
    all_market_closes = cls.env.open_and_closes.market_close
    # Restrict the calendar series to [TEST_CALENDAR_START, TEST_CALENDAR_STOP].
    indexer = all_market_opens.index.slice_indexer(
        start=TEST_CALENDAR_START,
        end=TEST_CALENDAR_STOP
    )
    cls.market_opens = all_market_opens[indexer]
    cls.market_closes = all_market_closes[indexer]
    cls.test_calendar_start = cls.market_opens.index[0]
    cls.test_calendar_stop = cls.market_opens.index[-1]
def setUp(self):
    """Creates a temp directory with a fresh minute-bar writer and reader."""
    self.dir_ = TempDirectory()
    self.dir_.create()
    self.dest = self.dir_.getpath('minute_bars')
    os.makedirs(self.dest)
    self.writer = BcolzMinuteBarWriter(
        TEST_CALENDAR_START,
        self.dest,
        self.market_opens,
        self.market_closes,
        US_EQUITIES_MINUTES_PER_DAY,
    )
    self.reader = BcolzMinuteBarReader(self.dest)
def tearDown(self):
    # Remove the per-test temp directory created in setUp.
    self.dir_.cleanup()
def test_write_one_ohlcv(self):
minute = self.market_opens[self.test_calendar_start]
sid = 1
data = DataFrame(
data={
'open': [10.0],
'high': [20.0],
'low': [30.0],
'close': [40.0],
'volume': [50.0]
},
index=[minute])
self.writer.write_sid(sid, data)
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(10.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(20.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(30.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(40.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(50.0, volume_price)
def test_write_two_bars(self):
minute_0 = self.market_opens[self.test_calendar_start]
minute_1 = minute_0 + timedelta(minutes=1)
sid = 1
data = DataFrame(
data={
'open': [10.0, 11.0],
'high': [20.0, 21.0],
'low': [30.0, 31.0],
'close': [40.0, 41.0],
'volume': [50.0, 51.0]
},
index=[minute_0, minute_1])
self.writer.write_sid(sid, data)
open_price = self.reader.get_value(sid, minute_0, 'open')
self.assertEquals(10.0, open_price)
high_price = self.reader.get_value(sid, minute_0, 'high')
self.assertEquals(20.0, high_price)
low_price = self.reader.get_value(sid, minute_0, 'low')
self.assertEquals(30.0, low_price)
close_price = self.reader.get_value(sid, minute_0, 'close')
self.assertEquals(40.0, close_price)
volume_price = self.reader.get_value(sid, minute_0, 'volume')
self.assertEquals(50.0, volume_price)
open_price = self.reader.get_value(sid, minute_1, 'open')
self.assertEquals(11.0, open_price)
high_price = self.reader.get_value(sid, minute_1, 'high')
self.assertEquals(21.0, high_price)
low_price = self.reader.get_value(sid, minute_1, 'low')
self.assertEquals(31.0, low_price)
close_price = self.reader.get_value(sid, minute_1, 'close')
self.assertEquals(41.0, close_price)
volume_price = self.reader.get_value(sid, minute_1, 'volume')
self.assertEquals(51.0, volume_price)
def test_write_on_second_day(self):
second_day = self.test_calendar_start + 1
minute = self.market_opens[second_day]
sid = 1
data = DataFrame(
data={
'open': [10.0],
'high': [20.0],
'low': [30.0],
'close': [40.0],
'volume': [50.0]
},
index=[minute])
self.writer.write_sid(sid, data)
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(10.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(20.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(30.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(40.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(50.0, volume_price)
def test_write_empty(self):
minute = self.market_opens[self.test_calendar_start]
sid = 1
data = DataFrame(
data={
'open': [0],
'high': [0],
'low': [0],
'close': [0],
'volume': [0]
},
index=[minute])
self.writer.write_sid(sid, data)
open_price = self.reader.get_value(sid, minute, 'open')
assert_almost_equal(nan, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
assert_almost_equal(nan, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
assert_almost_equal(nan, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
assert_almost_equal(nan, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
assert_almost_equal(0, volume_price)
def test_write_on_multiple_days(self):
tds = self.market_opens.index
days = tds[tds.slice_indexer(
start=self.test_calendar_start + 1,
end=self.test_calendar_start + 3
)]
minutes = DatetimeIndex([
self.market_opens[days[0]] + timedelta(minutes=60),
self.market_opens[days[1]] + timedelta(minutes=120),
])
sid = 1
data = DataFrame(
data={
'open': [10.0, 11.0],
'high': [20.0, 21.0],
'low': [30.0, 31.0],
'close': [40.0, 41.0],
'volume': [50.0, 51.0]
},
index=minutes)
self.writer.write_sid(sid, data)
minute = minutes[0]
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(10.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(20.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(30.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(40.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(50.0, volume_price)
minute = minutes[1]
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(11.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(21.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(31.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(41.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(51.0, volume_price)
def test_no_overwrite(self):
minute = self.market_opens[TEST_CALENDAR_START]
sid = 1
data = DataFrame(
data={
'open': [10.0],
'high': [20.0],
'low': [30.0],
'close': [40.0],
'volume': [50.0]
},
index=[minute])
self.writer.write_sid(sid, data)
with self.assertRaises(BcolzMinuteOverlappingData):
self.writer.write_sid(sid, data)
def test_append_to_same_day(self):
"""
Test writing data with the same date as existing data in our file.
"""
sid = 1
first_minute = self.market_opens[TEST_CALENDAR_START]
data = DataFrame(
data={
'open': [10.0],
'high': [20.0],
'low': [30.0],
'close': [40.0],
'volume': [50.0]
},
index=[first_minute])
self.writer.write_sid(sid, data)
# Write data in the same day as the previous minute
second_minute = first_minute + Timedelta(minutes=1)
new_data = DataFrame(
data={
'open': [5.0],
'high': [10.0],
'low': [3.0],
'close': [7.0],
'volume': [10.0]
},
index=[second_minute])
self.writer.write_sid(sid, new_data)
open_price = self.reader.get_value(sid, second_minute, 'open')
self.assertEquals(5.0, open_price)
high_price = self.reader.get_value(sid, second_minute, 'high')
self.assertEquals(10.0, high_price)
low_price = self.reader.get_value(sid, second_minute, 'low')
self.assertEquals(3.0, low_price)
close_price = self.reader.get_value(sid, second_minute, 'close')
self.assertEquals(7.0, close_price)
volume_price = self.reader.get_value(sid, second_minute, 'volume')
self.assertEquals(10.0, volume_price)
def test_append_on_new_day(self):
sid = 1
ohlcv = {
'open': [2.0],
'high': [3.0],
'low': [1.0],
'close': [2.0],
'volume': [10.0]
}
first_minute = self.market_opens[TEST_CALENDAR_START]
data = DataFrame(
data=ohlcv,
index=[first_minute])
self.writer.write_sid(sid, data)
next_day_minute = first_minute + Timedelta(days=1)
new_data = DataFrame(
data=ohlcv,
index=[next_day_minute])
self.writer.write_sid(sid, new_data)
second_minute = first_minute + Timedelta(minutes=1)
# The second minute should have been padded with zeros
for col in ('open', 'high', 'low', 'close'):
assert_almost_equal(
nan, self.reader.get_value(sid, second_minute, col)
)
self.assertEqual(
0, self.reader.get_value(sid, second_minute, 'volume')
)
# The first day should contain US_EQUITIES_MINUTES_PER_DAY rows.
# The second day should contain a single row.
self.assertEqual(
len(self.writer._ensure_ctable(sid)),
US_EQUITIES_MINUTES_PER_DAY + 1,
)
def test_write_multiple_sids(self):
"""
Test writing multiple sids.
Tests both that the data is written to the correct sid, as well as
ensuring that the logic for creating the subdirectory path to each sid
does not cause issues from attempts to recreate existing paths.
(Calling out this coverage, because an assertion of that logic does not
show up in the test itself, but is exercised by the act of attempting
to write two consecutive sids, which would be written to the same
containing directory, `00/00/000001.bcolz` and `00/00/000002.bcolz)
Before applying a check to make sure the path writing did not
re-attempt directory creation an OSError like the following would
occur:
```
OSError: [Errno 17] File exists: '/tmp/tmpR7yzzT/minute_bars/00/00'
```
"""
minute = self.market_opens[TEST_CALENDAR_START]
sids = [1, 2]
data = DataFrame(
data={
'open': [15.0],
'high': [17.0],
'low': [11.0],
'close': [15.0],
'volume': [100.0]
},
index=[minute])
self.writer.write_sid(sids[0], data)
data = DataFrame(
data={
'open': [25.0],
'high': [27.0],
'low': [21.0],
'close': [25.0],
'volume': [200.0]
},
index=[minute])
self.writer.write_sid(sids[1], data)
sid = sids[0]
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(15.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(17.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(11.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(15.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(100.0, volume_price)
sid = sids[1]
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(25.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(27.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(21.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(25.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(200.0, volume_price)
def test_pad_data(self):
"""
Test writing empty data.
"""
sid = 1
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertIs(last_date, NaT)
self.writer.pad(sid, TEST_CALENDAR_START)
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertEqual(last_date, TEST_CALENDAR_START)
freq = self.market_opens.index.freq
day = TEST_CALENDAR_START + freq
minute = self.market_opens[day]
data = DataFrame(
data={
'open': [15.0],
'high': [17.0],
'low': [11.0],
'close': [15.0],
'volume': [100.0]
},
index=[minute])
self.writer.write_sid(sid, data)
open_price = self.reader.get_value(sid, minute, 'open')
self.assertEquals(15.0, open_price)
high_price = self.reader.get_value(sid, minute, 'high')
self.assertEquals(17.0, high_price)
low_price = self.reader.get_value(sid, minute, 'low')
self.assertEquals(11.0, low_price)
close_price = self.reader.get_value(sid, minute, 'close')
self.assertEquals(15.0, close_price)
volume_price = self.reader.get_value(sid, minute, 'volume')
self.assertEquals(100.0, volume_price)
# Check that if we then pad the rest of this day, we end up with
# 2 days worth of minutes.
self.writer.pad(sid, day)
self.assertEqual(
len(self.writer._ensure_ctable(sid)),
self.writer._minutes_per_day * 2,
)
def test_nans(self):
"""
Test writing empty data.
"""
sid = 1
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertIs(last_date, NaT)
self.writer.pad(sid, TEST_CALENDAR_START)
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertEqual(last_date, TEST_CALENDAR_START)
freq = self.market_opens.index.freq
minute = self.market_opens[TEST_CALENDAR_START + freq]
minutes = date_range(minute, periods=9, freq='min')
data = DataFrame(
data={
'open': full(9, nan),
'high': full(9, nan),
'low': full(9, nan),
'close': full(9, nan),
'volume': full(9, 0),
},
index=[minutes])
self.writer.write_sid(sid, data)
fields = ['open', 'high', 'low', 'close', 'volume']
ohlcv_window = list(map(transpose, self.reader.load_raw_arrays(
fields, minutes[0], minutes[-1], [sid],
)))
for i, field in enumerate(fields):
if field != 'volume':
assert_array_equal(full(9, nan), ohlcv_window[i][0])
else:
assert_array_equal(zeros(9), ohlcv_window[i][0])
def test_differing_nans(self):
"""
Also test nans of differing values/construction.
"""
sid = 1
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertIs(last_date, NaT)
self.writer.pad(sid, TEST_CALENDAR_START)
last_date = self.writer.last_date_in_output_for_sid(sid)
self.assertEqual(last_date, TEST_CALENDAR_START)
freq = self.market_opens.index.freq
minute = self.market_opens[TEST_CALENDAR_START + freq]
minutes = date_range(minute, periods=9, freq='min')
data = DataFrame(
data={
'open': ((0b11111111111 << 52) + arange(1, 10, dtype=int64)).
view(float64),
'high': ((0b11111111111 << 52) + arange(11, 20, dtype=int64)).
view(float64),
'low': ((0b11111111111 << 52) + arange(21, 30, dtype=int64)).
view(float64),
'close': ((0b11111111111 << 52) + arange(31, 40, dtype=int64)).
view(float64),
'volume': full(9, 0),
},
index=[minutes])
self.writer.write_sid(sid, data)
fields = ['open', 'high', 'low', 'close', 'volume']
ohlcv_window = list(map(transpose, self.reader.load_raw_arrays(
fields, minutes[0], minutes[-1], [sid],
)))
for i, field in enumerate(fields):
if field != 'volume':
assert_array_equal(full(9, nan), ohlcv_window[i][0])
else:
assert_array_equal(zeros(9), ohlcv_window[i][0])
def test_write_cols(self):
minute_0 = self.market_opens[self.test_calendar_start]
minute_1 = minute_0 + timedelta(minutes=1)
sid = 1
cols = {
'open': array([10.0, 11.0]),
'high': array([20.0, 21.0]),
'low': array([30.0, 31.0]),
'close': array([40.0, 41.0]),
'volume': array([50.0, 51.0])
}
dts = array([minute_0, minute_1], dtype='datetime64[s]')
self.writer.write_cols(sid, dts, cols)
open_price = self.reader.get_value(sid, minute_0, 'open')
self.assertEquals(10.0, open_price)
high_price = self.reader.get_value(sid, minute_0, 'high')
self.assertEquals(20.0, high_price)
low_price = self.reader.get_value(sid, minute_0, 'low')
self.assertEquals(30.0, low_price)
close_price = self.reader.get_value(sid, minute_0, 'close')
self.assertEquals(40.0, close_price)
volume_price = self.reader.get_value(sid, minute_0, 'volume')
self.assertEquals(50.0, volume_price)
open_price = self.reader.get_value(sid, minute_1, 'open')
self.assertEquals(11.0, open_price)
high_price = self.reader.get_value(sid, minute_1, 'high')
self.assertEquals(21.0, high_price)
low_price = self.reader.get_value(sid, minute_1, 'low')
self.assertEquals(31.0, low_price)
close_price = self.reader.get_value(sid, minute_1, 'close')
self.assertEquals(41.0, close_price)
volume_price = self.reader.get_value(sid, minute_1, 'volume')
self.assertEquals(51.0, volume_price)
def test_write_cols_mismatch_length(self):
dts = date_range(self.market_opens[self.test_calendar_start],
periods=2, freq='min').asi8.astype('datetime64[s]')
sid = 1
cols = {
'open': array([10.0, 11.0, 12.0]),
'high': array([20.0, 21.0]),
'low': array([30.0, 31.0, 33.0, 34.0]),
'close': array([40.0, 41.0]),
'volume': array([50.0, 51.0, 52.0])
}
with self.assertRaises(BcolzMinuteWriterColumnMismatch):
self.writer.write_cols(sid, dts, cols)
def test_unadjusted_minutes(self):
"""
Test unadjusted minutes.
"""
start_minute = self.market_opens[TEST_CALENDAR_START]
minutes = [start_minute,
start_minute + Timedelta('1 min'),
start_minute + Timedelta('2 min')]
sids = [1, 2]
data_1 = DataFrame(
data={
'open': [15.0, nan, 15.1],
'high': [17.0, nan, 17.1],
'low': [11.0, nan, 11.1],
'close': [14.0, nan, 14.1],
'volume': [1000, 0, 1001]
},
index=minutes)
self.writer.write_sid(sids[0], data_1)
data_2 = DataFrame(
data={
'open': [25.0, nan, 25.1],
'high': [27.0, nan, 27.1],
'low': [21.0, nan, 21.1],
'close': [24.0, nan, 24.1],
'volume': [2000, 0, 2001]
},
index=minutes)
self.writer.write_sid(sids[1], data_2)
reader = BcolzMinuteBarReader(self.dest)
columns = ['open', 'high', 'low', 'close', 'volume']
sids = [sids[0], sids[1]]
arrays = list(map(transpose, reader.load_raw_arrays(
columns, minutes[0], minutes[-1], sids,
)))
data = {sids[0]: data_1, sids[1]: data_2}
for i, col in enumerate(columns):
for j, sid in enumerate(sids):
assert_almost_equal(data[sid][col], arrays[i][j])
def test_unadjusted_minutes_early_close(self):
"""
Test unadjusted minute window, ensuring that early closes are filtered
out.
"""
day_before_thanksgiving = Timestamp('2015-11-25', tz='UTC')
xmas_eve = Timestamp('2015-12-24', tz='UTC')
market_day_after_xmas = Timestamp('2015-12-28', tz='UTC')
minutes = [self.market_closes[day_before_thanksgiving] -
Timedelta('2 min'),
self.market_closes[xmas_eve] - Timedelta('1 min'),
self.market_opens[market_day_after_xmas] +
Timedelta('1 min')]
sids = [1, 2]
data_1 = DataFrame(
data={
'open': [
15.0, 15.1, 15.2],
'high': [17.0, 17.1, 17.2],
'low': [11.0, 11.1, 11.3],
'close': [14.0, 14.1, 14.2],
'volume': [1000, 1001, 1002],
},
index=minutes)
self.writer.write_sid(sids[0], data_1)
data_2 = DataFrame(
data={
'open': [25.0, 25.1, 25.2],
'high': [27.0, 27.1, 27.2],
'low': [21.0, 21.1, 21.2],
'close': [24.0, 24.1, 24.2],
'volume': [2000, 2001, 2002],
},
index=minutes)
self.writer.write_sid(sids[1], data_2)
reader = BcolzMinuteBarReader(self.dest)
columns = ['open', 'high', 'low', 'close', 'volume']
sids = [sids[0], sids[1]]
arrays = list(map(transpose, reader.load_raw_arrays(
columns, minutes[0], minutes[-1], sids,
)))
data = {sids[0]: data_1, sids[1]: data_2}
start_minute_loc = self.env.market_minutes.get_loc(minutes[0])
minute_locs = [self.env.market_minutes.get_loc(minute) -
start_minute_loc
for minute in minutes]
for i, col in enumerate(columns):
for j, sid in enumerate(sids):
assert_almost_equal(data[sid].loc[minutes, col],
arrays[i][j][minute_locs])
def test_adjust_non_trading_minutes(self):
start_day = Timestamp('2015-06-01', tz='UTC')
end_day = Timestamp('2015-06-02', tz='UTC')
sid = 1
cols = {
'open': arange(1, 781),
'high': arange(1, 781),
'low': arange(1, 781),
'close': arange(1, 781),
'volume': arange(1, 781)
}
dts = array(self.env.minutes_for_days_in_range(start_day, end_day))
self.writer.write_cols(sid, dts, cols)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-06-01 20:00:00', tz='UTC'),
'open'),
390)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-06-02 20:00:00', tz='UTC'),
'open'),
780)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-06-02', tz='UTC'),
'open'),
390)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-06-02 20:01:00', tz='UTC'),
'open'),
780)
def test_adjust_non_trading_minutes_half_days(self):
# half day
start_day = Timestamp('2015-11-27', tz='UTC')
end_day = Timestamp('2015-11-30', tz='UTC')
sid = 1
cols = {
'open': arange(1, 601),
'high': arange(1, 601),
'low': arange(1, 601),
'close': arange(1, 601),
'volume': arange(1, 601)
}
dts = array(self.env.minutes_for_days_in_range(start_day, end_day))
self.writer.write_cols(sid, dts, cols)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-11-27 18:00:00', tz='UTC'),
'open'),
210)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-11-30 21:00:00', tz='UTC'),
'open'),
600)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-11-27 18:01:00', tz='UTC'),
'open'),
210)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-11-30', tz='UTC'),
'open'),
210)
self.assertEqual(
self.reader.get_value(
sid,
Timestamp('2015-11-30 21:01:00', tz='UTC'),
'open'),
600)
def test_set_sid_attrs(self):
"""Confirm that we can set the attributes of a sid's file correctly.
"""
sid = 1
start_day = Timestamp('2015-11-27', tz='UTC')
end_day = Timestamp('2015-06-02', tz='UTC')
attrs = {
'start_day': start_day.value / int(1e9),
'end_day': end_day.value / int(1e9),
'factor': 100,
}
# Write the attributes
self.writer.set_sid_attrs(sid, **attrs)
# Read the attributes
for k, v in attrs.items():
self.assertEqual(self.reader.get_sid_attr(sid, k), v)
| |
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
from __future__ import with_statement # Python 2.5
import os
import logging
import pickle
from collections import defaultdict
from beets import autotag
from beets import library
import beets.autotag.art
from beets import plugins
from beets import util
from beets.util import pipeline
from beets.util import syspath, normpath, displayable_path
from beets.util.enumeration import enum
# The set of decisions the importer can record for a task.
action = enum(
    'SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID',
    name='action'
)
# NOTE(review): presumably the buffer size between pipeline stages;
# usage is not visible in this module.
QUEUE_SIZE = 128
# Where resume/incremental state is persisted between runs.
STATE_FILE = os.path.expanduser('~/.beetsstate')
# Minimum fraction of items sharing the plurality artist for an album
# to be considered single-artist (see _infer_album_fields).
SINGLE_ARTIST_THRESH = 0.25
# Album artist assigned to compilations.
VARIOUS_ARTISTS = u'Various Artists'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
    """Signals that the user chose to abort the tagging operation
    partway through.
    """
# Utilities.
def tag_log(logfile, status, path):
    """Log a message about a given album to logfile. The status should
    reflect the reason the album couldn't be tagged.
    """
    if not logfile:
        # Logging is disabled.
        return
    logfile.write('%s %s\n' % (status, path))
    logfile.flush()
def log_choice(config, task):
    """Logs the task's current choice if it should be logged.
    """
    # Album tasks log their directory; singleton tasks log the file.
    path = task.item.path if not task.is_album else task.path
    flag = task.choice_flag
    if flag is action.ASIS:
        tag_log(config.logfile, 'asis', path)
    elif flag is action.SKIP:
        tag_log(config.logfile, 'skip', path)
def _reopen_lib(lib):
    """Because of limitations in SQLite, a given Library is bound to
    the thread in which it was created. This function reopens Library
    objects so that they can be used from separate threads.
    """
    if not isinstance(lib, library.Library):
        # Non-Library objects need no special treatment.
        return lib
    return library.Library(
        lib.path,
        lib.directory,
        lib.path_formats,
        lib.art_filename,
        lib.timeout,
        lib.replacements,
    )
def _duplicate_check(lib, task, recent=None):
    """Check whether an album already exists in the library. `recent`
    should be a set of (artist, album) pairs that will be built up
    with every call to this function and checked along with the
    library.
    """
    flag = task.choice_flag
    if flag is action.ASIS:
        artist, album = task.cur_artist, task.cur_album
    elif flag is action.APPLY:
        artist, album = task.info.artist, task.info.album
    else:
        return False

    if artist is None:
        # As-is import with no artist. Skip check.
        return False

    # Check (and extend) the set of recently imported albums.
    if recent is not None:
        if (artist, album) in recent:
            return True
        recent.add((artist, album))

    # Compare against albums already in the library.
    cur_paths = set(i.path for i in task.items if i)
    for album_cand in lib.albums(artist=artist):
        if album_cand.album != album:
            continue
        # An album with identical contents is a replacement, not a
        # duplicate.
        other_paths = set(i.path for i in album_cand.items())
        if other_paths == cur_paths:
            continue
        return True
    return False
def _item_duplicate_check(lib, task, recent=None):
    """Check whether an item already exists in the library."""
    flag = task.choice_flag
    if flag is action.ASIS:
        artist, title = task.item.artist, task.item.title
    elif flag is action.APPLY:
        artist, title = task.info.artist, task.info.title
    else:
        return False

    # Check (and extend) the set of recently imported items.
    if recent is not None:
        if (artist, title) in recent:
            return True
        recent.add((artist, title))

    # Any library item with matching metadata but a different path is a
    # duplicate; an item at the same path is this very file.
    for other in lib.items(artist=artist, title=title):
        if other.path != task.item.path:
            return True
    return False
def _infer_album_fields(task):
    """Given an album and an associated import task, massage the
    album-level metadata. This ensures that the album artist is set
    and that the "compilation" flag is set automatically.
    """
    assert task.is_album
    assert task.items

    changes = {}
    if task.choice_flag == action.ASIS:
        # Taking metadata "as-is": decide whether this album is VA by
        # how dominant the most common item artist is.
        plur_artist, freq = util.plurality([i.artist for i in task.items])
        single_artist = freq == len(task.items) or (
            freq > 1 and
            float(freq) / len(task.items) >= SINGLE_ARTIST_THRESH
        )
        if single_artist:
            changes['albumartist'] = plur_artist
            changes['comp'] = False
        else:
            # Various-artists compilation.
            changes['albumartist'] = VARIOUS_ARTISTS
            changes['comp'] = True
    elif task.choice_flag == action.APPLY:
        # Applying autotagged metadata: take the album artist from the
        # first non-missing item.
        first_item = None
        for item in task.items:
            if item is not None:
                first_item = item
                break
        assert first_item is not None, "all items are None"
        if not first_item.albumartist:
            changes['albumartist'] = first_item.artist
        if not first_item.mb_albumartistid:
            changes['mb_albumartistid'] = first_item.mb_artistid
    else:
        assert False

    # Apply the computed changes to every present item.
    for item in task.items:
        if item is not None:
            for field, value in changes.items():
                setattr(item, field, value)
def _open_state():
    """Load the persisted state dictionary; return an empty dict when
    the file is missing, unreadable, or truncated.
    """
    try:
        with open(STATE_FILE) as f:
            state = pickle.load(f)
    except (IOError, EOFError):
        state = {}
    return state
def _save_state(state):
    """Writes the state dictionary out to disk.

    Write failures are logged rather than raised: losing resume or
    incremental state should never abort an import.
    """
    try:
        with open(STATE_FILE, 'w') as f:
            pickle.dump(state, f)
    except IOError, exc:
        log.error(u'state file could not be written: %s' % unicode(exc))
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
# State-file key mapping each toppath to the last directory tagged.
PROGRESS_KEY = 'tagprogress'
def progress_set(toppath, path):
    """Record that tagging for the given `toppath` was successful up to
    `path`. If path is None, then clear the progress value (indicating
    that the tagging completed).
    """
    state = _open_state()
    progress = state.setdefault(PROGRESS_KEY, {})
    if path is None:
        # Remove progress from file (if any was recorded).
        progress.pop(toppath, None)
    else:
        progress[toppath] = path
    _save_state(state)
def progress_get(toppath):
    """Get the last successfully tagged subpath of toppath. If toppath
    has no progress information, returns None.
    """
    return _open_state().get(PROGRESS_KEY, {}).get(toppath)
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
# State-file key holding the set of all previously-imported paths.
HISTORY_KEY = 'taghistory'
def history_add(path):
    """Indicate that the import of `path` is completed and should not
    be repeated in incremental imports.
    """
    state = _open_state()
    state.setdefault(HISTORY_KEY, set()).add(path)
    _save_state(state)
def history_get():
    """Get the set of completed paths in incremental imports.
    """
    return _open_state().get(HISTORY_KEY, set())
# The configuration structure.
class ImportConfig(object):
    """Contains all the settings used during an import session. Should
    be used in a "write-once" way -- everything is set up initially and
    then never touched again.
    """
    _fields = ['lib', 'paths', 'resume', 'logfile', 'color', 'quiet',
               'quiet_fallback', 'copy', 'write', 'art', 'delete',
               'choose_match_func', 'should_resume_func', 'threaded',
               'autot', 'singletons', 'timid', 'choose_item_func',
               'query', 'incremental', 'ignore']

    def __init__(self, **kwargs):
        # Every field is required; a missing keyword raises KeyError.
        for field in self._fields:
            setattr(self, field, kwargs[field])

        # Normalize the paths.
        if self.paths:
            self.paths = [normpath(p) for p in self.paths]

        # Incremental and progress are mutually exclusive.
        if self.incremental:
            self.resume = False

        # When based on a query instead of directories, never
        # save progress or try to resume.
        if self.query is not None:
            self.paths = None
            self.resume = False
            self.incremental = False
# The importer task class.
class ImportTask(object):
    """Represents a single set of items to be imported along with its
    intermediate state. May represent an album or a single item.

    Tasks flow through the importer pipeline accumulating state:
    `set_match`/`set_item_match` record autotagger results and
    `set_choice` records the user's (or automatic) decision.
    """
    def __init__(self, toppath=None, path=None, items=None):
        # toppath: the user-specified root being imported.
        # path: the directory containing this task's items.
        # items: the Item objects to import (album tasks; singleton
        # tasks use `item` instead -- see item_task).
        self.toppath = toppath
        self.path = path
        self.items = items
        # Sentinel tasks mark progress boundaries rather than real work.
        self.sentinel = False
    @classmethod
    def done_sentinel(cls, toppath):
        """Create an ImportTask that indicates the end of a top-level
        directory import.
        """
        obj = cls(toppath)
        obj.sentinel = True
        return obj
    @classmethod
    def progress_sentinel(cls, toppath, path):
        """Create a task indicating that a single directory in a larger
        import has finished. This is only required for singleton
        imports; progress is implied for album imports.
        """
        obj = cls(toppath, path)
        obj.sentinel = True
        return obj
    @classmethod
    def item_task(cls, item):
        """Creates an ImportTask for a single item."""
        obj = cls()
        obj.item = item
        obj.is_album = False
        return obj
    def set_match(self, cur_artist, cur_album, candidates, rec):
        """Sets the candidates for this album matched by the
        `autotag.tag_album` method.
        """
        assert not self.sentinel
        self.cur_artist = cur_artist
        self.cur_album = cur_album
        self.candidates = candidates
        self.rec = rec
        # Receiving an album-level match implies this is an album task.
        self.is_album = True
    def set_null_match(self):
        """Set the candidates to indicate no album match was found.
        """
        self.set_match(None, None, None, None)
    def set_item_match(self, candidates, rec):
        """Set the match for a single-item task."""
        assert not self.is_album
        assert self.item is not None
        self.item_match = (candidates, rec)
    def set_null_item_match(self):
        """For single-item tasks, mark the item as having no matches.
        """
        assert not self.is_album
        assert self.item is not None
        self.item_match = None
    def set_choice(self, choice):
        """Given either an (info, items) tuple or an action constant,
        indicates that an action has been selected by the user (or
        automatically).
        """
        assert not self.sentinel
        # Not part of the task structure:
        assert choice not in (action.MANUAL, action.MANUAL_ID)
        assert choice != action.APPLY  # Only used internally.
        if choice in (action.SKIP, action.ASIS, action.TRACKS):
            self.choice_flag = choice
            self.info = None
        else:
            # Otherwise `choice` is a concrete match to apply.
            assert not isinstance(choice, action)
            if self.is_album:
                info, items = choice
                self.items = items  # Reordered items list.
            else:
                info = choice
            self.info = info
            self.choice_flag = action.APPLY  # Implicit choice.
    def save_progress(self):
        """Updates the progress state to indicate that this album has
        finished.
        """
        if self.sentinel and self.path is None:
            # "Done" sentinel.
            progress_set(self.toppath, None)
        elif self.sentinel or self.is_album:
            # "Directory progress" sentinel for singletons or a real
            # album task, which implies the same.
            progress_set(self.toppath, self.path)
    def save_history(self):
        """Save the directory in the history for incremental imports.
        """
        if self.sentinel or self.is_album:
            history_add(self.path)
    # Logical decisions.
    def should_write_tags(self):
        """Should new info be written to the files' metadata?"""
        if self.choice_flag == action.APPLY:
            return True
        elif self.choice_flag in (action.ASIS, action.TRACKS, action.SKIP):
            return False
        else:
            assert False
    def should_fetch_art(self):
        """Should album art be downloaded for this album?"""
        return self.should_write_tags() and self.is_album
    def should_skip(self):
        """After a choice has been made, returns True if this is a
        sentinel or it has been marked for skipping.
        """
        return self.sentinel or self.choice_flag == action.SKIP
# Full-album pipeline stages.
def read_tasks(config):
    """A generator yielding all the albums (as ImportTask objects) found
    in the user-specified list of paths. In the case of a singleton
    import, yields single-item tasks instead.
    """
    # Look for saved progress. `config.resume` may be False (never
    # resume), True (always resume), or another value meaning "ask";
    # any non-False value enables progress tracking.
    progress = config.resume is not False
    if progress:
        resume_dirs = {}
        for path in config.paths:
            resume_dir = progress_get(path)
            if resume_dir:
                # Either accept immediately or prompt for input to decide.
                if config.resume:
                    do_resume = True
                    log.warn('Resuming interrupted import of %s' % path)
                else:
                    do_resume = config.should_resume_func(config, path)
                if do_resume:
                    resume_dirs[path] = resume_dir
                else:
                    # Clear progress; we're starting from the top.
                    progress_set(path, None)
    # Look for saved incremental directories.
    if config.incremental:
        incremental_skipped = 0
        history_dirs = history_get()
    for toppath in config.paths:
        # Check whether the path is to a file: in singleton mode a bare
        # file becomes a single item task.
        if config.singletons and not os.path.isdir(syspath(toppath)):
            item = library.Item.from_path(toppath)
            yield ImportTask.item_task(item)
            continue
        # Produce paths under this directory.
        if progress:
            resume_dir = resume_dirs.get(toppath)
        for path, items in autotag.albums_in_dir(toppath, config.ignore):
            # Skip according to progress.
            if progress and resume_dir:
                # We're fast-forwarding to resume a previous tagging.
                if path == resume_dir:
                    # We've hit the last good path! Turn off the
                    # fast-forwarding.
                    resume_dir = None
                continue
            # When incremental, skip paths in the history.
            if config.incremental and path in history_dirs:
                log.debug(u'Skipping previously-imported path: %s' %
                          displayable_path(path))
                incremental_skipped += 1
                continue
            # Yield all the necessary tasks.
            if config.singletons:
                for item in items:
                    yield ImportTask.item_task(item)
                # Record per-directory progress for singleton imports.
                yield ImportTask.progress_sentinel(toppath, path)
            else:
                yield ImportTask(toppath, path, items)
        # Indicate the directory is finished.
        yield ImportTask.done_sentinel(toppath)
    # Show skipped directories.
    if config.incremental and incremental_skipped:
        log.info(u'Incremental import: skipped %i directories.' %
                 incremental_skipped)
def query_tasks(config):
    """A generator that works as a drop-in-replacement for read_tasks.
    Instead of finding files from the filesystem, a query is used to
    match items from the library.
    """
    lib = _reopen_lib(config.lib)

    if config.singletons:
        # Singleton mode: every matching item becomes its own task.
        for item in lib.items(config.query):
            yield ImportTask.item_task(item)
        return

    # Album mode: one task per matching album.
    for album in lib.albums(config.query):
        log.debug('yielding album %i: %s - %s' %
                  (album.id, album.albumartist, album.album))
        album_items = list(album.items())
        yield ImportTask(None, album.item_dir(), album_items)
def initial_lookup(config):
    """A coroutine for performing the initial MusicBrainz lookup for an
    album. It accepts lists of Items and yields
    (items, cur_artist, cur_album, candidates, rec) tuples. If no match
    is found, all of the yielded parameters (except items) are None.
    """
    # Standard pipeline-coroutine shape: receive a task, process it,
    # yield it back to the next stage on the following iteration.
    task = None
    while True:
        task = yield task
        if task.sentinel:
            # Sentinel tasks carry no items; pass them through untouched.
            continue

        log.debug('Looking up: %s' % task.path)
        try:
            # tag_album returns the match tuple; unpack it into the task.
            task.set_match(*autotag.tag_album(task.items, config.timid))
        except autotag.AutotagError:
            # Lookup failed entirely; record an explicit "no match".
            task.set_null_match()
def user_query(config):
    """A coroutine for interfacing with the user about the tagging
    process. lib is the Library to import into and logfile may be
    a file-like object for logging the import process. The coroutine
    accepts and yields ImportTask objects.
    """
    lib = _reopen_lib(config.lib)
    # `recent` accumulates state across tasks for duplicate detection
    # (passed into _duplicate_check on every task).
    recent = set()
    task = None
    while True:
        task = yield task
        if task.sentinel:
            continue

        # Ask the user for a choice.
        choice = config.choose_match_func(task, config)
        task.set_choice(choice)
        log_choice(config, task)

        # As-tracks: transition to singleton workflow.
        if choice is action.TRACKS:
            # Set up a little pipeline for dealing with the singletons.
            item_tasks = []
            def emitter():
                # Re-emit each album item as a singleton task, then a
                # progress sentinel so resume state stays correct.
                for item in task.items:
                    yield ImportTask.item_task(item)
                yield ImportTask.progress_sentinel(task.toppath, task.path)
            def collector():
                # Terminal coroutine: gather the processed item tasks.
                while True:
                    item_task = yield
                    item_tasks.append(item_task)
            ipl = pipeline.Pipeline((emitter(), item_lookup(config),
                                     item_query(config), collector()))
            ipl.run_sequential()
            # Replace the album task with the batch of item tasks.
            task = pipeline.multiple(item_tasks)
            continue

        # Check for duplicates if we have a match (or ASIS).
        if _duplicate_check(lib, task, recent):
            tag_log(config.logfile, 'duplicate', task.path)
            log.warn("This album is already in the library!")
            task.set_choice(action.SKIP)
def show_progress(config):
    """This stage replaces the initial_lookup and user_query stages
    when the importer is run without autotagging. It displays the album
    name and artist as the files are added.
    """
    task = None
    while True:
        task = yield task
        # Sentinels pass through; real tasks are logged and imported
        # as-is (no match, ASIS choice).
        if not task.sentinel:
            log.info(task.path)
            task.set_null_match()
            task.set_choice(action.ASIS)
def apply_choices(config):
    """A coroutine for applying changes to albums during the autotag
    process.
    """
    lib = _reopen_lib(config.lib)
    task = None
    while True:
        task = yield task
        if task.should_skip():
            continue

        # Album tasks carry a list of items (filter out Nones);
        # singleton tasks carry exactly one item.
        items = [i for i in task.items if i] if task.is_album else [task.item]
        # Clear IDs in case the items are being re-tagged.
        for item in items:
            item.id = None
            item.album_id = None

        # Change metadata.
        if task.should_write_tags():
            if task.is_album:
                autotag.apply_metadata(task.items, task.info)
            else:
                autotag.apply_item_metadata(task.item, task.info)

        # Infer album-level fields.
        if task.is_album:
            _infer_album_fields(task)

        # Find existing item entries that these are replacing. Old
        # album structures are automatically cleaned up when the
        # last item is removed.
        replaced_items = defaultdict(list)
        for item in items:
            dup_items = lib.items(library.MatchQuery('path', item.path))
            for dup_item in dup_items:
                replaced_items[item].append(dup_item)
                log.debug('replacing item %i: %s' %
                          (dup_item.id, displayable_path(item.path)))
        log.debug('%i of %i items replaced' % (len(replaced_items),
                                               len(items)))

        # Move/copy files.
        # old_paths is consumed later by the finalize stage when
        # deleting originals.
        task.old_paths = [item.path for item in items]
        for item in items:
            if config.copy:
                # If we're replacing an item, then move rather than
                # copying.
                old_path = item.path
                do_copy = not bool(replaced_items[item])
                lib.move(item, do_copy, task.is_album)
                if not do_copy:
                    # If we moved the item, remove the now-nonexistent
                    # file from old_paths.
                    task.old_paths.remove(old_path)
            if config.write and task.should_write_tags():
                item.write()

        # Add items to library. We consolidate this at the end to avoid
        # locking while we do the copying and tag updates.
        try:
            # Remove old items. (itervalues: this module is Python 2.)
            for replaced in replaced_items.itervalues():
                for item in replaced:
                    lib.remove(item)

            # Add new ones.
            if task.is_album:
                # Add an album.
                album = lib.add_album(items)
                task.album_id = album.id
            else:
                # Add tracks.
                for item in items:
                    lib.add(item)
        finally:
            # Persist the library even if removal/addition raised.
            lib.save()
def fetch_art(config):
    """A coroutine that fetches and applies album art for albums where
    appropriate.
    """
    lib = _reopen_lib(config.lib)
    task = None
    while True:
        task = yield task
        if task.should_skip():
            continue

        if task.should_fetch_art():
            # May hit the network / filesystem; returns a path or a
            # falsy value when no art was found.
            artpath = beets.autotag.art.art_for_album(task.info, task.path)

            # Save the art if any was found.
            if artpath:
                try:
                    album = lib.get_album(task.album_id)
                    album.set_art(artpath)
                    if config.delete and not util.samefile(artpath,
                                                           album.artpath):
                        # Delete the original file after it's imported.
                        os.remove(artpath)
                finally:
                    # Persist album art info even if deletion fails.
                    lib.save(False)
def finalize(config):
    """A coroutine that finishes up importer tasks. In particular, the
    coroutine sends plugin events, deletes old files, and saves
    progress. This is a "terminal" coroutine (it yields None).
    """
    lib = _reopen_lib(config.lib)
    while True:
        task = yield
        if task.should_skip():
            # Even skipped tasks must record progress/history so that
            # resume and incremental imports stay consistent.
            if config.resume is not False:
                task.save_progress()
            if config.incremental:
                task.save_history()
            continue

        items = [i for i in task.items if i] if task.is_album else [task.item]

        # Announce that we've added an album.
        if task.is_album:
            album = lib.get_album(task.album_id)
            plugins.send('album_imported', lib=lib, album=album, config=config)
        else:
            for item in items:
                plugins.send('item_imported', lib=lib, item=item, config=config)

        # Finally, delete old files.
        if config.copy and config.delete:
            new_paths = [os.path.realpath(item.path) for item in items]
            for old_path in task.old_paths:
                # Only delete files that were actually copied.
                if old_path not in new_paths:
                    os.remove(syspath(old_path))
                    # Clean up directory if it is emptied.
                    if task.toppath:
                        util.prune_dirs(os.path.dirname(old_path),
                                        task.toppath)

        # Update progress.
        if config.resume is not False:
            task.save_progress()
        if config.incremental:
            task.save_history()
# Singleton pipeline stages.
def item_lookup(config):
    """A coroutine used to perform the initial MusicBrainz lookup for
    an item task.
    """
    task = None
    while True:
        task = yield task
        # Real (non-sentinel) tasks get a singleton match lookup.
        if not task.sentinel:
            task.set_item_match(*autotag.tag_item(task.item, config.timid))
def item_query(config):
    """A coroutine that queries the user for input on single-item
    lookups.
    """
    lib = _reopen_lib(config.lib)
    task = None
    # `recent` carries duplicate-detection state across tasks.
    recent = set()
    while True:
        task = yield task
        if task.sentinel:
            continue

        choice = config.choose_item_func(task, config)
        task.set_choice(choice)
        log_choice(config, task)

        # Duplicate check.
        if _item_duplicate_check(lib, task, recent):
            tag_log(config.logfile, 'duplicate', task.item.path)
            log.warn("This item is already in the library!")
            task.set_choice(action.SKIP)
def item_progress(config):
    """Skips the lookup and query stages in a non-autotagged singleton
    import. Just shows progress.
    """
    task = None
    log.info('Importing items:')
    while True:
        task = yield task
        # Non-sentinel tasks: show the path and import as-is.
        if not task.sentinel:
            log.info(displayable_path(task.item.path))
            task.set_null_item_match()
            task.set_choice(action.ASIS)
# Main driver.
def run_import(**kwargs):
    """Run an import. The keyword arguments are the same as those to
    ImportConfig.
    """
    config = ImportConfig(**kwargs)

    # Choose the task source: filesystem scan or library query.
    if config.query is None:
        stages = [read_tasks(config)]
    else:
        stages = [query_tasks(config)]

    # The lookup/query stages differ between singleton and album mode,
    # and between autotagged and plain imports.
    if config.singletons:
        if config.autot:
            middle = [item_lookup(config), item_query(config)]
        else:
            middle = [item_progress(config)]
    else:
        if config.autot:
            # Only look up and query the user when autotagging.
            middle = [initial_lookup(config), user_query(config)]
        else:
            # When not autotagging, just display progress.
            middle = [show_progress(config)]
    stages.extend(middle)

    stages.append(apply_choices(config))
    if config.art:
        stages.append(fetch_art(config))
    stages.append(finalize(config))
    pl = pipeline.Pipeline(stages)

    # Run the pipeline, threaded or sequentially.
    try:
        if config.threaded:
            pl.run_parallel(QUEUE_SIZE)
        else:
            pl.run_sequential()
    except ImportAbort:
        # User aborted operation. Silently stop.
        pass
| |
# Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""
This unittest tests the dictset module.
"""
import sys
import unittest
import doctest
import random
from random import shuffle
from string import digits,ascii_lowercase
import dictset
from dictset import DictSet
# First we need to define some translator functions so we
# can compare DictSets without relying on DictSet itself.
#
# Comparisons are made as with sorted lists of key value
# pairs. The values (sets) are also sorted and turned into
# lists
def s2d(x=None):
    """
    s2d(...) -> takes a string returns a dict

    A shortcut function for turning strings into dicts
    with list values for testing dictset

       lowercase letters become keys,
       whole numbers become list elements,
       0 becomes an empty list
       all other characters are ignored

       s2d() -> {}
       s2d('') -> {}

    The value lists are deliberately shuffled, so the doctest below
    sorts both keys and values to stay deterministic:

    >>> sorted((k, sorted(v)) for (k, v) in s2d('a0b123c 567').items())
    [('a', []), ('b', ['1', '2', '3']), ('c', ['5', '6', '7'])]
    """
    # Identity comparison for None per PEP 8 (was `x==None`).
    if x is None or x == '':
        return {}

    keys, vals = [], []
    for c in x:
        if c in ascii_lowercase:
            # A letter starts a new key with an (initially empty) list.
            keys.append(c)
            vals.append([])
        elif c in '123456789':
            # A nonzero digit is appended to the most recent key's list.
            vals[-1] += c
        # else: '0', whitespace, and other characters are ignored.

    # Randomly shuffle the order of the values in the list so callers
    # cannot accidentally depend on element order.
    for v in vals:
        shuffle(v)  # shuffles in place

    return dict(list(zip(keys, vals)))
def d2l(ds):
    """
    d2l(...) -> takes a dict/DictSet returns sorted list of (k,v) pairs.

    Takes a mappable; sorts the item pairs, and listifies
    and sorts the values.

    (Docstring previously misnamed this function "s2l".)
    """
    return [(k,sorted(list(v))) for (k,v) in sorted(ds.items())]
def s2l(x=None):
    """
    s2l(...) -> takes a string returns sorted list of (k,v) pairs.

    Equivalent to d2l(s2d(x)). Unlike s2d's raw dict, the result here
    is fully sorted and therefore deterministic:

    >>> s2l('b312a0c756')
    [('a', []), ('b', ['1', '2', '3']), ('c', ['5', '6', '7'])]
    """
    return d2l(s2d(x))
class TestDictSet__init__(unittest.TestCase):
    """Tests DictSet construction: type-error failures for bad
    arguments, and all supported initialization signatures."""

    # Init test failure assertions
    def test0(self):
        with self.assertRaises(TypeError) as cm:
            DictSet(42)

        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")

    def test1(self):
        with self.assertRaises(TypeError) as cm:
            DictSet(one=1, two=2)

        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")

    def test2(self):
        with self.assertRaises(TypeError) as cm:
            DictSet([('one',1),('two',2)])

        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")

    def test3(self):
        with self.assertRaises(TypeError) as cm:
            DictSet(['one',[1],'two',[2]])

        self.assertEqual(str(cm.exception),
                         'could not unpack arg to key/value pairs')

    def test4(self):
        with self.assertRaises(TypeError) as cm:
            DictSet([('one'),[1],'two',[2]])

        self.assertEqual(str(cm.exception),
                         'could not unpack arg to key/value pairs')

    def test5(self):
        with self.assertRaises(TypeError) as cm:
            DictSet([(set('one'),[1]),('two',[2])])

        self.assertEqual(str(cm.exception),
                         "unhashable type: 'set'")

    def test6(self):
        with self.assertRaises(TypeError) as cm:
            DictSet([(set('one'),[1]),('two',[2])],{'three':[3]})

        self.assertEqual(str(cm.exception),
                         'DictSet expected at most 1 arguments, got 2')

    # test initialization signatures
    def test20(self):
        """DictSet()"""
        self.assertEqual(DictSet(),{})

    def test21(self):
        """DictSet(mapping)"""
        self.assertEqual(
            d2l(DictSet(s2d('a0b12333c45556'))),
            s2l('a0b123 c45 6'))

    def test22(self):
        """DictSet(iterable)"""
        self.assertEqual(
            d2l(DictSet([('a',''),('b','123'),('c','45556'),])),
            s2l('a 0 b 123 c 45 6'))

    def test23(self):
        """DictSet(**kwargs)"""
        self.assertEqual(
            d2l(DictSet(a='',b='123',c='45556')),
            s2l('a 0 b 123 c 45 6'))

    def test231(self):
        self.assertEqual(d2l(DictSet(self=[1,2,3], other=[4,5,6])),
                         [('other', [4, 5, 6]), ('self', [1, 2, 3])])

    def test24(self):
        """self can be a keyword)"""
        self.assertEqual(
            d2l(DictSet(self='45556')),
            d2l({'self':'456'}))

    def test25(self):
        """DictSet(iterable, **kwargs), with overlapping key/values"""
        self.assertEqual(
            d2l(DictSet(s2d('a1c5678'),a='',b='123',c='456')),
            s2l('a1 b 123 c 45678'))

    def test99(self):
        """Make sure that direct calls to update
           do not clear previous contents"""
        L=DictSet(a='1',b='2')
        L.__init__(b='3',c='4')
        self.assertEqual(d2l(L),s2l('a1b23c4'))
class TestDictSet_remove(unittest.TestCase):
    """Tests DictSet.remove: element removal, KeyError on missing
    elements/keys, and whole-key removal."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')
        L.remove('c','8')
        self.assertEqual(d2l(L),R)

    def test1(self):
        # Removing the same element twice raises KeyError for the value.
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.remove('c','8')
        with self.assertRaises(KeyError) as cm:
            L.remove('c','8')

        self.assertEqual(str(cm.exception),"'8'")

    def test2(self):
        # Removing from a nonexistent key raises KeyError for the key.
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.remove('c','8')
        with self.assertRaises(KeyError) as cm:
            L.remove('d','8')

        self.assertEqual(str(cm.exception),"'d'")

    def test4(self):
        # Unhashable key: Python 2 raises KeyError, Python 3 TypeError.
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.remove('c','8')
        if sys.version_info[0]==2:
            with self.assertRaises(KeyError) as cm:
                L.remove([],'8')

            self.assertEqual(str(cm.exception),'[]')

        elif sys.version_info[0]==3:
            with self.assertRaises(TypeError) as cm:
                L.remove([],'8')

            self.assertEqual(str(cm.exception),"unhashable type: 'list'")

    def test5(self):
        L = DictSet(s2d('a0'))
        R = s2l('')
        L.remove('a')
        self.assertEqual(d2l(L),R)

    def test6(self):
        L = DictSet(s2d('a123 b456'))
        R = s2l('b456')
        L.remove('a')
        self.assertEqual(d2l(L),R)

    def test7(self):
        # Removing an already-removed key raises KeyError.
        L = DictSet(s2d('a123 b456'))
        L.remove('a')
        with self.assertRaises(KeyError) as cm:
            L.remove('a')

        self.assertEqual(str(cm.exception),"'a'")
class TestDictSet_clear(unittest.TestCase):
    """Tests DictSet.clear: emptying the whole container."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('')
        L.clear() # clear
        self.assertEqual(d2l(L),R)
class TestDictSet_delitem(unittest.TestCase):
    """Tests ``del ds[key]``: removal and KeyError on a missing key."""

    def test0(self):
        L = DictSet(s2d('a123 b456'))
        R = s2l('b456')
        del L['a']
        self.assertEqual(d2l(L),R)

    def test1(self):
        L = DictSet(s2d('a123 b456'))
        del L['a']
        with self.assertRaises(KeyError) as cm:
            del L['a']

        self.assertEqual(str(cm.exception),"'a'")
class TestDictSet_add(unittest.TestCase):
    """Tests DictSet.add: adding values to existing/new keys and
    adding bare keys."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 78 9')
        L.add('c','9') # add to existing set
        self.assertEqual(d2l(L),R)

    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 78')
        L.add('c','7') # does nothing to existing set
        self.assertEqual(d2l(L),R)

    def test3(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 78 d7')
        L.add('d','7') # create new set
        self.assertEqual(d2l(L),R)

    def test4(self):
        # Unhashable values are rejected.
        L = DictSet(s2d('a1 c5666788'))
        with self.assertRaises(TypeError) as cm:
            L.add('d',[])

        self.assertEqual(str(cm.exception),
                         "unhashable type: 'list'")

    def test5(self):
        L = DictSet(s2d('b456'))
        R = s2l('b456')
        L.add('b')
        self.assertEqual(d2l(L),R) # b should stay unaltered

    def test6(self):
        # Adding a new bare key creates an empty set for it.
        L = DictSet(s2d('b456'))
        R = s2l('b456 c0')
        L.add('c')
        self.assertEqual(d2l(L),R)

    def test7(self):
        L = DictSet(s2d('a123 b456 c0'))
        R = s2l('a123 b456 c0')
        L.add('c') # shouldn't do anything
        self.assertEqual(d2l(L),R)
class TestDictSet_copy(unittest.TestCase):
    """Tests DictSet.copy: the copy is independent of the original."""

    def test0(self):
        # Mutating the copy must not affect the original.
        L = DictSet(s2d('a1 c5678'))
        R1 = s2l('a1 c5678')
        M=L.copy()
        M.add('d','9')
        self.assertEqual(d2l(L),R1)

    def test01(self):
        # NOTE(review): exercises remove(), not copy(); looks misplaced
        # in this class — kept as-is.
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1')
        L.remove('c')
        self.assertEqual(d2l(L),R)

    def test1(self):
        # The copy itself reflects mutations made to it.
        L = DictSet(s2d('a1 c5678'))
        M=L.copy()
        M.add('d','9')
        R2 = s2l('a1 c5678 d9')
        self.assertEqual(d2l(M),R2)
class TestDictSet_fromkeys(unittest.TestCase):
    """Tests DictSet.fromkeys: building a new DictSet from keys with an
    optional shared value; the source DictSet is unchanged."""

    def test0(self):
        L = DictSet(s2d('a1 c5678'))
        R1 = s2l('a1 c5678')
        M=L.fromkeys(['a','b'])
        self.assertEqual(d2l(L),R1)
        self.assertEqual(d2l(M),s2l('a0b0'))

    def test1(self):
        L = DictSet(s2d('a1 c5678'))
        R1 = s2l('a1 c5678')
        M=L.fromkeys(['a','b'],'567')
        self.assertEqual(d2l(L),R1)
        self.assertEqual(d2l(M),s2l('a567b567'))

    def test2(self):
        # A non-iterable default value raises TypeError and leaves
        # the source unchanged.
        L = DictSet(s2d('a1 c5678'))
        R1 = s2l('a1 c5678')
        with self.assertRaises(TypeError) as cm:
            M=L.fromkeys(['a','b'],5)

        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")

        self.assertEqual(d2l(L),R1)
class TestDictSet_discard(unittest.TestCase):
    """Tests DictSet.discard: like remove, but never raises for
    missing elements, missing keys, or unhashable keys."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')
        L.discard('c','8')
        self.assertEqual(d2l(L),R)

    def test01(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1')
        L.discard('c')
        self.assertEqual(d2l(L),R)

    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.discard('c','8')
        L.discard('c','8') # doesn't raise KeyError

    def test2(self):
        L = DictSet(s2d('a1c5666788'))
        R = s2l('a1c56 7')  # NOTE(review): unused fixture, kept as-is
        L.discard('d','8') # doesn't raise KeyError

    def test3(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.discard([],'8') # dosen't raise TypeError

    def test4(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        L.discard('c','8')
        L.discard([],'8') # shouldn't complain

    def test5(self):
        L = DictSet(s2d('a0'))
        R = s2l('')
        L.discard('a')
        self.assertEqual(d2l(L),R)

    def test6(self):
        L = DictSet(s2d('a123 b456'))
        R = s2l('b456')
        L.discard('a')
        self.assertEqual(d2l(L),R)

    def test7(self):
        L = DictSet(s2d('a123 b456'))
        L.discard('a')
        L.discard('a') # Shouldn't complain
class TestDictSet__setitem__(unittest.TestCase):
    """Tests ``ds[key] = value``: type errors for unhashable keys and
    non-iterable values, overwriting, and creating new keys."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        with self.assertRaises(TypeError) as cm:
            L.__setitem__([],'8')

        self.assertEqual(str(cm.exception),
                         "unhashable type: 'list'")

    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')  # NOTE(review): unused fixture, kept as-is
        with self.assertRaises(TypeError) as cm:
            L.__setitem__('a',42)

        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")

    def test2(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c42')
        L.__setitem__('c','42') # overwrite existing item
        self.assertEqual(d2l(L),R)

    def test3(self):
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 78 z42')
        L.__setitem__('z','42') # create new item
        self.assertEqual(d2l(L),R)
class TestDictSet__setitem__fixtures(unittest.TestCase):
    """Renamed: this class was previously also named
    TestDictSet__setitem__, which shadowed the earlier class of that
    name and silently disabled its four tests. Its single method built
    fixtures but asserted nothing; kept as a construction smoke test.
    """
    def test0(self):
        # Exercises fixture construction only (no assertion in the
        # original code).
        L = DictSet(s2d('a1 c5666788'))
        R = s2l('a1 c56 7')
class TestDictSet_get(unittest.TestCase):
    """Tests DictSet.get: existing keys, missing keys (None), and
    defaults — get must not insert the default."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        self.assertEqual(L.get('c'),set('5678'))

    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        self.assertEqual(L.get('d'),None)

    def test2(self):
        # A default is returned (as a set) but NOT stored.
        L = DictSet(s2d('a1 c5666788'))
        R= s2l('a1 c5678')
        self.assertEqual(L.get('d',[]),set())
        self.assertEqual(d2l(L),R)

    def test3(self):
        L = DictSet(s2d('a1 c5666788'))
        R= s2l('a1 c5678')
        self.assertEqual(L.get('d','234'),set('234'))
        self.assertEqual(d2l(L),R)
class TestDictSet_setdefault(unittest.TestCase):
    """Tests DictSet.setdefault: unlike get, the default IS stored in
    the DictSet for a missing key."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        self.assertEqual(L.setdefault('c'),set('5678'))

    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        self.assertEqual(L.setdefault('d'),None)

    def test2(self):
        # The default is stored: 'd' now holds an empty set.
        L = DictSet(s2d('a1 c5666788'))
        R= s2l('a1 c5678 d0')
        self.assertEqual(L.setdefault('d',[]),set())
        self.assertEqual(d2l(L),R)

    def test3(self):
        L = DictSet(s2d('a1 c5666788'))
        R= s2l('a1 c5678 d234')
        self.assertEqual(L.setdefault('d','234'),set('234'))
        self.assertEqual(d2l(L),R)
## update functions
class TestDictSet_update(unittest.TestCase):
    """Tests DictSet.update against plain dicts: L is updated in place
    (a union-style merge), M is left untouched."""

    def test0(self):
        L = DictSet(s2d('a1c5666788'))
        M = s2d('')
        R = s2l('a1c56 78')
        L.update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1c5666788')
        R = s2l('a1c56 78')
        L.update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('a123 b324')
        R = s2l('a123 b324 c56 78')
        L.update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324'))

    def test3(self):
        L = DictSet(s2d('a123 b324'))
        M = s2d('a1 c5666788')
        R = s2l('a123 b324 c56 78')
        L.update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))
class TestDictSet__ior__(unittest.TestCase):
    """Tests the ``|=`` operator (in-place union), mirroring the
    TestDictSet_update cases."""

    def test0(self):
        L = DictSet(s2d('a1c5666788'))
        M = s2d('')
        R = s2l('a1c56 78')
        L|=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1c5666788')
        R = s2l('a1c56 78')
        L|=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('a123 b324')
        R = s2l('a123 b324 c56 78')
        L|=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324'))

    def test3(self):
        L = DictSet(s2d('a123 b324'))
        M = s2d('a1 c5666788')
        R = s2l('a123 b324 c56 78')
        L|=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))
class TestDictSet_difference_update(unittest.TestCase):
    """Tests DictSet.difference_update: L is updated in place, M is
    left untouched."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('a1 c56 78')
        L.difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        L.difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l(' c 6')
        L.difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a 23 b324')
        L.difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c56788'))
class TestDictSet__isub__(unittest.TestCase):
    """Tests the ``-=`` operator (in-place difference), mirroring the
    TestDictSet_difference_update cases."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('a1 c56 78')
        L-=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        L-=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l(' c 6')
        L-=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a 23 b324')
        L-=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c56788'))
class TestDictSet_symmetric_difference_update(unittest.TestCase):
    """Tests DictSet.symmetric_difference_update: L is updated in
    place, M is left untouched."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('a1 c56 78')
        L.symmetric_difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('a1 c56 78')
        L.symmetric_difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a 23 b324 c 6')
        L.symmetric_difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a 23 b324 c 6')
        L.symmetric_difference_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c56788'))
class TestDictSet__ixor__(unittest.TestCase):
    """Tests the ``^=`` operator (in-place symmetric difference),
    mirroring the TestDictSet_symmetric_difference_update cases."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('a1 c56 78')
        L^=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('a1 c56 78')
        L^=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a 23 b324 c 6')
        L^=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a 23 b324 c 6')
        L^=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c56788'))
class TestDictSet_intersection_update(unittest.TestCase):
    """Tests DictSet.intersection_update: L is updated in place, M is
    left untouched."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('')
        L.intersection_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        L.intersection_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c567889'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a1 c5 78 ')
        L.intersection_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c567889')
        R = s2l('a1 c5 78 ')
        L.intersection_update(M)
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c567889'))
class TestDictSet__iand__(unittest.TestCase):
    """Tests the ``&=`` operator (in-place intersection), mirroring the
    TestDictSet_intersection_update cases."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('')
        L&=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        L&=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a1 c5 78')
        L&=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a1 c5 78')
        L&=M
        self.assertTrue(isinstance(L,DictSet))
        self.assertTrue(isinstance(M,dict))
        self.assertEqual(d2l(L),R) # L is updated
        self.assertEqual(d2l(M),s2l('a1 c56788'))
# set operations
class TestDictSet_union(unittest.TestCase):
    """Tests DictSet.union: returns a new DictSet; neither operand is
    modified. (Docstring previously said "update is a union update".)"""

    def test0(self):
        L = DictSet(s2d('a1c5666788'))
        M = s2d('')
        R = s2l('a1c56 78')
        self.assertTrue(isinstance(L.union(M),DictSet))
        self.assertEqual(d2l(L.union(M)),R)
        self.assertEqual(d2l(L),s2l('a1c5678'))
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1c5666788')
        R = s2l('a1c56 78')
        self.assertTrue(isinstance(L.union(M),DictSet))
        self.assertEqual(d2l(L.union(M)),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('a123 b324')
        R = s2l('a123 b324 c56 78')
        self.assertTrue(isinstance(L.union(M),DictSet))
        self.assertEqual(d2l(L.union(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678'))
        self.assertEqual(d2l(M),s2l('a123 b324'))

    def test3(self):
        L = DictSet(s2d('a123 b324'))
        M = s2d('a1 c5666788')
        R = s2l('a123 b324 c56 78')
        self.assertTrue(isinstance(L.union(M),DictSet))
        self.assertEqual(d2l(L.union(M)),R)
        self.assertEqual(d2l(L),s2l('a123 b234'))
        self.assertEqual(d2l(M),s2l('a1 c5666788'))
class TestDictSet__or__(unittest.TestCase):
    """Tests the ``|`` operator (union): returns a new DictSet;
    neither operand is modified."""

    def test0(self):
        L = DictSet(s2d('a1c5666788'))
        M = s2d('')
        R = s2l('a1c56 78')
        self.assertTrue(isinstance(L|M,DictSet))
        self.assertEqual(d2l(L|M),R)
        self.assertEqual(d2l(L),s2l('a1c5678'))
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1c5666788')
        R = s2l('a1c56 78')
        self.assertTrue(isinstance(L|M,DictSet))
        self.assertEqual(d2l(L|M),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('a123 b324')
        R = s2l('a123 b324 c56 78')
        self.assertTrue(isinstance(L|M,DictSet))
        self.assertEqual(d2l(L|M),R)
        self.assertEqual(d2l(L),s2l('a1c5678'))
        self.assertEqual(d2l(M),s2l('a123 b324'))

    def test3(self):
        L = DictSet(s2d('a123 b324'))
        M = s2d('a1 c5666788')
        R = s2l('a123 b324 c56 78')
        self.assertTrue(isinstance(L|M,DictSet))
        self.assertEqual(d2l(L|M),R)
        self.assertEqual(d2l(L),s2l('a123 b234'))
        self.assertEqual(d2l(M),s2l('a1 c5666788'))
class TestDictSet_difference(unittest.TestCase):
    """Tests DictSet.difference: returns a new DictSet; neither
    operand is modified. (Docstring previously said "union".)"""

    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L.difference(M),DictSet))
        self.assertEqual(d2l(L.difference(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678'))
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        self.assertTrue(isinstance(L.difference(M),DictSet))
        self.assertEqual(d2l(L.difference(M)),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788'))

    def test2(self):
        L = DictSet(s2d('a1 c56788'))
        M = s2d('a123 b324 c5 78')
        R = s2l(' c 6')
        self.assertTrue(isinstance(L.difference(M),DictSet))
        self.assertEqual(d2l(L.difference(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c56788')
        R = s2l('a 23 b324')
        self.assertTrue(isinstance(L.difference(M),DictSet))
        self.assertEqual(d2l(L.difference(M)),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78'))
        self.assertEqual(d2l(M),s2l('a1 c56788'))
class TestDictSet__sub__(unittest.TestCase):
    """Tests the ``-`` operator (difference): returns a new DictSet;
    neither operand is modified. Includes empty-set keys (e0/d0)."""

    def test0(self):
        L = DictSet(s2d('a1 c5666788 e0'))
        M = s2d('')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L-M,DictSet))
        self.assertEqual(d2l(L-M),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l(''))

    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788 e0')
        R = s2l('')
        self.assertTrue(isinstance(L-M,DictSet))
        self.assertEqual(d2l(L-M),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788 e0'))

    def test2(self):
        L = DictSet(s2d('a1 c56788 e0'))
        M = s2d('a123 b324 c5 78 d0')
        R = s2l(' c 6')
        self.assertTrue(isinstance(L-M,DictSet))
        self.assertEqual(d2l(L-M),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78 d0'))

    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78 d0'))
        M = s2d('a1 c56788 e0')
        R = s2l('a 23 b324')
        self.assertTrue(isinstance(L-M,DictSet))
        self.assertEqual(d2l(L-M),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78 d0'))
        self.assertEqual(d2l(M),s2l('a1 c56788 e0'))
class TestDictSet_symmetric_difference(unittest.TestCase):
    """symmetric_difference returns values in exactly one operand; no mutation"""
    def test0(self):
        L = DictSet(s2d('a1 c5666788 e0'))
        M = s2d('')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L.symmetric_difference(M),DictSet))
        self.assertEqual(d2l(L.symmetric_difference(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l(''))
    def test1(self):
        # symmetric difference is commutative: empty L, full M
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788 e0')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L.symmetric_difference(M),DictSet))
        self.assertEqual(d2l(L.symmetric_difference(M)),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788 e0'))
    def test2(self):
        L = DictSet(s2d('a1 c56788 e0'))
        M = s2d('a123 b324 c5 78 d0')
        R = s2l('a 23 b324 c 6')
        self.assertTrue(isinstance(L.symmetric_difference(M),DictSet))
        self.assertEqual(d2l(L.symmetric_difference(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78 d0'))
    def test3(self):
        # same data, swapped operands, same result
        L = DictSet(s2d('a123 b324 c5 78 d0'))
        M = s2d('a1 c56788 e0')
        R = s2l('a 23 b324 c 6')
        self.assertTrue(isinstance(L.symmetric_difference(M),DictSet))
        self.assertEqual(d2l(L.symmetric_difference(M)),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78 d0'))
        self.assertEqual(d2l(M),s2l('a1 c56788 e0'))
class TestDictSet__xor__(unittest.TestCase):
    """L ^ M mirrors symmetric_difference(); operands unchanged"""
    def test0(self):
        L = DictSet(s2d('a1 c5666788 e0'))
        M = s2d('')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L^M,DictSet))
        self.assertEqual(d2l(L^M),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l(''))
    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788 e0')
        R = s2l('a1 c56 78')
        self.assertTrue(isinstance(L^M,DictSet))
        self.assertEqual(d2l(L^M),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788 e0'))
    def test2(self):
        L = DictSet(s2d('a1 c56788 e0'))
        M = s2d('a123 b324 c5 78 d0')
        R = s2l('a 23 b324 c 6')
        self.assertTrue(isinstance(L^M,DictSet))
        self.assertEqual(d2l(L^M),R)
        self.assertEqual(d2l(L),s2l('a1 c5678 e0'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78 d0'))
    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78 d0'))
        M = s2d('a1 c56788 e0')
        R = s2l('a 23 b324 c 6')
        self.assertTrue(isinstance(L^M,DictSet))
        self.assertEqual(d2l(L^M),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78 d0'))
        self.assertEqual(d2l(M),s2l('a1 c56788 e0'))
class TestDictSet_intersection(unittest.TestCase):
    """intersection returns values present in both operands; no mutation"""
    def test0(self):
        # intersecting with empty yields empty
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('')
        R = s2l('')
        self.assertTrue(isinstance(L.intersection(M),DictSet))
        self.assertEqual(d2l(L.intersection(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c5678'))
        self.assertEqual(d2l(M),s2l(''))
    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788')
        R = s2l('')
        self.assertTrue(isinstance(L.intersection(M),DictSet))
        self.assertEqual(d2l(L.intersection(M)),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788'))
    def test2(self):
        L = DictSet(s2d('a1 c567889'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a1 c5 78 ')
        self.assertTrue(isinstance(L.intersection(M),DictSet))
        self.assertEqual(d2l(L.intersection(M)),R)
        self.assertEqual(d2l(L),s2l('a1 c56789'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))
    def test3(self):
        # intersection is commutative: swapped operands, same result
        L = DictSet(s2d('a123 b324 c5 78'))
        M = s2d('a1 c567889')
        R = s2l('a1 c5 78 ')
        self.assertTrue(isinstance(L.intersection(M),DictSet))
        self.assertEqual(d2l(L.intersection(M)),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78'))
        self.assertEqual(d2l(M),s2l('a1 c567889'))
class TestDictSet__and__(unittest.TestCase):
    """L & M mirrors intersection(); keys with empty sets drop out"""
    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = s2d('e0')
        R = s2l('')
        self.assertTrue(isinstance(L&M,DictSet))
        self.assertEqual(d2l(L&M),R)
        self.assertEqual(d2l(L),s2l('a1 c5678'))
        self.assertEqual(d2l(M),s2l('e0'))
    def test1(self):
        L = DictSet(s2d(''))
        M = s2d('a1 c5666788 d0')
        R = s2l('')
        self.assertTrue(isinstance(L&M,DictSet))
        self.assertEqual(d2l(L&M),R)
        self.assertEqual(d2l(L),s2l(''))
        self.assertEqual(d2l(M),s2l('a1 c5666788 d0'))
    def test2(self):
        L = DictSet(s2d('a1 c567889 d0'))
        M = s2d('a123 b324 c5 78')
        R = s2l('a1 c5 78')
        self.assertTrue(isinstance(L&M,DictSet))
        self.assertEqual(d2l(L&M),R)
        self.assertEqual(d2l(L),s2l('a1 c56789 d0'))
        self.assertEqual(d2l(M),s2l('a123 b324 c5 78'))
    def test3(self):
        L = DictSet(s2d('a123 b324 c5 78 e0'))
        M = s2d('a1 c567889 d0')
        R = s2l('a1 c5 78 ')
        self.assertTrue(isinstance(L&M,DictSet))
        self.assertEqual(d2l(L&M),R)
        self.assertEqual(d2l(L),s2l('a123 b234 c5 78 e0'))
        self.assertEqual(d2l(M),s2l('a1 c567889 d0'))
# truth comparisons
class TestDictSet__eq__(unittest.TestCase):
    """__eq__ is False for non-mapping types; ignores empty-set keys"""
    def test0(self):
        # comparing against a list of non-pairs is unequal, not an error
        L = DictSet(s2d('a1 c5666788'))
        M = []
        self.assertFalse(L.__eq__(M))
    def test1(self):
        # comparing against a plain int is unequal, not an error
        L = DictSet(s2d(''))
        M = 42
        self.assertFalse(L.__eq__(M))
    def test3(self):
        # key with an empty value set ('c') does not affect equality
        L = DictSet(s2d('a1b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertTrue(L.__eq__(M))
    def test4(self):
        # same on the DictSet side: 'd' maps to an empty set
        L = DictSet(s2d('a1b12345d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertTrue(L.__eq__(M))
    def test5(self):
        L = DictSet()
        M = {}
        self.assertTrue(L.__eq__(M))
class TestDictSet__ne__(unittest.TestCase):
    """__ne__ is the exact negation of the __eq__ cases above"""
    def test0(self):
        L = DictSet(s2d('a1 c5666788'))
        M = []
        self.assertTrue(L.__ne__(M))
    def test1(self):
        L = DictSet(s2d(''))
        M = 42
        self.assertTrue(L.__ne__(M))
    def test3(self):
        L = DictSet(s2d('a1b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertFalse(L.__ne__(M))
    def test4(self):
        L = DictSet(s2d('a1b12345d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertFalse(L.__ne__(M))
    def test5(self):
        L = DictSet()
        M = {}
        self.assertFalse(L.__ne__(M))
class TestDictSet_issubset(unittest.TestCase):
    """issubset: every value in L also in M; non-iterables raise TypeError"""
    def test0(self):
        L = DictSet()
        M = s2d('a1 c5666788')
        self.assertTrue(L.issubset(M))
    def test1(self):
        # an int is not a valid operand
        with self.assertRaises(TypeError) as cm:
            DictSet().issubset(4)
        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")
    def test3(self):
        L = DictSet(s2d('a0b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertTrue(L.issubset(M))
    def test31(self):
        # reversed operands from test3: L has a value ('1' for 'a') not in M
        L = DictSet([('a','1'),('b','52341231425'),('c','')])
        M = DictSet(s2d('a0b12345'))
        self.assertFalse(L.issubset(M))
    def test4(self):
        L = DictSet(s2d('a1b1234d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertTrue(L.issubset(M))
    def test5(self):
        L = DictSet()
        M = {}
        self.assertTrue(L.issubset(M))
    def test6(self):
        # keys with empty sets are ignored: {'a': set()} <= {}
        L = DictSet(s2d('a0'))
        M = {}
        self.assertTrue(L.issubset(M))
class TestDictSet__le__(unittest.TestCase):
    """L <= M mirrors issubset(); same fixtures as TestDictSet_issubset"""
    def test0(self):
        L = DictSet()
        M = s2d('a1 c5666788')
        self.assertTrue(L<=M)
    def test1(self):
        with self.assertRaises(TypeError) as cm:
            DictSet().issubset(4)
        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")
    def test3(self):
        L = DictSet(s2d('a0b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertTrue(L<=M)
    def test31(self):
        L = DictSet([('a','1'),('b','52341231425'),('c','')])
        M = DictSet(s2d('a0b12345'))
        self.assertFalse(L<=M)
    def test4(self):
        L = DictSet(s2d('a1b1234d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertTrue(L<=M)
    def test5(self):
        L = DictSet()
        M = {}
        self.assertTrue(L<=M)
    def test6(self):
        L = DictSet(s2d('a0'))
        M = {}
        self.assertTrue(L<=M)
class TestDictSet_issuperset(unittest.TestCase):
    """issuperset: inverse of issubset over the same fixtures"""
    def test0(self):
        L = DictSet()
        M = s2d('a1 c5666788')
        self.assertFalse(L.issuperset(M))
    def test1(self):
        with self.assertRaises(TypeError) as cm:
            DictSet().issuperset(4)
        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")
    def test3(self):
        L = DictSet(s2d('a0b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertFalse(L.issuperset(M))
    def test31(self):
        L = DictSet([('a','1'),('b','52341231425'),('c','')])
        M = DictSet(s2d('a0b12345'))
        self.assertTrue(L.issuperset(M))
    def test4(self):
        L = DictSet(s2d('a1b1234d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertFalse(L.issuperset(M))
    def test5(self):
        # empty is a superset of empty
        L = DictSet()
        M = {}
        self.assertTrue(L.issuperset(M))
    def test6(self):
        L = DictSet(s2d('a0'))
        M = {}
        self.assertTrue(L.issuperset(M))
class TestDictSet__ge__(unittest.TestCase):
    """L >= M mirrors issuperset(); same fixtures as TestDictSet_issuperset"""
    def test0(self):
        L = DictSet()
        M = s2d('a1 c5666788')
        self.assertFalse(L>=M)
    def test1(self):
        with self.assertRaises(TypeError) as cm:
            DictSet().issuperset(4)
        self.assertEqual(str(cm.exception),
                         "'int' object is not iterable")
    def test3(self):
        L = DictSet(s2d('a0b12345'))
        M = [('a','1'),('b','52341231425'),('c','')]
        self.assertFalse(L>=M)
    def test31(self):
        L = DictSet([('a','1'),('b','52341231425'),('c','')])
        M = DictSet(s2d('a0b12345'))
        self.assertTrue(L>=M)
    def test4(self):
        L = DictSet(s2d('a1b1234d0'))
        M = [('a','1'),('b','52341231425')]
        self.assertFalse(L>=M)
    def test5(self):
        L = DictSet()
        M = {}
        self.assertTrue(L>=M)
    def test6(self):
        L = DictSet(s2d('a0'))
        M = {}
        self.assertTrue(L>=M)
class TestDictSet__contains__(unittest.TestCase):
    """'x in L' is True only for keys whose value set is non-empty"""
    def test0(self):
        # unhashable/unknown objects are simply not contained
        L = DictSet(s2d('a1 c5666788'))
        self.assertFalse(set() in L)
    def test1(self):
        L = DictSet(s2d('a1 c5666788'))
        self.assertFalse(42 in L)
    def test2(self):
        L = DictSet(s2d('a1 c5666788 d0'))
        self.assertTrue('a' in L)
    def test3(self):
        L = DictSet(s2d('a1 c5666788 d0'))
        self.assertFalse('d' in L) # d is a key, but has an empty set
    def test4(self):
        L = DictSet(s2d('a1 c5666788 d0'))
        self.assertFalse('e' in L) # really not a key
class TestDictSet__repr__(unittest.TestCase):
    """repr round-trips through eval; expected text differs Py2 vs Py3"""
    def test0(self):
        # NOTE(review): the exact-string assertion depends on set iteration
        # order, which is not guaranteed under hash randomization — the
        # eval() round-trip below is the order-insensitive check. Verify
        # these literals still match on the interpreters you support.
        L = DictSet(s2d('a1 c5666788'))
        if sys.version_info[0]==2:
            R="DictSet([('a', set(['1'])), ('c', set(['5', '7', '6', '8']))])"
        elif sys.version_info[0]==3:
            R="DictSet([('a', {'1'}), ('c', {'5', '7', '6', '8'})])"
        self.assertEqual(L.__repr__(),R)
        self.assertEqual(d2l(eval(R)),d2l(L))
    def test1(self):
        # empty DictSet reprs identically on both major versions
        L = DictSet()
        if sys.version_info[0]==2:
            R="DictSet()"
        elif sys.version_info[0]==3:
            R="DictSet()"
        self.assertEqual(L.__repr__(),R)
        self.assertEqual(d2l(eval(R)),d2l(L))
class TestDictSet__iter__(unittest.TestCase):
    """iteration yields only keys with non-empty sets; keys() yields all"""
    def test0(self):
        L = DictSet(s2d('a1 b576 c5666788 d43'))
        g=L.__iter__()
        self.assertEqual(set([v for v in g])^set('abcd'),set())
        self.assertEqual(set(list(L.keys()))^set('abcd'),set())
    def test1(self):
        # 'd' has an empty set: skipped by __iter__ but present in keys()
        L = DictSet(s2d('a1 b576 c5666788 d0'))
        g=L.__iter__()
        self.assertEqual(set([v for v in g])^set('abc'),set())
        self.assertEqual(set(list(L.keys()))^set('abcd'),set())
    def test2(self):
        L = DictSet(s2d(''))
        g=L.__iter__()
        self.assertEqual(set([v for v in g]),set())
        self.assertEqual(set(list(L.keys())),set())
class TestDictSet_unique_combinations(unittest.TestCase):
    """unique_combinations yields the cross-product of the selected keys'
    value sets, in sorted order, respecting the order of `keys`"""
    def test0(self):
        # no keys selected -> a single None is yielded
        L = DictSet(s2d('a1 c5666788 d0'))
        g=L.unique_combinations(keys=[])
        self.assertEqual([v for v in g],[None])
    def test1(self):
        # default: all keys with non-empty sets ('d0' is excluded)
        L = DictSet(s2d('a12 c5666788 d0'))
        g=L.unique_combinations()
        self.assertEqual([v for v in g],[['1','5'],
                                         ['1','6'],
                                         ['1','7'],
                                         ['1','8'],
                                         ['2','5'],
                                         ['2','6'],
                                         ['2','7'],
                                         ['2','8']])
    def test2(self):
        # explicit keys restrict the product ('d' is ignored here)
        L = DictSet(s2d('a12 c5666788 d12345'))
        g=L.unique_combinations(keys=['a','c'])
        self.assertEqual([v for v in g],[['1','5'],
                                         ['1','6'],
                                         ['1','7'],
                                         ['1','8'],
                                         ['2','5'],
                                         ['2','6'],
                                         ['2','7'],
                                         ['2','8']])
    def test3(self):
        # key order controls the position of each value in the output rows
        L = DictSet(s2d('a12 c5666788 d12345'))
        g=L.unique_combinations(keys=['c','a'])
        self.assertEqual([v for v in g],[['5','1'],
                                         ['5','2'],
                                         ['6','1'],
                                         ['6','2'],
                                         ['7','1'],
                                         ['7','2'],
                                         ['8','1'],
                                         ['8','2']])
    def test4(self):
        # 2*3*3*2 = 36 combinations, checked as one concatenated string
        L = DictSet(s2d('a12 c568 d123 e78'))
        g=L.unique_combinations()
        self.assertEqual(''.join([''.join(v) for v in g]),
                         '151715181527152815371538161716181627162816371638'
                         '181718181827182818371838251725182527252825372538'
                         '261726182627262826372638281728182827282828372838')
def suite():
    """Build a TestSuite containing every DictSet test case in this module."""
    # NOTE(review): unittest.makeSuite is deprecated (removed in Python
    # 3.13); TestLoader().loadTestsFromTestCase is the modern equivalent.
    # Left as-is because this module still supports Python 2 (see the
    # sys.version_info branches in TestDictSet__repr__).
    return unittest.TestSuite((
            unittest.makeSuite(TestDictSet__init__),
            unittest.makeSuite(TestDictSet_remove),
            unittest.makeSuite(TestDictSet_discard),
            unittest.makeSuite(TestDictSet_clear),
            unittest.makeSuite(TestDictSet_add),
            unittest.makeSuite(TestDictSet_delitem),
            unittest.makeSuite(TestDictSet_get),
            unittest.makeSuite(TestDictSet_setdefault),
            unittest.makeSuite(TestDictSet_copy),
            unittest.makeSuite(TestDictSet_fromkeys),
            unittest.makeSuite(TestDictSet__setitem__),
            unittest.makeSuite(TestDictSet_update),
            unittest.makeSuite(TestDictSet__ior__),
            unittest.makeSuite(TestDictSet_difference_update),
            unittest.makeSuite(TestDictSet__isub__),
            unittest.makeSuite(TestDictSet_symmetric_difference_update),
            unittest.makeSuite(TestDictSet__ixor__),
            unittest.makeSuite(TestDictSet_intersection_update),
            unittest.makeSuite(TestDictSet__iand__),
            unittest.makeSuite(TestDictSet_union),
            unittest.makeSuite(TestDictSet__or__),
            unittest.makeSuite(TestDictSet_difference),
            unittest.makeSuite(TestDictSet__sub__),
            unittest.makeSuite(TestDictSet_symmetric_difference),
            unittest.makeSuite(TestDictSet__xor__),
            unittest.makeSuite(TestDictSet_intersection),
            unittest.makeSuite(TestDictSet__and__),
            unittest.makeSuite(TestDictSet__eq__),
            unittest.makeSuite(TestDictSet__ne__),
            unittest.makeSuite(TestDictSet_issubset),
            unittest.makeSuite(TestDictSet__le__),
            unittest.makeSuite(TestDictSet_issuperset),
            unittest.makeSuite(TestDictSet__ge__),
            unittest.makeSuite(TestDictSet__contains__),
            unittest.makeSuite(TestDictSet_unique_combinations),
            unittest.makeSuite(TestDictSet__repr__),
            unittest.makeSuite(TestDictSet__iter__)
            ))
if __name__ == "__main__":
    # Run the full DictSet suite with the default text test runner.
    runner = unittest.TextTestRunner()
    runner.run(suite())
| |
from flask_cache import Cache as FlaskCache
from flask_login import login_required
from flask import Blueprint, render_template, jsonify, abort
from flask import request, current_app
from sys import getsizeof
import time
import functools
import logging
logger = logging.getLogger(__name__)
class LogData(object):
    """Per-key cache statistics: hotness, hit/miss counts, size and timing."""
    def __init__(self, hot=False, hit=0, miss=0, size=0, access_time=0):
        # hot: key currently present in the cache
        # hit/miss: lookup counters
        # size: approximate value size (KiB), access_time: last lookup (ms)
        self.hot = hot
        self.hit = hit
        self.miss = miss
        self.size = size
        self.access_time = access_time
    def __repr__(self):
        template = 'hot: {}, hit:{}, miss:{}, size:{}, access_time:{}'
        return template.format(self.hot, self.hit, self.miss, self.size,
                               self.access_time)
    def data(self):
        """Return a plain-dict snapshot with size/time pre-formatted."""
        return {
            'hot': self.hot,
            'hit': self.hit,
            'miss': self.miss,
            'size': '{:.3f}'.format(self.size),
            'access_time': '{:.5f}'.format(self.access_time),
        }
class Cache(FlaskCache):
    """Flask-Cache subclass that records per-key statistics.

    Each proxy method mirrors the flask_cache API but additionally logs
    hit/miss counts, approximate value size (KiB) and access time per key
    into LogData records, which CacheStats renders.
    """
    def __init__(self, *args, **kwargs):
        self._log = {}  # key -> LogData
        super(Cache, self).__init__(*args, **kwargs)
    def __add_log(self, key, hot=False, cold=False, hit=False, miss=False,
                  size=None, access_time=None):
        """Create/update the LogData entry for ``key`` with the given facts."""
        if key in self._log:
            data = self._log[key]
        else:
            data = LogData()
            self._log[key] = data
        if hot:
            data.hot = True
        elif cold:
            data.hot = False
        if hit:
            data.hit += 1
        if miss:
            data.miss += 1
        if size:
            data.size = size
        if access_time:
            data.access_time = access_time
    def get(self, *args, **kwargs):
        "Proxy function for internal cache object."
        start_time = time.time()
        retval = self.cache.get(*args, **kwargs)
        end_time = (time.time() - start_time) * 1000  # milliseconds
        # Compare to None rather than truthiness so falsy cached values
        # (0, '', False) are still counted as hits.
        if retval is not None:
            size = getsizeof(retval, 0) / 1024.0
            self.__add_log(args[0], hot=True, hit=True, size=size,
                           access_time=end_time)
        else:
            self.__add_log(args[0], cold=True, miss=True, access_time=end_time)
        return retval
    def set(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.set(*args, **kwargs)
        if retval:
            size = getsizeof(args[1], 0) / 1024.0
            self.__add_log(args[0], hot=True, size=size)
        return retval
    def add(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.add(*args, **kwargs)
        if retval:
            size = getsizeof(args[1], 0) / 1024.0
            self.__add_log(args[0], hot=True, size=size)
        return retval
    def delete(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.delete(*args, **kwargs)
        if retval:
            self.__add_log(args[0], cold=True)
        return retval
    def get_many(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.get_many(*args, **kwargs)
        retval = list(retval)
        for idx, key in enumerate(args):
            # None marks a miss; size is measured per value, not for the
            # whole result list.
            if retval[idx] is not None:
                size = getsizeof(retval[idx], 0) / 1024.0
                self.__add_log(key, hot=True, hit=True, size=size)
            else:
                self.__add_log(key, cold=True, miss=True)
        return retval
    def delete_many(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.delete_many(*args, **kwargs)
        if retval:
            for key in args:
                self.__add_log(key, cold=True)
        return retval
    def set_many(self, *args, **kwargs):
        "Proxy function for internal cache object."
        retval = self.cache.set_many(*args, **kwargs)
        if retval:
            for key in args[0]:
                val = args[0][key]
                size = getsizeof(val, 0) / 1024.0
                self.__add_log(key, hot=True, size=size)
        return retval
    def get_log(self):
        """Return {key: stats-dict} snapshots for every logged key."""
        data = {}
        for key in self._log:
            data[key] = self._log[key].data()
        return data
    def cached(self, timeout=None, key_prefix='view/%s', unless=None):
        """This is a copy of the flask cache version of cached. This one to one
        copy is not ideal, but a necessity as the decorator calls
        self.cache.get() rather than self.get().
        """
        def decorator(f):
            @functools.wraps(f)
            def decorated_function(*args, **kwargs):
                #: Bypass the cache entirely.
                if callable(unless) and unless() is True:
                    return f(*args, **kwargs)
                try:
                    cache_key = decorated_function.make_cache_key(*args, **kwargs)
                    rv = self.get(cache_key)
                except Exception:
                    if current_app.debug:
                        raise
                    logger.exception("Exception possibly due to cache backend.")
                    return f(*args, **kwargs)
                if rv is None:
                    rv = f(*args, **kwargs)
                    try:
                        self.set(cache_key, rv,
                                 timeout=decorated_function.cache_timeout)
                    except Exception:
                        if current_app.debug:
                            raise
                        logger.exception("Exception possibly due to cache backend.")
                        return f(*args, **kwargs)
                return rv
            def make_cache_key(*args, **kwargs):
                # key_prefix may be a callable, a '%s' template over the
                # request path, or a literal key.
                if callable(key_prefix):
                    cache_key = key_prefix()
                elif '%s' in key_prefix:
                    cache_key = key_prefix % request.path
                else:
                    cache_key = key_prefix
                return cache_key
            decorated_function.uncached = f
            decorated_function.cache_timeout = timeout
            decorated_function.make_cache_key = make_cache_key
            return decorated_function
        return decorator
class CacheStats(Blueprint):
    """Flask blueprint exposing the Cache statistics page (and optionally a
    DELETE endpoint to evict individual keys)."""
    def __init__(self, cache_obj, base_template="base.html",
                 enable_clear_api=False, protect_api=True,
                 cache_template="stats_view.html",
                 url_prefix='/cache_stats'):
        # cache_obj: the stats-recording Cache instance to report on.
        # enable_clear_api: register DELETE <url_prefix>/<key> when True.
        # protect_api: wrap the clear endpoint in flask_login.login_required.
        self.cache = cache_obj
        self.base_template = base_template
        self.cache_template = cache_template
        self.api_enabled = enable_clear_api
        super(CacheStats, self).__init__("flask_cache_stats", __name__,
                                         template_folder='templates',
                                         static_folder='static',
                                         static_url_path='')
        self.add_url_rule(url_prefix, 'flask_cache_stats', self.stats_view)
        if self.api_enabled:
            url = url_prefix + '/<key>'
            if protect_api:
                api = login_required(self.clear_key)
            else:
                api = self.clear_key
            self.add_url_rule(url, 'flask_cache_clear_key',
                              api, methods=['DELETE'])
    def stats_view(self):
        """Render the per-key statistics table."""
        return render_template(self.cache_template, log=self.cache.get_log(),
                               base_template=self.base_template,
                               api_enabled=self.api_enabled)
    def clear_key(self, key):
        """Delete ``key`` from the cache; 404 if the backend reports failure."""
        if self.cache.delete(key):
            return jsonify(status='success')
        else:
            abort(404)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def time_series_regression_head(model,
                                state_manager,
                                optimizer,
                                input_statistics_generator=None):
  """Creates a `_Head` for time series regression.
  Args:
    model: A model for time series regression.
    state_manager: A state manager.
    optimizer: An optimizer.
    input_statistics_generator: A input statistics generator.
  Returns:
    An instance of `_Head` for time series regression.
  """
  head = _TimeSeriesRegressionHead(
      model=model,
      state_manager=state_manager,
      optimizer=optimizer,
      input_statistics_generator=input_statistics_generator)
  return head
class _TimeSeriesRegressionHead(head_lib._Head):  # pylint:disable=protected-access
  """See `time_series_regression_head`."""
  def __init__(self,
               model,
               state_manager,
               optimizer,
               input_statistics_generator=None,
               name=None):
    # model/state_manager/optimizer are used lazily in the mode-specific
    # *_ops methods; input_statistics_generator is optional (see
    # create_estimator_spec).
    self.model = model
    self.state_manager = state_manager
    self.optimizer = optimizer
    self.input_statistics_generator = input_statistics_generator
    self._name = name
  def _train_ops(self, features):
    """Add training ops to the graph."""
    with variable_scope.variable_scope("model"):
      model_outputs = self.state_manager.define_loss(
          self.model, features, estimator_lib.ModeKeys.TRAIN)
    train_op = optimizers.optimize_loss(
        model_outputs.loss,
        global_step=training_util.get_global_step(),
        optimizer=self.optimizer,
        # Learning rate is set in the Optimizer object
        learning_rate=None)
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=estimator_lib.ModeKeys.TRAIN,
        train_op=train_op)
  # TODO(terrytangyuan): suffix summary and metrics keys by `"/" + name`
  @property
  def name(self):
    return self._name
  # TODO(terrytangyuan): unused for now. Need to decouple
  # `state_manager.define_loss` to satisfy the extendable return signature of
  # `_Head.create_loss`.
  def create_loss(self, features, mode, logits, labels):
    """See `_Head`."""
    return None
  # TODO(terrytangyuan): check label dimension
  @property
  def logits_dimension(self):
    return None
  def _evaluate_ops(self, features):
    """Add ops for evaluation (aka filtering) to the graph."""
    with variable_scope.variable_scope("model"):
      model_outputs = self.state_manager.define_loss(
          self.model, features, estimator_lib.ModeKeys.EVAL)
    metrics = {}
    # Just output in-sample predictions for the last chunk seen
    for prediction_key, prediction_value in model_outputs.predictions.items():
      metrics[prediction_key] = _identity_metric_single(prediction_key,
                                                        prediction_value)
    metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
        feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
    # end_state may be an arbitrarily nested structure, so it gets one
    # identity metric per flattened element.
    metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
        _identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
                                model_outputs.end_state))
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=estimator_lib.ModeKeys.EVAL,
        eval_metric_ops=metrics,
        predictions={})
  def _predict_ops(self, features):
    """Add ops for prediction to the graph."""
    with variable_scope.variable_scope("model"):
      prediction = self.model.predict(features=features)
    # Echo the prediction times back so callers can align outputs.
    prediction[feature_keys.PredictionResults.TIMES] = features[
        feature_keys.PredictionFeatures.TIMES]
    return estimator_lib.EstimatorSpec(
        predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)
  def _serving_ops(self, features):
    """Add ops for serving to the graph."""
    with variable_scope.variable_scope("model"):
      prediction_outputs = self.model.predict(features=features)
    # Second pass over the same variables (reuse=True) builds the
    # filtering graph alongside the prediction graph.
    with variable_scope.variable_scope("model", reuse=True):
      filtering_outputs = self.state_manager.define_loss(
          self.model, features, estimator_lib.ModeKeys.EVAL)
    return estimator_lib.EstimatorSpec(
        mode=estimator_lib.ModeKeys.PREDICT,
        export_outputs={
            feature_keys.SavedModelLabels.PREDICT:
                export_lib.PredictOutput(prediction_outputs),
            feature_keys.SavedModelLabels.FILTER:
                export_lib.PredictOutput(
                    state_to_dictionary(filtering_outputs.end_state))
        },
        # Likely unused, but it is necessary to return `predictions` to satisfy
        # the Estimator's error checking.
        predictions={})
  def _convert_feature_to_tensor(self, name, value):
    """Casts features to the correct dtype based on their name."""
    if name in [
        feature_keys.TrainEvalFeatures.TIMES,
        feature_keys.PredictionFeatures.TIMES
    ]:
      return math_ops.cast(value, dtypes.int64)
    if name == feature_keys.TrainEvalFeatures.VALUES:
      return math_ops.cast(value, self.model.dtype)
    if name == feature_keys.PredictionFeatures.STATE_TUPLE:
      return value  # Correct dtypes are model-dependent
    return ops.convert_to_tensor(value)
  def _gather_state(self, features):
    """Returns `features` with state packed, indicates if packing was done."""
    # Serving passes state flattened as "<STATE_PREFIX>_<number>" features;
    # collect and re-pack them into the model's state structure.
    prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
                                   r"_(\d+)$")
    numbered_state = []
    for key, tensor in features.items():
      search_result = prefixed_state_re.search(key)
      if search_result:
        numbered_state.append((int(search_result.group(1)), key, tensor))
    if not numbered_state:
      return features, False
    features = features.copy()
    for _, key, _ in numbered_state:
      del features[key]
    # NOTE(review): in Python 3 this key lambda receives the whole
    # (number, key, tensor) tuple as `number`, so sorting falls back to
    # tuple comparison; it still orders by the leading int because the
    # numbers are unique, but `key=operator.itemgetter(0)` would be clearer.
    numbered_state.sort(key=lambda number, *_: number)
    features[feature_keys.State.STATE_TUPLE] = nest.pack_sequence_as(
        structure=self.model.get_start_state(),
        flat_sequence=[tensor for _, _, tensor in numbered_state])
    return features, True
  def create_estimator_spec(self, features, mode, labels=None):
    """Performs basic error checking and returns an EstimatorSpec."""
    with ops.name_scope("head"):
      if labels:
        raise ValueError(
            "The model received a `labels` dictionary, which is "
            "not supported. Pass '{}' and '{}' as "
            "features.".format(feature_keys.TrainEvalFeatures.TIMES,
                               feature_keys.TrainEvalFeatures.VALUES))
      del labels
      features = {
          name: self._convert_feature_to_tensor(name=name, value=value)
          for name, value in features.items()
      }
      # Statistics are only updated during training.
      if self.input_statistics_generator is not None:
        input_statistics = self.input_statistics_generator.initialize_graph(
            features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
      else:
        input_statistics = None
      self.model.initialize_graph(input_statistics=input_statistics)
      # _gather_state requires the model to have its graph initialized (so it
      # has access to the structure of the model's state)
      features, passed_flat_state = self._gather_state(features)
      if (mode == estimator_lib.ModeKeys.TRAIN or
          mode == estimator_lib.ModeKeys.EVAL):
        _check_train_eval_features(features, self.model)
      elif mode == estimator_lib.ModeKeys.PREDICT:
        _check_predict_features(features)
      else:
        raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
      self.state_manager.initialize_graph(
          model=self.model, input_statistics=input_statistics)
      if mode == estimator_lib.ModeKeys.TRAIN:
        return self._train_ops(features)
      elif mode == estimator_lib.ModeKeys.EVAL:
        return self._evaluate_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
        return self._predict_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
        # The mode is PREDICT, but we're actually in export_savedmodel for
        # serving. We want to return two graphs: one for filtering (state + data
        # -> state) and one for predicting (state -> prediction).
        return self._serving_ops(features)
def _check_feature_shapes_compatible_with(features,
                                          compatible_with_name,
                                          compatible_with_value,
                                          ignore=None):
  """Checks all features are compatible with the given time-like feature.

  Raises ValueError when a feature's static rank is below 2 or its leading
  (batch, window) dimensions disagree with `compatible_with_value`.
  """
  ignored_names = ignore if ignore is not None else set()
  expected_shape = compatible_with_value.get_shape()
  for feature_name, feature_value in features.items():
    if feature_name in ignored_names:
      continue
    shape = feature_value.get_shape()
    if shape.ndims is None:
      # Rank unknown statically; nothing to check here.
      continue
    if shape.ndims < 2:
      raise ValueError(
          ("Features must have shape (batch dimension, window size, ...) "
           "(got rank {} for feature '{}')").format(shape.ndims, feature_name))
    if not shape[:2].is_compatible_with(expected_shape):
      raise ValueError(
          ("Features must have shape (batch dimension, window size, ...) "
           "where batch dimension and window size match the "
           "'{times_feature}' feature (got shape {feature_shape} for "
           "feature '{feature_name}' but shape {times_shape} for feature "
           "'{times_feature}')").format(
               times_feature=compatible_with_name,
               feature_shape=shape,
               feature_name=feature_name,
               times_shape=expected_shape))
def _check_predict_features(features):
  """Raises errors if features are not suitable for prediction."""
  # Both TIMES and STATE_TUPLE must be present; check TIMES first to keep
  # the original error ordering.
  for required_key in (feature_keys.PredictionFeatures.TIMES,
                       feature_keys.PredictionFeatures.STATE_TUPLE):
    if required_key not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          required_key))
  times_feature = features[feature_keys.PredictionFeatures.TIMES]
  if not times_feature.get_shape().is_compatible_with([None, None]):
    raise ValueError(
        ("Expected shape (batch dimension, window size) for feature '{}' "
         "(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
                                  times_feature.get_shape()))
  _check_feature_shapes_compatible_with(
      features=features,
      compatible_with_name=feature_keys.PredictionFeatures.TIMES,
      compatible_with_value=times_feature,
      # State shapes are model-dependent, so they are not checked here.
      ignore={feature_keys.PredictionFeatures.STATE_TUPLE})
def _check_train_eval_features(features, model):
  """Raise errors if features are not suitable for training/evaluation.

  Args:
    features: Mapping from feature name to Tensor.
    model: The model the features will be fed to; `model.num_features` is the
      required size of the last `VALUES` dimension.
  Raises:
    ValueError: If a required feature is missing or a static shape is
      incompatible.
  """
  if feature_keys.TrainEvalFeatures.TIMES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.TIMES))
  if feature_keys.TrainEvalFeatures.VALUES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.VALUES))
  times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
  if not times_feature.get_shape().is_compatible_with([None, None]):
    raise ValueError(
        ("Expected shape (batch dimension, window size) for feature '{}' "
         "(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
                                  times_feature.get_shape()))
  values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
  if not values_feature.get_shape().is_compatible_with(
      [None, None, model.num_features]):
    raise ValueError(
        ("Expected shape (batch dimension, window size, {num_features}) "
         "for feature '{feature_name}', since the model was configured "
         "with num_features={num_features} (got shape {got_shape})").format(
            num_features=model.num_features,
            feature_name=feature_keys.TrainEvalFeatures.VALUES,
            # Bug fix: report the offending VALUES shape (previously this
            # printed the TIMES shape, making the error misleading).
            got_shape=values_feature.get_shape()))
  _check_feature_shapes_compatible_with(
      features=features,
      compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
      compatible_with_value=times_feature,
      ignore=set([
          feature_keys.State.STATE_TUPLE  # Model-dependent shapes
      ]))
def _identity_metric_single(name, input_tensor):
  """A metric which takes on its last updated value.
  This keeps evaluation metrics in sync with one another, since update ops are
  run separately from their result Tensors. Simply returning (input_tensor,
  no_op) as a metric with a value but no update means that a metric will come
  from a different batch of data than metrics which cache values in a Variable
  (e.g. the default loss metric).
  Args:
    name: A name for the metric.
    input_tensor: Any Tensor.
  Returns:
    A tuple of (value, update_op).
  """
  # Only in LOCAL_VARIABLES: the variable is not trainable and is kept out
  # of the default global collection.
  metric_variable = variable_scope.variable(
      name="{}_identity_metric".format(name),
      initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      # validate_shape=False because the input's shape may not be known at
      # variable-creation time.
      validate_shape=False)
  update_op = state_ops.assign(
      metric_variable, input_tensor, validate_shape=False)
  # This shape will be correct once the first update runs (but may be
  # incomplete, so is not helpful for initializing the variable).
  metric_variable.set_shape(input_tensor.get_shape())
  return (metric_variable.value(), update_op)
def _identity_metric_nested(name, input_tensors):
  """Create identity metrics for a nested tuple of Tensors."""
  # One identity metric per flattened element, numbered by position.
  metrics = [
      _identity_metric_single(name="{}_{}".format(name, index),
                              input_tensor=tensor)
      for index, tensor in enumerate(nest.flatten(input_tensors))]
  value_tensors = [value for value, _ in metrics]
  update_ops = [update for _, update in metrics]
  # Re-pack values into the input structure; group all updates into one op.
  return (nest.pack_sequence_as(input_tensors, value_tensors),
          control_flow_ops.group(*update_ops))
def state_to_dictionary(state_tuple):
    """Flatten model state into a dictionary with string keys."""
    # Keys are zero-padded so lexical ordering matches positional ordering.
    return {
        "{}_{:02d}".format(feature_keys.State.STATE_PREFIX, position): value
        for position, value in enumerate(nest.flatten(state_tuple))
    }
| |
"""
Code to generate a Python model from a database or differences
between a model and database.
Some of this is borrowed heavily from the AutoCode project at:
http://code.google.com/p/sqlautocode/
"""
import sys
import logging
import sqlalchemy
import migrate
import migrate.changeset
# Module-level logger for this code generator.
log = logging.getLogger(__name__)

# Source-text boilerplate placed at the top of every generated model file.
HEADER = """
## File autogenerated by genmodel.py
from sqlalchemy import *
"""

# Emitted when generating classic (non-declarative) Table definitions.
META_DEFINITION = "meta = MetaData()"

# Emitted instead of META_DEFINITION when declarative mode is requested.
DECLARATIVE_DEFINITION = """
from sqlalchemy.ext import declarative
Base = declarative.declarative_base()
"""
class ModelGenerator(object):
    """Various transformations from an A, B diff.

    In the implementation, A tends to be called the model and B
    the database (although this is not true of all diffs).

    The diff is directionless, but transformations apply the diff
    in a particular direction, described in the method name.
    """

    def __init__(self, diff, engine, declarative=False):
        # diff: schema-diff object exposing tables_missing_from_A/B,
        #   tables_different and the MetaData pair metadataA/metadataB.
        # engine: SQLAlchemy engine bound to the live database (the B side).
        # declarative: emit declarative classes instead of Table() calls.
        self.diff = diff
        self.engine = engine
        self.declarative = declarative

    def column_repr(self, col):
        """Render *col* as Python source for a Column(...) expression.

        Only non-default keyword arguments are emitted, and the column type
        is generalized to a sqlalchemy.types base class where possible so
        the generated code stays dialect-neutral.
        """
        kwarg = []
        if col.key != col.name:
            kwarg.append('key')
        if col.primary_key:
            col.primary_key = True  # otherwise it dumps it as 1
            kwarg.append('primary_key')
        if not col.nullable:
            kwarg.append('nullable')
        if col.onupdate:
            kwarg.append('onupdate')
        if col.default:
            if col.primary_key:
                # I found that PostgreSQL automatically creates a
                # default value for the sequence, but let's not show
                # that.
                pass
            else:
                kwarg.append('default')
        args = ['%s=%r' % (k, getattr(col, k)) for k in kwarg]
        # crs: not sure if this is good idea, but it gets rid of extra
        # u''
        name = col.name.encode('utf8')
        # Walk the MRO for the first non-ALL-CAPS sqlalchemy.types ancestor
        # (skips uppercase aliases such as VARCHAR in favor of the generic
        # class) and represent the column with that type instead.
        type_ = col.type
        for cls in col.type.__class__.__mro__:
            if cls.__module__ == 'sqlalchemy.types' and \
                not cls.__name__.isupper():
                if cls is not type_.__class__:
                    type_ = cls()
                break
        type_repr = repr(type_)
        if type_repr.endswith('()'):
            type_repr = type_repr[:-2]
        constraints = [repr(cn) for cn in col.constraints]
        data = {
            'name': name,
            'commonStuff': ', '.join([type_repr] + constraints + args),
        }
        if self.declarative:
            return """%(name)s = Column(%(commonStuff)s)""" % data
        else:
            return """%(name)s = Column(%(commonStuff)s)""" % data if False else """Column(%(name)r, %(commonStuff)s)""" % data

    def _getTableDefn(self, table, metaName='meta'):
        """Return a list of source lines defining *table*.

        Declarative mode emits a class; classic mode emits a Table() call
        bound to the metadata object named *metaName*.
        """
        out = []
        tableName = table.name
        if self.declarative:
            out.append("class %(table)s(Base):" % {'table': tableName})
            out.append(" __tablename__ = '%(table)s'\n" %
                       {'table': tableName})
            for col in table.columns:
                out.append(" %s" % self.column_repr(col))
            out.append('\n')
        else:
            out.append("%(table)s = Table('%(table)s', %(meta)s," %
                       {'table': tableName, 'meta': metaName})
            for col in table.columns:
                out.append(" %s," % self.column_repr(col))
            out.append(")\n")
        return out

    def _get_tables(self,missingA=False,missingB=False,modified=False):
        """Yield Table objects for the categories selected by the flags.

        Tables missing from A are looked up in metadataB (they only exist
        in the database); the other two categories come from metadataA.
        """
        # NOTE(review): to_process is assigned but never used.
        to_process = []
        for bool_,names,metadata in (
            (missingA,self.diff.tables_missing_from_A,self.diff.metadataB),
            (missingB,self.diff.tables_missing_from_B,self.diff.metadataA),
            (modified,self.diff.tables_different,self.diff.metadataA),
            ):
            if bool_:
                for name in names:
                    yield metadata.tables.get(name)

    def _genModelHeader(self, tables):
        """Return header lines: HEADER plus any dialect-type imports."""
        out = []
        import_index = []
        out.append(HEADER)
        for table in tables:
            for col in table.columns:
                # Dialect-specific column types are not covered by
                # 'from sqlalchemy import *'; import each such class once.
                if "dialects" in col.type.__module__ and \
                    col.type.__class__ not in import_index:
                    out.append("from " + col.type.__module__ +
                               " import " + col.type.__class__.__name__)
                    import_index.append(col.type.__class__)
        out.append("")
        if self.declarative:
            out.append(DECLARATIVE_DEFINITION)
        else:
            out.append(META_DEFINITION)
        out.append("")
        return out

    def genBDefinition(self):
        """Generates the source code for a definition of B.

        Assumes a diff where A is empty.

        Was: toPython. Assume database (B) is current and model (A) is empty.
        """
        out = []
        # _get_tables returns a fresh generator on each call, so iterating
        # twice here is safe.
        out.extend(self._genModelHeader(self._get_tables(missingA=True)))
        for table in self._get_tables(missingA=True):
            out.extend(self._getTableDefn(table))
        return '\n'.join(out)

    def genB2AMigration(self, indent=' '):
        '''Generate a migration from B to A.

        Was: toUpgradeDowngradePython
        Assume model (A) is most current and database (B) is out-of-date.

        Returns a (declarations, upgrade_body, downgrade_body) tuple of
        source text; each body line is prefixed with *indent*.
        '''
        decls = ['from migrate.changeset import schema',
                 'pre_meta = MetaData()',
                 'post_meta = MetaData()',
                 ]
        upgradeCommands = ['pre_meta.bind = migrate_engine',
                           'post_meta.bind = migrate_engine']
        downgradeCommands = list(upgradeCommands)
        # Tables that only exist in the database (B) get dropped on upgrade
        # and recreated on downgrade.
        for tn in self.diff.tables_missing_from_A:
            pre_table = self.diff.metadataB.tables[tn]
            decls.extend(self._getTableDefn(pre_table, metaName='pre_meta'))
            upgradeCommands.append(
                "pre_meta.tables[%(table)r].drop()" % {'table': tn})
            downgradeCommands.append(
                "pre_meta.tables[%(table)r].create()" % {'table': tn})
        # Tables that only exist in the model (A) get created on upgrade.
        for tn in self.diff.tables_missing_from_B:
            post_table = self.diff.metadataA.tables[tn]
            decls.extend(self._getTableDefn(post_table, metaName='post_meta'))
            upgradeCommands.append(
                "post_meta.tables[%(table)r].create()" % {'table': tn})
            downgradeCommands.append(
                "post_meta.tables[%(table)r].drop()" % {'table': tn})
        for (tn, td) in self.diff.tables_different.iteritems():
            # Only declare the pre/post version of a table if the migration
            # body actually references it.
            if td.columns_missing_from_A or td.columns_different:
                pre_table = self.diff.metadataB.tables[tn]
                decls.extend(self._getTableDefn(
                    pre_table, metaName='pre_meta'))
            if td.columns_missing_from_B or td.columns_different:
                post_table = self.diff.metadataA.tables[tn]
                decls.extend(self._getTableDefn(
                    post_table, metaName='post_meta'))
            for col in td.columns_missing_from_A:
                upgradeCommands.append(
                    'pre_meta.tables[%r].columns[%r].drop()' % (tn, col))
                downgradeCommands.append(
                    'pre_meta.tables[%r].columns[%r].create()' % (tn, col))
            for col in td.columns_missing_from_B:
                upgradeCommands.append(
                    'post_meta.tables[%r].columns[%r].create()' % (tn, col))
                downgradeCommands.append(
                    'post_meta.tables[%r].columns[%r].drop()' % (tn, col))
            # Column alterations are not supported: emit a failing assert so
            # the user has to fill the migration in by hand.
            for modelCol, databaseCol, modelDecl, databaseDecl in td.columns_different:
                upgradeCommands.append(
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
                        tn, modelCol.name, databaseCol.name))
                downgradeCommands.append(
                    'assert False, "Can\'t alter columns: %s:%s=>%s"' % (
                        tn, modelCol.name, databaseCol.name))
        return (
            '\n'.join(decls),
            '\n'.join('%s%s' % (indent, line) for line in upgradeCommands),
            '\n'.join('%s%s' % (indent, line) for line in downgradeCommands))

    def _db_can_handle_this_change(self,td):
        """Check if the database can handle going from B to A."""
        if (td.columns_missing_from_B
            and not td.columns_missing_from_A
            and not td.columns_different):
            # Even sqlite can handle column additions.
            return True
        else:
            # Column drops/alters require a real ALTER TABLE, which sqlite
            # lacks; every other supported backend is assumed capable.
            return not self.engine.url.drivername.startswith('sqlite')

    def runB2A(self):
        """Goes from B to A.

        Was: applyModel. Apply model (A) to current database (B).
        """
        meta = sqlalchemy.MetaData(self.engine)
        for table in self._get_tables(missingA=True):
            table = table.tometadata(meta)
            table.drop()
        for table in self._get_tables(missingB=True):
            table = table.tometadata(meta)
            table.create()
        for modelTable in self._get_tables(modified=True):
            tableName = modelTable.name
            modelTable = modelTable.tometadata(meta)
            dbTable = self.diff.metadataB.tables[tableName]
            td = self.diff.tables_different[tableName]
            if self._db_can_handle_this_change(td):
                for col in td.columns_missing_from_B:
                    modelTable.columns[col].create()
                for col in td.columns_missing_from_A:
                    dbTable.columns[col].drop()
                # XXX handle column changes here.
            else:
                # Sqlite doesn't support drop column, so you have to
                # do more: create temp table, copy data to it, drop
                # old table, create new table, copy data back.
                #
                # I wonder if this is guaranteed to be unique?
                tempName = '_temp_%s' % modelTable.name

                def getCopyStatement():
                    # NOTE(review): `preparer` is assigned but never used.
                    preparer = self.engine.dialect.preparer
                    # Copy only columns present in both old and new schemas.
                    commonCols = []
                    for modelCol in modelTable.columns:
                        if modelCol.name in dbTable.columns:
                            commonCols.append(modelCol.name)
                    commonColsStr = ', '.join(commonCols)
                    return 'INSERT INTO %s (%s) SELECT %s FROM %s' % \
                        (tableName, commonColsStr, commonColsStr, tempName)

                # Move the data in one transaction, so that we don't
                # leave the database in a nasty state.
                connection = self.engine.connect()
                trans = connection.begin()
                try:
                    connection.execute(
                        'CREATE TEMPORARY TABLE %s as SELECT * from %s' % \
                        (tempName, modelTable.name))
                    # make sure the drop takes place inside our
                    # transaction with the bind parameter
                    modelTable.drop(bind=connection)
                    modelTable.create(bind=connection)
                    connection.execute(getCopyStatement())
                    connection.execute('DROP TABLE %s' % tempName)
                    trans.commit()
                except:
                    # Roll back and re-raise so no partial rewrite is left
                    # behind; the bare except is deliberate here.
                    trans.rollback()
                    raise
| |
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
import argparse
import ast
import glob
import hashlib
import itertools
import json
import multiprocessing
import os
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
import urllib2
import uuid
import jobset
import report_utils
import watch_dirs
# Anchor all relative paths in this script at the repository root, regardless
# of the directory the script was invoked from.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)

# Extra environment variables forced onto wrapper-script test runs;
# repopulated below when --travis is set.
_FORCE_ENVIRON_FOR_WRAPPERS = {}
def platform_string():
    """Return the host platform string as reported by jobset."""
    return jobset.platform_string()
# Config: just compile with CONFIG=config, and run the binary to test
class Config(object):

    def __init__(self, config, environ=None, timeout_multiplier=1,
                 tool_prefix=None):
        """A build/run configuration.

        Args:
            config: build config name (e.g. 'dbg', 'opt', 'gcov').
            environ: extra environment variables for every test job.
            timeout_multiplier: scales each job's timeout.
            tool_prefix: command-line prefix (e.g. a wrapper tool) prepended
                to every test invocation.
        """
        if environ is None:
            environ = {}
        self.build_config = config
        # gcov builds emit coverage data, so cached results can't be reused.
        self.allow_hashing = (config != 'gcov')
        self.environ = environ
        self.environ['CONFIG'] = config
        # None sentinel instead of a mutable [] default: a shared default
        # list would be aliased across every Config instance.
        self.tool_prefix = tool_prefix if tool_prefix is not None else []
        self.timeout_multiplier = timeout_multiplier

    def job_spec(self, cmdline, hash_targets, timeout_seconds=5*60,
                 shortname=None, environ=None, cpu_cost=1.0, flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
            cmdline: a list of strings specifying the command line the test
                would like to run
            hash_targets: either None (don't do caching of test results), or
                a list of strings specifying files to include in a
                binary hash to check if a test has changed
                -- if used, all artifacts needed to run the test must
                be listed
            environ: per-job environment overrides layered on top of the
                config-wide environment (None sentinel avoids the mutable
                {} default-argument pitfall).
        """
        actual_environ = self.environ.copy()
        for k, v in (environ or {}).iteritems():
            actual_environ[k] = v
        return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                              shortname=shortname,
                              environ=actual_environ,
                              cpu_cost=cpu_cost,
                              timeout_seconds=(self.timeout_multiplier * timeout_seconds
                                               if timeout_seconds else None),
                              hash_targets=hash_targets
                                           if self.allow_hashing else None,
                              flake_retries=5 if flaky or args.allow_flakes else 0,
                              timeout_retries=3 if args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets for this platform from tests.json.

    Args:
        travis: if True, filter by 'ci_platforms' and drop flaky targets.
        test_lang: language tag to select ('c' or 'c++').
    Returns:
        A list of target dicts read from tools/run_tests/tests.json.
    """
    # (removed an unused `out = []` local)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/tests.json') as f:
        js = json.load(f)
    return [tgt
            for tgt in js
            if tgt['language'] == test_lang and
            platform_string() in tgt[platforms_str] and
            not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
class CLanguage(object):
    """Build/test adapter for the make-based C and C++ targets."""

    def __init__(self, make_target, test_lang):
        # make_target: make-target suffix ('c' or 'cxx').
        # test_lang: language tag used to filter tests.json ('c' or 'c++').
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Bind the run Config and parsed CLI args; derive make options."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            self._make_options = [_windows_toolset_option(self.args.compiler),
                                  _windows_arch_option(self.args.arch)]
        else:
            self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
                                                                             self.args.compiler)

    def test_specs(self):
        """Expand tests.json targets into concrete JobSpecs.

        Each target is crossed with the polling strategies available on this
        platform; gtest binaries are further expanded into one job per test
        case via --gtest_list_tests.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        POLLING_STRATEGIES = {
            'windows': ['all'],
            'mac': ['all'],
            'posix': ['all'],
            'linux': ['poll', 'legacy']
        }
        for target in binaries:
            polling_strategies = (POLLING_STRATEGIES[self.platform]
                                  if target.get('uses_polling', True)
                                  else ['all'])
            for polling_strategy in polling_strategies:
                env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                     _ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
                     'GRPC_POLL_STRATEGY': polling_strategy}
                shortname_ext = '' if polling_strategy=='all' else ' polling=%s' % polling_strategy
                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.platform == 'windows':
                    binary = 'vsprojects/%s%s/%s.exe' % (
                        'x64/' if self.args.arch == 'x64' else '',
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
                if os.path.isfile(binary):
                    if 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a
                        # complete list of the tests contained in a binary
                        # for each test, we then add a job to run, filtering for just that
                        # test
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output([binary, '--gtest_list_tests'],
                                                            stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # strip trailing '#'-comments that gtest appends
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                # unindented line: a new test-suite prefix
                                base = line.strip()
                            else:
                                # indented line: a test inside the current suite
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary] + ['--gtest_filter=%s' % test]
                                out.append(self.config.job_spec(cmdline, [binary],
                                                                shortname='%s:%s %s' % (binary, test, shortname_ext),
                                                                cpu_cost=target['cpu_cost'],
                                                                environ=env))
                    else:
                        cmdline = [binary] + target['args']
                        out.append(self.config.job_spec(cmdline, [binary],
                                                        shortname=' '.join(cmdline) + shortname_ext,
                                                        cpu_cost=target['cpu_cost'],
                                                        flaky=target.get('flaky', False),
                                                        environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print '\nWARNING: binary not found, skipping', binary
        return sorted(out)

    def make_targets(self):
        """Make targets to build, narrowed by --regex when possible."""
        test_regex = self.args.regex
        if self.platform != 'windows' and self.args.regex != '.*':
            # use the regex to minimize the number of things to build
            return [os.path.basename(target['name'])
                    for target in get_c_tests(False, self.test_lang)
                    if re.search(test_regex, '/' + target['name'])]
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]

    def make_options(self):
        # computed in configure()
        return self._make_options;

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\pre_build_c.bat']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/post_tests_c.sh']]

    def makefile_name(self):
        return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        """Make variables selecting (optionally versioned) clang binaries."""
        return ['CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix]

    def _gcc_make_options(self, version_suffix):
        """Make variables selecting a specific gcc version."""
        return ['CC=gcc%s' % version_suffix,
                'CXX=g++%s' % version_suffix,
                'LD=gcc%s' % version_suffix,
                'LDXX=g++%s' % version_suffix]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # non-docker runs only support the platform default compiler
            _check_compiler(compiler, ['default'])
        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.4':
            return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
        elif compiler == 'gcc4.6':
            return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
                                                    _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
class NodeLanguage(object):
    """Build/test adapter for Node.js."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Bind config/args and resolve which node version to test."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default', 'node0.12',
                                             'node4', 'node5'])
        # 'default' means node 4; otherwise take off the word "node".
        self.node_version = ('4' if self.args.compiler == 'default'
                             else self.args.compiler[4:])

    def test_specs(self):
        """Single job invoking the platform's node test runner."""
        if self.platform != 'windows':
            return [self.config.job_spec(
                ['tools/run_tests/run_node.sh', self.node_version],
                None,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
        return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)]

    def pre_build_steps(self):
        if self.platform != 'windows':
            return [['tools/run_tests/pre_build_node.sh', self.node_version]]
        return [['tools\\run_tests\\pre_build_node.bat']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform != 'windows':
            return [['tools/run_tests/build_node.sh', self.node_version]]
        return [['tools\\run_tests\\build_node.bat']]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)

    def __str__(self):
        return 'node'
class PhpLanguage(object):
    """Build/test adapter for PHP."""

    def configure(self, config, args):
        """Bind the run Config and parsed CLI args."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Single job running the PHP test script."""
        spec = self.config.job_spec(['src/php/bin/run_tests.sh'], None,
                                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'shared_c']

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)

    def __str__(self):
        return 'php'
class PythonLanguage(object):
    """Build/test adapter for Python (tox-based)."""

    def configure(self, config, args):
        """Bind config/args and resolve the tox environments to run."""
        self.config = config
        self.args = args
        self._tox_envs = self._get_tox_envs(self.args.compiler)

    def test_specs(self):
        """One JobSpec per (tox env, suite); coverage runs whole envs."""
        # load list of known test suites
        with open('src/python/grpcio/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        environment['PYTHONPATH'] = '{}:{}'.format(
            os.path.abspath('src/python/gens'),
            os.path.abspath('src/python/grpcio_health_checking'))
        if self.config.build_config == 'gcov':
            # Coverage: one long-running job per tox environment.
            return [self.config.job_spec(['tools/run_tests/run_python.sh', tox_env],
                                         None,
                                         environ=environment,
                                         shortname='%s.test.coverage' % tox_env,
                                         timeout_seconds=15*60)
                    for tox_env in self._tox_envs]
        # Normal runs: one job per (env, suite), filtered to that suite.
        return [self.config.job_spec(
            ['tools/run_tests/run_python.sh', tox_env],
            None,
            environ=dict(environment.items() +
                         [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
            shortname='%s.test.%s' % (tox_env, suite_name),
            timeout_seconds=5*60)
            for suite_name in tests_json
            for tox_env in self._tox_envs]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['static_c', 'grpc_python_plugin', 'shared_c']

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/build_python.sh', tox_env]
                for tox_env in self._tox_envs]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/python_jessie_%s' % _docker_arch_suffix(self.args.arch)

    def _get_tox_envs(self, compiler):
        """Returns name of tox environment based on selected compiler."""
        tox_envs_by_compiler = {
            'default': ('py27', 'py34'),
            'python2.7': ('py27',),
            'python3.4': ('py34',),
        }
        if compiler not in tox_envs_by_compiler:
            raise Exception('Compiler %s not supported.' % compiler)
        return tox_envs_by_compiler[compiler]

    def __str__(self):
        return 'python'
class RubyLanguage(object):
    """Build/test adapter for Ruby."""

    def configure(self, config, args):
        """Bind the run Config and parsed CLI args."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Single job with a 10-minute timeout."""
        spec = self.config.job_spec(['tools/run_tests/run_ruby.sh'], None,
                                    timeout_seconds=10*60,
                                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return [['tools/run_tests/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)

    def __str__(self):
        return 'ruby'
class CSharpLanguage(object):
    """Build/test adapter for C# (nunit via mono, or msbuild on Windows)."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Bind config/args and derive platform-specific make options."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            # Explicitly choosing between x86 and x64 arch doesn't work yet
            _check_arch(self.args.arch, ['default'])
            self._make_options = [_windows_toolset_option(self.args.compiler),
                                  _windows_arch_option(self.args.arch)]
        else:
            _check_compiler(self.args.compiler, ['default'])
            if self.platform == 'mac':
                # On Mac, official distribution of mono is 32bit.
                # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
                self._make_options = ['EMBED_OPENSSL=true',
                                     'CFLAGS=-m32', 'LDFLAGS=-m32']
            else:
                self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """One JobSpec per test, or per assembly when collecting coverage."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)
        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All',
                      '--noresult',
                      '--workers=1']
        # On non-Windows platforms the .exe assemblies run under mono.
        if self.platform == 'windows':
            runtime_cmd = []
        else:
            runtime_cmd = ['mono']
        specs = []
        for assembly in tests_by_assembly.iterkeys():
            assembly_file = 'src/csharp/%s/bin/%s/%s.exe' % (assembly, msbuild_config, assembly)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
                    specs.append(self.config.job_spec(cmdline,
                                                      None,
                                                      shortname='csharp.%s' % test,
                                                      environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                           '-target:%s' % assembly_file,
                           '-targetdir:src\\csharp',
                           '-targetargs:%s' % ' '.join(nunit_args),
                           '-filter:+[Grpc.Core]*',
                           '-register:user',
                           '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(self.config.job_spec(cmdline,
                                                  None,
                                                  shortname='csharp.coverage.%s' % assembly,
                                                  cpu_cost=run_exclusive,
                                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\pre_build_csharp.bat']]
        else:
            return [['tools/run_tests/pre_build_csharp.sh']]

    def make_targets(self):
        return ['grpc_csharp_ext']

    def make_options(self):
        # computed in configure()
        return self._make_options;

    def build_steps(self):
        if self.platform == 'windows':
            return [[_windows_build_bat(self.args.compiler),
                     'src/csharp/Grpc.sln',
                     '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
        else:
            return [['tools/run_tests/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\post_tests_csharp.bat']]
        else:
            return [['tools/run_tests/post_tests_csharp.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_jessie_%s' % _docker_arch_suffix(self.args.arch)

    def __str__(self):
        return 'csharp'
class ObjCLanguage(object):
    """Build/test adapter for Objective-C."""

    def configure(self, config, args):
        """Bind the run Config and parsed CLI args."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Single job running the Objective-C test script."""
        spec = self.config.job_spec(['src/objective-c/tests/run_tests.sh'], None,
                                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['grpc_objective_c_plugin', 'interop_server']

    def make_options(self):
        return []

    def build_steps(self):
        return [['src/objective-c/tests/build_tests.sh']]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # Objective-C tests do not run under docker.
        return None

    def __str__(self):
        return 'objc'
class Sanity(object):
    """Repository sanity checks driven by a YAML manifest."""

    def configure(self, config, args):
        """Bind the run Config and parsed CLI args."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One job per entry in sanity_tests.yaml; jobs run untimed."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            return [self.config.job_spec(cmd['script'].split(), None,
                                         timeout_seconds=None, environ={'TEST': 'true'},
                                         cpu_cost=cmd.get('cpu_cost', 1))
                    for cmd in yaml.load(f)]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
# different configurations we can run under
# configs.json holds a list of dicts; each becomes a Config(**kwargs).
# NOTE(review): parsed with ast.literal_eval rather than json.load —
# presumably the file contains only Python-literal-compatible values; verify.
with open('tools/run_tests/configs.json') as f:
    _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Registry of all runnable language adapters, keyed by --language name.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'node': NodeLanguage(),
    'php': PhpLanguage(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc' : ObjCLanguage(),
    'sanity': Sanity()
    }

# Maps our build-config names to the msbuild configuration used on Windows.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
    }
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print 'Architecture %s not supported.' % arch
sys.exit(1)
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not."""
    if platform_string() == 'windows':
        # Validation happens as a side effect of computing the msbuild flag.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        acceptable = (arch == 'default'
                      or (runtime_arch == '64bit' and arch == 'x64')
                      or (runtime_arch == '32bit' and arch == 'x86'))
        if not acceptable:
            print('Architecture %s does not match current runtime architecture.' % arch)
            sys.exit(1)
    else:
        # NOTE(review): this branch checks the global args.arch rather than
        # the `arch` parameter — same value at the call site; confirm.
        if args.arch != 'default':
            print('Architecture %s not supported on current platform.' % args.arch)
            sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
elif compiler == 'vs2010':
return 'vsprojects\\build_vs2010.bat'
else:
print 'Compiler %s not supported.' % compiler
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
if compiler == 'default' or compiler == 'vs2013':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
elif compiler == 'vs2010':
return '/p:PlatformToolset=v100'
else:
print 'Compiler %s not supported.' % compiler
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print 'Architecture %s not supported with current settings.' % arch
sys.exit(1)
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

    Returns:
        A positive integer or 0, the latter indicating an infinite number of
        runs.
    Raises:
        argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0: raise ValueError
        return n
    # Catch only ValueError (non-integer or non-positive input); the
    # previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    except ValueError:
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# Build/run configuration (keys of _CONFIGS, loaded from configs.json).
argp.add_argument('-c', '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
                  help='A positive integer or "inf". If "inf", all tests will run in an '
                  'infinite loop. Especially useful in combination with "-f"')
# Test selection / parallelism knobs.
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-f', '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t', '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l', '--language',
                  choices=['all'] + sorted(_LANGUAGES.keys()),
                  nargs='+',
                  default=['all'])
argp.add_argument('-S', '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
                  choices=['default', 'x86', 'x64'],
                  default='default',
                  help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
                  choices=['default',
                           'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
                           'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
                           'vs2010', 'vs2013', 'vs2015',
                           'python2.7', 'python3.4',
                           'node0.12', 'node4', 'node5'],
                  default='default',
                  help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but dont run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
                  help='Update some submodules before building. If any are updated, also run generate_projects. ' +
                  'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
                  help='Generates a JUnit-compatible XML report')
# Global parsed-arguments object, referenced throughout this module.
args = argp.parse_args()
jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each entry is SUBMODULE_NAME[:BRANCH]; BRANCH defaults to master.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule
    # cwd is bound as a default argument so each iteration's helper keeps
    # its own submodule path (avoids the late-binding closure pitfall).
    def git(cmd, cwd=cwd):
        print 'in %s: git %s' % (cwd, cmd)
        subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
    else:
        print 'WARNING: may need to regenerate projects, but since we are not on'
        print ' Linux this step is being skipped. Compilation MAY fail.'

# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

if 'all' in args.language:
    lang_list = _LANGUAGES.keys()
else:
    lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
    for bad in ['objc', 'sanity']:
        if bad in lang_list:
            lang_list.remove(bad)

languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
    l.configure(run_config, args)

# Custom make options only work when a single language is being built
# (except under gcov, where a shared docker image is used).
language_make_options=[]
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print 'languages with custom make options cannot be built simultaneously with other languages'
        sys.exit(1)
    else:
        language_make_options = next(iter(languages)).make_options()
if args.use_docker:
if not args.travis:
print 'Seen --use_docker flag, will run tests under docker.'
print
print 'IMPORTANT: The changes you are testing need to be locally committed'
print 'because only the committed changes in the current branch will be'
print 'copied to the docker environment.'
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(['tools/run_tests/dockerize/build_docker_and_run_tests.sh'],
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Return the list of JobSpecs that build `targets` for configuration `cfg`."""
    if platform_string() != 'windows':
        # On non-Windows platforms everything funnels through one make call.
        if not targets:
            return []
        cmdline = ([os.getenv('MAKE', 'make'),
                    '-f', makefile,
                    '-j', '%d' % args.jobs,
                    'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
                    'CONFIG=%s' % cfg]
                   + language_make_options
                   + ([] if not args.travis else ['JENKINS_BUILD=1'])
                   + targets)
        return [jobset.JobSpec(cmdline, timeout_seconds=None)]
    # Windows: one msbuild invocation per solution target.
    msbuild_extra = []
    # better do parallel compilation
    # empirically /m:2 gives the best performance/price and should prevent
    # overloading the windows workers.
    msbuild_extra.extend(['/m:2'])
    # disable PDB generation: it's broken, and we don't need it during CI
    msbuild_extra.extend(['/p:Jenkins=true'])
    specs = []
    for target in targets:
        specs.append(jobset.JobSpec(
            [_windows_build_bat(args.compiler),
             'vsprojects\\%s.sln' % target,
             '/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]]
            + msbuild_extra
            + language_make_options,
            shell=True, timeout_seconds=None))
    return specs
# Collect the set of make targets per makefile across all selected languages.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))

def build_step_environ(cfg):
    """Environment for a build step: CONFIG always, MSBUILD_CONFIG only for
    configs that map to an msbuild configuration."""
    environ = {'CONFIG': cfg}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    if msbuild_cfg:
        environ['MSBUILD_CONFIG'] = msbuild_cfg
    return environ

# Pre-build steps (deduplicated via set); retried up to 5 times on flakes.
build_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
    for l in languages
    for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.iteritems())
    build_steps.extend(set(make_commands))
# Per-language build steps run after make, with no timeout.
build_steps.extend(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
    for l in languages
    for cmdline in l.build_steps()))

# Steps that run once after all tests have finished.
post_tests_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
    for l in languages
    for cmdline in l.post_tests_steps()))

runs_per_test = args.runs_per_test
forever = args.forever
class TestCache(object):
    """Remembers the binary hash of each test's last successful run.

    State is persisted to the '.run_tests_cache' file so that subsequent
    invocations can skip tests whose binaries have not changed.
    """

    def __init__(self, use_cache_results):
        # Maps cmdline -> bin_hash recorded at the most recent success.
        self._last_successful_run = {}
        self._use_cache_results = use_cache_results
        self._last_save = time.time()

    def should_run(self, cmdline, bin_hash):
        """Return False only when this exact binary already passed this test
        and cached results are being honored."""
        known = cmdline in self._last_successful_run
        unchanged = known and self._last_successful_run[cmdline] == bin_hash
        return not (unchanged and self._use_cache_results)

    def finished(self, cmdline, bin_hash):
        """Record a successful run; persist at most once per second."""
        self._last_successful_run[cmdline] = bin_hash
        if time.time() - self._last_save > 1:
            self.save()

    def dump(self):
        """Serialize the cache to a JSON-friendly list of dicts."""
        return [{'cmdline': command, 'hash': digest}
                for command, digest in self._last_successful_run.iteritems()]

    def parse(self, exdump):
        """Restore the cache from a structure produced by dump()."""
        self._last_successful_run = dict(
            (entry['cmdline'], entry['hash']) for entry in exdump)

    def save(self):
        with open('.run_tests_cache', 'w') as f:
            f.write(json.dumps(self.dump()))
        self._last_save = time.time()

    def maybe_load(self):
        if os.path.exists('.run_tests_cache'):
            with open('.run_tests_cache') as f:
                self.parse(json.loads(f.read()))
def _start_port_server(port_server_port):
    """Ensure an up-to-date port server is listening on port_server_port,
    starting (or restarting) one if necessary and blocking until it responds."""
    # check if a compatible port server is running
    # if incompatible (version mismatch) ==> start a new one
    # if not running ==> start a new one
    # otherwise, leave it up
    try:
        version = int(urllib2.urlopen(
            'http://localhost:%d/version_number' % port_server_port,
            timeout=1).read())
        print 'detected port server running version %d' % version
        running = True
    except Exception as e:
        # Any failure (connection refused, timeout, bad payload) counts as
        # "no server detected".
        print 'failed to detect port server: %s' % sys.exc_info()[0]
        print e.strerror
        running = False
    if running:
        # Compare the live server's version with our checked-out copy.
        current_version = int(subprocess.check_output(
            [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
             'dump_version']))
        print 'my port server is version %d' % current_version
        running = (version >= current_version)
        if not running:
            print 'port_server version mismatch: killing the old one'
            urllib2.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
            time.sleep(1)
    if not running:
        # Launch a detached port server; its output goes to a temp log file.
        fd, logfile = tempfile.mkstemp()
        os.close(fd)
        print 'starting port_server, with log file %s' % logfile
        args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
                '-p', '%d' % port_server_port, '-l', logfile]
        env = dict(os.environ)
        env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
        if platform_string() == 'windows':
            # Working directory of port server needs to be outside of Jenkins
            # workspace to prevent file lock issues.
            tempdir = tempfile.mkdtemp()
            port_server = subprocess.Popen(
                args,
                env=env,
                cwd=tempdir,
                creationflags = 0x00000008, # detached process
                close_fds=True)
        else:
            port_server = subprocess.Popen(
                args,
                env=env,
                preexec_fn=os.setsid,
                close_fds=True)
        time.sleep(1)
        # ensure port server is up
        waits = 0
        while True:
            if waits > 10:
                print 'killing port server due to excessive start up waits'
                port_server.kill()
            if port_server.poll() is not None:
                print 'port_server failed to start'
                # try one final time: maybe another build managed to start one
                time.sleep(1)
                try:
                    urllib2.urlopen('http://localhost:%d/get' % port_server_port,
                                    timeout=1).read()
                    print 'last ditch attempt to contact port server succeeded'
                    break
                except:
                    traceback.print_exc()
                    port_log = open(logfile, 'r').read()
                    print port_log
                    sys.exit(1)
            try:
                urllib2.urlopen('http://localhost:%d/get' % port_server_port,
                                timeout=1).read()
                print 'port server is up and ready'
                break
            except socket.timeout:
                print 'waiting for port_server: timeout'
                traceback.print_exc();
                time.sleep(1)
                waits += 1
            except urllib2.URLError:
                print 'waiting for port_server: urlerror'
                traceback.print_exc();
                time.sleep(1)
                waits += 1
            except:
                # Unexpected failure: kill the child and propagate.
                traceback.print_exc()
                port_server.kill()
                raise
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Failure categories reported by _build_and_run.

    Each category is a unique sentinel object; compare with ``is`` or ``in``.
    """
    BUILD, TEST, POST_TEST = object(), object(), object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
    check_cancelled, newline_on_success, cache, xml_report=None, build_only=False):
    """Do one pass of building & running tests.

    Returns a list of BuildAndRunError categories; empty on full success.
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps, maxjobs=1, stop_on_failure=True,
        newline_on_success=newline_on_success, travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(resultset, xml_report)
        return []

    # start antagonists
    antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                   for _ in range(0, args.antagonists)]
    port_server_port = 32767
    _start_port_server(port_server_port)
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Every test spec matching --regex, across all selected languages.
        one_run = set(
            spec
            for language in languages
            for spec in language.test_specs()
            if re.search(args.regex, spec.shortname))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis:
            massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
            random.shuffle(massaged_one_run)  # which it modifies in-place.
        if infinite_runs:
            assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)
        num_test_failures, resultset = jobset.run(
            all_runs, check_cancelled, newline_on_success=newline_on_success,
            travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
            stop_on_failure=args.stop_on_failure,
            cache=cache if not xml_report else None,
            add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
        if resultset:
            # Summarize each test: all runs failed, some flaked, or all passed.
            for k, v in resultset.iteritems():
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures == num_runs:  # what about infinite_runs???
                    jobset.message('FAILED', k, do_newline=True)
                elif num_failures > 0:
                    jobset.message(
                        'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
                        do_newline=True)
                else:
                    jobset.message('PASSED', k, do_newline=True)
    finally:
        # Always stop antagonists, even on cancellation or failure.
        for antagonist in antagonists:
            antagonist.kill()
        if xml_report and resultset:
            report_utils.render_junit_xml_report(resultset, xml_report)

    number_failures, _ = jobset.run(
        post_tests_steps, maxjobs=1, stop_on_failure=True,
        newline_on_success=newline_on_success, travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    if cache: cache.save()

    return out
# Main driver: either loop forever rebuilding on file changes, or do one
# build-and-run pass and exit with a bitmask of failure categories.
test_cache = TestCache(runs_per_test == 1)
test_cache.maybe_load()

if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: the original wrote `_build_and_run(...) == 0`, comparing
        # the returned *list* of errors with 0 — always False — and never
        # updated `success`, so the SUCCESS transition below could not work.
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                cache=test_cache,
                                build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message('SUCCESS',
                           'All tests are now passing properly',
                           do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Sleep until a watched directory changes, then rebuild.
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            cache=test_cache,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code is a bitmask: 1=build, 2=test (suppressed on travis), 4=post-test.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors and not args.travis:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
# --- end of embedded run_tests.py chunk; unrelated defcon test module follows ---
import unittest
import os
import glob
import tempfile
import shutil
import fs
import fs.copy
import fs.path
from defcon import Font, Glyph, LayerSet, Guideline
from defcon.errors import DefconError
from defcon.tools.notifications import NotificationCenter
from defcon.test.testTools import (
getTestFontPath, getTestFontCopyPath, makeTestFontCopy,
openTestFontAsFileSystem, closeTestFontAsFileSystem,
tearDownTestFontCopy)
from fontTools.ufoLib import UFOReader, UFOWriter, UFOFileStructure
import zipfile
import logging
from fontTools.misc.loggingTools import CapturingLogHandler
try:
from plistlib import load, dump
except ImportError:
from plistlib import readPlist as load, writePlist as dump
# Fixture: OpenType feature text exercising classes, multi-line and
# single-line feature blocks, and commented-out rules.  The string content
# is runtime data and is kept byte-for-byte unchanged.
testFeaturesText = """
@class1 = [a b c d];
feature liga {
sub f i by fi;
} liga;
@class2 = [x y z];
feature salt {
sub a by a.alt;
} salt; feature ss01 {sub x by x.alt} ss01;
feature ss02 {sub y by y.alt} ss02;
# feature calt {
# sub a b' by b.alt;
# } calt;
"""
class FontTest(unittest.TestCase):
    def __init__(self, methodName):
        unittest.TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp

    def tearDown(self):
        # Remove the scratch copy of the test font if a test created one.
        if os.path.exists(getTestFontCopyPath()):
            tearDownTestFontCopy()
    def test_set_parent_data_in_glyph(self):
        """A glyph loaded from a font reports that font as its parent."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(id(glyph.getParent()), id(font))
def test_dispatcher(self):
font = Font()
self.assertIsInstance(font.dispatcher, NotificationCenter)
with self.assertRaises(AttributeError):
self.font.dispatcher = "foo"
    def test_newGlyph(self):
        """newGlyph creates a dirty glyph and marks the font dirty."""
        font = Font(getTestFontPath())
        glyph = font.newGlyph("NewGlyphTest")
        self.assertEqual(glyph.name, "NewGlyphTest")
        self.assertTrue(glyph.dirty)
        self.assertTrue(font.dirty)
        self.assertEqual(sorted(font.keys()), ["A", "B", "C", "NewGlyphTest"])

    def test_insertGlyph(self):
        """insertGlyph adds an externally constructed glyph to the font."""
        font = Font(getTestFontPath())
        glyph = Glyph()
        glyph.name = "NewGlyphTest"
        self.assertEqual(sorted(font.keys()), ["A", "B", "C"])
        font.insertGlyph(glyph)
        self.assertEqual(sorted(font.keys()), ["A", "B", "C", "NewGlyphTest"])

    def test_iter(self):
        """Iterating a font yields its glyphs; nested iteration is independent."""
        font = Font(getTestFontPath())
        self.assertEqual(sorted(glyph.name for glyph in font), ["A", "B", "C"])
        names = []
        for glyph1 in font:
            for glyph2 in font:
                names.append((glyph1.name, glyph2.name))
        self.assertEqual(sorted(names),
                         [("A", "A"), ("A", "B"), ("A", "C"),
                          ("B", "A"), ("B", "B"), ("B", "C"),
                          ("C", "A"), ("C", "B"), ("C", "C")])

    def test_getitem(self):
        """font[name] returns the glyph; a missing name raises KeyError."""
        font = Font(getTestFontPath())
        self.assertEqual(font["A"].name, "A")
        self.assertEqual(font["B"].name, "B")
        with self.assertRaises(KeyError):
            font["NotInFont"]

    def test_delitem(self):
        """del font[name] removes the glyph and drops its .glif file on save."""
        path = makeTestFontCopy()
        font = Font(path)
        del font["A"]
        self.assertTrue(font.dirty)
        font.newGlyph("NewGlyphTest")
        del font["NewGlyphTest"]
        self.assertEqual(sorted(font.keys()), ["B", "C"])
        self.assertEqual(len(font), 2)
        self.assertFalse("A" in font)
        font.save()
        fileNames = glob.glob(os.path.join(path, 'glyphs', '*.glif'))
        fileNames = [os.path.basename(fileName) for fileName in fileNames]
        self.assertEqual(sorted(fileNames), ["B_.glif", "C_.glif"])
        with self.assertRaises(KeyError):
            del font["NotInFont"]
    def test_delitem_glyph_not_dirty(self):
        """A glyph deleted on disk behind our back (glyph not dirty) is
        reported by testForExternalChanges and stays gone after save."""
        for ufo in (u"TestExternalEditing.ufo", u"TestExternalEditing.ufoz"):
            path = getTestFontPath(ufo)
            path = makeTestFontCopy(path)
            with Font(path) as font:
                font["A"]  # glyph = font["A"]
                # Simulate an external edit: remove the glif and its
                # contents.plist entry directly on the UFO filesystem.
                fileSystem = openTestFontAsFileSystem(path)
                glyphPath = fs.path.join("glyphs", "A_.glif")
                fileSystem.remove(glyphPath)
                contentsPath = fs.path.join("glyphs", "contents.plist")
                with fileSystem.open(contentsPath, "rb") as f:
                    plist = load(f)
                del plist["A"]
                with fileSystem.open(contentsPath, "wb") as f:
                    dump(plist, f)
                closeTestFontAsFileSystem(fileSystem, path)
                r = font.testForExternalChanges()
                self.assertEqual(r["deletedGlyphs"], ["A"])
                del font["A"]
                font.save()
                # NOTE(review): glyphPath is relative to the UFO filesystem;
                # the *_dirty variant below checks via fileSystem.exists —
                # confirm os.path.exists here inspects the intended file.
                self.assertFalse(os.path.exists(glyphPath))
            tearDownTestFontCopy(font.path)

    def test_delitem_glyph_dirty(self):
        """Same external-deletion scenario, but with the in-memory glyph
        marked dirty before the external edit."""
        for ufo in (u"TestExternalEditing.ufo", u"TestExternalEditing.ufoz"):
            path = getTestFontPath(ufo)
            path = makeTestFontCopy(path)
            with Font(path) as font:
                glyph = font["A"]
                glyph.dirty = True
                fileSystem = openTestFontAsFileSystem(path)
                glyphPath = fs.path.join("glyphs", "A_.glif")
                fileSystem.remove(glyphPath)
                contentsPath = fs.path.join("glyphs", "contents.plist")
                with fileSystem.open(contentsPath, "rb") as f:
                    plist = load(f)
                del plist["A"]
                with fileSystem.open(contentsPath, "wb") as f:
                    dump(plist, f)
                closeTestFontAsFileSystem(fileSystem, path)
                r = font.testForExternalChanges()
                self.assertEqual(r["deletedGlyphs"], ["A"])
                del font["A"]
                font.save()
                fileSystem = openTestFontAsFileSystem(path)
                self.assertFalse(fileSystem.exists(glyphPath))
                closeTestFontAsFileSystem(fileSystem, path)
            tearDownTestFontCopy(font.path)
    def test_len(self):
        """len(font) counts glyphs; 0 for an empty font."""
        font = Font(getTestFontPath())
        self.assertEqual(len(font), 3)
        font = Font()
        self.assertEqual(len(font), 0)

    def test_contains(self):
        """``name in font`` membership test, including empty fonts."""
        font = Font(getTestFontPath())
        self.assertTrue("A" in font)
        self.assertFalse("NotInFont" in font)
        font = Font()
        self.assertFalse("A" in font)

    def test_keys(self):
        """keys() tracks glyph additions and deletions."""
        font = Font(getTestFontPath())
        self.assertEqual(sorted(font.keys()), ["A", "B", "C"])
        del font["A"]
        self.assertEqual(sorted(font.keys()), ["B", "C"])
        font.newGlyph("A")
        self.assertEqual(sorted(font.keys()), ["A", "B", "C"])
        font = Font()
        self.assertEqual(font.keys(), set())
        font.newGlyph("A")
        self.assertEqual(sorted(font.keys()), ["A"])

    def test_path_get(self):
        """font.path is the opening path, or None for a new font."""
        path = getTestFontPath()
        font = Font(path)
        self.assertEqual(font.path, path)
        font = Font()
        self.assertIsNone(font.path)

    def test_path_set(self):
        """Assigning font.path repoints the font at a new location."""
        path1 = getTestFontPath()
        font = Font(path1)
        path2 = getTestFontPath("setPathTest.ufo")
        shutil.copytree(path1, path2)
        font.path = path2
        self.assertEqual(font.path, path2)
        shutil.rmtree(path2)

    def test_glyphsWithOutlines(self):
        """glyphsWithOutlines is stable before and after glyphs are loaded."""
        font = Font(getTestFontPath())
        self.assertEqual(sorted(font.glyphsWithOutlines), ["A", "B"])
        font = Font(getTestFontPath())
        # Force every glyph to load, then ask again.
        for glyph in font:
            pass
        self.assertEqual(sorted(font.glyphsWithOutlines), ["A", "B"])

    def test_componentReferences(self):
        """componentReferences maps base glyphs to the glyphs using them."""
        font = Font(getTestFontPath())
        self.assertEqual(font.componentReferences,
                         {"A": set(["C"]), "B": set(["C"])})

    def test_bounds(self):
        """font.bounds for the standard test font."""
        font = Font(getTestFontPath())
        self.assertEqual(font.bounds, (0, 0, 700, 700))

    # NOTE(review): method name has a typo ("controPoints"); kept as-is to
    # avoid renaming a discovered test method.
    def test_controPointsBounds(self):
        """font.controlPointBounds for the standard test font."""
        font = Font(getTestFontPath())
        self.assertEqual(font.controlPointBounds, (0, 0, 700, 700))
def test_beginSelfLayerSetNotificationObservation(self):
font = Font()
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.Changed", font.layers))
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.LayerAdded", font.layers))
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.LayerWillBeDeleted", font.layers))
font.layers.removeObserver(
observer=self, notification="LayerSet.Changed")
font.layers.removeObserver(
observer=self, notification="LayerSet.LayerAdded")
font.layers.removeObserver(
observer=self, notification="LayerSet.LayerWillBeDeleted")
font.layers.endSelfNotificationObservation()
font.beginSelfLayerSetNotificationObservation()
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.Changed", font.layers))
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.LayerAdded", font.layers))
self.assertTrue(font.dispatcher.hasObserver(
font, "LayerSet.LayerWillBeDeleted", font.layers))
    def test_endSelfLayerSetNotificationObservation(self):
        """endSelfLayerSetNotificationObservation removes all of the font's
        LayerSet observations."""
        font = Font()
        font.endSelfLayerSetNotificationObservation()
        self.assertFalse(font.dispatcher.hasObserver(
            font, "LayerSet.Changed", font.layers))
        self.assertFalse(font.dispatcher.hasObserver(
            font, "LayerSet.LayerAdded", font.layers))
        self.assertFalse(font.dispatcher.hasObserver(
            font, "LayerSet.LayerWillBeDeleted", font.layers))

    def test_layers(self):
        """font.layers is a LayerSet observed by the font."""
        font = Font(getTestFontPath())
        self.assertIsInstance(font.layers, LayerSet)
        self.assertEqual(font.layers.layerOrder,
                         ["public.default", "public.background", "Layer 1"])
        self.assertTrue(font.layers.hasObserver(font, "LayerSet.Changed"))
        self.assertTrue(font.layers.hasObserver(font, "LayerSet.LayerAdded"))
        self.assertTrue(font.layers.hasObserver(font,
                                                "LayerSet.LayerWillBeDeleted"))

    def test_font_observes_new_layer(self):
        """A newly created layer is observed by the font for glyph additions."""
        font = Font()
        font.layers.newLayer("test_layer")
        layer = font.layers["test_layer"]
        self.assertTrue(layer.hasObserver(font, "Layer.GlyphAdded"))

    def test_font_observes_loaded_layers(self):
        """Every layer loaded from disk is observed for glyph additions."""
        font = Font(getTestFontPath())
        for layername in font.layers.layerOrder:
            layer = font.layers[layername]
            self.assertTrue(layer.hasObserver(font, "Layer.GlyphAdded"))
    def test_glyphOrder(self):
        """glyphOrder follows glyph additions, deletions and renames."""
        font = Font(getTestFontPath())
        self.assertEqual(font.glyphOrder, [])
        font.glyphOrder = sorted(font.keys())
        self.assertEqual(font.glyphOrder, ["A", "B", "C"])
        layer = font.layers.defaultLayer
        layer.newGlyph("X")
        self.assertEqual(sorted(layer.keys()), ["A", "B", "C", "X"])
        self.assertEqual(font.glyphOrder, ["A", "B", "C", "X"])
        del layer["A"]
        self.assertEqual(font.glyphOrder, ["A", "B", "C", "X"])
        del layer["X"]
        self.assertEqual(font.glyphOrder, ["A", "B", "C"])
        layer["B"].name = "Y"
        self.assertEqual(font.glyphOrder, ["A", "Y", "C"])

    def test_updateGlyphOrder_none(self):
        """updateGlyphOrder() with no arguments leaves the order untouched."""
        font = Font(getTestFontPath())
        self.assertEqual(font.glyphOrder, [])
        font.updateGlyphOrder()
        self.assertEqual(font.glyphOrder, [])

    def test_updateGlyphOrder_add(self):
        """addedGlyph appends the new name to the order."""
        font = Font(getTestFontPath())
        self.assertEqual(font.glyphOrder, [])
        font.updateGlyphOrder(addedGlyph="test")
        self.assertEqual(font.glyphOrder, ["test"])

    def test_updateGlyphOrder_remove(self):
        """removedGlyph drops the name from the order."""
        font = Font(getTestFontPath())
        self.assertEqual(font.glyphOrder, [])
        font.glyphOrder = ["test"]
        self.assertEqual(font.glyphOrder, ["test"])
        font.updateGlyphOrder(removedGlyph="test")
        self.assertEqual(font.glyphOrder, [])

    def test_updateGlyphOrder_rename(self):
        """Passing both added and removed names replaces in place."""
        font = Font(getTestFontPath())
        self.assertEqual(font.glyphOrder, [])
        font.glyphOrder = sorted(font.keys())
        self.assertEqual(font.glyphOrder, ["A", "B", "C"])
        font.updateGlyphOrder(addedGlyph="new", removedGlyph="B")
        self.assertEqual(font.glyphOrder, ["A", "new", "C"])
    def test_guidelines(self):
        """font.guidelines can be assigned and read back as a list."""
        font = Font(getTestFontPath())
        self.assertEqual(font.guidelines, [])
        guideline1 = Guideline(guidelineDict={"x": 100})
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.guidelines = [guideline1, guideline2]
        self.assertEqual(font.guidelines, [guideline1, guideline2])

    def test_instantiateGuideline(self):
        """instantiateGuideline returns a Guideline, optionally from a dict."""
        font = Font(getTestFontPath())
        guideline = font.instantiateGuideline()
        self.assertIsInstance(guideline, Guideline)
        guideline = font.instantiateGuideline(guidelineDict={"x": 100})
        self.assertEqual(dict(guideline), {'x': 100})

    def test_beginSelfGuidelineNotificationObservation(self):
        """beginSelfGuidelineNotificationObservation registers the font as
        the guideline's Changed observer."""
        font = Font(getTestFontPath())
        guideline = font.instantiateGuideline()
        self.assertFalse(guideline.dispatcher.hasObserver(
            font, "Guideline.Changed", guideline))
        font.beginSelfGuidelineNotificationObservation(guideline)
        self.assertTrue(guideline.dispatcher.hasObserver(
            font, "Guideline.Changed", guideline))

    def test_endSelfGuidelineNotificationObservation(self):
        """endSelfGuidelineNotificationObservation removes the observation
        and clears the guideline's dispatcher."""
        font = Font(getTestFontPath())
        guideline = font.instantiateGuideline()
        font.beginSelfGuidelineNotificationObservation(guideline)
        self.assertTrue(guideline.hasObserver(
            font, "Guideline.Changed"))
        font.endSelfGuidelineNotificationObservation(guideline)
        self.assertIsNone(guideline.dispatcher)
        self.assertFalse(guideline.hasObserver(
            font, "Guideline.Changed"))

    def test_appendGuideline(self):
        """appendGuideline adds guidelines to the end of the list."""
        font = Font(getTestFontPath())
        guideline1 = Guideline(guidelineDict={"x": 100})
        font.appendGuideline(guideline1)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'x': 100}])
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.appendGuideline(guideline2)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'x': 100}, {'y': 200}])
        guideline3 = Guideline(guidelineDict={"y": 100})
        font.appendGuideline(guideline3)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'x': 100}, {'y': 200}, {'y': 100}])

    def test_insertGuideline(self):
        """insertGuideline honors the requested index."""
        font = Font(getTestFontPath())
        guideline1 = Guideline(guidelineDict={"x": 100})
        font.insertGuideline(0, guideline1)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'x': 100}])
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.insertGuideline(0, guideline2)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'y': 200}, {'x': 100}])
        guideline3 = Guideline(guidelineDict={"y": 100})
        font.insertGuideline(2, guideline3)
        self.assertEqual([dict(guideline) for guideline in font.guidelines], [{'y': 200}, {'x': 100}, {'y': 100}])

    def test_removeGuideline(self):
        """removeGuideline deletes exactly the given guideline."""
        font = Font(getTestFontPath())
        guideline1 = Guideline(guidelineDict={"x": 100})
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.guidelines = [guideline1, guideline2]
        font.removeGuideline(guideline1)
        self.assertEqual(font.guidelines, [guideline2])

    def test_guidelineIndex(self):
        """guidelineIndex returns each guideline's list position."""
        font = Font(getTestFontPath())
        guideline1 = Guideline(guidelineDict={"x": 100})
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.guidelines = [guideline1, guideline2]
        self.assertEqual(font.guidelineIndex(guideline1), 0)
        self.assertEqual(font.guidelineIndex(guideline2), 1)

    def test_clearGuidelines(self):
        """clearGuidelines empties the guideline list."""
        font = Font(getTestFontPath())
        guideline1 = Guideline(guidelineDict={"x": 100})
        guideline2 = Guideline(guidelineDict={"y": 200})
        font.guidelines = [guideline1, guideline2]
        self.assertEqual(font.guidelines, [guideline1, guideline2])
        font.clearGuidelines()
        self.assertEqual(font.guidelines, [])
    def test_save(self):
        """Saving in place writes dirty glyphs and keeps the file structure."""
        for ufo in (u"TestFont.ufo", u"TestFont.ufoz"):
            path = makeTestFontCopy(getTestFontPath(ufo))
            try:
                with Font(path) as font:
                    origFileStructure = font.ufoFileStructure
                    # Mark everything dirty so every glyph is rewritten.
                    for glyph in font:
                        glyph.dirty = True
                    font.save()
                    fileNames = sorted(
                        [
                            fs.path.basename(m.path)
                            for m in UFOReader(path).fs.glob("glyphs/*.glif")
                        ]
                    )
                    self.assertEqual(fileNames, ["A_.glif", "B_.glif", "C_.glif"])
                    self.assertEqual(origFileStructure, font.ufoFileStructure)
            finally:
                tearDownTestFontCopy(path)

    def test_save_as(self):
        """save(path) writes a copy and repoints the font at the new path."""
        for ufo in (u"TestFont.ufo", u"TestFont.ufoz"):
            path = getTestFontPath(ufo)
            font = Font(path)
            origFileStructure = font.ufoFileStructure
            saveAsPath = getTestFontCopyPath(path)
            self.assertFalse(os.path.exists(saveAsPath))
            font.save(saveAsPath)
            try:
                fileNames = sorted(
                    [
                        fs.path.basename(m.path)
                        for m in UFOReader(saveAsPath).fs.glob("glyphs/*.glif")
                    ]
                )
                self.assertEqual(fileNames, ["A_.glif", "B_.glif", "C_.glif"])
                self.assertEqual(font.path, saveAsPath)
                self.assertEqual(origFileStructure, font.ufoFileStructure)
            finally:
                font.close()
                tearDownTestFontCopy(saveAsPath)
def test_save_same_path(self):
for ufo in (u"TestFont.ufo", u"TestFont.ufoz"):
path = makeTestFontCopy(getTestFontPath(ufo))
isZip = zipfile.is_zipfile(path)
font = Font(path)
try:
font = Font(path)
font.save(path)
if isZip:
self.assertTrue(zipfile.is_zipfile(path))
else:
self.assertTrue(os.path.isdir(path))
finally:
font.close()
tearDownTestFontCopy(path)
    def test_save_same_path_different_structure(self):
        """In-place save with a different structure (zip<->package) raises."""
        for ufo in ("TestFont.ufo", "TestFont.ufoz"):
            path = makeTestFontCopy(getTestFontPath(ufo))
            isZip = zipfile.is_zipfile(path)
            with Font(path) as font:
                with self.assertRaisesRegex(
                    DefconError,
                    "Can't save font in-place with a different structure"
                ):
                    font.save(path, structure="package" if isZip else "zip")
            tearDownTestFontCopy(path)

    def test_save_new_font_without_path(self):
        """Saving a brand-new font with no path raises DefconError."""
        font = Font()
        msg = "Can't save new font without a 'path'"
        with self.assertRaisesRegex(DefconError, msg):
            font.save()

    # NOTE(review): method name has a typo ("exsisting"); kept as-is to
    # avoid renaming a discovered test method.
    def test_save_new_font_to_exsisting_directory(self):
        """A new font may be saved over an existing UFO directory/zip."""
        for ufo in ("TestFont.ufo", "TestFont.ufoz"):
            path = makeTestFontCopy(getTestFontPath(ufo))
            font = Font()
            try:
                self.assertTrue(os.path.exists(path))
                font.save(path)
                self.assertTrue(os.path.isdir(path))
            finally:
                font.close()
                tearDownTestFontCopy(path)

    def test_save_ufoz(self):
        """save(..., structure='zip') writes a valid .ufoz and repoints
        the font at it."""
        path = getTestFontPath()
        tmpdir = tempfile.mkdtemp()
        dest = os.path.join(tmpdir, "TestFont.ufoz")
        font = Font(path)
        try:
            self.assertFalse(os.path.exists(dest))
            self.assertEqual(font.path, path)
            font.save(dest, structure="zip")
            self.assertTrue(os.path.exists(dest))
            self.assertTrue(zipfile.is_zipfile(dest))
            self.assertEqual(font.path, dest)
            self.assertEqual(font.ufoFileStructure, UFOFileStructure.ZIP)
            fileNames = sorted(
                [
                    fs.path.basename(m.path)
                    for m in UFOReader(dest).fs.glob("glyphs/*.glif")
                ]
            )
            self.assertEqual(fileNames, ["A_.glif", "B_.glif", "C_.glif"])
        finally:
            font.close()
            shutil.rmtree(tmpdir)
    def test_save_new_font_to_existing_file(self):
        """Saving over an existing plain file replaces it with a UFO package."""
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            pass
        path = tmp.name
        self.assertTrue(os.path.exists(path))
        try:
            font = Font()
            font.save(path)
            self.assertTrue(os.path.isdir(path))
        finally:
            if os.path.isfile(path):
                os.remove(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)

    def test_new_font_format(self):
        """A new, unsaved font has no UFO format version yet."""
        font = Font()
        self.assertEqual(font.ufoFormatVersion, None)
        self.assertEqual(font.ufoFormatVersionTuple, None)

    def test_save_in_place_different_format(self):
        """Saving in place with formatVersion=2 downgrades a UFO3 font."""
        path = makeTestFontCopy()
        font = Font(path)
        self.assertEqual(font.ufoFormatVersion, 3)
        self.assertEqual(font.ufoFormatVersionTuple, (3, 0))
        font.save(formatVersion=2)
        self.assertEqual(font.ufoFormatVersion, 2)
        self.assertEqual(font.ufoFormatVersionTuple, (2, 0))

    def test_save_in_place_invalid_ufo(self):
        """Saving a UFO with a missing layercontents.plist logs an error
        but restores the file, after which the font loads and saves cleanly."""
        path = makeTestFontCopy()
        font = Font(path)
        layercontents = os.path.join(path, "layercontents.plist")
        os.remove(layercontents)
        self.assertFalse(os.path.exists(layercontents))
        logger = logging.getLogger("defcon.objects.font")
        with CapturingLogHandler(logger, level="ERROR") as captor:
            font.save()
        captor.assertRegex("Invalid ufo found")
        self.assertTrue(os.path.exists(layercontents))
        font = Font(path)
        _ = font.layers
        font.save()

    def test_save_ufo3z_as_ufo2(self):
        # https://github.com/robotools/defcon/issues/296
        font = Font()
        font.layers.defaultLayer.name = "a-custom-layer-name"
        font.newLayer("a-new-layer")
        with tempfile.TemporaryDirectory() as root:
            path_ufo3 = os.path.join(root, "three.ufo")
            font.save(path_ufo3, formatVersion=3, structure="zip")
            font.close()
            path_ufo2 = os.path.join(root, "two.ufo")
            with Font(path_ufo3) as font_ufo2:
                font_ufo2.save(path_ufo2, formatVersion=2)
    def test_testForExternalChanges(self):
        """testForExternalChanges flags only the sub-files that changed on
        disk, and reports clean again after a save."""
        for ufo in (u"TestExternalEditing.ufo", u"TestExternalEditing.ufoz"):
            path = getTestFontPath(ufo)
            path = makeTestFontCopy(path)
            with Font(path) as font:
                # load all the objects so that they get stamped
                font.info  # i = font.info
                k = font.kerning
                font.groups  # g = font.groups
                font.lib  # l = font.lib
                font["A"]  # g = font["A"]
                d = font.testForExternalChanges()
                self.assertFalse(d["info"])
                self.assertFalse(d["kerning"])
                self.assertFalse(d["groups"])
                self.assertFalse(d["lib"])
                # make a simple change to the kerning data
                fileSystem = openTestFontAsFileSystem(font.path)
                path = u"kerning.plist"
                t = fileSystem.readbytes(path)
                t += b"<!-- test -->"
                fileSystem.writebytes(path, t)
                # Back-date the in-memory stamp so the on-disk file looks newer.
                k._dataOnDiskTimeStamp -= 1
                closeTestFontAsFileSystem(fileSystem, font.path)
                d = font.testForExternalChanges()
                self.assertTrue(d["kerning"])
                self.assertFalse(d["groups"])
                self.assertFalse(d["info"])
                self.assertFalse(d["lib"])
                # save the kerning data and test again
                font.kerning.dirty = True
                font.save()
                d = font.testForExternalChanges()
                self.assertFalse(d["kerning"])
                self.assertFalse(d["groups"])
                self.assertFalse(d["info"])
                self.assertFalse(d["lib"])
            tearDownTestFontCopy(font.path)
def test_reloadInfo(self):
    """Editing fontinfo.plist on disk then calling reloadInfo must update
    the already-loaded Info object in place."""
    font = Font(getTestFontPath(u"TestExternalEditing.ufo"))
    info = font.info
    info_path = os.path.join(font.path, "fontinfo.plist")
    with open(info_path, "r") as infile:
        text = infile.read()
    text = text.replace("<integer>750</integer>", "<integer>751</integer>")
    with open(info_path, "w") as outfile:
        outfile.write(text)
    self.assertEqual(info.ascender, 750)
    font.reloadInfo()
    self.assertEqual(info.ascender, 751)
    # Restore the file so the shared test font is left unmodified.
    text = text.replace("<integer>751</integer>", "<integer>750</integer>")
    with open(info_path, "w") as outfile:
        outfile.write(text)
def test_reloadKerning(self):
    """Editing kerning.plist on disk then calling reloadKerning must update
    the already-loaded Kerning object in place."""
    font = Font(getTestFontPath(u"TestExternalEditing.ufo"))
    kerning = font.kerning
    kerning_path = os.path.join(font.path, "kerning.plist")
    with open(kerning_path, "r") as infile:
        text = infile.read()
    text = text.replace("<integer>-100</integer>", "<integer>-101</integer>")
    with open(kerning_path, "w") as outfile:
        outfile.write(text)
    self.assertEqual(list(kerning.items()), [(("A", "A"), -100)])
    font.reloadKerning()
    self.assertEqual(list(kerning.items()), [(("A", "A"), -101)])
    # Restore the file so the shared test font is left unmodified.
    text = text.replace("<integer>-101</integer>", "<integer>-100</integer>")
    with open(kerning_path, "w") as outfile:
        outfile.write(text)
def test_reloadGroups(self):
    """Editing groups.plist on disk then calling reloadGroups must update
    the already-loaded Groups object in place."""
    font = Font(getTestFontPath(u"TestExternalEditing.ufo"))
    groups = font.groups
    groups_path = os.path.join(font.path, "groups.plist")
    with open(groups_path, "r") as infile:
        text = infile.read()
    text = text.replace("<key>TestGroup</key>", "<key>XXX</key>")
    with open(groups_path, "w") as outfile:
        outfile.write(text)
    self.assertEqual(list(groups.keys()), ["TestGroup"])
    font.reloadGroups()
    self.assertEqual(list(groups.keys()), ["XXX"])
    # Restore the file so the shared test font is left unmodified.
    text = text.replace("<key>XXX</key>", "<key>TestGroup</key>")
    with open(groups_path, "w") as outfile:
        outfile.write(text)
def test_reloadLib(self):
    """Editing lib.plist on disk then calling reloadLib must update the
    already-loaded Lib object in place."""
    font = Font(getTestFontPath(u"TestExternalEditing.ufo"))
    lib = font.lib
    lib_path = os.path.join(font.path, "lib.plist")
    with open(lib_path, "r") as infile:
        text = infile.read()
    text = text.replace("<key>org.robofab.glyphOrder</key>",
                        "<key>org.robofab.glyphOrder.XXX</key>")
    with open(lib_path, "w") as outfile:
        outfile.write(text)
    self.assertEqual(list(lib.keys()), ["org.robofab.glyphOrder"])
    font.reloadLib()
    self.assertEqual(list(lib.keys()), ["org.robofab.glyphOrder.XXX"])
    # Restore the file so the shared test font is left unmodified.
    text = text.replace("<key>org.robofab.glyphOrder.XXX</key>",
                        "<key>org.robofab.glyphOrder</key>")
    with open(lib_path, "w") as outfile:
        outfile.write(text)
def test_reloadGlyphs(self):
    """Editing a .glif on disk then calling reloadGlyphs must update the
    already-loaded Glyph object in place."""
    font = Font(getTestFontPath(u"TestExternalEditing.ufo"))
    glyph = font["A"]
    glif_path = os.path.join(font.path, "glyphs", "A_.glif")
    with open(glif_path, "r") as infile:
        text = infile.read()
    text = text.replace('<advance width="700"/>', '<advance width="701"/>')
    with open(glif_path, "w") as outfile:
        outfile.write(text)
    self.assertEqual(glyph.width, 700)
    self.assertEqual(len(glyph), 2)
    font.reloadGlyphs(["A"])
    self.assertEqual(glyph.width, 701)
    self.assertEqual(len(glyph), 2)
    # Restore the file so the shared test font is left unmodified.
    text = text.replace('<advance width="701"/>', '<advance width="700"/>')
    with open(glif_path, "w") as outfile:
        outfile.write(text)
def test_splitFeaturesForConversion(self):
    """The feature text splitter must return the prefix (classes before the
    first feature) and a list of (tag, text) chunks."""
    font = Font()
    expected_prefix = "\n@class1 = [a b c d];\n"
    expected_features = [
        ("liga", "\nfeature liga {\n sub f i by fi;\n} liga;\n\n"
                 "@class2 = [x y z];\n"),
        ("salt", "\nfeature salt {\n sub a by a.alt;\n} salt; "
                 "feature ss01 {sub x by x.alt} ss01;\n"),
        ("ss02", "\nfeature ss02 {sub y by y.alt} ss02;\n\n"
                 "# feature calt {\n# sub a b' by b.alt;\n# } calt;\n"),
    ]
    self.assertEqual(
        font._splitFeaturesForConversion(testFeaturesText),
        (expected_prefix, expected_features),
    )
def test_glyph_name_change(self):
    """Renaming a glyph updates the font's key set and dirties the font."""
    font = Font(getTestFontPath())
    font["A"].name = "NameChangeTest"
    expected = ["B", "C", "NameChangeTest"]
    self.assertEqual(sorted(font.keys()), expected)
    self.assertTrue(font.dirty)
def test_glyph_unicodes_changed(self):
    """Changing glyph unicodes keeps the font's unicodeData map in sync."""
    font = Font(getTestFontPath())
    font["A"].unicodes = [123, 456]
    self.assertEqual(font.unicodeData[123], ["A"])
    self.assertEqual(font.unicodeData[456], ["A"])
    self.assertEqual(font.unicodeData[66], ["B"])
    # A newly created glyph that claims an existing code point is listed
    # ahead of the original owner.
    font = Font(getTestFontPath())
    new_glyph = font.newGlyph("test")
    new_glyph.unicodes = [65]
    self.assertEqual(font.unicodeData[65], ["test", "A"])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Support for Linux namespaces"""
from __future__ import print_function
import ctypes
import ctypes.util
import errno
import os
import signal
# Note: We avoid cros_build_lib here as that's a "large" module and we want
# to keep this "light" and standalone. The subprocess usage in here is also
# simple by design -- if it gets more complicated, we should look at using
# the cros_build_lib.run helper.
import subprocess
import sys
import six
from chromite.lib import locking
from chromite.lib import osutils
from chromite.lib import process_util
from chromite.lib import proctitle
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# Bit flags accepted by unshare(2)/setns(2); values mirror <linux/sched.h>.
CLONE_FS = 0x00000200      # share filesystem info (cwd, root, umask)
CLONE_FILES = 0x00000400   # share the file descriptor table
CLONE_NEWNS = 0x00020000   # new mount namespace
CLONE_NEWUTS = 0x04000000  # new UTS (hostname/domainname) namespace
CLONE_NEWIPC = 0x08000000  # new IPC namespace
CLONE_NEWUSER = 0x10000000 # new user namespace
CLONE_NEWPID = 0x20000000  # new pid namespace
CLONE_NEWNET = 0x40000000  # new network namespace
def SetNS(fd, nstype):
  """Binding to the Linux setns system call. See setns(2) for details.

  Args:
    fd: An open file descriptor or path to one (e.g. a /proc/<pid>/ns/* file).
    nstype: Namespace to enter; one of CLONE_*.

  Raises:
    OSError: if setns failed.
  """
  fp = None
  try:
    # This module asserts Python 3.6+ at import time, so a plain |str|
    # check replaces the legacy six.string_types one.
    if isinstance(fd, str):
      # Accept a path; keep the file object alive until the call is done.
      fp = open(fd)
      fd = fp.fileno()

    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    if libc.setns(ctypes.c_int(fd), ctypes.c_int(nstype)) != 0:
      e = ctypes.get_errno()
      raise OSError(e, os.strerror(e))
  finally:
    if fp is not None:
      fp.close()
def Unshare(flags):
  """Binding to the Linux unshare system call. See unshare(2) for details.

  Args:
    flags: Namespaces to unshare; bitwise OR of CLONE_* flags.

  Raises:
    OSError: if unshare failed.
  """
  libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
  ret = libc.unshare(ctypes.c_int(flags))
  if ret != 0:
    errnum = ctypes.get_errno()
    raise OSError(errnum, os.strerror(errnum))
def _ReapChildren(pid):
  """Reap all children that get reparented to us until we see |pid| exit.

  Args:
    pid: The main child to watch for.

  Returns:
    The wait status of the |pid| child.
  """
  pid_status = 0
  while True:
    try:
      wpid, status = os.wait()
    except OSError as e:
      # ECHILD: every child has been reaped; we're done.
      if e.errno == errno.ECHILD:
        return pid_status
      if e.errno != errno.EINTR:
        raise
    else:
      if wpid == pid:
        # Remember the main child's status so the caller can exit with it.
        pid_status = status
def _SafeTcSetPgrp(fd, pgrp):
  """Set |pgrp| as the controller of the tty |fd|, if we currently own it."""
  try:
    curr_pgrp = os.tcgetpgrp(fd)
  except OSError as e:
    # ENOTTY: |fd| isn't attached to a terminal, so there's nothing to do.
    if e.errno == errno.ENOTTY:
      return
    raise

  # Only the current owner may hand the terminal over; anyone else would be
  # stopped by the kernel with SIGTTOU, which would stop the whole group.
  if curr_pgrp == os.getpgrp():
    os.tcsetpgrp(fd, pgrp)
def _ForwardToChildPid(pid, signal_to_forward):
  """Install a handler that re-sends |signal_to_forward| to |pid|."""
  def _Forward(signum, _frame):
    os.kill(pid, signum)
  signal.signal(signal_to_forward, _Forward)
def CreatePidNs():
  """Start a new pid namespace

  This will launch all the right manager processes.  The child that returns
  will be isolated in a new pid namespace.

  If functionality is not available, then it will return w/out doing anything.

  A note about the processes generated as a result of calling this function:
  You call CreatePidNs() in pid X
  - X launches Pid Y,
    - Pid X will now do nothing but wait for Pid Y to finish and then sys.exit()
      with that return code
  - Y launches Pid Z
    - Pid Y will now do nothing but wait for Pid Z to finish and then
      sys.exit() with that return code
    - **Pid Z returns from CreatePidNs**. So, the caller of this function
      continues in a different process than the one that made the call.
  - All SIGTERM/SIGINT signals are forwarded down from pid X to pid Z to
    handle.
  - SIGKILL will only kill pid X, and leak Pid Y and Z.

  Returns:
    The last pid outside of the namespace. (i.e., pid X)
  """
  first_pid = os.getpid()

  try:
    # First create the namespace.
    Unshare(CLONE_NEWPID)
  except OSError as e:
    if e.errno == errno.EINVAL:
      # For older kernels, or the functionality is disabled in the config,
      # return silently. We don't want to hard require this stuff.
      return first_pid
    else:
      # For all other errors, abort. They shouldn't happen.
      raise

  # Used to make sure process groups are in the right state before we try to
  # forward the controlling terminal.
  lock = locking.PipeLock()

  # Now that we're in the new pid namespace, fork. The parent is the master
  # of it in the original namespace, so it only monitors the child inside it.
  # It is only allowed to fork once too.
  pid = os.fork()
  if pid:
    # This branch is pid X, outside the namespace.
    proctitle.settitle('pid ns', 'external init')

    # We forward termination signals to the child and trust the child to respond
    # sanely. Later, ExitAsStatus propagates the exit status back up.
    _ForwardToChildPid(pid, signal.SIGINT)
    _ForwardToChildPid(pid, signal.SIGTERM)

    # Forward the control of the terminal to the child so it can manage input.
    _SafeTcSetPgrp(sys.stdin.fileno(), pid)

    # Signal our child it can move forward.
    lock.Post()
    del lock

    # Reap the children as the parent of the new namespace.
    process_util.ExitAsStatus(_ReapChildren(pid))
  else:
    # This branch is pid Y, the init of the new pid namespace.
    # Make sure to unshare the existing mount point if needed. Some distros
    # create shared mount points everywhere by default.
    try:
      osutils.Mount('none', '/proc', 0, osutils.MS_PRIVATE | osutils.MS_REC)
    except OSError as e:
      if e.errno != errno.EINVAL:
        raise

    # The child needs its own proc mount as it'll be different.
    osutils.Mount('proc', '/proc', 'proc',
                  osutils.MS_NOSUID | osutils.MS_NODEV | osutils.MS_NOEXEC |
                  osutils.MS_RELATIME)

    # Wait for our parent to finish initialization.
    lock.Wait()
    del lock

    # Resetup the locks for the next phase.
    lock = locking.PipeLock()

    pid = os.fork()
    if pid:
      # This branch is pid Y acting as init; the grandchild falls through.
      proctitle.settitle('pid ns', 'init')

      # We forward termination signals to the child and trust the child to
      # respond sanely. Later, ExitAsStatus propagates the exit status back up.
      _ForwardToChildPid(pid, signal.SIGINT)
      _ForwardToChildPid(pid, signal.SIGTERM)

      # Now that we're in a new pid namespace, start a new process group so that
      # children have something valid to use. Otherwise getpgrp/etc... will get
      # back 0 which tends to confuse -- you can't setpgrp(0) for example.
      os.setpgrp()

      # Forward the control of the terminal to the child so it can manage input.
      _SafeTcSetPgrp(sys.stdin.fileno(), pid)

      # Signal our child it can move forward.
      lock.Post()
      del lock

      # Watch all of the children. We need to act as the master inside the
      # namespace and reap old processes.
      process_util.ExitAsStatus(_ReapChildren(pid))

  # NOTE(review): only the grandchild (pid Z) should reach here -- both
  # parents presumably exit inside process_util.ExitAsStatus; confirm.
  # Wait for our parent to finish initialization.
  lock.Wait()
  del lock

  # Create a process group for the grandchild so it can manage things
  # independent of the init process.
  os.setpgrp()

  # The grandchild will return and take over the rest of the sdk steps.
  return first_pid
def CreateNetNs():
  """Start a new net namespace

  We will bring up the loopback interface, but that is all.

  If functionality is not available, then it will return w/out doing anything.
  """
  # The net namespace was added in 2.6.24 and may be disabled in the kernel.
  try:
    Unshare(CLONE_NEWNET)
  except OSError as e:
    if e.errno != errno.EINVAL:
      # For all other errors, abort. They shouldn't happen.
      raise
    # EINVAL: kernel lacks net-namespace support; silently skip.
    return

  # Since we've unshared the net namespace, we need to bring up loopback.
  # The kernel automatically adds the various ip addresses, so skip that.
  try:
    subprocess.call(['ip', 'link', 'set', 'up', 'lo'])
  except OSError as e:
    if e.errno != errno.ENOENT:
      raise
    print('warning: could not bring up loopback for network; '
          'install the iproute2 package', file=sys.stderr)
def SimpleUnshare(mount=True, uts=True, ipc=True, net=False, pid=False):
  """Simpler helper for setting up namespaces quickly.

  If support for any namespace type is not available, we'll silently skip it.

  Args:
    mount: Create a mount namespace.
    uts: Create a UTS namespace.
    ipc: Create an IPC namespace.
    net: Create a net namespace.
    pid: Create a pid namespace.

  Raises:
    OSError: if unsharing fails for any reason other than missing kernel
      support (EINVAL) for an optional namespace type.
  """
  # The mount namespace is the only one really guaranteed to exist --
  # it's been supported forever and it cannot be turned off.
  if mount:
    Unshare(CLONE_NEWNS)

  # The UTS namespace was added 2.6.19 and may be disabled in the kernel.
  if uts:
    try:
      Unshare(CLONE_NEWUTS)
    except OSError as e:
      # Bug fix: this previously read `pass`, making the errno check a no-op
      # that swallowed *every* error.  Only EINVAL (no kernel support) is
      # expected per the docstring; anything else must propagate, matching
      # the error handling in CreateNetNs/CreatePidNs.
      if e.errno != errno.EINVAL:
        raise

  # The IPC namespace was added 2.6.19 and may be disabled in the kernel.
  if ipc:
    try:
      Unshare(CLONE_NEWIPC)
    except OSError as e:
      if e.errno != errno.EINVAL:
        raise

  if net:
    CreateNetNs()

  if pid:
    CreatePidNs()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""External script for generating Cloud Endpoints related files.
The gen_discovery_doc subcommand takes a list of fully qualified ProtoRPC
service names and calls a cloud service which generates a discovery document in
REST or RPC style.
Example:
endpointscfg.py gen_discovery_doc -o . -f rest postservice.GreetingsV1
The gen_client_lib subcommand takes a discovery document and calls a cloud
service to generate a client library for a target language (currently just Java)
Example:
endpointscfg.py gen_client_lib java -o . greetings-v0.1.discovery
The get_client_lib subcommand does both of the above commands at once.
Example:
endpointscfg.py get_client_lib java -o . -f rest postservice.GreetingsV1
The gen_api_config command outputs an .api configuration file for a service.
Example:
endpointscfg.py gen_api_config -o . -a /path/to/app \
--hostname myhost.appspot.com postservice.GreetingsV1
"""
from __future__ import with_statement
import collections
import contextlib
try:
import json
except ImportError:
import simplejson as json
import os
import re
import sys
import urllib
import urllib2
from protorpc import remote
from google.appengine.ext.endpoints import api_config
# Hosted service that turns an API config into a discovery document.
DISCOVERY_DOC_BASE = ('https://webapis-discovery.appspot.com/_ah/api/'
                      'discovery/v1/apis/generate/')
# Hosted service that turns a discovery document into a client library zip.
CLIENT_LIBRARY_BASE = 'https://google-api-client-libraries.appspot.com/generate'
class ServerRequestException(Exception):
  """Exception for problems with the request to a server."""

  def __init__(self, http_error):
    """Create a ServerRequestException from a given urllib2.HTTPError.

    Args:
      http_error: The HTTPError that the ServerRequestException will be
        based on.
    """
    error_details = None
    if http_error.fp:
      # The server may return a structured JSON error body; pull out the
      # individual error messages when it does.
      try:
        error_body = json.load(http_error.fp)
        error_details = ['%s: %s' % (detail['message'], detail['debug_info'])
                         for detail in error_body['error']['errors']]
      except (ValueError, TypeError, KeyError):
        # Missing or malformed body: fall back to the plain HTTP error.
        pass
    if error_details:
      message = ('HTTP %s (%s) error when communicating with URL: %s. '
                 'Details: %s' % (http_error.code, http_error.reason,
                                  http_error.filename, error_details))
    else:
      message = ('HTTP %s (%s) error when communicating with URL: %s.' %
                 (http_error.code, http_error.reason,
                  http_error.filename))
    super(ServerRequestException, self).__init__(message)
def _WriteFile(output_path, name, content):
  """Write given content to a file in a given directory.

  Args:
    output_path: The directory to store the file in.
    name: The name of the file to store the content in.
    content: The content to write to the file.

  Returns:
    The full path to the written file.
  """
  target = os.path.join(output_path, name)
  with open(target, 'wb') as out:
    out.write(content)
  return target
def GenApiConfig(service_class_names, generator=None, hostname=None):
  """Write an API configuration for endpoints annotated ProtoRPC services.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service classes.
    generator: An object that produces API config strings via its
      pretty_print_config_to_json method.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.

  Raises:
    TypeError: If any service classes don't inherit from remote.Service.
    messages.DefinitionNotFoundError: If a service can't be found.

  Returns:
    A map from service names to a string containing the API configuration of the
    service in JSON format.
  """
  # Bucket services by (api name, api version) so multi-class APIs come out
  # as one configuration.
  api_service_map = collections.OrderedDict()
  for service_class_name in service_class_names:
    module_name, base_service_class_name = service_class_name.rsplit('.', 1)
    module = __import__(module_name, fromlist=base_service_class_name)
    service = getattr(module, base_service_class_name)
    if not (isinstance(service, type) and issubclass(service, remote.Service)):
      raise TypeError('%s is not a ProtoRPC service' % service_class_name)
    key = (service.api_info.name, service.api_info.version)
    api_service_map.setdefault(key, []).append(service)

  generator = generator or api_config.ApiConfigGenerator()
  service_map = collections.OrderedDict()
  for api_info, services in api_service_map.iteritems():
    # The first service's own hostname wins over the fallback, if set.
    hostname = services[0].api_info.hostname or hostname
    service_map['%s-%s' % api_info] = generator.pretty_print_config_to_json(
        services, hostname=hostname)
  return service_map
def GenDiscoveryDoc(service_class_names, doc_format,
                    output_path, hostname=None):
  """Write discovery documents generated from a cloud service to file.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service names.
    doc_format: The requested format for the discovery doc. (rest|rpc)
    output_path: The directory to output the discovery docs to.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.

  Raises:
    ServerRequestException: If fetching the generated discovery doc fails.

  Returns:
    A list of discovery doc filenames.
  """
  output_files = []
  service_configs = GenApiConfig(service_class_names, hostname=hostname)
  for api_name_version, config in service_configs.iteritems():
    # The generator service expects a JSON body of the form {"config": ...}.
    body = json.dumps({'config': config}, indent=2, sort_keys=True)
    request = urllib2.Request(DISCOVERY_DOC_BASE + doc_format, body)
    request.add_header('content-type', 'application/json')

    try:
      with contextlib.closing(urllib2.urlopen(request)) as response:
        content = response.read()
        discovery_name = api_name_version + '.discovery'
        output_files.append(_WriteFile(output_path, discovery_name, content))
    except urllib2.HTTPError, error:
      # Re-raise with any server-provided error details attached.
      raise ServerRequestException(error)

  return output_files
def GenClientLib(discovery_path, language, output_path):
  """Write a client library from a discovery doc, using a cloud service to file.

  Args:
    discovery_path: Path to the discovery doc used to generate the client
      library.
    language: The client library language to generate. (java)
    output_path: The directory to output the client library zip to.

  Raises:
    IOError: If reading the discovery doc fails.
    ServerRequestException: If fetching the generated client library fails.

  Returns:
    The path to the zipped client library.
  """
  with open(discovery_path) as f:
    discovery_doc = f.read()

  # Name the zip after the discovery file: foo.discovery -> foo.zip.
  client_name = re.sub(r'\.discovery$', '.zip',
                       os.path.basename(discovery_path))

  # Bug fix: the docstring promises the zip path, and _GenClientLibCallback
  # prints this function's result, but the return was previously dropped
  # (so "written to None" was printed).  Propagate it.
  return _GenClientLibFromContents(discovery_doc, language, output_path,
                                   client_name)
def _GenClientLibFromContents(discovery_doc, language, output_path,
                              client_name):
  """Write a client library from a discovery doc, using a cloud service to file.

  Args:
    discovery_doc: A string, the contents of the discovery doc used to
      generate the client library.
    language: A string, the client library language to generate. (java)
    output_path: A string, the directory to output the client library zip to.
    client_name: A string, the filename used to save the client lib.

  Raises:
    IOError: If reading the discovery doc fails.
    ServerRequestException: If fetching the generated client library fails.

  Returns:
    The path to the zipped client library.
  """
  # POST the discovery doc to the generator service as form data.
  body = urllib.urlencode({'lang': language, 'content': discovery_doc})
  request = urllib2.Request(CLIENT_LIBRARY_BASE, body)
  try:
    with contextlib.closing(urllib2.urlopen(request)) as response:
      content = response.read()
      return _WriteFile(output_path, client_name, content)
  except urllib2.HTTPError, error:
    # Re-raise with any server-provided error details attached.
    raise ServerRequestException(error)
def GetClientLib(service_class_names, doc_format, language,
                 output_path, hostname=None):
  """Fetch discovery documents and client libraries from a cloud service.

  Args:
    service_class_names: A list of fully qualified ProtoRPC service names.
    doc_format: The requested format for the discovery doc. (rest|rpc)
    language: The client library language to generate. (java)
    output_path: The directory to output the discovery docs to.
    hostname: A string hostname which will be used as the default version
      hostname. If no hostname is specified in the @endpoints.api decorator,
      this value is the fallback. Defaults to None.

  Returns:
    A tuple (discovery_files, client_libs):
      discovery_files: A list of paths to discovery documents.
      client_libs: A list of paths to client libraries.
  """
  discovery_files = GenDiscoveryDoc(service_class_names, doc_format,
                                    output_path, hostname=hostname)
  # One client library per generated discovery doc.
  client_libs = [GenClientLib(discovery_path, language, output_path)
                 for discovery_path in discovery_files]
  return discovery_files, client_libs
def _GenApiConfigCallback(args, api_func=GenApiConfig):
  """Generate an api file.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    api_func: A function that generates and returns an API configuration
      for a list of services.
  """
  service_configs = api_func(args.service, hostname=args.hostname)

  for api_name_version, config in service_configs.iteritems():
    _WriteFile(args.output, api_name_version + '.api', config)
def _GetClientLibCallback(args,
                          client_func=GetClientLib):
  """Generate discovery docs and client libraries to files.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    client_func: A function that generates client libraries and stores them to
      files, accepting a list of service names, a discovery doc format, a client
      library language, and an output directory.
  """
  service_class_names, doc_format, language, output_path, hostname = (
      args.service, args.format, args.language, args.output, args.hostname)
  discovery_paths, client_paths = client_func(
      service_class_names, doc_format, language, output_path, hostname=hostname)

  # NOTE: Python 2 print statements -- this script predates Python 3.
  for discovery_path in discovery_paths:
    print 'API discovery document written to %s' % discovery_path

  for client_path in client_paths:
    print 'API client library written to %s' % client_path
def _GenDiscoveryDocCallback(args, discovery_func=GenDiscoveryDoc):
  """Generate discovery docs to files.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    discovery_func: A function that generates discovery docs and stores them to
      files, accepting a list of service names, a discovery doc format, and an
      output directory.
  """
  services, doc_format, output_path, hostname = (
      args.service, args.format, args.output, args.hostname)
  discovery_paths = discovery_func(services, doc_format,
                                   output_path, hostname=hostname)
  # NOTE: Python 2 print statement -- this script predates Python 3.
  for discovery_path in discovery_paths:
    print 'API discovery document written to %s' % discovery_path
def _GenClientLibCallback(args, client_func=GenClientLib):
  """Generate a client library to file.

  Args:
    args: An argparse.Namespace object to extract parameters from.
    client_func: A function that generates client libraries and stores them to
      files, accepting a path to a discovery doc, a client library language, and
      an output directory.
  """
  # discovery_doc is declared with nargs=1, hence the [0].
  discovery_path, language, output_path = (args.discovery_doc[0], args.language,
                                           args.output)
  client_path = client_func(discovery_path, language, output_path)
  # NOTE: Python 2 print statement -- this script predates Python 3.
  print 'API client library written to %s' % client_path
def MakeParser(prog):
  """Create an argument parser.

  Args:
    prog: The name of the program to use when outputting help text.

  Returns:
    An argparse.ArgumentParser built to specification.
  """
  import argparse

  def AddStandardOptions(parser, *args):
    """Add common endpoints options to a parser.

    Args:
      parser: The parser to add options to.
      *args: A list of option names to add. Possible names are: application,
        format, output, language, service, and discovery_doc.
    """
    if 'application' in args:
      parser.add_argument('-a', '--application', default='.',
                          help='The path to the Python App Engine App')
    if 'format' in args:
      parser.add_argument('-f', '--format', default='rest',
                          choices=['rest', 'rpc'],
                          help='The requested API protocol type')
    if 'hostname' in args:
      help_text = ('Default application hostname, if none is specified '
                   'for API service.')
      parser.add_argument('--hostname', help=help_text)
    if 'output' in args:
      parser.add_argument('-o', '--output', default='.',
                          help='The directory to store output files')
    if 'language' in args:
      parser.add_argument('language', choices=['java'],
                          help='The target output programming language')
    if 'service' in args:
      parser.add_argument('service', nargs='+',
                          help='Fully qualified service class name')
    if 'discovery_doc' in args:
      parser.add_argument('discovery_doc', nargs=1,
                          help='Path to the discovery document')

  parser = argparse.ArgumentParser(prog=prog)
  subparsers = parser.add_subparsers(title='subcommands')

  # Each subcommand stashes its handler in args.callback; main() invokes it.
  get_client_lib = subparsers.add_parser(
      'get_client_lib', help=('Generates discovery documents and client '
                              'libraries from service classes'))
  get_client_lib.set_defaults(callback=_GetClientLibCallback)
  AddStandardOptions(get_client_lib, 'application', 'format', 'hostname',
                     'output', 'language', 'service')

  gen_api_config = subparsers.add_parser(
      'gen_api_config', help=('Generates an .api file for the given service '
                              'classes'))
  gen_api_config.set_defaults(callback=_GenApiConfigCallback)
  AddStandardOptions(gen_api_config, 'application', 'hostname', 'output',
                     'service')

  gen_discovery_doc = subparsers.add_parser(
      'gen_discovery_doc',
      help='Generates discovery documents from service classes')
  gen_discovery_doc.set_defaults(callback=_GenDiscoveryDocCallback)
  AddStandardOptions(gen_discovery_doc, 'application', 'format', 'hostname',
                     'output', 'service')

  gen_client_lib = subparsers.add_parser(
      'gen_client_lib', help='Generates a client library from service classes')
  gen_client_lib.set_defaults(callback=_GenClientLibCallback)
  AddStandardOptions(gen_client_lib, 'output', 'language', 'discovery_doc')

  return parser
def main(argv):
  """Parse the command line and dispatch to the selected subcommand."""
  parser = MakeParser(argv[0])
  args = parser.parse_args(argv[1:])

  # Expose the App Engine app dir so service modules can be imported.
  app_dir = getattr(args, 'application', None)
  if app_dir is not None:
    sys.path.insert(0, os.path.abspath(app_dir))

  args.callback(args)
  return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| |
"""Support for Konnected devices."""
import asyncio
import copy
import hmac
import json
import logging
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from .config_flow import ( # Loading the config flow file will register the flow
CONF_DEFAULT_OPTIONS,
CONF_IO,
CONF_IO_BIN,
CONF_IO_DIG,
CONF_IO_SWI,
OPTIONS_SCHEMA,
)
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_DISCOVERY,
CONF_INVERSE,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
DOMAIN,
PIN_TO_ZONE,
STATE_HIGH,
STATE_LOW,
UNDO_UPDATE_LISTENER,
UPDATE_ENDPOINT,
ZONE_TO_PIN,
ZONES,
)
from .errors import CannotConnect
from .handlers import HANDLERS
from .panel import AlarmPanel
_LOGGER = logging.getLogger(__name__)
def ensure_pin(value):
    """Check if valid pin and coerce to string."""
    if value is None:
        raise vol.Invalid("pin value is None")
    pin = str(value)
    if PIN_TO_ZONE.get(pin) is None:
        raise vol.Invalid("pin not valid")
    return pin
def ensure_zone(value):
    """Check if valid zone and coerce to string."""
    if value is None:
        raise vol.Invalid("zone value is None")
    # Bug fix: the original condition `str(value) not in ZONES is None` is a
    # chained comparison -- `(str(value) not in ZONES) and (ZONES is None)` --
    # which is always False, so invalid zones were silently accepted.
    if str(value) not in ZONES:
        raise vol.Invalid("zone not valid")
    return str(value)
def import_device_validator(config):
    """Validate zones and reformat for import."""
    config = copy.deepcopy(config)

    # Replace legacy pin numbers with zone names, remembering each zone's
    # IO type so it can be recorded in the default options.
    io_cfgs = {}
    for platform_key, io_type in (
        (CONF_BINARY_SENSORS, CONF_IO_BIN),
        (CONF_SENSORS, CONF_IO_DIG),
        (CONF_SWITCHES, CONF_IO_SWI),
    ):
        for zone in config.get(platform_key, []):
            if zone.get(CONF_PIN):
                zone[CONF_ZONE] = PIN_TO_ZONE[zone[CONF_PIN]]
                del zone[CONF_PIN]
            io_cfgs[zone[CONF_ZONE]] = io_type

    # Migrate config_entry data into default_options structure
    config[CONF_IO] = io_cfgs
    config[CONF_DEFAULT_OPTIONS] = OPTIONS_SCHEMA(config)

    # Drop the fields that were migrated into the options above.
    for migrated_key in (
        CONF_BINARY_SENSORS,
        CONF_SENSORS,
        CONF_SWITCHES,
        CONF_BLINK,
        CONF_DISCOVERY,
        CONF_API_HOST,
        CONF_IO,
    ):
        config.pop(migrated_key, None)
    return config
def import_validator(config):
    """Reformat for import."""
    config = copy.deepcopy(config)
    # Propagate the top-level api_host down into every device config.
    api_host = config.get(CONF_API_HOST, "")
    for device in config.get(CONF_DEVICES, []):
        device[CONF_API_HOST] = api_host
    return config
# configuration.yaml schemas (legacy)

# Legacy YAML binary-sensor entry: exactly one of zone/pin (mutually
# exclusive via the shared "s_io" group) plus a required device class.
BINARY_SENSOR_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_INVERSE, default=False): cv.boolean,
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# Legacy YAML digital-sensor entry (dht/ds18b20) with a poll interval.
SENSOR_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Required(CONF_TYPE): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
                vol.Coerce(int), vol.Range(min=1)
            ),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# Legacy YAML switch entry with optional momentary/pause/repeat timing.
SWITCH_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
            vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
                vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)
            ),
            vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)

# Legacy YAML device entry: 12-hex-digit id plus its IO lists; runs
# import_device_validator afterwards to migrate pins -> zones.
DEVICE_SCHEMA_YAML = vol.All(
    vol.Schema(
        {
            vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
            vol.Optional(CONF_BINARY_SENSORS): vol.All(
                cv.ensure_list, [BINARY_SENSOR_SCHEMA_YAML]
            ),
            vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA_YAML]),
            vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA_YAML]),
            vol.Inclusive(CONF_HOST, "host_info"): cv.string,
            vol.Inclusive(CONF_PORT, "host_info"): cv.port,
            vol.Optional(CONF_BLINK, default=True): cv.boolean,
            vol.Optional(CONF_API_HOST, default=""): vol.Any("", cv.url),
            vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
        }
    ),
    import_device_validator,
)

# Top-level configuration.yaml schema; import_validator pushes the shared
# api_host into each device before the per-device schema runs.
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            import_validator,
            vol.Schema(
                {
                    vol.Required(CONF_ACCESS_TOKEN): cv.string,
                    vol.Optional(CONF_API_HOST): vol.Url(),
                    vol.Optional(CONF_DEVICES): vol.All(
                        cv.ensure_list, [DEVICE_SCHEMA_YAML]
                    ),
                }
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# hass.data key for YAML-sourced configs (usage not visible in this chunk).
YAML_CONFIGS = "yaml_configs"
# Entity platforms this integration forwards config entries to.
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the Konnected platform from YAML configuration.

    Stores the global access token / API host in ``hass.data``, registers the
    HTTP view the panels push state to, and forwards any YAML-defined devices
    into the config-entry import flow.
    """
    cfg = config.get(DOMAIN)
    if cfg is None:
        cfg = {}
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {
            CONF_ACCESS_TOKEN: cfg.get(CONF_ACCESS_TOKEN),
            CONF_API_HOST: cfg.get(CONF_API_HOST),
            CONF_DEVICES: {},
        }
    hass.http.register_view(KonnectedView)
    # Check if they have yaml configured devices
    if CONF_DEVICES not in cfg:
        return True
    for device in cfg.get(CONF_DEVICES, []):
        # Import each YAML-configured device via the config-entry import
        # flow. Scheduling it as a task (instead of awaiting it here)
        # avoids a deadlock with the flow manager during setup.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=device
            )
        )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up panel from a config entry.

    Saves the panel's data store, connects to the device (raising
    ConfigEntryNotReady on failure so Home Assistant retries later),
    forwards platform setup, and registers an options-update listener.
    """
    client = AlarmPanel(hass, entry)
    # creates a panel data store in hass.data[DOMAIN][CONF_DEVICES]
    await client.async_save_data()
    try:
        await client.async_connect()
    except CannotConnect as err:
        # BUG FIX: chain the original exception so logs show the root cause;
        # ConfigEntryNotReady will trigger a retry in the future.
        raise config_entries.ConfigEntryNotReady from err
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    # config entry specific data to enable unload
    hass.data[DOMAIN][entry.entry_id] = {
        UNDO_UPDATE_LISTENER: entry.add_update_listener(async_entry_updated)
    }
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry.

    Unloads all platforms concurrently, removes the options-update listener,
    and — only if every platform unloaded cleanly — drops the panel's data
    stores from ``hass.data``.
    """
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in PLATFORMS
            ]
        )
    )
    # NOTE(review): the update listener is removed even when unload fails —
    # confirm a partially-unloaded entry is not expected to keep listening.
    hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
    if unload_ok:
        hass.data[DOMAIN][CONF_DEVICES].pop(entry.data[CONF_ID])
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
async def async_entry_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Reload the config entry when options change."""
    await hass.config_entries.async_reload(entry.entry_id)
class KonnectedView(HomeAssistantView):
    """View creates an endpoint to receive push updates from the device."""

    url = UPDATE_ENDPOINT
    name = "api:konnected"
    requires_auth = False  # Uses access token from configuration

    def __init__(self):
        """Initialize the view."""

    @staticmethod
    def binary_value(state, activation):
        """Return binary value for GPIO based on state and activation."""
        if activation == STATE_HIGH:
            return 1 if state == STATE_ON else 0
        return 0 if state == STATE_ON else 1

    async def update_sensor(self, request: Request, device_id) -> Response:
        """Process a put or post.

        Validates the bearer token, looks up the device and zone for the
        payload, and dispatches the update to the matching state handler.
        """
        hass = request.app["hass"]
        data = hass.data[DOMAIN]
        # Accept either the globally configured access token or any token
        # stored on an individual config entry.
        auth = request.headers.get(AUTHORIZATION)
        tokens = []
        if hass.data[DOMAIN].get(CONF_ACCESS_TOKEN):
            tokens.extend([hass.data[DOMAIN][CONF_ACCESS_TOKEN]])
        tokens.extend(
            [
                entry.data[CONF_ACCESS_TOKEN]
                for entry in hass.config_entries.async_entries(DOMAIN)
                if entry.data.get(CONF_ACCESS_TOKEN)
            ]
        )
        # hmac.compare_digest avoids timing side channels on the token check.
        if auth is None or not next(
            (True for token in tokens if hmac.compare_digest(f"Bearer {token}", auth)),
            False,
        ):
            return self.json_message("unauthorized", status_code=HTTP_UNAUTHORIZED)
        try:  # Konnected 2.2.0 and above supports JSON payloads
            payload = await request.json()
        except json.decoder.JSONDecodeError:
            _LOGGER.error(
                "Your Konnected device software may be out of "
                "date. Visit https://help.konnected.io for "
                "updating instructions."
            )
            # BUG FIX: execution previously fell through with `payload`
            # unbound, raising UnboundLocalError below. Reject the request
            # explicitly instead.
            return self.json_message(
                "Payload must be valid JSON", status_code=HTTP_BAD_REQUEST
            )
        device = data[CONF_DEVICES].get(device_id)
        if device is None:
            return self.json_message(
                "unregistered device", status_code=HTTP_BAD_REQUEST
            )
        # Normalize pin-based payloads (older firmware) to zone numbers.
        try:
            zone_num = str(payload.get(CONF_ZONE) or PIN_TO_ZONE[payload[CONF_PIN]])
            payload[CONF_ZONE] = zone_num
            zone_data = device[CONF_BINARY_SENSORS].get(zone_num) or next(
                (s for s in device[CONF_SENSORS] if s[CONF_ZONE] == zone_num), None
            )
        except KeyError:
            zone_data = None
        if zone_data is None:
            return self.json_message(
                "unregistered sensor/actuator", status_code=HTTP_BAD_REQUEST
            )
        zone_data["device_id"] = device_id
        # Fan each present attribute out to its registered handler.
        for attr in ["state", "temp", "humi", "addr"]:
            value = payload.get(attr)
            handler = HANDLERS.get(attr)
            if value is not None and handler:
                hass.async_create_task(handler(hass, zone_data, payload))
        return self.json_message("ok")

    async def get(self, request: Request, device_id) -> Response:
        """Return the current binary state of a switch."""
        hass = request.app["hass"]
        data = hass.data[DOMAIN]
        device = data[CONF_DEVICES].get(device_id)
        if not device:
            return self.json_message(
                f"Device {device_id} not configured", status_code=HTTP_NOT_FOUND
            )
        # Our data model is based on zone ids but we convert from/to pin ids
        # based on whether they are specified in the request
        try:
            zone_num = str(
                request.query.get(CONF_ZONE) or PIN_TO_ZONE[request.query[CONF_PIN]]
            )
            zone = next(
                switch
                for switch in device[CONF_SWITCHES]
                if switch[CONF_ZONE] == zone_num
            )
        except StopIteration:
            # No switch configured for a valid zone number.
            zone = None
        except KeyError:
            # Neither zone nor a known pin was supplied.
            zone = None
            zone_num = None
        if not zone:
            target = request.query.get(
                CONF_ZONE, request.query.get(CONF_PIN, "unknown")
            )
            return self.json_message(
                f"Switch on zone or pin {target} not configured",
                status_code=HTTP_NOT_FOUND,
            )
        # Mirror the caller's addressing scheme (zone vs. pin) in the reply.
        resp = {}
        if request.query.get(CONF_ZONE):
            resp[CONF_ZONE] = zone_num
        else:
            resp[CONF_PIN] = ZONE_TO_PIN[zone_num]
        # Make sure entity is setup
        zone_entity_id = zone.get(ATTR_ENTITY_ID)
        if zone_entity_id:
            resp["state"] = self.binary_value(
                hass.states.get(zone_entity_id).state, zone[CONF_ACTIVATION]
            )
            return self.json(resp)
        _LOGGER.warning("Konnected entity not yet setup, returning default")
        resp["state"] = self.binary_value(STATE_OFF, zone[CONF_ACTIVATION])
        return self.json(resp)

    async def put(self, request: Request, device_id) -> Response:
        """Receive a sensor update via PUT request and async set state."""
        return await self.update_sensor(request, device_id)

    async def post(self, request: Request, device_id) -> Response:
        """Receive a sensor update via POST request and async set state."""
        return await self.update_sensor(request, device_id)
| |
from sympy import (Symbol, symbols, factorial, factorial2, binomial,
rf, ff, gamma, polygamma, EulerGamma, O, pi, nan,
oo, zoo, simplify, expand_func)
from sympy.functions.combinatorial.factorials import subfactorial
from sympy.utilities.pytest import XFAIL, raises
def test_rf_eval_apply():
    """Evaluation and assumption handling for the rising factorial rf(x, k)."""
    x, y = symbols('x,y')
    # nan propagates; fully symbolic calls stay unevaluated but compare equal.
    assert rf(nan, y) == nan
    assert rf(x, y) == rf(x, y)
    # Infinite first argument.
    assert rf(oo, 0) == 1
    assert rf(-oo, 0) == 1
    assert rf(oo, 6) == oo
    assert rf(-oo, 7) == -oo
    assert rf(oo, -6) == oo
    assert rf(-oo, -7) == oo
    # Small non-negative integer orders expand to explicit ascending products.
    assert rf(x, 0) == 1
    assert rf(x, 1) == x
    assert rf(x, 2) == x*(x + 1)
    assert rf(x, 3) == x*(x + 1)*(x + 2)
    assert rf(x, 5) == x*(x + 1)*(x + 2)*(x + 3)*(x + 4)
    # Negative orders give reciprocals of descending products.
    assert rf(x, -1) == 1/(x - 1)
    assert rf(x, -2) == 1/((x - 1)*(x - 2))
    assert rf(x, -3) == 1/((x - 1)*(x - 2)*(x - 3))
    # rf(1, n) == n!.
    assert rf(1, 100) == factorial(100)
    # is_integer depends on integrality/sign assumptions of both arguments.
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True)
    m = Symbol('m', integer=True, nonnegative=True)
    assert rf(x, m).is_integer is None
    assert rf(n, k).is_integer is None
    assert rf(n, m).is_integer is True
    assert rf(n, k + pi).is_integer is False
    assert rf(n, m + pi).is_integer is False
    assert rf(pi, m).is_integer is False
def test_ff_eval_apply():
    """Evaluation and assumption handling for the falling factorial ff(x, k)."""
    x, y = symbols('x,y')
    # nan propagates; fully symbolic calls stay unevaluated but compare equal.
    assert ff(nan, y) == nan
    assert ff(x, y) == ff(x, y)
    # Infinite first argument.
    assert ff(oo, 0) == 1
    assert ff(-oo, 0) == 1
    assert ff(oo, 6) == oo
    assert ff(-oo, 7) == -oo
    assert ff(oo, -6) == oo
    assert ff(-oo, -7) == oo
    # Small non-negative integer orders expand to explicit descending products.
    assert ff(x, 0) == 1
    assert ff(x, 1) == x
    assert ff(x, 2) == x*(x - 1)
    assert ff(x, 3) == x*(x - 1)*(x - 2)
    assert ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4)
    # Negative orders give reciprocals of ascending products.
    assert ff(x, -1) == 1/(x + 1)
    assert ff(x, -2) == 1/((x + 1)*(x + 2))
    assert ff(x, -3) == 1/((x + 1)*(x + 2)*(x + 3))
    # ff(n, n) == n!.
    assert ff(100, 100) == factorial(100)
    # is_integer depends on integrality/sign assumptions of both arguments.
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True)
    m = Symbol('m', integer=True, nonnegative=True)
    assert ff(x, m).is_integer is None
    assert ff(n, k).is_integer is None
    assert ff(n, m).is_integer is True
    assert ff(n, k + pi).is_integer is False
    assert ff(n, m + pi).is_integer is False
    assert ff(pi, m).is_integer is False
def test_factorial():
    """Basic evaluation and assumption propagation for factorial."""
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True, positive=True)
    # Concrete arguments: negative integers diverge, small values evaluate,
    # infinity maps to infinity.
    for arg, expected in ((-2, zoo), (0, 1), (7, 5040), (oo, oo)):
        assert factorial(arg) == expected
    # Symbolic arguments stay unevaluated.
    assert factorial(n).func == factorial
    assert factorial(2*n).func == factorial
    # Assumption propagation: integrality always; positivity only when the
    # argument is known positive.
    assert factorial(n).is_integer
    assert factorial(n).is_positive is None
    assert factorial(k).is_positive
def test_factorial_diff():
    """Derivatives of factorial rewrite through gamma and polygamma."""
    n = Symbol('n', integer=True)
    simple_derivative = gamma(1 + n)*polygamma(0, 1 + n)
    assert factorial(n).diff(n) == simple_derivative
    chained_derivative = 2*n*gamma(1 + n**2)*polygamma(0, 1 + n**2)
    assert factorial(n**2).diff(n) == chained_derivative
def test_factorial_series():
    """Series expansion of factorial(n) about n = 0 up to O(n**3)."""
    n = Symbol('n', integer=True)
    expansion = factorial(n).series(n, 0, 3)
    expected = 1 - n*EulerGamma + n**2*(EulerGamma**2/2 + pi**2/12) + O(n**3)
    assert expansion == expected
def test_factorial_rewrite():
    """factorial(n) rewrites to gamma(n + 1)."""
    n = Symbol('n', integer=True)
    rewritten = factorial(n).rewrite(gamma)
    assert rewritten == gamma(n + 1)
def test_factorial2():
    """Double factorial on small concrete and symbolic arguments."""
    n = Symbol('n', integer=True)
    # Concrete values: -1!! and 0!! are 1; odd/even cases evaluate.
    for arg, expected in ((-1, 1), (0, 1), (7, 105), (8, 384)):
        assert factorial2(arg) == expected
    # Symbolic arguments stay unevaluated.
    assert factorial2(n).func == factorial2
def test_binomial():
    """Evaluation, expansion and assumption handling for binomial(n, k)."""
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True)
    # BUG FIX: the display names were swapped (u was Symbol('v') and v was
    # Symbol('m')), which made failure output misleading. The assumptions
    # (negative/positive) — which actually drive the asserts — are unchanged.
    u = Symbol('u', negative=True)
    v = Symbol('v', positive=True)
    assert binomial(0, 0) == 1
    assert binomial(1, 1) == 1
    assert binomial(10, 10) == 1
    assert binomial(1, 2) == 0
    assert binomial(1, -1) == 0
    # Negative upper argument uses the generalized binomial.
    assert binomial(-1, 1) == -1
    assert binomial(-10, 1) == -10
    assert binomial(-10, 7) == -11440
    assert binomial(n, -1) == 0
    assert binomial(n, 0) == 1
    # expand_func produces explicit polynomials for small fixed k (and for
    # k close to n via symmetry).
    assert expand_func(binomial(n, 1)) == n
    assert expand_func(binomial(n, 2)) == n*(n - 1)/2
    assert expand_func(binomial(n, n - 2)) == n*(n - 1)/2
    assert expand_func(binomial(n, n - 1)) == n
    assert binomial(n, 3).func == binomial
    assert binomial(n, 3).expand(func=True) == n**3/6 - n**2/2 + n/3
    assert expand_func(binomial(n, 3)) == n*(n - 2)*(n - 1)/6
    assert binomial(n, n) == 1
    assert binomial(n, n + 1) == 0
    # Sign assumptions decide evaluation: negative k gives 0, positive k
    # stays symbolic.
    assert binomial(n, u) == 0
    assert binomial(n, v).func == binomial
    assert binomial(n, k).func == binomial
    assert binomial(n, n + v) == 0
    assert expand_func(binomial(n, n-3)) == n*(n - 2)*(n - 1)/6
    assert binomial(n, k).is_integer
def test_binomial_diff():
    """Derivatives of binomial w.r.t. each argument rewrite via polygamma."""
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True)
    # d/dn binomial(n, k).
    dn = (-polygamma(0, 1 + n - k) + polygamma(0, 1 + n))*binomial(n, k)
    assert binomial(n, k).diff(n) == dn
    # Chain rule through n**2 in the upper argument.
    dn_chain = 2*n*(-polygamma(0, 1 + n**2 - k**3)
                    + polygamma(0, 1 + n**2))*binomial(n**2, k**3)
    assert binomial(n**2, k**3).diff(n) == dn_chain
    # d/dk binomial(n, k).
    dk = (-polygamma(0, 1 + k) + polygamma(0, 1 + n - k))*binomial(n, k)
    assert binomial(n, k).diff(k) == dk
    # Chain rule through k**3 in the lower argument.
    dk_chain = 3*k**2*(-polygamma(0, 1 + k**3)
                       + polygamma(0, 1 + n**2 - k**3))*binomial(n**2, k**3)
    assert binomial(n**2, k**3).diff(k) == dk_chain
def test_binomial_rewrite():
    """binomial(n, k) rewrites through factorial and gamma."""
    n = Symbol('n', integer=True)
    k = Symbol('k', integer=True)
    as_factorials = factorial(n)/(factorial(k)*factorial(n - k))
    assert binomial(n, k).rewrite(factorial) == as_factorials
    as_gammas = gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
    assert binomial(n, k).rewrite(gamma) == as_gammas
@XFAIL
def test_factorial_simplify_fail():
    """Known failure: simplify cannot reduce this polygamma combination to 0."""
    # simplify(factorial(x + 1).diff(x) - ((x + 1)*factorial(x)).diff(x))) == 0
    from sympy.abc import x
    assert simplify(x*polygamma(0, x + 1) - x*polygamma(0, x + 2) +
                    polygamma(0, x + 1) - polygamma(0, x + 2) + 1) == 0
def test_subfactorial():
    """Known derangement counts; non-integer and negative inputs raise."""
    known_counts = [1, 0, 1, 2, 9, 44, 265, 1854, 14833, 133496]
    for i, expected in enumerate(known_counts):
        assert subfactorial(i) == expected
    raises(ValueError, lambda: subfactorial(0.1))
    raises(ValueError, lambda: subfactorial(-2))
| |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the main abstractions used by clusterdock topologies
to bring up clusters.
"""
import copy
import io
import logging
import os
import tarfile
import time
from collections import OrderedDict, namedtuple
import docker
from .config import defaults
from .exceptions import DuplicateClusterNameError, DuplicateHostnamesError
from .utils import (get_containers, generate_cluster_name, get_clusterdock_label,
nested_get, wait_for_condition)
logger = logging.getLogger(__name__)
# Parsed CLI arguments; set by the clusterdock entry point before topologies
# run. NOTE(review): assumed to remain None outside the CLI — confirm.
clusterdock_args = None
# Module-wide Docker client (300 s timeout accommodates slow operations
# such as image pulls).
client = docker.from_env(timeout=300)
# Network driver used when creating cluster networks.
DEFAULT_NETWORK_TYPE = 'bridge'
LOCALTIME_MOUNT = True # Sync host time to Docker container by /etc/localtime
PRIVILEGED_CONTAINER = False # Give extended privileges to the container.
class Cluster:
    """The central abstraction for interacting with Docker container clusters.

    No Docker behavior is actually invoked until the start method is called.

    Args:
        *nodes: One or more :py:obj:`clusterdock.models.Node` instances.
    """
    def __init__(self, *nodes):
        self.nodes = nodes
        # An explicit --cluster-name must be unique among running clusterdock
        # containers; otherwise a random name is generated.
        if clusterdock_args and clusterdock_args.cluster_name:
            clusters = {container.cluster_name for container in get_containers(clusterdock=True)}
            if clusterdock_args.cluster_name in clusters:
                raise DuplicateClusterNameError(name=clusterdock_args.cluster_name, clusters=clusters)
            else:
                self.name = clusterdock_args.cluster_name
        else:
            self.name = generate_cluster_name()
        # --port arguments look like "<hostname>:<port>" or
        # "<hostname>:<host_port>-><container_port>".
        # NOTE(review): nodes_by_host.get may return None for an unknown
        # hostname, making node.ports.append raise AttributeError — confirm
        # upstream validation of --port.
        if clusterdock_args and clusterdock_args.port:
            nodes_by_host = {node.hostname: node for node in self.nodes}
            for port in clusterdock_args.port:
                node = nodes_by_host.get(port.split(':')[0])
                port_value = port.split(':')[1]
                node.ports.append({port_value.split('->')[0]: port_value.split('->')[1]}
                                  if '->' in port_value else int(port_value))
        # Partition nodes into NodeGroups keyed by node.group.
        self.node_groups = {}
        for node in self.nodes:
            if node.group not in self.node_groups:
                logger.debug('Creating NodeGroup %s ...',
                             node.group)
                self.node_groups[node.group] = NodeGroup(node.group, node)
            else:
                self.node_groups[node.group].nodes.append(node)
            # Put this outside the if-else because, whether a new NodeGroup is created
            # or not, the node will be added to it.
            logger.debug('Adding node (%s) to NodeGroup %s ...',
                         node.hostname,
                         node.group)
    def start(self, network, pull_images=False, update_etc_hosts=True):
        """Start the cluster.

        Args:
            network (:obj:`str`): Name of the Docker network to use for the cluster.
            pull_images (:obj:`bool`, optional): Pull every Docker image needed by every
                :py:obj:`clusterdock.models.Node` instance, even if it exists locally.
                Default: ``False``
            update_etc_hosts (:obj:`bool`): Update the /etc/hosts file on the host with the
                hostname and IP address of the container. Default: ``True``
        """
        # NOTE(review): update_etc_hosts is accepted but not referenced in this
        # method body — confirm whether it is consumed elsewhere or dead.
        logger.info('Starting cluster (%s) on network (%s) ...', self.name, network)
        self.network = network
        the_network = self._setup_network(name=self.network)
        # Refuse to start if any requested hostname is already an alias on the
        # network; duplicate hostnames would break container DNS.
        if len(the_network.containers) != 0:
            containers_attached_to_network = [nested_get(container.attrs,
                                                         ['NetworkSettings',
                                                          'Networks',
                                                          self.network,
                                                          'Aliases',
                                                          0])
                                              for container in the_network.containers]
            logger.debug('Network (%s) currently has the followed containers attached: \n%s',
                         self.network,
                         '\n'.join('- {}'.format(container)
                                   for container in containers_attached_to_network))
            duplicate_hostnames = set(containers_attached_to_network) & set(node.hostname
                                                                            for node in self.nodes)
            if duplicate_hostnames:
                raise DuplicateHostnamesError(duplicates=duplicate_hostnames,
                                              network=self.network)
        for node in self:
            node.start(self.network, cluster_name=self.name, pull_images=pull_images)
    def execute(self, command, **kwargs):
        """Execute a command on every :py:class:`clusterdock.models.Node` within the
        :py:class:`clusterdock.models.Cluster`.

        Args:
            command (:obj:`str`): Command to execute.
            **kwargs: Additional keyword arguments to pass to
                :py:meth:`clusterdock.models.Node.execute`.

        Returns:
            A :py:class:`collections.OrderedDict` of :obj:`str` instances (the FQDN of the node)
            mapping to the :py:class:`collections.namedtuple` instances returned by
            :py:meth:`clusterdock.models.Node.execute`.
        """
        return OrderedDict((node.fqdn, node.execute(command, **kwargs)) for node in self.nodes)
    def __iter__(self):
        # Iterating a Cluster yields its nodes in declaration order.
        for node in self.nodes:
            yield node
    def _setup_network(self, name):
        # Create the cluster network, or reuse an existing one with the same
        # name (Docker reports the duplicate via APIError).
        try:
            labels = {defaults.get('DEFAULT_DOCKER_LABEL_KEY'): get_clusterdock_label(self.name)}
            network = client.networks.create(name=name,
                                             driver=DEFAULT_NETWORK_TYPE,
                                             check_duplicate=True,
                                             labels=labels)
            logger.debug('Successfully created network (%s).', name)
        except docker.errors.APIError as api_error:
            if api_error.explanation == 'network with name {} already exists'.format(name):
                logger.warning('Network (%s) already exists. Continuing without creating ...',
                               name)
                network = client.networks.get(name)
            else:
                raise
        return network
class NodeGroup:
    """A named, mutable collection of :py:class:`clusterdock.models.Node` instances.

    Grouping nodes lets topologies act on related hosts en masse. For example,
    a typical HDFS cluster could be seen as a 1-node group of hosts running
    NameNodes and an (n-1)-node group of hosts running DataNodes.

    Args:
        name (:obj:`str`): The name by which to refer to the group.
        *nodes: One or more :py:class:`clusterdock.models.Node` instances.
    """
    def __init__(self, name, *nodes):
        self.name = name
        # *nodes arrives as an immutable tuple; keep a list so the group
        # can grow after construction.
        self.nodes = list(nodes)
    def __iter__(self):
        # Delegate iteration to the underlying node list.
        return iter(self.nodes)
    def execute(self, command, **kwargs):
        """Execute a command on every :py:class:`clusterdock.models.Node` within the
        :py:class:`clusterdock.models.NodeGroup`.

        Args:
            command (:obj:`str`): Command to execute.
            **kwargs: Additional keyword arguments to pass to
                :py:meth:`clusterdock.models.Node.execute`.

        Returns:
            A :py:class:`collections.OrderedDict` mapping each node's FQDN to the
            result returned by :py:meth:`clusterdock.models.Node.execute`.
        """
        results = OrderedDict()
        for node in self.nodes:
            results[node.fqdn] = node.execute(command, **kwargs)
        return results
class Node:
    """Class representing a single cluster host.

    Args:
        hostname (:obj:`str`): Hostname of the node.
        group (:obj:`str`): :py:obj:`clusterdock.models.NodeGroup` to which the node should belong.
        image (:obj:`str`): Docker image with which to start the container.
        ports (:obj:`list`, optional): A list of container ports to expose to the host. Elements of
            the list could be integers (in which case a random port on the host will be chosen by
            the Docker daemon) or dictionaries (with the key being the host port and the value being
            the container port). Default: ``None``
        volumes (:obj:`list`, optional): A list of volumes to create for the node. Elements of the
            list could be dictionaries of bind volumes (i.e. key: the absolute path on the host,
            value: the absolute path in the container) or strings representing the names of
            Docker images from which to get volumes. As an example,
            ``[{'/var/www': '/var/www'}, 'my_super_secret_image']`` would create a bind mount of
            ``/var/www`` on the host and use any volumes from ``my_super_secret_image``.
            Default: ``None``
        devices (:obj:`list`, optional): Devices on the host to expose to the node. Default:
            ``None``
        environment (:obj:`dict`, optional): Environment variables to set inside the container.
            Default: ``None``
        **create_container_kwargs: Any other keyword arguments to pass directly to
            :py:meth:`docker.api.container.create_container`.
    """
    DEFAULT_CREATE_HOST_CONFIG_KWARGS = {
        # Add all capabilities to make containers host-like.
        'cap_add': ['ALL'],
        # Run without a seccomp profile.
        'security_opt': ['seccomp=unconfined']
    }
    DEFAULT_CREATE_CONTAINER_KWARGS = {
        # All nodes run in detached mode.
        'detach': True,
        'volumes': []
    }
    def __init__(self, hostname, group, image, ports=None, volumes=None, devices=None, environment=None,
                 **create_container_kwargs):
        self.hostname = hostname
        self.group = group
        self.image = image
        self.ports = ports or []
        self.volumes = volumes or []
        self.devices = devices or []
        self.environment = environment or {}
        self.create_container_kwargs = create_container_kwargs
        # Resolve the clusterdock config directory; a CLI override wins over
        # the packaged default.
        if clusterdock_args and clusterdock_args.clusterdock_config_directory:
            dir_path = clusterdock_args.clusterdock_config_directory
        else:
            dir_path = defaults.get('DEFAULT_CLUSTERDOCK_CONFIG_DIRECTORY')
        self.clusterdock_config_host_dir = os.path.realpath(os.path.expanduser(dir_path))
        logger.info('self.clusterdock_config_host_dir = %s', self.clusterdock_config_host_dir)
        # Shell used by `execute` and by the /etc/hosts helper container.
        self.execute_shell = '/bin/sh'
    def start(self, network, cluster_name=None, pull_images=False):
        """Start the node.

        Args:
            network (:obj:`str`): Docker network to which to attach the container.
            cluster_name (:obj:`str`, optional): Cluster name to use for the Node.
                Default: ``None``
            pull_images (:obj:`bool`, optional): Pull every Docker image needed by this node
                instance, even if it exists locally. Default: ``False``
        """
        self.fqdn = '{}.{}'.format(self.hostname, network)
        # Instantiate dictionaries for kwargs we'll pass when creating host configs
        # and the node's container itself.
        create_host_config_kwargs = copy.deepcopy(Node.DEFAULT_CREATE_HOST_CONFIG_KWARGS)
        create_container_kwargs = copy.deepcopy(dict(Node.DEFAULT_CREATE_CONTAINER_KWARGS,
                                                     **self.create_container_kwargs))
        create_host_config_kwargs['privileged'] = PRIVILEGED_CONTAINER
        if LOCALTIME_MOUNT:
            # Mount in /etc/localtime to have container time match the host's.
            create_host_config_kwargs['binds'] = {
                os.path.join(self.clusterdock_config_host_dir, 'localtime'):
                    {'bind': '/etc/localtime', 'mode': 'rw'}
            }
            create_container_kwargs['volumes'].append('/etc/localtime')
        else:
            # NOTE(review): assumes /etc/localtime is a symlink into a zoneinfo
            # tree; os.readlink raises OSError otherwise — confirm target hosts.
            self.environment['TZ'] = os.readlink('/etc/localtime').split('zoneinfo/')[1]
        clusterdock_container_labels = {defaults.get('DEFAULT_DOCKER_LABEL_KEY'):
                                        get_clusterdock_label(cluster_name)}
        create_container_kwargs['labels'] = clusterdock_container_labels
        if self.volumes:
            # Instantiate empty lists to which we'll append elements as we traverse through
            # volumes. These populated lists will then get passed to either
            # :py:meth:`docker.api.client.APIClient.create_host_config` or
            # :py:meth:`docker.api.client.create_container`.
            binds = {}
            volumes = []
            volumes_from = []
            for volume in self.volumes:
                if isinstance(volume, list):
                    # Lists in the volumes list are Docker volumes to create.
                    volumes.extend(volume)
                elif isinstance(volume, dict):
                    # Dictionaries in the volumes list are bind volumes.
                    for host_directory, container_directory in volume.items():
                        logger.debug('Adding volume (%s) to container config ...',
                                     '{} => {}'.format(host_directory, container_directory))
                        binds[host_directory] = dict(bind=container_directory, mode='rw')
                        volumes.append(container_directory)
                elif isinstance(volume, str):
                    # Strings in the volume list are `volumes_from` images.
                    if pull_images:
                        logger.info('Node started with pull_images=True. '
                                    'Attempting to pull image (%s) ...', volume)
                        client.images.pull(volume)
                    else:
                        # Check for whether the image we need is present by trying to inspect
                        # it. If a NotFound exception is raised, make sure it's because the
                        # image is missing and then pull it before trying again.
                        try:
                            client.api.inspect_image(volume)
                        except docker.errors.NotFound as not_found:
                            if (not_found.response.status_code == 404 and
                                    'No such image' in not_found.explanation):
                                logger.info('Could not find %s locally. '
                                            'Attempting to pull ...', volume)
                                client.images.pull(volume)
                    container = client.containers.create(volume,
                                                         labels=clusterdock_container_labels)
                    volumes_from.append(container.id)
                else:
                    element_type = type(volume).__name__
                    # BUG FIX: message now lists all accepted element types
                    # (lists are handled above but were omitted before).
                    raise TypeError('Saw volume of type {} '
                                    '(must be list, dict or str).'.format(element_type))
            if volumes_from:
                create_host_config_kwargs['volumes_from'] = volumes_from
            if volumes:
                # BUG FIX: 'binds' only exists when LOCALTIME_MOUNT is true;
                # setdefault avoids a KeyError when it is disabled.
                create_host_config_kwargs.setdefault('binds', {}).update(binds)
                create_container_kwargs['volumes'] += volumes
        ports = []
        port_bindings = {}
        for port in self.ports:
            if isinstance(port, dict):
                # {host_port: container_port} mappings.
                for host_port, container_port in port.items():
                    logger.debug('Adding binding from host port %s to container port %s ...',
                                 host_port, container_port)
                    ports.append(container_port)
                    port_bindings[container_port] = host_port
            elif isinstance(port, int):
                # Bare ints get an ephemeral host port chosen by the daemon.
                ports.append(port)
                port_bindings[port] = None
            else:
                element_type = type(port).__name__
                raise TypeError('Saw port of type {} (must be dict or int).'.format(element_type))
        if self.environment:
            create_container_kwargs['environment'] = self.environment
        if ports:
            create_container_kwargs['ports'] = ports
        if port_bindings:
            create_host_config_kwargs['port_bindings'] = port_bindings
        if self.devices:
            create_host_config_kwargs['devices'] = self.devices
        host_config = client.api.create_host_config(**create_host_config_kwargs)
        # Pass networking config to container at creation time to avoid issues with
        # DNS resolution.
        networking_config = client.api.create_networking_config({
            network: client.api.create_endpoint_config(aliases=[self.hostname])
        })
        logger.info('Starting node %s ...', self.fqdn)
        if pull_images:
            logger.info('Node started with pull_images=True. '
                        'Attempting to pull image (%s) ...', self.image)
            client.images.pull(self.image)
        else:
            # Check for whether the image we need is present by trying to inspect it. If any
            # NotFound exception is raised, make sure it's because the image is missing and then
            # pull it before trying again.
            try:
                client.api.inspect_image(self.image)
            except docker.errors.NotFound as not_found:
                if (not_found.response.status_code == 404 and
                        'No such image' in not_found.explanation):
                    logger.info('Could not find %s locally. Attempting to pull ...', self.image)
                    client.images.pull(self.image)
        # Since we need to use the low-level API to handle networking properly, we need to get
        # a container instance from the ID.
        container_id = client.api.create_container(image=self.image,
                                                   hostname=self.fqdn,
                                                   host_config=host_config,
                                                   networking_config=networking_config,
                                                   **create_container_kwargs)['Id']
        client.api.start(container=container_id)
        # When the Container instance is created, the corresponding Docker container may not
        # be in a RUNNING state. Wait until it is (or until timeout takes place).
        self.container = client.containers.get(container_id=container_id)
        logger.debug('Connecting container (%s) to network (%s) ...',
                     self.container.short_id, network)
        timeout_in_secs = 30
        # Wait for container to be in running state before moving on.
        def condition(container):
            container.reload()
            outcome = nested_get(container.attrs, ['State', 'Running'])
            logger.debug('Container running state evaluated to %s.', outcome)
            return outcome
        def success(time):
            logger.debug('Container reached running state after %s seconds.', time)
        def failure(timeout):
            logger.debug('Timed out after %s seconds waiting for container to reach running state.',
                         timeout)
        # BUG FIX: the timeout was hard-coded to 30 even though timeout_in_secs
        # was defined (and left unused) for exactly this purpose; the variable
        # is now used for both waits below.
        wait_for_condition(condition=condition, condition_args=[self.container],
                           timeout=timeout_in_secs, success=success, failure=failure)
        logger.debug('Reloading attributes for container (%s) ...', self.container.short_id)
        self.container.reload()
        self.ip_address = nested_get(self.container.attrs,
                                     ['NetworkSettings', 'Networks', network, 'IPAddress'])
        self.host_ports = {int(container_port.split('/')[0]): int(host_ports[0]['HostPort'])
                           for container_port, host_ports in nested_get(self.container.attrs,
                                                                        ['NetworkSettings',
                                                                         'Ports']).items()}
        if self.host_ports:
            logger.info('Created host port mapping (%s) for node (%s).',
                        '; '.join('{} => {}'.format(host_port, container_port)
                                  for host_port, container_port in self.host_ports.items()),
                        self.hostname)
        # Wait for container's SSH daemon to come online.
        def condition(node):
            sshd_status = node.execute('service sshd status', quiet=True).exit_code
            logger.debug('service sshd status returned %s.', sshd_status)
            return sshd_status == 0
        def success(time):
            logger.debug('SSH daemon came up after %s seconds.', time)
        def failure(timeout):
            logger.debug('Timed out after %s seconds waiting for SSH daemon to start.',
                         timeout)
        wait_for_condition(condition=condition, condition_args=[self],
                           timeout=timeout_in_secs, success=success, failure=failure)
        self._add_node_to_etc_hosts()
    def stop(self, remove=True):
        """Stop the node, optionally removing the Docker container.

        Args:
            remove (:obj:`bool`, optional): Remove underlying Docker container.
                Default: ``True``
        """
        if not remove:
            self.container.stop()
        else:
            # Force-removal stops the container and deletes its volumes too.
            self.container.remove(v=True, force=True)
    def execute(self, command, user='root', quiet=False, detach=False):
        """Execute a command on the node.

        Args:
            command (:obj:`str`): Command to execute.
            user (:obj:`str`, optional): User with which to execute the command. Default: ``root``
            quiet (:obj:`bool`, optional): Run the command without showing any output. Default:
                ``False``
            detach (:obj:`bool`, optional): Run the command in detached mode. Default:
                ``False``

        Returns:
            A :py:class:`collections.namedtuple` instance with `exit_code` and `output` attributes.
        """
        logger.debug('Executing command (%s) on node (%s) ...', command, self.fqdn)
        exec_command = [self.execute_shell, '-c', command]
        logger.debug('Running docker exec with command (%s) ...', exec_command)
        exec_id = client.api.exec_create(self.container.id, exec_command, user=user)['Id']
        output = []
        # Stream chunks as they arrive so long-running commands show progress.
        for response_chunk in client.api.exec_start(exec_id, stream=True, detach=detach):
            output_chunk = response_chunk.decode()
            output.append(output_chunk)
            if not quiet:
                print(output_chunk)
        exit_code = client.api.exec_inspect(exec_id).get('ExitCode')
        return namedtuple('ExecuteSession', ['exit_code', 'output'])(exit_code=exit_code,
                                                                     output=''.join(output))
    def get_file(self, path):
        """Get file from the node.

        Args:
            path (:obj:`str`): Absolute path to file.

        Returns:
            A :obj:`str` containing the contents of the file.
        """
        # get_archive streams a tar archive; buffer it and extract the
        # (single) member's contents.
        tarstream = io.BytesIO()
        for chunk in self.container.get_archive(path=path)[0]:
            tarstream.write(chunk)
        tarstream.seek(0)
        with tarfile.open(fileobj=tarstream) as tarfile_:
            for tarinfo in tarfile_.getmembers():
                return tarfile_.extractfile(tarinfo).read().decode()
    def put_file(self, path, contents):
        """Put file on the node.

        Args:
            path (:obj:`str`): Absolute path to file.
            contents (:obj:`str`): The contents of the file.
        """
        data = io.BytesIO()
        with tarfile.open(fileobj=data, mode='w') as tarfile_:
            encoded_file = contents.encode()
            tarinfo = tarfile.TarInfo(path)
            # We set the modification time to now because some systems (e.g. logging) rely upon
            # timestamps to determine whether to read config files.
            tarinfo.mtime = time.time()
            tarinfo.size = len(encoded_file)
            tarfile_.addfile(tarinfo, io.BytesIO(encoded_file))
        data.seek(0)
        self.container.put_archive(path='/', data=data)
    def commit(self, repository, tag=None, push=False, **kwargs):
        """Commit the Node's Docker container to a Docker image.

        Args:
            repository (:obj:`str`): The Docker repository to commit the image to.
            tag (:obj:`str`, optional): Docker image tag. Default: ``None``
            push (:obj:`bool`, optional): Push the image to Docker repository. Default: ``False``
            **kwargs: Additional keyword arguments to pass to
                :py:meth:`docker.models.Containers.Container.commit`
        """
        logger.debug('Committing `%s` with container id %s ...', self.fqdn, self.container.short_id)
        image = self.container.commit(repository=repository, tag=tag, **kwargs)
        logger.debug('%s repo tags committed with image id as %s', image.tags, image.short_id)
        if push:
            logger.debug('Pushing image of `%s` to repository %s ...', self.fqdn, repository)
            for line in client.api.push(repository, tag, stream=True, decode=True):
                line.pop('progressDetail', None)  # take out too much detail
                logger.debug(line)
            logger.debug('%s repo tags pushed for `%s`, whose image id is %s',
                         image.tags, self.fqdn, image.short_id)
    def _add_node_to_etc_hosts(self):
        """Add node information to the Docker hosts' /etc/hosts file, exploiting Docker's
        permissions to do so without needing an explicit sudo.
        """
        image = 'alpine:latest'
        command = 'echo "{} {} # clusterdock" >> /etc/hosts'.format(self.ip_address,
                                                                    self.fqdn)
        volumes = {'/etc/hosts': {'bind': '/etc/hosts', 'mode': 'rw'}}
        logger.debug('Adding %s to /etc/hosts ...', self.fqdn)
        client.containers.run(image=image,
                              command=[self.execute_shell, '-c', command],
                              volumes=volumes,
                              remove=True)
| |
# -*- coding: utf-8 -*-
# BITFINEX API wrapper
#
#
# AUTHOR: @jimako1989
# GITHUB: github.com/jimako1989/bitfinexpy
# LICENSE: MIT
#
import base64
import hashlib
import hmac
import json
import requests
import time
# EndpointsMixin provides a mixin for the API instance
# EndpointsMixin provides a mixin for the API instance
class EndpointsMixin(object):
    """Mixin mapping the Bitfinex v1 REST endpoints onto a transport class.

    The hosting class must provide ``request(endpoint, method='GET',
    auth=True, params=None, payload_params=None)``; every method here only
    assembles the endpoint path and the request parameters.
    """

    # Public API #######################################################

    def ticker(self, **params):
        """Give the innermost bid/ask, the most recent trade, and the high,
        low and volume of the last 24 hours.

        Docs: https://docs.bitfinex.com/v1/reference#rest-public-ticker
        """
        symbol = params.pop('symbol')
        endpoint = 'pubticker/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def stats(self, **params):
        """Various statistics about the requested pair.

        Docs: http://docs.bitfinex.com/#stats
        """
        symbol = params.pop('symbol')
        endpoint = 'stats/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def fundingbook(self, **params):
        """Get the full margin funding book.

        Docs: http://docs.bitfinex.com/#fundingbook
        """
        symbol = params.pop('symbol')
        endpoint = 'lendbook/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def orderbook(self, **params):
        """Get the full order book.

        Docs: http://docs.bitfinex.com/#orderbook
        """
        symbol = params.pop('symbol')
        endpoint = 'book/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def trades(self, **params):
        """Get a list of the most recent trades for the given symbol.

        Docs: http://docs.bitfinex.com/#trades
        """
        symbol = params.pop('symbol')
        endpoint = 'trades/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def lends(self, **params):
        """Get a list of the most recent funding data for the given currency:
        total amount lent and Flash Return Rate (in % by 365 days) over time.

        Docs: http://docs.bitfinex.com/#lends
        """
        symbol = params.pop('symbol')
        endpoint = 'lends/' + symbol
        return self.request(endpoint, auth=False, params=params)

    def symbols(self, **params):
        """Get a list of valid symbol IDs.

        Docs: http://docs.bitfinex.com/#symbols
        """
        endpoint = 'symbols'
        return self.request(endpoint, auth=False, params=params)

    def symbol_details(self, **params):
        """Get a list of valid symbol IDs and the pair details.

        Docs: http://docs.bitfinex.com/#symbol-details

        Note: the v1 endpoint is ``symbols_details`` and takes no symbol in
        the path; a ``symbol`` keyword, if supplied, is dropped for backward
        compatibility with the previous signature.
        """
        # BUG FIX: this previously requested 'book/<symbol>' (the order book
        # endpoint) — a copy/paste error from orderbook().
        params.pop('symbol', None)
        endpoint = 'symbols_details'
        return self.request(endpoint, auth=False, params=params)

    # Private API #######################################################

    # Account
    def account_infos(self, **params):
        """Check the balance.

        Docs: http://docs.bitfinex.com/#account-info
        """
        endpoint = 'account_infos'
        # Authenticated v1 endpoints require POST; this previously defaulted
        # to GET, unlike every other private endpoint in this mixin.
        return self.request(endpoint, method='POST', payload_params=params)

    # Deposit
    def deposit(self, **params):
        """Return your deposit address to make a new deposit.

        Docs: http://docs.bitfinex.com/#deposit
        """
        endpoint = 'deposit/new'
        # Authenticated v1 endpoints require POST (was GET by default).
        return self.request(endpoint, method='POST', payload_params=params)

    # Order
    def new_order(self, symbol, amount, price, side, order_type, **params):
        """Submit a new order.

        Docs: http://docs.bitfinex.com/#new-order
        """
        endpoint = 'order/new'
        params['symbol'] = symbol
        params['amount'] = amount
        params['price'] = price
        params['side'] = side
        params['type'] = order_type
        params['exchange'] = 'bitfinex'
        return self.request(endpoint, method='POST', payload_params=params)

    def multiple_new_orders(self, orders, **params):
        """Submit several new orders at once.

        Docs: http://docs.bitfinex.com/#new-order
        """
        endpoint = 'order/new/multi'
        # The API expects the order list serialized as a JSON string.
        params['orders'] = json.dumps(orders)
        return self.request(endpoint, method='POST', payload_params=params)

    def cancel_order(self, order_id, **params):
        """Cancel an order.

        Docs: http://docs.bitfinex.com/#cancel-order
        """
        endpoint = 'order/cancel'
        params['order_id'] = order_id
        return self.request(endpoint, method='POST', payload_params=params)

    def cancel_multiple_orders(self, order_ids, **params):
        """Cancel multiple orders at once.

        Docs: http://docs.bitfinex.com/#cancel-multiple-orders
        """
        endpoint = 'order/cancel/multi'
        params['order_ids'] = order_ids
        return self.request(endpoint, method='POST', payload_params=params)

    def cancel_all_orders(self, **params):
        """Cancel all active orders at once.

        Docs: http://docs.bitfinex.com/#cancel-all-orders
        """
        endpoint = 'order/cancel/all'
        return self.request(endpoint, method='POST', payload_params=params)

    def replace_order(self, order_id, symbol, amount, price, side, order_type, **params):
        """Replace an order with a new one.

        Docs: http://docs.bitfinex.com/#replace-orders
        """
        endpoint = 'order/cancel/replace'
        params['order_id'] = order_id
        params['symbol'] = symbol
        params['amount'] = amount
        params['price'] = price
        params['side'] = side
        params['type'] = order_type
        params['exchange'] = 'bitfinex'
        return self.request(endpoint, method='POST', payload_params=params)

    def order_status(self, order_id, **params):
        """Get the status of an order: active, cancelled, amount executed, etc.

        Docs: http://docs.bitfinex.com/#order-status
        """
        endpoint = 'order/status'
        params['order_id'] = order_id
        return self.request(endpoint, method='POST', payload_params=params)

    def active_orders(self, **params):
        """View your active orders.

        Docs: http://docs.bitfinex.com/#active-orders
        """
        endpoint = 'orders'
        return self.request(endpoint, method='POST', payload_params=params)

    # Positions
    def active_positions(self, **params):
        """View your active positions.

        Docs: http://docs.bitfinex.com/#active-positions
        """
        endpoint = 'positions'
        return self.request(endpoint, method='POST', payload_params=params)

    def claim_position(self, position_id, **params):
        """Claim a position.

        A position can be claimed if:

        It is a long position: the amount in the last unit of the position
        pair in your trading wallet AND/OR the realized profit of the
        position is greater or equal to the purchase amount of the position
        (base price * position amount) and the funds which need to be
        returned.  For example, for a long BTCUSD position, you can claim
        the position if the amount of USD in the trading wallet is greater
        than base price * position amount plus the funds used.

        It is a short position: the amount in the first unit of the position
        pair in your trading wallet is greater or equal to the amount of the
        position and the margin funding used.

        Docs: http://docs.bitfinex.com/#claim-position
        """
        endpoint = 'position/claim'
        params['position_id'] = position_id
        return self.request(endpoint, method='POST', payload_params=params)

    # Historical Data
    def balance_history(self, currency, **params):
        """View all of your balance ledger entries.

        Docs: http://docs.bitfinex.com/#balance-history
        """
        endpoint = 'history'
        params['currency'] = currency
        return self.request(endpoint, method='POST', payload_params=params)

    def deposit_withdrawal_history(self, currency, **params):
        """View your past deposits and withdrawals.

        Docs: http://docs.bitfinex.com/#deposit-withdrawal-history
        """
        endpoint = 'history/movements'
        params['currency'] = currency
        return self.request(endpoint, method='POST', payload_params=params)

    def past_trades(self, symbol, **params):
        """View your past trades.

        Docs: http://docs.bitfinex.com/#past-trades
        """
        endpoint = 'mytrades'
        params['symbol'] = symbol
        return self.request(endpoint, method='POST', payload_params=params)

    # Margin Funding
    def new_offer(self, currency, amount, rate, period, direction, **params):
        """Submit a new offer.

        Docs: http://docs.bitfinex.com/#new-offer
        """
        endpoint = 'offer/new'
        params['currency'] = currency
        params['amount'] = amount
        params['rate'] = rate
        params['period'] = period
        params['direction'] = direction
        return self.request(endpoint, method='POST', payload_params=params)

    def cancel_offer(self, offer_id, **params):
        """Cancel an offer.

        Docs: http://docs.bitfinex.com/#cancel-offer
        """
        endpoint = 'offer/cancel'
        params['offer_id'] = offer_id
        return self.request(endpoint, method='POST', payload_params=params)

    def offer_status(self, offer_id, **params):
        """Get the status of an offer: active, cancelled, amount executed, etc.

        Docs: http://docs.bitfinex.com/#offer-status
        """
        endpoint = 'offer/status'
        params['offer_id'] = offer_id
        return self.request(endpoint, method='POST', payload_params=params)

    def active_credits(self, **params):
        """View your active offers.

        Docs: http://docs.bitfinex.com/#active-credits
        """
        endpoint = 'offers'
        return self.request(endpoint, method='POST', payload_params=params)

    def active_funding_used_in_a_margin_position(self, **params):
        """View your funding currently borrowed and used in a margin position.

        Docs: http://docs.bitfinex.com/#active-funding-used-in-a-margin-position
        """
        endpoint = 'taken_funds'
        return self.request(endpoint, method='POST', payload_params=params)

    def total_taken_funds(self, **params):
        """View the total of your active-funding used in your position(s).

        Docs: http://docs.bitfinex.com/#total-taken-funds
        """
        endpoint = 'total_taken_funds'
        return self.request(endpoint, method='POST', payload_params=params)

    def close_margin_funding(self, **params):
        """Return the funding taken in a margin position.

        Docs: http://docs.bitfinex.com/#total-taken-funds
        """
        endpoint = 'funding/close'
        return self.request(endpoint, method='POST', payload_params=params)

    # Wallet Balances
    def wallet_balances(self, **params):
        """See your balances.

        Docs: http://docs.bitfinex.com/#wallet-balances
        """
        endpoint = 'balances'
        return self.request(endpoint, method='POST', payload_params=params)

    # Margin Information
    def margin_information(self, **params):
        """See your trading wallet information for margin trading.

        Docs: http://docs.bitfinex.com/#margin-information
        """
        endpoint = 'margin_infos'
        return self.request(endpoint, method='POST', payload_params=params)

    # Transfer Between Wallets
    def wallet_transfer(self, amount, currency, walletfrom, walletto, **params):
        """Move available balances between your wallets.

        Docs: https://bitfinex.readme.io/v1/reference#rest-auth-transfer-between-wallets
        """
        endpoint = 'transfer'
        params['amount'] = amount
        params['currency'] = currency
        params['walletfrom'] = walletfrom
        params['walletto'] = walletto
        return self.request(endpoint, method='POST', payload_params=params)

    # Withdrawal
    def withdrawal(self, withdraw_type, walletselected, amount, **params):
        """Request a withdrawal from one of your wallets.

        Docs: http://docs.bitfinex.com/#withdrawal
        """
        endpoint = 'withdraw'
        params['withdraw_type'] = withdraw_type
        params['walletselected'] = walletselected
        params['amount'] = amount
        return self.request(endpoint, method='POST', payload_params=params)
# Provides functionality for access to core BITFINEX API calls
# Provides functionality for access to core BITFINEX API calls
class API(EndpointsMixin, object):
    """Core transport for the Bitfinex v1 REST API.

    Mixes in :class:`EndpointsMixin`, which supplies one wrapper method per
    REST endpoint; this class provides authentication and HTTP transport.
    """

    def __init__(self, environment='live', key=None, secret_key=None):
        """ Instantiates an instance of BitfinexPy's API wrapper """
        if environment == 'live':
            self.api_url = 'https://api.bitfinex.com/v1/'
        else:
            # for future, access to a demo account.
            pass
        self.key = key
        # hmac.new requires a bytes key, so encode the secret once up front.
        self.secret_key = bytes(secret_key, 'utf-8')
        self.client = requests.Session()

    def request(self, endpoint, method='GET', auth=True, params=None, payload_params=None):
        """Return the decoded JSON response from Bitfinex's API.

        Args:
            endpoint (str): path below the API root, e.g. ``'pubticker/btcusd'``.
            method (str): HTTP verb, case-insensitive.
            auth (bool): when True, sign the request with the API key/secret.
            params (dict): query-string parameters.
            payload_params (dict): extra fields merged into the signed payload.

        Raises:
            BitfinexError: when the API answers with status >= 400.
        """
        method = method.lower()
        url = '%s%s' % (self.api_url, endpoint)
        request_args = {'params': params}
        if auth:
            payload_object = {
                "request": "/v1/%s" % endpoint,
                "nonce": str(time.time() * 1000000)  # update nonce each POST request
            }
            if payload_params is not None:
                payload_object.update(payload_params)
            # v1 auth scheme: base64(JSON payload), HMAC-SHA384 signed.
            payload = base64.b64encode(bytes(json.dumps(payload_object), "utf-8"))
            signature = hmac.new(self.secret_key, msg=payload, digestmod=hashlib.sha384).hexdigest()
            request_args['headers'] = {
                'X-BFX-APIKEY': self.key,
                'X-BFX-PAYLOAD': payload,
                'X-BFX-SIGNATURE': signature
            }
        func = getattr(self.client, method)
        try:
            response = func(url, **request_args)
            content = response.json()
        except Exception as e:
            # BUG FIX: previously this only printed and fell through, after
            # which the unbound `response` raised a NameError that masked the
            # real failure.  Log and re-raise the original exception instead.
            print("Failed to get the response because %s. \
The request url is %s" % (str(e), url))
            raise
        # error message
        if response.status_code >= 400:
            print("%s error_response : %s" % (str(response.status_code), content))
            raise BitfinexError(response.status_code, content)
        return content
# HTTPS Streaming
# HTTPS Streaming
class Streamer:
    """Poll the Bitfinex public ticker over HTTPS and report tick changes."""
    # TODO: WS stream reader

    def __init__(self, symbol, environment='live', heartbeat=1.0):
        """Instantiate BitfinexPy's polling "streamer" for one symbol."""
        self.connected = False
        if environment == 'live':
            self.api_url = 'https://api.bitfinex.com/v1/pubticker/' + symbol
        else:
            # for future, access to a demo account.
            pass
        self.heartbeat = heartbeat
        self.client = requests.Session()

    def start(self, **params):
        """Poll the ticker until disconnected, dispatching each changed tick."""
        watched = ['last_price', 'bid', 'volume', 'ask', 'low', 'high']
        self.connected = True
        request_args = {}
        previous = dict.fromkeys(watched)  # no tick seen yet
        while self.connected:
            response = self.client.get(self.api_url, **request_args)
            tick = json.loads(response.content.decode('ascii'))
            if response.status_code != 200:
                self.on_error(tick)
            # Dispatch only when at least one watched field changed.
            if any(tick.get(key) != previous.get(key) for key in watched):
                self.on_success(tick)
                previous = tick
            time.sleep(self.heartbeat)

    def on_success(self, content):
        """Called when data is successfully retrieved from the stream."""
        print(content)
        return True

    def on_error(self, content):
        """Called when the stream returns a non-200 status code.

        Override this to handle your streaming data; the default stops the
        polling loop and prints the error body.
        """
        self.connected = False
        print(content)
        return
# Contains BITFINEX exception
# Contains BITFINEX exception
class BitfinexError(Exception):
    """Generic error class; raised for Bitfinex API error responses."""

    def __init__(self, status_code, error_response):
        # error_response is the decoded JSON error body; its 'error' field
        # carries the API's human-readable message.
        detail = error_response['error']
        message = "BITFINEX API returned error code %s (%s)" % (status_code, detail)
        super(BitfinexError, self).__init__(message)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
_to_complex = linear_operator_circulant._to_complex
class LinearOperatorCirculantBaseTest(object):
  """Common class for circulant tests.

  Mixin supplying shared helpers for the LinearOperatorCirculant test
  classes: a session/device override that installs the test FFT kernel
  mapping, and a brute-force spectrum -> dense matrix builder used to
  cross-check the operator implementation.
  """

  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """We overwrite the FFT operation mapping for testing."""
    # Delegate to the base TestCase implementation, then install the
    # test-only FFT kernel label map inside that device scope.
    with test.TestCase._constrain_devices_and_set_default(
        self, sess, use_gpu, force_gpu) as sess:
      with spectral_ops_test_util.fft_kernel_label_map():
        yield sess

  def _shape_to_spectrum_shape(self, shape):
    # If spectrum.shape = batch_shape + [N],
    # this creates an operator of shape batch_shape + [N, N]
    return shape[:-1]

  def _spectrum_to_circulant_1d(self, spectrum, shape, dtype):
    """Creates a circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape: Python list.  Desired shape of returned matrix.
      dtype: Type to cast the returned matrix to.

    Returns:
      Circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      # Degenerate 0 x 0 operator: nothing to compute.
      return array_ops.zeros(shape, dtype)
    # Explicitly compute the action of spectrum on basis vectors.
    # Column m of the matrix is IFFT[spectrum * FFT[e_m]].
    matrix_rows = []
    for m in range(domain_dimension):
      x = np.zeros([domain_dimension])
      # x is a basis vector.
      x[m] = 1.0
      fft_x = fft_ops.fft(x.astype(np.complex64))
      h_convolve_x = fft_ops.ifft(spectrum * fft_x)
      matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
class LinearOperatorCirculantTestSelfAdjointOperator(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when operator is self-adjoint.

  Real spectrum <==> Self adjoint operator.

  Note that when the spectrum is real, the operator may still be complex.
  """

  @property
  def _dtypes_to_test(self):
    # This operator will always be complex because, although the spectrum is
    # real, the matrix will not be real.
    return [dtypes.complex64]

  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    """Build the operator under test plus a brute-force dense reference."""
    shape = build_info.shape
    # For this test class, we are creating real spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # spectrum is bounded away from zero.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    if ensure_self_adjoint_and_pd:
      # Positive definiteness additionally requires a positive spectrum.
      spectrum = math_ops.abs(spectrum)
    # If dtype is complex, cast spectrum to complex.  The imaginary part will
    # be zero, so the operator will still be self-adjoint.
    spectrum = math_ops.cast(spectrum, dtype)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide static shape info to exercise the dynamic-shape code path.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        is_self_adjoint=True,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
    return operator, mat

  @test_util.run_deprecated_v1
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    # [1., 1j, -1j] is a Hermitian spectrum, so the dense matrix should be
    # real up to a few float32 epsilons.
    with self.cached_session():
      spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestHermitianSpectrum(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when the spectrum is Hermitian.

  Hermitian spectrum <==> Real valued operator.  We test both real and
  complex dtypes here though.  So in some cases the matrix will be complex
  but with zero imaginary part.
  """

  @property
  def _dtypes_to_test(self):
    return [dtypes.float32, dtypes.complex64]

  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    """Build the operator under test plus a brute-force dense reference."""
    shape = build_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)
    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft(pre_spectrum_c)
    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)
    spectrum = fft_ops.fft(h_c)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide static shape info to exercise the dynamic-shape code path.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum,
        input_output_dtype=dtype,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
    )
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
    return operator, mat

  @test_util.run_deprecated_v1
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    # [1., 1j, -1j] is a Hermitian spectrum, so the dense matrix should be
    # real up to a few float32 epsilons.
    with self.cached_session():
      spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestNonHermitianSpectrum(
    LinearOperatorCirculantBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant when the spectrum is not Hermitian.

  Non-Hermitian spectrum <==> Complex valued operator.
  We test only complex dtypes here.
  """

  @property
  def _dtypes_to_test(self):
    return [dtypes.complex64]

  # Skip Cholesky since we are explicitly testing non-hermitian
  # spectra.
  @property
  def _tests_to_skip(self):
    return ["cholesky"]

  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    """Build the operator under test plus a brute-force dense reference."""
    # Self-adjointness cannot be guaranteed with a generic complex spectrum,
    # so the hint is irrelevant here.
    del ensure_self_adjoint_and_pd
    shape = build_info.shape
    # Will be well conditioned enough to get accurate solves.
    spectrum = linear_operator_test_util.random_sign_uniform(
        shape=self._shape_to_spectrum_shape(shape),
        dtype=dtypes.complex64,
        minval=1.,
        maxval=2.)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide static shape info to exercise the dynamic-shape code path.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant(
        lin_op_spectrum, input_output_dtype=dtype)
    mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
    return operator, mat

  @test_util.run_deprecated_v1
  def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    # [1., 1j, -1j] is a Hermitian spectrum, so the dense matrix should be
    # real up to a few float32 epsilons.
    with self.cached_session():
      spectrum = math_ops.cast([1., 1j, -1j], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)

  @test_util.run_deprecated_v1
  def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
    # A positive real spectrum means all eigenvalues are positive real, so
    # the operator equals its adjoint and is positive definite.
    with self.cached_session() as sess:
      spectrum = math_ops.cast([6., 4, 2], dtypes.complex64)
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix, matrix_h = sess.run(
          [operator.to_dense(),
           linalg.adjoint(operator.to_dense())])
      self.assertAllClose(matrix, matrix_h)
      operator.assert_positive_definite().run()  # Should not fail
      operator.assert_self_adjoint().run()  # Should not fail

  @test_util.run_deprecated_v1
  def test_defining_operator_using_real_convolution_kernel(self):
    with self.cached_session():
      convolution_kernel = [1., 2., 1.]
      spectrum = fft_ops.fft(
          math_ops.cast(convolution_kernel, dtypes.complex64))
      # spectrum is shape [3] ==> operator is shape [3, 3]
      # spectrum is Hermitian ==> operator is real.
      operator = linalg.LinearOperatorCirculant(spectrum)
      # Allow for complex output so we can make sure it has zero imag part.
      self.assertEqual(operator.dtype, dtypes.complex64)
      matrix = operator.to_dense().eval()
      np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)

  @test_util.run_v1_only("currently failing on v2")
  def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
    with self.cached_session():
      # Make spectrum the FFT of a real convolution kernel h.  This ensures
      # that spectrum is Hermitian.
      h = linear_operator_test_util.random_normal(shape=(3, 4))
      spectrum = fft_ops.fft(math_ops.cast(h, dtypes.complex64))
      operator = linalg.LinearOperatorCirculant(
          spectrum, input_output_dtype=dtypes.complex64)
      matrix = operator.to_dense()
      imag_matrix = math_ops.imag(matrix)
      eps = np.finfo(np.float32).eps
      np.testing.assert_allclose(
          0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)

  @test_util.run_deprecated_v1
  def test_convolution_kernel_same_as_first_row_of_to_dense(self):
    spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
    with self.cached_session():
      operator = linalg.LinearOperatorCirculant(spectrum)
      h = operator.convolution_kernel()
      c = operator.to_dense()
      self.assertAllEqual((2, 3), h.get_shape())
      self.assertAllEqual((2, 3, 3), c.get_shape())
      # The convolution kernel is the first column/row pattern of the
      # circulant matrix; compare against the dense form.
      self.assertAllClose(h.eval(), self.evaluate(c)[:, :, 0])

  @test_util.run_deprecated_v1
  def test_assert_non_singular_fails_for_singular_operator(self):
    # A zero in the spectrum is a zero eigenvalue ==> singular operator.
    spectrum = math_ops.cast([0, 4, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Singular operator"):
        operator.assert_non_singular().run()

  @test_util.run_deprecated_v1
  def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
    spectrum = math_ops.cast([-3j, 4, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      operator.assert_non_singular().run()  # Should not fail

  @test_util.run_deprecated_v1
  def test_assert_positive_definite_fails_for_non_positive_definite(self):
    # The eigenvalue 2j has zero real part ==> not positive definite.
    spectrum = math_ops.cast([6., 4, 2j], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      with self.assertRaisesOpError("Not positive definite"):
        operator.assert_positive_definite().run()

  @test_util.run_deprecated_v1
  def test_assert_positive_definite_does_not_fail_when_pos_def(self):
    spectrum = math_ops.cast([6., 4, 2j + 2], dtypes.complex64)
    operator = linalg.LinearOperatorCirculant(spectrum)
    with self.cached_session():
      operator.assert_positive_definite().run()  # Should not fail

  def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
    # A real spectrum implies a self-adjoint operator, so a contradictory
    # hint must be rejected at construction time.
    spectrum = [1., 2.]
    with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
      linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)

  def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
    spectrum = [1., 2.]
    operator = linalg.LinearOperatorCirculant(spectrum)
    self.assertTrue(operator.is_self_adjoint)
class LinearOperatorCirculant2DBaseTest(object):
  """Common class for 2D circulant tests.

  Mixin supplying shared helpers for the LinearOperatorCirculant2D test
  classes: the FFT-kernel session override, the fixed list of operator
  shapes to test, and a brute-force spectrum -> dense block-circulant
  matrix builder used as a cross check.
  """

  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """We overwrite the FFT operation mapping for testing."""
    # Delegate to the base TestCase implementation, then install the
    # test-only FFT kernel label map inside that device scope.
    with test.TestCase._constrain_devices_and_set_default(
        self, sess, use_gpu, force_gpu) as sess:
      with spectral_ops_test_util.fft_kernel_label_map():
        yield sess

  @property
  def _operator_build_infos(self):
    build_info = linear_operator_test_util.OperatorBuildInfo
    # non-batch operators (n, n) and batch operators.
    return [
        build_info((0, 0)),
        build_info((1, 1)),
        build_info((1, 6, 6)),
        build_info((3, 4, 4)),
        build_info((2, 1, 3, 3))
    ]

  def _shape_to_spectrum_shape(self, shape):
    """Get a spectrum shape that will make an operator of desired shape."""
    # This 2D block circulant operator takes a spectrum of shape
    #   batch_shape + [N0, N1],
    # and creates and operator of shape
    #   batch_shape + [N0*N1, N0*N1]
    # The mapping below is a fixed table for the shapes listed in
    # _operator_build_infos; any other shape is a programming error.
    if shape == (0, 0):
      return (0, 0)
    elif shape == (1, 1):
      return (1, 1)
    elif shape == (1, 6, 6):
      return (1, 2, 3)
    elif shape == (3, 4, 4):
      return (3, 2, 2)
    elif shape == (2, 1, 3, 3):
      return (2, 1, 3, 1)
    else:
      raise ValueError("Unhandled shape: %s" % shape)

  def _spectrum_to_circulant_2d(self, spectrum, shape, dtype):
    """Creates a block circulant matrix from a spectrum.

    Intentionally done in an explicit yet inefficient way.  This provides a
    cross check to the main code that uses fancy reshapes.

    Args:
      spectrum: Float or complex `Tensor`.
      shape: Python list.  Desired shape of returned matrix.
      dtype: Type to cast the returned matrix to.

    Returns:
      Block circulant (batch) matrix of desired `dtype`.
    """
    spectrum = _to_complex(spectrum)
    spectrum_shape = self._shape_to_spectrum_shape(shape)
    domain_dimension = spectrum_shape[-1]
    if not domain_dimension:
      # Degenerate 0 x 0 operator: nothing to compute.
      return array_ops.zeros(shape, dtype)
    block_shape = spectrum_shape[-2:]
    # Explicitly compute the action of spectrum on basis vectors.
    # Each (n0, n1) basis vector yields one matrix column via 2D FFT.
    matrix_rows = []
    for n0 in range(block_shape[0]):
      for n1 in range(block_shape[1]):
        x = np.zeros(block_shape)
        # x is a basis vector.
        x[n0, n1] = 1.0
        fft_x = fft_ops.fft2d(x.astype(np.complex64))
        h_convolve_x = fft_ops.ifft2d(spectrum * fft_x)
        # We want the flat version of the action of the operator on a basis
        # vector, not the block version.
        h_convolve_x = array_ops.reshape(h_convolve_x, shape[:-1])
        matrix_rows.append(h_convolve_x)
    matrix = array_ops.stack(matrix_rows, axis=-1)
    return math_ops.cast(matrix, dtype)
class LinearOperatorCirculant2DTestHermitianSpectrum(
    LinearOperatorCirculant2DBaseTest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Test of LinearOperatorCirculant2D when the spectrum is Hermitian.

  Hermitian spectrum <==> Real valued operator.  We test both real and
  complex dtypes here though.  So in some cases the matrix will be complex
  but with zero imaginary part.
  """

  @property
  def _dtypes_to_test(self):
    return [dtypes.float32, dtypes.complex64]

  def _operator_and_matrix(
      self, build_info, dtype, use_placeholder,
      ensure_self_adjoint_and_pd=False):
    """Build the 2D operator under test plus a brute-force dense reference."""
    shape = build_info.shape
    # For this test class, we are creating Hermitian spectrums.
    # We also want the spectrum to have eigenvalues bounded away from zero.
    #
    # pre_spectrum is bounded away from zero.
    pre_spectrum = linear_operator_test_util.random_uniform(
        shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
    pre_spectrum_c = _to_complex(pre_spectrum)
    # Real{IFFT[pre_spectrum]}
    #  = IFFT[EvenPartOf[pre_spectrum]]
    # is the IFFT of something that is also bounded away from zero.
    # Therefore, FFT[pre_h] would be a well-conditioned spectrum.
    pre_h = fft_ops.ifft2d(pre_spectrum_c)
    # A spectrum is Hermitian iff it is the DFT of a real convolution kernel.
    # So we will make spectrum = FFT[h], for real valued h.
    h = math_ops.real(pre_h)
    h_c = _to_complex(h)
    spectrum = fft_ops.fft2d(h_c)
    lin_op_spectrum = spectrum
    if use_placeholder:
      # Hide static shape info to exercise the dynamic-shape code path.
      lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
    operator = linalg.LinearOperatorCirculant2D(
        lin_op_spectrum,
        is_positive_definite=True if ensure_self_adjoint_and_pd else None,
        is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
        input_output_dtype=dtype)
    mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
    return operator, mat
class LinearOperatorCirculant2DTestNonHermitianSpectrum(
LinearOperatorCirculant2DBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@property
def _dtypes_to_test(self):
  # A non-Hermitian spectrum always yields a complex operator.
  return [dtypes.complex64]
@property
def _tests_to_skip(self):
  # Cholesky needs a self-adjoint positive-definite operator, which a
  # non-Hermitian spectrum cannot guarantee.
  return ["cholesky"]
def _operator_and_matrix(
    self, build_info, dtype, use_placeholder,
    ensure_self_adjoint_and_pd=False):
  """Build the 2D operator under test plus a brute-force dense reference."""
  # Self-adjointness cannot be guaranteed with a generic complex spectrum,
  # so the hint is irrelevant here.
  del ensure_self_adjoint_and_pd
  shape = build_info.shape
  # Will be well conditioned enough to get accurate solves.
  spectrum = linear_operator_test_util.random_sign_uniform(
      shape=self._shape_to_spectrum_shape(shape),
      dtype=dtype,
      minval=1.,
      maxval=2.)
  lin_op_spectrum = spectrum
  if use_placeholder:
    # Hide static shape info to exercise the dynamic-shape code path.
    lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
  operator = linalg.LinearOperatorCirculant2D(
      lin_op_spectrum, input_output_dtype=dtype)
  mat = self._spectrum_to_circulant_2d(spectrum, shape, dtype=dtype)
  return operator, mat
@test_util.run_deprecated_v1
def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
  # A real Hermitian spectrum should produce a matrix with (numerically)
  # zero imaginary part that equals its own transpose.
  with self.cached_session() as sess:
    # This is a real and hermitian spectrum.
    spectrum = [[1., 2., 2.], [3., 4., 4.], [3., 4., 4.]]
    operator = linalg.LinearOperatorCirculant(spectrum)
    matrix_tensor = operator.to_dense()
    self.assertEqual(matrix_tensor.dtype,
                     linear_operator_circulant._DTYPE_COMPLEX)
    matrix_t = array_ops.matrix_transpose(matrix_tensor)
    imag_matrix = math_ops.imag(matrix_tensor)
    matrix, matrix_transpose, imag_matrix = sess.run(
        [matrix_tensor, matrix_t, imag_matrix])
    np.testing.assert_allclose(0, imag_matrix, atol=1e-6)
    self.assertAllClose(matrix, matrix_transpose, atol=0)
@test_util.run_v1_only("b/120545219")
def test_real_spectrum_gives_self_adjoint_operator(self):
with self.cached_session() as sess:
# This is a real and hermitian spectrum.
spectrum = linear_operator_test_util.random_normal(
shape=(3, 3), dtype=dtypes.float32)
operator = linalg.LinearOperatorCirculant2D(spectrum)
matrix_tensor = operator.to_dense()
self.assertEqual(matrix_tensor.dtype,
linear_operator_circulant._DTYPE_COMPLEX)
matrix_h = linalg.adjoint(matrix_tensor)
matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
self.assertAllClose(matrix, matrix_h, atol=0)
@test_util.run_deprecated_v1
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([[0, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
@test_util.run_deprecated_v1
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([[-3j, 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
operator.assert_non_singular().run() # Should not fail
@test_util.run_deprecated_v1
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([[6., 4], [2j, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Not positive definite"):
operator.assert_positive_definite().run()
@test_util.run_deprecated_v1
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([[6., 4], [2j + 2, 3.]], dtypes.complex64)
operator = linalg.LinearOperatorCirculant2D(spectrum)
with self.cached_session():
operator.assert_positive_definite().run() # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [[1., 2.], [3., 4]]
with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant2D(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [[1., 2.], [3., 4]]
operator = linalg.LinearOperatorCirculant2D(spectrum)
self.assertTrue(operator.is_self_adjoint)
def test_invalid_dtype_raises(self):
spectrum = array_ops.constant(rng.rand(2, 2, 2))
with self.assertRaisesRegexp(TypeError, "must have dtype"):
linalg.LinearOperatorCirculant2D(spectrum)
def test_invalid_rank_raises(self):
spectrum = array_ops.constant(np.float32(rng.rand(2)))
with self.assertRaisesRegexp(ValueError, "must have at least 2 dimensions"):
linalg.LinearOperatorCirculant2D(spectrum)
class LinearOperatorCirculant3DTest(test.TestCase):
    """Simple test of the 3D case. See also the 1D and 2D tests."""

    @contextlib.contextmanager
    def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
        """We overwrite the FFT operation mapping for testing."""
        with test.TestCase._constrain_devices_and_set_default(
                self, sess, use_gpu, force_gpu) as sess:
            # Remap FFT kernels so the tests run with the intended kernel
            # labels regardless of device placement.
            with spectral_ops_test_util.fft_kernel_label_map():
                yield sess

    @test_util.run_deprecated_v1
    def test_real_spectrum_gives_self_adjoint_operator(self):
        with self.cached_session() as sess:
            # This is a real and hermitian spectrum.
            spectrum = linear_operator_test_util.random_normal(
                shape=(2, 2, 3, 5), dtype=dtypes.float32)
            operator = linalg.LinearOperatorCirculant3D(spectrum)
            # Leading dim 2 is a batch dim; block sizes 2, 3, 5 multiply into
            # the operator's (square) matrix dimension.
            self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)

            matrix_tensor = operator.to_dense()
            self.assertEqual(matrix_tensor.dtype,
                             linear_operator_circulant._DTYPE_COMPLEX)
            matrix_h = linalg.adjoint(matrix_tensor)

            matrix, matrix_h = self.evaluate([matrix_tensor, matrix_h])
            self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
            # Real spectrum => self-adjoint operator.
            self.assertAllClose(matrix, matrix_h)

    @test_util.run_deprecated_v1
    def test_defining_operator_using_real_convolution_kernel(self):
        with self.cached_session():
            convolution_kernel = linear_operator_test_util.random_normal(
                shape=(2, 2, 3, 5), dtype=dtypes.float32)
            # Convolution kernel is real ==> spectrum is Hermitian.
            spectrum = fft_ops.fft3d(
                math_ops.cast(convolution_kernel, dtypes.complex64))

            # spectrum is Hermitian ==> operator is real.
            operator = linalg.LinearOperatorCirculant3D(spectrum)
            self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), operator.shape)

            # Allow for complex output so we can make sure it has zero imag part.
            self.assertEqual(operator.dtype, dtypes.complex64)
            matrix = operator.to_dense().eval()
            self.assertAllEqual((2, 2 * 3 * 5, 2 * 3 * 5), matrix.shape)
            np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)

    @test_util.run_deprecated_v1
    def test_defining_spd_operator_by_taking_real_part(self):
        with self.cached_session() as sess:
            # S is real and positive.
            s = linear_operator_test_util.random_uniform(
                shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

            # Let S = S1 + S2, the Hermitian and anti-hermitian parts.
            # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
            # where ^H is the Hermitian transpose of the function:
            #    f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
            # We want to isolate S1, since
            #   S1 is Hermitian by construction
            #   S1 is real since S is
            #   S1 is positive since it is the sum of two positive kernels

            # IDFT[S] = IDFT[S1] + IDFT[S2]
            #         =      H1  +      H2
            # where H1 is real since it is Hermitian,
            # and H2 is imaginary since it is anti-Hermitian.
            ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

            # Throw away H2, keep H1.
            real_ifft_s = math_ops.real(ifft_s)

            # This is the perfect spectrum!
            # spectrum = DFT[H1]
            #          = S1,
            fft_real_ifft_s = fft_ops.fft3d(
                math_ops.cast(real_ifft_s, dtypes.complex64))

            # S1 is Hermitian ==> operator is real.
            # S1 is real ==> operator is self-adjoint.
            # S1 is positive ==> operator is positive-definite.
            operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

            # Allow for complex output so we can check operator has zero imag part.
            self.assertEqual(operator.dtype, dtypes.complex64)
            matrix, matrix_t = sess.run([
                operator.to_dense(),
                array_ops.matrix_transpose(operator.to_dense())
            ])
            operator.assert_positive_definite().run()  # Should not fail.
            np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
            self.assertAllClose(matrix, matrix_t)

            # Just to test the theory, get S2 as well.
            # This should create an imaginary operator.
            # S2 is anti-Hermitian ==> operator is imaginary.
            # S2 is real ==> operator is self-adjoint.
            imag_ifft_s = math_ops.imag(ifft_s)
            fft_imag_ifft_s = fft_ops.fft3d(
                1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
            operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

            matrix, matrix_h = sess.run([
                operator_imag.to_dense(),
                array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
            ])
            self.assertAllClose(matrix, matrix_h)
            np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)
if __name__ == "__main__":
    # Run all tests in this module through the TensorFlow test runner.
    test.main()
| |
import pbuffer
from struct import unpack
import socket
# Maps a 4-character Battle.net product code (as configured in
# config['login']['product']) to the numeric product identifier the BNLS
# server expects in its packets.
BNLSProtByte = {'STAR': 0x01,
                'SEXP': 0x02,
                'W2BN': 0x03,
                'D2DV': 0x04,
                'D2XP': 0x05,
                'JSTR': 0x06,
                'WAR3': 0x07,
                'W3XP': 0x08}
class __init__(pbuffer.conn):
    """BNLS (Battle.net Logon Server) connection plugin.

    NOTE(review): the class really is named ``__init__`` -- presumably the
    plugin loader instantiates modules via this name; confirm before renaming.

    Registers handlers for BNLS packets and for the bot's hashing events,
    frames outgoing packets with the 3-byte BNLS header (length word + packet
    id) and dispatches incoming packets to the ``recv_0x??`` methods.
    Inherits the ``insert_*``/``send``/``recv`` buffer helpers from
    ``pbuffer.conn``.
    """

    def __init__(self, bot):
        # Outgoing packet payload accumulated by the insert_* helpers.
        self.data = ''
        self.connected = False
        self.bot = bot
        self.bot.BNLS = self
        # Dispatch table: incoming BNLS packet id -> handler.
        self.bot.events.add(self, 'BNLSRecv', 0, 0,
                            0x02, self.recv_0x02,
                            0x03, self.recv_0x03,
                            0x04, self.recv_0x04,
                            0x10, self.recv_0x10,
                            0x1A, self.recv_0x1A,
                            0x0C, self.recv_0x0C,
                            0x0B, self.recv_0x0B)
        # Close the BNLS connection once the Battle.net logon (BNCS 0x0A)
        # has completed.
        self.bot.events.add(self, 'BNCSRecv', 1000, 0,
                            0x0A, self.BNLSclose)
        # Hashing requests from other plugins -> outgoing BNLS packets.
        self.bot.events.add(self, 'hashing', 'get', 0, 0,
                            'pwhash', self.send_0x0B,
                            'new_pwhash', self.create_account,
                            'cdkey', self.send_0x0C,
                            'game', self.send_0x1A,
                            'nls_logon', self.send_nls_start,
                            'nls_logon_from_create', self.send_0x02,
                            'nls_logon_proof', self.send_0x03,
                            'nls_create', self.send_0x04)
        self.bot.events.add(self, 'bot', 0, 0,
                            'connect', self.BNLSconnect,
                            'disc', self.close)
        if int(self.bot.config['plugins']['gui_wx']) == 0:  # GUI disabled
            # No GUI to trigger the connect, so connect immediately.
            self.bot.connect()

    def BNLSconnect(self, *rest):
        """Open the TCP connection to the configured BNLS server (port 9367)."""
        self.bot.addchat('Connecting...')
        try:
            self.connect(self.bot.config['login']['bnlsserver'], 9367)
            self.bot.add_socket(self.socket, self.BNLSrecv)
        except socket.error, (errno, descr):
            self.bot.addchat('error', 'Error #'+str(errno)+': '+descr)
        except socket.herror, (errno, descr):
            self.bot.addchat('error', 'Address-related error #'+str(errno)+': '+descr)
        except socket.gaierror, (errno, descr):
            self.bot.addchat('error', 'Failed to get address, got error #'+\
                             str(errno)+': '+descr)
        else:
            self.bot.addchat('success', 'Connected to BNLS')
            # Kick off the handshake by requesting the version byte.
            self.send_0x10()

    def send_nls_start(self, packet):
        # NLS logon: announce the logon type, then request the client key.
        self.bot.BNLS.send_0x0D()
        self.bot.BNLS.send_0x02()

    def send_0x02(self, packet=None):
        """BNLS_LOGONCHALLENGE: send account name and (lowercased) password."""
        self.insert_string(self.bot.config['login']['username'])
        try:
            self.insert_string(self.bot.config['login']['password'].lower())
        except KeyError, missing:
            # Ask the user for the missing config key, then retry this packet.
            self.need_setting(missing, self.send_0x02)
            return
        self.BNLSsend(0x02)

    def send_0x03(self, packet=None):
        """BNLS_LOGONPROOF: send the salt and server key received from BNCS."""
        self.insert_raw(self.bot.status['salt'])
        self.insert_raw(self.bot.status['ckB'])
        self.BNLSsend(0x03)

    def send_0x04(self, packet=None):
        """BNLS_CREATEACCOUNT: request account-creation data."""
        self.insert_string(self.bot.config['login']['username'])
        self.insert_string(self.bot.config['login']['password'].lower())
        self.BNLSsend(0x04)

    def send_0x0B(self, packet=None, flags=0x06):
        """BNLS_HASHDATA: request a (possibly double) password hash.

        flags: 0x02 = double hash with client/server tokens, 0x04 = cookie.
        """
        try:
            self.insert_long(len(self.bot.config['login']['password']))
        except KeyError, missing:
            self.need_setting(missing, self.send_0x0B)
            return
        self.insert_long(flags)  # 0x02: double hash, 0x04: cookie hash
        self.insert_raw(self.bot.config['login']['password'].lower())
        if (flags & 0x02) == 0x02:
            self.insert_long(self.bot.status['ctoken'])
            self.insert_long(self.bot.status['stoken'])
        # NOTE(review): the flags value doubles as the cookie here, so the
        # reply can be told apart by its flags field -- see recv_0x0B.
        self.insert_long(flags)  # cookie
        self.BNLSsend(0x0B)

    def send_0x0C(self):
        """BNLS_CDKEY_EX: ask BNLS to hash the configured CD-key(s)."""
        self.insert_long(0x00)
        # Expansion products send two keys; everything else sends one.
        if self.bot.config['login']['product'] in ['D2XP', 'W3XP']:
            self.insert_byte(0x02)
        else:
            self.insert_byte(0x01)
        self.insert_long(0x01)
        self.insert_long(self.bot.status['stoken'])
        try:
            self.insert_string(self.bot.config['login']['cdkey'])
        except KeyError, missing:
            self.need_setting(missing, self.send_0x0C)
            return
        if self.bot.config['login']['product'] in ['D2XP', 'W3XP']:
            try:
                self.insert_string(self.bot.config['login']['expcdkey'])
            except KeyError, missing:
                self.need_setting(missing, self.send_0x0C)
                return
        self.BNLSsend(0x0C)

    def send_0x0D(self, packet=None):
        """BNLS_CHOOSENLSREVISION: announce the NLS logon type from BNCS."""
        self.insert_long(self.bot.status['logontype'])
        self.BNLSsend(0x0D)

    def send_0x10(self, packet=None):
        """BNLS_REQUESTVERSIONBYTE for the configured product."""
        try:
            self.insert_long(BNLSProtByte[self.bot.config['login']['product']])
        except KeyError, missing:
            self.need_setting(missing, self.send_0x10)
            return
        self.BNLSsend(0x10)

    def send_0x1A(self, packet=None):
        """BNLS_VERSIONCHECKEX2: request a version check for the product."""
        self.insert_long(BNLSProtByte[self.bot.config['login']['product']])
        self.insert_long(0x00000000)
        self.insert_long(0x00000000)
        self.insert_raw(self.bot.status['mpqtime'])
        self.insert_string(self.bot.status['verfile'])
        self.insert_string(self.bot.status['valstring'])
        self.BNLSsend(0x1A)

    def recv_0x02(self, packet):
        # 32-byte client key (A) for the NLS logon.
        data = unpack('<32s', packet['data'])
        self.bot.status['ckA'] = data[0]
        self.bot.events.call('hashing', 'recv', 'nls_logon')

    def recv_0x03(self, packet):
        # 20-byte logon proof (M1).
        data = unpack('<20s', packet['data'])
        self.bot.status['M1'] = data[0]
        self.bot.events.call('hashing', 'recv', 'nls_logon_proof')

    def recv_0x04(self, packet):
        # 64 bytes of account-creation data.
        data = unpack('<64s', packet['data'])
        self.bot.status['new_wc3_account'] = data[0]
        self.bot.events.call('hashing', 'recv', 'nls_create')

    def recv_0x0B(self, packet):
        # 20-byte hash followed by the echoed cookie (== the request flags,
        # see send_0x0B), which tells us which kind of hash this is.
        results = unpack('<20sl', packet['data'])
        if (results[1] & 0x02) == 0x02:
            self.bot.status['pwhash'] = results[0]
            self.bot.events.call('hashing', 'recv', 'pwhash')
        else:
            self.bot.status['new_pwhash'] = results[0]
            self.bot.events.call('hashing', 'recv', 'new_pwhash')
        return False

    def recv_0x0C(self, packet):
        """Handle the CD-key hash reply; store tokens and key hash(es)."""
        fmt = '<L2B2L36s'
        # A valid single-key reply is at least 50 bytes.
        if packet['length'] < 50:
            self.bot.addchat('CD-keys failed to hash.')
            return False
        if self.bot.config['login']['product'] in ['D2XP', 'W3XP']:
            # Two-key products carry a second 36-byte key block.
            fmt = fmt+'L36s'
            if packet['length'] < 90:
                self.bot.addchat('CD-keys failed to hash.')
                return False
        results = unpack(fmt, packet['data'])
        # Requested key count must equal successfully hashed key count.
        if results[1] != results[2]:
            self.bot.addchat('CD-keys failed to hash.')
            return False
        self.bot.status['ctoken'] = results[4]
        self.bot.status['keyhash'] = results[5]
        if results[1] == 2:
            self.bot.status['expkeyhash'] = results[7]
        self.bot.events.call('hashing', 'recv', 'keyhash')
        return False

    def recv_0x10(self, packet):
        # Second long of the reply is the product version byte.
        data = unpack('<2L', packet['data'])
        self.bot.status['verbyte'] = data[1]
        self.bot.addchat('Verbyte received ('+hex(self.bot.status['verbyte'])+')')

    def recv_0x1A(self, packet):
        """Handle the version-check reply (version, checksum, stat string)."""
        # Variable-length stat string: total minus the fixed 21 bytes.
        sl1 = packet['length'] - 21
        if sl1 < 0:
            self.bot.addchat('Version check failed.')
            return 0
        results = unpack('<3L'+str(sl1)+'sx2L', packet['data'])
        self.bot.status['version'] = results[1]
        self.bot.status['checksum'] = results[2]
        self.bot.status['vcstatstring'] = results[3]
        self.bot.events.call('hashing', 'recv', 'game')
        return False

    def create_account(self):
        # Cookie-only hash (no double hash) for account creation.
        self.send_0x0B(flags=0x04)

    def BNLSsend(self, packet):
        """Prepend the 3-byte BNLS header (length word + id) and transmit."""
        self.data = pbuffer.make_word(len(self.data) + 3) + chr(packet) + self.data
        self.send()

    def BNLSrecv(self):
        """Read one packet off the socket and dispatch it by id."""
        try:
            header = unpack('<HB', self.recv(3))
        except:  # socket died
            self.close()
            return
        # header[0] is the total length including the 3 header bytes.
        data = self.recv(header[0] - 3)
        self.bot.events.call('BNLSRecv', header[1],
                             [{'id': header[1],
                               'length': header[0] -3,
                               'data': data}])

    def BNLSclose(self, *rest):
        self.close()
| |
#!/usr/bin/env python3
import psycopg2
import sys
from PIL import Image
import os
import struct
import argparse
import math
def hex2rgb(hex):
    """Convert a hex color string to an ``(r, g, b)`` tuple of ints.

    Args:
        hex: six hex digits, with or without a leading ``#`` (e.g. "#1a2b3c").

    Returns:
        Tuple of three ints in 0..255.

    Raises:
        ValueError: if the remaining string is not valid hex of even length.
    """
    # The original sliced off the first character unconditionally, which
    # silently corrupted colors given without a leading hash. lstrip('#')
    # handles both forms.
    return struct.unpack('BBB', bytes.fromhex(hex.lstrip('#')))
def calculate_coverage_full_tiles(basemap_tiles_path, osm_tiles_path, zoom, schema, tile_size, tile_indices):
    """Count covered/uncovered basemap pixels on fully-contained tiles.

    For every tile index, compares the basemap tile against the OSM tile
    pixel by pixel: a basemap pixel (alpha != 0) counts as covered when the
    OSM tile also has a non-transparent pixel there, otherwise as uncovered.

    Returns:
        (covered_basemap_pixels, uncovered_basemap_pixels) tuple of ints.
    """
    covered = 0
    uncovered = 0
    for index in tile_indices:
        tile_rel_path = schema % (zoom, index[0], index[1])
        # Basemap tiles are opened as-is (expected RGBA); OSM tiles are
        # normalized to RGBA so the alpha channel is always present.
        basemap_pixels = Image.open(basemap_tiles_path + tile_rel_path).load()
        osm_pixels = Image.open(osm_tiles_path + tile_rel_path).convert('RGBA').load()
        for row in range(tile_size):
            for col in range(tile_size):
                _, _, _, basemap_alpha = basemap_pixels[col, row]
                _, _, _, osm_alpha = osm_pixels[col, row]
                if basemap_alpha != 0:  # basemap pixel present here
                    if osm_alpha != 0:
                        covered += 1  # OSM has it too
                    else:
                        uncovered += 1  # basemap only
    return covered, uncovered
def calculate_coverage_partial_tiles(municipality_tiles_path, basemap_tiles_path, osm_tiles_path, color, zoom, schema, tile_size, tile_indices):
    """Count covered/uncovered basemap pixels on partially-contained tiles.

    Same counting rule as calculate_coverage_full_tiles, but a pixel is only
    considered at all when the municipality mask tile matches the
    municipality's RGB color, so neighbouring municipalities on the same tile
    are excluded.

    Returns:
        (covered_basemap_pixels, uncovered_basemap_pixels) tuple of ints.
    """
    covered = 0
    uncovered = 0
    mask_r, mask_g, mask_b = hex2rgb(color)
    for index in tile_indices:
        tile_rel_path = schema % (zoom, index[0], index[1])
        mask_pixels = Image.open(municipality_tiles_path + tile_rel_path).convert('RGBA').load()
        basemap_pixels = Image.open(basemap_tiles_path + tile_rel_path).load()
        osm_pixels = Image.open(osm_tiles_path + tile_rel_path).convert('RGBA').load()
        for row in range(tile_size):
            for col in range(tile_size):
                mr, mg, mb, _ = mask_pixels[col, row]
                if (mr, mg, mb) != (mask_r, mask_g, mask_b):
                    # Pixel belongs to a different municipality (or none).
                    continue
                _, _, _, basemap_alpha = basemap_pixels[col, row]
                _, _, _, osm_alpha = osm_pixels[col, row]
                if basemap_alpha != 0:  # basemap pixel present here
                    if osm_alpha != 0:
                        covered += 1  # OSM has it too
                    else:
                        uncovered += 1  # basemap only
    return covered, uncovered
def get_latest_timestamp(tile_indices, full_schemata, zoom):
    """Return the newest modification time among the given tiles.

    Args:
        tile_indices: iterable of (x, y) tile indices.
        full_schemata: absolute path templates, each formatted with
            (zoom, x, y).
        zoom: zoom level to substitute into the templates.

    Returns:
        The newest mtime floored to whole seconds, or 0 when none of the
        tiles exist on disk.
    """
    newest = 0
    for path_template in full_schemata:
        for index in tile_indices:
            tile_path = path_template % (zoom, index[0], index[1])
            # Missing tiles are simply skipped.
            if os.path.exists(tile_path):
                newest = max(newest, os.path.getmtime(tile_path))
    # We only need seconds precision
    return math.floor(newest)
def get_number_of_coverage_entries(cur, boundary_id):
    """Return how many coverage rows exist for the given boundary id."""
    query = "select count(*) from austria_building_coverage where boundary_id = %s"
    cur.execute(query, (boundary_id,))
    row = cur.fetchone()
    return row[0]
def get_latest_coverage_entry(cur, boundary_id):
    """Fetch the most recent coverage row for a boundary.

    Returns a tuple
    (id, latest_timestamp_epoch, covered_basemap_pixels,
     total_basemap_pixels, coverage), or None when the boundary has no
    coverage entries yet.
    """
    cur.execute("""
        select c1.id as id, extract(epoch from c1.timestamp) as latest_timestamp,
        c1.covered_basemap_pixels, c1.total_basemap_pixels, c1.coverage
        from austria_building_coverage c1
        where boundary_id = %s
        and timestamp =
        (select max(timestamp) from austria_building_coverage c2
        where c2.boundary_id = c1.boundary_id)
        """,
        (boundary_id,))
    return cur.fetchone()
def update_coverage_entry_timestamp(cur, conn, entry_id, timestamp):
    """Overwrite the timestamp of an existing coverage row and commit.

    The timestamp is rounded to whole seconds before being stored.
    """
    params = ("%.0f" % timestamp, entry_id,)
    cur.execute(
        "update austria_building_coverage set timestamp = to_timestamp(%s) where id = %s",
        params)
    conn.commit()
def update_coverage_high_level(cur, conn, boundaries_updated):
    """Recompute coverage for every parent of the updated boundaries.

    For each parent boundary (e.g. the district of an updated municipality)
    the latest coverage of all its children is summed into a new coverage
    entry, or -- when the pixel counts did not actually change -- the
    timestamp of the existing latest entry is bumped instead.

    Args:
        cur: database cursor.
        conn: database connection (used to commit inserts).
        boundaries_updated: list of boundary ids whose coverage changed.

    Returns:
        List of parent boundary ids whose coverage changed, so the caller
        can propagate the update one level further up the hierarchy.
    """
    # Record boundaries that have been updated
    boundaries_coverage_updated = []
    if len(boundaries_updated) > 0:
        # Find the distinct parents of all updated boundaries.
        cur.execute("""
            select b.id
            from austria_admin_boundaries b
            left join austria_admin_boundaries m on (m.parent = b.id)
            where m.id = ANY(%s)
            group by b.id
            """,
            (boundaries_updated,))
        boundaries_to_update = cur.fetchall()
        for boundary in boundaries_to_update:
            boundary_id = boundary[0]
            number_of_entries = get_number_of_coverage_entries(cur, boundary_id)
            latest_entry = get_latest_coverage_entry(cur, boundary_id)
            # Sum the most recent coverage entry of every child boundary and
            # take the newest child timestamp.
            cur.execute("""select extract(epoch from max(c.timestamp)), sum(c.covered_basemap_pixels), sum(c.total_basemap_pixels)
                from austria_admin_boundaries parent
                left join austria_admin_boundaries child on (child.parent = parent.id)
                left join austria_building_coverage c on (c.boundary_id = child.id)
                where c.timestamp = (select max(timestamp) from austria_building_coverage c2 where c.boundary_id = c2.boundary_id)
                and parent.id = %s
                """,
                (boundary_id,))
            result = cur.fetchone()
            if result is not None and result[0] is not None:
                # Calculate district coverage and avoid division by zero
                if result[1] > 0:
                    boundary_coverage = result[1] / result[2] * 100.0
                else:
                    boundary_coverage = 0.0
                # Insert a fresh entry only when the pixel counts changed.
                if latest_entry is None or result[1] != latest_entry[2] or result[2] != latest_entry[3]:
                    boundaries_coverage_updated.append(boundary_id)
                    print("Calculated coverage of boundary #%s (coverage: %.2f percent)." % (boundary_id, boundary_coverage))
                    cur.execute("insert into austria_building_coverage "
                                "(boundary_id, timestamp, covered_basemap_pixels, total_basemap_pixels, coverage) "
                                "values ("
                                "%s, to_timestamp(%s), %s, %s, %s"
                                ")",
                                (
                                    boundary_id,
                                    "%.0f" % result[0],
                                    result[1],
                                    result[2],
                                    boundary_coverage,
                                )
                                )
                    conn.commit()
                elif number_of_entries > 1:
                    # Counts unchanged: bump the latest entry's timestamp,
                    # but never touch the single first-day entry.
                    boundaries_coverage_updated.append(boundary_id)
                    update_coverage_entry_timestamp(cur, conn, latest_entry[0], result[0])
                else:
                    print("Boundary %d marked for update but not affected." % boundary_id)
            else:
                print("Error: No coverage results of boundary %d could be calculated." % boundary_id)
    return boundaries_coverage_updated
def main():
    """Update coverage scores for all outdated municipalities.

    Parses CLI arguments, connects to the database, recomputes the coverage
    score of every municipality whose tiles are newer than its latest
    coverage entry, and then propagates the scores up to districts, federal
    states and the whole country.
    """
    parser = argparse.ArgumentParser(description="Update the coverage scores of each outdated municipality.")
    parser.add_argument("-m", "--municipality-tiles-path", dest="municipality_tiles_path", required=True,
                        help="The path to the municipality tiles (with a trailing slash)")
    parser.add_argument("-b", "--basemap-tiles-path", dest="basemap_tiles_path", required=True,
                        help="The path to the basemap tiles (with a trailing slash)")
    parser.add_argument("-o", "--osm-tiles-path", dest="osm_tiles_path", required=True,
                        help="The path to the OSM tiles (with a trailing slash)")
    parser.add_argument("-H", "--hostname", dest="hostname", required=False, help="The database hostname")
    parser.add_argument("-d", "--database", dest="database", nargs='?', default="gis", help="The name of the database")
    parser.add_argument("-u", "--user", dest="user", required=False, help="The database user")
    parser.add_argument("-p", "--password", dest="password", required=False, help="The database password")
    parser.add_argument("-O", "--onlyhighlevel", action="store_true",
                        help="Set this if you want to update only the high-level boundaries (districts, federal states,"
                             "the whole country) from the current municipality coverage scores.")
    args = parser.parse_args()

    municipality_tiles_path = os.path.expanduser(args.municipality_tiles_path)
    basemap_tiles_path = os.path.expanduser(args.basemap_tiles_path)
    osm_tiles_path = os.path.expanduser(args.osm_tiles_path)

    # Fixed tile geometry: 256px tiles at zoom 16, "z/x/y.png" layout.
    tile_size = 256
    zoom = 16
    schema = "%d/%d/%d.png"

    for path in [municipality_tiles_path, basemap_tiles_path, osm_tiles_path]:
        if not os.path.isdir(path):
            print("Path %s does not exist. Please specify a valid path." % (path))
            sys.exit(1)

    # Try to connect
    try:
        conn = psycopg2.connect(
            host=args.hostname,
            database=args.database,
            user=args.user,
            password=args.password
        )
    except Exception as e:
        print("I am unable to connect to the database (%s)." % str(e))
        sys.exit(1)
    cur = conn.cursor()

    # admin_level=3 rows are the municipalities.
    try:
        cur.execute("SELECT id, name, full_tiles, partial_tiles, color "
                    "from austria_admin_boundaries "
                    "where admin_level=3")
    except Exception as e:
        print("I can't SELECT! (%s)" % str(e))
        sys.exit(1)
    all_municipalities = cur.fetchall()
    print("%d municipalities found." % len(all_municipalities))

    if not args.onlyhighlevel:
        # IDs of municipalities whose coverage score actually changed.
        municipalities_coverage_updated = []
        for municipality in all_municipalities:
            id = municipality[0]
            name = municipality[1]
            full_tiles = municipality[2]
            partial_tiles = municipality[3]
            color = municipality[4]
            entry_count = get_number_of_coverage_entries(cur, id)
            latest_coverage_row = get_latest_coverage_entry(cur, id)
            # Newest mtime of any of the municipality's basemap/OSM tiles.
            latest_tile_timestamp = get_latest_timestamp(
                full_tiles + partial_tiles,
                [
                    basemap_tiles_path + schema,
                    osm_tiles_path + schema,
                ],
                zoom)
            # Recompute only when at least one tile is newer than the latest
            # stored coverage entry.
            if latest_coverage_row is None or latest_coverage_row[1] < latest_tile_timestamp:
                print("Municipality %s (ID %d) is out of date. Updating..." % (name, id))
                (covered_basemap_pixels_full, uncovered_basemap_pixels_full) = \
                    calculate_coverage_full_tiles(basemap_tiles_path, osm_tiles_path, zoom, schema, tile_size, full_tiles)
                (covered_basemap_pixels_partial, uncovered_basemap_pixels_partial) =\
                    calculate_coverage_partial_tiles(municipality_tiles_path, basemap_tiles_path, osm_tiles_path, color, zoom, schema, tile_size, partial_tiles)
                covered_basemap_pixels = covered_basemap_pixels_full + covered_basemap_pixels_partial
                uncovered_basemap_pixels = uncovered_basemap_pixels_full + uncovered_basemap_pixels_partial
                total_basemap_pixels = covered_basemap_pixels + uncovered_basemap_pixels
                # Calculate coverage and avoid a division by zero.
                if total_basemap_pixels > 0:
                    coverage = covered_basemap_pixels / total_basemap_pixels * 100.0
                else:
                    coverage = 0.0
                # Only insert the values if no entry exists yet or if the values have actually changed.
                if latest_coverage_row is None or latest_coverage_row[2] != covered_basemap_pixels or \
                        latest_coverage_row[3] != total_basemap_pixels:
                    municipalities_coverage_updated.append(id)
                    cur.execute("insert into austria_building_coverage "
                                "(boundary_id, timestamp, covered_basemap_pixels, total_basemap_pixels, coverage) "
                                "values ("
                                "%s, to_timestamp(%s), %s, %s, %s"
                                ")",
                                (
                                    id,
                                    "%.0f" % latest_tile_timestamp,
                                    covered_basemap_pixels,
                                    total_basemap_pixels,
                                    coverage,
                                )
                                )
                    conn.commit()
                # We update the timestamp only if the entry count is higher than one. The problem is that if a tile is
                # updated that is part of the municipality's tile set but does not affect the municipality, the
                # timestamp of the last coverage is simply updated. That may lead to the case where some municipalities
                # do not have an austria_building_coverage entry on the first day.
                elif entry_count > 1:
                    print("The latest timestamp of the tiles of municipality %s has changed but these changes did not "
                          "affect this municipality. Only updating the timestsamp of entry %d." % (name, latest_coverage_row[0]))
                    municipalities_coverage_updated.append(id)
                    update_coverage_entry_timestamp(cur, conn, latest_coverage_row[0], latest_tile_timestamp)
                else:
                    print("The latest timestamp of the tiles of municipality %s has changed but these changes did not "
                          "affect this municipality. Not updating the timestamp anyway because the municipality has "
                          "only one coverage score entry. Updating the timestamp would cause the municipality not to "
                          "have a score entry on the first day." % name)

    # Alright, all municipalities updated. Now let's update the total coverage scores of districts, federal states and
    # the whole contry where necessary.
    if args.onlyhighlevel:
        # Treat every municipality as updated so all parents get recomputed.
        municipalities_coverage_updated = []
        for municipality in all_municipalities:
            municipalities_coverage_updated.append(municipality[0])

    # Update districts.
    districts_updated = update_coverage_high_level(cur, conn, municipalities_coverage_updated)
    # Update federal states.
    states_updated = update_coverage_high_level(cur, conn, districts_updated)
    # Update the whole country.
    update_coverage_high_level(cur, conn, states_updated)
if __name__ == "__main__":
    # Script entry point.
    main()
| |
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the nova-status CLI interfaces.
"""
import fixtures
import mock
from six.moves import StringIO
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
from oslo_utils import uuidutils
from nova.cmd import status
import nova.conf
from nova import context
# NOTE(mriedem): We only use objects as a convenience to populate the database
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests import uuidsentinel as uuids
# Module-level handle on nova's global configuration object.
CONF = nova.conf.CONF
class TestNovaStatusMain(test.NoDBTestCase):
    """Tests for the basic nova-status command infrastructure."""

    def setUp(self):
        super(TestNovaStatusMain, self).setUp()
        # Capture everything written to stdout for assertions.
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))

    # Note: decorators are applied bottom-up, so the mocks arrive in the
    # order (mock_CONF, mock_parse_args).
    @mock.patch.object(status.config, 'parse_args')
    @mock.patch.object(status, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        # Helper: run status.main() for the given CLI category and assert
        # its return code and option registration.
        mock_CONF.category.name = category_name
        return_value = status.main()

        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opt.assert_called_once_with(
            status.category_opt)

    @mock.patch.object(status.version, 'version_string_with_package',
                       return_value="x.x.x")
    def test_main_version(self, mock_version_string):
        # 'version' category prints the package version string.
        self._check_main(category_name='version')
        self.assertEqual("x.x.x\n", self.output.getvalue())

    @mock.patch.object(status.cmd_common, 'print_bash_completion')
    def test_main_bash_completion(self, mock_print_bash):
        self._check_main(category_name='bash-completion')
        mock_print_bash.assert_called_once_with(status.CATEGORIES)

    @mock.patch.object(status.cmd_common, 'get_action_fn')
    def test_main(self, mock_get_action_fn):
        # main() must invoke the resolved action with its args/kwargs and
        # return whatever the action returns.
        mock_fn = mock.Mock()
        mock_fn_args = [mock.sentinel.arg]
        mock_fn_kwargs = {'key': mock.sentinel.value}
        mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
                                           mock_fn_kwargs)

        self._check_main(expected_return_value=mock_fn.return_value)
        mock_fn.assert_called_once_with(mock.sentinel.arg,
                                        key=mock.sentinel.value)

    @mock.patch.object(status.cmd_common, 'get_action_fn')
    def test_main_error(self, mock_get_action_fn):
        # An exception raised by the action yields exit code 255 and a
        # traceback on stdout.
        mock_fn = mock.Mock(side_effect=Exception('wut'))
        mock_get_action_fn.return_value = (mock_fn, [], {})

        self._check_main(expected_return_value=255)
        output = self.output.getvalue()
        self.assertIn('Error:', output)
        # assert the traceback is in the output
        self.assertIn('wut', output)
class TestPlacementCheck(test.NoDBTestCase):
"""Tests the nova-status placement checks.
These are done with mock as the ability to replicate all failure
domains otherwise is quite complicated. Using a devstack
environment you can validate each of these tests are matching
reality.
"""
def setUp(self):
super(TestPlacementCheck, self).setUp()
self.output = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
self.cmd = status.UpgradeCommands()
@mock.patch.object(keystone, "load_auth_from_conf_options")
def test_no_auth(self, auth):
"""Test failure when no credentials are specified.
Replicate in devstack: start devstack with or without
placement engine, remove the auth section from the [placement]
block in nova.conf.
"""
auth.side_effect = ks_exc.MissingAuthPlugin()
res = self.cmd._check_placement()
self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
self.assertIn('No credentials specified', res.details)
@mock.patch.object(keystone, "load_auth_from_conf_options")
@mock.patch.object(session.Session, 'get')
def _test_placement_get_interface(
self, expected_interface, mock_get, mock_auth):
def fake_get(path, *a, **kw):
self.assertEqual(mock.sentinel.path, path)
self.assertIn('endpoint_filter', kw)
self.assertEqual(expected_interface,
kw['endpoint_filter']['interface'])
return mock.Mock(autospec='requests.models.Response')
mock_get.side_effect = fake_get
self.cmd._placement_get(mock.sentinel.path)
mock_auth.assert_called_once_with(status.CONF, 'placement')
self.assertTrue(mock_get.called)
@mock.patch.object(keystone, "load_auth_from_conf_options")
@mock.patch.object(session.Session, 'get')
def test_placement_get_interface_default(self, mock_get, mock_auth):
"""Tests that None is specified for interface by default."""
self._test_placement_get_interface(None)
@mock.patch.object(keystone, "load_auth_from_conf_options")
@mock.patch.object(session.Session, 'get')
def test_placement_get_interface_internal(self, mock_get, mock_auth):
"""Tests that "internal" is specified for interface when configured."""
self.flags(os_interface='internal', group='placement')
self._test_placement_get_interface('internal')
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_auth(self, get):
"""Test failure when wrong credentials are specified or service user
doesn't exist.
Replicate in devstack: start devstack with or without
placement engine, specify random credentials in auth section
from the [placement] block in nova.conf.
"""
get.side_effect = ks_exc.Unauthorized()
res = self.cmd._check_placement()
self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
self.assertIn('Placement service credentials do not work', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_endpoint(self, get):
"""Test failure when no endpoint exists.
Replicate in devstack: start devstack without placement
engine, but create valid placement service user and specify it
in auth section of [placement] in nova.conf.
"""
get.side_effect = ks_exc.EndpointNotFound()
res = self.cmd._check_placement()
self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
self.assertIn('Placement API endpoint not found', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_discovery_failure(self, get):
"""Test failure when discovery for placement URL failed.
Replicate in devstack: start devstack with placement
engine, create valid placement service user and specify it
in auth section of [placement] in nova.conf. Stop keystone service.
"""
get.side_effect = ks_exc.DiscoveryFailure()
res = self.cmd._check_placement()
self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
self.assertIn('Discovery for placement API URI failed.', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_down_endpoint(self, get):
"""Test failure when endpoint is down.
Replicate in devstack: start devstack with placement
engine, disable placement engine apache config.
"""
get.side_effect = ks_exc.NotFound()
res = self.cmd._check_placement()
self.assertEqual(status.UpgradeCheckCode.FAILURE, res.code)
self.assertIn('Placement API does not seem to be running', res.details)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_valid_version(self, mock_get):
    """A reported max microversion of 1.10 satisfies the check."""
    mock_get.return_value = {
        "versions": [
            {
                "min_version": "1.0",
                "max_version": "1.10",
                "id": "v1.0"
            }
        ]
    }
    result = self.cmd._check_placement()
    self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_version_comparison_does_not_use_floats(self, mock_get):
    """Guard against float-based microversion comparison.

    _check_placement() previously coerced the version strings to floats
    before comparing, which fails for cases like
    float('1.10') < float('1.4'). As 1.4+ is required now, this asserts
    the comparison keeps working once Placement API 1.10 (or newer) is
    released.
    """
    mock_get.return_value = {
        "versions": [
            {
                "min_version": "1.0",
                "max_version": "1.10",
                "id": "v1.0"
            }
        ]
    }
    result = self.cmd._check_placement()
    self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
@mock.patch.object(status.UpgradeCommands, "_placement_get")
def test_invalid_version(self, mock_get):
    """A max microversion below the required 1.10 fails the check."""
    mock_get.return_value = {
        "versions": [
            {
                "min_version": "0.9",
                "max_version": "0.9",
                "id": "v1.0"
            }
        ]
    }
    result = self.cmd._check_placement()
    self.assertEqual(status.UpgradeCheckCode.FAILURE, result.code)
    self.assertIn('Placement API version 1.10 needed, you have 0.9',
                  result.details)
class TestUpgradeCheckBasic(test.NoDBTestCase):
    """Tests for the nova-status upgrade check command.

    The tests in this class should just test basic logic and use mock. Real
    checks which require more elaborate fixtures or the database should be done
    in separate test classes as they are more or less specific to a particular
    release and may be removed in a later release after they are no longer
    needed.
    """

    def setUp(self):
        super(TestUpgradeCheckBasic, self).setUp()
        # Capture stdout so the rendered ASCII results table can be
        # asserted on verbatim.
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.cmd = status.UpgradeCommands()

    def test_check_success(self):
        # A single passing check: overall exit code is SUCCESS and the
        # table shows the one check.
        fake_checks = (
            ('good', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.SUCCESS
            ))),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.SUCCESS, self.cmd.check())
        expected = """\
+-----------------------+
| Upgrade Check Results |
+-----------------------+
| Check: good |
| Result: Success |
| Details: None |
+-----------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_warning(self):
        # One success plus one warning: the warning wins as the overall
        # return code and both rows are rendered.
        fake_checks = (
            ('good', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.SUCCESS
            ))),
            ('warn', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.WARNING, 'there might be a problem'
            ))),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.WARNING, self.cmd.check())
        expected = """\
+-----------------------------------+
| Upgrade Check Results |
+-----------------------------------+
| Check: good |
| Result: Success |
| Details: None |
+-----------------------------------+
| Check: warn |
| Result: Warning |
| Details: there might be a problem |
+-----------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())

    def test_check_failure(self):
        # make the error details over 60 characters so we test the wrapping
        error_details = 'go back to bed' + '!' * 60
        # Success + warning + failure: the failure dominates the return
        # code, and the long details string wraps onto a second line.
        fake_checks = (
            ('good', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.SUCCESS
            ))),
            ('warn', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.WARNING, 'there might be a problem'
            ))),
            ('fail', mock.Mock(return_value=status.UpgradeCheckResult(
                status.UpgradeCheckCode.FAILURE, error_details
            ))),
        )
        with mock.patch.object(self.cmd, '_upgrade_checks', fake_checks):
            self.assertEqual(status.UpgradeCheckCode.FAILURE, self.cmd.check())
        expected = """\
+-----------------------------------------------------------------------+
| Upgrade Check Results |
+-----------------------------------------------------------------------+
| Check: good |
| Result: Success |
| Details: None |
+-----------------------------------------------------------------------+
| Check: warn |
| Result: Warning |
| Details: there might be a problem |
+-----------------------------------------------------------------------+
| Check: fail |
| Result: Failure |
| Details: go back to bed!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! |
| !!!!!!!!!!!!!! |
+-----------------------------------------------------------------------+
"""
        self.assertEqual(expected, self.output.getvalue())
class TestUpgradeCheckCellsV2(test.NoDBTestCase):
    """Tests for the nova-status upgrade cells v2 specific check."""

    # The API DB fixture is set up by hand here so its contents can be
    # built up incrementally until the check passes.
    USES_DB_SELF = True

    def setUp(self):
        super(TestUpgradeCheckCellsV2, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.useFixture(nova_fixtures.Database(database='api'))
        self.cmd = status.UpgradeCommands()

    def test_check_no_cell_mappings(self):
        """With no cell mappings at all the cells v2 check fails."""
        check_result = self.cmd._check_cellsv2()
        self.assertEqual(status.UpgradeCheckCode.FAILURE, check_result.code)
        self.assertIn('There needs to be at least two cell mappings',
                      check_result.details)

    def _create_cell_mapping(self, uuid):
        """Persist and return a CellMapping named after its uuid."""
        mapping = objects.CellMapping(
            context=context.get_admin_context(),
            uuid=uuid,
            name=uuid,
            transport_url='fake://%s/' % uuid,
            database_connection=uuid)
        mapping.create()
        return mapping

    def test_check_no_cell0_mapping(self):
        """Two cell mappings exist but neither is cell0, so check fails."""
        for index in range(2):
            self._create_cell_mapping(getattr(uuids, str(index)))
        check_result = self.cmd._check_cellsv2()
        self.assertEqual(status.UpgradeCheckCode.FAILURE, check_result.code)
        self.assertIn('No cell0 mapping found', check_result.details)

    def test_check_no_host_mappings_with_computes(self):
        """cell0/cell1 are mapped and a compute node exists, but there are
        no host mappings, which is a failure.
        """
        self._setup_cells()
        compute_node = objects.ComputeNode(
            context=context.get_admin_context(),
            host='fake-host',
            vcpus=4,
            memory_mb=8 * 1024,
            local_gb=40,
            vcpus_used=2,
            memory_mb_used=2 * 1024,
            local_gb_used=10,
            hypervisor_type='fake',
            hypervisor_version=1,
            cpu_info='{"arch": "x86_64"}')
        compute_node.create()
        check_result = self.cmd._check_cellsv2()
        self.assertEqual(status.UpgradeCheckCode.FAILURE, check_result.code)
        self.assertIn('No host mappings found but there are compute nodes',
                      check_result.details)

    def test_check_no_host_mappings_no_computes(self):
        """No host mappings and no computes is treated as a fresh install
        and therefore succeeds.
        """
        self._setup_cells()
        check_result = self.cmd._check_cellsv2()
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, check_result.code)
        self.assertIn('No host mappings or compute nodes were found',
                      check_result.details)

    def test_check_success(self):
        """A mapped compute host makes the cells v2 check pass cleanly."""
        # create the cell0 and first cell mappings
        self._setup_cells()
        # start a compute service and map its host into cell1
        service = self.start_service('compute')
        cell1 = self.cell_mappings[test.CELL1_NAME]
        host_mapping = objects.HostMapping(
            context=context.get_admin_context(),
            host=service.host,
            cell_mapping=cell1)
        host_mapping.create()
        check_result = self.cmd._check_cellsv2()
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, check_result.code)
        self.assertIsNone(check_result.details)
# This is what the ResourceTracker sets up in the nova-compute service.
# Canned VCPU inventory matching what a single compute node would report.
FAKE_VCPU_INVENTORY = {
    'resource_class': fields.ResourceClass.VCPU,
    'total': 32,
    'reserved': 4,
    'min_unit': 1,
    'max_unit': 1,
    'step_size': 1,
    'allocation_ratio': 1.0,
}

# This is the kind of thing that Neutron will setup externally for routed
# networks. Used by the tests below to verify that non-compute resource
# providers are filtered out when counting compute providers.
FAKE_IP_POOL_INVENTORY = {
    'resource_class': fields.ResourceClass.IPV4_ADDRESS,
    'total': 256,
    'reserved': 10,
    'min_unit': 1,
    'max_unit': 1,
    'step_size': 1,
    'allocation_ratio': 1.0,
}
class TestUpgradeCheckResourceProviders(test.NoDBTestCase):
    """Tests for the nova-status upgrade check on resource providers."""

    # We'll setup the database ourselves because we need to use cells fixtures
    # for multiple cell mappings.
    USES_DB_SELF = True

    def setUp(self):
        super(TestUpgradeCheckResourceProviders, self).setUp()
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        # We always need the API DB to be setup.
        self.useFixture(nova_fixtures.Database(database='api'))
        self.cmd = status.UpgradeCommands()

    def _create_compute_node(self, host='fake-host', ctxt=None):
        """Create a ComputeNode with canned inventory (in cell1 by default).

        Extracted helper: four tests previously duplicated this 13-line
        constructor call verbatim.

        :param host: hostname for the compute node record
        :param ctxt: request context; defaults to a fresh admin context
        :returns: the created ComputeNode object
        """
        if ctxt is None:
            ctxt = context.get_admin_context()
        cn = objects.ComputeNode(
            context=ctxt,
            host=host,
            vcpus=4,
            memory_mb=8 * 1024,
            local_gb=40,
            vcpus_used=2,
            memory_mb_used=2 * 1024,
            local_gb_used=10,
            hypervisor_type='fake',
            hypervisor_version=1,
            cpu_info='{"arch": "x86_64"}')
        cn.create()
        return cn

    def test_check_resource_providers_fresh_install_no_mappings(self):
        """Tests the scenario where we don't have any cell mappings (no cells
        v2 setup yet) and no compute nodes in the single main database.
        """
        # We don't have a cell mapping, just the regular old main database
        # because let's assume they haven't run simple_cell_setup yet.
        self.useFixture(nova_fixtures.Database())
        result = self.cmd._check_resource_providers()
        # this is assumed to be base install so it's OK but with details
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
        self.assertIn('There are no compute resource providers in the '
                      'Placement service nor are there compute nodes in the '
                      'database',
                      result.details)

    def test_check_resource_providers_no_rps_no_computes_in_cell1(self):
        """Tests the scenario where we have a cell mapping with no computes in
        it and no resource providers (because of no computes).
        """
        # this will setup two cell mappings, one for cell0 and a single cell1
        self._setup_cells()
        # there are no compute nodes in the cell1 database so we have 0
        # resource providers and 0 compute nodes, so it's assumed to be a fresh
        # install and not a failure.
        result = self.cmd._check_resource_providers()
        # this is assumed to be base install so it's OK but with details
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
        self.assertIn('There are no compute resource providers in the '
                      'Placement service nor are there compute nodes in the '
                      'database',
                      result.details)

    def test_check_resource_providers_no_rps_one_compute(self):
        """Tests the scenario where we have compute nodes in the cell but no
        resource providers yet - VCPU or otherwise. This is a warning because
        the compute isn't reporting into placement.
        """
        self._setup_cells()
        # create a compute node which will be in cell1 by default
        self._create_compute_node()
        result = self.cmd._check_resource_providers()
        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
        self.assertIn('There are no compute resource providers in the '
                      'Placement service but there are 1 compute nodes in the '
                      'deployment.', result.details)

    def _create_resource_provider(self, inventory):
        """Helper method to create a resource provider with inventory.

        :param inventory: dict of Inventory field values (e.g. one of the
            FAKE_*_INVENTORY constants above)
        :returns: the created ResourceProvider object
        """
        ctxt = context.get_admin_context()
        rp_uuid = uuidutils.generate_uuid()
        rp = objects.ResourceProvider(
            context=ctxt,
            name=rp_uuid,
            uuid=rp_uuid)
        rp.create()
        inventory = objects.Inventory(
            context=ctxt,
            resource_provider=rp,
            **inventory)
        inventory.create()
        return rp

    def test_check_resource_providers_no_compute_rps_one_compute(self):
        """Tests the scenario where we have compute nodes in the cell but no
        compute (VCPU) resource providers yet. This is a failure warning the
        compute isn't reporting into placement.
        """
        self._setup_cells()
        # create a compute node which will be in cell1 by default
        self._create_compute_node()
        # create a single resource provider that represents an external shared
        # IP allocation pool - this tests our filtering when counting resource
        # providers
        self._create_resource_provider(FAKE_IP_POOL_INVENTORY)
        result = self.cmd._check_resource_providers()
        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
        self.assertIn('There are no compute resource providers in the '
                      'Placement service but there are 1 compute nodes in the '
                      'deployment.', result.details)

    def test_check_resource_providers_fewer_rps_than_computes(self):
        """Tests the scenario that we have fewer resource providers than
        compute nodes which is a warning because we're underutilized.
        """
        # setup the cell0 and cell1 mappings
        self._setup_cells()
        # create two compute nodes (by default in cell1)
        ctxt = context.get_admin_context()
        for x in range(2):
            self._create_compute_node(host=getattr(uuids, str(x)), ctxt=ctxt)
        # create a single resource provider with some VCPU inventory
        self._create_resource_provider(FAKE_VCPU_INVENTORY)
        result = self.cmd._check_resource_providers()
        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
        self.assertIn('There are 1 compute resource providers and 2 compute '
                      'nodes in the deployment.', result.details)

    def test_check_resource_providers_equal_rps_to_computes(self):
        """This tests the happy path scenario where we have an equal number
        of compute resource providers to compute nodes.
        """
        # setup the cell0 and cell1 mappings
        self._setup_cells()
        # create a single compute node
        self._create_compute_node(host=uuids.host)
        # create a single resource provider with some VCPU inventory
        self._create_resource_provider(FAKE_VCPU_INVENTORY)
        # create an externally shared IP allocation pool resource provider
        self._create_resource_provider(FAKE_IP_POOL_INVENTORY)
        result = self.cmd._check_resource_providers()
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
        self.assertIsNone(result.details)
| |
import numpy as np
import tensorflow as tf
import sys
import time
from sklearn.metrics import f1_score
import random
tf.compat.v1.disable_eager_execution()
class History(object):
    """Minimal stand-in for a Keras-style training history object.

    Exposes a single ``history`` dict, intended to map a metric name to the
    list of its per-epoch values.
    """

    def __init__(self):
        # metric name -> list of per-epoch values
        self.history = {}
class hcan(object):
    """Hierarchical Convolutional Attention Network (TF1-style graph).

    Embeds words, applies word-level self attention followed by a learned
    target attention to produce sentence vectors, repeats both attention
    steps at the sentence level to produce a document vector, and attaches
    one softmax classifier head per task in ``num_classes``.
    """

    def __init__(self, embedding_matrix, num_classes, max_sents, max_words,
                 attention_size=512, dropout_rate=0.9, activation=tf.nn.elu, lr=0.0001,
                 optimizer='adam', embed_train=True):
        """Build the computation graph and start a session.

        :param embedding_matrix: vocab x embed_dim array of word vectors
        :param num_classes: list with the number of classes for each task
        :param max_sents: maximum sentences per document
        :param max_words: maximum words per sentence
        :param attention_size: width of the attention projections
        :param dropout_rate: value fed to ``tf.nn.dropout``.
            NOTE(review): the code treats this as a keep probability
            (TF1 ``keep_prob`` semantics); under TF2's ``tf.nn.dropout``
            the second positional argument is the *drop* rate instead --
            confirm against the installed TF version.
        :param activation: activation for the Q/K/V conv projections
        :param lr: learning rate
        :param optimizer: 'adam', 'sgd' or 'adadelta'; anything else
            selects RMSProp
        :param embed_train: whether the embedding table is trainable
        """
        tf.compat.v1.reset_default_graph()
        dropout_keep = dropout_rate

        self.dropout_keep = dropout_keep
        self.dropout = tf.compat.v1.placeholder(tf.float32)
        self.ms = max_sents
        self.mw = max_words
        self.embedding_matrix = embedding_matrix.astype(np.float32)
        self.attention_size = attention_size
        self.activation = activation
        self.num_tasks = len(num_classes)
        self.embed_train = embed_train

        # doc input
        self.doc_input = tf.compat.v1.placeholder(tf.int32, shape=[None, max_sents, max_words])  # batch x sents x words
        batch_size = tf.shape(self.doc_input)[0]
        # Zero is assumed to be the padding id: sign() marks real tokens.
        words_per_sent = tf.reduce_sum(tf.sign(self.doc_input), 2)  # batch x sents
        max_words_ = tf.reduce_max(words_per_sent)
        sents_per_doc = tf.reduce_sum(tf.sign(words_per_sent), 1)  # batch
        max_sents_ = tf.reduce_max(sents_per_doc)
        # Clip the batch to the longest actual sentence/document to save work.
        doc_input_reduced = self.doc_input[:, : max_sents_, : max_words_]  # clip
        doc_input_reshape = tf.reshape(doc_input_reduced, (-1, max_words_))  # batch*sents x words

        # word embeddings
        word_embeds = tf.gather(tf.compat.v1.get_variable('embeddings', initializer=self.embedding_matrix,
                                dtype=tf.float32, trainable=self.embed_train), doc_input_reshape)
        word_embeds = tf.nn.dropout(word_embeds, self.dropout)  # batch*sents x words x attention_size

        # word self attention: 1x1 convs project to Q/K/V
        Q = tf.compat.v1.layers.conv1d(word_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())
        K = tf.compat.v1.layers.conv1d(word_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())
        V = tf.compat.v1.layers.conv1d(word_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())

        outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1]))
        outputs = outputs / (K.get_shape().as_list()[-1]**0.5)
        # Mask exact-zero scores (padding) with a large negative before softmax.
        outputs = tf.where(tf.equal(outputs, 0), tf.ones_like(outputs) * -1000, outputs)
        outputs = tf.nn.dropout(tf.nn.softmax(outputs), self.dropout)
        outputs = tf.matmul(outputs, V)  # batch*sents x words x attention_size

        # word target attention: a learned query pooled over words
        Q = tf.compat.v1.get_variable('word_Q', (1, 1, self.attention_size),
                                      tf.float32, tf.initializers.orthogonal())
        Q = tf.tile(Q, [batch_size * max_sents_, 1, 1])
        V = outputs

        outputs = tf.matmul(Q, tf.transpose(outputs, [0, 2, 1]))
        outputs = outputs / (K.get_shape().as_list()[-1]**0.5)
        outputs = tf.where(tf.equal(outputs, 0), tf.ones_like(outputs) * -1000, outputs)
        outputs = tf.nn.dropout(tf.nn.softmax(outputs), self.dropout)
        outputs = tf.matmul(outputs, V)  # batch*sents x 1 x attention_size

        sent_embeds = tf.reshape(outputs, (-1, max_sents_, self.attention_size))
        sent_embeds = tf.nn.dropout(sent_embeds, self.dropout)  # batch x sents x attention_size

        # sent self attention
        Q = tf.compat.v1.layers.conv1d(sent_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())
        K = tf.compat.v1.layers.conv1d(sent_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())
        V = tf.compat.v1.layers.conv1d(sent_embeds, self.attention_size, 1, padding='same',
                                       activation=self.activation,
                                       kernel_initializer=tf.initializers.glorot_uniform())

        outputs = tf.matmul(Q, tf.transpose(K, [0, 2, 1]))
        outputs = outputs / (K.get_shape().as_list()[-1]**0.5)
        outputs = tf.where(tf.equal(outputs, 0), tf.ones_like(outputs) * -1000, outputs)
        outputs = tf.nn.dropout(tf.nn.softmax(outputs), self.dropout)
        outputs = tf.matmul(outputs, V)  # batch x sents x attention_size

        # sent target attention
        Q = tf.compat.v1.get_variable('sent_Q', (1, 1, self.attention_size),
                                      tf.float32, tf.initializers.orthogonal())
        Q = tf.tile(Q, [batch_size, 1, 1])
        V = outputs

        outputs = tf.matmul(Q, tf.transpose(outputs, [0, 2, 1]))
        outputs = outputs / (K.get_shape().as_list()[-1]**0.5)
        outputs = tf.where(tf.equal(outputs, 0), tf.ones_like(outputs) * -1000, outputs)
        outputs = tf.nn.dropout(tf.nn.softmax(outputs), self.dropout)
        outputs = tf.matmul(outputs, V)  # batch x 1 x attention_size

        doc_embeds = tf.nn.dropout(tf.squeeze(outputs, [1]), self.dropout)  # batch x attention_size

        # classification functions: one dense softmax head per task
        logits = []
        self.predictions = []
        for i in range(self.num_tasks):
            logit = tf.compat.v1.layers.dense(doc_embeds, num_classes[i],
                                              kernel_initializer=tf.initializers.glorot_uniform())
            logits.append(logit)
            self.predictions.append(tf.nn.softmax(logit))

        # loss, accuracy, and training functions: mean of per-task losses
        self.labels = []
        self.loss = 0
        for i in range(self.num_tasks):
            label = tf.compat.v1.placeholder(tf.int32, shape=[None])
            self.labels.append(label)
            loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits[i], labels=label))
            self.loss += loss / self.num_tasks

        if optimizer == 'adam':
            self.optimizer = tf.compat.v1.train.AdamOptimizer(lr, 0.9, 0.99)
        elif optimizer == 'sgd':
            self.optimizer = tf.compat.v1.train.GradientDescentOptimizer(lr)
        elif optimizer == 'adadelta':
            self.optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rate=lr)
        else:
            self.optimizer = tf.compat.v1.train.RMSPropOptimizer(lr)

        # Mixed-precision rewrite is only available on TF 1.14+.
        tf_version = tf.__version__
        tf_version_split = tf_version.split('.')
        if (int(tf_version_split[0]) == 1 and int(tf_version_split[1]) > 13):
            self.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(self.optimizer, loss_scale='dynamic')
        self.optimizer = self.optimizer.minimize(self.loss)

        # init op
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        self.saver = tf.compat.v1.train.Saver()
        self.sess = tf.compat.v1.Session(config=config)
        self.sess.run(tf.compat.v1.global_variables_initializer())

    def train(self, data, labels, batch_size=100, epochs=50, validation_data=None):
        """Train the network, scoring after every epoch.

        :param data: array of shape (docs, max_sents, max_words) of word ids
        :param labels: list of per-task label sequences
        :param batch_size: minibatch size
        :param epochs: number of passes over the data
        :param validation_data: optional (data, labels) pair scored after
            each epoch; when omitted the (shuffled) training set is scored
            instead. The original implementation crashed on None here.
        :returns: History with per-epoch 'val_loss' values
        """
        if validation_data:
            validation_size = len(validation_data[0])
        else:
            validation_size = len(data)
        print('training network on %i documents, validation on %i documents'
              % (len(data), validation_size))
        history = History()

        for ep in range(epochs):
            # Shuffle data and labels in unison. Bug fix: the original did
            # ``labels.append(data)`` which mutated the caller's list on the
            # first epoch; build a combined list instead.
            combined = list(zip(*(list(labels) + [data])))
            random.shuffle(combined)
            shuffled = list(zip(*combined))
            data = list(shuffled[-1])
            labels = list(shuffled[:self.num_tasks])

            y_preds = [[] for _ in range(self.num_tasks)]
            y_trues = [[] for _ in range(self.num_tasks)]
            start_time = time.time()

            # train over minibatches
            for start in range(0, len(data), batch_size):
                stop = min(start + batch_size, len(data))

                feed_dict = {self.doc_input: data[start: stop],
                             self.dropout: self.dropout_keep}
                for i in range(self.num_tasks):
                    feed_dict[self.labels[i]] = labels[i][start: stop]
                retvals = self.sess.run(self.predictions + [self.optimizer, self.loss],
                                        feed_dict=feed_dict)
                loss = retvals[-1]

                # track correct predictions
                for i in range(self.num_tasks):
                    y_preds[i].extend(np.argmax(retvals[i], 1))
                    y_trues[i].extend(labels[i][start:stop])
                sys.stdout.write("epoch %i, sample %i of %i, loss: %f \r"
                                 % (ep + 1, stop, len(data), loss))
                sys.stdout.flush()

            # report after every epoch
            print("\ntraining time: %.2f" % (time.time() - start_time))
            for i in range(self.num_tasks):
                micro = f1_score(y_trues[i], y_preds[i], average='micro')
                macro = f1_score(y_trues[i], y_preds[i], average='macro')
                print("epoch %i task %i training micro/macro: %.4f, %.4f" % (ep + 1, i + 1, micro, macro))

            # Score held-out data when provided, otherwise the training set
            # (matches how validation_size is computed above).
            if validation_data:
                val_x, val_y = validation_data[0], validation_data[1]
            else:
                val_x, val_y = data, labels
            scores, val_loss = self.score(val_x, val_y, batch_size=batch_size)
            for i in range(self.num_tasks):
                print("epoch %i task %i validation micro/macro: %.4f, %.4f" % (ep + 1, i + 1, scores[i][0], scores[i][1]))
            history.history.setdefault('val_loss', []).append(val_loss)
        return history

    def predict(self, data, batch_size=100):
        """Return a list (one entry per task) of predicted class-id arrays."""
        y_preds = [[] for _ in range(self.num_tasks)]
        for start in range(0, len(data), batch_size):
            stop = min(start + batch_size, len(data))

            # dropout disabled at inference (keep probability 1.0)
            feed_dict = {self.doc_input: data[start: stop], self.dropout: 1.0}
            preds = self.sess.run(self.predictions, feed_dict=feed_dict)
            for i in range(self.num_tasks):
                y_preds[i].append(np.argmax(preds[i], 1))

            sys.stdout.write("processed %i of %i records        \r" % (stop, len(data)))
            sys.stdout.flush()

        print()
        for i in range(self.num_tasks):
            y_preds[i] = np.concatenate(y_preds[i], 0)
        return y_preds

    def score(self, data, labels, batch_size=16):
        """Evaluate on data/labels.

        :returns: (scores, loss) where scores is a per-task list of
            (micro_f1, macro_f1) tuples and loss is the mean batch loss
        """
        loss = []
        y_preds = [[] for _ in range(self.num_tasks)]
        for start in range(0, len(data), batch_size):
            stop = min(start + batch_size, len(data))

            feed_dict = {self.doc_input: data[start: stop], self.dropout: 1.0}
            for i in range(self.num_tasks):
                feed_dict[self.labels[i]] = labels[i][start: stop]
            retvals = self.sess.run(self.predictions + [self.loss], feed_dict=feed_dict)
            loss.append(retvals[-1])
            for i in range(self.num_tasks):
                y_preds[i].append(np.argmax(retvals[i], 1))

            sys.stdout.write("processed %i of %i records        \r" % (stop, len(data)))
            sys.stdout.flush()

        loss = np.mean(loss)
        print()
        for i in range(self.num_tasks):
            y_preds[i] = np.concatenate(y_preds[i], 0)

        scores = []
        for i in range(self.num_tasks):
            micro = f1_score(labels[i], y_preds[i], average='micro')
            macro = f1_score(labels[i], y_preds[i], average='macro')
            scores.append((micro, macro))
        return scores, loss

    def save(self, filename):
        """Checkpoint all graph variables to ``filename``."""
        self.saver.save(self.sess, filename)

    def load(self, filename):
        """Restore graph variables from a checkpoint at ``filename``."""
        self.saver.restore(self.sess, filename)
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import six
import sys
from collections import defaultdict, MutableSet
from .. import core
from ... import compat as cpt
from ..framework import Program, default_main_program, Parameter, Variable, core
from ..backward import _rename_arg_
from functools import reduce
from six.moves import range
# Element size in bytes for each supported variable dtype; used when
# estimating how much memory a reuse decision saves.
dtype_to_size = {
    core.VarDesc.VarType.FP16: 2,
    core.VarDesc.VarType.FP32: 4,
    core.VarDesc.VarType.FP64: 8,
    core.VarDesc.VarType.INT16: 2,
    core.VarDesc.VarType.INT32: 4,
    core.VarDesc.VarType.INT64: 8,
    core.VarDesc.VarType.BOOL: 1,
    core.VarDesc.VarType.UINT8: 1,
}

# Ops that own sub-blocks; they are skipped by the per-op passes below and
# handled separately.
SUB_BLOCK_OPS = [
    "while", "while_grad", "conditional_block", "conditional_block_grad"
]

# Each forward sub-block op paired with its backward counterpart.
SUB_BLOCK_PAIR = [("while", "while_grad"),
                  ("conditional_block", "conditional_block_grad")]

# When True, the optimizer prints verbose cache/reuse decisions.
PRINT_LOG = False
# Name of a single variable to trace while matching cache candidates
# (debugging aid; empty disables tracing).
FLAGS_memory_optimize = ""
class OrderedSet(MutableSet):
    """A set that remembers insertion order.

    Backed by a circular doubly linked list threaded through ``self.map``:
    each node is ``[key, prev, next]`` and ``self.end`` is the sentinel
    node of the ring (the classic ActiveState OrderedSet recipe).
    """

    def __init__(self, iterable=None):
        sentinel = []
        sentinel += [None, sentinel, sentinel]  # key slot unused; links to self
        self.end = sentinel
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        """Append ``key`` at the end of the order; no-op if already present."""
        if key in self.map:
            return
        sentinel = self.end
        tail = sentinel[1]
        node = [key, tail, sentinel]
        tail[2] = sentinel[1] = self.map[key] = node

    def update(self, other):
        """Add every element of ``other`` in its iteration order."""
        for item in other:
            self.add(item)

    def discard(self, key):
        """Remove ``key`` if present by unlinking its node from the ring."""
        node = self.map.pop(key, None)
        if node is not None:
            _, before, after = node
            before[2] = after
            after[1] = before

    def remove(self, key):
        # NOTE: unlike a real set, this does not raise KeyError for a
        # missing key -- it simply delegates to discard().
        self.discard(key)

    def __iter__(self):
        node = self.end[2]
        while node is not self.end:
            yield node[0]
            node = node[2]

    def __reversed__(self):
        node = self.end[1]
        while node is not self.end:
            yield node[0]
            node = node[1]

    def pop(self, last=True):
        """Remove and return the last (or, with ``last=False``, first) key."""
        if not self:
            raise KeyError('set is empty')
        node = self.end[1] if last else self.end[2]
        key = node[0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__, )
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing two OrderedSets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
class ControlFlowGraph(object):
def __init__(self, program, ops, forward_num, skip_opt):
    # Program being optimized and the flattened op list of one block.
    self._program = program
    self._ops = ops
    # Index of the first backward op; ops before it are the forward pass.
    self._forward_num = forward_num
    # Per-op CFG edges (op index -> set of op indices).
    self._successors = defaultdict(OrderedSet)
    self._presuccessors = defaultdict(OrderedSet)
    # Per-op variable use/def sets and liveness sets (var names).
    self._uses = defaultdict(OrderedSet)
    self._defs = defaultdict(OrderedSet)
    self._live_in = defaultdict(OrderedSet)
    self._live_out = defaultdict(OrderedSet)
    # Variable names that must never be optimized/reused.
    self._skip_opt = skip_opt
    # Ordered list of (var_name, shape) entries available for reuse.
    self.pool = []
def _add_connections(self, connections):
    """Register every (predecessor, successor) pair as a CFG edge."""
    for src, dst in connections:
        self._add(src, dst)
def _add(self, src, dst):
    """Record a single directed edge ``src -> dst`` in both directions."""
    self._successors[src].add(dst)
    self._presuccessors[dst].add(src)
# TODO(panyx0718): We need to have a unified way of building intermediate
# representation.
def _build_graph(self):
    """Build the op-level CFG: sequential edges plus use/def sets."""
    self.op_size = len(self._ops)
    # Ops execute sequentially, so each op is connected to its successor.
    self._add_connections((idx, idx + 1) for idx in range(self.op_size - 1))
    for idx, op in enumerate(self._ops):
        self._uses[idx].update(op.input_arg_names())
        self._defs[idx].update(op.output_arg_names())
def _update_graph(self, old_name, new_name, begin_idx=0):
    """Rename a variable in every per-op bookkeeping set from begin_idx on.

    The four tracked sets are visited in the same order as the original
    hand-unrolled code (uses, defs, live_in, live_out) so OrderedSet
    iteration order is preserved.
    """
    tracked = (self._uses, self._defs, self._live_in, self._live_out)
    for idx in range(begin_idx, self.op_size):
        for mapping in tracked:
            bucket = mapping[idx]
            if old_name in bucket:
                bucket.remove(old_name)
                bucket.add(new_name)
def _dataflow_analyze(self):
    """Backward liveness analysis over the op-level CFG.

    Iterates a worklist until the live-in sets reach a fixed point:
    live_out[i] accumulates successors' live-in sets and
    live_in[i] = uses[i] | (live_out[i] - defs[i]).
    """
    self._build_graph()
    previous_live_in = defaultdict(set)
    pending = list(range(len(self._ops) - 1, -1, -1))
    while pending:
        idx = pending.pop(0)
        # Snapshot the old live-in so convergence can be detected below.
        previous_live_in[idx] = set(self._live_in[idx])
        for succ in self._successors[idx]:
            self._live_out[idx] |= self._live_in[succ]
        self._live_in[idx] = self._uses[idx] | (
            self._live_out[idx] - self._defs[idx])
        # Requeue predecessors whenever this op's live-in changed.
        if previous_live_in[idx] != set(self._live_in[idx]):
            for pred in self._presuccessors[idx]:
                pending.append(pred)
def _fill_pool(self, i, is_forward):
    """Move variables that die at op ``i`` into the reuse pool.

    The pool is kept ordered so that entries with a static batch
    dimension and entries with a dynamic (-1) batch dimension stay
    grouped, each group ordered by absolute element count.
    """
    def comparator(x, cache):
        # Order two (name, shape) entries by absolute element count, but
        # only when both have the same kind of leading (batch) dimension:
        # both static or both dynamic (-1).
        x_shape = x[1]
        cache_shape = cache[1]
        x_size = abs(reduce(lambda x, y: x * y, x_shape))
        cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
        if (x_shape[0] == -1 and cache_shape[0] == -1) or \
                (x_shape[0] != -1 and cache_shape[0] != -1):
            return x_size <= cache_size
        else:
            return False

    def find_var_in_block(x):
        # Only consider names actually produced by some op in this block.
        known_vars = set()
        for op in self._ops:
            known_vars.update(op.output_arg_names())
        return x in known_vars

    block_desc = self._ops[i].block()
    # Variables live into op i but not out of it die here.
    in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i])
    # NOTE: must sort the in_diff set for cases that get different cache var.
    # FIXME(typhoonzero): maybe use a "sorted set" is better than this.
    can_optimize = [
        x for x in sorted(in_diff)
        if self._check_var_validity(block_desc, x, is_forward)
    ]
    if can_optimize:
        for var_name in can_optimize:
            cache = (var_name, self._find_var(block_desc, var_name,
                                              is_forward).shape())
            if cache not in self.pool and find_var_in_block(var_name):
                # Walk forward past pool entries that should stay ahead of
                # `cache` per the ordering invariant described above.
                # NOTE: this loop variable shadows the parameter ``i``.
                i = 0
                while i < len(self.pool):
                    mycache = self.pool[i]
                    mysize = mycache[1][0]
                    cache_size = cache[1][0]
                    if (mysize == -1 and cache_size == -1) or \
                            (mysize != -1 and cache_size != -1):
                        if comparator(mycache, cache):
                            i += 1
                        else:
                            break
                    elif mysize == -1 and cache_size != -1:
                        i += 1
                    elif mysize != -1 and cache_size == -1:
                        break
                self.pool.insert(i, cache)
def _get_diff(self, a, b):
    """Return the two set differences ``(a - b, b - a)``."""
    common = a & b
    return a - common, b - common
def _has_var(self, block_desc, var_name, is_forward):
    """Check var existence; backward ops search parent blocks recursively."""
    name_bytes = cpt.to_bytes(var_name)
    if is_forward:
        return block_desc.has_var(name_bytes)
    return block_desc.has_var_recursive(name_bytes)
def _find_var(self, block_desc, var_name, is_forward):
    """Look up a var desc; backward ops search parent blocks recursively."""
    name_bytes = cpt.to_bytes(var_name)
    if is_forward:
        return block_desc.find_var(name_bytes)
    return block_desc.find_var_recursive(name_bytes)
def _check_var_validity(self, block_desc, x, is_forward):
    """Return True if variable ``x`` is a candidate for memory reuse.

    Rejects the special "@EMPTY@" name, variables missing from the block,
    persistables, non-LOD_TENSOR variables, explicitly skipped names and
    variables without a shape.
    """
    if str(x) == "@EMPTY@":
        return False
    if not self._has_var(block_desc, x, is_forward):
        return False
    # Hoist the lookup: the original called _find_var three times for the
    # same variable.
    var = self._find_var(block_desc, x, is_forward)
    if var.persistable():
        return False
    if var.type() != core.VarDesc.VarType.LOD_TENSOR:
        return False
    if x in self._skip_opt:
        return False
    if not var.shape():
        return False
    return True
# TODO(panyx0718): This needs to be less hacky. It seems memory optimization
# doesn't consider vars copied between cpu and gpu.
def _update_skip_opt_set(self):
    """Exclude the outputs of force-CPU ops from optimization.

    Their memory lives on the host and must not be reused on device.
    """
    for i in range(self.op_size):
        op = self._ops[i]
        # Idiom fix: truthy check instead of `== True`.
        if op.has_attr("force_cpu") and op.attr("force_cpu"):
            self._skip_opt.update(op.output_arg_names())
def release_memory(self, skip_opt_set=None):
    """Insert ``delete_var`` ops that eagerly free dead variables.

    Unlike ``memory_optimize`` this does not reuse memory; it inserts a
    delete op right after the op at which a variable's liveness ends.

    :param skip_opt_set: optional extra set of variable names to skip.
    """
    self._dataflow_analyze()
    self._update_skip_opt_set()
    if skip_opt_set:
        self._skip_opt.update(skip_opt_set)
    # Counts of delete ops already inserted into the forward/backward
    # regions; each insertion shifts subsequent block indices by one.
    fwd_id = 0
    bwd_id = 0
    for i in range(self.op_size):
        op = self._ops[i]
        # Ops that own sub-blocks are not handled here.
        if op.type() in SUB_BLOCK_OPS:
            continue
        block_desc = op.block()
        is_forward = i < self._forward_num
        in_diff, out_diff = self._get_diff(self._live_in[i],
                                           self._live_out[i])
        # Variables live into op i but not out of it can be freed here.
        can_optimize = [
            x for x in in_diff
            if self._check_var_validity(block_desc, x, is_forward)
        ]
        if can_optimize:
            # Account for previously inserted delete ops when computing the
            # insertion index into the block description.
            index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1
            delete_op = block_desc._insert_op(index)
            delete_op.set_type("delete_var")
            delete_op.set_input("X", can_optimize)
            if is_forward:
                fwd_id += 1
            else:
                bwd_id += 1
    def memory_optimize(self, skip_opt_set=None, level=0):
        """Reuse the memory of dead variables for newly defined ones.

        Walks the ops in order, keeping a pool of (name, shape) pairs of
        variables that have died.  When an op defines a variable that
        matches a pooled entry in dtype and shape (per *level*), the new
        variable is renamed to the cached one so its memory is reused.

        :param skip_opt_set: optional set of variable names excluded from
            optimization.
        :param level: 0 = reuse only on exact shape match; 1 = reuse when
            the new variable fits into the cached one's size.
        """

        def compare_shape(x_shape, cache_shape, opt_level):
            # Decide whether `x_shape` may reuse a cache slot of
            # `cache_shape` under the given optimization level.
            if opt_level == 0:
                return x_shape == cache_shape
            elif opt_level == 1:
                # A -1 leading dim means a dynamic batch dimension; only
                # pair dynamic with dynamic (and static with static).
                if (x_shape[0] == -1) ^ (cache_shape[0] == -1):
                    return False
                x_size = abs(reduce(lambda x, y: x * y, x_shape))
                cache_size = abs(reduce(lambda x, y: x * y, cache_shape))
                if x_size <= cache_size:
                    return True
            else:
                raise ValueError("only support opt_level 0 or 1.")
            return False

        self._dataflow_analyze()
        self._update_skip_opt_set()
        # update skip set to meet users' demand
        if skip_opt_set:
            self._skip_opt.update(skip_opt_set)
        counter = 0
        for i in range(self.op_size):
            op = self._ops[i]
            # Ops that own subblocks are handled by their own CFG.
            if op.type() in SUB_BLOCK_OPS:
                continue
            block_desc = op.block()
            is_forward = i < self._forward_num
            if self.pool:
                # NOTE: must sort the in_diff set for cases that get different cache var.
                defs_can_optimize = [
                    x for x in self._defs[i]
                    if self._check_var_validity(block_desc, x, is_forward)
                ]
                out_pair = [
                    (x, self._find_var(block_desc, x, is_forward).shape())
                    for x in defs_can_optimize
                ]
                for x, x_shape in out_pair:
                    # If x is both in uses and defs, it can not be optimized!
                    if x in self._uses[i]:
                        continue
                    # FLAGS_memory_optimize names a single variable to trace
                    # verbosely (debugging aid).
                    if x == FLAGS_memory_optimize:
                        print("start match var ", x, " of op ", op.type())
                        print(self.pool)
                    for index, cache_pair in enumerate(self.pool):
                        cache_var = cache_pair[0]
                        cache_shape = cache_pair[1]
                        if not self._has_var(block_desc, cache_var, is_forward):
                            if PRINT_LOG:
                                print("cache %s not exists!" %
                                      (cpt.to_text(cache_var)))
                            continue
                        if x == cache_var:
                            if PRINT_LOG:
                                print("x : ", cpt.to_text(x), " cache : ",
                                      cpt.to_text(cache_var), " is same var!")
                            break

                        x_dtype = self._find_var(block_desc, x,
                                                 is_forward).dtype()
                        cache_dtype = self._find_var(block_desc, cache_var,
                                                     is_forward).dtype()
                        if x_dtype != cache_dtype:
                            if PRINT_LOG:
                                print("x_dtype and cache_dtype are different")
                            continue

                        if not compare_shape(x_shape, cache_shape, level):
                            continue
                        # TODO(qijun): dtype_to_size[x_dtype] and dtype_to_size[cache_dtype]
                        if PRINT_LOG:
                            print(
                                ("!!! %d, %s => %s, cache idx %d, pool size %d"
                                 % (counter, x + str(x_shape),
                                    cache_var + str(cache_shape), index,
                                    len(self.pool))))
                        counter += 1
                        self.pool.pop(index)
                        # Rename the var to the cache var already with
                        # memory allocated in order to reuse the memory.
                        _rename_arg_(self._ops, x, cache_var, begin_idx=i)
                        self._program.block(block_desc.id).var(cpt.to_text(
                            x)).desc = self._find_var(block_desc, cache_var,
                                                      is_forward)
                        self._program.block(block_desc.id).vars[cpt.to_text(x)] = \
                            Variable(self._program.block(block_desc.id), name=cpt.to_text(x))
                        self._update_graph(x, cache_var, begin_idx=i)
                        break
            self._fill_pool(i, is_forward)
def _process_sub_block_pair(pdesc, sub_block_pair):
    """Creates a list of tuple each of which tracks info of a subblock.

    Note: this function doesn't handle nested subblocks yet.
    TODO(panyx0718): assert if case nested subblocks happen.

    :param pdesc: ProgramDesc.
    :param sub_block_pair: A list op pairs. Each op pair is the forward
        op and backward op. The ops in the list are special that they contain
        a subblock of ops.
    :return: A list of tuples, each tuple is (all ops in a subblock pair
        including forward and backward, number of forward ops,
        all output args names of the ops in the subblock pairs).
    """
    ops_list = []
    block_desc = pdesc.block(0)
    op_size = block_desc.op_size()
    for fwd_op, bwd_op in sub_block_pair:
        sub_block_ids = []
        grad_sub_block_ids = []
        sub_block_id_pair = []
        sub_op_dict = {}
        # Collect the subblock ids owned by forward/backward ops, and
        # remember which op owns each subblock.
        for i in range(op_size):
            op = block_desc.op(i)
            if op.type() == fwd_op:
                sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op
            elif op.type() == bwd_op:
                grad_sub_block_ids.append(op.attr("sub_block").id)
                sub_op_dict[op.attr("sub_block").id] = op

        # Find fwd_op/bwd_op block pair
        for grad_id in grad_sub_block_ids:
            fwd_id = pdesc.block(grad_id).get_forward_block_idx()
            if fwd_id in sub_block_ids:
                sub_block_id_pair.append((fwd_id, grad_id))
                sub_block_ids.remove(fwd_id)

        # Get fwd_op/bwd_op block ops
        for fwd_id, grad_id in sub_block_id_pair:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            block_op_size = sub_block.op_size()
            for i in range(block_op_size):
                sub_block_ops.append(sub_block.op(i))

            grad_sub_block = pdesc.block(grad_id)
            grad_sub_block_op_size = grad_sub_block.op_size()
            for i in range(grad_sub_block_op_size):
                sub_block_ops.append(grad_sub_block.op(i))

            # Everything the owning ops touch must be excluded from
            # optimization in the enclosing block.
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].output_arg_names())
            sub_op_output.update(sub_op_dict[fwd_id].input_arg_names())
            sub_op_output.update(sub_op_dict[grad_id].input_arg_names())
            ops_list.append((sub_block_ops, block_op_size, sub_op_output))

        # Process rest fwd_op block ops (forward blocks that have no
        # matching backward block).
        for fwd_id in sub_block_ids:
            sub_block_ops = []
            sub_block = pdesc.block(fwd_id)
            sub_block_op_size = sub_block.op_size()
            for i in range(sub_block_op_size):
                sub_block_ops.append(sub_block.op(i))
            sub_op_output = set()
            sub_op_output.update(sub_op_dict[fwd_id].output_arg_names())
            sub_op_output.update(sub_op_dict[fwd_id].input_arg_names())
            ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output))
    return ops_list
def _get_cfgs(input_program):
    """Build a ControlFlowGraph for the global block and each subblock pair.

    :param input_program: Program object.
    :return: A list of ControlFlowGraph, each corresponds to a block.
    """
    pdesc = input_program._get_desc()
    global_block = pdesc.block(0)
    num_ops = global_block.op_size()

    # Only process one level of nested subblock.
    ops_list = list(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR))

    # Every variable touched by a subblock pair is off limits for the
    # global block's optimization.
    skip_opt_set = set()
    for _, _, skip_opt in ops_list:
        skip_opt_set.update(skip_opt)

    # The global block's ops go first.
    global_ops = [global_block.op(i) for i in range(num_ops)]
    ops_list.insert(0, (global_ops, num_ops, skip_opt_set))

    return [
        ControlFlowGraph(input_program, ops, forward_num, skip_opt)
        for ops, forward_num, skip_opt in ops_list
    ]
def _is_opt_role_op(op):
    """Return True if *op* carries the Optimize op-role attribute.

    Always returns an explicit bool; the original returned True or an
    implicit None, which forced callers to rely on truthiness.
    """
    op_maker = core.op_proto_and_checker_maker
    optimize_role = op_maker.OpRole.Optimize
    return op_maker.kOpRoleAttrName() in op.attr_names and \
        int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role)
def memory_optimize(input_program,
                    skip_opt_set=None,
                    print_log=False,
                    level=0,
                    skip_grads=False):
    """Optimize memory by reusing var memory.

    Note: it doesn't not support subblock nested in subblock.

    Args:
        input_program(str): Input Program
        skip_opt_set(set): vars wil be skipped in memory optimze
        print_log(bool): whether to print debug log.
        level(int): If level=0, reuse only when two variables' shapes are
            completely equal; if level=1, reuse when the new variable's
            size fits into the cached variable's size.
        skip_grads(bool): whether to exclude gradient variables (outputs
            of optimizer-role ops) from optimization.
    Returns:
        None
    """
    # This transpiler pass is deprecated upstream.
    sys.stderr.write('memory_optimize is deprecated. '
                     'Use CompiledProgram and Executor\n')

    def to_name_str(var):
        # Normalize a Variable or a string-like into a plain name string.
        if isinstance(var, Variable):
            return var.desc.name()
        elif isinstance(var, str):
            return var
        elif isinstance(var, six.string_types):
            # NOTE(review): on Python 2 this branch catches `unicode`
            # values (str is a subset of six.string_types); on Python 3
            # it is unreachable after the `str` check above.
            return str(var)
        else:
            raise TypeError(str(var) + " should be Variable or str")

    if level != 0 and level != 1:
        raise ValueError("only support opt_level 0 or 1.")
    if skip_opt_set is not None:
        if isinstance(skip_opt_set, set) or isinstance(skip_opt_set, list):
            skip_opt_set = set(skip_opt_set)
        else:
            raise ValueError("only support skip_opt_set as set.")
    global PRINT_LOG
    PRINT_LOG = print_log
    if skip_grads:
        # Collect gradient names from optimizer-role ops and add them to
        # the skip set so optimizer state is never reused.
        grad_set = set()
        OP_ROLE_VAR = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
        for op in input_program.global_block().ops:
            if _is_opt_role_op(op):
                if op.attr(OP_ROLE_VAR):
                    grad_name = op.attr(OP_ROLE_VAR)[1]
                    grad_set.add(grad_name)
        if not skip_opt_set:
            skip_opt_set = grad_set
        else:
            skip_opt_set.update(grad_set)
    if skip_opt_set is not None:
        skip_opt_set = set(map(to_name_str, skip_opt_set))
    cfgs = _get_cfgs(input_program)
    input_program._is_mem_optimized = True
    for cfg in cfgs:
        cfg.memory_optimize(skip_opt_set=skip_opt_set, level=level)
def release_memory(input_program, skip_opt_set=None):
    """Insert :code:`delete_op` into *input_program* so unused variables
    are dropped early. The program is modified in place.

    Notes: This is an experimental API and could be removed in next few
    releases. Users should not use this API.

    Args:
        input_program(Program): The program will be inserted :code:`delete_op`.
        skip_opt_set(set): vars wil be skipped in memory optimze
    Returns:
        None
    """
    input_program._is_mem_optimized = True
    for graph in _get_cfgs(input_program):
        graph.release_memory(skip_opt_set=skip_opt_set)
| |
import bpy
from bpy.props import StringProperty, BoolProperty, EnumProperty
from .. import rman_cycles_convert
from ..rfb_utils import shadergraph_utils
from .. import rman_bl_nodes
from .rman_operators_utils import get_bxdf_items, get_projection_items
from ..rman_render import RmanRender
import math
class SHADING_OT_convert_all_renderman_nodetree(bpy.types.Operator):
    """Convert every Cycles material, light, and visibility setting in the
    file to its RenderMan equivalent."""
    bl_idname = "material.rman_convert_all_cycles_shaders"
    bl_label = "Convert All Cycles to RenderMan"
    bl_description = "Convert all Cycles nodetrees to RenderMan. This is not guaranteed to work. It is still recommended to use RenderMan only nodes."
    bl_options = {'INTERNAL'}

    def execute(self, context):
        # --- materials ---
        for mat in bpy.data.materials:
            mat.use_nodes = True
            nt = mat.node_tree
            # Skip materials that already have a RenderMan node tree.
            if shadergraph_utils.is_renderman_nodetree(mat):
                continue
            output = nt.nodes.new('RendermanOutputNode')
            try:
                # Fall back to a plain PxrDisneyBsdf when automatic
                # conversion of the Cycles graph fails.
                if not rman_cycles_convert.convert_cycles_nodetree(mat, output):
                    pxr_disney_node = rman_bl_nodes.__BL_NODES_MAP__['PxrDisneyBsdf']
                    default = nt.nodes.new(pxr_disney_node)
                    default.location = output.location
                    default.location[0] -= 300
                    nt.links.new(default.outputs[0], output.inputs[0])
            except Exception as e:
                self.report({'ERROR'}, "Error converting " + mat.name)
                #self.report({'ERROR'}, str(e))
                # uncomment to debug conversion
                import traceback
                traceback.print_exc()
            for n in nt.nodes:
                n.select = False
        # --- lights ---
        for light in bpy.data.lights:
            if light.renderman.use_renderman_node:
                continue
            light.use_nodes = True
            light_type = light.type
            light.renderman.light_primary_visibility = False
            nt = light.node_tree

            # Map the Blender light type/shape to a RenderMan light shader.
            light_shader = ''
            if light_type == 'SUN':
                light_shader = 'PxrDistantLight'
            elif light_type == 'HEMI':
                light_shader = 'PxrDomeLight'
            elif light_type == 'AREA':
                if light.shape == 'DISK':
                    light_shader = 'PxrDiskLight'
                elif light.shape == 'ELLIPSE':
                    light_shader = 'PxrSphereLight'
                else:
                    light_shader = 'PxrRectLight'
            elif light_type == 'SPOT':
                light_shader = 'PxrDiskLight'
            elif light_type == 'POINT':
                light_shader = 'PxrSphereLight'
            else:
                light_shader = 'PxrRectLight'

            #light.type = 'AREA'
            # NOTE(review): lights are zero-sized and forced to POINT here,
            # while SHADING_OT_add_renderman_nodetree forces AREA instead —
            # confirm this difference is intentional.
            if hasattr(light, 'size'):
                light.size = 0.0
            light.type = 'POINT'

            light.renderman.use_renderman_node = True

            output = nt.nodes.new('RendermanOutputNode')
            node_name = rman_bl_nodes.__BL_NODES_MAP__[light_shader]
            default = nt.nodes.new(node_name)
            default.location = output.location
            default.location[0] -= 300
            # Light shaders connect to the output's second input.
            nt.links.new(default.outputs[0], output.inputs[1])
            output.inputs[0].hide = True
            output.inputs[2].hide = True
            output.inputs[3].hide = True
            light.renderman.renderman_light_role = 'RMAN_LIGHT'
            if light_type == 'SPOT':
                # Carry the Blender spot cone over to the PxrDiskLight.
                node = light.renderman.get_light_node()
                node.coneAngle = math.degrees(light.spot_size)
                node.coneSoftness = light.spot_blend

            for n in nt.nodes:
                n.select = False

        # convert cycles vis settings
        for ob in context.scene.objects:
            if not ob.cycles_visibility.camera:
                ob.renderman.visibility_camera = False
            if not ob.cycles_visibility.diffuse or not ob.cycles_visibility.glossy:
                ob.renderman.visibility_trace_indirect = False
            if not ob.cycles_visibility.transmission:
                ob.renderman.visibility_trace_transmission = False

        return {'FINISHED'}
class SHADING_OT_convert_cycles_to_renderman_nodetree(bpy.types.Operator):
    """Convert the current datablock's Cycles shader graph to RenderMan."""
    bl_idname = "material.rman_convert_cycles_shader"
    bl_label = "Convert Cycles Shader"
    bl_description = "Try to convert the current Cycles Shader to RenderMan. This is not guaranteed to work. It is still recommended to use RenderMan only nodes."
    bl_options = {'INTERNAL'}

    # Which kind of ID datablock to convert ('material', 'light', 'world',
    # or 'node_editor' to take it from the active node editor).
    idtype: StringProperty(name="ID Type", default="material")
    # Bxdf used as a fallback when automatic conversion fails.
    bxdf_name: StringProperty(name="Bxdf Name", default="LamaSurface")

    def execute(self, context):
        idtype = self.properties.idtype
        if idtype == 'node_editor':
            idblock = context.space_data.id
            idtype = 'material'
        else:
            context_data = {'material': context.material,
                            'light': context.light, 'world': context.scene.world}
            idblock = context_data[idtype]
            if not idblock:
                # try getting material from context.object
                ob = context.object
                rm = ob.renderman
                idblock = rm.rman_material_override

        idblock.use_nodes = True
        nt = idblock.node_tree

        if idtype == 'material':
            output = nt.nodes.new('RendermanOutputNode')
            if idblock.grease_pencil:
                shadergraph_utils.convert_grease_pencil_mat(idblock, nt, output)

            elif not rman_cycles_convert.convert_cycles_nodetree(idblock, output):
                # Conversion failed: fall back to the default Bxdf and
                # optionally copy basic colors over.
                bxdf_node_name = rman_bl_nodes.__BL_NODES_MAP__[self.properties.bxdf_name]
                default = nt.nodes.new(bxdf_node_name)
                default.location = output.location
                default.location[0] -= 300
                nt.links.new(default.outputs[0], output.inputs[0])

                if idblock.renderman.copy_color_params:
                    # NOTE(review): diffuse_intensity / specular_color come
                    # from the legacy Blender material API — confirm these
                    # attributes still exist on current Material datablocks.
                    default.diffuseColor = idblock.diffuse_color
                    default.diffuseGain = idblock.diffuse_intensity
                    default.enablePrimarySpecular = True
                    default.specularFaceColor = idblock.specular_color

            output.inputs[3].hide = True

        for n in nt.nodes:
            n.select = False
        return {'FINISHED'}
class SHADING_OT_add_renderman_nodetree(bpy.types.Operator):
    """Add a RenderMan shader node tree to a material, light, or world."""
    bl_idname = "material.rman_add_rman_nodetree"
    bl_label = "Add RenderMan Nodetree"
    bl_description = "Add a RenderMan shader node tree"
    bl_options = {'INTERNAL'}

    # Which kind of ID datablock to operate on ('material', 'light',
    # 'world', or 'node_editor' to take it from the active node editor).
    idtype: StringProperty(name="ID Type", default="material")

    def get_type_items(self, context):
        return get_bxdf_items()

    bxdf_name: EnumProperty(items=get_type_items, name="Material")

    def execute(self, context):
        idtype = self.properties.idtype
        if idtype == 'node_editor':
            idblock = context.space_data.id
            idtype = 'material'
        elif idtype == 'world':
            idblock = context.scene.world
        else:
            context_data = {'material': context.material,
                            'light': context.light, 'world': context.scene.world}
            idblock = context_data[idtype]
            if not idblock:
                # try getting material from context.object
                ob = context.object
                rm = ob.renderman
                idblock = rm.rman_material_override

        # nt = bpy.data.node_groups.new(idblock.name,
        #                               type='RendermanPatternGraph')
        #nt.use_fake_user = True
        idblock.use_nodes = True
        nt = idblock.node_tree

        if idtype == 'material':
            output = nt.nodes.new('RendermanOutputNode')
            if idblock.grease_pencil:
                shadergraph_utils.convert_grease_pencil_mat(idblock, nt, output)
            else:
                bxdf_node_name = rman_bl_nodes.__BL_NODES_MAP__[self.properties.bxdf_name]
                default = nt.nodes.new(bxdf_node_name)
                default.location = output.location
                default.location[0] -= 300
                nt.links.new(default.outputs[0], output.inputs[0])
                if self.properties.bxdf_name == 'PxrLayerSurface':
                    shadergraph_utils.create_pxrlayer_nodes(nt, default)
                default.update_mat(idblock)
            output.inputs[3].hide = True
        elif idtype == 'light':
            light_type = idblock.type
            light = idblock

            # Map the Blender light type/shape to a RenderMan light shader.
            light_shader = ''
            if light_type == 'SUN':
                light_shader = 'PxrDistantLight'
            elif light_type == 'HEMI':
                light_shader = 'PxrDomeLight'
            elif light_type == 'AREA':
                if light.shape == 'DISK':
                    light_shader = 'PxrDiskLight'
                elif light.shape == 'ELLIPSE':
                    light_shader = 'PxrSphereLight'
                else:
                    light_shader = 'PxrRectLight'
            elif light_type == 'SPOT':
                light_shader = 'PxrDiskLight'
            elif light_type == 'POINT':
                light_shader = 'PxrSphereLight'
            else:
                light_shader = 'PxrRectLight'

            light.type = 'AREA'
            light.renderman.use_renderman_node = True

            output = nt.nodes.new('RendermanOutputNode')
            # FIX: look the node class up in __BL_NODES_MAP__ like every
            # other operator in this module instead of assuming the
            # '<shader>LightNode' naming scheme.
            node_name = rman_bl_nodes.__BL_NODES_MAP__[light_shader]
            default = nt.nodes.new(node_name)
            default.location = output.location
            default.location[0] -= 300
            # Light shaders connect to the output's second input.
            nt.links.new(default.outputs[0], output.inputs[1])
            output.inputs[0].hide = True
            output.inputs[2].hide = True
            output.inputs[3].hide = True
            light.renderman.renderman_light_role = 'RMAN_LIGHT'
            if light_type == 'SPOT':
                # FIX: use the resolved light datablock; context.light may
                # be None when invoked from the node editor.
                node = light.renderman.get_light_node()
                node.coneAngle = math.degrees(light.spot_size)
                node.coneSoftness = light.spot_blend
        elif idtype == 'world':
            # world
            idblock.renderman.use_renderman_node = True
            if shadergraph_utils.find_node(idblock, 'RendermanIntegratorsOutputNode'):
                return {'FINISHED'}
            output = nt.nodes.new('RendermanIntegratorsOutputNode')
            node_name = rman_bl_nodes.__BL_NODES_MAP__.get('PxrPathTracer')
            default = nt.nodes.new(node_name)
            default.location = output.location
            default.location[0] -= 200
            nt.links.new(default.outputs[0], output.inputs[0])

            sf_output = nt.nodes.new('RendermanSamplefiltersOutputNode')
            sf_output.location = default.location
            sf_output.location[0] -= 300
            df_output = nt.nodes.new('RendermanDisplayfiltersOutputNode')
            df_output.location = sf_output.location
            df_output.location[0] -= 300
            rman_cycles_convert.convert_world_nodetree(idblock, context, df_output)

        # unselect all nodes
        for n in nt.nodes:
            n.select = False

        return {'FINISHED'}

    def draw(self, context):
        layout = self.layout
        col = layout.column()
        col.label(text="Select a Material")
        col.prop(self, 'bxdf_name')

    def invoke(self, context, event):
        idtype = self.properties.idtype
        if idtype == 'node_editor':
            idblock = context.space_data.id
            idtype = 'material'
        elif idtype == 'world':
            idblock = context.scene.world
        else:
            context_data = {'material': context.material,
                            'light': context.light, 'world': context.scene.world}
            idblock = context_data[idtype]
            if not idblock:
                # try getting material from context.object
                ob = context.object
                rm = ob.renderman
                idblock = rm.rman_material_override

        idblock.use_nodes = True
        nt = idblock.node_tree
        if idtype == 'material':
            # Grease-pencil materials need no Bxdf choice dialog.
            if idblock.grease_pencil:
                return self.execute(context)
            wm = context.window_manager
            return wm.invoke_props_dialog(self)
        return self.execute(context)
class SHADING_OT_add_integrator_nodetree(bpy.types.Operator):
    """Attach a RenderMan integrator node tree to the scene's world."""
    bl_idname = "material.rman_add_integrator_nodetree"
    bl_label = "Add RenderMan Integrator Nodetree"
    bl_description = "Add a RenderMan Integrator node tree"
    bl_options = {'INTERNAL'}

    def execute(self, context):
        world = context.scene.world
        world.use_nodes = True
        tree = world.node_tree

        world.renderman.use_renderman_node = True
        # Nothing to do when an integrators output node already exists.
        if shadergraph_utils.find_node(world, 'RendermanIntegratorsOutputNode'):
            return {'FINISHED'}

        out_node = tree.nodes.new('RendermanIntegratorsOutputNode')
        integrator = tree.nodes.new(
            rman_bl_nodes.__BL_NODES_MAP__.get('PxrPathTracer'))
        integrator.location = out_node.location
        integrator.location[0] -= 200
        tree.links.new(integrator.outputs[0], out_node.inputs[0])

        # Leave the tree with nothing selected.
        for node in tree.nodes:
            node.select = False

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
class SHADING_OT_add_displayfilters_nodetree(bpy.types.Operator):
    """Attach a RenderMan display filters node tree to the world."""
    bl_idname = "material.rman_add_displayfilters_nodetree"
    # FIX: typo "Dsiplay" -> "Display" in the user-visible label.
    bl_label = "Add RenderMan Display Filters Nodetree"
    bl_description = "Add a RenderMan display filters node tree. Note, a PxrBackgroundDisplayFilter will be automatically added for you, that will inherit the world color."
    bl_options = {'INTERNAL'}

    def execute(self, context):
        world = context.scene.world
        world.use_nodes = True
        nt = world.node_tree

        world.renderman.use_renderman_node = True
        if shadergraph_utils.find_node(world, 'RendermanDisplayfiltersOutputNode'):
            return {'FINISHED'}
        df_output = nt.nodes.new('RendermanDisplayfiltersOutputNode')
        # The original self-assignment `df_output.location = df_output.location`
        # was a no-op and has been dropped.
        df_output.location[0] -= 300
        rman_cycles_convert.convert_world_nodetree(world, context, df_output)

        # unselect all nodes
        for n in nt.nodes:
            n.select = False

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
class SHADING_OT_add_samplefilters_nodetree(bpy.types.Operator):
    """Attach a RenderMan sample filters node tree to the world."""
    bl_idname = "material.rman_add_samplefilters_nodetree"
    bl_label = "Add RenderMan Sample Filters Nodetree"
    bl_description = "Add a RenderMan sample filters node tree"
    bl_options = {'INTERNAL'}

    def execute(self, context):
        world = context.scene.world
        world.use_nodes = True
        nt = world.node_tree

        world.renderman.use_renderman_node = True
        if shadergraph_utils.find_node(world, 'RendermanSamplefiltersOutputNode'):
            return {'FINISHED'}
        sf_output = nt.nodes.new('RendermanSamplefiltersOutputNode')
        # The original self-assignment `sf_output.location = sf_output.location`
        # was a no-op and has been dropped.
        sf_output.location[0] -= 300

        # unselect all nodes
        for n in nt.nodes:
            n.select = False

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
class PRMAN_OT_New_bxdf(bpy.types.Operator):
    """Create a new material with the chosen RenderMan Bxdf and assign it
    to the active object."""
    bl_idname = "node.rman_new_bxdf"
    bl_label = "New RenderMan Material"
    bl_description = "Create a new material with a new RenderMan Bxdf"
    bl_options = {"REGISTER", "UNDO"}

    idtype: StringProperty(name="ID Type", default="material")

    def get_type_items(self, context):
        return get_bxdf_items()

    bxdf_name: EnumProperty(items=get_type_items, name="Bxdf Name")

    def execute(self, context):
        ob = context.object
        bxdf_name = self.bxdf_name
        mat = bpy.data.materials.new(bxdf_name)
        ob.active_material = mat
        mat.use_nodes = True
        nt = mat.node_tree

        output = nt.nodes.new('RendermanOutputNode')
        bxdf_node_name = rman_bl_nodes.__BL_NODES_MAP__[bxdf_name]
        default = nt.nodes.new(bxdf_node_name)
        default.location = output.location
        default.location[0] -= 300
        default.select = False
        nt.links.new(default.outputs[0], output.inputs[0])
        if self.bxdf_name == 'PxrLayerSurface':
            shadergraph_utils.create_pxrlayer_nodes(nt, default)
        output.inputs[3].hide = True
        default.update_mat(mat)
        return {"FINISHED"}

    def draw(self, context):
        layout = self.layout
        col = layout.column()
        col.label(text="Select a Material")
        col.prop(self, 'bxdf_name')

    def invoke(self, context, event):
        idtype = self.properties.idtype
        if idtype == 'node_editor':
            idblock = context.space_data.id
            idtype = 'material'
        else:
            context_data = {'material': context.material,
                            'light': context.light, 'world': context.scene.world}
            idblock = context_data[idtype]
        idblock.use_nodes = True
        nt = idblock.node_tree
        if idtype == 'material':
            # FIX: check the resolved idblock rather than context.material,
            # which is None when invoked from the node editor.
            if idblock.grease_pencil:
                return self.execute(context)
            wm = context.window_manager
            return wm.invoke_props_dialog(self)
        return self.execute(context)
class PRMAN_OT_New_Material_Override(bpy.types.Operator):
    """Create a material named after the chosen Bxdf and assign it as the
    active object's RenderMan override material."""
    bl_idname = "node.rman_new_material_override"
    bl_label = "New RenderMan Material Override"
    bl_description = "Create a new material override"
    bl_options = {"REGISTER", "UNDO"}

    def get_type_items(self, context):
        return get_bxdf_items()

    bxdf_name: EnumProperty(items=get_type_items, name="Bxdf Name")

    def execute(self, context):
        active_ob = context.object
        chosen_bxdf = self.bxdf_name
        override_mat = bpy.data.materials.new(chosen_bxdf)
        active_ob.renderman.rman_material_override = override_mat
        override_mat.use_nodes = True
        tree = override_mat.node_tree

        out_node = tree.nodes.new('RendermanOutputNode')
        out_node.select = False
        bxdf_node = tree.nodes.new(rman_bl_nodes.__BL_NODES_MAP__[chosen_bxdf])
        bxdf_node.location = out_node.location
        bxdf_node.location[0] -= 300
        bxdf_node.select = False
        tree.links.new(bxdf_node.outputs[0], out_node.inputs[0])
        if chosen_bxdf == 'PxrLayerSurface':
            shadergraph_utils.create_pxrlayer_nodes(tree, bxdf_node)
        out_node.inputs[3].hide = True
        bxdf_node.update_mat(override_mat)
        active_ob.update_tag(refresh={'OBJECT'})
        return {"FINISHED"}

    def draw(self, context):
        column = self.layout.column()
        column.label(text="Select a Material")
        column.prop(self, 'bxdf_name')

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self)
class PRMAN_OT_Force_Material_Refresh(bpy.types.Operator):
    """Push the active material to the renderer during live rendering."""
    bl_idname = "node.rman_force_material_refresh"
    bl_label = "Force Refresh"
    bl_description = "Force Material to Refresh during IPR. Use this if your material is not responding to edits."

    def execute(self, context):
        renderer = RmanRender.get_rman_render()
        # Only meaningful while an IPR session is running.
        if not renderer.rman_is_live_rendering:
            return {"FINISHED"}
        material = getattr(context, "material", None)
        if material:
            renderer.rman_scene_sync.update_material(material)
        return {"FINISHED"}
class PRMAN_OT_Force_Light_Refresh(bpy.types.Operator):
    """Push the current light to the renderer during live rendering."""
    bl_idname = "node.rman_force_light_refresh"
    bl_label = "Force Refresh"
    bl_description = "Force Light to Refresh during IPR. Use this if your light is not responding to edits."

    def execute(self, context):
        renderer = RmanRender.get_rman_render()
        # Only meaningful while an IPR session is running.
        if not renderer.rman_is_live_rendering:
            return {"FINISHED"}
        light_ob = getattr(context, "light", context.active_object)
        if light_ob:
            renderer.rman_scene_sync.update_light(light_ob)
        return {"FINISHED"}
class PRMAN_OT_Force_LightFilter_Refresh(bpy.types.Operator):
    """Push the current light filter to the renderer during live rendering."""
    bl_idname = "node.rman_force_lightfilter_refresh"
    bl_label = "Force Refresh"
    bl_description = "Force Light Filter to Refresh during IPR. Use this if your light filter is not responding to edits."

    def execute(self, context):
        renderer = RmanRender.get_rman_render()
        # Only meaningful while an IPR session is running.
        if not renderer.rman_is_live_rendering:
            return {"FINISHED"}
        filter_ob = getattr(context, "light_filter", context.active_object)
        if filter_ob:
            renderer.rman_scene_sync.update_light_filter(filter_ob)
        return {"FINISHED"}
class PRMAN_OT_Add_Projection_Nodetree(bpy.types.Operator):
    """Attach a RenderMan projection plugin node tree to the active camera."""
    bl_idname = "node.rman_add_projection_nodetree"
    bl_label = "New Projection"
    bl_description = "Attach a RenderMan projection plugin"
    bl_options = {"REGISTER"}

    def get_type_items(self, context):
        return get_projection_items()

    proj_name: EnumProperty(items=get_type_items, name="Projection")

    def execute(self, context):
        camera_ob = context.object
        # Projections only make sense on cameras.
        if camera_ob.type != 'CAMERA':
            return {'FINISHED'}
        tree = bpy.data.node_groups.new(camera_ob.data.name, 'ShaderNodeTree')
        out_node = tree.nodes.new('RendermanProjectionsOutputNode')
        out_node.select = False
        camera_ob.data.renderman.rman_nodetree = tree
        proj_node = tree.nodes.new(
            rman_bl_nodes.__BL_NODES_MAP__[self.proj_name])
        proj_node.location = out_node.location
        proj_node.location[0] -= 300
        proj_node.select = False
        tree.links.new(proj_node.outputs[0], out_node.inputs[0])
        camera_ob.update_tag(refresh={'DATA'})
        return {"FINISHED"}

    def draw(self, context):
        column = self.layout.column()
        column.label(text="Select a Projection")
        column.prop(self, 'proj_name')

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self)
# All operator classes defined in this module; registered and
# unregistered in this order.
classes = [
    SHADING_OT_convert_all_renderman_nodetree,
    SHADING_OT_convert_cycles_to_renderman_nodetree,
    SHADING_OT_add_renderman_nodetree,
    SHADING_OT_add_integrator_nodetree,
    SHADING_OT_add_displayfilters_nodetree,
    SHADING_OT_add_samplefilters_nodetree,
    PRMAN_OT_New_bxdf,
    PRMAN_OT_New_Material_Override,
    PRMAN_OT_Force_Material_Refresh,
    PRMAN_OT_Force_Light_Refresh,
    PRMAN_OT_Force_LightFilter_Refresh,
    PRMAN_OT_Add_Projection_Nodetree
]
def register():
    """Register every operator class in this module with Blender."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Unregister every operator class, tolerating already-removed ones."""
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            # FIX: the handler previously called rfb_log(), which is not
            # imported in this module and therefore raised NameError the
            # moment unregistration actually failed. Fall back to print so
            # the failure is still reported.
            print('Could not unregister class: %s' % str(cls))
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import itertools
import os
import six
import traceback
from oslo.serialization import jsonutils
from sqlalchemy import or_
from nailgun import consts
from nailgun import notifier
from nailgun import objects
from nailgun.rpc import utils
from nailgun.settings import settings
from nailgun.consts import TASK_STATUSES
from nailgun.db import db
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import Release
from nailgun.logger import logger
from nailgun.network import connectivity_check
from nailgun.network import utils as net_utils
from nailgun.task.helpers import TaskHelper
from nailgun.utils import logs as logs_utils
from nailgun.utils import reverse
class NailgunReceiver(object):
    @classmethod
    def remove_nodes_resp(cls, **kwargs):
        """Handle the Astute RPC reply for a node-removal task.

        Locks the task, its cluster, and every affected node, deletes the
        successfully removed (and unreachable) nodes from the DB, marks
        errored nodes, sends user notifications, and updates the task.

        :param kwargs: RPC payload: 'task_uuid', 'nodes', 'error_nodes',
            'inaccessible_nodes', 'error', 'status', 'progress'.
        """
        logger.info(
            "RPC method remove_nodes_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes') or []
        error_nodes = kwargs.get('error_nodes') or []
        inaccessible_nodes = kwargs.get('inaccessible_nodes') or []
        error_msg = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')

        # Terminal states always report full progress.
        if status in [consts.TASK_STATUSES.ready, consts.TASK_STATUSES.error]:
            progress = 100

        # locking tasks on cluster
        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        objects.TaskCollection.lock_cluster_tasks(task.cluster_id)

        # Re-fetch the task with a row lock now that cluster tasks are held.
        task = objects.Task.get_by_uuid(
            task_uuid,
            fail_if_not_found=True,
            lock_for_update=True
        )

        # locking cluster
        if task.cluster_id is not None:
            objects.Cluster.get_by_uid(
                task.cluster_id,
                fail_if_not_found=True,
                lock_for_update=True
            )

        # locking nodes
        all_nodes = itertools.chain(nodes, error_nodes, inaccessible_nodes)
        all_nodes_ids = [
            node['id'] if 'id' in node else node['uid']
            for node in all_nodes
        ]
        locked_nodes = objects.NodeCollection.filter_by_list(
            None,
            'id',
            all_nodes_ids,
            order_by='id'
        )
        objects.NodeCollection.lock_for_update(locked_nodes).all()

        def get_node_id(n):
            # Payload entries may carry 'id' or only a string 'uid'.
            return n.get('id', int(n.get('uid')))

        nodes_to_delete_ids = [get_node_id(n) for n in nodes]

        # Nodes that never answered over RPC are removed as well.
        if(len(inaccessible_nodes) > 0):
            inaccessible_node_ids = [
                get_node_id(n) for n in inaccessible_nodes]
            logger.warn(u'Nodes %s not answered by RPC, removing from db',
                        inaccessible_nodes)
            nodes_to_delete_ids.extend(inaccessible_node_ids)

        # Drop per-node log files before the DB rows disappear.
        for node in objects.NodeCollection.filter_by_id_list(
                None, nodes_to_delete_ids):
            logs_utils.delete_node_logs(node)

        Node.delete_by_ids(nodes_to_delete_ids)

        # Errored nodes stay in the DB, flagged and no longer pending
        # deletion.
        for node in error_nodes:
            node_db = objects.Node.get_by_uid(node['uid'])
            if not node_db:
                logger.error(
                    u"Failed to delete node '%s' marked as error from Astute:"
                    " node doesn't exist", str(node)
                )
            else:
                node_db.pending_deletion = False
                node_db.status = 'error'
                db().add(node_db)
                node['name'] = node_db.name
        db().flush()

        success_msg = u"No nodes were removed"
        err_msg = u"No errors occurred"
        if nodes:
            success_msg = u"Successfully removed {0} node(s)".format(
                len(nodes)
            )
            notifier.notify("done", success_msg)
        if error_nodes:
            err_msg = u"Failed to remove {0} node(s): {1}".format(
                len(error_nodes),
                ', '.join(
                    [n.get('name') or "ID: {0}".format(n['uid'])
                     for n in error_nodes])
            )
            notifier.notify("error", err_msg)
        # Without an explicit error from Astute, summarize both outcomes.
        if not error_msg:
            error_msg = ". ".join([success_msg, err_msg])

        data = {
            'status': status,
            'progress': progress,
            'message': error_msg,
        }
        objects.Task.update(task, data)

        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
@classmethod
def remove_cluster_resp(cls, **kwargs):
logger.info(
"RPC method remove_cluster_resp received: %s" %
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
# in remove_nodes_resp method all objects are already locked
cls.remove_nodes_resp(**kwargs)
task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
cluster = task.cluster
if task.status in ('ready',):
logger.debug("Removing environment itself")
cluster_name = cluster.name
ips = db().query(IPAddr).filter(
IPAddr.network.in_([n.id for n in cluster.network_groups])
)
map(db().delete, ips)
db().flush()
db().delete(cluster)
db().flush()
notifier.notify(
"done",
u"Environment '%s' and all its nodes are deleted" % (
cluster_name
)
)
elif task.status in ('error',):
cluster.status = 'error'
db().add(cluster)
db().flush()
if not task.message:
task.message = "Failed to delete nodes:\n{0}".format(
cls._generate_error_message(
task,
error_types=('deletion',)
)
)
notifier.notify(
"error",
task.message,
cluster.id
)
    @classmethod
    def deploy_resp(cls, **kwargs):
        """Handle the Astute reply for a deployment task.

        Updates every reported node in the database, recalculates the
        task progress when Astute did not send one, and routes the task
        into its error/success/in-progress handling path.

        :param kwargs: Astute message payload; keys used are 'task_uuid',
            'nodes', 'error', 'status' and 'progress'.
        """
        logger.info(
            "RPC method deploy_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes') or []
        message = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        task = objects.Task.get_by_uuid(
            task_uuid,
            fail_if_not_found=True,
        )
        # locking all cluster tasks
        objects.TaskCollection.lock_cluster_tasks(task.cluster_id)
        # lock cluster
        objects.Cluster.get_by_uid(
            task.cluster_id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        if not status:
            status = task.status
        # for deployment we need just to pop
        master = next((
            n for n in nodes if n['uid'] == consts.MASTER_ROLE), {})
        # we should remove master node from the nodes since it requires
        # special handling and won't work with old code
        if master:
            nodes.remove(master)
        # if there no node except master - then just skip updating
        # nodes status, for the task itself astute will send
        # message with descriptive error
        if nodes:
            # lock nodes for updating so they can't be deleted
            q_nodes = objects.NodeCollection.filter_by_id_list(
                None,
                [n['uid'] for n in nodes],
            )
            q_nodes = objects.NodeCollection.order_by(q_nodes, 'id')
            objects.NodeCollection.lock_for_update(q_nodes).all()
        # First of all, let's update nodes in database
        for node in nodes:
            node_db = objects.Node.get_by_uid(node['uid'])
            if not node_db:
                logger.warning(
                    u"No node found with uid '{0}' - nothing changed".format(
                        node['uid']
                    )
                )
                continue
            update_fields = (
                'error_msg',
                'error_type',
                'status',
                'progress',
                'online'
            )
            for param in update_fields:
                if param in node:
                    logger.debug(
                        u"Updating node {0} - set {1} to {2}".format(
                            node['uid'],
                            param,
                            node[param]
                        )
                    )
                    setattr(node_db, param, node[param])
                    # NOTE(review): 'and' binds tighter than 'or', so for an
                    # offline node this branch fires on every param present
                    # in the message, re-sending the failure notification
                    # below several times — confirm whether that is intended.
                    if param == 'progress' and node.get('status') == 'error' \
                            or node.get('online') is False:
                        # If failure occurred with node
                        # it's progress should be 100
                        node_db.progress = 100
                        # Setting node error_msg for offline nodes
                        if node.get('online') is False \
                                and not node_db.error_msg:
                            node_db.error_msg = u"Node is offline"
                        # Notification on particular node failure
                        notifier.notify(
                            "error",
                            u"Failed to deploy node '{0}': {1}".format(
                                node_db.name,
                                node_db.error_msg or "Unknown error"
                            ),
                            cluster_id=task.cluster_id,
                            node_id=node['uid'],
                            task_uuid=task_uuid
                        )
        db().flush()
        if nodes and not progress:
            progress = TaskHelper.recalculate_deployment_task_progress(task)
        # full error will be provided in next astute message
        if master.get('status') == consts.TASK_STATUSES.error:
            status = consts.TASK_STATUSES.error
        # Let's check the whole task status
        if status == consts.TASK_STATUSES.error:
            cls._error_action(task, status, progress, message)
        elif status == consts.TASK_STATUSES.ready:
            cls._success_action(task, status, progress)
        else:
            data = {'status': status, 'progress': progress, 'message': message}
            objects.Task.update(task, data)
        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
@classmethod
def provision_resp(cls, **kwargs):
logger.info(
"RPC method provision_resp received: %s" %
jsonutils.dumps(kwargs))
task_uuid = kwargs.get('task_uuid')
message = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
nodes = kwargs.get('nodes', [])
task = objects.Task.get_by_uuid(
task_uuid,
fail_if_not_found=True,
lock_for_update=True
)
# if task was failed on master node then we should
# mark all cluster's nodes in error state
master = next((
n for n in nodes if n['uid'] == consts.MASTER_ROLE), {})
# we should remove master node from the nodes since it requires
# special handling and won't work with old code
if master:
nodes.remove(master)
if master.get('status') == consts.TASK_STATUSES.error:
status = consts.TASK_STATUSES.error
progress = 100
# lock nodes for updating
q_nodes = objects.NodeCollection.filter_by_id_list(
None,
[n['uid'] for n in nodes])
q_nodes = objects.NodeCollection.order_by(q_nodes, 'id')
objects.NodeCollection.lock_for_update(q_nodes).all()
for node in nodes:
uid = node.get('uid')
node_db = objects.Node.get_by_uid(node['uid'])
if not node_db:
logger.warn('Node with uid "{0}" not found'.format(uid))
continue
if node.get('status') == consts.TASK_STATUSES.error:
node_db.status = consts.TASK_STATUSES.error
node_db.progress = 100
node_db.error_type = 'provision'
node_db.error_msg = node.get('error_msg', 'Unknown error')
else:
node_db.status = node.get('status')
node_db.progress = node.get('progress')
db().flush()
if nodes and not progress:
progress = TaskHelper.recalculate_provisioning_task_progress(task)
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
cls._update_action_log_entry(status, task.name, task_uuid, nodes)
@classmethod
def _update_action_log_entry(cls, task_status, task_name, task_uuid,
nodes_from_resp):
try:
if task_status in (consts.TASK_STATUSES.ready,
consts.TASK_STATUSES.error):
al = objects.ActionLog.get_by_kwargs(task_uuid=task_uuid,
action_name=task_name)
if al:
data = {
'end_timestamp': datetime.datetime.utcnow(),
'additional_info': {
'nodes_from_resp': cls.sanitize_nodes_from_resp(
nodes_from_resp),
'ended_with_status': task_status
}
}
objects.ActionLog.update(al, data)
except Exception as e:
logger.error("_update_action_log_entry failed: %s",
six.text_type(e))
@classmethod
def sanitize_nodes_from_resp(cls, nodes):
resp = []
if isinstance(nodes, list):
for n in nodes:
if isinstance(n, dict) and 'uid' in n:
resp.append(n['uid'])
return resp
@classmethod
def _generate_error_message(cls, task, error_types, names_only=False):
nodes_info = []
error_nodes = db().query(Node).filter_by(
cluster_id=task.cluster_id
).filter(
or_(
Node.status == 'error',
Node.online == (False)
)
).filter(
Node.error_type.in_(error_types)
).all()
for n in error_nodes:
if names_only:
nodes_info.append(u"'{0}'".format(n.name))
else:
nodes_info.append(u"'{0}': {1}".format(n.name, n.error_msg))
if nodes_info:
if names_only:
message = u", ".join(nodes_info)
else:
message = u"\n".join(nodes_info)
else:
message = u"Unknown error"
return message
@classmethod
def _error_action(cls, task, status, progress, message=None):
task_name = task.name.title()
if message:
message = u"{0} has failed. {1}".format(task_name, message)
# in case we are sending faild task message from astute
# we should not create a notification with it, because its add
# a lot of clutter for user
notify_message = message.split('\n\n')[0]
else:
message = u"{0} has failed. Check these nodes:\n{1}".format(
task_name,
cls._generate_error_message(
task,
error_types=('deploy', 'provision'),
names_only=True
)
)
notify_message = message
notifier.notify(
"error",
notify_message,
task.cluster_id
)
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
    @classmethod
    def _success_action(cls, task, status, progress):
        """Finish a task successfully and notify the user.

        Builds a completion message — including the Horizon dashboard URL
        when it can be determined for the cluster mode — appends Zabbix
        and plugin notices, then updates the task. Falls back to
        :py:meth:`_error_action` if any cluster node ended up in 'error'.
        """
        # check if all nodes are ready
        if any(map(lambda n: n.status == 'error',
                   task.cluster.nodes)):
            cls._error_action(task, 'error', 100)
            return
        task_name = task.name.title()
        # NOTE(review): if the cluster mode is neither single/multinode
        # nor HA, 'message' is never assigned and the code below would
        # raise NameError — presumably unreachable; confirm.
        if task.cluster.mode in ('singlenode', 'multinode'):
            # determining horizon url - it's an IP
            # of a first cluster controller
            controller = db().query(Node).filter_by(
                cluster_id=task.cluster_id
            ).filter(
                Node.roles.any('controller')
            ).first()
            if controller:
                logger.debug(
                    u"Controller is found, node_id=%s, "
                    "getting it's IP addresses",
                    controller.id
                )
                # NOTE(review): relies on Python 2 filter() returning a
                # list (truth test and [0] below) — not Python 3 safe.
                public_net = filter(
                    lambda n: n['name'] == 'public' and 'ip' in n,
                    objects.Cluster.get_network_manager(
                        controller.cluster
                    ).get_node_networks(controller)
                )
                if public_net:
                    horizon_ip = public_net[0]['ip'].split('/')[0]
                    protocol = utils.get_protocol_for_horizon(task.cluster)
                    message = (
                        u"{task} of environment '{name}' is done. "
                        "Access the OpenStack dashboard (Horizon) at "
                        "{proto}://{horizon_address}/ or via internal "
                        "network at http://{controller_address}/"
                    ).format(
                        task=task_name,
                        name=task.cluster.name,
                        proto=protocol,
                        horizon_address=horizon_ip,
                        controller_address=controller.ip
                    )
                else:
                    message = u"{0} of environment '{1}' is done".format(
                        task_name,
                        task.cluster.name
                    )
                    logger.warning(
                        u"Public ip for controller node "
                        "not found in '{0}'".format(task.cluster.name)
                    )
            else:
                message = u"{0} of environment '{1}' is done".format(
                    task_name,
                    task.cluster.name
                )
                logger.warning(u"Controller node not found in '{0}'".format(
                    task.cluster.name
                ))
        elif task.cluster.is_ha_mode:
            # determining horizon url in HA mode - it's vip
            # from a public network saved in task cache
            try:
                message = (
                    u"{0} of environment '{1}' is done. "
                    "Access the OpenStack dashboard (Horizon) at {2}"
                ).format(
                    task_name,
                    task.cluster.name,
                    objects.Cluster.get_network_manager(
                        task.cluster
                    ).get_horizon_url(task.cluster.id)
                )
            except Exception as exc:
                logger.error(": ".join([
                    str(exc),
                    traceback.format_exc()
                ]))
                message = u"{0} of environment '{1}' is done".format(
                    task_name,
                    task.cluster.name
                )
                logger.warning(
                    u"Cannot find virtual IP for '{0}'".format(
                        task.cluster.name
                    )
                )
        # Append the Zabbix dashboard URL when one is configured.
        zabbix_url = objects.Cluster.get_network_manager(
            task.cluster
        ).get_zabbix_url(task.cluster)
        if zabbix_url:
            message = "{0} Access Zabbix dashboard at {1}".format(
                message, zabbix_url)
        # Append one line per deployed plugin.
        plugins_msg = cls._make_plugins_success_message(task.cluster.plugins)
        if plugins_msg:
            message = '{0}\n\n{1}'.format(message, plugins_msg)
        notifier.notify("done", message, task.cluster_id)
        data = {'status': status, 'progress': progress, 'message': message}
        objects.Task.update(task, data)
@classmethod
def _make_plugins_success_message(cls, plugins):
"""Makes plugins installation message
"""
msg = 'Plugin {0} is deployed. {1}'
return '\n'.join(
map(lambda p: msg.format(p.name, p.description), plugins))
@classmethod
def stop_deployment_resp(cls, **kwargs):
logger.info(
"RPC method stop_deployment_resp received: %s" %
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes', [])
ia_nodes = kwargs.get('inaccessible_nodes', [])
message = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
task = objects.Task.get_by_uuid(
task_uuid,
fail_if_not_found=True,
)
# locking all cluster tasks
objects.TaskCollection.lock_cluster_tasks(task.cluster_id)
stopping_task_names = [
consts.TASK_NAMES.deploy,
consts.TASK_NAMES.deployment,
consts.TASK_NAMES.provision
]
# Locking other tasks for stopping
q_stop_tasks = objects.TaskCollection.filter_by_list(
None,
'name',
stopping_task_names
)
q_stop_tasks = objects.TaskCollection.filter_by(
q_stop_tasks,
cluster_id=task.cluster_id
)
q_stop_tasks = objects.TaskCollection.order_by(
q_stop_tasks,
'id'
)
stop_tasks = objects.TaskCollection.lock_for_update(q_stop_tasks).all()
# Locking cluster
objects.Cluster.get_by_uid(
task.cluster_id,
fail_if_not_found=True,
lock_for_update=True
)
if not stop_tasks:
logger.warning("stop_deployment_resp: deployment tasks \
not found for environment '%s'!", task.cluster_id)
if status == "ready":
task.cluster.status = "stopped"
if stop_tasks:
map(db().delete, stop_tasks)
node_uids = [n['uid'] for n in itertools.chain(nodes, ia_nodes)]
q_nodes = objects.NodeCollection.filter_by_id_list(None, node_uids)
q_nodes = objects.NodeCollection.filter_by(
q_nodes,
cluster_id=task.cluster_id
)
q_nodes = objects.NodeCollection.order_by(q_nodes, 'id')
q_nodes = objects.NodeCollection.lock_for_update(q_nodes)
# locking Nodes for update
update_nodes = objects.NodeCollection.lock_for_update(
q_nodes
).all()
for node in update_nodes:
objects.Node.reset_to_discover(node)
if ia_nodes:
cls._notify_inaccessible(
task.cluster_id,
[n["uid"] for n in ia_nodes],
u"deployment stopping"
)
message = (
u"Deployment of environment '{0}' was successfully stopped. "
u"Please reset the environment if you want to redeploy it."
.format(task.cluster.name or task.cluster_id)
)
notifier.notify(
"done",
message,
task.cluster_id
)
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
cls._update_action_log_entry(status, task.name, task_uuid, nodes)
@classmethod
def reset_environment_resp(cls, **kwargs):
logger.info(
"RPC method reset_environment_resp received: %s",
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes', [])
ia_nodes = kwargs.get('inaccessible_nodes', [])
message = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
task = objects.Task.get_by_uuid(
task_uuid,
fail_if_not_found=True,
lock_for_update=True
)
# Locking cluster
objects.Cluster.get_by_uid(
task.cluster_id,
fail_if_not_found=True,
lock_for_update=True
)
if status == consts.TASK_STATUSES.ready:
# restoring pending changes
task.cluster.status = consts.CLUSTER_STATUSES.new
objects.Cluster.add_pending_changes(
task.cluster,
consts.CLUSTER_CHANGES.attributes
)
objects.Cluster.add_pending_changes(
task.cluster,
consts.CLUSTER_CHANGES.networks
)
node_uids = [n["uid"] for n in itertools.chain(nodes, ia_nodes)]
q_nodes = objects.NodeCollection.filter_by_id_list(None, node_uids)
q_nodes = objects.NodeCollection.filter_by(
q_nodes,
cluster_id=task.cluster_id
)
q_nodes = objects.NodeCollection.order_by(q_nodes, 'id')
# locking Nodes for update
update_nodes = objects.NodeCollection.lock_for_update(
q_nodes
).all()
for node in update_nodes:
logs_utils.delete_node_logs(node)
objects.Node.reset_to_discover(node)
if ia_nodes:
cls._notify_inaccessible(
task.cluster_id,
[n["uid"] for n in ia_nodes],
u"environment resetting"
)
message = (
u"Environment '{0}' "
u"was successfully reset".format(
task.cluster.name or task.cluster_id
)
)
notifier.notify(
"done",
message,
task.cluster_id
)
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
cls._update_action_log_entry(status, task.name, task_uuid, nodes)
@classmethod
def _notify_inaccessible(cls, cluster_id, nodes_uids, action):
ia_nodes_db = db().query(Node.name).filter(
Node.id.in_(nodes_uids),
Node.cluster_id == cluster_id
).order_by(Node.id).yield_per(100)
ia_message = (
u"Fuel couldn't reach these nodes during "
u"{0}: {1}. Manual check may be needed.".format(
action,
u", ".join([
u"'{0}'".format(n.name)
for n in ia_nodes_db
])
)
)
notifier.notify(
"warning",
ia_message,
cluster_id
)
    @classmethod
    def verify_networks_resp(cls, **kwargs):
        """Handle the Astute reply for a network verification task.

        Compares what every node reported against the expectations stored
        in the task cache ('args'/'nodes') and aggregates per-node
        connectivity errors into the task result.
        """
        logger.info(
            "RPC method verify_networks_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes')
        error_msg = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        # We simply check that each node received all vlans for cluster
        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        result = []
        # We expect that 'nodes' contains all nodes which we test.
        # Situation when some nodes not answered must be processed
        # in orchestrator early.
        if nodes is None:
            # If no nodes in kwargs then we update progress or status only.
            pass
        elif isinstance(nodes, list):
            cached_nodes = task.cache['args']['nodes']
            node_uids = [str(n['uid']) for n in nodes]
            cached_node_uids = [str(n['uid']) for n in cached_nodes]
            forgotten_uids = set(cached_node_uids) - set(node_uids)
            if forgotten_uids:
                # Some expected nodes did not answer at all - fail the
                # verification and name them.
                absent_nodes = db().query(Node).filter(
                    Node.id.in_(forgotten_uids)
                ).all()
                absent_node_names = []
                for n in absent_nodes:
                    if n.name:
                        absent_node_names.append(n.name)
                    else:
                        absent_node_names.append('id: %s' % n.id)
                if not error_msg:
                    error_msg = 'Node(s) {0} didn\'t return data.'.format(
                        ', '.join(absent_node_names)
                    )
                status = 'error'
            else:
                error_nodes = []
                node_excluded_networks = []
                for node in nodes:
                    # NOTE(review): relies on Python 2 filter() returning
                    # a list (truth test and [0] below) — not Python 3
                    # safe.
                    cached_nodes_filtered = filter(
                        lambda n: str(n['uid']) == str(node['uid']),
                        cached_nodes
                    )
                    if not cached_nodes_filtered:
                        logger.warning(
                            "verify_networks_resp: arguments contain node "
                            "data which is not in the task cache: %r",
                            node
                        )
                        continue
                    cached_node = cached_nodes_filtered[0]
                    # Check if we have excluded interfaces for LACP bonds
                    excluded_networks = cached_node.get(
                        'excluded_networks', [])
                    if excluded_networks:
                        interfaces = ', '.join(
                            [net.get('iface') for net in excluded_networks])
                        node_excluded_networks.append({
                            'node_name': cached_node['name'],
                            'interfaces': interfaces
                        })
                    errors = connectivity_check.check_received_data(
                        cached_node, node)
                    error_nodes.extend(errors)
                if error_nodes:
                    result = error_nodes
                    status = 'error'
                else:
                    # notices must not rewrite error messages
                    if node_excluded_networks:
                        interfaces_list = ', '.join(
                            ['node {0} [{1}]'.format(
                                item['node_name'], item['interfaces'])
                                for item in node_excluded_networks])
                        error_msg = connectivity_check.append_message(
                            error_msg,
                            'Notice: some interfaces were skipped from '
                            'connectivity checking because this version of '
                            'Fuel cannot establish LACP on Bootstrap nodes. '
                            'Only interfaces of successfully deployed nodes '
                            'may be checked with LACP enabled. The list of '
                            'skipped interfaces: {0}.'.format(interfaces_list)
                        )
                    if task.cache['args']['offline'] > 0:
                        error_msg = connectivity_check.append_message(
                            error_msg,
                            'Notice: {0} node(s) were offline during '
                            'connectivity check so they were skipped from the '
                            'check.'.format(task.cache['args']['offline'])
                        )
        else:
            error_msg = (error_msg or
                         'verify_networks_resp: argument "nodes"'
                         ' have incorrect type')
            status = 'error'
            logger.error(error_msg)
        # Intermediate statuses only update the task; terminal statuses go
        # through the dedicated verification updater.
        if status not in ('ready', 'error'):
            data = {
                'status': status,
                'progress': progress,
                'message': error_msg,
                'result': result
            }
            objects.Task.update(task, data)
        else:
            objects.Task.update_verify_networks(
                task, status, progress, error_msg, result)
        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
    @classmethod
    def multicast_verification_resp(cls, **kwargs):
        """Receiver for verification of multicast packages.

        ``kwargs['nodes']`` maps node id to the list of node ids whose
        multicast packets it received, e.g. ``{1: response, 2: response}``.
        A node missing from the response, or missing ids within one, turns
        the task into an error.
        """
        logger.info(
            u"RPC method multicast_resp received: {0}".format(
                jsonutils.dumps(kwargs))
        )
        task_uuid = kwargs.get('task_uuid')
        task = objects.task.Task.get_by_uuid(uuid=task_uuid)
        if kwargs.get('status'):
            task.status = kwargs['status']
        task.progress = kwargs.get('progress', 0)
        response = kwargs.get('nodes', {})
        error_msg = kwargs.get('error')
        if task.status == TASK_STATUSES.error:
            task.message = error_msg
        elif task.status == TASK_STATUSES.ready:
            errors = []
            results = []
            # uids of every node that was expected to take part
            node_ids = set(config['uid'] for config
                           in task.cache['args']['nodes'])
            not_received_nodes = node_ids - set(response.keys())
            if not_received_nodes:
                msg = (u'No answer from nodes: {0}').format(
                    list(not_received_nodes))
                errors.append(msg)
            # NOTE(review): iteritems() is Python 2 only.
            for node_id, received_ids in response.iteritems():
                result = {}
                not_received_ids = node_ids - set(received_ids or [])
                result = {'node_id': node_id,
                          'not_received': list(not_received_ids)}
                results.append(result)
                if not_received_ids:
                    msg = (u'Not received ids {0}'
                           u' for node {1}.').format(not_received_ids, node_id)
                    errors.append(msg)
            task.message = '\n'.join(errors)
            if errors:
                task.status = TASK_STATUSES.error
            task.result = results
        if task.status == TASK_STATUSES.ready:
            # mark the corosync settings as verified in cluster attributes
            editable = copy.deepcopy(task.cluster.attributes.editable)
            editable['corosync']['verified']['value'] = True
            task.cluster.attributes.editable = editable
        logger.debug(u'Multicast verification message %s', task.message)
        objects.Task.update_verify_networks(
            task, task.status,
            task.progress, task.message, task.result)
@classmethod
def check_dhcp_resp(cls, **kwargs):
"""Receiver method for check_dhcp task
For example of kwargs check FakeCheckingDhcpThread
"""
logger.info(
"RPC method check_dhcp_resp received: %s",
jsonutils.dumps(kwargs)
)
messages = []
result = collections.defaultdict(list)
message_template = (
u"Node {node_name} discovered DHCP server "
u"via {iface} with following parameters: IP: {server_id}, "
u"MAC: {mac}. This server will conflict with the installation.")
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes', [])
error_msg = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
nodes_uids = [node['uid'] for node in nodes]
nodes_db = db().query(Node).filter(Node.id.in_(nodes_uids)).all()
nodes_map = dict((str(node.id), node) for node in nodes_db)
master_network_mac = settings.ADMIN_NETWORK['mac']
logger.debug('Mac addr on master node %s', master_network_mac)
for node in nodes:
if node['status'] == 'ready':
for row in node.get('data', []):
if not net_utils.is_same_mac(row['mac'],
master_network_mac):
node_db = nodes_map.get(node['uid'])
if node_db:
row['node_name'] = node_db.name
message = message_template.format(**row)
messages.append(message)
result[node['uid']].append(row)
else:
logger.warning(
'Received message from nonexistent node. '
'Message %s', row)
status = status if not messages else "error"
error_msg = '\n'.join(messages) if messages else error_msg
logger.debug('Check dhcp message %s', error_msg)
task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
objects.Task.update_verify_networks(task, status, progress,
error_msg, result)
@classmethod
def download_release_resp(cls, **kwargs):
logger.info(
"RPC method download_release_resp received: %s" %
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
error_msg = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
release_info = task.cache['args']['release_info']
release_id = release_info['release_id']
release = db().query(Release).get(release_id)
if not release:
logger.error("download_release_resp: Release"
" with ID %s not found", release_id)
return
if error_msg:
status = 'error'
error_msg = "{0} download and preparation " \
"has failed.".format(release.name)
cls._download_release_error(
release_id,
error_msg
)
elif progress == 100 and status == 'ready':
cls._download_release_completed(release_id)
result = {
"release_info": {
"release_id": release_id
}
}
data = {'status': status, 'progress': progress, 'message': error_msg,
'result': result}
objects.Task.update(task, data)
@classmethod
def dump_environment_resp(cls, **kwargs):
logger.info(
"RPC method dump_environment_resp received: %s" %
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
status = kwargs.get('status')
progress = kwargs.get('progress')
error = kwargs.get('error')
msg = kwargs.get('msg')
task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
if status == 'error':
notifier.notify('error', error)
data = {'status': status, 'progress': 100, 'message': error}
objects.Task.update(task, data)
elif status == 'ready':
dumpfile = os.path.basename(msg)
notifier.notify('done', 'Snapshot is ready. '
'Visit Support page to download')
dumpfile_url = reverse('SnapshotDownloadHandler',
kwargs={'snapshot_name': dumpfile})
data = {'status': status, 'progress': progress,
'message': dumpfile_url}
objects.Task.update(task, data)
@classmethod
def stats_user_resp(cls, **kwargs):
logger.info("RPC method stats_user_resp received: %s",
jsonutils.dumps(kwargs))
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes', [])
status = kwargs.get('status')
error = kwargs.get('error')
message = kwargs.get('msg')
task = objects.Task.get_by_uuid(
task_uuid, fail_if_not_found=True, lock_for_update=True)
if status not in (consts.TASK_STATUSES.ready,
consts.TASK_STATUSES.error):
logger.debug("Task %s, id: %s in status: %s",
task.name, task.id, task.status)
return
data = {'status': status, 'progress': 100, 'message': message}
if status == consts.TASK_STATUSES.error:
logger.error("Task %s, id: %s failed: %s",
task.name, task.id, error)
data['message'] = error
objects.Task.update(task, data)
cls._update_action_log_entry(status, task.name, task_uuid, nodes)
logger.info("RPC method stats_user_resp processed")
@classmethod
def check_repositories_resp(cls, **kwargs):
logger.info(
"RPC method check_repositories_resp received: %s",
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes')
task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
failed_nodes = [node for node in nodes if node['status'] != 0]
failed_nodes_ids = [node['uid'] for node in failed_nodes]
progress = 100
message = ''
if not failed_nodes_ids:
status = consts.TASK_STATUSES.ready
else:
failed_urls = set()
for n in failed_nodes:
failed_urls.update(n['out'].get('failed_urls', []))
message = ('These nodes: "{0}" failed to connect to '
'some of these repositories: "{1}"').format(
'", "'.join([str(id) for id in failed_nodes_ids]),
'", "'.join(failed_urls))
status = consts.TASK_STATUSES.error
objects.Task.update_verify_networks(
task, status, progress, message, [])
@classmethod
def check_repositories_with_setup_resp(cls, **kwargs):
logger.info(
"RPC method check_repositories_with_setup received: %s" %
jsonutils.dumps(kwargs)
)
task_uuid = kwargs.get('task_uuid')
response = kwargs.get('nodes', [])
status = consts.TASK_STATUSES.ready
progress = 100
task = objects.Task.get_by_uuid(
task_uuid, fail_if_not_found=True)
response_nodes = dict([(n['uid'], n) for n in response])
nodes = objects.NodeCollection.filter_by_list(
None, 'id', response_nodes.keys(), order_by='id')
failed_nodes = []
failed_repos = set()
for node in nodes:
node_response = response_nodes[node.uid]
if node_response['status'] != 0:
if isinstance(node_response['out'], dict):
failed_repos.update(
node_response['out'].get('failed_urls', []))
failed_nodes.append(node.name)
msg = ''
if failed_nodes:
msg = ('Repo availability verification using public network'
' failed on following nodes {0}.\n '.format(
', '.join(failed_nodes)))
if failed_repos:
msg += ('Following repos are not available - {0}\n. '.format(
', '.join(failed_repos)))
if msg:
msg += ('Check your public network settings and '
'availability of the repositories from public network. '
'Please examine nailgun and astute'
' logs for additional details.')
status = consts.TASK_STATUSES.error
objects.Task.update_verify_networks(
task, status, progress, msg, {})
| |
#!/usr/bin/env python
"""The classes in this module aid the specification of tally `units` in
:py:class:`pyne.simplesim.cards.ICellSurfTally` (see examples below). The class
focuses on the ability to specify cells and surfaces that are nested within
universes or other cells in nearly arbitrarily complex ways. Apart from making
user input clearer, this class has methods that help with creating cell
comments and mcnp strings.
The names of classes here are fairly short, so the user may not want to import
the entire namespace. A suggested way to import the module is::
    from pyne.simplesim import nestedgeom as ng
An inheritance diagram of all the classes in this module can be found at
:ref:`pyne_simplesim_inheritance`.
Usage Examples
--------------
The subclasses of :py:class:`ICellSurfTally` can take as input any subclass of
:py:class:`IUnit`. The following shows different ways in which complex units
can be built. Here it is assumed that there is a cell named '1' in the system,
a surface named '1' in the system, a universe named '1' in the system, etc. The
unions represent averaging or totaling the units, depending on the type of
tally.
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|category |description |nestedgeom |mcnp |
+===========+=============================================================+============================================================+=======================+
|basic |surface 1 (s1) |``s1 = Surf('1')`` |1 |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |cell 1 (c1) |``c1 = Cell('1')`` |1 |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |universe 1 (u1) |``u1 = Univ('1')`` |U=1 |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|union |union of s1 and s2 |``Union(s1, s2)`` |(1 2) |
| | +------------------------------------------------------------+ |
| | |``(s1 | s2)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |union of c1, c2, and c3 |``Union(c1, c2, c3)`` |(1 2 3) |
| | +------------------------------------------------------------+ |
| | |``(c1 | c2 | c3)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |union of cells in u1 |``Union(u1)`` |(U=1) |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|nesting |s1 in filled cell 2 (fc2) |``fc2 = FCell('2'): s1 < fc2`` |(1 < 1) |
| | +------------------------------------------------------------+ |
| | |``s1.of(fc2)`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |c1 in fc3 in fc3 |``c1 < fc2 < fc3`` |(1 < 2 < 3) |
| | +------------------------------------------------------------+ |
| | |``c1.of( fc2.of(fc3) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in union of fc2 and fc3 |``s1 < Union(fc2, fc3)`` |(1 < (2 3)) |
| | +------------------------------------------------------------+ |
| | |``s1.of( Union(fc2, fc3) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (union of fc2 and fc3) in fc4 |``s1 < Union(fc2, fc3) < fc4`` |(1 < (2 3) < 4) |
| | +------------------------------------------------------------+ |
| | |``s1.of( Union(fc2, fc3).of(fc4) )`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in u1 in fc4 |``s1 < u1 < fc4`` |(1 < U=1 < 4) |
| | +------------------------------------------------------------+ |
| | |``s1.of( u1.of(fc4) )`` | |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|vectorized |(over s1 and s2) in fc2 |``ng.Vec(s1, s2) < fc2`` |(1 2 < 2) |
| | +------------------------------------------------------------+ |
| | |``ng.Vec(s1, s2).of(fc2)`` | |
| | +------------------------------------------------------------+ |
| | |``(s1 & s2) < fc2`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (over fc2 and fc3) |``s1 < ng.Vec(fc2, fc3)`` |(1 < 2 3) |
| | +------------------------------------------------------------+ |
| | |``s1 < (fc2 & fc3)`` | |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
|lattice |s1 in (union of fc2 and (fc3 in lattice elem 5)) in fc4 |``la = Lin(5): s1 < (fc2 | FCell('3', la)) < fc4`` |(1 < (2 3[5]) < 4 |
| | +------------------------------------------------------------+ |
| | |``s1 < (fc2 | fc3.lat(la)) < fc4`` | |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice x range 0:1, y range 0:2, z range 0:3) |``s1 < fc2.lat(Rng([0,1], [0,2], [0,3])`` |(1 < 2[0:1 0:2 0:3]) |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice coord (0, 1, 2)) |``s1 < fc2.lat(Cor([0, 1, 2]))`` |(1 < 2[0 1 2]) |
| +-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
| |s1 in (fc2 in lattice coords (0, 1, 2), (3, 2, 1)) |``s1 < fc2.lat( Cor([ [0, 1, 2], [3, 2, 1]]) )`` |(1 < 2[0 1 2, 3 2 1]) |
+-----------+-------------------------------------------------------------+------------------------------------------------------------+-----------------------+
"""
from warnings import warn
from pyne.utils import VnVWarning
warn(__name__ + " is not yet V&V compliant.", VnVWarning)
class IUnit(object):
    """Abstract base class for tally units. The user does not use this class
    directly.

    A unit is one link in a doubly-linked nesting chain: ``up`` points toward
    the enclosing unit (closer to the real world) and ``down`` toward the
    enclosed one.
    """
    # TODO need to define a 'recursive' __repr__.
    def __init__(self, up=None, down=None):
        """Currently, the two keyword arguments are not actually used, but are
        sometimes set directly, after initialization.
        """
        self.up = up
        self.down = down

    def __lt__(self, next_level_up):
        """The user can use the < operator to perform the operation of
        :py:meth:`of`, e.g. (cells/surfs) < (cells/univ)
        """
        return self.of(next_level_up)

    def of(self, next_level_up):
        """Returns a unit where ``self`` must be in ``next_level_up`` to be
        tallied. ``self`` must be a cell or surface, or `vector` or `union`
        thereof (cell/surf), and
        ``next_level_up`` can be a cell or universe, or union thereof
        (cells/univ).
        """
        self.up = next_level_up
        self.up.down = self
        # Return the bottom of the chain so further chaining keeps the whole
        # nested specification reachable from one object.
        if self.down: return self.down
        else: return self

    def __or__(self, right):
        """A convenient way to call :py:meth:`union`."""
        return self.union(right)

    def union(self, right):
        """Returns a :py:class:`Union` of ``self`` with ``right``."""
        if isinstance(self, Union):
            # BUG FIX: ``brothers`` is assigned from *args and so may be a
            # tuple; ``+= [right]`` would raise TypeError (tuple + list).
            # Augmented assignment with a one-element tuple works for both
            # tuple and list storage.
            self.brothers += (right,)
            return self
        else:
            return Union(self, right)

    def __and__(self, right):
        """A convenient way to call :py:meth:`vector`."""
        return self.vector(right)

    def vector(self, right):
        """Returns a :py:class:`Vec` of ``self`` with ``right``."""
        if isinstance(self, Vec):
            # BUG FIX: same tuple-vs-list issue as in :py:meth:`union`.
            self.sisters += (right,)
            return self
        else:
            return Vec(self, right)

    def comment(self, inner):
        # Wrap ``inner`` with parentheses / " in" glue depending on where this
        # unit sits in the chain (top, bottom, or middle).
        return "{0}{1}{2}{3}".format(
            " (" if self.up and not self.down else "",
            inner,
            (" in" + self.up.comment()) if self.up else "",
            ")" if self.down and not self.up else "")

    def mcnp(self, float_format, sim, inner):
        # Same structure as :py:meth:`comment`, but using MCNP's "<" nesting
        # syntax.
        return "{0}{1}{2}{3}".format(
            " (" if self.up and not self.down else "",
            inner,
            (" <" + self.up.mcnp(float_format, sim)) if self.up else "",
            ")" if self.down and not self.up else "")
class ICellSurf(IUnit):
    """Abstract base for the cells and surfaces that sit at the lowest level
    of a nested-geometry specification. Users work with its subclasses; for
    cells at higher levels of nesting (closer to the real world), see
    :py:class:`FCell`.
    """
    def __init__(self, name):
        """
        Parameters
        ----------
        name : str
            Name of the surface or cell. The subclass decides how this name
            is resolved to a surface or cell number in the system definition.
        """
        self.name = name
        super(ICellSurf, self).__init__()
class Surf(ICellSurf):
    """References, by name, a surface in the system definition for tallying.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Surf
    """
    def comment(self):
        inner = " surf {0!r}".format(self.name)
        return super(Surf, self).comment(inner)

    def mcnp(self, float_format, sim):
        # Resolve the surface name to its number via the simulation's system.
        number = sim.sys.surface_num(self.name)
        return super(Surf, self).mcnp(float_format, sim,
                                      " {0}".format(number))
class Cell(ICellSurf):
    """References, by name, a cell in the system definition for tallying.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Cell
    """
    def comment(self, inner=None):
        suffix = inner if inner else ""
        return super(Cell, self).comment(
            " cell {0!r}{1}".format(self.name, suffix))

    def mcnp(self, float_format, sim, inner=None):
        suffix = inner if inner else ""
        # Resolve the cell name to its number via the simulation's system.
        number = sim.sys.cell_num(self.name)
        return super(Cell, self).mcnp(
            float_format, sim, " {0}{1}".format(number, suffix))
class FCell(Cell):
    """A filled cell (hence the name), subclassed from :py:class:`Cell`.
    Used for higher-level cells (closer to the real world); carries an
    optional lattice-element specifier for lattice cells.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.FCell
    """
    def __init__(self, name, lat_spec=None):
        """
        Parameters
        ----------
        name : str
            See :py:class:`ICellSurf`.
        lat_spec : subclass of :py:class:`ILatticeSpec`, optional
            Lattice-element specifier for this cell, if it is a lattice cell.
        """
        super(FCell, self).__init__(name)
        self.lat_spec = lat_spec

    def lat(self, lat_spec):
        """Attach a lattice specifier and return ``self`` (fluent style)."""
        self.lat_spec = lat_spec
        return self

    def comment(self):
        if self.lat_spec:
            inner = self.lat_spec.comment()
        else:
            inner = ""
        return super(FCell, self).comment(inner)

    def mcnp(self, float_format, sim):
        if self.lat_spec:
            inner = self.lat_spec.mcnp(float_format, sim)
        else:
            inner = ""
        return super(FCell, self).mcnp(float_format, sim, inner)
class Univ(IUnit):
    """A universe, used in higher levels of nesting (closer to the real
    world). Given the name of a universe in the system, the appropriate
    universe number is looked up when output is generated.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Univ
    """
    def __init__(self, name):
        """
        Parameters
        ----------
        name : str
            Name of the universe in the system definition.
        """
        self.name = name
        super(Univ, self).__init__()

    def comment(self):
        inner = " univ {0!r}".format(self.name)
        return super(Univ, self).comment(inner)

    def mcnp(self, float_format, sim):
        # Resolve the universe name to its number via the simulation's system.
        inner = " U={0}".format(sim.sys.universe_num(self.name))
        return super(Univ, self).mcnp(float_format, sim, inner)
class Union(IUnit):
    """A union of surfaces, cells, or of a single universe.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Union
    """
    def __init__(self, *args):
        """
        Parameters
        -----------
        *args : list of instances of :py:class:`IUnit` subclasses.
        """
        # All args must be instances of IUnit or its subclasses.
        # BUG FIX: store a list rather than the *args tuple, so that
        # :py:meth:`IUnit.union` can append further members in place
        # (``+= [right]`` on a tuple raises TypeError).
        self.brothers = list(args)
        super(Union, self).__init__()

    def comment(self):
        # Members separated by "," — identical output to a manual loop with
        # a separator between items.
        string = ",".join(bro.comment() for bro in self.brothers)
        return super(Union, self).comment(" union of ({0})".format(string))

    def mcnp(self, float_format, sim):
        # No separator between members in MCNP output.
        string = "".join(bro.mcnp(float_format, sim) for bro in self.brothers)
        return super(Union, self).mcnp(float_format, sim,
                " ({0})".format(string))
class Vec(IUnit):
    """A "vector" of surfaces or cells. This class is named after the
    vectorized notation that can be used in MATLAB, as it allows the
    specification of multiple units of input in a single unit. Typically, a
    :py:class:`Vec` is the first or last element in a nested unit. Each of
    the following is equivalent to two separate units::

        # Surf('A') < FCell('C')    Surf('B') < FCell('C')
        Vec(Surf('A'), Surf('B')) < FCell('C')
        # Surf('A') < FCell('C')    Surf('A') < FCell('D')
        Surf('A') < Vec(FCell('C'), FCell('D'))
    """
    # Named after matlab's vectorized notation
    def __init__(self, *args):
        # All args must be instances of IUnit or its subclasses.
        # BUG FIX: store a list rather than the *args tuple, so that
        # :py:meth:`IUnit.vector` can append further members in place
        # (``+= [right]`` on a tuple raises TypeError).
        self.sisters = list(args)
        super(Vec, self).__init__()

    def comment(self):
        # Members separated by "," — identical output to a manual loop with
        # a separator between items.
        string = ",".join(sis.comment() for sis in self.sisters)
        return super(Vec, self).comment(" over ({0})".format(string))

    def mcnp(self, float_format, sim):
        # No separator between members in MCNP output.
        string = "".join(sis.mcnp(float_format, sim) for sis in self.sisters)
        return super(Vec, self).mcnp(float_format, sim, string)
class ILatticeSpec(object):
    """Abstract base class for lattice element specifiers. The user does not
    use this class directly. There are 3 subclasses:

    - :py:class:`Lin` : a single linear index of a lattice element.
    - :py:class:`Rng` : x, y, and z index range of lattice elements.
    - :py:class:`Cor` : list coordinates of lattice elements.
    """
    def comment(self, inner):
        # Human-readable form: prefix the subclass-supplied text.
        return "-lat {0}".format(inner)

    def mcnp(self, float_format, sim, inner):
        # MCNP syntax: the lattice specification goes in square brackets.
        return "[{0}]".format(inner)
class Lin(ILatticeSpec):
    """A single linear index of a lattice element.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Lin
    """
    def __init__(self, linear_index):
        """
        Parameters
        ----------
        linear_index : int
            Linear (1-D) index of a lattice element for the lattice cell that
            this specifier becomes a part of.
        """
        self.index = linear_index

    def comment(self):
        inner = "linear idx {0:d}".format(self.index)
        return super(Lin, self).comment(inner)

    def mcnp(self, float_format, sim):
        inner = "{0:d}".format(self.index)
        return super(Lin, self).mcnp(float_format, sim, inner)
class Rng(ILatticeSpec):
    """A range of lattice elements.
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Rng
    """
    def __init__(self, x_bounds=None, y_bounds=None, z_bounds=None):
        """
        Parameters
        ----------
        x_bounds : 2-element list of int, optional
            Something like [0,5] for lattice elements i=0 through i=5. First
            element is less than the second element. Don't
            specify to leave as [0, 0].
        y_bounds : 2-element list of int, optional
            First element is less than the second element. Don't specify to
            leave as [0, 0].
        z_bounds : 2-element list of int, optional
            First element is less than the second element. Don't specify to
            leave as [0, 0].

        Examples
        --------
        In the following, there is no y dimension::

            myrange = Rng([0, 5], z_bounds=[0, 2])
        """
        super(Rng, self).__init__()
        # BUG FIX: the defaults used to be the mutable literal [0, 0]; a
        # single shared default list could be mutated through one instance
        # and silently change every other default-constructed Rng. Use None
        # sentinels and build a fresh list per instance instead.
        self.x_bounds = [0, 0] if x_bounds is None else x_bounds
        self.y_bounds = [0, 0] if y_bounds is None else y_bounds
        self.z_bounds = [0, 0] if z_bounds is None else z_bounds

    def comment(self):
        return super(Rng, self).comment(
            "x range {0[0]:d}:{0[1]:d}, y range {1[0]:d}:{1[1]:d}, "
            "z range {2[0]:d}:{2[1]:d}".format(
                self.x_bounds, self.y_bounds, self.z_bounds))

    def mcnp(self, float_format, sim):
        return super(Rng, self).mcnp(float_format, sim,
            "{0[0]:d}:{0[1]:d} {1[0]:d}:{1[1]:d} {2[0]:d}:{2[1]:d}".format(
                self.x_bounds, self.y_bounds, self.z_bounds))
class Cor(ILatticeSpec):
    """A list of lattice element coordinates (in indices).
    .. inheritance-diagram:: pyne.simplesim.nestedgeom.Cor
    """
    def __init__(self, points=None):
        """
        Parameters
        ----------
        points : 3-element list of int, list of lists, optional
            Coordinates of a lattice element, or a list of coordinates.

        Examples
        --------
        The following work::

            latspec = Cor()
            latspec = Cor([1, 2, 3])
            latspec = Cor([ [1, 2, 3], [-1, 3, -2]])
        """
        super(Cor, self).__init__()
        # BUG FIX: the default used to be the mutable literal [0, 0, 0],
        # which is shared across all default-constructed instances; use a
        # None sentinel instead.
        if points is None:
            points = [0, 0, 0]
        # We want a nested list, even if the user doesn't provide it. If the
        # first element is a scalar (not iterable), the argument is a single
        # point, so nest it for the methods below to work.
        # BUG FIX: the old check ``type(points[0]) is int`` missed numpy
        # integer scalars even though the docstring promises numpy-array
        # support; testing for iterability covers both.
        if not hasattr(points[0], '__iter__'):
            points = [points]
        self.points = points

    def comment(self):
        string = "coords"
        counter = 0
        for pt in self.points:
            counter += 1
            string += " ({0[0]:d}, {0[1]:d}, {0[2]:d})".format(pt)
            if counter < len(self.points): string += ","
        return super(Cor, self).comment(string)

    def mcnp(self, float_format, sim):
        string = ""
        counter = 0
        for pt in self.points:
            counter += 1
            string += " {0[0]:d} {0[1]:d} {0[2]:d}".format(pt)
            if counter < len(self.points): string += ","
        return super(Cor, self).mcnp(float_format, sim, string)
# # "raw" docstring for table above.
# ====
# MCNP
# ====
# 1
# 1
# U=1
#
# (1 2)
# (1 2 3)
# (U=1)
#
# (1 < 1)
# (1 < 2 < 3)
# (1 < (2 3))
# (1 < (2 3) < 4)
#
# (1 2 < 2)
# (1 < 2 3)
#
# 1 < (2 3[5]) < 4
# 1 < 2[0:1 0:2 0:3]
# 1 < 2[0 1 2]
# 1 < 2[0 1 2, 3 2 1]
#
# ===========
# description
# ===========
# surface 1 (s1)
# cell 1 (c1)
# universe 1 (u1)
#
# union of s1 and s2
# union of c1, c2, and c3
# union of cells in u1
#
# s1 in filled cell 2 (fc2)
# c1 in fc3 in fc3
# s1 in union of fc2 and fc3
# s1 in (union of fc2 and fc3) in fc4
# s1 in u1 in fc4
#
# (over s1 and s2) in fc2
# s1 in (over fc2 and fc3)
#
# s1 in (union of fc2 and (fc3 in lattice elem 5)) in fc4
# s1 in (fc2 in lattice x range 0:1, y range 0:2, z range 0:3)
# s1 in (fc2 in lattice coord (0, 1, 2))
# s1 in (fc2 in lattice coords (0, 1, 2), (3, 2, 1))
#
# ==========
# nestedgeom
# ==========
# s1 = Surf('1')
# c1 = Cell('1')
# u1 = Univ('1')
#
# Union(s1, s2) / (s1 | s2)
# Union(c1, c2, c3) / (c1 | c2 | c3)
# Union(u1)
#
# fc2 = FCell('2'): s1 < fc2 / s1.of(fc2)
# c1 < fc2 < fc3 / c1.of( fc2.of(fc3) )
# s1 < Union(fc2, fc3) / s1.of( Union(fc2, fc3) )
# s1 < Union(fc2, fc3) < fc4 / s1.of( Union(fc2, fc3).of(fc4) )
# s1 < u1 < fc4 / s1.of( u1.of(fc4) )
#
# ng.Vec(s1, s2) < fc2 / ng.Vec(s1, s2).of(fc2) / (s1 & s2) < fc2
# s1 < ng.Vec(fc2, fc3) / s1 < (fc2 & fc3)
#
# la = Lin(5): s1 < (fc2 | FCell('3', la)) < fc4 / s1 < (fc2 | fc3.lat(la)) < fc4
# s1 < fc2.lat(Rng([0,1], [0,2], [0,3])
# s1 < fc2.lat(Cor([0, 1, 2]))
# s1 < fc2.lat( Cor([ [0, 1, 2], [3, 2, 1]]) )
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import sys
from uuid import uuid4
from proton import *
from proton._compat import raise_
from . import common
class Test(common.Test):
    """Base fixture: gives each test a fresh proton ``Data`` object."""
    def setUp(self):
        # A brand-new, empty Data object per test keeps tests independent.
        self.data = Data()
    def tearDown(self):
        # Drop the reference so the underlying codec state can be reclaimed.
        self.data = None
class DataTest(Test):
    """Exercises the proton ``Data`` codec object: the typed put_*/get_*
    accessors, cursor navigation (next/enter/exit/rewind), arrays, described
    types, encode/decode round trips, and the typed mapping/list helpers
    (PropertyDict, AnnotationDict, SymbolList).
    """

    def testTopLevelNext(self):
        assert self.data.next() is None
        self.data.put_null()
        self.data.put_bool(False)
        self.data.put_int(0)
        assert self.data.next() is None
        self.data.rewind()
        assert self.data.next() == Data.NULL
        assert self.data.next() == Data.BOOL
        assert self.data.next() == Data.INT
        assert self.data.next() is None

    def testNestedNext(self):
        assert self.data.next() is None
        self.data.put_null()
        assert self.data.next() is None
        self.data.put_list()
        assert self.data.next() is None
        self.data.put_bool(False)
        assert self.data.next() is None
        self.data.rewind()
        assert self.data.next() is Data.NULL
        assert self.data.next() is Data.LIST
        self.data.enter()
        assert self.data.next() is None
        self.data.put_ubyte(0)
        assert self.data.next() is None
        self.data.put_uint(0)
        assert self.data.next() is None
        self.data.put_int(0)
        assert self.data.next() is None
        self.data.exit()
        assert self.data.next() is Data.BOOL
        assert self.data.next() is None
        self.data.rewind()
        assert self.data.next() is Data.NULL
        assert self.data.next() is Data.LIST
        assert self.data.enter()
        assert self.data.next() is Data.UBYTE
        assert self.data.next() is Data.UINT
        assert self.data.next() is Data.INT
        assert self.data.next() is None
        assert self.data.exit()
        assert self.data.next() is Data.BOOL
        assert self.data.next() is None

    def testEnterExit(self):
        assert self.data.next() is None
        assert not self.data.enter()
        self.data.put_list()
        assert self.data.enter()
        assert self.data.next() is None
        self.data.put_list()
        assert self.data.enter()
        self.data.put_list()
        assert self.data.enter()
        assert self.data.exit()
        assert self.data.get_list() == 0
        assert self.data.exit()
        assert self.data.get_list() == 1
        assert self.data.exit()
        assert self.data.get_list() == 1
        assert not self.data.exit()
        assert self.data.get_list() == 1
        assert self.data.next() is None
        self.data.rewind()
        assert self.data.next() is Data.LIST
        assert self.data.get_list() == 1
        assert self.data.enter()
        assert self.data.next() is Data.LIST
        assert self.data.get_list() == 1
        assert self.data.enter()
        assert self.data.next() is Data.LIST
        assert self.data.get_list() == 0
        assert self.data.enter()
        assert self.data.next() is None
        assert self.data.exit()
        assert self.data.get_list() == 0
        assert self.data.exit()
        assert self.data.get_list() == 1
        assert self.data.exit()
        assert self.data.get_list() == 1
        assert not self.data.exit()

    def put(self, putter, v):
        """More informative exception from putters, include bad value"""
        try:
            putter(v)
        except Exception:
            etype, value, trace = sys.exc_info()
            raise_(etype, etype("%s(%r): %s" % (putter.__name__, v, value)), trace)
        return putter

    # (bits, signed) for each integer type
    INT_TYPES = {
        "byte": (8, True),
        "ubyte": (8, False),
        "short": (16, True),
        "ushort": (16, False),
        "int": (32, True),
        "uint": (32, False),
        "long": (64, True),
        "ulong": (64, False)
    }

    def int_values(self, dtype):
        """Set of test values for integer type dtype, include extreme and medial values"""
        bits, signed = self.INT_TYPES[dtype]
        values = [0, 1, 2, 5, 42]
        if signed:
            min, max = -2**(bits - 1), 2**(bits - 1) - 1
            values.append(max // 2)
            values += [-i for i in values if i]
            values += [min, max]
        else:
            max = 2**(bits) - 1
            values += [max // 2, max]
        return sorted(values)

    def _testArray(self, dtype, descriptor, atype, *values):
        """Put an array of atype (optionally described by a dtype descriptor)
        holding values, then rewind and verify every element reads back."""
        if dtype:
            dTYPE = getattr(self.data, dtype.upper())
        aTYPE = getattr(self.data, atype.upper())
        self.data.put_array(dtype is not None, aTYPE)
        self.data.enter()
        if dtype is not None:
            putter = getattr(self.data, "put_%s" % dtype)
            self.put(putter, descriptor)
        putter = getattr(self.data, "put_%s" % atype)
        for v in values:
            self.put(putter, v)
        self.data.exit()
        self.data.rewind()
        assert self.data.next() == Data.ARRAY
        count, described, type = self.data.get_array()
        assert count == len(values), count
        if dtype is None:
            assert described == False
        else:
            assert described
        assert type == aTYPE, type
        assert self.data.enter()
        if described:
            assert self.data.next() == dTYPE
            getter = getattr(self.data, "get_%s" % dtype)
            gotten = getter()
            assert gotten == descriptor, gotten
        if values:
            getter = getattr(self.data, "get_%s" % atype)
            for v in values:
                assert self.data.next() == aTYPE
                gotten = getter()
                assert gotten == v, gotten
        assert self.data.next() is None
        assert self.data.exit()

    def testStringArray(self):
        self._testArray(None, None, "string", "one", "two", "three")

    def testDescribedStringArray(self):
        self._testArray("symbol", "url", "string", "one", "two", "three")

    def _test_int_array(self, atype):
        self._testArray(None, None, atype, *self.int_values(atype))

    def testByteArray(self): self._test_int_array("byte")
    def testUbyteArray(self): self._test_int_array("ubyte")
    def testShortArray(self): self._test_int_array("short")
    def testUshortArray(self): self._test_int_array("ushort")
    def testIntArray(self): self._test_int_array("int")
    def testUintArray(self): self._test_int_array("uint")
    def testLongArray(self): self._test_int_array("long")
    def testUlongArray(self): self._test_int_array("ulong")

    def testUUIDArray(self):
        self._testArray(None, None, "uuid", uuid4(), uuid4(), uuid4())

    def testEmptyArray(self):
        self._testArray(None, None, "null")

    def testDescribedEmptyArray(self):
        self._testArray("long", 0, "null")

    def testPropertyDict(self):
        a = PropertyDict(one=1, two=2, three=3)
        b = PropertyDict({'one': 1, 'two': 2, 'three': 3})
        c = PropertyDict(zip(['one', 'two', 'three'], [1, 2, 3]))
        d = PropertyDict([('two', 2), ('one', 1), ('three', 3)])
        e = PropertyDict({symbol('three'): 3, symbol('one'): 1, symbol('two'): 2})
        f = PropertyDict(a)
        g = PropertyDict()
        g['one'] = 1
        g[symbol('two')] = 2
        g['three'] = 3
        assert a == b == c == d == e == f == g
        for k in a.keys():
            assert isinstance(k, symbol)
        # BUG FIX: these used to construct AnnotationDict (copied from
        # testAnnotationDict) and so never exercised PropertyDict's key
        # validation at all.
        self.assertRaises(KeyError, PropertyDict, {'one': 1, None: 'none'})
        self.assertRaises(KeyError, PropertyDict, {'one': 1, 1.23: 4})

    def testPropertyDictNoRaiseError(self):
        a = PropertyDict(one=1, two=2, three=3, raise_on_error=False)
        a[4] = 'four'
        b = PropertyDict({'one': 1, 'two': 2, 'three': 3, 4: 'four'}, raise_on_error=False)
        c = PropertyDict(zip(['one', 'two', 'three', 4], [1, 2, 3, 'four']), raise_on_error=False)
        d = PropertyDict([('two', 2), ('one', 1), ('three', 3), (4, 'four')], raise_on_error=False)
        e = PropertyDict({4: 'four', symbol('three'): 3, symbol('one'): 1, symbol('two'): 2}, raise_on_error=False)
        f = PropertyDict(a, raise_on_error=False)
        g = PropertyDict(raise_on_error=False)
        g['one'] = 1
        g[4] = 'four'
        g[symbol('two')] = 2
        g['three'] = 3
        assert a == b == c == d == e == f == g

    def testAnnotationDict(self):
        # AnnotationMap c'tor calls update(), so this method is also covered
        a = AnnotationDict(one=1, two=2, three=3)
        a[ulong(4)] = 'four'
        b = AnnotationDict({'one': 1, 'two': 2, 'three': 3, ulong(4): 'four'})
        c = AnnotationDict(zip(['one', 'two', 'three', ulong(4)], [1, 2, 3, 'four']))
        d = AnnotationDict([('two', 2), ('one', 1), ('three', 3), (ulong(4), 'four')])
        e = AnnotationDict({symbol('three'): 3, ulong(4): 'four', symbol('one'): 1, symbol('two'): 2})
        f = AnnotationDict(a)
        g = AnnotationDict()
        g[ulong(4)] = 'four'
        g['one'] = 1
        g[symbol('two')] = 2
        g['three'] = 3
        assert a == b == c == d == e == f == g
        for k in a.keys():
            assert isinstance(k, (symbol, ulong))
        self.assertRaises(KeyError, AnnotationDict, {'one': 1, None: 'none'})
        self.assertRaises(KeyError, AnnotationDict, {'one': 1, 1.23: 4})

    def testAnnotationDictNoRaiseError(self):
        a = AnnotationDict(one=1, two=2, three=3, raise_on_error=False)
        a[ulong(4)] = 'four'
        a[5] = 'five'
        b = AnnotationDict({'one': 1, 'two': 2, 'three': 3, ulong(4): 'four', 5: 'five'}, raise_on_error=False)
        c = AnnotationDict(zip(['one', 'two', 'three', ulong(4), 5], [1, 2, 3, 'four', 'five']), raise_on_error=False)
        d = AnnotationDict([('two', 2), ('one', 1), ('three', 3),
                            (ulong(4), 'four'), (5, 'five')], raise_on_error=False)
        e = AnnotationDict({5: 'five', symbol('three'): 3, ulong(4): 'four',
                            symbol('one'): 1, symbol('two'): 2}, raise_on_error=False)
        f = AnnotationDict(a, raise_on_error=False)
        g = AnnotationDict(raise_on_error=False)
        g[ulong(4)] = 'four'
        g['one'] = 1
        g[symbol('two')] = 2
        g[5] = 'five'
        g['three'] = 3
        assert a == b == c == d == e == f == g

    def testSymbolList(self):
        a = SymbolList(['one', 'two', 'three'])
        b = SymbolList([symbol('one'), symbol('two'), symbol('three')])
        c = SymbolList()
        c.append('one')
        c.extend([symbol('two'), 'three'])
        d1 = SymbolList(['one'])
        d2 = SymbolList(['two', symbol('three')])
        d = d1 + d2
        e = SymbolList(['one'])
        e += SymbolList(['two', symbol('three')])
        f = SymbolList(['one', 'hello', 'goodbye'])
        f[1] = symbol('two')
        f[2] = 'three'
        g = SymbolList(a)
        assert a == b == c == d == e == f == g
        for v in a:
            assert isinstance(v, symbol)
        self.assertRaises(TypeError, SymbolList, ['one', None])
        self.assertRaises(TypeError, SymbolList, ['one', 2])
        self.assertRaises(TypeError, SymbolList, ['one', ['two']])
        self.assertRaises(TypeError, SymbolList, ['one', {'two': 3}])

    def testSymbolListNoRaiseError(self):
        a = SymbolList(['one', 'two', 'three', 4], raise_on_error=False)
        b = SymbolList([symbol('one'), symbol('two'), symbol('three'), 4], raise_on_error=False)
        c = SymbolList(raise_on_error=False)
        c.append('one')
        c.extend([symbol('two'), 'three', 4])
        d1 = SymbolList(['one'], raise_on_error=False)
        d2 = SymbolList(['two', symbol('three'), 4], raise_on_error=False)
        d = d1 + d2
        e = SymbolList(['one'], raise_on_error=False)
        e += SymbolList(['two', symbol('three'), 4], raise_on_error=False)
        f = SymbolList(['one', 'hello', 'goodbye', 'what?'], raise_on_error=False)
        f[1] = symbol('two')
        f[2] = 'three'
        f[3] = 4
        g = SymbolList(a, raise_on_error=False)
        assert a == b == c == d == e == f == g

    def _test(self, dtype, *values, **kwargs):
        """Round-trip values of dtype three ways: immediate put/get, rewind
        and re-read, and encode/decode into a fresh Data object."""
        eq = kwargs.get("eq", lambda x, y: x == y)
        ntype = getattr(Data, dtype.upper())
        putter = getattr(self.data, "put_%s" % dtype)
        getter = getattr(self.data, "get_%s" % dtype)
        for v in values:
            self.put(putter, v)
            gotten = getter()
            assert eq(gotten, v), (gotten, v)
        self.data.rewind()
        for v in values:
            vtype = self.data.next()
            assert vtype == ntype, vtype
            gotten = getter()
            assert eq(gotten, v), (gotten, v)
        encoded = self.data.encode()
        copy = Data(0)
        while encoded:
            n = copy.decode(encoded)
            encoded = encoded[n:]
        copy.rewind()
        cgetter = getattr(copy, "get_%s" % dtype)
        for v in values:
            vtype = copy.next()
            assert vtype == ntype, vtype
            gotten = cgetter()
            assert eq(gotten, v), (gotten, v)

    def _test_int(self, itype):
        self._test(itype, *self.int_values(itype))

    def testByte(self): self._test_int("byte")

    def testUbyte(self):
        self._test_int("ubyte")
        self.assertRaises(AssertionError, ubyte, -1)

    def testShort(self): self._test_int("short")

    def testUshort(self):
        # BUG FIX: was self._test("ushort"), which put/got no values at all;
        # use the shared integer-value sweep like the other integer types.
        self._test_int("ushort")
        self.assertRaises(AssertionError, ushort, -1)

    def testInt(self): self._test_int("int")

    def testUint(self):
        self._test_int("uint")
        self.assertRaises(AssertionError, uint, -1)

    def testLong(self): self._test_int("long")

    def testUlong(self):
        self._test_int("ulong")
        self.assertRaises(AssertionError, ulong, -1)

    def testString(self):
        self._test("string", "one", "two", "three", "this is a test", "")

    def testFloat(self):
        # we have to use a special comparison here because python
        # internally only uses doubles and converting between floats and
        # doubles is imprecise
        self._test("float", 0, 1, 2, 3, 0.1, 0.2, 0.3, -1, -2, -3, -0.1, -0.2, -0.3,
                   eq=lambda x, y: x - y < 0.000001)

    def testDouble(self):
        self._test("double", 0, 1, 2, 3, 0.1, 0.2, 0.3, -1, -2, -3, -0.1, -0.2, -0.3)

    def testBinary(self):
        # BUG FIX: b"of" b"b\x00inary" was a single implicitly-concatenated
        # literal (b"ofb\x00inary"); a comma separating two test values was
        # clearly intended.
        self._test("binary", b"this", b"is", b"a", b"test", b"of", b"b\x00inary")

    def testSymbol(self):
        self._test("symbol", symbol("this is a symbol test"), symbol("bleh"), symbol("blah"))

    def testTimestamp(self):
        self._test("timestamp", timestamp(0), timestamp(12345), timestamp(1000000))

    def testChar(self):
        self._test("char", char('a'), char('b'), char('c'), char(u'\u20AC'))

    def testUUID(self):
        self._test("uuid", uuid4(), uuid4(), uuid4())

    def testDecimal32(self):
        self._test("decimal32", decimal32(0), decimal32(1), decimal32(2), decimal32(3), decimal32(4), decimal32(2**30))

    def testDecimal64(self):
        self._test("decimal64", decimal64(0), decimal64(1), decimal64(2), decimal64(3), decimal64(4), decimal64(2**60))

    def testDecimal128(self):
        self._test("decimal128", decimal128(b"fdsaasdf;lkjjkl;"), decimal128(b"x" * 16))

    def testCopy(self):
        self.data.put_described()
        self.data.enter()
        self.data.put_ulong(123)
        self.data.put_map()
        self.data.enter()
        self.data.put_string("pi")
        self.data.put_double(3.14159265359)
        dst = Data()
        dst.copy(self.data)
        copy = dst.format()
        orig = self.data.format()
        assert copy == orig, (copy, orig)

    def testCopyNested(self):
        nested = [1, 2, 3, [4, 5, 6], 7, 8, 9]
        self.data.put_object(nested)
        dst = Data()
        dst.copy(self.data)
        assert dst.format() == self.data.format()

    def testCopyNestedArray(self):
        nested = [Array(UNDESCRIBED, Data.LIST,
                        ["first", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
                        ["second", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
                        ["third", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
                        ),
                  "end"]
        self.data.put_object(nested)
        dst = Data()
        dst.copy(self.data)
        assert dst.format() == self.data.format()

    def testRoundTrip(self):
        obj = {symbol("key"): timestamp(1234),
               ulong(123): "blah",
               char("c"): "bleh",
               u"desc": Described(symbol("url"), u"http://example.org"),
               u"array": Array(UNDESCRIBED, Data.INT, 1, 2, 3),
               u"list": [1, 2, 3, None, 4],
               u"boolean": True}
        self.data.put_object(obj)
        enc = self.data.encode()
        data = Data()
        data.decode(enc)
        data.rewind()
        assert data.next()
        copy = data.get_object()
        assert copy == obj, (copy, obj)

    def testBuffer(self):
        try:
            self.data.put_object(buffer(b"foo"))
        except NameError:
            # python >= 3.0 does not have `buffer`
            return
        data = Data()
        data.decode(self.data.encode())
        data.rewind()
        assert data.next()
        assert data.type() == Data.BINARY
        assert data.get_object() == b"foo"

    def testMemoryView(self):
        self.data.put_object(memoryview(b"foo"))
        data = Data()
        data.decode(self.data.encode())
        data.rewind()
        assert data.next()
        assert data.type() == Data.BINARY
        assert data.get_object() == b"foo"

    def testLookup(self):
        obj = {symbol("key"): u"value",
               symbol("pi"): 3.14159,
               symbol("list"): [1, 2, 3, 4]}
        self.data.put_object(obj)
        self.data.rewind()
        self.data.next()
        self.data.enter()
        self.data.narrow()
        assert self.data.lookup("pi")
        assert self.data.get_object() == 3.14159
        self.data.rewind()
        assert self.data.lookup("key")
        assert self.data.get_object() == u"value"
        self.data.rewind()
        assert self.data.lookup("list")
        assert self.data.get_object() == [1, 2, 3, 4]
        self.data.widen()
        self.data.rewind()
        assert not self.data.lookup("pi")
| |
# Copyright 2014 - Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import client as glanceclient
from heatclient import client as heatclient
from marconiclient.queues.v1 import client as marconiclient
from mistralclient.api import client as mistralclient
from neutronclient.neutron import client as neutronclient
from oslo.config import cfg
from swiftclient import client as swiftclient
from solum.common import exception
from solum.common import solum_keystoneclient
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Note: this config is duplicated in many projects that use OpenStack
# clients. This should really be in the client.
# There is a place holder bug here:
# https://bugs.launchpad.net/solum/+bug/1292334
# that we use to track this.
# Per-service client configuration options. Each *_client_opts list below is
# registered under its own option group (e.g. [glance_client] in the config
# file) so options can be read as cfg.CONF.<service>_client.<option>.
glance_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Glance service.'))]

heat_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the OpenStack service.')),
    cfg.StrOpt('ca_file',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.StrOpt('cert_file',
               help=_('Optional PEM-formatted certificate chain file.')),
    cfg.StrOpt('key_file',
               help=_('Optional PEM-formatted file that contains the '
                      'private key.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate will not "
                       "be verified."))]

marconi_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Queue service catalog to use '
                   'for communication with the Marconi service.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for marconi "
                       "will not be verified."))]

neutron_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Neutron service.')),
    cfg.StrOpt('ca_cert',
               help=_('Optional CA bundle file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set, then the server's certificate for neutron "
                       "will not be verified."))]

swift_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the Swift service.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified."))]

mistral_client_opts = [
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help=_(
                   'Type of endpoint in Identity service catalog to use '
                   'for communication with the mistral service.')),
    cfg.StrOpt('cacert',
               help=_('Optional CA cert file to use in SSL connections '
                      'with Mistral.')),
    cfg.BoolOpt('insecure',
                default=False,
                help=_("If set the server certificate will not be verified "
                       "while using Mistral."))]

# Register every option list under its per-service group at import time.
cfg.CONF.register_opts(glance_client_opts, group='glance_client')
cfg.CONF.register_opts(heat_client_opts, group='heat_client')
cfg.CONF.register_opts(marconi_client_opts, group='marconi_client')
cfg.CONF.register_opts(neutron_client_opts, group='neutron_client')
cfg.CONF.register_opts(swift_client_opts, group='swift_client')
cfg.CONF.register_opts(mistral_client_opts, group='mistral_client')
class OpenStackClients(object):
    """Convenience class to create and cache client instances."""

    def __init__(self, context):
        self.context = context
        # One lazily-built client per service, cached for the lifetime of
        # this instance.
        self._keystone = None
        self._glance = None
        self._heat = None
        self._neutron = None
        self._swift = None
        self._marconi = None
        self._mistral = None

    def _get_client_option(self, client, option):
        """Read ``<client>_client.<option>`` from the oslo.config registry."""
        group = getattr(cfg.CONF, '%s_client' % client)
        return getattr(group, option)

    def url_for(self, **kwargs):
        """Resolve an endpoint URL through the Keystone service catalog."""
        catalog = self.keystone().client.service_catalog
        return catalog.url_for(**kwargs)

    @property
    def auth_url(self):
        """Keystone v3 authentication endpoint."""
        return self.keystone().v3_endpoint

    @property
    def auth_token(self):
        """Token carried by the request context, else one from Keystone."""
        return self.context.auth_token or self.keystone().auth_token

    def keystone(self):
        if not self._keystone:
            self._keystone = solum_keystoneclient.KeystoneClientV3(
                self.context)
        return self._keystone

    @exception.wrap_keystone_exception
    def marconi(self):
        if not self._marconi:
            endpoint_type = self._get_client_option('marconi',
                                                    'endpoint_type')
            endpoint_url = self.url_for(service_type='queuing',
                                        endpoint_type=endpoint_type)
            # marconiclient takes its keystone credentials through a nested
            # configuration dict rather than keyword arguments.
            conf = {
                'auth_opts': {
                    'backend': 'keystone',
                    'options': {
                        'os_auth_token': self.auth_token,
                        'os_auth_url': self.auth_url,
                        'insecure': self._get_client_option('marconi',
                                                            'insecure'),
                    },
                },
            }
            self._marconi = marconiclient.Client(endpoint_url, conf=conf)
        return self._marconi

    @exception.wrap_keystone_exception
    def neutron(self):
        if not self._neutron:
            endpoint_type = self._get_client_option('neutron',
                                                    'endpoint_type')
            endpoint_url = self.url_for(service_type='network',
                                        endpoint_type=endpoint_type)
            self._neutron = neutronclient.Client(
                '2.0',
                auth_url=self.auth_url,
                endpoint_url=endpoint_url,
                token=self.auth_token,
                username=None,
                password=None,
                insecure=self._get_client_option('neutron', 'insecure'),
                ca_cert=self._get_client_option('neutron', 'ca_cert'))
        return self._neutron

    @exception.wrap_keystone_exception
    def glance(self):
        if not self._glance:
            endpoint_type = self._get_client_option('glance', 'endpoint_type')
            endpoint = self.url_for(service_type='image',
                                    endpoint_type=endpoint_type)
            self._glance = glanceclient.Client('2', endpoint,
                                               token=self.auth_token)
        return self._glance

    @exception.wrap_keystone_exception
    def mistral(self):
        if not self._mistral:
            endpoint_type = self._get_client_option('mistral',
                                                    'endpoint_type')
            endpoint = self.url_for(service_type='workflow',
                                    endpoint_type=endpoint_type)
            self._mistral = mistralclient.Client(mistral_url=endpoint,
                                                 auth_token=self.auth_token)
        return self._mistral

    @exception.wrap_keystone_exception
    def heat(self):
        if not self._heat:
            endpoint_type = self._get_client_option('heat', 'endpoint_type')
            endpoint = self.url_for(service_type='orchestration',
                                    endpoint_type=endpoint_type)
            self._heat = heatclient.Client(
                '1', endpoint,
                auth_url=self.auth_url,
                token=self.auth_token,
                username=None,
                password=None,
                ca_file=self._get_client_option('heat', 'ca_file'),
                cert_file=self._get_client_option('heat', 'cert_file'),
                key_file=self._get_client_option('heat', 'key_file'),
                insecure=self._get_client_option('heat', 'insecure'))
        return self._heat

    @exception.wrap_keystone_exception
    def swift(self):
        if not self._swift:
            endpoint_type = self._get_client_option('swift', 'endpoint_type')
            preauthurl = self.url_for(service_type='object-store',
                                      endpoint_type=endpoint_type)
            self._swift = swiftclient.Connection(
                auth_version='2.0',
                preauthtoken=self.auth_token,
                preauthurl=preauthurl,
                os_options={'endpoint_type': endpoint_type},
                cacert=self._get_client_option('swift', 'cacert'),
                insecure=self._get_client_option('swift', 'insecure'))
        return self._swift
| |
'''
Image
=====
Core classes for loading images and converting them to a
:class:`~kivy.graphics.texture.Texture`. The raw image data can be keep in
memory for further access.
'''
__all__ = ('Image', 'ImageLoader', 'ImageData')
from kivy.event import EventDispatcher
from kivy.core import core_register_libs
from kivy.logger import Logger
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.atlas import Atlas
from kivy.resources import resource_find
from kivy.utils import platform
from kivy.compat import string_types
import zipfile
# Both branches of the historical try/except imported the same module (a
# leftover from the cStringIO/StringIO -> io migration), so a single plain
# import suffices.
import io as SIO
# Late binding: the real Texture classes are imported at the very bottom of
# this module to avoid a circular import with kivy.graphics.texture.
Texture = TextureRegion = None
# register image caching only for keep_data=True; atlas descriptors are
# registered without a timeout.
Cache.register('kv.image', timeout=60)
Cache.register('kv.atlas')
class ImageData(object):
    '''Container for an image and its mipmap levels.

    Level 0 is always present and holds the base image.
    '''

    __slots__ = ('fmt', 'mipmaps', 'source', 'flip_vertical')

    # Pixel formats this container accepts.
    _supported_fmts = ('rgb', 'rgba', 'bgr', 'bgra', 's3tc_dxt1', 's3tc_dxt3',
                       's3tc_dxt5', 'pvrtc_rgb2', 'pvrtc_rgb4', 'pvrtc_rgba2',
                       'pvrtc_rgba4', 'etc1_rgb8')

    def __init__(self, width, height, fmt, data, source=None,
                 flip_vertical=True):
        assert fmt in ImageData._supported_fmts
        # Decoded image format, one of the available texture formats.
        self.fmt = fmt
        # Mapping level -> [width, height, data].
        self.mipmaps = {}
        self.add_mipmap(0, width, height, data)
        # Origin of the image, if known.
        self.source = source
        # Whether the texture must be vertically flipped on upload.
        self.flip_vertical = flip_vertical

    def release_data(self):
        '''Drop the pixel payload of every mipmap level.'''
        for entry in self.mipmaps.values():
            entry[2] = None

    @property
    def width(self):
        '''Width in pixels of the level-0 image.'''
        return self.mipmaps[0][0]

    @property
    def height(self):
        '''Height in pixels of the level-0 image.'''
        return self.mipmaps[0][1]

    @property
    def data(self):
        '''Pixel data of the level-0 image.'''
        return self.mipmaps[0][2]

    @property
    def size(self):
        '''(width, height) in pixels of the level-0 image.'''
        w, h, _ = self.mipmaps[0]
        return w, h

    @property
    def have_mipmap(self):
        '''True when more than the base level is stored.'''
        return len(self.mipmaps) > 1

    def __repr__(self):
        return ('<ImageData width=%d height=%d fmt=%s '
                'source=%r with %d images>' % (
                    self.width, self.height, self.fmt,
                    self.source, len(self.mipmaps)))

    def add_mipmap(self, level, width, height, data):
        '''Add a image for a specific mipmap level.

        .. versionadded:: 1.0.7
        '''
        self.mipmaps[level] = [int(width), int(height), data]

    def get_mipmap(self, level):
        '''Get the mipmap image at a specific level if it exists

        .. versionadded:: 1.0.7
        '''
        if level == 0:
            return (self.width, self.height, self.data)
        assert level < len(self.mipmaps)
        return self.mipmaps[level]

    def iterate_mipmaps(self):
        '''Iterate over all mipmap images available

        .. versionadded:: 1.0.7
        '''
        mipmaps = self.mipmaps
        for level in range(len(mipmaps)):
            entry = mipmaps.get(level, None)
            if entry is None:
                raise Exception('Invalid mipmap level, found empty one')
            yield level, entry[0], entry[1], entry[2]
class ImageLoaderBase(object):
    '''Base to implement an image loader.'''

    # BUGFIX: the slot list declared '_texture' but the code assigns
    # '_textures' (in __init__ and populate()), so instantiating a fully
    # slotted loader raised AttributeError.  '_textures' is now declared;
    # '_texture' is kept for backward compatibility with slotted subclasses.
    __slots__ = ('_texture', '_textures', '_data', 'filename', 'keep_data',
                 '_mipmap', '_nocache')

    def __init__(self, filename, **kwargs):
        # Behaviour flags; see the Image class for their meaning.
        self._mipmap = kwargs.get('mipmap', False)
        self.keep_data = kwargs.get('keep_data', False)
        self._nocache = kwargs.get('nocache', False)
        self.filename = filename
        self._data = self.load(filename)
        self._textures = None

    def load(self, filename):
        '''Load an image.  Overridden by concrete loaders; the base
        implementation returns no data.'''
        return None

    @staticmethod
    def can_save():
        '''Indicate if the loader can save the Image object
        '''
        return False

    @staticmethod
    def save():
        raise NotImplementedError()

    def populate(self):
        '''Create (or fetch from the cache) one texture per loaded image
        and release the raw pixel data unless keep_data was requested.'''
        self._textures = []
        if __debug__:
            Logger.trace('Image: %r, populate to textures (%d)' %
                         (self.filename, len(self._data)))
        for count in range(len(self._data)):
            # first, check if a texture with the same name already exist in
            # the cache
            uid = '%s|%s|%s' % (self.filename, self._mipmap, count)
            texture = Cache.get('kv.texture', uid)
            # if not, create it and append to the cache
            if texture is None:
                imagedata = self._data[count]
                texture = Texture.create_from_data(
                    imagedata, mipmap=self._mipmap)
                if not self._nocache:
                    Cache.append('kv.texture', uid, texture)
                if imagedata.flip_vertical:
                    texture.flip_vertical()
            # set as our current texture
            self._textures.append(texture)
            # release data if asked
            if not self.keep_data:
                self._data[count].release_data()

    @property
    def width(self):
        '''Image width
        '''
        return self._data[0].width

    @property
    def height(self):
        '''Image height
        '''
        return self._data[0].height

    @property
    def size(self):
        '''Image size (width, height)
        '''
        return (self._data[0].width, self._data[0].height)

    @property
    def texture(self):
        '''Get the image texture (created on the first call)
        '''
        if self._textures is None:
            self.populate()
        if self._textures is None:
            return None
        return self._textures[0]

    @property
    def textures(self):
        '''Get the textures list (for mipmapped image or animated image)

        .. versionadded:: 1.0.8
        '''
        if self._textures is None:
            self.populate()
        return self._textures

    @property
    def nocache(self):
        '''Indicate if the texture will not be stored in the cache

        .. versionadded:: 1.6.0
        '''
        return self._nocache
class ImageLoader(object):
    '''Registry of available loader backends and entry points for loading
    images from plain files, zip archives and atlases.'''

    # Loader classes registered via register(); tried in order by load().
    loaders = []

    @staticmethod
    def zip_loader(filename, **kwargs):
        '''Read images from an zip file.

        .. versionadded:: 1.0.8

        Returns an Image with a list of type ImageData stored in Image._data
        '''
        # read zip in memory for faster access.  BUGFIX: the file handle was
        # previously never closed; use a context manager.
        with open(filename, 'rb') as fd:
            _file = SIO.BytesIO(fd.read())
        # read all images inside the zip
        z = zipfile.ZipFile(_file)
        image_data = []
        # sort filename list so frames come out in a deterministic order
        znamelist = z.namelist()
        znamelist.sort()
        image = None
        for zfilename in znamelist:
            try:
                # read file and store it in mem with fileIO struct around it
                tmpfile = SIO.BytesIO(z.read(zfilename))
                ext = zfilename.split('.')[-1].lower()
                im = None
                for loader in ImageLoader.loaders:
                    if ext not in loader.extensions():
                        continue
                    Logger.debug('Image%s: Load <%s> from <%s>' %
                                 (loader.__name__[11:], zfilename, filename))
                    # narrowed from a bare except: keep trying other loaders
                    try:
                        im = loader(tmpfile, **kwargs)
                    except Exception:
                        # Loader failed, continue trying.
                        continue
                    break
                if im is not None:
                    # append ImageData to local variable before it's
                    # overwritten
                    image_data.append(im._data[0])
                    image = im
                # else: not an image file, skip to the next entry
            except Exception:
                # narrowed from a bare except: zip loading is best-effort
                Logger.warning('Image: Unable to load image'
                               '<%s> in zip <%s> trying to continue...'
                               % (zfilename, filename))
        z.close()
        if len(image_data) == 0:
            raise Exception('no images in zip <%s>' % filename)
        # replace Image.Data with the array of all the images in the zip
        image._data = image_data
        image.filename = filename
        return image

    @staticmethod
    def register(defcls):
        '''Add a loader class to the registry.'''
        ImageLoader.loaders.append(defcls)

    @staticmethod
    def load(filename, **kwargs):
        '''Load *filename*, dispatching on its extension.  ``atlas://``
        URLs are resolved through the Atlas cache and ``.zip`` files are
        delegated to :meth:`zip_loader`.'''
        # atlas ?
        if filename[:8] == 'atlas://':
            # remove the url
            rfn = filename[8:]
            # last field is the ID
            try:
                rfn, uid = rfn.rsplit('/', 1)
            except ValueError:
                raise ValueError(
                    'Image: Invalid %s name for atlas' % filename)

            # search if we already got the atlas loaded
            atlas = Cache.get('kv.atlas', rfn)

            # atlas already loaded, so reupload the missing texture in cache,
            # because when it's not in use, the texture can be removed from the
            # kv.texture cache.
            if atlas:
                texture = atlas[uid]
                fn = 'atlas://%s/%s' % (rfn, uid)
                cid = '%s|%s|%s' % (fn, False, 0)
                Cache.append('kv.texture', cid, texture)
                return Image(texture)

            # search with resource
            afn = rfn
            if not afn.endswith('.atlas'):
                afn += '.atlas'
            afn = resource_find(afn)
            if not afn:
                raise Exception('Unable to found %r atlas' % afn)
            atlas = Atlas(afn)
            Cache.append('kv.atlas', rfn, atlas)
            # first time, fill our texture cache.
            for nid, texture in atlas.textures.items():
                fn = 'atlas://%s/%s' % (rfn, nid)
                cid = '%s|%s|%s' % (fn, False, 0)
                Cache.append('kv.texture', cid, texture)
            return Image(atlas[uid])

        # extract extensions
        ext = filename.split('.')[-1].lower()

        # prevent url querystrings
        if filename.startswith(('http://', 'https://')):
            ext = ext.split('?')[0]

        # special case. When we are trying to load a "zip" file with image, we
        # will use the special zip_loader in ImageLoader. This might return a
        # sequence of images contained in the zip.
        # NOTE(review): kwargs are not forwarded here -- presumably
        # intentional; confirm before changing.
        if ext == 'zip':
            return ImageLoader.zip_loader(filename)
        else:
            im = None
            for loader in ImageLoader.loaders:
                if ext not in loader.extensions():
                    continue
                Logger.debug('Image%s: Load <%s>' %
                             (loader.__name__[11:], filename))
                im = loader(filename, **kwargs)
                break
            if im is None:
                raise Exception('Unknown <%s> type, no loader found.' % ext)
            return im
class Image(EventDispatcher):
    '''Load an image and store the size and texture.

    .. versionadded::
        In 1.0.7, the mipmap attribute has been added. The texture_mipmap and
        texture_rectangle have been deleted.

    .. versionadded::
        In 1.0.8, an Image widget can change its texture. A new event
        'on_texture' has been introduced. New methods for handling sequenced
        animation have been added.

    :Parameters:
        `arg` : can be a string (str), Texture or Image object.
            A string is interpreted as a path to the image to be loaded.
            You can also provide a texture object or an already existing
            image object. In the latter case, a real copy of the given
            image object will be returned.
        `keep_data` : bool, defaults to False.
            Keep the image data when the texture is created.
        `scale` : float, defaults to 1.0
            Scale of the image.
        `mipmap` : bool, defaults to False
            Create mipmap for the texture.
        `anim_delay`: float, defaults to .25
            Delay in seconds between each animation frame. Lower values means
            faster animation.
    '''

    # Attributes copied over when constructing from another Image instance.
    copy_attributes = ('_size', '_filename', '_texture', '_image',
                       '_mipmap', '_nocache')

    def __init__(self, arg, **kwargs):
        # this event should be fired on animation of sequenced img's
        self.register_event_type('on_texture')

        super(Image, self).__init__()

        self._mipmap = kwargs.get('mipmap', False)
        self._keep_data = kwargs.get('keep_data', False)
        self._nocache = kwargs.get('nocache', False)
        self._size = [0, 0]
        self._image = None
        self._filename = None
        self._texture = None
        self._anim_available = False
        self._anim_index = 0
        self._anim_delay = 0
        self.anim_delay = kwargs.get('anim_delay', .25)
        # indicator of images having been loaded in cache
        self._iteration_done = False

        # Dispatch on the argument type: copy, wrap texture, adopt loader,
        # or load from a filename.
        if isinstance(arg, Image):
            for attr in Image.copy_attributes:
                self.__setattr__(attr, arg.__getattribute__(attr))
        elif type(arg) in (Texture, TextureRegion):
            if not hasattr(self, 'textures'):
                self.textures = []
            self.textures.append(arg)
            self._texture = arg
            self._size = self.texture.size
        elif isinstance(arg, ImageLoaderBase):
            self.image = arg
        elif isinstance(arg, string_types):
            self.filename = arg
        else:
            raise Exception('Unable to load image type {0!r}'.format(arg))

        # check if the image has sequences for animation in it
        self._img_iterate()

    def remove_from_cache(self):
        '''Remove the Image from cache. This facilitates re-loading of
        images from disk in case the image content has changed.

        .. versionadded:: 1.3.0

        Usage::

            im = CoreImage('1.jpg')
            # -- do something --
            im.remove_from_cache()
            im = CoreImage('1.jpg')
            # this time image will be re-loaded from disk
        '''
        count = 0
        uid = '%s|%s|%s' % (self.filename, self._mipmap, count)
        Cache.remove("kv.image", uid)
        # evict every per-frame texture entry until a gap is found
        while Cache.get("kv.texture", uid):
            Cache.remove("kv.texture", uid)
            count += 1
            uid = '%s|%s|%s' % (self.filename, self._mipmap, count)

    def _anim(self, *largs):
        # Clock callback: advance the animation by one frame.
        if not self._image:
            return
        textures = self.image.textures
        if self._anim_index >= len(textures):
            # ran past the last frame: stop the scheduler and wrap around
            self.anim_reset(False)
            self._anim_index = 0
        self._texture = self.image.textures[self._anim_index]
        self.dispatch('on_texture')
        self._anim_index += 1
        self._anim_index %= len(self._image.textures)

    def anim_reset(self, allow_anim):
        '''Reset an animation if available.

        .. versionadded:: 1.0.8

        :Parameters:
            `allow_anim`: bool
                Indicate whether the animation should restart playing or not.

        Usage::

            # start/reset animation
            image.anim_reset(True)

            # or stop the animation
            image.anim_reset(False)

        You can change the animation speed whilst it is playing::

            # Set to 20 FPS
            image.anim_delay = 1 / 20.
        '''
        # stop animation
        Clock.unschedule(self._anim)
        if allow_anim and self._anim_available:
            Clock.schedule_interval(self._anim, self.anim_delay)
            self._anim()

    def _get_anim_delay(self):
        return self._anim_delay

    def _set_anim_delay(self, x):
        if self._anim_delay == x:
            return
        self._anim_delay = x
        if self._anim_available:
            Clock.unschedule(self._anim)
            # a negative delay disables the animation entirely
            if self._anim_delay >= 0:
                Clock.schedule_interval(self._anim, self._anim_delay)

    anim_delay = property(_get_anim_delay, _set_anim_delay)
    '''Delay between each animation frame. A lower value means faster
    animation.

    .. versionadded:: 1.0.8
    '''

    @property
    def anim_available(self):
        '''Return True if this Image instance has animation available.

        .. versionadded:: 1.0.8
        '''
        return self._anim_available

    @property
    def anim_index(self):
        '''Return the index number of the image currently in the texture.

        .. versionadded:: 1.0.8
        '''
        return self._anim_index

    def _img_iterate(self, *largs):
        # Scan the loaded data once to detect multi-frame (animated) images.
        if not self.image or self._iteration_done:
            return
        self._iteration_done = True
        imgcount = len(self.image.textures)
        if imgcount > 1:
            self._anim_available = True
            self.anim_reset(True)
        self._texture = self.image.textures[0]

    def on_texture(self, *largs):
        '''This event is fired when the texture reference or content has
        changed. It is normally used for sequenced images.

        .. versionadded:: 1.0.8
        '''
        pass

    @staticmethod
    def load(filename, **kwargs):
        '''Load an image

        :Parameters:
            `filename` : str
                Filename of the image.
            `keep_data` : bool, defaults to False
                Keep the image data when the texture is created.
        '''
        kwargs.setdefault('keep_data', False)
        return Image(filename, **kwargs)

    def _get_image(self):
        return self._image

    def _set_image(self, image):
        self._image = image
        if hasattr(image, 'filename'):
            self._filename = image.filename
        if image:
            self._size = (self.image.width, self.image.height)

    image = property(_get_image, _set_image,
                     doc='Get/set the data image object')

    def _get_filename(self):
        return self._filename

    def _set_filename(self, value):
        if value is None or value == self._filename:
            return
        self._filename = value

        # construct uid as a key for Cache
        uid = '%s|%s|%s' % (self.filename, self._mipmap, 0)

        # in case of Image have been asked with keep_data
        # check the kv.image cache instead of texture.
        image = Cache.get('kv.image', uid)
        if image:
            # we found an image, yeah ! but reset the texture now.
            self.image = image
            # if image.__class__ is core image then it's a texture
            # from atlas or other sources and has no data so skip
            if (image.__class__ != self.__class__ and
                    not image.keep_data and self._keep_data):
                # cached copy lacks raw data but this caller wants it:
                # evict and reload from disk with keep_data enabled.
                self.remove_from_cache()
                self._filename = ''
                self._set_filename(value)
            else:
                self._texture = None
                self._img_iterate()
            return
        else:
            # if we already got a texture, it will be automatically reloaded.
            _texture = Cache.get('kv.texture', uid)
            if _texture:
                self._texture = _texture
                return

        # if image not already in cache then load
        tmpfilename = self._filename
        image = ImageLoader.load(
            self._filename, keep_data=self._keep_data,
            mipmap=self._mipmap, nocache=self._nocache)
        self._filename = tmpfilename
        # put the image into the cache if needed
        if isinstance(image, Texture):
            self._texture = image
            self._size = image.size
        else:
            self.image = image
            if not self._nocache:
                Cache.append('kv.image', uid, self.image)

    filename = property(_get_filename, _set_filename,
                        doc='Get/set the filename of image')

    @property
    def size(self):
        '''Image size (width, height)
        '''
        return self._size

    @property
    def width(self):
        '''Image width
        '''
        return self._size[0]

    @property
    def height(self):
        '''Image height
        '''
        return self._size[1]

    @property
    def texture(self):
        '''Texture of the image'''
        if self.image:
            if not self._iteration_done:
                self._img_iterate()
        return self._texture

    @property
    def nocache(self):
        '''Indicate whether the texture will not be stored in the cache or not.

        .. versionadded:: 1.6.0
        '''
        return self._nocache

    def save(self, filename, flipped=False):
        '''Save image texture to file.

        The filename should have the '.png' extension because the texture data
        read from the GPU is in the RGBA format. '.jpg' might work but has not
        been heavilly tested so some providers might break when using it.
        Any other extensions are not officially supported.

        Example::

            # Save an core image object
            from kivy.core.image import Image
            img = Image('hello.png')
            img.save('hello2.png')

            # Save a texture
            texture = Texture.create(...)
            img = Image(texture)
            img.save('hello3.png')

        .. versionadded:: 1.7.0

        .. versionchanged:: 1.8.0
            Parameter `flipped` added to flip the image before saving, default
            to False.
        '''
        pixels = None
        size = None
        loaders = [x for x in ImageLoader.loaders if x.can_save()]
        if not loaders:
            return False
        loader = loaders[0]

        if self.image:
            # we might have a ImageData object to use
            data = self.image._data[0]
            if data.data is not None:
                # BUGFIX: this condition was inverted (`not in`), which took
                # the raw-data fast path exactly for the formats whose byte
                # length can never satisfy the rgb/rgba size check below,
                # while plain rgb/rgba data was sent to conversion.
                if data.fmt in ('rgba', 'rgb'):
                    # fast path, use the "raw" data when keep_data is used
                    size = data.width, data.height
                    pixels = data.data
                else:
                    # the format is not rgba, we need to convert it.
                    # use texture for that.
                    # NOTE(review): Image defines no populate(); presumably
                    # this should delegate to the loader -- confirm upstream.
                    self.populate()

        if pixels is None and self._texture:
            # use the texture pixels
            size = self._texture.size
            pixels = self._texture.pixels

        if pixels is None:
            return False

        # infer the pixel format from the buffer length
        l_pixels = len(pixels)
        if l_pixels == size[0] * size[1] * 3:
            fmt = 'rgb'
        elif l_pixels == size[0] * size[1] * 4:
            fmt = 'rgba'
        else:
            raise Exception('Unable to determine the format of the pixels')
        return loader.save(filename, size[0], size[1], fmt, pixels, flipped)

    def read_pixel(self, x, y):
        '''For a given local x/y position, return the pixel color at that
        position.

        .. warning::
            This function can only be used with images loaded with the
            keep_data=True keyword. For example::

                m = Image.load('image.png', keep_data=True)
                color = m.read_pixel(150, 150)

        :Parameters:
            `x` : int
                Local x coordinate of the pixel in question.
            `y` : int
                Local y coordinate of the pixel in question.
        '''
        data = self.image._data[0]

        # can't use this function without ImageData
        if data.data is None:
            raise EOFError('Image data is missing, make sure that image is'
                           'loaded with keep_data=True keyword.')

        # check bounds
        x, y = int(x), int(y)
        if not (0 <= x < data.width and 0 <= y < data.height):
            raise IndexError('Position (%d, %d) is out of range.' % (x, y))

        assert data.fmt in ImageData._supported_fmts
        size = 3 if data.fmt in ('rgb', 'bgr') else 4
        index = y * data.width * size + x * size
        raw = data.data[index:index + size]
        # BUGFIX: iterating bytes yields ints on Python 3 but 1-char strings
        # on Python 2; normalise before scaling to [0, 1].
        color = [(c if isinstance(c, int) else ord(c)) / 255.0 for c in raw]

        # conversion for BGR->RGB, BGR->RGBA format
        if data.fmt in ('bgr', 'bgra'):
            color[0], color[2] = color[2], color[0]
        return color
def load(filename):
    '''Module-level convenience wrapper around :meth:`Image.load`.'''
    return Image.load(filename)
# load image loaders, most-preferred first
image_libs = []
if platform in ('macosx', 'ios'):
    # prefer the native ImageIO provider on Apple platforms
    image_libs += [('imageio', 'img_imageio')]
image_libs += [
    ('tex', 'img_tex'),
    ('dds', 'img_dds'),
    ('pygame', 'img_pygame'),
    ('pil', 'img_pil'),
    ('gif', 'img_gif')]
libs_loaded = core_register_libs('image', image_libs)

from os import environ

# Without any provider the app cannot show images at all: abort, except
# during the documentation build where providers are stubbed out.
# (idiom fix: `'KIVY_DOC' not in environ` instead of `not ... in ...`)
if 'KIVY_DOC' not in environ and not libs_loaded:
    import sys

    Logger.critical('App: Unable to get any Image provider, abort.')
    sys.exit(1)

# resolve the late binding declared at the top of this module.
from kivy.graphics.texture import Texture, TextureRegion
| |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertConfig,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds a deliberately tiny MobileBERT configuration plus random inputs
    and asserts the output shapes of every task head.

    The sizes (hidden_size=64, 5 layers, ...) keep each check fast on CPU;
    ``parent`` is the TestCase instance providing the assertion methods.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a tiny MobileBertConfig plus random ids, masks and labels.

        Mask/token-type/label tensors are only produced when the
        corresponding ``use_*`` flag is set; otherwise they stay ``None``.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the base model's hidden-state and pooler output shapes."""
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check MLM logits shape: (batch, seq, vocab)."""
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check NSP logits shape: (batch, 2)."""
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the joint MLM + NSP pretraining head output shapes."""
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check start/end span logits shapes: (batch, seq) each."""
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check sequence-classification logits shape: (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check token-classification logits shape: (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check multiple-choice logits shape after expanding inputs to
        (batch, num_choices, seq)."""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict)
        shape expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, unittest.TestCase):
    """Runs the common model-tester suite against every MobileBERT head."""

    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    fx_ready_model_classes = all_model_classes
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Special case: the ForPreTraining head wants a token-level label
        # tensor plus a per-sequence next-sentence label.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels and model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
            label_shape = (self.model_tester.batch_size, self.model_tester.seq_length)
            inputs_dict["labels"] = torch.zeros(label_shape, dtype=torch.long, device=torch_device)
            inputs_dict["next_sentence_label"] = torch.zeros(
                self.model_tester.batch_size, dtype=torch.long, device=torch_device
            )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        self.model_tester.create_and_check_mobilebert_model(*self.model_tester.prepare_config_and_inputs())

    def test_for_masked_lm(self):
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*self.model_tester.prepare_config_and_inputs())

    def test_for_multiple_choice(self):
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_for_next_sequence_prediction(self):
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_for_pretraining(self):
        self.model_tester.create_and_check_mobilebert_for_pretraining(*self.model_tester.prepare_config_and_inputs())

    def test_for_question_answering(self):
        self.model_tester.create_and_check_mobilebert_for_question_answering(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_for_sequence_classification(self):
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(
            *self.model_tester.prepare_config_and_inputs()
        )

    def test_for_token_classification(self):
        self.model_tester.create_and_check_mobilebert_for_token_classification(
            *self.model_tester.prepare_config_and_inputs()
        )
def _long_tensor(tok_lst):
    """Wrap *tok_lst* in a ``torch.long`` tensor on the active test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Relative tolerance used by the ratio-based comparison in the integration
# test below (values are compared as expected/actual against 1 +/- TOLERANCE).
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    """Slow integration checks against the released google/mobilebert-uncased weights."""

    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            hidden_states = model(input_ids)[0]
        self.assertEqual(hidden_states.shape, torch.Size((1, 9, 512)))
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT outputs span roughly 1e0..1e8, so an additive comparison
        # is meaningless for the large entries (a 0.0000001% error at 1e8 is
        # already ~1). Compare by ratio instead and require every element of
        # expected/actual to sit within [1 - TOLERANCE, 1 + TOLERANCE].
        ratio = expected_slice / hidden_states[..., :3, :3]
        within_lower = torch.all(ratio >= 1 - TOLERANCE)
        within_upper = torch.all(ratio <= 1 + TOLERANCE)
        self.assertTrue(within_lower and within_upper)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.volumes.backups \
import tables as backup_tables
from openstack_dashboard.dashboards.project.volumes.snapshots \
import tables as snapshot_tables
from openstack_dashboard.dashboards.project.volumes.volumes \
import tables as volume_tables
from openstack_dashboard.test import helpers as test
# Pre-resolved URLs for the volumes index page and its tab-specific views,
# shared by all tests in this module.
INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_SNAPSHOTS_TAB_URL = reverse('horizon:project:volumes:snapshots_tab')
VOLUME_BACKUPS_TAB_URL = reverse('horizon:project:volumes:backups_tab')
class VolumeAndSnapshotsAndBackupsTests(test.TestCase):
    """Exercise the project Volumes panel index view and its three tabs.

    These tests use mox record/replay: each stubbed API call below is an
    *expectation* that the view must perform with matching arguments, so
    the statement sequence in the helpers is deliberately rigid.
    """

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_list',
                                     'volume_list_paged',
                                     'volume_snapshot_list',
                                     'volume_snapshot_list_paged',
                                     'volume_backup_supported',
                                     'volume_backup_list_paged',
                                     ),
                        api.nova: ('server_list',)})
    def _test_index(self, backup_supported=True):
        # Fixture data returned by the stubbed API calls below.
        vol_backups = self.cinder_volume_backups.list()
        vol_snaps = self.cinder_volume_snapshots.list()
        volumes = self.cinder_volumes.list()

        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_list_paged(
            IsA(http.HttpRequest), marker=None, search_opts=None,
            sort_dir='desc', paginate=True).\
            AndReturn([volumes, False, False])
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        api.cinder.volume_snapshot_list(IsA(http.HttpRequest)).\
            AndReturn(vol_snaps)
        api.cinder.volume_snapshot_list_paged(
            IsA(http.HttpRequest), paginate=True, marker=None,
            sort_dir='desc').AndReturn([vol_snaps, False, False])
        api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(volumes)
        # The backups tab only issues its API calls when the backup service
        # is reported as available.
        if backup_supported:
            api.cinder.volume_backup_list_paged(
                IsA(http.HttpRequest), marker=None, sort_dir='desc',
                paginate=True).AndReturn([vol_backups, False, False])
            api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(volumes)
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')

        # Explicitly load the other tabs. If this doesn't work the test
        # will fail due to "Expected methods never called."
        res = self.client.get(VOLUME_SNAPSHOTS_TAB_URL)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        if backup_supported:
            res = self.client.get(VOLUME_BACKUPS_TAB_URL)
            self.assertTemplateUsed(res, 'project/volumes/index.html')

    def test_index_backup_supported(self):
        self._test_index(backup_supported=True)

    def test_index_backup_not_supported(self):
        self._test_index(backup_supported=False)

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_list_paged',
                                     'volume_backup_supported',
                                     'volume_snapshot_list'),
                        api.nova: ('server_list',)})
    def _test_index_paginated(self, marker, sort_dir, volumes, url,
                              has_more, has_prev):
        # Record expectations for a single paginated volumes-tab request.
        backup_supported = True
        vol_snaps = self.cinder_volume_snapshots.list()

        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_list_paged(IsA(http.HttpRequest), marker=marker,
                                     sort_dir=sort_dir, search_opts=None,
                                     paginate=True).\
            AndReturn([volumes, has_more, has_prev])
        api.cinder.volume_snapshot_list(
            IsA(http.HttpRequest), search_opts=None).AndReturn(vol_snaps)
        api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
            AndReturn([self.servers.list(), False])
        api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
            AndReturn(self.cinder_limits['absolute'])
        self.mox.ReplayAll()

        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        # Unset the stubs so the caller can record a fresh set of
        # expectations for the next page request within the same test.
        self.mox.UnsetStubs()
        return res

    @override_settings(API_RESULT_PAGE_SIZE=2)
    def test_index_paginated(self):
        mox_volumes = self.cinder_volumes.list()
        size = settings.API_RESULT_PAGE_SIZE

        # get first page
        expected_volumes = mox_volumes[:size]
        url = INDEX_URL
        res = self._test_index_paginated(marker=None, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=False)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)

        # get second page
        expected_volumes = mox_volumes[size:2 * size]
        marker = expected_volumes[0].id
        # NOTE(review): 'next' shadows the builtin; kept as-is for
        # byte-compatibility with the original code.
        next = volume_tables.VolumesTable._meta.pagination_param
        url = "?".join([INDEX_URL, "=".join([next, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)

        # get last page
        expected_volumes = mox_volumes[-size:]
        marker = expected_volumes[0].id
        next = volume_tables.VolumesTable._meta.pagination_param
        url = "?".join([INDEX_URL, "=".join([next, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="desc",
                                         volumes=expected_volumes, url=url,
                                         has_more=False, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)

    @override_settings(API_RESULT_PAGE_SIZE=2)
    def test_index_paginated_prev_page(self):
        mox_volumes = self.cinder_volumes.list()
        size = settings.API_RESULT_PAGE_SIZE

        # prev from some page
        expected_volumes = mox_volumes[size:2 * size]
        marker = expected_volumes[0].id
        prev = volume_tables.VolumesTable._meta.prev_pagination_param
        url = "?".join([INDEX_URL, "=".join([prev, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="asc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=True)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)

        # back to first page
        expected_volumes = mox_volumes[:size]
        marker = expected_volumes[0].id
        prev = volume_tables.VolumesTable._meta.prev_pagination_param
        url = "?".join([INDEX_URL, "=".join([prev, marker])])
        res = self._test_index_paginated(marker=marker, sort_dir="asc",
                                         volumes=expected_volumes, url=url,
                                         has_more=True, has_prev=False)
        volumes = res.context['volumes_table'].data
        self.assertItemsEqual(volumes, expected_volumes)

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_snapshot_list_paged',
                                     'volume_list',
                                     'volume_backup_supported',
                                     ),
                        api.nova: ('server_list',)})
    def _test_snapshots_index_paginated(self, marker, sort_dir, snapshots, url,
                                        has_more, has_prev):
        # Record expectations for a single paginated snapshots-tab request.
        backup_supported = True

        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_snapshot_list_paged(
            IsA(http.HttpRequest), marker=marker, sort_dir=sort_dir,
            paginate=True).AndReturn([snapshots, has_more, has_prev])
        api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_volumes.list())
        self.mox.ReplayAll()

        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        self.mox.UnsetStubs()
        return res

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_snapshots_index_paginated(self):
        mox_snapshots = self.cinder_volume_snapshots.list()
        size = settings.API_RESULT_PAGE_SIZE
        base_url = reverse('horizon:project:volumes:snapshots_tab')
        next = snapshot_tables.VolumeSnapshotsTable._meta.pagination_param

        # get first page
        expected_snapshots = mox_snapshots[:size]
        res = self._test_snapshots_index_paginated(
            marker=None, sort_dir="desc", snapshots=expected_snapshots,
            url=base_url, has_more=True, has_prev=False)
        snapshots = res.context['volume_snapshots_table'].data
        self.assertItemsEqual(snapshots, expected_snapshots)

        # get second page
        expected_snapshots = mox_snapshots[size:2 * size]
        marker = expected_snapshots[0].id
        url = "&".join([base_url, "=".join([next, marker])])
        res = self._test_snapshots_index_paginated(
            marker=marker, sort_dir="desc", snapshots=expected_snapshots,
            url=url, has_more=True, has_prev=True)
        snapshots = res.context['volume_snapshots_table'].data
        self.assertItemsEqual(snapshots, expected_snapshots)

        # get last page
        expected_snapshots = mox_snapshots[-size:]
        marker = expected_snapshots[0].id
        url = "&".join([base_url, "=".join([next, marker])])
        res = self._test_snapshots_index_paginated(
            marker=marker, sort_dir="desc", snapshots=expected_snapshots,
            url=url, has_more=False, has_prev=True)
        snapshots = res.context['volume_snapshots_table'].data
        self.assertItemsEqual(snapshots, expected_snapshots)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_snapshots_index_paginated_prev_page(self):
        mox_snapshots = self.cinder_volume_snapshots.list()
        size = settings.API_RESULT_PAGE_SIZE
        base_url = reverse('horizon:project:volumes:snapshots_tab')
        prev = snapshot_tables.VolumeSnapshotsTable._meta.prev_pagination_param

        # prev from some page
        expected_snapshots = mox_snapshots[size:2 * size]
        marker = expected_snapshots[0].id
        url = "&".join([base_url, "=".join([prev, marker])])
        res = self._test_snapshots_index_paginated(
            marker=marker, sort_dir="asc", snapshots=expected_snapshots,
            url=url, has_more=True, has_prev=True)
        snapshots = res.context['volume_snapshots_table'].data
        self.assertItemsEqual(snapshots, expected_snapshots)

        # back to first page
        expected_snapshots = mox_snapshots[:size]
        marker = expected_snapshots[0].id
        url = "&".join([base_url, "=".join([prev, marker])])
        res = self._test_snapshots_index_paginated(
            marker=marker, sort_dir="asc", snapshots=expected_snapshots,
            url=url, has_more=True, has_prev=False)
        snapshots = res.context['volume_snapshots_table'].data
        self.assertItemsEqual(snapshots, expected_snapshots)

    @test.create_stubs({api.cinder: ('tenant_absolute_limits',
                                     'volume_backup_list_paged',
                                     'volume_list',
                                     'volume_backup_supported',
                                     ),
                        api.nova: ('server_list',)})
    def _test_backups_index_paginated(self, marker, sort_dir, backups, url,
                                      has_more, has_prev):
        # Record expectations for a single paginated backups-tab request.
        backup_supported = True

        api.cinder.volume_backup_supported(IsA(http.HttpRequest)).\
            MultipleTimes().AndReturn(backup_supported)
        api.cinder.volume_backup_list_paged(
            IsA(http.HttpRequest), marker=marker, sort_dir=sort_dir,
            paginate=True).AndReturn([backups, has_more, has_prev])
        api.cinder.volume_list(IsA(http.HttpRequest)).AndReturn(
            self.cinder_volumes.list())
        self.mox.ReplayAll()

        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'project/volumes/index.html')
        self.mox.UnsetStubs()
        return res

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_backups_index_paginated(self):
        mox_backups = self.cinder_volume_backups.list()
        size = settings.API_RESULT_PAGE_SIZE
        base_url = reverse('horizon:project:volumes:backups_tab')
        next = backup_tables.BackupsTable._meta.pagination_param

        # get first page
        expected_backups = mox_backups[:size]
        res = self._test_backups_index_paginated(
            marker=None, sort_dir="desc", backups=expected_backups,
            url=base_url, has_more=True, has_prev=False)
        backups = res.context['volume_backups_table'].data
        self.assertItemsEqual(backups, expected_backups)

        # get second page
        expected_backups = mox_backups[size:2 * size]
        marker = expected_backups[0].id
        url = "&".join([base_url, "=".join([next, marker])])
        res = self._test_backups_index_paginated(
            marker=marker, sort_dir="desc", backups=expected_backups, url=url,
            has_more=True, has_prev=True)
        backups = res.context['volume_backups_table'].data
        self.assertItemsEqual(backups, expected_backups)

        # get last page
        expected_backups = mox_backups[-size:]
        marker = expected_backups[0].id
        url = "&".join([base_url, "=".join([next, marker])])
        res = self._test_backups_index_paginated(
            marker=marker, sort_dir="desc", backups=expected_backups, url=url,
            has_more=False, has_prev=True)
        backups = res.context['volume_backups_table'].data
        self.assertItemsEqual(backups, expected_backups)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_backups_index_paginated_prev_page(self):
        mox_backups = self.cinder_volume_backups.list()
        size = settings.API_RESULT_PAGE_SIZE
        base_url = reverse('horizon:project:volumes:backups_tab')
        prev = backup_tables.BackupsTable._meta.prev_pagination_param

        # prev from some page
        expected_backups = mox_backups[size:2 * size]
        marker = expected_backups[0].id
        url = "&".join([base_url, "=".join([prev, marker])])
        res = self._test_backups_index_paginated(
            marker=marker, sort_dir="asc", backups=expected_backups, url=url,
            has_more=True, has_prev=True)
        backups = res.context['volume_backups_table'].data
        self.assertItemsEqual(backups, expected_backups)

        # back to first page
        expected_backups = mox_backups[:size]
        marker = expected_backups[0].id
        url = "&".join([base_url, "=".join([prev, marker])])
        res = self._test_backups_index_paginated(
            marker=marker, sort_dir="asc", backups=expected_backups, url=url,
            has_more=True, has_prev=False)
        backups = res.context['volume_backups_table'].data
        self.assertItemsEqual(backups, expected_backups)
| |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
import mock
from manila import context
from manila import exception
from manila import test
from manila.volume import cinder
class FakeCinderClient(object):
    """Minimal stand-in for a cinderclient client object.

    Exposes ``volumes`` and ``volume_snapshots`` managers (one shared fake
    object serves both) whose methods return canned data; any attribute not
    explicitly defined resolves to ``None`` via ``__getattr__`` so tests can
    stub only the calls they assert on.
    """

    class Volumes(object):
        def get(self, volume_id):
            # Echo the requested id back, mimicking a found volume.
            return {'id': volume_id}

        def list(self, detailed, search_opts=None):
            # Fix: the default was a shared mutable ``{}`` (classic
            # mutable-default-argument pitfall); ``None`` is safe and the
            # argument is ignored either way.
            return [{'id': 'id1'}, {'id': 'id2'}]

        def create(self, *args, **kwargs):
            return {'id': 'created_id'}

        def __getattr__(self, item):
            # Any other manager method (delete, reserve, attach, ...) is a
            # no-op placeholder returning None.
            return None

    def __init__(self):
        self.volumes = self.Volumes()
        # Snapshot calls are served by the same fake manager object.
        self.volume_snapshots = self.volumes
class CinderApiTestCase(test.TestCase):
    """Tests for the manila -> cinder API wrapper (manila.volume.cinder.API).

    The cinderclient factory is replaced by a mock returning
    FakeCinderClient, and both _untranslate_* helpers become identity
    lambdas, so each test checks only the wrapper's delegation and its
    exception translation.
    """

    def setUp(self):
        super(CinderApiTestCase, self).setUp()
        self.api = cinder.API()
        self.cinderclient = FakeCinderClient()
        self.ctx = context.get_admin_context()
        self.mock_object(cinder, 'cinderclient',
                         mock.Mock(return_value=self.cinderclient))
        self.mock_object(cinder, '_untranslate_volume_summary_view',
                         lambda ctx, vol: vol)
        self.mock_object(cinder, '_untranslate_snapshot_summary_view',
                         lambda ctx, snap: snap)

    def test_get(self):
        volume_id = 'volume_id1'
        result = self.api.get(self.ctx, volume_id)
        self.assertEqual(result['id'], volume_id)

    def test_get_failed(self):
        # Making the mocked client factory raise NotFound exercises the
        # wrapper's translation to manila's VolumeNotFound.
        cinder.cinderclient.side_effect = cinder_exception.NotFound(404)
        volume_id = 'volume_id'
        self.assertRaises(exception.VolumeNotFound,
                          self.api.get, self.ctx, volume_id)

    def test_create(self):
        result = self.api.create(self.ctx, 1, '', '')
        self.assertEqual(result['id'], 'created_id')

    def test_create_failed(self):
        cinder.cinderclient.side_effect = cinder_exception.BadRequest(400)
        self.assertRaises(exception.InvalidInput,
                          self.api.create, self.ctx, 1, '', '')

    def test_create_not_found_error(self):
        cinder.cinderclient.side_effect = cinder_exception.NotFound(404)
        self.assertRaises(exception.NotFound,
                          self.api.create, self.ctx, 1, '', '')

    def test_create_failed_exception(self):
        # Any unexpected error is wrapped in the generic ManilaException.
        cinder.cinderclient.side_effect = Exception("error msg")
        self.assertRaises(exception.ManilaException,
                          self.api.create, self.ctx, 1, '', '')

    def test_get_all(self):
        cinder._untranslate_volume_summary_view.return_value = ['id1', 'id2']
        self.assertEqual([{'id': 'id1'}, {'id': 'id2'}],
                         self.api.get_all(self.ctx))

    def test_check_attach_volume_status_error(self):
        volume = {'status': 'error'}
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_attach, self.ctx, volume)

    def test_check_attach_volume_already_attached(self):
        volume = {'status': 'available'}
        volume['attach_status'] = "attached"
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_attach, self.ctx, volume)

    def test_check_attach_availability_zone_differs(self):
        volume = {'status': 'available'}
        volume['attach_status'] = "detached"
        instance = {'availability_zone': 'zone1'}
        volume['availability_zone'] = 'zone2'
        # Cross-AZ attach is disabled, so differing zones must be rejected.
        cinder.CONF.set_override('cinder_cross_az_attach', False)
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_attach, self.ctx, volume, instance)
        volume['availability_zone'] = 'zone1'
        self.assertIsNone(self.api.check_attach(self.ctx, volume, instance))
        cinder.CONF.reset()

    def test_check_attach(self):
        volume = {'status': 'available'}
        volume['attach_status'] = "detached"
        volume['availability_zone'] = 'zone1'
        instance = {'availability_zone': 'zone1'}
        cinder.CONF.set_override('cinder_cross_az_attach', False)
        self.assertIsNone(self.api.check_attach(self.ctx, volume, instance))
        cinder.CONF.reset()

    def test_check_detach(self):
        volume = {'status': 'available'}
        self.assertRaises(exception.InvalidVolume,
                          self.api.check_detach, self.ctx, volume)
        volume['status'] = 'non-available'
        self.assertIsNone(self.api.check_detach(self.ctx, volume))

    def test_update(self):
        fake_volume = {'fake': 'fake'}
        self.mock_object(self.cinderclient.volumes, 'get',
                         mock.Mock(return_value=fake_volume))
        self.mock_object(self.cinderclient.volumes, 'update')
        fake_volume_id = 'fake_volume'
        fake_data = {'test': 'test'}
        self.api.update(self.ctx, fake_volume_id, fake_data)
        self.cinderclient.volumes.get.assert_called_once_with(fake_volume_id)
        self.cinderclient.volumes.update.assert_called_once_with(fake_volume,
                                                                 **fake_data)

    def test_reserve_volume(self):
        self.mock_object(self.cinderclient.volumes, 'reserve')
        self.api.reserve_volume(self.ctx, 'id1')
        self.cinderclient.volumes.reserve.assert_called_once_with('id1')

    def test_unreserve_volume(self):
        self.mock_object(self.cinderclient.volumes, 'unreserve')
        self.api.unreserve_volume(self.ctx, 'id1')
        self.cinderclient.volumes.unreserve.assert_called_once_with('id1')

    def test_begin_detaching(self):
        self.mock_object(self.cinderclient.volumes, 'begin_detaching')
        self.api.begin_detaching(self.ctx, 'id1')
        self.cinderclient.volumes.begin_detaching.assert_called_once_with(
            'id1')

    def test_roll_detaching(self):
        self.mock_object(self.cinderclient.volumes, 'roll_detaching')
        self.api.roll_detaching(self.ctx, 'id1')
        self.cinderclient.volumes.roll_detaching.assert_called_once_with('id1')

    def test_attach(self):
        self.mock_object(self.cinderclient.volumes, 'attach')
        self.api.attach(self.ctx, 'id1', 'uuid', 'point')
        self.cinderclient.volumes.attach.assert_called_once_with('id1',
                                                                 'uuid',
                                                                 'point')

    def test_detach(self):
        self.mock_object(self.cinderclient.volumes, 'detach')
        self.api.detach(self.ctx, 'id1')
        self.cinderclient.volumes.detach.assert_called_once_with('id1')

    def test_initialize_connection(self):
        self.mock_object(self.cinderclient.volumes, 'initialize_connection')
        self.api.initialize_connection(self.ctx, 'id1', 'connector')
        self.cinderclient.volumes.initialize_connection.\
            assert_called_once_with('id1', 'connector')

    def test_terminate_connection(self):
        self.mock_object(self.cinderclient.volumes, 'terminate_connection')
        self.api.terminate_connection(self.ctx, 'id1', 'connector')
        self.cinderclient.volumes.terminate_connection.\
            assert_called_once_with('id1', 'connector')

    def test_delete(self):
        self.mock_object(self.cinderclient.volumes, 'delete')
        self.api.delete(self.ctx, 'id1')
        self.cinderclient.volumes.delete.assert_called_once_with('id1')

    def test_get_snapshot(self):
        snapshot_id = 'snapshot_id1'
        result = self.api.get_snapshot(self.ctx, snapshot_id)
        self.assertEqual(result['id'], snapshot_id)

    def test_get_snapshot_failed(self):
        cinder.cinderclient.side_effect = cinder_exception.NotFound(404)
        snapshot_id = 'snapshot_id'
        self.assertRaises(exception.VolumeSnapshotNotFound,
                          self.api.get_snapshot, self.ctx, snapshot_id)

    def test_get_all_snapshots(self):
        cinder._untranslate_snapshot_summary_view.return_value = ['id1', 'id2']
        self.assertEqual([{'id': 'id1'}, {'id': 'id2'}],
                         self.api.get_all_snapshots(self.ctx))

    def test_create_snapshot(self):
        result = self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '')
        self.assertEqual(result['id'], 'created_id')

    def test_create_force(self):
        result = self.api.create_snapshot_force(self.ctx,
                                                {'id': 'id1'}, '', '')
        self.assertEqual(result['id'], 'created_id')

    def test_delete_snapshot(self):
        self.mock_object(self.cinderclient.volume_snapshots, 'delete')
        self.api.delete_snapshot(self.ctx, 'id1')
        self.cinderclient.volume_snapshots.delete.assert_called_once_with(
            'id1')
| |
from binascii import hexlify
from configparser import RawConfigParser
import os
from io import StringIO
import stat
import subprocess
import sys
import shutil
import tempfile
import time
import unittest
from hashlib import sha256
from .. import xattr
from ..archive import Archive, ChunkBuffer, CHUNK_MAX_EXP
from ..archiver import Archiver
from ..cache import Cache
from ..crypto import bytes_to_long, num_aes_blocks
from ..helpers import Manifest
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import BaseTestCase
from .mock import patch
# FUSE-mount tests depend on the optional llfuse package; record whether it
# is importable so those tests can be skipped cleanly.
try:
    import llfuse
    has_llfuse = True or llfuse  # avoids "unused import"
except ImportError:
    has_llfuse = False
# True when this platform's os module provides BSD-style lchflags().
has_lchflags = hasattr(os, 'lchflags')
# The borg source tree itself, used as input data by create_src_archive().
src_dir = os.path.join(os.getcwd(), os.path.dirname(__file__), '..')
class changedir:
    """Context manager: chdir into a target directory on entry and restore
    the previous working directory on exit."""

    def __init__(self, dir):
        # Target directory; the switch happens in __enter__.
        self._target = dir

    def __enter__(self):
        self._saved_cwd = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, *args, **kw):
        os.chdir(self._saved_cwd)
class environment_variable:
    """Context manager that temporarily sets environment variables.

    On exit every variable is restored to its previous value; variables
    that did not exist before entry are removed again.  (The original
    __exit__ only restored non-None values, so variables that were unset
    beforehand leaked into the environment.)
    """

    def __init__(self, **values):
        self.values = values
        self.old_values = {}

    def __enter__(self):
        for k, v in self.values.items():
            # Remember the prior value (None == "was not set").
            self.old_values[k] = os.environ.get(k)
            os.environ[k] = v

    def __exit__(self, *args, **kw):
        for k, v in self.old_values.items():
            if v is None:
                # Variable did not exist before entry -> remove it again.
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
class ArchiverTestCaseBase(BaseTestCase):
    """Shared fixture for archiver CLI tests.

    Creates an isolated temp tree (repository, input, output, keys, cache),
    points the relevant BORG_* environment variables at it, and chdirs into
    it for the duration of each test.
    """

    # Repository location prefix; subclasses override it (e.g. for remote
    # 'ssh://' style locations).
    prefix = ''

    def setUp(self):
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = '1'
        self.archiver = Archiver()
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        # Fix: leave the tmp tree *before* deleting it.  The original order
        # (rmtree first, chdir after) removed the process's current working
        # directory, which can fail on some platforms and leaves a dangling
        # cwd while rmtree runs.
        os.chdir(self._old_wd)
        shutil.rmtree(self.tmpdir)

    def cmd(self, *args, **kw):
        """Run the borg CLI with *args* and return its combined output.

        Recognized **kw options (kept as kw.get so unknown extras are
        ignored, matching historical call sites):
          exit_code: expected return code (default 0).
          fork: run as a subprocess instead of in-process (default False).
        Asserts that the actual return code equals ``exit_code``.
        """
        exit_code = kw.get('exit_code', 0)
        fork = kw.get('fork', False)
        if fork:
            try:
                output = subprocess.check_output((sys.executable, '-m', 'borg.archiver') + args)
                ret = 0
            except subprocess.CalledProcessError as e:
                output = e.output
                ret = e.returncode
            output = os.fsdecode(output)
            if ret != exit_code:
                print(output)
            self.assert_equal(exit_code, ret)
            return output
        args = list(args)
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO()
            output = StringIO()
            sys.stdout = sys.stderr = output
            ret = self.archiver.run(args)
            # Restore the real streams before the diagnostic print below;
            # the finally clause makes this restore idempotent.
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
            if ret != exit_code:
                print(output.getvalue())
            self.assert_equal(exit_code, ret)
            return output.getvalue()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr

    def create_src_archive(self, name):
        # Archive the borg source tree itself as convenient test input.
        self.cmd('create', self.repository_location + '::' + name, src_dir)
class ArchiverTestCase(ArchiverTestCaseBase):
def create_regular_file(self, name, size=0, contents=None):
filename = os.path.join(self.input_path, name)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'wb') as fd:
if contents is None:
contents = b'X' * size
fd.write(contents)
    def create_test_files(self):
        """Create a minimal test case including all supported file types.

        NOTE(review): chown and mknod of device nodes normally require
        root (or fakeroot); this fixture assumes such an environment.
        """
        # File
        self.create_regular_file('empty', size=0)
        # next code line raises OverflowError on 32bit cpu (raspberry pi 2):
        # 2600-01-01 > 2**64 ns
        # os.utime('input/empty', (19880895600, 19880895600))
        # thus, we better test with something not that far in future:
        # 2038-01-19 (1970 + 2^31 - 1 seconds) is the 32bit "deadline":
        os.utime('input/empty', (2**31 - 1, 2**31 - 1))
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File owner
        os.chown('input/file1', 100, 200)
        # File mode
        os.chmod('input/file1', 0o7755)
        os.chmod('input/dir2', 0o555)
        # Block device
        os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
        # Char device
        os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
        # Hard link
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'hardlink'))
        # Symlink
        os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        if xattr.is_enabled(self.input_path):
            xattr.setxattr(os.path.join(self.input_path, 'file1'), 'user.foo', b'bar')
        # XXX this always fails for me
        # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
        # same for newer ubuntu and centos.
        # if this is supported just on specific platform, platform should be checked first,
        # so that the test setup for all tests using it does not fail here always for others.
        # xattr.setxattr(os.path.join(self.input_path, 'link1'), 'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
    def test_basic_functionality(self):
        """init/create/extract round trip plus list/info sanity checks."""
        self.create_test_files()
        self.cmd('init', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(len(self.cmd('list', self.repository_location).splitlines()), 2)
        self.assert_equal(len(self.cmd('list', self.repository_location + '::test').splitlines()), 11)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.cmd('info', self.repository_location + '::test')
        self.assert_in('Number of files: 4', info_output)
        # Wipe the cache to force re-initialization on the next info call.
        shutil.rmtree(self.cache_path)
        with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='1'):
            info_output2 = self.cmd('info', self.repository_location + '::test')
        # info_output2 starts with some "initializing cache" text but should
        # end the same way as info_output
        assert info_output2.endswith(info_output)
    def _extract_repository_id(self, path):
        # NOTE(review): *path* is accepted but unused -- the id is always
        # read from self.repository_path; confirm before relying on the
        # argument at new call sites.
        return Repository(self.repository_path).id
    def _set_repository_id(self, path, id):
        """Overwrite the repository id in *path*'s config; return the id now stored.

        NOTE(review): the config file is read and written under *path*, but
        the return value re-opens self.repository_path.  All call sites here
        pass that same path, so the mix is harmless, but confirm before
        calling with a different path.
        """
        config = RawConfigParser()
        config.read(os.path.join(path, 'config'))
        config.set('repository', 'id', hexlify(id).decode('ascii'))
        with open(os.path.join(path, 'config'), 'w') as fd:
            config.write(fd)
        return Repository(self.repository_path).id
    def test_sparse_file(self):
        """Archive a sparse file and verify --sparse extraction restores it."""
        # no sparse file support on Mac OS X
        sparse_support = sys.platform != 'darwin'
        filename = os.path.join(self.input_path, 'sparse')
        content = b'foobar'
        hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
        with open(filename, 'wb') as fd:
            # create a file that has a hole at the beginning and end (if the
            # OS and filesystem supports sparse files)
            fd.seek(hole_size, 1)
            fd.write(content)
            fd.seek(hole_size, 1)
            pos = fd.tell()
            fd.truncate(pos)
        total_len = hole_size + len(content) + hole_size
        st = os.stat(filename)
        self.assert_equal(st.st_size, total_len)
        if sparse_support and hasattr(st, 'st_blocks'):
            self.assert_true(st.st_blocks * 512 < total_len / 10)  # is input sparse?
        self.cmd('init', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', '--sparse', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        filename = os.path.join(self.output_path, 'input', 'sparse')
        with open(filename, 'rb') as fd:
            # check if file contents are as expected
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_equal(fd.read(len(content)), content)
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
        st = os.stat(filename)
        self.assert_equal(st.st_size, total_len)
        if sparse_support and hasattr(st, 'st_blocks'):
            self.assert_true(st.st_blocks * 512 < total_len / 10)  # is output sparse?
def test_repository_swap_detection(self):
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=passphrase', self.repository_location)
repository_id = self._extract_repository_id(self.repository_path)
self.cmd('create', self.repository_location + '::test', 'input')
shutil.rmtree(self.repository_path)
self.cmd('init', '--encryption=none', self.repository_location)
self._set_repository_id(self.repository_path, repository_id)
self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
self.assert_raises(Cache.EncryptionMethodMismatch, lambda: self.cmd('create', self.repository_location + '::test.2', 'input'))
def test_repository_swap_detection2(self):
self.create_test_files()
self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=passphrase', self.repository_location + '_encrypted')
self.cmd('create', self.repository_location + '_encrypted::test', 'input')
shutil.rmtree(self.repository_path + '_encrypted')
os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
self.assert_raises(Cache.RepositoryAccessAborted, lambda: self.cmd('create', self.repository_location + '_encrypted::test.2', 'input'))
def test_strip_components(self):
self.cmd('init', self.repository_location)
self.create_regular_file('dir/file')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
self.assert_true(not os.path.exists('file'))
with self.assert_creates_file('file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
with self.assert_creates_file('dir/file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
with self.assert_creates_file('input/dir/file'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
def test_extract_include_exclude(self):
self.cmd('init', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file3', size=1024 * 80)
self.create_regular_file('file4', size=1024 * 80)
self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', 'input/file1', )
self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
with changedir('output'):
self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
with changedir('output'):
self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
def test_exclude_caches(self):
self.cmd('init', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('cache1/CACHEDIR.TAG', contents=b'Signature: 8a477f597d28d172789f06886806bc55 extra stuff')
self.create_regular_file('cache2/CACHEDIR.TAG', contents=b'invalid signature')
self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
self.assert_equal(sorted(os.listdir('output/input/cache2')), ['CACHEDIR.TAG'])
def test_path_normalization(self):
self.cmd('init', self.repository_location)
self.create_regular_file('dir1/dir2/file', size=1024 * 80)
with changedir('input/dir1/dir2'):
self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
output = self.cmd('list', self.repository_location + '::test')
self.assert_not_in('..', output)
self.assert_in(' input/dir1/dir2/file', output)
def test_exclude_normalization(self):
self.cmd('init', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
with changedir('input'):
self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test1')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
with changedir('input'):
self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test2')
self.assert_equal(sorted(os.listdir('output')), ['file2'])
self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test3')
self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
def test_repeated_files(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input')
def test_overwrite(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
# Overwriting regular files and directories should be supported
os.mkdir('output/input')
os.mkdir('output/input/file1')
os.mkdir('output/input/dir2')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
self.assert_dirs_equal('input', 'output/input')
# But non-empty dirs should fail
os.unlink('output/input/file1')
os.mkdir('output/input/file1')
os.mkdir('output/input/file1/dir')
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', exit_code=1)
def test_rename(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test', 'test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('rename', self.repository_location + '::test.2', 'test.4')
self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
# Make sure both archives have been renamed
repository = Repository(self.repository_path)
manifest, key = Manifest.load(repository)
self.assert_equal(len(manifest.archives), 2)
self.assert_in('test.3', manifest.archives)
self.assert_in('test.4', manifest.archives)
def test_delete(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('delete', self.repository_location + '::test')
self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
self.cmd('delete', self.repository_location + '::test.2')
# Make sure all data except the manifest has been deleted
repository = Repository(self.repository_path)
self.assert_equal(len(repository), 1)
def test_delete_repo(self):
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('dir2/file2', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('create', self.repository_location + '::test.2', 'input')
self.cmd('delete', self.repository_location)
# Make sure the repo is gone
self.assertFalse(os.path.exists(self.repository_path))
def test_corrupted_repository(self):
self.cmd('init', self.repository_location)
self.create_src_archive('test')
self.cmd('extract', '--dry-run', self.repository_location + '::test')
self.cmd('check', self.repository_location)
name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[0]
with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
fd.seek(100)
fd.write(b'XXXX')
self.cmd('check', self.repository_location, exit_code=1)
def test_readonly_repository(self):
self.cmd('init', self.repository_location)
self.create_src_archive('test')
os.system('chmod -R ugo-w ' + self.repository_path)
try:
self.cmd('extract', '--dry-run', self.repository_location + '::test')
finally:
# Restore permissions so shutil.rmtree is able to delete it
os.system('chmod -R u+w ' + self.repository_path)
def test_umask(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
mode = os.stat(self.repository_path).st_mode
self.assertEqual(stat.S_IMODE(mode), 0o700)
def test_cmdline_compatibility(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
output = self.cmd('verify', '-v', self.repository_location + '::test')
self.assert_in('"borg verify" has been deprecated', output)
output = self.cmd('prune', self.repository_location, '--hourly=1')
self.assert_in('"--hourly" has been deprecated. Use "--keep-hourly" instead', output)
def test_prune_repository(self):
self.cmd('init', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
output = self.cmd('prune', '-v', '--dry-run', self.repository_location, '--keep-daily=2')
self.assert_in('Keeping archive: test2', output)
self.assert_in('Would prune: test1', output)
output = self.cmd('list', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.cmd('prune', self.repository_location, '--keep-daily=2')
output = self.cmd('list', self.repository_location)
self.assert_not_in('test1', output)
self.assert_in('test2', output)
def test_usage(self):
self.assert_raises(SystemExit, lambda: self.cmd())
self.assert_raises(SystemExit, lambda: self.cmd('-h'))
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_repository(self):
        """Mount a whole repository via FUSE and compare each archive's
        tree against the original input directory."""
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.cmd('init', self.repository_location)
        self.create_test_files()
        self.cmd('create', self.repository_location + '::archive', 'input')
        self.cmd('create', self.repository_location + '::archive2', 'input')
        try:
            # fork=True: the mount daemonizes, so wait until the filesystem
            # actually appears before comparing directory trees.
            self.cmd('mount', self.repository_location, mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'))
        finally:
            # Unmount with the platform's native tool so the daemon exits.
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_archive(self):
        """Mount a single archive via FUSE and compare its tree against the
        original input directory."""
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        self.cmd('init', self.repository_location)
        self.create_test_files()
        self.cmd('create', self.repository_location + '::archive', 'input')
        try:
            # fork=True: the mount daemonizes; wait for the mount to appear.
            self.cmd('mount', self.repository_location + '::archive', mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'))
        finally:
            # Unmount with the platform's native tool so the daemon exits.
            if sys.platform.startswith('linux'):
                os.system('fusermount -u ' + mountpoint)
            else:
                os.system('umount ' + mountpoint)
            os.rmdir(mountpoint)
            # Give the daemon some time to exit
            time.sleep(.2)
    def verify_aes_counter_uniqueness(self, method):
        """Run a create/create/delete cycle with encryption *method* and
        assert that no AES CTR counter value is ever used twice across all
        stored objects (counter reuse would break CTR-mode security)."""
        seen = set() # Chunks already seen
        used = set() # counter values already used
        def verify_uniqueness():
            # Walk every object in the repository; for each previously
            # unseen chunk, record all counter values its encryption
            # consumed and assert none was used before.
            repository = Repository(self.repository_path)
            for key, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                data = repository.get(key)
                hash = sha256(data).digest()
                if hash not in seen:
                    seen.add(hash)
                    # NOTE(review): assumes the stored object layout is a
                    # 33-byte header, an 8-byte nonce at [33:41], then the
                    # ciphertext -- confirm against the key implementation
                    # before changing these offsets.
                    num_blocks = num_aes_blocks(len(data) - 41)
                    nonce = bytes_to_long(data[33:41])
                    for counter in range(nonce, nonce + num_blocks):
                        self.assert_not_in(counter, used)
                        used.add(counter)
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=' + method, self.repository_location)
        verify_uniqueness()
        self.cmd('create', self.repository_location + '::test', 'input')
        verify_uniqueness()
        self.cmd('create', self.repository_location + '::test.2', 'input')
        verify_uniqueness()
        self.cmd('delete', self.repository_location + '::test.2')
        verify_uniqueness()
        # The counters used must form a gapless sequence starting at zero.
        self.assert_equal(used, set(range(len(used))))
    def test_aes_counter_uniqueness_keyfile(self):
        # CTR counter uniqueness with key-file based encryption.
        self.verify_aes_counter_uniqueness('keyfile')
    def test_aes_counter_uniqueness_passphrase(self):
        # CTR counter uniqueness with passphrase based encryption.
        self.verify_aes_counter_uniqueness('passphrase')
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Tests for 'check' / 'check --repair' against deliberately damaged
    repositories.  setUp shrinks the chunk buffer so archive metadata spans
    several chunks, making partial-metadata damage possible."""
    def setUp(self):
        super().setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.cmd('init', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')
    def open_archive(self, name):
        """Return an (Archive, Repository) pair for the archive *name*."""
        repository = Repository(self.repository_path)
        manifest, key = Manifest.load(repository)
        archive = Archive(repository, key, manifest, name)
        return archive, repository
    def test_check_usage(self):
        """--repository-only / --archives-only select the check phases."""
        output = self.cmd('check', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        output = self.cmd('check', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
    def test_missing_file_chunk(self):
        """A deleted file-content chunk is detected and repairable."""
        archive, repository = self.open_archive('archive1')
        for item in archive.iter_items():
            if item[b'path'].endswith('testsuite/archiver.py'):
                # Drop the last content chunk of this file.
                repository.delete(item[b'chunks'][-1][0])
                break
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_item_chunk(self):
        """A deleted archive metadata (item) chunk is detected and repairable."""
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.metadata[b'items'][-5])
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_metadata(self):
        """A deleted top-level archive object is detected and repairable."""
        archive, repository = self.open_archive('archive1')
        repository.delete(archive.id)
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_manifest(self):
        """With the manifest gone, --repair rebuilds it and recovers both
        archives."""
        archive, repository = self.open_archive('archive1')
        repository.delete(Manifest.MANIFEST_ID)
        repository.commit()
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_extra_chunks(self):
        """An orphaned chunk keeps check failing until --repair removes it."""
        self.cmd('check', self.repository_location, exit_code=0)
        repository = Repository(self.repository_location)
        repository.put(b'01234567890123456789012345678901', b'xxxx')
        repository.commit()
        repository.close()
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Repeats the inherited ArchiverTestCase tests over the remote
    repository protocol (location prefix below)."""
    prefix = '__testsuite__:'
    def test_remote_repo_restrict_to_path(self):
        """A server started with --restrict-to-path only allows creating
        repositories below one of the given path prefixes."""
        self.cmd('init', self.repository_location)
        path_prefix = os.path.dirname(self.repository_path)
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            self.assert_raises(PathNotAllowed, lambda: self.cmd('init', self.repository_location + '_1'))
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', self.repository_location + '_2')
        # With several --restrict-to-path options, any matching prefix suffices.
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', self.repository_location + '_3')
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Services related to role based access control.
from django.http import HttpResponse, HttpResponseRedirect
from mint.django_rest.deco import return_xml, access, requires
from mint.django_rest.rbuilder import service
from mint.django_rest.rbuilder.rbac import models
from mint.django_rest.rbuilder.rbac.rbacauth import rbac
from mint.django_rest.rbuilder.querysets import models as querymodels
class BaseRbacService(service.BaseService):
    """Common base for the RBAC REST services below; a single hook point
    for behaviour shared by all of them."""
    pass
class RbacService(BaseRbacService):
    """
    Top-level RBAC resource, served for URL discovery purposes.
    """
    @access.anonymous
    @return_xml
    def rest_GET(self, request):
        return models.Rbac()
class RbacPermissionTypesService(BaseRbacService):
    """
    Returns the list of permission types configurable for RBAC.
    """
    @access.anonymous
    @return_xml
    def rest_GET(self, request):
        return self.get()
    def get(self):
        # Read-only delegation to the manager layer.
        return self.mgr.getRbacPermissionTypes()
class RbacPermissionTypeService(BaseRbacService):
    """
    Returns a single RBAC permission type, looked up by id.
    """
    @access.anonymous
    @return_xml
    def rest_GET(self, request, permission_type_id):
        return self.get(permission_type_id)
    def get(self, permission_type_id):
        return self.mgr.getRbacPermissionType(permission_type_id)
class RbacPermissionsService(BaseRbacService):
    """
    Lists and creates RBAC permission grants.

    <grants>
       <grant id="http://hostname/api/rbac/permissions/1">
           <permission_id>1</permission_id>
           <role id="http://..."/>
           <queryset id="http://..."/>
           <permission>write</permission>
       </grant>
       ...
    </grants>
    """
    # READ
    @access.admin
    @return_xml
    def rest_GET(self, request):
        return self.get()
    def get(self):
        return self.mgr.getRbacPermissions()
    # CREATE
    @access.admin
    @requires('grant', save=False)
    @return_xml
    def rest_POST(self, request, grant):
        # 'grant' is the model deserialized from the request body
        # (save=False: persistence is left to the manager).
        return self.mgr.addRbacPermission(grant, request._authUser)
class RbacPermissionService(BaseRbacService):
    """Read, update and delete a single permission grant."""
    # READ
    @access.admin
    @return_xml
    def rest_GET(self, request, permission_id):
        return self.get(permission_id)
    def get(self, permission_id):
        return self.mgr.getRbacPermission(permission_id)
    # UPDATE
    @access.admin
    @requires('grant', save=False)
    @return_xml
    def rest_PUT(self, request, permission_id, grant):
        return self.mgr.updateRbacPermission(permission_id, grant, request._authUser)
    # DELETE
    @access.admin
    def rest_DELETE(self, request, permission_id):
        self.mgr.deleteRbacPermission(permission_id)
        # 204 No Content: deletion returns no body.
        return HttpResponse(status=204)
class RbacQuerySetGrantMatrixService(BaseRbacService):
    '''
    query_set/N/grant_matrix -- a very UI specific
    transmogrification of grants data.
    '''
    @access.admin
    @return_xml
    def rest_GET(self, request, query_set_id):
        # The request object is passed through so the manager can read
        # query parameters when building the matrix.
        return self.mgr.getRbacGrantMatrix(query_set_id, request)
class RbacRolesService(BaseRbacService):
    """
    Lists and creates roles.

    <roles>
        <role id="http://hostname/api/rbac/roles/sysadmin">
            <role_id>sysadmin</role_id>
        </role>
    </roles>
    """
    # READ
    @access.admin
    @return_xml
    def rest_GET(self, request):
        return self.get()
    def get(self):
        return self.mgr.getRbacRoles()
    # CREATE
    # NOTE(review): decorator order here (@return_xml above @requires)
    # differs from every other POST/PUT handler in this module -- confirm
    # this is intentional.
    @access.admin
    @return_xml
    @requires('role', save=False)
    def rest_POST(self, request, role):
        return self.mgr.addRbacRole(role, request._authUser)
def can_read_role(view, request, role_id, *args, **kwargs):
# users are allowed to see their roles, you will notice this
# doesn't run through RBAC *directly* because we can't use
# RBAC to RBAC RBAC (actually, it might work, but...)
user = request._authUser
if user.is_admin:
return True
in_role = view.mgr.isUserInRole(user, role_id)
if in_role:
return True
return False
def can_read_user_roles(view, request, user_id, role_id=None, *args, **kwargs):
# users can see their roles
user = request._authUser
if role_id is None:
if user.is_admin:
return True
if str(user.pk) == str(user_id):
return True
return False
else:
return can_read_role(view, request, role_id, *args, **kwargs)
class RbacRoleService(BaseRbacService):
    """
    Read, update and delete a single role.

    <roles>
        <role id="http://hostname/api/rbac/roles/sysadmin">
            <role_id>sysadmin</role_id>
        </role>
    </roles>
    """
    # READ -- users may read roles they belong to; admins may read any
    # (see can_read_role).
    @rbac(can_read_role)
    @return_xml
    def rest_GET(self, request, role_id):
        return self.get(role_id)
    def get(self, role_id):
        return self.mgr.getRbacRole(role_id)
    # UPDATE
    @access.admin
    @requires('role', save=False)
    @return_xml
    def rest_PUT(self, request, role_id, role):
        return self.mgr.updateRbacRole(role_id, role, request._authUser)
    # DELETE
    @access.admin
    def rest_DELETE(self, request, role_id):
        self.mgr.deleteRbacRole(role_id)
        return HttpResponse(status=204)
class RbacUserRolesService(BaseRbacService):
    """
    Lists the roles assigned to a user and adds/removes assignments.
    Users may view their own roles; only admins may modify them.
    """
    # READ
    @rbac(can_read_user_roles)
    @return_xml
    def rest_GET(self, request, user_id, role_id=None):
        return self.get(user_id, role_id)
    # TODO: split into two services -- a function should not return
    # multiple kinds of entities.
    def get(self, user_id, role_id=None):
        if role_id is None:
            return self.mgr.getRbacUserRoles(user_id)
        return self.mgr.getRbacUserRole(user_id, role_id)
    # CREATE -- assign an RBAC role to the user
    @access.admin
    @requires('role', save=False)
    @return_xml
    def rest_POST(self, request, user_id, role):
        return self.mgr.addRbacUserRole(user_id, role, request._authUser)
    # DELETE -- revoke the role assignment
    @access.admin
    def rest_DELETE(self, request, user_id, role_id):
        self.mgr.deleteRbacUserRole(user_id, role_id)
        return HttpResponse(status=204)
class RbacRoleGrantsService(BaseRbacService):
    """
    The permission grants attached to a role.

    <grants>
       ...
    </grants>
    """
    # READ
    @access.admin
    @return_xml
    def rest_GET(self, request, role_id, grant_id=None):
        return self.get(role_id, grant_id)
    def get(self, role_id, grant_id=None):
        # With a grant_id return that one grant; otherwise every grant
        # attached to the role.
        if grant_id is not None:
            return self.mgr.getRbacPermission(grant_id)
        else:
            return self.mgr.getRbacPermissionsForRole(role_id)
    # CREATE -- add a grant
    # NOTE -- same behaviour as RbacPermissionsService.rest_POST
    @access.admin
    @requires('grant', save=False)
    @return_xml
    def rest_POST(self, request, role_id, grant):
        return self.mgr.addRbacPermission(grant, request._authUser)
    # DELETE -- remove a grant (role_id is not consulted here)
    # NOTE -- same behaviour as RbacPermissionService.rest_DELETE
    @access.admin
    def rest_DELETE(self, request, role_id, grant_id):
        self.mgr.deleteRbacPermission(grant_id)
        return HttpResponse(status=204)
class RbacRoleUsersService(BaseRbacService):
    """
    The users that hold a given role.
    """
    # READ
    @access.admin
    @return_xml
    def rest_GET(self, request, role_id, user_id=None):
        return self.get(role_id, user_id, request)
    def get(self, role_id, user_id=None, request=None):
        if user_id is None:
            # Redirect to a filtered 'All Users' query set so the caller
            # receives a standard collection resource; request.params
            # carries the original query string along.
            qs = querymodels.QuerySet.objects.get(name='All Users', is_public=True)
            url = "/api/v1/query_sets/%s/all;filter_by=[user_roles.role.pk,EQUAL,%s]%s" % (qs.pk, role_id, request.params)
            return HttpResponseRedirect(url)
        else:
            # obsolete URL no longer linked to since redirect
            url = "/api/v1/users/%s%s" % (user_id, request.params)
            return HttpResponseRedirect(url)
    # CREATE -- add a user to this role
    @access.admin
    @requires('user', save=False)
    @return_xml
    def rest_POST(self, request, role_id, user):
        return self.mgr.addRbacUserRole(user.pk, role_id, request._authUser)
    # DELETE
    @access.admin
    def rest_DELETE(self, request, role_id, user_id):
        self.mgr.deleteRbacUserRole(user_id, role_id)
        return HttpResponse(status=204)
| |
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WMain_I2C.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
from IOST_WSetupTestcase import *
import gtk
import gtk.glade
#======================================================================
IOST_WMain_I2C_Debug_Enable = 0
#======================================================================
class IOST_WMain_I2C():
    """
    Wires up the I2C widgets of the IOST_WMain_Skylark window: looks the
    widgets up from the Glade builder, initialises them from the saved
    configuration and handles their enable/config signals.

    NOTE(review): relies on attributes supplied by the composing class
    (self.IOST_Objs, self.IOST_Data, self.WSetupTestcase_show) -- this
    class is not usable stand-alone.
    """
    def __init__(self, glade_filename, window_name, builder=None):
        """Remember the window name and reuse the given gtk.Builder, or
        create one from *glade_filename* and autoconnect its signals."""
        self.IOST_WMain_I2C_window_name = window_name
        if not builder:
            self.IOST_I2C_Builder = gtk.Builder()
            self.IOST_I2C_Builder.add_from_file(glade_filename)
            self.IOST_I2C_Builder.connect_signals(self)
        else:
            self.IOST_I2C_Builder = builder
    #----------------------------------------------------------------------
    def GetI2C_Obj(self, window_name):
        """Look up the global I2C enable checkbox and, for each port, the
        per-port config checkbox (_CB) and config button (_B)."""
        self.IOST_Objs[window_name][window_name+"_IP_Enable_I2C_CB"] = self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_IP_Enable_I2C_CB"])
        for i in range(0, self.IOST_Data["I2C_PortNum"]):
            self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_CB"] = \
                self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_CB"])
            self.IOST_Objs[window_name][window_name+"_Config_I2C"+str(i)+"_B"] = \
                self.IOST_I2C_Builder.get_object(self.IOST_Objs[window_name]["_Config_I2C"+str(i)+"_B"])
    #----------------------------------------------------------------------
    def SetValueToI2C_Obj(self, window_name):
        """Push the saved state from self.IOST_Data into the I2C widgets
        (called once when the main window starts)."""
        objs = self.IOST_Objs[window_name]
        port_num = self.IOST_Data["I2C_PortNum"]
        if self.IOST_Data["I2C"] == "Enable":
            objs[window_name+"_IP_Enable_I2C_CB"].set_active(True)
            for i in range(0, port_num):
                enabled = self.IOST_Data["I2C"+str(i)][0] != "Disable"
                objs[window_name+"_Config_I2C"+str(i)+"_CB"].set_active(enabled)
                objs[window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(enabled)
        else:
            objs[window_name+"_IP_Enable_I2C_CB"].set_active(False)
            for i in range(0, port_num):
                objs[window_name+"_Config_I2C"+str(i)+"_CB"].set_sensitive(False)
                objs[window_name+"_Config_I2C"+str(i)+"_B"].set_sensitive(False)
        # Update test case count: the first list entry is the
        # Enable/Disable flag, the remaining entries are test cases.
        for i in range(0, port_num):
            self.IOST_Data["I2C"+str(i)+"_TestCaseNum"] = len(self.IOST_Data["I2C"+str(i)]) - 1
    #----------------------------------------------------------------------
    def _I2C_Config_Clicked(self, port):
        """Shared handler body: open the test-case setup window for *port*."""
        self.WSetupTestcase_show("IOST_WSetupTestcase", "_Skylark", "I2C%d" % port)
    def _I2C_Config_Toggled(self, port):
        """Shared handler body: sync the config button's sensitivity and
        the saved Enable/Disable flag with the port's checkbox state."""
        w = self.IOST_WMain_I2C_window_name
        key = "I2C%d" % port
        active = self.IOST_Objs[w][w+"_Config_"+key+"_CB"].get_active()
        self.IOST_Objs[w][w+"_Config_"+key+"_B"].set_sensitive(active)
        self.IOST_Data[key][0] = 'Enable' if active else 'Disable'
        if IOST_WMain_I2C_Debug_Enable:
            print(self.IOST_Data[key][0])
    # The handlers below must keep their exact names: they are connected
    # by name from the Glade file via connect_signals().
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C0_B_clicked(self, object, data=None):
        "Control to ConfigI2C-0 button "
        self._I2C_Config_Clicked(0)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C0_C_toggled(self, object, data=None):
        "ConfigI2C-0 checkbox toggled"
        self._I2C_Config_Toggled(0)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C1_B_clicked(self, object, data=None):
        "Control to ConfigI2C-1 button "
        self._I2C_Config_Clicked(1)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C1_C_toggled(self, object, data=None):
        "ConfigI2C-1 checkbox toggled"
        self._I2C_Config_Toggled(1)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C2_B_clicked(self, object, data=None):
        "Control to ConfigI2C-2 button "
        self._I2C_Config_Clicked(2)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C2_C_toggled(self, object, data=None):
        "ConfigI2C-2 checkbox toggled"
        self._I2C_Config_Toggled(2)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C3_B_clicked(self, object, data=None):
        "Control to ConfigI2C-3 button "
        self._I2C_Config_Clicked(3)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C3_C_toggled(self, object, data=None):
        "ConfigI2C-3 checkbox toggled"
        self._I2C_Config_Toggled(3)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C4_B_clicked(self, object, data=None):
        "Control to ConfigI2C-4 button "
        self._I2C_Config_Clicked(4)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C4_C_toggled(self, object, data=None):
        "ConfigI2C-4 checkbox toggled"
        self._I2C_Config_Toggled(4)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C5_B_clicked(self, object, data=None):
        "Control to ConfigI2C-5 button "
        self._I2C_Config_Clicked(5)
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_I2C5_C_toggled(self, object, data=None):
        "ConfigI2C-5 checkbox toggled"
        self._I2C_Config_Toggled(5)
    #----------------------------------------------------------------------
    def on_IOST_WMain_IP_Enable_I2C_CB_toggled(self, object, data=None):
        """Global I2C enable checkbox: grey out / restore all port widgets
        and record the new state in self.IOST_Data."""
        w = self.IOST_WMain_I2C_window_name
        Res = self.IOST_Objs[w][w+"_IP_Enable_I2C_CB"].get_active()
        self.IOST_WMain_I2C_set_sensitive_all(Res)
        self.IOST_Data["I2C"] = 'Enable' if Res else 'Disable'
    #----------------------------------------------------------------------
    def IOST_WMain_I2C_set_sensitive_all(self, value):
        """Set sensitivity of every port's checkbox to *value*; a port's
        config button only becomes sensitive when the port itself is
        enabled and *value* is truthy."""
        w = self.IOST_WMain_I2C_window_name
        for i in range(0, self.IOST_Data["I2C_PortNum"]):
            self.IOST_Objs[w][w+"_Config_I2C"+str(i)+"_CB"].set_sensitive(value)
            button = self.IOST_Objs[w][w+"_Config_I2C"+str(i)+"_B"]
            if value and self.IOST_Data["I2C"+str(i)][0] == "Enable":
                button.set_sensitive(value)
            else:
                button.set_sensitive(False)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
| |
# Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import signal
import sys
import subprocess
from celery import Task
from celery.utils.log import get_task_logger
from mako.template import Template
from rpy2.robjects import IntVector
from rpy2.robjects.packages import SignatureTranslatedAnonymousPackage
from sqlalchemy.engine.url import make_url
logger = get_task_logger(__name__)
class PythonTask(Task):
    """Abstract celery task to run Python code.

    Attributes:
        abstract : boolean
            Abstract classes are not registered,
            but are used as the base class for new task types.
        label : string
            Human readable label of task.
        title : string
            Title of task. Should be a single line.
        description : string
            Description of task. May contain html.
        autoregister : boolean
            If disabled this task won't be registered automatically.
        js_form : string
            Filename of javascript form.
        result_template: string
            Filename of Mako template to render the script results.
            With following variables available inside template:
            * `task`, that is task object or self
            * celery `result` object
            * `query`, same a run script response['query']
            * `files`, dictionary of with filename as key and url as value.
    """
    abstract = True
    label = None
    title = None
    description = ''
    js_form = 'form.js'
    result_template = None
    autoregister = True  # Change to False to hide this task
    made_by_researcher = True
    def output_dir(self):
        """Directory where task can put output files.
        Combination of 'task_output_directory' config and task identifier.
        Directory will be created when it does not exist.
        """
        directory = os.path.join(self.app.conf['task_output_directory'],
                                 self.request.id,
                                 )
        try:
            os.makedirs(directory)
        except OSError as e:
            # ignore exception when it already exists
            if e.errno != errno.EEXIST:
                raise e
        return directory
    def local_db_url(self, db_url):
        """Convert db_url string into a :class:`~sqlalchemy.engine.url.URL`"""
        front_db_url = make_url(db_url)
        return front_db_url
    def task_dir(self):
        """Directory in which Task is defined.
        Can be used to find location of Task resources like executables.
        """
        # Resolve via the module that defined the concrete subclass,
        # not this base class, so each task finds its own resources.
        this_file = os.path.abspath(sys.modules[self.__module__].__file__)
        return os.path.dirname(this_file)
    def formfields2taskargs(self, fields, db_url):
        """Validate and serialize form fields to dict which is passed to Task.run().
        Used as kwargs for Task.run()
        If task requires db access it can add db_url to the task arguments.
        The db_url can be parsed and used to connect to db.
        eg. Python using sqlalchemy models::
            from script_wrapper.models import DBSession, Devices
            Session = DBSession(db_url)
            res = Session().query(Devices).all()
            Session.close_all()
        eg. For Matlab the SQLAlchemy URL has to be converted to JDBC
        In Python::
            from script_wrapper.models import make_url
            u = make_url(db_url)
            username = u.username
            password = u.password
            instance = u.database
            drivers = {'postgresql': 'org.postgresql.Driver'}
            driver = drivers[u.drivername]
            jdbct = 'jdbc:{drivername}://{host}:{port}/{database}'
            jdbc_url = jdbct.format(drivername=u.drivername,
                                    host=u.host,
                                    port=u.port or 5432,
                                    database=u.database)
            if u.query.get('sslmode') == 'require':
                jdbc_url += '?ssl=true'
        In Matlab::
            conn = database(instance, username, password, driver, jdbc_url)
        eg. For R
        In Python::
            from script_wrapper.models import make_url
            u = make_url(db_url)
            drivers = {'postgresql': 'PostgreSQL'}
            driver = drivers[u.drivername]
            dbname = u.database
            host = u.host
            port = u.port or 5432
            username = u.username
            password = u.password
        In R::
            drv <- dbDriver(driver)
            # TODO how to use sslmode=require in R,
            # possibly via PGSSLMODE environment variable?
            con <- dbConnect(drv, dbname=dbname, host=host, port=port,
                             user=username, password=password)
        Throw a script_wrapper.validation.Invalid exception when fields are not valid.
        """
        # Base implementation passes the form through untouched;
        # subclasses override to validate/convert.
        return fields
    def _abs_file_name(self, filename):
        # Resolve a task-relative resource filename to an absolute path.
        return os.path.join(self.task_dir(), filename)
    def result_template_location(self):
        """Mako template to render result content"""
        return self._abs_file_name(self.result_template)
    def render_result(self, result, files):
        """Returns result html based on the task result and it's output files.
        Returns None when there is no result_template or the result did not complete successfully.
        """
        if self.result_template is not None and result.successful():
            template = Template(filename=self.result_template_location(), output_encoding='utf-8')
            return template.render(query=result.result['query'], files=files)
        else:
            return None
    def js_form_location(self):
        """Javascript to render ExtJS form to div with 'form' id"""
        return self._abs_file_name(self.js_form)
    def sslify_dbname(self, db_url):
        """To connect to postgresql database which requires ssl add query to db name."""
        db_name = db_url.database
        # sslmode values that require an ssl JDBC connection
        sslmodes = ['require', 'verify', 'verify-full']
        if 'sslmode' in db_url.query and db_url.query['sslmode'] in sslmodes:
            db_name += '?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory'
        return db_name
class RTask(PythonTask):
    """Abstract task to run an R function defined in an R-script file.

    Subclasses must set the ``script`` attribute and override ``run``, eg.::

        class PlotTask(RTask):
            script = 'plot.r'
            def run(self, output_dir):
                self.r.plot(output_dir)
                return {'query': {}}

    Attributes:
        script : string
            Filename of R script with a function.
    """
    abstract = True
    script = None
    _r = None  # lazily-built rpy2 package wrapper, see `r`
    @property
    def r(self):
        """Lazily import self.script into R.

        The script's functions are exposed as ``self.r.<function>``.
        """
        if self._r is None:
            script_path = os.path.join(self.task_dir(), self.script)
            with open(script_path) as handle:
                source = handle.read()
            self._r = SignatureTranslatedAnonymousPackage(source, 'r')
        return self._r
    def toIntVector(self, myints):
        """Convert a Python list of ints into an R IntVector."""
        return IntVector(myints)
class OctaveTask(PythonTask):
    """Abstract task to run a GNU Octave function in an Octave script file.

    Subclasses must set the ``script`` attribute and override ``run``, eg.::

        class PlotTask(OctaveTask):
            script = 'plot.m'
            def run(self, output_dir):
                self.load_mfile()
                self.octave.plot(output_dir)
                return {'query': {}}

    Attributes:
        script : string
            Filename of octave script with a function.
    """
    abstract = True
    script = None
    def __init__(self):
        # Import here so the module can load even without oct2py installed.
        from oct2py import octave
        self.octave = octave
    def load_mfile(self):
        """Make self.script callable by adding its location to the Octave path."""
        script_path = os.path.join(self.task_dir(), self.script)
        self.octave.addpath(script_path)
class CalledProcessError(Exception):
    """Raised when a subprocess exits with a non-zero return code.

    Attributes:
        returncode : int
            Exit status of the command.
        cmd : string
            The command that was run.
    """
    def __init__(self, returncode, cmd):
        self.returncode = returncode
        self.cmd = cmd
        super(CalledProcessError, self).__init__(str(self))
    def __str__(self):
        message = "Command '{}' returned non-zero exit status {}"
        return message.format(self.cmd, self.returncode)
class SubProcessTask(PythonTask):
    """Abstract task wrapping subprocess.Popen.

    Writes standard out to `stdout.txt` file and standard error to `stderr.txt` file.
    Can execute any executable program with arguments.
    Raises the module-level CalledProcessError when the subprocess returns a
    non-zero exit status.
    """
    abstract = True
    def pargs(self):
        """Arguments prepended to run(\*args) which are used as Popen args"""
        return []
    def env(self):
        """Environment to use for subprocess.
        Defaults to current environment.
        Can be used to pass sensitive information to subprocess like passwords.
        """
        return os.environ
    def run(self, *args):
        """Perform subprocess with self.pargs() and \*args list as arguments.

        Returns:
            dict with key `return_code` (the subprocess exit status).

        Raises:
            CalledProcessError: when the subprocess exits non-zero.
        """
        mypid = None
        pargs = self.pargs() + list(args)
        # Popen requires every argument to be a string
        pargs = [str(parg) for parg in pargs]
        stdout_fn = os.path.join(self.output_dir(), 'stdout.txt')
        stdout = open(stdout_fn, 'w')
        stderr_fn = os.path.join(self.output_dir(), 'stderr.txt')
        stderr = open(stderr_fn, 'w')
        # When the task is revoked the children of the subprocess will keep running
        # To make sure the children are also killed use the process group id
        # To kill the process group id the term signal has to be redirected
        oldsignal = signal.getsignal(signal.SIGTERM)
        def cleanup():
            """Close output files and revert term signal"""
            stderr.close()
            stdout.close()
            signal.signal(signal.SIGTERM, oldsignal)
        def killit(signum, frame):
            """Kill the current process group and cleanup"""
            # logger.warning: `warn` is a deprecated alias
            logger.warning('Killing pg {} of pid {} with signal {}'.format(os.getpgid(mypid), mypid, signum))
            os.killpg(os.getpgid(mypid), signum)
            cleanup()
        signal.signal(signal.SIGTERM, killit)
        popen = subprocess.Popen(pargs,
                                 cwd=self.output_dir(),
                                 env=self.env(),
                                 stdout=stdout,
                                 stderr=stderr,
                                 # starts subprocess in own process group
                                 # whole group can be killed when task is revoked
                                 preexec_fn=os.setsid,
                                 )
        self.update_state(state='RUNNING')
        mypid = popen.pid
        return_code = popen.wait()
        cleanup()
        # Compare by value: `is not 0` relied on CPython's small-int caching
        # and is not a reliable (in)equality test.
        if return_code != 0:
            raise CalledProcessError(return_code, 'script')
        return {'return_code': return_code}
class MatlabTask(SubProcessTask):
    """Abstract task to execute a compiled Matlab function.

    Subclasses must set the ``script`` attribute and override ``run``, eg.::

        class PlotTask(MatlabTask):
            script = 'run_plot.sh'
            def run(self, output_dir):
                result = super(PlotTask, self).run(output_dir)
                return result

    Attributes:
        script : string
            Filename of Matlab deployment script.
            During `mcc -vm plot.m` an executable and deployment script is build.
            The executable is executed using the script.
            Eg. Matlab script `plot.m` will be runnable by running `run_plot.sh`.
    """
    abstract = True
    _matlab = None  # cached Matlab/MCR location, see `matlab`
    script = None
    matlab_version = '2012b'
    @property
    def matlab(self):
        """Location of the Matlab installation or Matlab compile runtime.

        Fetched lazily from the celery config under
        'matlab.location.<matlab_version>'.
        """
        if self._matlab is None:
            config_key = 'matlab.location.' + self.matlab_version
            self._matlab = self.app.conf[config_key]
        return self._matlab
    def pargs(self):
        """Prepend the deployment script and matlab location."""
        base = super(MatlabTask, self).pargs()
        deploy_script = os.path.join(self.task_dir(), self.script)
        return base + [deploy_script, self.matlab]
    def list2vector_string(self, mylist):
        """Convert a list into a Matlab vector literal.

        eg. [1, 2, 3] becomes '[1,2,3]'
        """
        return '[' + ','.join(str(item) for item in mylist) + ']'
    def list2cell_array_string(self, mylist):
        """Convert a list into a Matlab cell array literal.

        eg. ['foo', 'bar'] becomes "{'foo','bar'}"
        """
        quoted = ["'{}'".format(item) for item in mylist]
        return '{' + ','.join(quoted) + '}'
| |
# Alpha O. Sall
# 03/24/2014
from flask import Flask, request, Response
import json
import requests
from array import *
from Log import Log
def getCephRestApiUrl(request):
    """Derive the ceph-rest-api base URL from the incoming request's root URL."""
    root = request.url_root
    return root.replace("inkscopeCtrl", "ceph-rest-api")
class Pools:
    """In-memory representation of a Ceph pool, created or loaded via ceph-rest-api."""
    # (attribute name, JSON key) pairs shared by both loaders below;
    # the same keys appear in the creation form and in osd/dump output.
    _POOL_FIELDS = (
        ('name', 'pool_name'),
        ('pg_num', 'pg_num'),
        ('pgp_num', 'pg_placement_num'),
        ('type', 'type'),
        ('size', 'size'),
        ('min_size', 'min_size'),
        ('crash_replay_interval', 'crash_replay_interval'),
        ('crush_ruleset', 'crush_ruleset'),
        ('erasure_code_profile', 'erasure_code_profile'),
        ('quota_max_objects', 'quota_max_objects'),
        ('quota_max_bytes', 'quota_max_bytes'),
    )
    def __init__(self):
        pass
    def newpool_attribute(self, jsonform):
        """Populate attributes from a JSON pool-creation form string."""
        jsondata = json.loads(jsonform)
        for attr, key in self._POOL_FIELDS:
            setattr(self, attr, jsondata[key])
    def savedpool_attribute(self, ind, jsonfile):
        """Populate attributes from pool `ind` of an osd/dump.json response."""
        pool = jsonfile.json()['output']['pools'][ind]
        for attr, key in self._POOL_FIELDS:
            setattr(self, attr, pool[key])
    def register(self):
        """Create this pool through ceph-rest-api (PUT osd/pool/create)."""
        uri = (self.cephRestApiUrl + 'osd/pool/create'
               '?pool=' + self.name +
               '&pool_type=' + self.type +
               '&pg_num=' + str(self.pg_num) +
               '&pgp_num=' + str(self.pgp_num))
        if self.erasure_code_profile != "":
            uri += '&erasure_code_profile=' + self.erasure_code_profile
        requests.put(uri)
def getindice(id, jsondata):
    """Map a numeric Ceph pool id to its index in the osd/dump pool list.

    Args:
        id: numeric pool id as reported by Ceph.
        jsondata: requests response whose `content` is the osd/dump JSON body.

    Returns:
        int index into output.pools, or the string "Pool id not found".
    """
    pools = json.loads(jsondata.content)['output']['pools']
    for ind, pool in enumerate(pools):
        if pool['pool'] == id:
            return ind
    # Callers (checkpool, pool_manage) test for exactly this string; the
    # previous return value "Pool not found" never matched their check, so
    # a missing pool fell through to a KeyError later on.
    return "Pool id not found"
def getpoolname(ind, jsondata):
    """Return the name of the pool at index `ind` of an osd/dump response."""
    pools = jsondata.json()['output']['pools']
    return str(pools[ind]['pool_name'])
def checkpool(pool_id, jsondata):
    """Validate a pool reference against the cluster state.

    For an int `pool_id`: check the pool id exists in the osd/dump data.
    For a str `pool_id`: check the name is not already taken (lspools data).

    Returns:
        flask Response with a JSON body {'status': ..., 'output': {}};
        None when pool_id is neither int nor str (unchanged behaviour).
    """
    skeleton = {'status': '', 'output': {}}
    if isinstance(pool_id, int):
        ind = getindice(pool_id, jsondata)
        id = ind
        if id == "Pool id not found":
            skeleton['status'] = id
        else:
            skeleton['status'] = 'OK'
        result = json.dumps(skeleton)
        return Response(result, mimetype='application/json')
    if isinstance(pool_id, str):
        r = json.loads(jsondata.content)
        # A plain list: the previous array('i', []) only accepts ints and
        # raised TypeError as soon as a pool *name* was appended.
        mypoolsname = []
        for i in r['output']:
            mypoolsname.append(i[u'poolname'])
        if pool_id not in mypoolsname:
            skeleton['status'] = 'OK'
        else:
            # fixed typo ("exits") and missing space in the user message
            skeleton['status'] = pool_id + ' already exists. Please enter a new pool name'
        result = json.dumps(skeleton)
        return Response(result, mimetype='application/json')
def geterrors(url, methods):
    """Probe `url` and report success or an error string.

    Args:
        url: full ceph-rest-api URL to call.
        methods: 'GET' to issue a GET request; anything else issues a PUT.

    Returns:
        'ok' on success, otherwise 'Error <description>'.
    """
    try:
        if methods == 'GET':
            r = requests.get(url)
        else:
            r = requests.put(url)
    # The original `except HTTPError, e` used Python 2 syntax on a name that
    # was never imported, and read `r`, which is unbound when the request
    # itself fails. RequestException is the base of all requests failures.
    except requests.exceptions.RequestException as e:
        return 'Error ' + str(e)
    else:
        return 'ok'
# @app.route('/pools/', methods=['GET','POST'])
# @app.route('/pools/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage(id):
    """REST handler for /pools and /pools/<id>.

    GET without id: proxy osd/lspools.json.
    GET with id: return the single pool dict from osd/dump.json.
    POST: create a pool from the posted JSON form, then apply non-default
        parameters and quotas one PUT at a time.
    DELETE: drop the pool by id.
    PUT (the final else): rename/update an existing pool from the form.
    """
    cephRestApiUrl = getCephRestApiUrl(request);
    if request.method == 'GET':
        if id == None:
            r = requests.get(cephRestApiUrl+'osd/lspools.json')
            if r.status_code != 200:
                return Response(r.raise_for_status())
            else:
                r = r.content
                return Response(r, mimetype='application/json')
        else:
            data = requests.get(cephRestApiUrl+'osd/dump.json')
            if data.status_code != 200:
                return 'Error '+str(data.status_code)+' on the request getting pools'
            else:
                # translate the Ceph pool id into a list index
                ind = getindice(id, data)
                id = ind
                skeleton = {'status':'','output':{}}
                if id == "Pool id not found":
                    skeleton['status'] = id
                    result = json.dumps(skeleton)
                    return Response(result, mimetype='application/json')
                else:
                    r = data.content
                    r = json.loads(r)
                    #r = data.json()
                    skeleton['status'] = r['status']
                    skeleton['output'] = r['output']['pools'][id]
                    result = json.dumps(skeleton)
                    return Response(result, mimetype='application/json')
    elif request.method =='POST':
        jsonform = request.form['json']
        newpool = Pools()
        newpool.cephRestApiUrl = getCephRestApiUrl(request)
        newpool.newpool_attribute(jsonform)
        newpool.register()
        # re-read the dump; the new pool is the last entry
        jsondata = requests.get(cephRestApiUrl+'osd/dump.json')
        r = jsondata.content
        r = json.loads(r)
        #r = jsondata.json()
        nbpool = len(r['output']['pools'])
        poolcreated = Pools()
        poolcreated.savedpool_attribute(nbpool-1, jsondata)
        # set pool parameters that differ from the defaults Ceph applied
        var_name= ['size', 'min_size', 'crash_replay_interval','crush_ruleset']
        param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.crush_ruleset]
        default_param_list = [poolcreated.size, poolcreated.min_size, poolcreated.crash_replay_interval, poolcreated.crush_ruleset]
        for i in range(len(default_param_list)):
            if param_to_set_list[i] != default_param_list[i]:
                r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(poolcreated.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
            else:
                pass
        # set object or byte limit on pool
        field_name = ['max_objects','max_bytes']
        param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
        default_param = [poolcreated.quota_max_objects, poolcreated.quota_max_bytes]
        for i in range(len(default_param)):
            if param_to_set[i] != default_param[i]:
                r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(poolcreated.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
            else:
                pass
        return 'None'
    elif request.method == 'DELETE':
        data = requests.get(cephRestApiUrl+'osd/dump.json')
        # if data.status_code != 200:
        #     return 'Error '+str(r.status_code)+' on the request getting pools'
        # else:
        #r = data.json()
        r = data.content
        r = json.loads(r)
        # data = requests.get('http://localhost:8080/ceph-rest-api/osd/dump.json')
        ind = getindice(id, data)
        id = ind
        poolname = r['output']['pools'][id]['pool_name']
        poolname = str(poolname)
        # pool must be named twice plus the --yes-i-really-really-mean-it guard
        delete_request = requests.put(cephRestApiUrl+'osd/pool/delete?pool='+poolname+'&pool2='+poolname+'&sure=--yes-i-really-really-mean-it')
        return str(delete_request.status_code)
    else:
        jsonform = request.form['json']
        newpool = Pools()
        newpool.newpool_attribute(jsonform)
        data = requests.get(cephRestApiUrl+'osd/dump.json')
        if data.status_code != 200:
            return 'Error '+str(data.status_code)+' on the request getting pools'
        else:
            #r = data.json()
            r = data.content
            r = json.loads(r)
            ind = getindice(id, data)
            savedpool = Pools()
            savedpool.savedpool_attribute(ind, data)
            # rename the poolname
            if str(newpool.name) != str(savedpool.name):
                r = requests.put(cephRestApiUrl+'osd/pool/rename?srcpool='+str(savedpool.name)+'&destpool='+str(newpool.name))
            # set pool parameter
            var_name= ['size', 'min_size', 'crash_replay_interval','pg_num','pgp_num','crush_ruleset']
            param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.pg_num, newpool.pgp_num, newpool.crush_ruleset]
            default_param_list = [savedpool.size, savedpool.min_size, savedpool.crash_replay_interval, savedpool.pg_num, savedpool.pgp_num, savedpool.crush_ruleset]
            for i in range(len(default_param_list)):
                if param_to_set_list[i] != default_param_list[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(newpool.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
                else:
                    pass
            # set object or byte limit on pool
            field_name = ['max_objects','max_bytes']
            param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
            default_param = [savedpool.quota_max_objects, savedpool.quota_max_bytes]
            for i in range(len(default_param)):
                if param_to_set[i] != default_param[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(newpool.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
                else:
                    pass
            # NOTE(review): if nothing was renamed or changed, `r` still holds
            # the osd/dump dict (no status_code) — this return can raise.
            return str(r.status_code)
# @app.route('/pools/<int:id>/snapshot', methods=['POST'])
def makesnapshot(id):
    """Create a snapshot on pool `id`; snapshot name comes from the posted JSON form."""
    cephRestApiUrl = getCephRestApiUrl(request)
    data = requests.get(cephRestApiUrl + 'osd/dump.json')
    dump = json.loads(data.content)
    # translate the Ceph pool id into a list index
    id = getindice(id, data)
    poolname = dump['output']['pools'][id]['pool_name']
    form = json.loads(request.form['json'])
    snap = form['snapshot_name']
    r = requests.put(cephRestApiUrl + 'osd/pool/mksnap?pool=' + str(poolname) + '&snap=' + str(snap))
    return str(r.status_code)
# @app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(id, namesnapshot):
    """Delete snapshot `namesnapshot` from pool `id`.

    Returns:
        The ceph-rest-api response body, or the error text when the
        request itself fails.
    """
    cephRestApiUrl = getCephRestApiUrl(request)
    data = requests.get(cephRestApiUrl + 'osd/dump.json')
    r = json.loads(data.content)
    ind = getindice(id, data)
    id = ind
    poolname = r['output']['pools'][id]['pool_name']
    try:
        r = requests.put(cephRestApiUrl + 'osd/pool/rmsnap?pool=' + str(poolname) + '&snap=' + str(namesnapshot))
    # The original `except HTTPException, e` used Python 2 syntax on an
    # undefined name, so any failure raised NameError; it also returned the
    # raw exception object, which Flask cannot serialize.
    except requests.exceptions.RequestException as e:
        return str(e)
    else:
        return r.content
| |
import os
import os.path
import shutil
import socket
import subprocess
import sys
import tempfile
import threading
import time
import random
try:
import pytest
except ImportError:
print >> sys.stderr, "Integ tests require pytests!"
sys.exit(1)
def pytest_funcarg__servers(request):
    "Boots a bloomd server in a temp dir on a random port; returns two connected sockets"
    # Create tmpdir and delete after
    tmpdir = tempfile.mkdtemp()
    # NOTE(review): random port may collide with a busy port; the connect
    # loop below is the only mitigation.
    port = random.randint(2000, 60000)
    # Write the configuration
    config_path = os.path.join(tmpdir, "config.cfg")
    conf = """[bloomd]
data_dir = %(dir)s
port = %(port)d
""" % {"dir": tmpdir, "port": port}
    # NOTE(review): file handle is never closed explicitly; relies on
    # CPython refcount GC.
    open(config_path, "w").write(conf)
    # Start the process
    proc = subprocess.Popen("./bloomd -f %s" % config_path, shell=True)
    proc.poll()
    # returncode stays None while the server is still running
    assert proc.returncode is None
    # Define a cleanup handler
    def cleanup():
        try:
            subprocess.Popen("kill -9 %s" % proc.pid, shell=True)
            time.sleep(1)
            shutil.rmtree(tmpdir)
        except:
            pass
    request.addfinalizer(cleanup)
    # Make a connection to the server, retrying while it boots
    connected = False
    for x in xrange(3):
        try:
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.settimeout(1)
            conn.connect(("localhost", port))
            connected = True
            break
        except Exception, e:
            print e
            time.sleep(1)
    # Die now
    if not connected:
        raise EnvironmentError("Failed to connect!")
    # Make a second connection (used by the concurrent tests)
    conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn2.settimeout(1)
    conn2.connect(("localhost", port))
    # Return the connection
    return conn, conn2
class TestInteg(object):
def test_list_empty(self, servers):
"Tests doing a list on a fresh server"
server, _ = servers
fh = server.makefile()
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
def test_list_prefix(self, servers):
"Tests lists with prefix"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar2\n")
server.sendall("create foobar1\n")
server.sendall("create test4\n")
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
time.sleep(1) # Wait for vacuum
server.sendall("list foo\n")
assert fh.readline() == "START\n"
assert "foobar1" in fh.readline()
assert "foobar2" in fh.readline()
assert fh.readline() == "END\n"
def test_create(self, servers):
"Tests creating a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
def test_create_bad(self, servers):
"Tests creating a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create " + ("foo"*100) + "\n")
assert fh.readline() == "Client Error: Bad filter name\n"
def test_doublecreate(self, servers):
"Tests creating a filter twice"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("create foobar\n")
assert fh.readline() == "Exists\n"
def test_drop(self, servers):
"Tests dropping a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("drop foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
def test_close(self, servers):
"Tests closing a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("close foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
def test_clear(self, servers):
"Tests clearing a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "cleartest" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("clear cleartest\n")
assert fh.readline() == "Filter is not proxied. Close it first.\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "cleartest" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("close cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("clear cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
# Load + Drop the filter
time.sleep(3)
server.sendall("create cleartest\n")
assert fh.readline() == "Done\n"
server.sendall("drop cleartest\n")
assert fh.readline() == "Done\n"
def test_set(self, servers):
"Tests setting a value"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("set foobar test\n")
assert fh.readline() == "Yes\n"
def test_bulk(self, servers):
"Tests setting bulk values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("multi foobar test blah\n")
assert fh.readline() == "No No\n"
server.sendall("bulk foobar test blah\n")
assert fh.readline() == "Yes Yes\n"
def test_doubleset(self, servers):
"Tests setting a value"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("set foobar test\n")
assert fh.readline() == "Yes\n"
server.sendall("set foobar test\n")
assert fh.readline() == "No\n"
def test_check(self, servers):
"Tests checking a value"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("set foobar test\n")
assert fh.readline() == "Yes\n"
server.sendall("check foobar test\n")
assert fh.readline() == "Yes\n"
def test_multi(self, servers):
"Tests checking multiple values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("multi foobar test test1 test2\n")
assert fh.readline() == "No No No\n"
server.sendall("set foobar test\n")
assert fh.readline() == "Yes\n"
server.sendall("multi foobar test test1 test2\n")
assert fh.readline() == "Yes No No\n"
def test_aliases(self, servers):
"Tests aliases"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
server.sendall("b foobar test test1 test2\n")
assert fh.readline() == "Yes Yes Yes\n"
server.sendall("s foobar test\n")
assert fh.readline() == "No\n"
server.sendall("m foobar test1 test2\n")
assert fh.readline() == "Yes Yes\n"
server.sendall("c foobar test\n")
assert fh.readline() == "Yes\n"
def test_set_check(self, servers):
"Tests setting and checking many values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar\n")
assert fh.readline() == "Done\n"
for x in xrange(1000):
server.sendall("set foobar test%d\n" % x)
assert fh.readline() == "Yes\n"
for x in xrange(1000):
server.sendall("check foobar test%d\n" % x)
assert fh.readline() == "Yes\n"
def test_concurrent_drop(self, servers):
"Tests setting values and do a concurrent drop on the DB"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(10000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
if resp != "Yes\n":
assert resp == "Filter does not exist\n" and x > 100
return
else:
assert resp == "Yes\n"
assert False
def drop():
time.sleep(0.2)
server2.sendall("drop pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=drop)
t.start()
loopset()
def test_concurrent_close(self, servers):
"Tests setting values and do a concurrent close on the DB"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(100000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Yes\n"
def close():
time.sleep(0.1)
server2.sendall("close pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=close)
t.start()
loopset()
def test_concurrent_flush(self, servers):
"Tests setting values and do a concurrent flush"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(10000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Yes\n"
def flush():
for x in xrange(3):
time.sleep(0.1)
server2.sendall("flush pingpong\n")
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=flush)
t.start()
loopset()
def test_concurrent_create(self, servers):
"Tests creating a filter with concurrent sets"
server, server2 = servers
fh = server.makefile()
def loopset():
for x in xrange(1000):
server.sendall("set pingpong test%d\n" % x)
resp = fh.readline()
assert resp == "Yes\n"
for r in xrange(3):
for x in xrange(1000):
server.sendall("set pingpong%d test%d\n" % (r, x))
resp = fh.readline()
assert resp == "Yes\n"
def create():
for x in xrange(10):
server2.sendall("create pingpong%d\n" % x)
server.sendall("create pingpong\n")
fh.readline() == "Done\n"
t = threading.Thread(target=create)
t.start()
loopset()
def test_create_in_memory(self, servers):
"Tests creating a filter in_memory, tries flush"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert "foobar" in fh.readline()
assert fh.readline() == "END\n"
server.sendall("flush foobar\n")
assert fh.readline() == "Done\n"
def test_set_check_in_memory(self, servers):
"Tests setting and checking many values"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory\n")
assert fh.readline() == "Done\n"
for x in xrange(1000):
server.sendall("set foobar test%d\n" % x)
assert fh.readline() == "Yes\n"
for x in xrange(1000):
server.sendall("check foobar test%d\n" % x)
assert fh.readline() == "Yes\n"
def test_drop_in_memory(self, servers):
"Tests dropping a filter"
server, _ = servers
fh = server.makefile()
server.sendall("create foobar in_memory\n")
assert fh.readline() == "Done\n"
server.sendall("drop foobar\n")
assert fh.readline() == "Done\n"
server.sendall("list\n")
assert fh.readline() == "START\n"
assert fh.readline() == "END\n"
    def test_in_progress_drop(self, servers):
        "Tests creating/dropping a filter and getting the 'Delete in progress'"
        server, _ = servers
        fh = server.makefile()
        for x in xrange(10):
            # Create and drop should cause the vacuum to fall behind
            server.sendall("create drop_in_prog\n")
            assert fh.readline() == "Done\n"
            server.sendall("drop drop_in_prog\n")
            assert fh.readline() == "Done\n"
            # Create after drop should fail
            server.sendall("create drop_in_prog\n")
            resp = fh.readline()
            if resp == "Delete in progress\n":
                # Caught the vacuum mid-delete: this is the condition the
                # test exists to exercise, so we are done.
                return
            elif resp == "Done\n":
                # The vacuum kept up this round; clean up the extra filter
                # (response intentionally unasserted) and try again.
                server.sendall("drop drop_in_prog\n")
                fh.readline()
        # NOTE(review): timing-dependent — ten create/drop rounds never
        # observed the vacuum mid-delete, so the race was not reproduced.
        assert False, "Failed to do a concurrent create"
def test_create_long_prefix(self, servers):
"Tests create with long prefix"
server, _ = servers
fh = server.makefile()
server.sendall("create test:create:filter:with:long:prefix:1\n")
server.sendall("create test:create:filter:with:long:prefix:2\n")
server.sendall("create test:create:filter:with:long:common:1\n")
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
assert fh.readline() == "Done\n"
time.sleep(2) # Wait for vacuum, ensures filters are in ascending order
server.sendall("list test:create:filter\n")
assert fh.readline() == "START\n"
assert "test:create:filter:with:long:common:1" in fh.readline()
assert "test:create:filter:with:long:prefix:1" in fh.readline()
assert "test:create:filter:with:long:prefix:2" in fh.readline()
assert fh.readline() == "END\n"
if __name__ == "__main__":
    # Run only the TestInteg integration tests when invoked as a script.
    sys.exit(pytest.main(args="-k TestInteg."))
| |
# -*- coding: utf-8 -*-
"""
LiveSource.
"""
import ast
class LiveSource(object):
    """
    Instruments Python source code so the values produced on each line can
    be recorded and retrieved.

    Attributes:
        code (str): Source code.
        lst (LSTree): AST transformer/recorder used to instrument the code.
    """
    def __init__(self, code, max_deep=10):
        """
        Args:
            code (str): Source code.
            max_deep (int): Number of cached values at one line.
        """
        self.code = code
        self.lst = LSTree(max_deep)
    def get_values(self):
        """
        Returns values for all lines in code.

        Returns:
            Mapping of line number to a deque of (name, value) pairs, as
            populated by the injected ``__livesource_listing`` recorder.
        """
        # FIXME: exceptions handling
        compiled_code = compile(self._parse(), '<livesource>', 'exec')
        # NOTE(review): this executes the user-supplied code — only safe
        # for trusted input.
        exec(compiled_code, self.lst.globals, self.lst.locals)
        return self.lst.locals['__livesource_listing']
    def set_variable(self, lineno, var, val):
        """
        Set variable value in specified line.

        Args:
            lineno (int): Line number.
            var (str): Variable name.
            val: Variable value

        Returns:
            NotImplemented: the operation is not implemented yet.
        """
        return NotImplemented
    def update(self, code):
        """
        Update source code.

        Args:
            code (str): New source code.
        """
        self.code = code
    def _parse(self):
        """
        Parse source code and instrument it with value listeners.

        Returns:
            ast tree object with recorder expressions injected by LSTree.
        """
        tree = ast.parse(self.code)
        self.lst.stack = []  # clear stack left over from a previous parse
        parsed_tree = self.lst.visit(tree)
        ast.fix_missing_locations(parsed_tree)
        return parsed_tree
class LSTree(ast.NodeVisitor):
    """
    AST visitor that rewrites a tree so that watched values are appended to
    a ``__livesource_listing`` mapping at run time.

    Attributes:
        globals (dict): Globals for LSTree.
        locals (dict): Locals for LSTree.
        stack (list): Stack used by tree visitors to collect the generated
            listener nodes for the block currently being visited.
    """
    def __init__(self, max_deep=10):
        """
        Args:
            max_deep (int): Number of cached values at one line; each line
                gets a ``collections.deque(maxlen=max_deep)``.
        """
        # NOTE: __livesource_listing is defined inside locals to speed up
        # name searching; it maps lineno -> deque of (name, value) tuples.
        self.globals = {}
        self.locals = {
            '__livesource_listing': __import__('collections').defaultdict(
                lambda: __import__('collections').deque(maxlen=max_deep))}
        self.stack = []
    #
    # Tree visitors
    #
    def field_visit(self, field):
        """
        Visit nodes in field.

        Args:
            field (obj or list): ast node field.
        """
        if not isinstance(field, list):
            field = [field]
        # Wrap in a throwaway Expression so generic_visit walks the children.
        self.generic_visit(ast.Expression(body=field))
    def block_visit(self, fields):
        """
        Visit nodes in fields and merge in the listener nodes they generate.

        Returns:
            Sequence of fields sorted by (line number, column offset).
        """
        # Visit children with a fresh stack so listeners generated for this
        # block are kept separate from the enclosing block's stack.
        old_stack = self.stack
        self.stack = []
        self.field_visit(fields)
        if old_stack:
            old_stack.append(self.stack)
            self.stack = old_stack
        fields.extend(self.stack)
        try:
            sorted_args = sorted(fields,
                                 key=lambda obj: (obj.lineno, obj.col_offset))
        except AttributeError:  # some entries lack lineno/col_offset
            sorted_args = []
        return sorted_args
    #
    # Modules
    #
    def visit_Expression(self, node):
        """
        Used when code is compiled by eval().

        Args:
            node (ast.AST): ast node.
        """
        return ast.Expression(body=self.block_visit(node.body))
    def visit_Interactive(self, node):
        """
        Used when code is compiled by interactive console.

        Args:
            node (ast.AST): ast node.
        """
        return ast.Interactive(body=self.block_visit(node.body))
    def visit_Module(self, node):
        """
        Used when code is normally executed.

        Args:
            node (ast.AST): ast node.
        """
        return ast.Module(body=self.block_visit(node.body))
    #
    # Statements
    #
    def visit_Assign(self, node):
        """
        Assignment statement.

        Args:
            node (ast.AST): ast node.
        """
        # Visiting the targets emits Name/Attribute listeners onto the stack.
        self.field_visit(node.targets)
        return node
    def visit_AugAssign(self, node):
        """
        Augmented assignment statement.

        Args:
            node (ast.AST): ast node.
        """
        self.field_visit(node.target)
        return node
    def visit_If(self, node):
        """
        Conditional statement: records the test result, then the body.

        Args:
            node (ast.AST): ast node.
        """
        node.body = self.block_visit(node.body)
        lineno = node.lineno
        name = ast.Name(id='None', ctx=ast.Load())  # no name
        value = node.test  # boolean result of the condition
        body = [self._add_listener(lineno, name, value)]
        body.extend(node.body)
        node.body = body
        return node
    def visit_Print(self, node):
        """
        Print statement: records the printed values joined by spaces.

        Args:
            node (ast.AST): ast node.

        Note:
            Only in python 2.
        """
        lineno = node.lineno
        name = ast.Name(id='None', ctx=ast.Load())  # no name
        value = ast.Call(func=ast.Attribute(value=ast.Str(s=' '),
                                            attr='join',
                                            ctx=ast.Load()),
                         args=[ast.Tuple(elts=node.values,
                                         ctx=ast.Load())],
                         keywords=[],
                         starargs=None,
                         kwargs=None)
        self.stack.append(self._add_listener(lineno, name, value))
        return node
    def visit_Return(self, node):
        """
        Return statement.

        Args:
            node (ast.AST): ast node.
        """
        self.field_visit(node.value)
        return node
    def visit_While(self, node):
        """
        While loop: records the test result before each body execution.

        Args:
            node (ast.AST): ast node.
        """
        # NOTE(review): unlike visit_If, the block_visit() return value is
        # not assigned back to node.body here — confirm this is intended.
        self.block_visit(node.body)
        lineno = node.lineno
        name = ast.Name(id='None', ctx=ast.Load())
        value = node.test
        body = [self._add_listener(lineno, name, value)]
        body.extend(node.body)
        node.body = body
        return node
    #
    # Expressions
    #
    def visit_Attribute(self, node):
        """
        Attribute expression: records a dotted name and its value.

        Args:
            node (ast.AST): ast node.
        """
        attr_obj = node
        name = attr_obj.attr
        while isinstance(attr_obj.value, ast.Attribute):  # nested attributes
            attr_obj = attr_obj.value
            name = '{0}.{1}'.format(attr_obj.attr, name)
        else:
            # Loop exhausted: prepend the base object's name (e.g. "obj.a.b").
            name = ast.Str(s='{0}.{1}'.format(attr_obj.value.id, name))
        lineno = node.lineno
        value = ast.Attribute(value=node.value,
                              attr=node.attr,
                              ctx=ast.Load(),
                              lineno=lineno,
                              col_offset=node.col_offset)
        self.stack.append(self._add_listener(lineno, name, value))
        return node
    def visit_Compare(self, node):
        """
        Compare expression: records the comparison result (no name).

        Args:
            node (ast.AST): ast node.
        """
        lineno = node.lineno
        name = ast.Name(id='None', ctx=ast.Load())
        value = node
        self.stack.append(self._add_listener(lineno, name, value))
        return node
    def visit_Name(self, node):
        """
        Name expression: records the name and its current value.

        Args:
            node (ast.AST): ast node.
        """
        lineno = node.lineno
        name = ast.Str(s=node.id)
        value = ast.Name(id=node.id,
                         ctx=ast.Load(),
                         lineno=lineno,
                         col_offset=node.col_offset)
        self.stack.append(self._add_listener(lineno, name, value))
        return node
    @staticmethod
    def _add_listener(lineno, var_name, val):
        """
        Builds the recorder statement for a watched variable, equivalent to
        ``__livesource_listing[lineno].append((var_name, val))``.

        Args:
            lineno (int): Line number of watched variable in source code.
            var_name (str): Watched variable name.
            val (ast.expr): Value of watched variable.

        Returns:
            ast node (an ast.Expr wrapping the append call).
        """
        # FIXME: change data structure to fix
        # multiple inline variable assignment
        # __livesource_listing[lineno].append(var_name, val, )
        # NOTE(review): the generated node is positioned at lineno + 1 with
        # col_offset -1 — presumably so block_visit sorts it right after the
        # watched line; confirm.
        return ast.Expr(
            value=ast.Call(
                func=ast.Attribute(
                    value=ast.Subscript(
                        value=ast.Name(id='__livesource_listing',
                                       ctx=ast.Load()),
                        slice=ast.Index(value=ast.Num(n=lineno)),
                        ctx=ast.Load()),
                    attr='append',
                    ctx=ast.Load()),
                args=[ast.Tuple(elts=[var_name, val, ],
                                ctx=ast.Load()), ],
                keywords=[],
                starargs=None,
                kwargs=None),
            lineno=lineno + 1,
            col_offset=-1)
| |
from cases_framework import Case, MultiCase
# Covers cases for vtocc_cached2
class Case2(Case):
    """A Case preconfigured for the 'vtocc_cached2' cache table."""
    def __init__(self, **kwargs):
        # Pin cache_table; all other Case keyword arguments pass through.
        Case.__init__(self, cache_table='vtocc_cached2', **kwargs)
# The cases run in order against a shared row cache; the "# (eid.bid, ...)"
# comments between entries track which rows are expected to be resident in
# the cache after the preceding case has executed.
cases = [
    "alter table vtocc_cached2 comment 'new'",
    Case2(doc="PK_IN (null key)",
          query_plan="PK_IN",
          sql="select * from vtocc_cached2 where eid = 2 and bid = :bid",
          bindings={"bid": None},
          result=[],
          rowcount=0,
          rewritten=[
              "select * from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 2 and bid = null)"],
          cache_absent=1),
    Case2(doc="PK_IN (empty cache)",
          query_plan="PK_IN",
          sql="select * from vtocc_cached2 where eid = 2 and bid = 'foo'",
          result=[(2, 'foo', 'abcd2', 'efgh')],
          rowcount=1,
          rewritten=[
              "select * from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 2 and bid = 'foo')"],
          cache_misses=1),
    # (2.foo) is in cache
    Case2(doc="PK_IN, use cache",
          query_plan="PK_IN",
          sql="select bid, eid, name, foo from vtocc_cached2 where eid = 2 and bid = 'foo'",
          result=[('foo', 2, 'abcd2', 'efgh')],
          rowcount=1,
          rewritten=["select bid, eid, name, foo from vtocc_cached2 where 1 != 1"],
          cache_hits=1),
    # (2.foo)
    Case2(doc="PK_IN, absent",
          query_plan="PK_IN",
          sql="select bid, eid, name, foo from vtocc_cached2 where eid = 3 and bid = 'foo'",
          result=[],
          rowcount=0,
          rewritten=[
              "select bid, eid, name, foo from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 3 and bid = 'foo')"],
          cache_absent=1),
    # (2.foo)
    Case2(doc="out of order columns list",
          sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
          result=[('foo', 1)],
          rowcount=1,
          rewritten=[
              "select bid, eid from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'foo')"],
          cache_misses=1),
    # (1.foo, 2.foo)
    Case2(doc="out of order columns list, use cache",
          sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'foo'",
          result=[('foo', 1)],
          rowcount=1,
          rewritten=[],
          cache_hits=1),
    # (1.foo, 2.foo)
    Case2(doc="pk_in for composite pk table, two fetches from db (absent)",
          query_plan="PK_IN",
          sql="select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid in('absent1', 'absent2')",
          result=[],
          rowcount=0,
          rewritten=[
              "select eid, bid, name, foo from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'absent1') or (eid = 1 and bid = 'absent2')"],
          cache_hits=0,
          cache_misses=0,
          cache_absent=2,
          cache_invalidations=0),
    # (1.foo, 1.bar, 2.foo)
    Case2(doc="pk_in for composite pk table, 1 fetch from db",
          query_plan="PK_IN",
          sql="select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid in('foo', 'bar')",
          result=[(1L, 'foo', 'abcd1', 'efgh'), (1L, 'bar', 'abcd1', 'efgh')],
          rowcount=2,
          rewritten=[
              "select eid, bid, name, foo from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'bar')"],
          cache_hits=1,
          cache_misses=1,
          cache_absent=0,
          cache_invalidations=0),
    # (1.foo, 1.bar, 2.foo)
    Case2(doc="pk_in for composite pk table, 0 fetch from db",
          query_plan="PK_IN",
          sql="select eid, bid, name, foo from vtocc_cached2 where eid = 1 and bid in('foo', 'bar')",
          result=[(1L, 'foo', 'abcd1', 'efgh'), (1L, 'bar', 'abcd1', 'efgh')],
          rowcount=2,
          rewritten=[],
          cache_hits=2,
          cache_misses=0,
          cache_absent=0,
          cache_invalidations=0),
    # (1.foo, 1.bar, 2.foo)
    Case2(doc="select_subquery for composite pk table, 1 fetch from db",
          query_plan="SELECT_SUBQUERY",
          sql="select eid, bid, name, foo from vtocc_cached2 where eid = 2 and name='abcd2'",
          result=[(2L, 'foo', 'abcd2', 'efgh'), (2L, 'bar', 'abcd2', 'efgh')],
          rowcount=2,
          rewritten=[
              "select eid, bid, name, foo from vtocc_cached2 where 1 != 1",
              "select eid, bid from vtocc_cached2 use index (aname2) where eid = 2 and name = 'abcd2' limit 10001",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 2 and bid = 'bar')"],
          cache_hits=1,
          cache_misses=1,
          cache_absent=0,
          cache_invalidations=0),
    # (1.foo, 1.bar, 2.foo, 2.bar)
    Case2(doc="verify 1.bar is in cache",
          sql="select bid, eid from vtocc_cached2 where eid = 1 and bid = 'bar'",
          result=[('bar', 1)],
          rowcount=1,
          rewritten=[
              "select bid, eid from vtocc_cached2 where 1 != 1"],
          cache_hits=1),
    # (1.foo, 1.bar, 2.foo, 2.bar)
    MultiCase(
        "update",
        ['begin',
         "update vtocc_cached2 set foo='fghi' where bid = 'bar'",
         Case2(sql="commit",
               cache_invalidations=2),
         Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
               result=[(1L, 'bar', 'abcd1', 'fghi')],
               rowcount=1,
               rewritten=[
                   "select * from vtocc_cached2 where 1 != 1",
                   "select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'bar')"],
               cache_misses=1)]),
    # (1.foo, 1.bar, 2.foo, 2.bar)
    MultiCase(
        "this will not invalidate the cache",
        ['begin',
         "update vtocc_cached2 set foo='fghi' where bid = 'bar'",
         'rollback',
         Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
               result=[(1L, 'bar', 'abcd1', 'fghi')],
               rowcount=1,
               rewritten=[],
               cache_hits=1)]),
    # (1.foo, 1.bar, 2.foo, 2.bar)
    MultiCase(
        "delete",
        ['begin',
         "delete from vtocc_cached2 where eid = 1 and bid = 'bar'",
         Case2(sql="commit",
               cache_invalidations=1),
         Case2(sql="select * from vtocc_cached2 where eid = 1 and bid = 'bar'",
               result=[],
               rowcount=0,
               rewritten="select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'bar')",
               cache_absent=1),
         "begin",
         "insert into vtocc_cached2(eid, bid, name, foo) values (1, 'bar', 'abcd1', 'efgh')",
         Case2(sql="commit",
               cache_invalidations=0)]),
    # (1.foo, 2.foo, 2.bar)
    Case2(doc="Verify 1.foo is in cache",
          sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
          result=[(1, 'foo', 'abcd1', 'efgh')],
          rowcount=1,
          rewritten=["select * from vtocc_cached2 where 1 != 1"],
          cache_hits=1),
    # (1.foo, 2.foo, 2.bar)
    # DDL
    "alter table vtocc_cached2 comment 'test'",
    Case2(doc="Verify cache is empty after DDL",
          sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
          result=[(1, 'foo', 'abcd1', 'efgh')],
          rowcount=1,
          rewritten=[
              "select * from vtocc_cached2 where 1 != 1",
              "select eid, bid, name, foo from vtocc_cached2 where (eid = 1 and bid = 'foo')"],
          cache_misses=1),
    # (1.foo)
    Case2(doc="Verify row is cached",
          sql="select * from vtocc_cached2 where eid = 1 and bid = 'foo'",
          result=[(1, 'foo', 'abcd1', 'efgh')],
          rowcount=1,
          rewritten=[],
          cache_hits=1),
    # (1.foo)
]
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# standard library
from distutils.version import LooseVersion
# external dependencies
import numpy as np
from numpy.fft import fft, fftfreq
from scipy import __version__ as scipy_version
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import UnivariateSpline
from scipy.optimize import fmin
from scipy.signal import butter, filtfilt
try:
from scipy.stats import nanmean
except ImportError: # NOTE : nanmean was removed from SciPy in version 0.18.0.
from numpy import nanmean
from scipy import sparse
import matplotlib.pyplot as plt
def sync_error(tau, signal1, signal2, time, plot=False):
    '''Returns the error between two signal time histories given a time
    shift, tau.

    Parameters
    ----------
    tau : float
        The time shift.
    signal1 : ndarray, shape(n,)
        The signal that will be interpolated. This signal is
        typically "cleaner" that signal2 and/or has a higher sample rate.
    signal2 : ndarray, shape(n,)
        The signal that will be shifted to syncronize with signal 1.
    time : ndarray, shape(n,)
        The time vector for the two signals
    plot : boolean, optional, default=False
        If true a plot will be shown of the resulting signals.

    Returns
    -------
    error : float
        Error between the two signals for the given tau.

    '''
    # The shift must keep some overlap between the two records.
    if np.abs(tau) >= time[-1]:
        raise ValueError(('abs(tau), {0}, must be less than or equal to ' +
                          '{1}').format(str(np.abs(tau)), str(time[-1])))

    # Time base of the second signal, which is assumed to lag the first.
    shifted = time + tau

    # Keep only the portion of the shifted time base that overlaps the
    # original record.
    if tau > 0:
        overlap = shifted[shifted < time[-1]]
    else:
        overlap = shifted[shifted > time[0]]

    # Resample signal 1 at the shifted sample instants of signal 2.
    resampled1 = np.interp(overlap, time, signal1)

    # Truncate signal 2 to the same overlapping window.
    if tau > 0:
        clipped2 = signal2[shifted <= overlap[-1]]
    else:
        clipped2 = signal2[shifted >= overlap[0]]

    if plot is True:
        fig, axes = plt.subplots(2, 1)
        axes[0].plot(time, signal1, time, signal2)
        axes[0].legend(('Signal 1', 'Signal 2'))
        axes[0].set_title("Before shifting.")
        axes[1].plot(overlap, resampled1, overlap, clipped2)
        axes[1].set_title("After shifting.")
        axes[1].legend(('Signal 1', 'Signal 2'))
        plt.show()

    # 2-norm of the pointwise difference over the overlap window.
    return np.linalg.norm(resampled1 - clipped2)
def find_timeshift(signal1, signal2, sample_rate, guess=None, plot=False):
    '''Returns the timeshift, tau, of the second signal relative to the
    first signal.

    Parameters
    ----------
    signal1 : array_like, shape(n, )
        The base signal.
    signal2 : array_like, shape(n, )
        A signal shifted relative to the first signal. The second signal
        should be leading the first signal.
    sample_rate : integer or float
        Sample rate of the signals. This should be the same for each signal.
    guess : float, optional, default=None
        If you've got a good guess for the time shift then supply it here.
    plot : boolean, optional, default=False
        If true, a plot of the error landscape will be shown.

    Returns
    -------
    tau : float
        The timeshift between the two signals.

    '''
    # raise an error if the signals are not the same length
    if len(signal1) != len(signal2):
        raise ValueError('Signals are not the same length!')

    # subtract the mean and normalize both signals
    signal1 = normalize(subtract_mean(signal1))
    signal2 = normalize(subtract_mean(signal2))

    time = time_vector(len(signal1), sample_rate)

    if guess is None:
        # Set up the error landscape, error vs tau. We assume the time
        # shift is no more than a quarter of the record length.
        tau_range = np.linspace(-time[len(time) // 4], time[len(time) // 4],
                                num=len(time) // 10)
        # TODO : Can I vectorize this?
        error = np.zeros_like(tau_range)
        for i, val in enumerate(tau_range):
            error[i] = sync_error(val, signal1, signal2, time)

        if plot is True:
            plt.figure()
            plt.plot(tau_range, error)
            plt.xlabel('tau')
            plt.ylabel('error')
            plt.show()

        # find initial condition from landscape
        tau0 = tau_range[np.argmin(error)]
    else:
        tau0 = guess

    # BUG FIX: message previously misspelled "minimum" as "minimun".
    print("The minimum of the error landscape is {}.".format(tau0))

    # Polish the initial guess with a simplex search on the sync error.
    tau, fval = fmin(sync_error, tau0, args=(signal1, signal2, time),
                     full_output=True, disp=True)[0:2]

    return tau
def truncate_data(tau, signal1, signal2, sample_rate):
    '''Returns the truncated vectors with respect to the time shift tau. It
    assume you've found the time shift between two signals with
    find_time_shift or something similar.

    Parameters
    ----------
    tau : float
        The time shift.
    signal1 : array_like, shape(n, )
        A time series.
    signal2 : array_like, shape(n, )
        A time series.
    sample_rate : integer
        The sample rate of the two signals.

    Returns
    -------
    truncated1 : ndarray, shape(m, )
        The truncated time series.
    truncated2 : ndarray, shape(m, )
        The truncated time series.

    '''
    base_time = time_vector(len(signal1), sample_rate)

    # Shift the first signal's time base by tau; the second keeps the
    # original base.
    shifted_time = base_time - tau

    # The common interval is where the unshifted base still lies inside the
    # shifted record.
    overlap = base_time[base_time < shifted_time[-1]]

    # Resample signal 1 on the common interval; clip signal 2 to it.
    truncated1 = np.interp(overlap, shifted_time, signal1)
    truncated2 = signal2[base_time <= overlap[-1]]

    return truncated1, truncated2
def least_squares_variance(A, sum_of_residuals):
    """Returns the variance in the ordinary least squares fit and the
    covariance matrix of the estimated parameters.

    Parameters
    ----------
    A : ndarray, shape(n,d)
        The left hand side matrix in Ax=B.
    sum_of_residuals : float
        The sum of the residuals (residual sum of squares).

    Returns
    -------
    variance : float
        The variance of the fit.
    covariance : ndarray, shape(d,d)
        The covariance of x in Ax = b.

    """
    # I am pretty sure that the residuals from numpy.linalg.lstsq is the SSE
    # (the residual sum of squares).
    degrees_of_freedom = (A.shape[0] - A.shape[1])
    variance = sum_of_residuals / degrees_of_freedom

    # There may be a way to use the pinv here for more efficient
    # computations. (A^T A)^-1 A^T = np.linalg.pinv(A) so np.linalg.pinv(A)
    # (A^T)^-1 ... or maybe not.
    if sparse.issparse(A):
        # BUG FIX: `from scipy import sparse` does not guarantee that
        # `sparse.linalg` is importable as an attribute; import the inverse
        # explicitly so the sparse branch cannot fail with AttributeError.
        from scipy.sparse.linalg import inv
        prod = A.T * A
    else:
        inv = np.linalg.inv
        prod = np.dot(A.T, A)
    covariance = variance * inv(prod)
    return variance, covariance
def coefficient_of_determination(measured, predicted):
    """Computes the coefficient of determination with respect to a measured
    and predicted array.

    Parameters
    ----------
    measured : array_like, shape(n,)
        The observed or measured values.
    predicted : array_like, shape(n,)
        The values predicted by a model.

    Returns
    -------
    r_squared : float
        The coefficient of determination.

    Notes
    -----
    The coefficient of determination [also referred to as R^2 and VAF
    (variance accounted for)] is computed here as::

                  sum( [measured - predicted] ** 2 )
        R^2 = 1 - ---------------------------------------
                  sum( [measured - mean(measured)] ** 2 )

    """
    # Squared 2-norms give the residual and total sums of squares.
    residual_sum_of_squares = np.linalg.norm(measured - predicted) ** 2
    total_sum_of_squares = np.linalg.norm(measured - measured.mean()) ** 2
    return 1.0 - residual_sum_of_squares / total_sum_of_squares
def fit_goodness(ym, yp):
    '''
    Calculate the goodness of fit.

    Parameters
    ----------
    ym : ndarray, shape(n,)
        The vector of measured values.
    yp : ndarry, shape(n,)
        The vector of predicted values.

    Returns
    -------
    rsq : float
        The r squared value of the fit.
    SSE : float
        The error sum of squares.
    SST : float
        The total sum of squares.
    SSR : float
        The regression sum of squares.

    Notes
    -----
    SST = SSR + SSE

    '''
    mean_measured = np.mean(ym)
    # Regression and total sums of squares about the measured mean.
    SSR = np.sum((yp - mean_measured) ** 2)
    SST = np.sum((ym - mean_measured) ** 2)
    # The error sum of squares follows from SST = SSR + SSE.
    SSE = SST - SSR
    return SSR / SST, SSE, SST, SSR
def spline_over_nan(x, y):
    """
    Returns a vector of which a cubic spline is used to fill in gaps in the
    data from nan values.

    Parameters
    ----------
    x : ndarray, shape(n,)
        This x values should not contain nans.
    y : ndarray, shape(n,)
        The y values may contain nans.

    Returns
    -------
    ySpline : ndarray, shape(n,)
        The splined y values. If `y` doesn't contain any nans then `ySpline`
        is `y`.

    Notes
    -----
    The splined data is identical to the input data, except that the nan's
    are replaced by new data from the spline fit.

    """
    # if there are nans in the data then spline away
    if np.isnan(y).any():
        # Idiom fix: boolean mask with ~ instead of `np.isnan(y) == False`
        # plus np.nonzero; plain boolean indexing is equivalent.
        valid = ~np.isnan(y)
        # fit an interpolating (s=0) cubic spline through the valid data
        spline = UnivariateSpline(x[valid], y[valid], k=3, s=0)
        # evaluate on the full x so the nan positions get spline values
        return spline(x)
    else:
        return y
def curve_area_stats(x, y):
    '''
    Return the box plot stats of a curve based on area.

    Parameters
    ----------
    x : ndarray, shape (n,)
        The x values
    y : ndarray, shape (n,m)
        The y values
        n are the time steps
        m are the various curves

    Returns
    -------
    A dictionary containing:
    median : ndarray, shape (m,)
        The x value corresponding to 0.5*area under the curve
    lq : ndarray, shape (m,)
        lower quartile
    uq : ndarray, shape (m,)
        upper quartile
    98p : ndarray, shape (m,)
        98th percentile
    2p : ndarray, shape (m,)
        2nd percentile

    '''
    area = trapz(y, x=x, axis=0) # shape (m,)
    percents = np.array([0.02*area, 0.25*area, 0.5*area, 0.75*area, 0.98*area]) # shape (5,m)
    CumArea = cumtrapz(y.T, x=x.T) # shape(m,n)
    xstats = {'2p':[], 'lq':[], 'median':[], 'uq':[], '98p':[]}
    for j, curve in enumerate(CumArea):
        flags = [False for flag in range(5)]
        # Walk the cumulative area and record the x value at which each
        # percentile threshold is first crossed. Because of the elif chain,
        # at most one threshold can be claimed per sample, so each later
        # percentile is found at least one sample after the previous one.
        for i, val in enumerate(curve):
            if val > percents[0][j] and flags[0] == False:
                xstats['2p'].append(x[i])
                flags[0] = True
            elif val > percents[1][j] and flags[1] == False:
                xstats['lq'].append(x[i])
                flags[1] = True
            elif val > percents[2][j] and flags[2] == False:
                xstats['median'].append(x[i])
                flags[2] = True
            elif val > percents[3][j] and flags[3] == False:
                xstats['uq'].append(x[i])
                flags[3] = True
            elif val > percents[4][j] and flags[4] == False:
                xstats['98p'].append(x[i])
                flags[4] = True
        # NOTE(review): if the 98th-percentile threshold was never crossed,
        # a 0. is appended to EVERY list — including lists that already got
        # a value for this curve — which can leave the result arrays with
        # inconsistent lengths. TODO confirm intended behavior.
        if flags[4] == False:
            # this is what happens if it finds none of the above
            xstats['2p'].append(0.)
            xstats['lq'].append(0.)
            xstats['median'].append(0.)
            xstats['uq'].append(0.)
            xstats['98p'].append(0.)
    # Convert the accumulated per-curve lists to arrays.
    for k, v in xstats.items():
        xstats[k] = np.array(v)
    return xstats
def freq_spectrum(data, sampleRate):
    """
    Return the frequency spectrum of a data set.

    Parameters
    ----------
    data : ndarray, shape (m,) or shape(n,m)
        The array of time signals where n is the number of variables and m
        is the number of time steps.
    sampleRate : int
        The signal sampling rate in hertz.

    Returns
    -------
    frequency : ndarray, shape (p,)
        The frequencies where p is a power of 2 close to m.
    amplitude : ndarray, shape (p,n)
        The amplitude at each frequency.

    """
    def nextpow2(i):
        '''
        Return the next power of 2 for the given number.
        '''
        n = 2
        while n < i:
            n *= 2
        return n

    time = 1. / sampleRate  # sample time
    try:
        L = data.shape[1]  # length of data if (n, m)
    except IndexError:  # narrowed from bare except: 1-D shape has no [1]
        L = data.shape[0]  # length of data if (n,)
    # calculate the closest power of 2 for the length of the data
    n = nextpow2(L)
    Y = fft(data, n) / L  # divide by L for scaling
    f = fftfreq(n, d=time)
    # BUG FIX: `n / 2` is a float on Python 3 and cannot be used as a slice
    # index; integer division is identical on Python 2.
    half = n // 2
    # keep only the positive-frequency half (excluding DC)
    frequency = f[1:half]
    try:
        # multiply by 2 because we take half the vector
        amplitude = 2 * abs(Y[:, 1:half]).T
    except IndexError:  # narrowed from bare except: 1-D Y rejects [:, ...]
        amplitude = 2 * abs(Y[1:half])
    return frequency, amplitude
def butterworth(data, cutoff, samplerate, order=2, axis=-1, btype='lowpass',
                **kwargs):
    """Returns the data filtered by a forward/backward Butterworth filter.

    Parameters
    ----------
    data : ndarray, shape(n,) or shape(n,m)
        The data to filter. Only handles 1D and 2D arrays.
    cutoff : float
        The filter cutoff frequency in hertz.
    samplerate : float
        The sample rate of the data in hertz.
    order : int
        The order of the Butterworth filter.
    axis : int
        The axis to filter along.
    btype : {'lowpass'|'highpass'|'bandpass'|'bandstop'}
        The type of filter. Default is 'lowpass'.
    kwargs : keyword value pairs
        Any extra arguments to get passed to scipy.signal.filtfilt.

    Returns
    -------
    filtered_data : ndarray
        The low pass filtered version of data.

    Notes
    -----
    The cutoff frequency is corrected so that the double (forward/backward)
    pass crosses -3 dB at the requested cutoff, see [Winter2009]_.

    References
    ----------
    .. [Winter2009] David A. Winter (2009) Biomechanics and motor control of
       human movement. 4th edition. Hoboken: Wiley.

    """
    if len(data.shape) > 2:
        raise ValueError('This function only works with 1D or 2D arrays.')

    # filtfilt applies the filter twice, so adjust the cutoff frequency such
    # that |H(w)|^2 hits -3 dB at the requested cutoff:
    #   wa = wc * (sqrt(2) - 1)^(-1/(2n))
    correction_factor = (np.sqrt(2.0) - 1.0) ** (-1.0 / (2.0 * order))
    nyquist_frequency = 0.5 * samplerate
    # butter() wants the cutoff as a fraction of the Nyquist frequency.
    Wn = correction_factor * cutoff / nyquist_frequency
    b, a = butter(order, Wn, btype=btype)

    # SciPy 0.9.0 ships a filtfilt that is 1D-only with no optional
    # arguments; 0.10.0 added the axis argument and kwargs. Stay compatible
    # with 0.9.0 (the SciPy on Ubuntu 12.04 LTS) by checking the version.
    current = LooseVersion(scipy_version)
    if current >= LooseVersion('0.10.0'):
        return filtfilt(b, a, data, axis=axis, **kwargs)
    if current >= LooseVersion('0.9.0'):
        print('SciPy 0.9.0 only supports 1D filtfilt, ' +
              'so you get a slow version.')
        if len(data.shape) == 2:
            # Filter row by row, transposing as needed so rows lie along
            # the requested axis.
            rows = data.T if axis == 0 else data
            result = np.zeros_like(rows)
            for idx, row in enumerate(rows):
                result[idx] = filtfilt(b, a, row)
            return result.T if axis == 0 else result
        return filtfilt(b, a, data)
def subtract_mean(sig, hasNans=False):
    '''
    Subtracts the mean from a signal with nanmean.

    Parameters
    ----------
    sig : ndarray, shape(n,)
    hasNans : boolean, optional
        If your data has nans use this flag if you want to ignore them.

    Returns
    -------
    ndarray, shape(n,)
        sig minus the mean of sig

    '''
    # Pick the nan-aware mean only when the caller asks for it.
    mean_func = nanmean if hasNans else np.mean
    return sig - mean_func(sig)
def normalize(sig, hasNans=False):
    '''
    Normalizes the vector with respect to the maximum value.

    Parameters
    ----------
    sig : ndarray, shape(n,)
    hasNans : boolean, optional
        If your data has nans use this flag if you want to ignore them.

    Returns
    -------
    normSig : ndarray, shape(n,)
        The signal normalized with respect to the maximum value.

    '''
    # TODO : This could be a try/except statement instead of an optional
    # argument.
    peak = np.nanmax(sig) if hasNans else np.max(sig)
    return sig / peak
def derivative(x, y, method='forward', padding=None):
    """Returns the derivative of y with respect to x.

    Parameters
    ----------
    x : ndarray, shape(n,)
        The monotonically increasing independent variable.
    y : ndarray, shape(n,) or shape(n, m)
        The dependent variable(s).
    method : string, optional
        'forward'
            Use the forward difference method.
        'backward'
            Use the backward difference method.
        'central'
            Use the central difference method.
        'combination'
            This is equivalent to ``method='central', padding='second
            order'`` and is in place for backwards compatibility. Selecting
            this method will ignore any user supplied padding settings.
    padding : None, float, 'adjacent' or 'second order', optional
        The default, None, will result in the derivative vector being n-a in
        length where a=1 for forward and backward and a=2 for central. If
        you provide a float this value will be used to pad the result so
        that len(dydx) == n. If 'adjacent' is used, the nearest neighbor
        will be used for padding. If 'second order' is chosen second order
        forward and backward difference are used to pad the end points.

    Returns
    -------
    dydx : ndarray
        The derivative of y with respect to x. Length n when ``padding`` is
        supplied (or method='combination'), otherwise n-1 for forward and
        backward or n-2 for central.

    Raises
    ------
    NotImplementedError
        If ``method`` is not one of the recognized strings.

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # 'combination' is legacy shorthand for central + second order padding.
    if method == 'combination':
        method = 'central'
        padding = 'second order'

    if len(x.shape) > 1:
        raise ValueError('x must be have shape(n,).')

    if len(y.shape) > 2:
        raise ValueError('y can at most have two dimensions.')

    if x.shape[0] != y.shape[0]:
        raise ValueError('x and y must have the same first dimension.')

    # Idiom: membership test instead of a chained `or` of comparisons.
    if method in ('forward', 'backward'):
        if x.shape[0] < 2:
            raise ValueError('x must have a length of at least 2.')
        if len(y.shape) == 1:
            deriv = np.diff(y) / np.diff(x)
        else:
            deriv = (np.diff(y.T) / np.diff(x)).T
    elif method == 'central':
        if x.shape[0] < 3:
            raise ValueError('x must have a length of at least 3.')
        if len(y.shape) == 1:
            deriv = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
        else:
            deriv = ((y[2:] - y[:-2]).T / (x[2:] - x[:-2])).T
    else:
        msg = ("There is no {} method here! Try 'forward', 'backward', "
               "'central', or 'combination'.").format(method)
        raise NotImplementedError(msg)

    if padding is None:
        dydx = deriv
    else:
        dydx = np.zeros_like(y)
        # Fill both end points first; the method-specific slice assignment
        # below overwrites whichever end the difference can actually reach.
        if padding == 'adjacent':
            dydx[0] = deriv[0]
            dydx[-1] = deriv[-1]
        elif padding == 'second order':
            # second order forward/backward differences at the boundaries
            dydx[0] = ((-3.0*y[0] + 4.0*y[1] - y[2]) / 2.0 / (x[1] - x[0]))
            dydx[-1] = ((3.0*y[-1] - 4.0*y[-2] + y[-3]) / 2.0 /
                        (x[-1] - x[-2]))
        else:
            dydx[0] = padding
            dydx[-1] = padding
        if method == 'forward':
            dydx[:-1] = deriv
        elif method == 'backward':
            dydx[1:] = deriv
        elif method == 'central':
            dydx[1:-1] = deriv

    return dydx
def time_vector(num_samples, sample_rate, start_time=0.0):
    """Return an evenly spaced time vector.

    Parameters
    ----------
    num_samples : int
        Total number of samples.
    sample_rate : float
        Sample rate of the signal in hertz.
    start_time : float, optional, default=0.0
        The start time of the time series.

    Returns
    -------
    time : ndarray, shape(num_samples,)
        Time vector beginning at ``start_time`` with ``num_samples`` points
        spaced ``1 / sample_rate`` apart.

    """
    # Duration spans num_samples - 1 intervals of 1 / sample_rate seconds.
    duration = (num_samples - 1) / float(sample_rate)
    return np.linspace(start_time, duration + start_time, num=num_samples)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import _dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class SystemServiceError(ProtocolBuffer.ProtocolMessage):
  """Field-less error message; only defines the ErrorCode enum."""

  # ErrorCode enum values.
  OK = 0
  INTERNAL_ERROR = 1
  BACKEND_REQUIRED = 2
  LIMIT_REACHED = 3

  _ErrorCode_NAMES = {
      0: "OK",
      1: "INTERNAL_ERROR",
      2: "BACKEND_REQUIRED",
      3: "LIMIT_REACHED",
  }

  @classmethod
  def ErrorCode_Name(cls, x):
    # Returns "" for values outside the enum.
    return cls._ErrorCode_NAMES.get(x, "")

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    # All instances compare equal: the message carries no fields.
    return 1

  def IsInitialized(self, debug_strs=None):
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip every field; tag 0 signals malformed input.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)
  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemServiceError'
class SystemStat(ProtocolBuffer.ProtocolMessage):
  """A group of optional double statistics: instantaneous value, 1m/10m
  averages and rates, and a running total."""

  # Presence flag and default value for each field.
  has_current_ = 0
  current_ = 0.0
  has_average1m_ = 0
  average1m_ = 0.0
  has_average10m_ = 0
  average10m_ = 0.0
  has_total_ = 0
  total_ = 0.0
  has_rate1m_ = 0
  rate1m_ = 0.0
  has_rate10m_ = 0
  rate10m_ = 0.0

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def current(self):
    return self.current_

  def set_current(self, x):
    self.has_current_ = 1
    self.current_ = x

  def clear_current(self):
    if self.has_current_:
      self.has_current_ = 0
      self.current_ = 0.0

  def has_current(self):
    return self.has_current_

  def average1m(self):
    return self.average1m_

  def set_average1m(self, x):
    self.has_average1m_ = 1
    self.average1m_ = x

  def clear_average1m(self):
    if self.has_average1m_:
      self.has_average1m_ = 0
      self.average1m_ = 0.0

  def has_average1m(self):
    return self.has_average1m_

  def average10m(self):
    return self.average10m_

  def set_average10m(self, x):
    self.has_average10m_ = 1
    self.average10m_ = x

  def clear_average10m(self):
    if self.has_average10m_:
      self.has_average10m_ = 0
      self.average10m_ = 0.0

  def has_average10m(self):
    return self.has_average10m_

  def total(self):
    return self.total_

  def set_total(self, x):
    self.has_total_ = 1
    self.total_ = x

  def clear_total(self):
    if self.has_total_:
      self.has_total_ = 0
      self.total_ = 0.0

  def has_total(self):
    return self.has_total_

  def rate1m(self):
    return self.rate1m_

  def set_rate1m(self, x):
    self.has_rate1m_ = 1
    self.rate1m_ = x

  def clear_rate1m(self):
    if self.has_rate1m_:
      self.has_rate1m_ = 0
      self.rate1m_ = 0.0

  def has_rate1m(self):
    return self.has_rate1m_

  def rate10m(self):
    return self.rate10m_

  def set_rate10m(self, x):
    self.has_rate10m_ = 1
    self.rate10m_ = x

  def clear_rate10m(self):
    if self.has_rate10m_:
      self.has_rate10m_ = 0
      self.rate10m_ = 0.0

  def has_rate10m(self):
    return self.has_rate10m_

  def MergeFrom(self, x):
    """Copy every present field of x into self."""
    assert x is not self
    if x.has_current(): self.set_current(x.current())
    if x.has_average1m(): self.set_average1m(x.average1m())
    if x.has_average10m(): self.set_average10m(x.average10m())
    if x.has_total(): self.set_total(x.total())
    if x.has_rate1m(): self.set_rate1m(x.rate1m())
    if x.has_rate10m(): self.set_rate10m(x.rate10m())

  def Equals(self, x):
    """Equality requires matching presence flags and matching values."""
    if x is self: return 1
    for field in ('current_', 'average1m_', 'average10m_', 'total_',
                  'rate1m_', 'rate10m_'):
      flag = 'has_' + field
      if getattr(self, flag) != getattr(x, flag): return 0
      if getattr(self, flag) and getattr(self, field) != getattr(x, field):
        return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Every field is optional, so the message is always initialized.
    return 1

  def ByteSize(self):
    # Each present double costs 1 tag byte + 8 payload bytes.
    return 9 * (self.has_current_ + self.has_average1m_ +
                self.has_average10m_ + self.has_total_ +
                self.has_rate1m_ + self.has_rate10m_)

  def ByteSizePartial(self):
    # No required fields exist, so the partial size equals the full size.
    return self.ByteSize()

  def Clear(self):
    self.clear_current()
    self.clear_average1m()
    self.clear_average10m()
    self.clear_total()
    self.clear_rate1m()
    self.clear_rate10m()

  def _OutputFields(self, out):
    # Wire tags are (field_number << 3) | DOUBLE, emitted in tag order:
    # current(1), total(2), average1m(3), average10m(4), rate1m(5),
    # rate10m(6).
    for present, tag, value in (
        (self.has_current_, 9, self.current_),
        (self.has_total_, 17, self.total_),
        (self.has_average1m_, 25, self.average1m_),
        (self.has_average10m_, 33, self.average10m_),
        (self.has_rate1m_, 41, self.rate1m_),
        (self.has_rate10m_, 49, self.rate10m_)):
      if present:
        out.putVarInt32(tag)
        out.putDouble(value)

  def OutputUnchecked(self, out):
    self._OutputFields(out)

  def OutputPartial(self, out):
    # Identical to OutputUnchecked: all fields are optional doubles.
    self._OutputFields(out)

  def TryMerge(self, d):
    """Decode known double fields from d, skipping unknown tags."""
    setters = {
        9: self.set_current,
        17: self.set_total,
        25: self.set_average1m,
        33: self.set_average10m,
        41: self.set_rate1m,
        49: self.set_rate10m,
    }
    while d.avail() > 0:
      tag = d.getVarInt32()
      setter = setters.get(tag)
      if setter is not None:
        setter(d.getDouble())
        continue
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    res = ""
    for label, present, value in (
        ("current", self.has_current_, self.current_),
        ("average1m", self.has_average1m_, self.average1m_),
        ("average10m", self.has_average10m_, self.average10m_),
        ("total", self.has_total_, self.total_),
        ("rate1m", self.has_rate1m_, self.rate1m_),
        ("rate10m", self.has_rate10m_, self.rate10m_)):
      if present:
        res += prefix + ("%s: %s\n" % (label, self.DebugFormat(value)))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  # Field numbers.
  kcurrent = 1
  kaverage1m = 3
  kaverage10m = 4
  ktotal = 2
  krate1m = 5
  krate10m = 6

  _TEXT = _BuildTagLookupTable({
      0: "ErrorCode",
      1: "current",
      2: "total",
      3: "average1m",
      4: "average10m",
      5: "rate1m",
      6: "rate10m",
  }, 6)

  _TYPES = _BuildTagLookupTable({
      0: ProtocolBuffer.Encoder.NUMERIC,
      1: ProtocolBuffer.Encoder.DOUBLE,
      2: ProtocolBuffer.Encoder.DOUBLE,
      3: ProtocolBuffer.Encoder.DOUBLE,
      4: ProtocolBuffer.Encoder.DOUBLE,
      5: ProtocolBuffer.Encoder.DOUBLE,
      6: ProtocolBuffer.Encoder.DOUBLE,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SystemStat'
class GetSystemStatsRequest(ProtocolBuffer.ProtocolMessage):
  """Empty request message for the GetSystemStats call."""

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    # All instances compare equal: the message carries no fields.
    return 1

  def IsInitialized(self, debug_strs=None):
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip every field; tag 0 signals malformed input.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)
  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsRequest'
class GetSystemStatsResponse(ProtocolBuffer.ProtocolMessage):
  """Response carrying optional cpu and memory SystemStat sub-messages."""

  has_cpu_ = 0
  cpu_ = None
  has_memory_ = 0
  memory_ = None

  def __init__(self, contents=None):
    # Guards lazy construction of the sub-message singletons below.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None:
      self.MergeFromString(contents)

  def cpu(self):
    # Lazily create the sub-message under the lock (double-checked).
    if self.cpu_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.cpu_ is None:
          self.cpu_ = SystemStat()
      finally:
        self.lazy_init_lock_.release()
    return self.cpu_

  def mutable_cpu(self):
    self.has_cpu_ = 1
    return self.cpu()

  def clear_cpu(self):
    # The sub-message object is kept but emptied.
    if self.has_cpu_:
      self.has_cpu_ = 0
      if self.cpu_ is not None:
        self.cpu_.Clear()

  def has_cpu(self):
    return self.has_cpu_

  def memory(self):
    # Lazily create the sub-message under the lock (double-checked).
    if self.memory_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.memory_ is None:
          self.memory_ = SystemStat()
      finally:
        self.lazy_init_lock_.release()
    return self.memory_

  def mutable_memory(self):
    self.has_memory_ = 1
    return self.memory()

  def clear_memory(self):
    if self.has_memory_:
      self.has_memory_ = 0
      if self.memory_ is not None:
        self.memory_.Clear()

  def has_memory(self):
    return self.has_memory_

  def MergeFrom(self, x):
    assert x is not self
    if x.has_cpu():
      self.mutable_cpu().MergeFrom(x.cpu())
    if x.has_memory():
      self.mutable_memory().MergeFrom(x.memory())

  def Equals(self, x):
    if x is self: return 1
    if self.has_cpu_ != x.has_cpu_: return 0
    if self.has_cpu_ and self.cpu_ != x.cpu_: return 0
    if self.has_memory_ != x.has_memory_: return 0
    if self.has_memory_ and self.memory_ != x.memory_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Check both sub-messages (no early return) so debug_strs can collect
    # every problem, mirroring generated-protobuf behavior.
    initialized = 1
    if self.has_cpu_ and not self.cpu_.IsInitialized(debug_strs):
      initialized = 0
    if self.has_memory_ and not self.memory_.IsInitialized(debug_strs):
      initialized = 0
    return initialized

  def ByteSize(self):
    size = 0
    if self.has_cpu_:
      size += 1 + self.lengthString(self.cpu_.ByteSize())
    if self.has_memory_:
      size += 1 + self.lengthString(self.memory_.ByteSize())
    return size

  def ByteSizePartial(self):
    size = 0
    if self.has_cpu_:
      size += 1 + self.lengthString(self.cpu_.ByteSizePartial())
    if self.has_memory_:
      size += 1 + self.lengthString(self.memory_.ByteSizePartial())
    return size

  def Clear(self):
    self.clear_cpu()
    self.clear_memory()

  def OutputUnchecked(self, out):
    # Tags 10 and 18 are (1 << 3) | STRING and (2 << 3) | STRING.
    if self.has_cpu_:
      out.putVarInt32(10)
      out.putVarInt32(self.cpu_.ByteSize())
      self.cpu_.OutputUnchecked(out)
    if self.has_memory_:
      out.putVarInt32(18)
      out.putVarInt32(self.memory_.ByteSize())
      self.memory_.OutputUnchecked(out)

  def OutputPartial(self, out):
    if self.has_cpu_:
      out.putVarInt32(10)
      out.putVarInt32(self.cpu_.ByteSizePartial())
      self.cpu_.OutputPartial(out)
    if self.has_memory_:
      out.putVarInt32(18)
      out.putVarInt32(self.memory_.ByteSizePartial())
      self.memory_.OutputPartial(out)

  def TryMerge(self, d):
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag in (10, 18):
        # Length-delimited sub-message: decode from a bounded view.
        length = d.getVarInt32()
        sub = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        if tag == 10:
          self.mutable_cpu().TryMerge(sub)
        else:
          self.mutable_memory().TryMerge(sub)
        continue
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    res = ""
    if self.has_cpu_:
      res += prefix + "cpu <\n"
      res += self.cpu_.__str__(prefix + " ", printElemNumber)
      res += prefix + ">\n"
    if self.has_memory_:
      res += prefix + "memory <\n"
      res += self.memory_.__str__(prefix + " ", printElemNumber)
      res += prefix + ">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  # Field numbers.
  kcpu = 1
  kmemory = 2

  _TEXT = _BuildTagLookupTable({
      0: "ErrorCode",
      1: "cpu",
      2: "memory",
  }, 2)

  _TYPES = _BuildTagLookupTable({
      0: ProtocolBuffer.Encoder.NUMERIC,
      1: ProtocolBuffer.Encoder.STRING,
      2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.GetSystemStatsResponse'
class StartBackgroundRequestRequest(ProtocolBuffer.ProtocolMessage):
  """Empty request message for the StartBackgroundRequest call."""

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    # All instances compare equal: the message carries no fields.
    return 1

  def IsInitialized(self, debug_strs=None):
    return 1

  def ByteSize(self):
    return 0

  def ByteSizePartial(self):
    return 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip every field; tag 0 signals malformed input.
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    return ""

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  _TEXT = _BuildTagLookupTable({0: "ErrorCode"}, 0)
  _TYPES = _BuildTagLookupTable({0: ProtocolBuffer.Encoder.NUMERIC}, 0,
                                ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestRequest'
class StartBackgroundRequestResponse(ProtocolBuffer.ProtocolMessage):
  """Response carrying the id assigned to a new background request."""

  has_request_id_ = 0
  request_id_ = ""

  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)

  def request_id(self):
    return self.request_id_

  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x

  def clear_request_id(self):
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""

  def has_request_id(self):
    return self.has_request_id_

  def MergeFrom(self, x):
    assert x is not self
    if x.has_request_id():
      self.set_request_id(x.request_id())

  def Equals(self, x):
    if x is self: return 1
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # The only field is optional.
    return 1

  def ByteSize(self):
    # One tag byte plus the length-prefixed string when present.
    if self.has_request_id_:
      return 1 + self.lengthString(len(self.request_id_))
    return 0

  def ByteSizePartial(self):
    # No required fields exist, so the partial size equals the full size.
    return self.ByteSize()

  def Clear(self):
    self.clear_request_id()

  def _OutputFields(self, out):
    # Tag 10 is (1 << 3) | STRING.
    if self.has_request_id_:
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)

  def OutputUnchecked(self, out):
    self._OutputFields(out)

  def OutputPartial(self, out):
    self._OutputFields(out)

  def TryMerge(self, d):
    while d.avail() > 0:
      tag = d.getVarInt32()
      if tag == 10:
        self.set_request_id(d.getPrefixedString())
        continue
      if tag == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tag)

  def __str__(self, prefix="", printElemNumber=0):
    if not self.has_request_id_:
      return ""
    return prefix + ("request_id: %s\n" % self.DebugFormatString(self.request_id_))

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1 + maxtag)])

  krequest_id = 1

  _TEXT = _BuildTagLookupTable({
      0: "ErrorCode",
      1: "request_id",
  }, 1)

  _TYPES = _BuildTagLookupTable({
      0: ProtocolBuffer.Encoder.NUMERIC,
      1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.StartBackgroundRequestResponse'
# No extendable messages are defined in this module, so there is nothing to
# register with the extension runtime.
if _extension_runtime:
  pass

__all__ = ['SystemServiceError','SystemStat','GetSystemStatsRequest','GetSystemStatsResponse','StartBackgroundRequestRequest','StartBackgroundRequestResponse']
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
def _GetTransposedMatrices(x, x_name, kwargs):
if kwargs["transpose_" + x_name] is True:
return x.T
elif kwargs["adjoint_" + x_name] is True:
return np.conj(x.T)
else:
return x
class MatMulTest(test_lib.TestCase):
  pass  # Filled in below by _AddTest with methods from _GetMatMulTest.
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
  """Build a test method comparing math_ops.matmul against a NumPy product.

  Args:
    a_np_: numpy array for the left operand, shape [m, k].
    b_np_: numpy array for the right operand, shape [k, n].
    use_static_shape_: if True, feed constants with static shapes; otherwise
      feed through placeholders of unknown shape.
    **kwargs_: transpose_a/transpose_b/adjoint_a/adjoint_b flags forwarded to
      math_ops.matmul.

  Returns:
    A test function suitable for installing via _AddTest.
  """

  def Test(self):
    # TODO(rmlarsen): Re-enable this test when we have fixed the failure on
    # Windows.
    if not use_static_shape_ and a_np_.dtype is np.int32:
      self.skipTest("Skipping test to avoid failure on Windows.")

    # Reference value. np.dot on 2-D arrays is the plain matrix product;
    # np.matrix (used previously) is deprecated by NumPy.
    np_val = np.dot(a_np_, b_np_)

    use_gpu = True
    if a_np_.dtype is np.float16 and (
        not test_util.CudaSupportsHalfMatMulAndConv()):
      use_gpu = False
      print("Built without fp16 matmul support for Cuda, running test on CPU.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.dot(a_np_, b_np_).
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
    with self.test_session(use_gpu=use_gpu) as sess:
      if use_static_shape_:
        a = constant_op.constant(effective_a_np)
        b = constant_op.constant(effective_b_np)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = res.eval()
      else:
        a = array_ops.placeholder(a_np_.dtype)
        b = array_ops.placeholder(b_np_.dtype)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
      # Loose tolerances for half precision; tighter for float32/64.
      self.assertAllCloseAccordingToType(
          tf_val,
          np_val,
          float_rtol=1e-5,
          float_atol=1e-5,
          half_rtol=0.1,
          half_atol=0.1)

  return Test
class MatMulGradientTest(test_lib.TestCase):
  pass  # Filled in below by _AddTest with methods from _GetMatMulGradientTest.
def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
  """Build a test method that numerically checks matmul gradients.

  Args:
    a_np_: numpy array for the left operand.
    b_np_: numpy array for the right operand.
    use_static_shape_: gradient checks only run with static shapes.
    **kwargs_: transpose_a/transpose_b/adjoint_a/adjoint_b flags forwarded
      to math_ops.matmul.

  Returns:
    A test function suitable for installing via _AddTest.
  """
  def Test(self):
    # Gradient checking is skipped for integer dtypes, for float16 (too
    # inaccurate), and for dynamic shapes.
    if not use_static_shape_ or a_np_.dtype in (np.int32, np.float16):
      self.skipTest("Skipping infeasible gradient test.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)

    # Finite-difference step: eps**(1/3) roughly balances truncation error
    # against round-off error for a central difference.
    epsilon = np.finfo(a_np_.dtype).eps
    delta = epsilon**(1.0 / 3.0)
    tol = 20 * delta
    with self.test_session(use_gpu=True):
      a = constant_op.constant(effective_a_np)
      b = constant_op.constant(effective_b_np)
      res = math_ops.matmul(a, b, **kwargs_)
      # Check the gradient with respect to each operand in turn.
      for x, x_init in [a, effective_a_np], [b, effective_b_np]:
        theoretical, numerical = gradient_checker.compute_gradient(
            x,
            x_init.shape,
            res, [a_np_.shape[0], b_np_.shape[1]],
            x_init_value=x_init,
            delta=delta)
        self.assertAllClose(theoretical, numerical, rtol=tol, atol=tol)
  return Test
class MatMulStatsTest(test_lib.TestCase):
  """Checks the registered flops statistics for MatMul graph nodes."""

  def testSimpleStatistics(self):
    g = ops.Graph()
    with g.as_default():
      a = variables.Variable(random_ops.random_normal([25, 16]))
      b = variables.Variable(random_ops.random_normal([16, 9]))
      math_ops.matmul(a, b)
      for op in g.get_operations():
        flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
        if op.name == "MatMul":
          # 25 * 16 * 9 multiply-adds = 2 * 25 * 16 * 9 = 7200 flops.
          self.assertEqual(7200, flops)

  def testTransposedStatistics(self):
    g = ops.Graph()
    with g.as_default():
      a = variables.Variable(random_ops.random_normal([16, 25]))
      b = variables.Variable(random_ops.random_normal([16, 9]))
      math_ops.matmul(a, b, transpose_a=True)
      for op in g.get_operations():
        flops = ops.get_stats_for_node_def(g, op.node_def, "flops").value
        if op.name == "MatMul":
          # Same product after transposing a: 2 * 25 * 16 * 9 = 7200 flops.
          self.assertEqual(7200, flops)
if __name__ == "__main__":
  # Generate one MatMulTest and one MatMulGradientTest case for every
  # combination of dtype, operand shape (m, k) x (k, n), transpose/adjoint
  # attributes, and static vs. dynamic shapes. Each case is bound eagerly
  # through _GetMatMulTest/_GetMatMulGradientTest arguments, so there is no
  # late-binding closure problem.
  sizes = [1, 3, 5]
  trans_options = [[False, False], [True, False], [False, True]]
  for dtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
                np.complex128):
    for m in sizes:
      for n in sizes:
        for k in sizes:
          # Construct compatible random matrices a_np of size [m, k] and b_np
          # of size [k, n].
          a_np = np.random.normal(-5, 5, m * k).astype(dtype).reshape([m, k])
          if dtype in (np.complex64, np.complex128):
            a_np.imag = np.random.normal(-5, 5,
                                         m * k).astype(dtype).reshape([m, k])
          b_np = np.random.normal(-5, 5, k * n).astype(dtype).reshape([k, n])
          if dtype in (np.complex64, np.complex128):
            b_np.imag = np.random.normal(-5, 5,
                                         k * n).astype(dtype).reshape([k, n])
          for adjoint_a, transpose_a in trans_options:
            for adjoint_b, transpose_b in trans_options:
              for use_static_shape in [False, True]:
                # Encode every parameter in the generated test name so
                # failures identify the exact configuration.
                name = "%s_%s_%s_%s_%s_%s_%s_%s_%s" % (
                    dtype.__name__, m, n, k, adjoint_a, transpose_a, adjoint_b,
                    transpose_b, use_static_shape)
                _AddTest(MatMulTest, "MatMulTest", name,
                         _GetMatMulTest(
                             a_np,
                             b_np,
                             use_static_shape,
                             adjoint_a=adjoint_a,
                             transpose_a=transpose_a,
                             adjoint_b=adjoint_b,
                             transpose_b=transpose_b))
                _AddTest(MatMulGradientTest, "MatMulGradientTest", name,
                         _GetMatMulGradientTest(
                             a_np,
                             b_np,
                             use_static_shape,
                             adjoint_a=adjoint_a,
                             transpose_a=transpose_a,
                             adjoint_b=adjoint_b,
                             transpose_b=transpose_b))
  test_lib.main()
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Util functions and classes for cloudstorage_api."""
# Public API of this module; the remaining helpers are internal.
__all__ = ['set_default_retry_params',
           'RetryParams',
          ]
import copy
import httplib
import logging
import math
import os
import threading
import time
import urllib
try:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
except ImportError:
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.datastore import datastore_rpc
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import tasklets
from google.appengine.ext.ndb import utils
# Transient failure types that are safe to retry automatically.
_RETRIABLE_EXCEPTIONS = (urlfetch.DownloadError,
                         urlfetch_errors.InternalTransientError,
                         apiproxy_errors.Error,
                         app_identity.InternalError,
                         app_identity.BackendDeadlineExceeded)

# Per-thread storage for the default RetryParams of the current request.
_thread_local_settings = threading.local()
_thread_local_settings.default_retry_params = None
def set_default_retry_params(retry_params):
  """Install a default RetryParams for the current thread and request.

  A private copy is stored so that later mutation of the caller's instance
  does not change the default.
  """
  params_copy = copy.copy(retry_params)
  _thread_local_settings.default_retry_params = params_copy
def _get_default_retry_params():
  """Get default RetryParams for current request and current thread.

  Returns:
    A new instance of the default RetryParams. Falls back to a fresh
    RetryParams when no default was set, or when the stored default was
    created during a different request.
  """
  default = getattr(_thread_local_settings, 'default_retry_params', None)
  if default is None or not default.belong_to_current_request():
    return RetryParams()
  return copy.copy(default)
def _quote_filename(filename):
  """Percent-encode a '/bucket/filename' path for use in a URI.

  Args:
    filename: user provided filename. /bucket/filename.

  Returns:
    The filename properly quoted to use as URI's path component.
  """
  quoted = urllib.quote(filename)
  return quoted
def _unquote_filename(filename):
  """Decode a percent-encoded URI path back to its filename.

  This is the opposite of _quote_filename.

  Args:
    filename: a quoted filename. /bucket/some%20filename.

  Returns:
    The filename unquoted.
  """
  unquoted = urllib.unquote(filename)
  return unquoted
def _should_retry(resp):
  """Return whether a urlfetch response warrants a retry.

  Retries on 408 (request timeout) and on any 5xx server error.
  """
  status = resp.status_code
  return status == httplib.REQUEST_TIMEOUT or 500 <= status < 600
class _RetryWrapper(object):
  """A wrapper that wraps retry logic around any tasklet."""

  def __init__(self,
               retry_params,
               retriable_exceptions=_RETRIABLE_EXCEPTIONS,
               should_retry=lambda r: False):
    """Init.

    Args:
      retry_params: an RetryParams instance.
      retriable_exceptions: a list of exception classes that are retriable.
      should_retry: a function that takes a result from the tasklet and returns
        a boolean. True if the result should be retried.
    """
    self.retry_params = retry_params
    self.retriable_exceptions = retriable_exceptions
    self.should_retry = should_retry

  @ndb.tasklet
  def run(self, tasklet, **kwds):
    """Run a tasklet with retry.

    The retry should be transparent to the caller: if no results
    are successful, the exception or result from the last retry is returned
    to the caller.

    Args:
      tasklet: the tasklet to run.
      **kwds: keywords arguments to run the tasklet.

    Raises:
      The exception from running the tasklet.

    Returns:
      The result from running the tasklet.
    """
    start_time = time.time()
    n = 1
    while True:
      e = None
      result = None
      got_result = False
      try:
        result = yield tasklet(**kwds)
        got_result = True
        # A successful call may still need a retry if the caller's
        # should_retry predicate rejects the result (e.g. a 5xx response).
        if not self.should_retry(result):
          raise ndb.Return(result)
      except runtime.DeadlineExceededError:
        # The whole request is out of time; never swallow this.
        logging.debug(
            'Tasklet has exceeded request deadline after %s seconds total',
            time.time() - start_time)
        raise
      except self.retriable_exceptions, e:
        # Transient failure: fall through to the backoff logic below.
        pass
      if n == 1:
        logging.debug('Tasklet is %r', tasklet)
      delay = self.retry_params.delay(n, start_time)
      if delay <= 0:
        # Retry budget exhausted: surface the last result or exception.
        logging.debug(
            'Tasklet failed after %s attempts and %s seconds in total',
            n, time.time() - start_time)
        if got_result:
          raise ndb.Return(result)
        elif e is not None:
          raise e
        else:
          assert False, 'Should never reach here.'
      if got_result:
        logging.debug(
            'Got result %r from tasklet.', result)
      else:
        logging.debug(
            'Got exception "%r" from tasklet.', e)
      logging.debug('Retry in %s seconds.', delay)
      n += 1
      # Non-blocking sleep so other tasklets can run during the backoff.
      yield tasklets.sleep(delay)
class RetryParams(object):
"""Retry configuration parameters."""
_DEFAULT_USER_AGENT = 'App Engine Python GCS Client'
@datastore_rpc._positional(1)
def __init__(self,
backoff_factor=2.0,
initial_delay=0.1,
max_delay=10.0,
min_retries=3,
max_retries=6,
max_retry_period=30.0,
urlfetch_timeout=None,
save_access_token=False,
_user_agent=None,
memcache_access_token=True):
"""Init.
This object is unique per request per thread.
Library will retry according to this setting when App Engine Server
can't call urlfetch, urlfetch timed out, or urlfetch got a 408 or
500-600 response.
Args:
backoff_factor: exponential backoff multiplier.
initial_delay: seconds to delay for the first retry.
max_delay: max seconds to delay for every retry.
min_retries: min number of times to retry. This value is automatically
capped by max_retries.
max_retries: max number of times to retry. Set this to 0 for no retry.
max_retry_period: max total seconds spent on retry. Retry stops when
this period passed AND min_retries has been attempted.
urlfetch_timeout: timeout for urlfetch in seconds. Could be None,
in which case the value will be chosen by urlfetch module.
save_access_token: persist access token to datastore to avoid
excessive usage of GetAccessToken API. In addition to this, the token
will be cached in process, and may also be cached in memcache (see
memcache_access_token param). However, storing in Datastore can still
be useful in the event that memcache is unavailable.
_user_agent: The user agent string that you want to use in your requests.
memcache_access_token: cache access token in memcache to avoid excessive
usage of GetAccessToken API.
"""
self.backoff_factor = self._check('backoff_factor', backoff_factor)
self.initial_delay = self._check('initial_delay', initial_delay)
self.max_delay = self._check('max_delay', max_delay)
self.max_retry_period = self._check('max_retry_period', max_retry_period)
self.max_retries = self._check('max_retries', max_retries, True, int)
self.min_retries = self._check('min_retries', min_retries, True, int)
if self.min_retries > self.max_retries:
self.min_retries = self.max_retries
self.urlfetch_timeout = None
if urlfetch_timeout is not None:
self.urlfetch_timeout = self._check('urlfetch_timeout', urlfetch_timeout)
self.save_access_token = self._check('save_access_token', save_access_token,
True, bool)
self.memcache_access_token = self._check('memcache_access_token',
memcache_access_token,
True,
bool)
self._user_agent = _user_agent or self._DEFAULT_USER_AGENT
self._request_id = os.getenv('REQUEST_LOG_ID')
def __eq__(self, other):
    """Equal iff *other* is exactly the same class with identical attributes."""
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inverse of __eq__ (must be spelled out explicitly on Python 2)."""
    return not (self == other)
@classmethod
def _check(cls, name, val, can_be_zero=False, val_type=float):
    """Validate a numeric init argument.

    Args:
      name: name of the argument (used only in error messages).
      val: the value; must be a non-negative number.
      can_be_zero: whether 0 is an acceptable value.
      val_type: expected Python type (float also admits int).

    Returns:
      The validated value.

    Raises:
      TypeError: when the value has the wrong type.
      ValueError: when the value is negative, or 0 while not allowed.
    """
    allowed_types = [val_type]
    if val_type is float:
        allowed_types.append(int)
    # Exact type() comparison: subclasses (e.g. bool for int) are rejected.
    if type(val) not in allowed_types:
        raise TypeError(
            'Expect type %s for parameter %s' % (val_type.__name__, name))
    if val < 0:
        raise ValueError(
            'Value for parameter %s has to be greater than 0' % name)
    if val == 0 and not can_be_zero:
        raise ValueError(
            'Value for parameter %s can not be 0' % name)
    return val
def belong_to_current_request(self):
    """True iff we are still inside the request that created this object."""
    return self._request_id == os.getenv('REQUEST_LOG_ID')
def delay(self, n, start_time):
    """Calculate delay before the next retry.

    Args:
      n: number of the current attempt (the first attempt is 1).
      start_time: unix time when retrying started.

    Returns:
      Seconds to wait before the next retry; -1 when retrying should stop.
    """
    exhausted_retries = n > self.max_retries
    exhausted_period = (n > self.min_retries and
                        time.time() - start_time > self.max_retry_period)
    if exhausted_retries or exhausted_period:
        return -1
    # Exponential backoff, capped at max_delay.
    backoff = self.initial_delay * math.pow(self.backoff_factor, n - 1)
    return min(backoff, self.max_delay)
def _run_until_rpc():
    """Eagerly evaluate tasklets until it is blocking on some RPC.

    Drains the ndb event loop's `current` callback queue: each run0() call
    advances a tasklet one step, so by the time the queue is empty every
    tasklet is either finished or waiting on a pending RPC.
    """
    loop = eventloop.get_event_loop()
    while loop.current:
        loop.run0()
def _eager_tasklet(tasklet):
    """Decorator to turn tasklet to run eagerly."""
    @utils.wrapping(tasklet)
    def eager_wrapper(*args, **kwds):
        # Start the tasklet, then drive the event loop until it blocks on RPC.
        result_future = tasklet(*args, **kwds)
        _run_until_rpc()
        return result_future

    return eager_wrapper
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import stat
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkConf, SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
    """Tests for RDD.checkpoint() against a temporary checkpoint directory."""

    def setUp(self):
        # NamedTemporaryFile is used only to reserve a unique path; the file
        # itself is removed so Spark can create the directory at that path.
        self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)

    def tearDown(self):
        shutil.rmtree(self.checkpointDir.name)

    def test_basic_checkpointing(self):
        """Checkpointing marks the RDD and writes it under the checkpoint dir."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
        self.assertFalse(self.sc.getCheckpointDir() is None)
        flatMappedRDD.checkpoint()
        result = flatMappedRDD.collect()
        time.sleep(1)  # 1 second; checkpointing happens after the job finishes
        self.assertTrue(flatMappedRDD.isCheckpointed())
        self.assertEqual(flatMappedRDD.collect(), result)
        self.assertEqual(
            "file:" + self.checkpointDir.name,
            os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())),
        )
        self.assertEqual(
            self.sc.getCheckpointDir(), os.path.dirname(flatMappedRDD.getCheckpointFile())
        )

    def test_checkpoint_and_restore(self):
        """A checkpointed RDD can be re-read via sc._checkpointFile()."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: [x])
        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
        flatMappedRDD.checkpoint()
        flatMappedRDD.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second
        self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
        recovered = self.sc._checkpointFile(
            flatMappedRDD.getCheckpointFile(), flatMappedRDD._jrdd_deserializer
        )
        self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
    """Tests for RDD.localCheckpoint()."""

    def test_basic_localcheckpointing(self):
        """Locally checkpointing marks the RDD without changing its contents."""
        rdd = self.sc.parallelize([1, 2, 3, 4]).flatMap(lambda x: range(1, x + 1))
        self.assertFalse(rdd.isCheckpointed())
        self.assertFalse(rdd.isLocallyCheckpointed())
        rdd.localCheckpoint()
        expected = rdd.collect()
        time.sleep(1)  # 1 second; checkpointing happens after the job finishes
        self.assertTrue(rdd.isCheckpointed())
        self.assertTrue(rdd.isLocallyCheckpointed())
        self.assertEqual(rdd.collect(), expected)
class AddFileTests(PySparkTestCase):
    """Tests for SparkContext.addFile / addPyFile distribution mechanics."""

    def test_add_py_file(self):
        """addPyFile makes a module importable inside executor tasks."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        # disable logging in log4j temporarily
        def func(x):
            from userlibrary import UserClass  # type: ignore
            return UserClass().hello()
        with QuietTest(self.sc):
            self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        res = self.sc.parallelize(range(2)).map(func).first()
        self.assertEqual("Hello World!", res)

    def test_add_file_locally(self):
        """addFile copies the file somewhere SparkFiles.get() can find it."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        self.sc.addFile(path)
        download_path = SparkFiles.get("hello.txt")
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())

    def test_add_file_recursively_locally(self):
        """addFile(..., recursive=True) copies a whole directory tree."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello")
        self.sc.addFile(path, True)
        download_path = SparkFiles.get("hello")
        self.assertNotEqual(path, download_path)
        with open(download_path + "/hello.txt") as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())
        with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
            self.assertEqual("Sub Hello World!\n", test_file.readline())

    def test_add_py_file_locally(self):
        """addPyFile also makes the module importable on the driver."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlibrary import UserClass  # noqa: F401
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())

    def test_add_egg_file_locally(self):
        """addPyFile accepts zip/egg archives, importable on the driver."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlib import UserClass  # type: ignore[import]
            UserClass()
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
        self.sc.addPyFile(path)
        from userlib import UserClass
        self.assertEqual("Hello World from inside a package!", UserClass().hello())

    def test_overwrite_system_module(self):
        """A user-supplied module shadows a same-named module on driver and executors."""
        self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
        import SimpleHTTPServer  # type: ignore[import]
        self.assertEqual("My Server", SimpleHTTPServer.__name__)
        def func(x):
            import SimpleHTTPServer  # type: ignore[import]
            return SimpleHTTPServer.__name__
        self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
    """SparkContext lifecycle and misc behaviors (each test creates its own context)."""

    def test_failed_sparkcontext_creation(self):
        # Regression test for SPARK-1550
        self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))

    def test_get_or_create(self):
        """getOrCreate() returns the one active context."""
        with SparkContext.getOrCreate() as sc:
            self.assertTrue(SparkContext.getOrCreate() is sc)

    def test_parallelize_eager_cleanup(self):
        """parallelize() must not leave serialized temp files behind."""
        with SparkContext() as sc:
            temp_files = os.listdir(sc._temp_dir)
            sc.parallelize([0, 1, 2])
            post_parallelize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallelize_temp_files)

    def test_set_conf(self):
        # This is for an internal use case. When there is an existing SparkContext,
        # SparkSession's builder needs to set configs into SparkContext's conf.
        sc = SparkContext()
        sc._conf.set("spark.test.SPARK16224", "SPARK16224")
        self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
        sc.stop()

    def test_stop(self):
        """stop() clears the process-wide active context."""
        sc = SparkContext()
        self.assertNotEqual(SparkContext._active_spark_context, None)
        sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with(self):
        """Leaving the `with` block stops the context."""
        with SparkContext():
            self.assertNotEqual(SparkContext._active_spark_context, None)
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_exception(self):
        """The context is stopped even when the `with` body raises."""
        try:
            with SparkContext():
                self.assertNotEqual(SparkContext._active_spark_context, None)
                raise RuntimeError()
        except BaseException:
            pass
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_stop(self):
        """An explicit stop() inside the `with` block is harmless."""
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
            sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_progress_api(self):
        """statusTracker reports a running job group, then FAILED after cancel."""
        with SparkContext() as sc:
            sc.setJobGroup("test_progress_api", "", True)
            # Each task sleeps 100s so the job is still running when inspected.
            rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))

            def run():
                # When thread is pinned, job group should be set for each thread for now.
                # Local properties seem not being inherited like Scala side does.
                if os.environ.get("PYSPARK_PIN_THREAD", "true").lower() == "true":
                    sc.setJobGroup("test_progress_api", "", True)
                try:
                    rdd.count()
                except Exception:
                    pass  # expected: the job is cancelled below

            t = threading.Thread(target=run)
            t.daemon = True
            t.start()
            # wait for scheduler to start
            time.sleep(3)
            tracker = sc.statusTracker()
            jobIds = tracker.getJobIdsForGroup("test_progress_api")
            self.assertEqual(1, len(jobIds))
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual(1, len(job.stageIds))
            stage = tracker.getStageInfo(job.stageIds[0])
            self.assertEqual(rdd.getNumPartitions(), stage.numTasks)

            sc.cancelAllJobs()
            t.join()
            # wait for event listener to update the status
            time.sleep(1)
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual("FAILED", job.status)
            self.assertEqual([], tracker.getActiveJobsIds())
            self.assertEqual([], tracker.getActiveStageIds())

            sc.stop()

    def test_startTime(self):
        with SparkContext() as sc:
            self.assertGreater(sc.startTime, 0)

    def test_forbid_insecure_gateway(self):
        # Fail immediately if you try to create a SparkContext
        # with an insecure gateway
        parameters = namedtuple("MockGatewayParameters", "auth_token")(None)
        mock_insecure_gateway = namedtuple("MockJavaGateway", "gateway_parameters")(parameters)
        with self.assertRaises(ValueError) as context:
            SparkContext(gateway=mock_insecure_gateway)
        self.assertIn("insecure Py4j gateway", str(context.exception))

    def test_resources(self):
        """Test the resources are empty by default."""
        with SparkContext() as sc:
            resources = sc.resources
            self.assertEqual(len(resources), 0)

    def test_disallow_to_create_spark_context_in_executors(self):
        # SPARK-32160: SparkContext should not be created in executors.
        with SparkContext("local-cluster[3, 1, 1024]") as sc:
            with self.assertRaises(Exception) as context:
                sc.range(2).foreach(lambda _: SparkContext())
            self.assertIn(
                "SparkContext should only be created and accessed on the driver.",
                str(context.exception),
            )

    def test_allow_to_create_spark_context_in_executors(self):
        # SPARK-32160: SparkContext can be created in executors if the config is set.
        def create_spark_context():
            conf = SparkConf().set("spark.executor.allowSparkContext", "true")
            with SparkContext(conf=conf):
                pass

        with SparkContext("local-cluster[3, 1, 1024]") as sc:
            sc.range(2).foreach(lambda _: create_spark_context())
class ContextTestsWithResources(unittest.TestCase):
    """SparkContext tests with a fake GPU discovery script configured."""

    def setUp(self):
        class_name = self.__class__.__name__
        # Fake discovery script: echoes JSON describing one GPU at address "0".
        self.tempFile = tempfile.NamedTemporaryFile(delete=False)
        self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
        self.tempFile.close()
        # create temporary directory for Worker resources coordination
        self.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.tempdir.name)
        # Make the discovery script executable (owner rwx, group/other r-x).
        os.chmod(
            self.tempFile.name,
            stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH | stat.S_IXOTH,
        )
        conf = SparkConf().set("spark.test.home", SPARK_HOME)
        conf = conf.set("spark.driver.resource.gpu.amount", "1")
        conf = conf.set("spark.driver.resource.gpu.discoveryScript", self.tempFile.name)
        self.sc = SparkContext("local-cluster[2,1,1024]", class_name, conf=conf)

    def test_resources(self):
        """Test the resources are available."""
        resources = self.sc.resources
        self.assertEqual(len(resources), 1)
        self.assertTrue("gpu" in resources)
        self.assertEqual(resources["gpu"].name, "gpu")
        self.assertEqual(resources["gpu"].addresses, ["0"])

    def tearDown(self):
        # NOTE(review): self.tempdir's path is never removed here — confirm
        # whether Spark cleans it up or it leaks between runs.
        os.unlink(self.tempFile.name)
        self.sc.stop()
if __name__ == "__main__":
from pyspark.tests.test_context import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| |
__author__ = 'yuxiang'
import os
import datasets
import datasets.scenenet
import datasets.imdb
import cPickle
import numpy as np
import cv2
class scenenet(datasets.imdb):
    """SceneNet image database (imdb) for RGB-D semantic segmentation.

    Exposes per-index paths for scene image, depth, label and camera pose
    files, plus ground-truth roidb entries and a segmentation evaluator.
    NOTE: Python 2 code (print statements, cPickle, xrange).
    """

    def __init__(self, image_set, scenenet_path = None):
        # image_set is e.g. 'train'; data files live under <scenenet_path>/data.
        datasets.imdb.__init__(self, 'scenenet_' + image_set)
        self._image_set = image_set
        self._scenenet_path = self._get_default_path() if scenenet_path is None \
                            else scenenet_path
        self._data_path = os.path.join(self._scenenet_path, 'data')
        self._classes = ('unknown', 'floor', 'ceiling', 'wall', 'bed', 'chair', 'furniture', 'nightstand', 'shelf', \
                         'curtain', 'painting', 'pillow', 'door', 'window', 'table', 'sofa', 'lamp', 'vase', 'plant', 'plate')
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.png'
        self._image_index = self._load_image_set_index()
        self._roidb_handler = self.gt_roidb

        assert os.path.exists(self._scenenet_path), \
                'SceneNet path does not exist: {}'.format(self._scenenet_path)
        assert os.path.exists(self._data_path), \
                'Data path does not exist: {}'.format(self._data_path)

    # image
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self.image_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self._data_path, index + '_scene' + self._image_ext)
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path

    # depth
    def depth_path_at(self, i):
        """
        Return the absolute path to depth i in the image sequence.
        """
        return self.depth_path_from_index(self.image_index[i])

    def depth_path_from_index(self, index):
        """
        Construct a depth path from the image's "index" identifier.
        """
        depth_path = os.path.join(self._data_path, index + '_depth' + self._image_ext)
        assert os.path.exists(depth_path), \
                'Path does not exist: {}'.format(depth_path)
        return depth_path

    # label
    def label_path_at(self, i):
        """
        Return the absolute path to the label image i in the image sequence.
        """
        return self.label_path_from_index(self.image_index[i])

    def label_path_from_index(self, index):
        """
        Construct a label path from the image's "index" identifier.
        """
        label_path = os.path.join(self._data_path, index + '_label' + self._image_ext)
        assert os.path.exists(label_path), \
                'Path does not exist: {}'.format(label_path)
        return label_path

    # camera pose
    def metadata_path_at(self, i):
        """
        Return the absolute path to metadata i in the image sequence.
        """
        return self.metadata_path_from_index(self.image_index[i])

    def metadata_path_from_index(self, index):
        """
        Construct a metadata (camera pose) path from the image's "index" identifier.
        """
        metadata_path = os.path.join(self._data_path, index + '_pose.txt')
        assert os.path.exists(metadata_path), \
                'Path does not exist: {}'.format(metadata_path)
        return metadata_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        image_set_file = os.path.join(self._scenenet_path, self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)

        with open(image_set_file) as f:
            image_index = [x.rstrip('\n') for x in f.readlines()]
        return image_index

    def _get_default_path(self):
        """
        Return the default path where SceneNet is expected to be installed.
        """
        return os.path.join(datasets.ROOT_DIR, 'data', 'SceneNet')

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
            return roidb

        gt_roidb = [self._load_scenenet_annotation(index)
                    for index in self.image_index]

        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb

    def _load_scenenet_annotation(self, index):
        """
        Load class name and meta data for one image index.
        """
        # image path
        image_path = self.image_path_from_index(index)

        # depth path
        depth_path = self.depth_path_from_index(index)

        # label path
        label_path = self.label_path_from_index(index)

        # metadata path
        metadata_path = self.metadata_path_from_index(index)

        # load projection matrix
        P = np.loadtxt(metadata_path)

        # Fixed 640x480 viewport; factor_depth presumably means depth PNGs
        # store depth * 5000 — TODO confirm against the dataset docs.
        viewport = np.array([0, 0, 640, 480])

        meta_data = {'projection_matrix' : P,
                     'viewport': viewport,
                     'factor_depth': 5000,
                     'near_plane': 0.1,
                     'far_plane': 1000}

        return {'image': image_path,
                'depth': depth_path,
                'label': label_path,
                'meta_data': meta_data,
                'flipped': False}

    def evaluate_segmentations(self, segmentations, output_dir):
        """
        Score predicted label images against ground truth: prints overall/mean
        accuracy, per-class IU and fwavacc, and writes per-class IU to
        <output_dir>/segmentation.txt.
        """
        # compute histogram
        n_cl = self.num_classes
        hist = np.zeros((n_cl, n_cl))

        # for each image
        for im_ind, index in enumerate(self.image_index):
            # read ground truth labels
            im = cv2.imread(self.label_path_from_index(index), cv2.IMREAD_UNCHANGED)
            gt_labels = im.astype(np.float32, copy=True)

            # predicted labels
            sg_labels = segmentations[im_ind]['labels']

            # fast_hist presumably inherited from datasets.imdb — verify.
            hist += self.fast_hist(gt_labels.flatten(), sg_labels.flatten(), n_cl)

        # overall accuracy
        acc = np.diag(hist).sum() / hist.sum()
        print 'overall accuracy', acc

        # per-class accuracy
        acc = np.diag(hist) / hist.sum(1)
        print 'mean accuracy', np.nanmean(acc)

        # per-class IU
        print 'per-class IU'
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        for i in range(n_cl):
            print '{} {}'.format(self._classes[i], iu[i])
        print 'mean IU', np.nanmean(iu)

        # frequency-weighted average accuracy
        freq = hist.sum(1) / hist.sum()
        print 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum()

        filename = os.path.join(output_dir, 'segmentation.txt')
        with open(filename, 'wt') as f:
            for i in range(n_cl):
                f.write('{:f}\n'.format(iu[i]))
if __name__ == '__main__':
    # NOTE(review): `datasets.scenenet` here names the *module* imported above;
    # presumably the package __init__ re-exports the scenenet class — verify
    # before running this self-test.
    d = datasets.scenenet('train')
    res = d.roidb
    from IPython import embed; embed()
| |
#!/usr/bin/python -tt
# vim: ai ts=4 sts=4 et sw=4
#
# Copyright (c) 2009, 2010, 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os,sys
import re
import time
# Public-API listing. NOTE(review): the standard dunder is `__all__`; the
# `__ALL__` spelling has no effect on `import *`. Kept as-is because other
# code may reference msger.__ALL__ — confirm before renaming. Also note
# 'set_mode' is listed but no such function is defined in this module.
__ALL__ = ['set_mode',
           'get_loglevel',
           'set_loglevel',
           'set_logfile',
           'raw',
           'debug',
           'verbose',
           'info',
           'warning',
           'error',
           'ask',
           'pause',
          ]

# COLORs in ANSI
INFO_COLOR = 32 # green
WARN_COLOR = 33 # yellow
ERR_COLOR =  31 # red
ASK_COLOR =  34 # blue
NO_COLOR = 0

# Matches an optional leading "<prefix>" tag in a message, e.g.
# "<pkg> building" -> group(1)='pkg', group(2)='building'.
# Raw string avoids the invalid-escape pitfall of '\s' in a normal literal.
PREFIX_RE = re.compile(r'^<(.*?)>\s*(.*)', re.S)

INTERACTIVE = True

# Current log level (index into LOG_LEVELS values); default 'normal'.
LOG_LEVEL = 1
LOG_LEVELS = {
                'quiet': 0,
                'normal': 1,
                'verbose': 2,
                'debug': 3,
                'never': 4,
             }

LOG_FILE_FP = None      # path of the log file (set via set_logfile)
LOG_CONTENT = ''        # in-memory log buffer, flushed to LOG_FILE_FP at exit

# State for the stderr-redirection machinery (enable_logstderr):
CATCHERR_BUFFILE_FD = -1        # fd of the buffer file (-1 = disabled)
CATCHERR_BUFFILE_PATH = None    # path of the buffer file
CATCHERR_SAVED_2 = -1           # dup of the original fd 2
def _general_print(head, color, msg = None, stream = None, level = 'normal'):
    """Core printer: append to LOG_CONTENT and emit to *stream* (default stdout).

    Messages whose *level* is above the current LOG_LEVEL are dropped.
    Any stderr output captured via enable_logstderr() is drained, logged
    and printed first.
    """
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    # encode raw 'unicode' str to utf8 encoded str (Python 2 only type)
    if msg and isinstance(msg, unicode):
        msg = msg.encode('utf-8', 'ignore')

    errormsg = ''
    if CATCHERR_BUFFILE_FD > 0:
        # Drain whatever children wrote to the redirected stderr buffer,
        # then truncate it for the next round.
        size = os.lseek(CATCHERR_BUFFILE_FD , 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    # append error msg to LOG
    if errormsg:
        LOG_CONTENT += errormsg

    # append normal msg to LOG, timestamped
    save_msg = msg.strip() if msg else None
    if save_msg:
        timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
        LOG_CONTENT += timestr + save_msg + '\n'

    if errormsg:
        _color_print('', NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
def _color_print(head, color, msg, stream, level):
    """Write '<head>: <msg>' to *stream*, wrapped in ANSI codes when appropriate.

    Color is disabled for NO_COLOR, non-tty streams, or when the
    ANSI_COLORS_DISABLED environment variable is set. A head starting with
    '\\r' means "rewrite the current line": the line is cleared (colored
    mode only) and no trailing newline is written.
    """
    colored = True
    if color == NO_COLOR or \
       not stream.isatty() or \
       os.getenv('ANSI_COLORS_DISABLED') is not None:
        colored = False

    if head.startswith('\r'):
        # need not \n at last
        newline = False
    else:
        newline = True

    if colored:
        head = '\033[%dm%s:\033[0m ' %(color, head)
        if not newline:
            # ESC cmd to clear line
            head = '\033[2K' + head
    else:
        if head:
            head += ': '
        if head.startswith('\r'):
            # No ANSI line-rewrite without color support: strip the '\r'
            # and fall back to a normal newline-terminated line.
            head = head.lstrip()
            newline = True

    if msg is not None:
        if isinstance(msg, unicode):  # Python 2 only type
            msg = msg.encode('utf8', 'ignore')
        stream.write('%s%s' % (head, msg))
        if newline:
            stream.write('\n')

    stream.flush()
def _color_perror(head, color, msg, level='normal'):
    """Print an error-style message to stderr — or to stdout while stderr
    is being captured into the buffer file (see enable_logstderr)."""
    target = sys.stdout if CATCHERR_BUFFILE_FD > 0 else sys.stderr
    _general_print(head, color, msg, target, level)
def _split_msg(head, msg):
    """Normalize *msg* and fold its layout hints into *head*.

    Lists are joined with newlines; a leading '\\n' or '\\r' is moved onto
    the head; a '<prefix>' tag is extracted and appended to the head.
    Returns the (head, msg) pair.
    """
    if isinstance(msg, list):
        msg = '\n'.join(map(str, msg))

    for lead in ('\n', '\r'):
        if msg.startswith(lead):
            msg = msg.lstrip()
            head = lead + head
            break

    m = PREFIX_RE.match(msg)
    if m:
        head = '%s <%s>' % (head, m.group(1))
        msg = m.group(2)

    return head, msg
def get_loglevel():
    """Return the name of the current log level (e.g. 'normal').

    Uses the next() builtin instead of the Python-2-only generator
    .next() method, so this works on Python 2.6+ and Python 3.
    """
    return next(k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL)
def set_loglevel(level):
    """Set the module log level by name; unknown names are silently ignored."""
    global LOG_LEVEL
    if level in LOG_LEVELS:
        LOG_LEVEL = LOG_LEVELS[level]
def set_interactive(mode=True):
    """Enable/disable interactive prompts (ask/choice/pause).

    Any truthy *mode* enables interactivity; the four-line if/else of the
    original collapses to a single bool() coercion with identical behavior.
    """
    global INTERACTIVE
    INTERACTIVE = bool(mode)
def log(msg=''):
    """Append *msg* to the in-memory log buffer (flushed to file at exit)."""
    # log msg to LOG_CONTENT then save to logfile
    global LOG_CONTENT
    if msg:
        LOG_CONTENT = LOG_CONTENT + msg
def raw(msg=''):
    """Print *msg* verbatim: empty head, no color."""
    head = ''
    _general_print(head, NO_COLOR, msg)
def info(msg):
    """Print an informational message with a green 'Info' prefix."""
    head, body = _split_msg('Info', msg)
    _general_print(head, INFO_COLOR, body)
def verbose(msg):
    """Print *msg* only when the log level is 'verbose' or chattier."""
    head, body = _split_msg('Verbose', msg)
    _general_print(head, INFO_COLOR, body, level='verbose')
def warning(msg):
    """Print a yellow 'Warning' message to stderr (stdout when redirected)."""
    head, body = _split_msg('Warning', msg)
    _color_perror(head, WARN_COLOR, body)
def debug(msg):
    """Print a 'Debug' message; only shown at the 'debug' log level."""
    head, body = _split_msg('Debug', msg)
    _color_perror(head, ERR_COLOR, body, level='debug')
def error(msg):
    """Print a red 'Error' message and terminate the process (exit code 1)."""
    head, body = _split_msg('Error', msg)
    _color_perror(head, ERR_COLOR, body)
    sys.exit(1)
def ask(msg, default=True):
    """Prompt a yes/no question; return True/False (*default* on plain <Enter>).

    In non-interactive mode the assumed answer is printed and *default*
    returned. Ctrl-C exits the process with status 2. Python 2 (raw_input).
    """
    _general_print('\rQ', ASK_COLOR, '')
    try:
        if default:
            msg += '(Y/n) '
        else:
            msg += '(y/N) '
        if INTERACTIVE:
            while True:
                repl = raw_input(msg)
                if repl.lower() == 'y':
                    return True
                elif repl.lower() == 'n':
                    return False
                elif not repl.strip():
                    # <Enter>
                    return default

                # else loop: any other input re-prompts
        else:
            # Echo the question with the assumed answer appended.
            if default:
                msg += ' Y'
            else:
                msg += ' N'
            _general_print('', NO_COLOR, msg)

            return default
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        sys.exit(2)
def choice(msg, choices, default=0):
    """Prompt the user to pick one of *choices*; <Enter> selects the default.

    Returns the chosen string, or None when *default* is out of range.
    Non-interactive mode echoes and returns the default. Ctrl-C exits with
    status 2. Python 2 (raw_input).
    """
    if default >= len(choices):
        return None
    _general_print('\rQ', ASK_COLOR, '')
    try:
        msg += " [%s] " % '/'.join(choices)
        if INTERACTIVE:
            while True:
                repl = raw_input(msg)
                if repl in choices:
                    return repl
                elif not repl.strip():
                    # <Enter> picks the default; anything else re-prompts
                    return choices[default]
        else:
            msg += choices[default]
            _general_print('', NO_COLOR, msg)
            return choices[default]
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        sys.exit(2)
def pause(msg=None):
    """Block until the user presses <ENTER>; no-op in non-interactive mode."""
    if INTERACTIVE:
        _general_print('\rQ', ASK_COLOR, '')
        if msg is None:
            msg = 'press <ENTER> to continue ...'
        raw_input(msg)  # Python 2
def set_logfile(fpath):
    """Arrange for LOG_CONTENT to be written to *fpath* at interpreter exit."""
    global LOG_FILE_FP

    def _savelogf():
        # atexit hook; despite the name, LOG_FILE_FP holds a *path*.
        if LOG_FILE_FP:
            fp = open(LOG_FILE_FP, 'w')
            fp.write(LOG_CONTENT)
            fp.close()

    if LOG_FILE_FP is not None:
        # A previous call already registered a hook; warn but reconfigure.
        warning('duplicate log file configuration')

    LOG_FILE_FP = fpath
    import atexit
    atexit.register(_savelogf)
def enable_logstderr(fpath):
    """Redirect fd 2 (stderr) into a buffer file at *fpath*.

    _general_print() later drains this buffer so child-process stderr ends
    up in the log; disable_logstderr() restores the original stderr.
    """
    global CATCHERR_BUFFILE_FD
    global CATCHERR_BUFFILE_PATH
    global CATCHERR_SAVED_2

    if os.path.exists(fpath):
        os.remove(fpath)
    CATCHERR_BUFFILE_PATH = fpath
    CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
    CATCHERR_SAVED_2 = os.dup(2)  # remember the real stderr fd
    os.dup2(CATCHERR_BUFFILE_FD, 2)
def disable_logstderr():
    """Undo enable_logstderr(): flush buffered stderr and restore fd 2."""
    global CATCHERR_BUFFILE_FD
    global CATCHERR_BUFFILE_PATH
    global CATCHERR_SAVED_2

    raw(msg = None) # flush message buffer and print it.
    os.dup2(CATCHERR_SAVED_2, 2)    # restore the original stderr
    os.close(CATCHERR_SAVED_2)
    os.close(CATCHERR_BUFFILE_FD)
    os.unlink(CATCHERR_BUFFILE_PATH)
    CATCHERR_BUFFILE_FD = -1
    CATCHERR_BUFFILE_PATH = None
    CATCHERR_SAVED_2 = -1
| |
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
from . import utils
from . import gui
from . import private_key_dialog
from . import sign_decrypt_dialog
from . import history
from electroncash import WalletStorage, Wallet
from electroncash.util import timestamp_to_datetime
import electroncash.exchange_rate
from electroncash.i18n import _, language
from electroncash.address import Address
import time, html, sys, enum
from collections import namedtuple
from .uikit_bindings import *
from .custom_objc import *
_TYPES = ("Any","Receiving","Change")
_STATUSES = ("All", "Funded", "Unused", "Used")
_TYPES_BY_NAME = dict()
_STATUSES_BY_NAME = dict()
if False:
# this is here simply to get picked up by i18n messages.pot, etc.
__DUMMY_FOR_TRANSLATION = ( _("Any"), _("All"), _("Funded"), _("Unused"),
_("Used"), _("Receiving"), _("Change") )
for i,k in enumerate(_TYPES):
_TYPES_BY_NAME[k] = i
for i,k in enumerate(_STATUSES):
_STATUSES_BY_NAME[k] = i
class AddressDetail(AddressDetailBase):
blockRefresh = objc_property()  # when truthy, refresh() only records needsRefresh (used while editing)
needsRefresh = objc_property()  # set when a refresh was suppressed; replayed by doRefreshIfNeeded
domain = objc_property()  # string repr of address in question -- used to get the cached address entry from the datamgr
kbas = objc_property()  # keyboard auto-scroll registration handle (see viewWillAppear_)
@objc_method
def init(self) -> ObjCInstance:
    # Plain 'init' is deliberately unsupported: an address is required.
    raise ValueError("INVALID USAGE: Cannot construct an AddressDetail with a simple 'init' call! Use 'initWithAddress:' instead ")
@objc_method
def initWithAddress_(self, address) -> ObjCInstance:
    # Designated initializer; *address* is the string form of the address.
    self = ObjCInstance(send_super(__class__, self, 'init'))
    if self:
        self.title = "Address Details"
        self.domain = address
        self.optionsBarBut = UIBarButtonItem.alloc().initWithImage_style_target_action_(UIImage.imageNamed_("barbut_options"), UIBarButtonItemStylePlain, self, SEL(b'onOptions')).autorelease()
        self.navigationItem.rightBarButtonItem = self.optionsBarBut
        # Repaint whenever the app-wide address/history signals fire.
        gui.ElectrumGui.gui.sigAddresses.connect(lambda:self.refresh(), self)
        gui.ElectrumGui.gui.sigHistory.connect(lambda:self.refresh(), self)
        bb = UIBarButtonItem.new().autorelease()
        bb.title = _("Back")
        self.navigationItem.backBarButtonItem = bb
    return self
@objc_method
def dealloc(self) -> None:
    # Disconnect signals and clear Python-side state before ObjC teardown.
    #print("AddressDetail dealloc")
    gui.ElectrumGui.gui.sigAddresses.disconnect(self)
    gui.ElectrumGui.gui.sigHistory.disconnect(self)
    utils.nspy_pop(self)  # drop any python payload associated with this instance
    self.domain = None
    self.blockRefresh = None
    self.needsRefresh = None
    self.kbas = None
    send_super(__class__, self, 'dealloc')
@objc_method
def loadView(self) -> None:
    # Load the UI from the AddressDetail nib and prime static pieces.
    NSBundle.mainBundle.loadNibNamed_owner_options_("AddressDetail",self,None)
    parent = gui.ElectrumGui.gui
    entry = _Get(self.domain)
    # Save constraint constants so refresh() can collapse/restore the fiat row.
    self.statusTopSaved = self.statusTopCS.constant
    self.txHistoryTopSaved = self.txHistoryTopCS.constant
    self.descDel.placeholderFont = UIFont.italicSystemFontOfSize_(14.0)
    self.descDel.placeholderText = '\n' + _(str(self.descDel.placeholderText).strip())
    # Re-use of TxHistoryHelper below...
    helper = history.NewTxHistoryHelper(tv = self.tv, vc = self, noRefreshControl = True, domain = [entry.address], cls=history.TxHistoryHelperWithHeader)
@objc_method
def viewDidLoad(self) -> None:
    # setup callbacks for the description (label) text field delegate
    def didBeginEditing() -> None:
        self.blockRefresh = True # temporarily block refreshing since that kills our keyboard/textfield
    self.descDel.didBeginEditing = Block(didBeginEditing)
    def didEndEditing(text : ObjCInstance) -> None:
        text = py_from_ns(text)
        self.blockRefresh = False # unblock refreshing
        entry = _Get(self.domain)
        text = str(text).strip()
        new_label = text
        utils.NSLog("new label for address %s = %s", entry.address.to_storage_string(), new_label)
        gui.ElectrumGui.gui.on_label_edited(entry.address, new_label)
        # Note that above should implicitly refresh us due to sigAddresses signal
        self.doRefreshIfNeeded() # just in case we had a blocked refresh and electrumgui didn't signal.
    self.descDel.didEndEditing = Block(didEndEditing)
@objc_method
def viewWillAppear_(self, animated : bool) -> None:
    send_super(__class__, self, 'viewWillAppear:', animated, argtypes=[c_bool])
    # Auto-scroll the view when the keyboard would cover the text field.
    self.kbas = utils.register_keyboard_autoscroll(self.view)
    self.refresh()
@objc_method
def viewWillDisappear_(self, animated : bool) -> None:
    send_super(__class__, self, 'viewWillDisappear:', animated, argtypes=[c_bool])
    # Undo the keyboard auto-scroll registration made in viewWillAppear_.
    if self.kbas:
        utils.unregister_keyboard_autoscroll(self.kbas)
        self.kbas = None
@objc_method
    def refresh(self) -> None:
        # Repaint the whole address-detail screen from the current wallet entry.
        # If the view isn't loaded yet, or refresh is blocked (e.g. while the
        # label text field is being edited), just remember that a refresh is due.
        if self.viewIfLoaded is None or self.blockRefresh:
            self.needsRefresh = True
            return
        entry = _Get(self.domain)
        if not entry:
            return # wallet may have been closed...
        # apply kerning to the title labels
        tits = [self.balanceTit, self.numTxTit, self.statusTit, self.descTit]
        for tit in tits:
            tit.setText_withKerning_(tit.text, utils._kern)
        self.address.text = entry.address.to_ui_string()
        self.balance.text = entry.balance_str.strip() + " " + entry.base_unit.strip()
        self.fiatBalance.text = entry.fiat_balance_str.strip()
        # Collapse the fiat row (and its vertical space) when there is no fiat quote.
        if self.fiatBalance.text:
            self.fiatBalance.setHidden_(False)
            self.statusTopCS.constant = self.statusTopSaved
        else:
            self.fiatBalance.setHidden_(True)
            self.statusTopCS.constant = 0.0
        # Frozen addresses get the dedicated 'frozentext' tint.
        if entry.is_frozen:
            c = utils.uicolor_custom('frozentext')
        else:
            c = utils.uicolor_custom('dark')
        self.address.textColor = c
        self.balance.textColor = c
        self.fiatBalance.textColor = c
        def numTXsAttrStr() -> ObjCInstance:
            # Build "N (M UTXOs)" where the UTXO suffix is styled as a tappable link
            # when there are UTXOs; also enables/disables the tap gesture accordingly.
            ats = NSMutableAttributedString.alloc().initWithString_(str(entry.num_tx)).autorelease()
            hadUTXOs = entry.num_utxos
            utxos = ' (' + str(hadUTXOs) + ' UTXOs)' if entry.num_tx else ''
            attrs = { NSFontAttributeName: UIFont.systemFontOfSize_weight_(14.0, UIFontWeightLight) }
            ats.appendAttributedString_(NSAttributedString.alloc().initWithString_attributes_(utxos, attrs).autorelease())
            if hadUTXOs and utxos:
                attrs = dict()
                l = len(str(hadUTXOs))
                # Range covers "M UTXOs)" minus surrounding parens math; depends on
                # the exact ' (%d UTXOs)' suffix format above.
                r = NSRange(ats.length()-(7+l),l+6)
                attrs[NSForegroundColorAttributeName] = utils.uicolor_custom('link')
                attrs[NSUnderlineStyleAttributeName] = NSUnderlineStyleSingle
                ats.addAttributes_range_(attrs, r)
                self.utxoGr.setEnabled_(True)
            else:
                self.utxoGr.setEnabled_(False)
            return ats
        self.numTx.attributedText = numTXsAttrStr()
        self.descDel.text = entry.label.strip()
        # Status line: comma-joined flags, with sensible defaults when empty.
        xtra = []
        if entry.is_watch_only:
            xtra.append(_('watching only'))
        if entry.is_change:
            xtra.append(_('Change'))
        if entry.is_used:
            xtra.append(_('Used'))
        if entry.is_frozen:
            xtra.append(_('Frozen'))
        if xtra:
            self.status.text = ', '.join(xtra)
        else:
            self.status.text = ''
        if not self.status.text:
            self.status.text = _('Receiving Address')
        elif self.status.text == _('Change'):
            self.status.text = _('Change Address')
        # If the status string ends in 'Frozen', tint that trailing word.
        froz = _('Frozen')
        lfroz = len(froz)
        stext = self.status.text
        if stext[-lfroz:] == froz:
            ats = NSMutableAttributedString.alloc().initWithString_(stext).autorelease()
            r = NSRange(len(stext)-lfroz, lfroz)
            ats.addAttribute_value_range_(NSForegroundColorAttributeName, utils.uicolor_custom('frozentext'), r)
            self.status.attributedText = ats
        size = CGSizeMake(200.0,200.0) # the returned image has a 10 pix margin -- this compensates for it
        self.qr.contentMode = UIViewContentModeCenter # if the image pix margin changes -- FIX THIS
        self.qr.image = utils.get_qrcode_image_for_data(self.address.text, size = size)
        self.refreshButs()
        self.tv.reloadData() # might be a sometimes-redundant call since WalletsTxHelper also calls reload data..
        self.needsRefresh = False
@objc_method
    def onOptions(self) -> None:
        # Show the address context menu (copy, freeze, spend-from, ...) anchored
        # at the options bar button (anchor only used on iPad).
        entry = _Get(self.domain)
        _ShowAddressContextMenu(entry, self, ipadAnchor = self.optionsBarBut)
@objc_method
    def onSpendFrom(self) -> None:
        # Jump to the send screen pre-populated with this address's coins.
        entry = _Get(self.domain)
        _SpendFrom(entry, self)
@objc_method
def onUTXOs(self) -> None:
from .coins import PushCoinsVC
coinsvc = PushCoinsVC([_Get(self.domain).address], self.navigationController)
@objc_method
    def toggleFreezeAddress(self) -> None:
        _ToggleFreeze(_Get(self.domain)) # will implicitly refresh us due to signal being emitted
@objc_method
    def cpyAddress(self) -> None:
        # Copy the displayed address string to the system clipboard.
        entry = _Get(self.domain)
        gui.ElectrumGui.gui.copy_to_clipboard(entry.addr_str, "Address")
@objc_method
    def refreshButs(self) -> None:
        # Sync the freeze / spend-from buttons with the entry's state, and
        # collapse the tx-history gap when spend-from is hidden.
        v = self.viewIfLoaded
        if v is None: return
        entry = _Get(self.domain)
        watch_only = entry.is_watch_only
        but = self.freezeBut
        but.setSelected_(bool(entry.is_frozen))
        but.setHidden_(watch_only)  # can't (un)freeze in a watching-only wallet
        but = self.spendFromBut
        # Spend-from only makes sense for a spendable, funded, unfrozen address.
        but.setHidden_(bool(watch_only or entry.is_frozen or not entry.balance))
        if but.isHidden():
            self.txHistoryTopCS.constant = -50.0
        else:
            self.txHistoryTopCS.constant = self.txHistoryTopSaved
@objc_method
def doRefreshIfNeeded(self) -> None:
if self.needsRefresh: self.refresh()
@objc_method
    def onCloseKeyboard_(self, sender : ObjCInstance) -> None:
        # Dismiss the keyboard by ending editing on the whole view hierarchy.
        self.view.endEditing_(True)
@objc_method
    def onQRImgTap(self) -> None:
        # Tap on the QR code: flash a brief highlight animation, then present
        # the share/save action sheet for the QR image.
        if not self.qr.image: gui.ElectrumGui.gui.show_error(vc = self, message = "Error, No QR Image")
        else:
            def ShowIt() -> None:
                utils.show_share_actions(vc = self, img = self.qr.image, ipadAnchor = self.qr.convertRect_toView_(self.qr.bounds, self.view), objectName = _("Image"))
            c1 = UIColor.clearColor
            c2 = UIColor.colorWithRed_green_blue_alpha_(0.0,0.0,0.0,0.3)
            # Pulse background clear -> translucent black and back, then share.
            self.qr.backgroundColorAnimationFromColor_toColor_duration_reverses_completion_(c1, c2, 0.2, True, ShowIt)
ModeNormal = 0  # AddressesVC behaves as the regular browsable addresses tab
ModePicker = 1  # AddressesVC behaves as a modal "choose an address" picker
# Addresses Tab -- shows addresses, etc
class AddressesVC(AddressesVCBase):
needsRefresh = objc_property()
blockRefresh = objc_property()
mode = objc_property()
refreshControl = objc_property()
comboL = objc_property()
comboR = objc_property()
comboPreset = objc_property()
@objc_method
    def initWithMode_(self, mode : int):
        # Designated initializer: mode is ModeNormal (tab) or ModePicker (modal picker).
        self = ObjCInstance(send_super(__class__, self, 'init'))
        if self:
            self.comboL = None
            self.comboR = None
            self.needsRefresh = False
            self.blockRefresh = False
            self.mode = int(mode)
            # Strip the '&' accelerator marker from the translated title.
            ad = _("&Addresses").translate({ord('&') : None})
            self.title = ad if self.mode == ModeNormal else _("Choose Address")
            if self.mode == ModeNormal:
                # Tab mode: tab icon plus a right bar button toggling address format.
                self.tabBarItem.image = UIImage.imageNamed_("tab_addresses_new")
                bb = UIBarButtonItem.alloc().initWithTitle_style_target_action_(_GetBBTitle(), UIBarButtonItemStylePlain, self, SEL(b'toggleCashAddr')).autorelease()
                bb.possibleTitles = NSSet.setWithArray_(_GetBBTitle('*'))
                d = { NSFontAttributeName : UIFont.systemFontOfSize_(14.0) }
                bb.setTitleTextAttributes_forState_(d, UIControlStateNormal)
                d[NSFontAttributeName] = UIFont.systemFontOfSize_weight_(14.0, UIFontWeightRegular)
                bb.setTitleTextAttributes_forState_(d, UIControlStateHighlighted)
                self.navigationItem.rightBarButtonItem = bb
            if self.mode == ModePicker:
                # Picker mode: pull-to-refresh control plus a "Back" bar button.
                def onRefreshCtl() -> None:
                    self.refresh()
                self.refreshControl = UIRefreshControl.new().autorelease()
                self.refreshControl.handleControlEvent_withBlock_(UIControlEventValueChanged, onRefreshCtl)
                bb = UIBarButtonItem.new().autorelease()
                bb.title = _("Back")
                self.navigationItem.backBarButtonItem = bb
            # Re-render whenever the wallet's address data changes.
            gui.ElectrumGui.gui.sigAddresses.connect(lambda:self.refresh(), self)
        return self
@objc_method
    def dealloc(self) -> None:
        # ObjC destructor: disconnect signals, clear objc_property storage,
        # and drop any python-side associated state before super dealloc.
        gui.ElectrumGui.gui.sigAddresses.disconnect(self)
        self.needsRefresh = None
        self.mode = None
        self.blockRefresh = None
        self.refreshControl = None
        self.comboL = None
        self.comboR = None
        self.comboPreset = None
        utils.nspy_pop(self)
        utils.remove_all_callbacks(self)
        send_super(__class__, self, 'dealloc')
@objc_method
    def loadView(self) -> None:
        # Load the main view from the Addresses nib, then instantiate the two
        # combo-drawer picker VCs (type filter = left, status filter = right).
        NSBundle.mainBundle.loadNibNamed_owner_options_("Addresses", self, None) # auto-attaches view
        if self.mode == ModeNormal:
            uinib = UINib.nibWithNibName_bundle_("AddressesCell", None)
            self.tableView.registerNib_forCellReuseIdentifier_(uinib, "AddressesCell")
        # set up the combodrawer "child" vc's (they aren't really children in the iOS sense since I hate the way iOS treats embedded VCs)
        objs = NSBundle.mainBundle.loadNibNamed_owner_options_("ComboDrawerPicker", None, None)
        for o in objs:
            if isinstance(o, ComboDrawerPicker):
                self.comboL = o
                break
        objs = NSBundle.mainBundle.loadNibNamed_owner_options_("ComboDrawerPicker", None, None)
        for o in objs:
            if isinstance(o, ComboDrawerPicker):
                self.comboR = o
                break
        self.comboL.flushLeft = True
@objc_method
    def viewDidLoad(self) -> None:
        # One-time view setup: wire up pull-to-refresh and the combo drawers.
        send_super(__class__, self, 'viewDidLoad')
        self.refreshControl = gui.ElectrumGui.gui.helper.createAndBindRefreshControl()
        self.tableView.refreshControl = self.refreshControl
        self.setupComboCallbacks()
        self.setupComboItems()
@objc_method
def viewWillAppear_(self, animated : bool) -> None:
send_super(__class__, self, 'viewWillAppear:', animated, argtype=[c_bool])
# hacky pulling in of attributed text string form the 'child' vc into our proxy stub
self.topLblL.attributedText = self.comboL.attributedStringForTopTitle
self.topLblR.attributedText = self.comboR.attributedStringForTopTitle
@objc_method
def numberOfSectionsInTableView_(self, tableView) -> int:
try:
addrData = _Get()
return 1 if addrData.master[self.comboL.selection][self.comboR.selection] is not None else 0
except:
print("Error in addresses 1:",str(sys.exc_info()[1]))
return 0
@objc_method
def tableView_numberOfRowsInSection_(self, tableView : ObjCInstance, section : int) -> int:
try:
addrData = _Get()
return max(1,len(addrData.master[self.comboL.selection][self.comboR.selection])) if addrData is not None else 0
except:
print("Error in addresses 2:",str(sys.exc_info()[1]))
return 0
@objc_method
def tableView_cellForRowAtIndexPath_(self, tableView, indexPath):
#todo: - allow for label editing (popup menu?)
identifier = "AddressesCell" if self.mode == ModeNormal else "Cell"
cell = tableView.dequeueReusableCellWithIdentifier_(identifier)
newCell = False
if self.mode == ModePicker and cell is None:
cell = UITableViewCell.alloc().initWithStyle_reuseIdentifier_(UITableViewCellStyleSubtitle,identifier).autorelease()
newCell = True
try:
addrData = _Get()
entries = addrData.master[self.comboL.selection][self.comboR.selection]
except:
print("Error in addresses 3:",str(sys.exc_info()[1]))
entries = list()
if indexPath.row >= len(entries) or cell is None:
cell = UITableViewCell.alloc().initWithStyle_reuseIdentifier_(UITableViewCellStyleSubtitle,"NoMatchCell").autorelease()
cell.textLabel.text = _("No Match")
cell.textLabel.textColor = utils.uicolor_custom('dark')
cell.detailTextLabel.text = _("No addresses match the specified criteria")
cell.detailTextLabel.textColor = utils.uicolor_custom('light')
return cell
entry = entries[indexPath.row]
if self.mode == ModeNormal:
cell.address.linkText = entry.addr_str
if entry.label:
cell.desc.setText_withKerning_(entry.label, utils._kern)
cell.topCS.constant = 7
cell.midCS.constant = 6
else:
cell.desc.text = ""
cell.topCS.constant = 15
cell.midCS.constant = 10
cell.balanceTit.setText_withKerning_(_('Balance'), utils._kern)
baltxt = entry.balance_str.strip() + " " + entry.base_unit
if entry.is_frozen:
amtColor = utils.uicolor_custom('frozentext')
fiatColor = utils.uicolor_custom('frozentextlight')
else:
amtColor = utils.uicolor_custom('dark')
fiatColor = utils.uicolor_custom('light')
cell.balance.attributedText = utils.hackyFiatAmtAttrStr(baltxt, entry.fiat_balance_str, '', 0.0, fiatColor, None, -0.5, amtColor = amtColor)
xtra = []
if entry.is_frozen:
if entry.is_change:
xtra += [_('Change')]
xtra += [_('Frozen')]
cell.flags.textColor = utils.uicolor_custom('frozentext')
else:
cell.flags.textColor = utils.uicolor_custom('dark')
if entry.is_change:
xtra += [_('Change')]
if entry.is_used:
xtra += [_('Used')]
xtra += [ str(entry.num_tx) + ' Tx' + ('s' if entry.num_tx != 1 else '')]
cell.flags.setText_withKerning_(', '.join(xtra) if xtra else '', utils._kern)
def linkTarget(celladdy : objc_id) -> None:
if self.navigationController and self.navigationController.visibleViewController.ptr == self.ptr:
celladdy = ObjCInstance(celladdy)
self.onTapAddress_(celladdy)
cell.address.tag = (self.comboL.selection << 24) | (self.comboR.selection << 16) | (indexPath.row & 0xffff) # useful for onTapAddress to figure out what tapped it
cell.address.linkTarget = linkTarget
return cell
else: # picker mode
if newCell:
cell.accessoryType = UITableViewCellAccessoryNone
cell.textLabel.adjustsFontSizeToFitWidth = False
#cell.textLabel.minimumScaleFactor = 0.9
cell.textLabel.lineBreakMode = NSLineBreakByTruncatingMiddle
font = cell.textLabel.font
cell.textLabel.font = UIFont.systemFontOfSize_weight_(font.pointSize, UIFontWeightRegular)
cell.detailTextLabel.adjustsFontSizeToFitWidth = True
cell.detailTextLabel.minimumScaleFactor = 0.85
cell.textLabel.text = str(entry.address)
cell.detailTextLabel.text = "bal: " + entry.balance_str + ( (' (' + entry.fiat_balance_str + ')') if addrData.show_fx else '') + " numtx: " + str(entry.num_tx) + ((" - " + entry.label) if entry.label else "")
font = cell.detailTextLabel.font
cell.detailTextLabel.font = UIFont.systemFontOfSize_weight_(font.pointSize, UIFontWeightRegular)
cell.backgroundColor = tableView.backgroundColor
cell.textLabel.textColor = utils.uicolor_custom('dark')
cell.detailTextLabel.textColor = utils.uicolor_custom('light')
if entry.is_frozen:
#cell.backgroundColor = utils.uicolor_custom('frozen address')
cell.textLabel.textColor = utils.uicolor_custom('frozentext')
cell.detailTextLabel.textColor = utils.uicolor_custom('frozentextlight')
if entry.is_change:
cell.detailTextLabel.text = cell.detailTextLabel.text + " (Change Address)"
#cell.backgroundColor = utils.uicolor_custom('change address')
return cell
@objc_method
def tableView_heightForRowAtIndexPath_(self, tv, indexPath) -> float:
if self.mode == ModeNormal:
return 91.0
return 44.0
# Below 2 methods conform to UITableViewDelegate protocol
@objc_method
    def tableView_accessoryButtonTappedForRowWithIndexPath_(self, tv, indexPath):
        # UITableViewDelegate stub -- intentionally a no-op (no accessory buttons used).
        #print("ACCESSORY TAPPED CALLED")
        pass
@objc_method
def tableView_didSelectRowAtIndexPath_(self, tv, indexPath):
#print("DID SELECT ROW CALLED FOR SECTION %s, ROW %s"%(str(indexPath.section),str(indexPath.row)))
tv.deselectRowAtIndexPath_animated_(indexPath,True)
try:
addrData = _Get()
section = addrData.master[self.comboL.selection][self.comboR.selection]
if indexPath.row >= len(section):
print("User tapped invalid cell. Possibly the 'No Results' cell.")
return
entry = section[indexPath.row]
if self.mode == ModeNormal:
PushDetail(entry, self.navigationController)
else:
cb = utils.get_callback(self, 'on_picked')
if callable(cb): cb(entry)
except:
print ("Exception encountered:",str(sys.exc_info()[1]))
@objc_method
    def refresh(self):
        # Reload the table and the format-toggle button title; if refreshing is
        # blocked, leave needsRefresh set so doRefreshIfNeeded catches up later.
        self.needsRefresh = True # mark that a refresh was called in case refresh is blocked
        if self.blockRefresh:
            return
        if self.refreshControl: self.refreshControl.endRefreshing()
        if self.tableView:
            self.tableView.reloadData()
        if self.mode == ModeNormal and self.navigationItem.rightBarButtonItem:
            self.navigationItem.rightBarButtonItem.title = _GetBBTitle()
        #print("did address refresh")
        self.needsRefresh = False # indicate refreshing done
# This method runs in the main thread as it's enqueue using our hacky "Heartbeat" mechanism/workaround for iOS
@objc_method
def doRefreshIfNeeded(self) -> None:
if self.needsRefresh:
self.refresh()
#print ("ADDRESSES REFRESHED")
@objc_method
    def toggleCashAddr(self) -> None:
        # Flip the app-wide cashaddr/legacy address format preference.
        gui.ElectrumGui.gui.toggle_cashaddr(not gui.ElectrumGui.gui.prefs_get_use_cashaddr())
@objc_method
def onTapAddress_(self, linkView : ObjCInstance) -> None:
tag = linkView.tag
typ = (tag >> 24)&0xff
stat = (tag >> 16)&0xff
row = tag & 0xffff
try:
entry = _Get().master[typ][stat][row]
except:
print("onTapAddress exception:",str(sys.exc_info()[1]))
return
_ShowAddressContextMenu(entry, self, ipadAnchor = linkView.convertRect_toView_(linkView.bounds, self.view))
# -----------------------------------
# COMBO DRAWER RELATED STUFF BELOW...
# -----------------------------------
@objc_method
def setupComboItems(self) -> None:
self.comboL.topTitle = _("Type")
self.comboL.items = [ _(x) for x in _TYPES ]
self.comboR.topTitle = _("Status")
self.comboR.items = [ _(x) for x in _STATUSES ]
parent = gui.ElectrumGui.gui
presetOK = False
if self.comboPreset:
try:
cpl = list(self.comboPreset)
self.comboL.selection = cpl[0]
self.comboR.selection = cpl[1]
presetOK = True # success!
except:
utils.NSLog("Exception trying to read comboPreset in setupComboItems: %s", str(sys.exc_info()[1]))
# if above fails... read from config...
if parent.config and not presetOK:
self.comboL.selection = parent.config.get("AddressTab_Type_Filter", 0)
self.comboR.selection = parent.config.get("AddressTab_Status_Filter", 0)
@objc_method
def setupComboCallbacks(self) -> None:
# TODO: set up comboL and comboR vc's, and other misc. setup
def closeLAnim() -> None:
self.doComboClose_(self.comboL)
def closeRAnim() -> None:
self.doComboClose_(self.comboR)
def bgTapChk(p : CGPoint) -> None:
this = self.presentedViewController
if isinstance(this, ComboDrawerPicker):
fwl = self.topComboProxyL.convertRect_toView_(self.topComboProxyL.bounds, self.view)
fwr = self.topComboProxyR.convertRect_toView_(self.topComboProxyR.bounds, self.view)
p = self.view.convertPoint_fromView_(p, self.presentedViewController.view)
that = None
if CGRectContainsPoint(fwl, p): that = self.comboL
elif CGRectContainsPoint(fwr, p): that = self.comboR
if that:
# this hack to prevent screen flicker due to delays in present and dismiss viewcontroller.. very hacky but works!!
window = gui.ElectrumGui.gui.window
hax = UIView.alloc().initWithFrame_(window.bounds).autorelease()
hax.backgroundColor = that.view.backgroundColor
hax.opaque = False
hax2 = UIView.alloc().initWithFrame_(this.bottomView.convertRect_toView_(this.bottomView.bounds,None)).autorelease()
hax2.backgroundColor = this.bottomView.backgroundColor
hax.addSubview_(hax2)
window.addSubview_(hax)
that.view.backgroundColor = UIColor.clearColor
this.view.backgroundColor = UIColor.clearColor
def showIt() -> None:
def killHax() -> None:
this.view.backgroundColor = hax.backgroundColor
that.view.backgroundColor = hax.backgroundColor
hax.removeFromSuperview()
that.openAnimated_(False)
self.presentViewController_animated_completion_(that, False, killHax)
self.dismissViewControllerAnimated_completion_(False, showIt)
this.closeAnimated_(False)
else:
self.doComboClose_(this)
def selectionChanged(sel : int) -> None:
which = self.presentedViewController
if isinstance(which, ComboDrawerPicker):
parent = gui.ElectrumGui.gui
if parent.config and not self.comboPreset:
whichKey = "AddressTab_Status_Filter" if which == self.comboR else "AddressTab_Type_Filter"
parent.config.set_key(whichKey, sel, True)
whichLbl = self.topLblL if which == self.comboL else self.topLblR
whichLbl.attributedText = which.attributedStringForTopTitle
self.doComboClose_(which)
# TODO: make the selection change take effect in how the table is filtered below..
self.tableView.reloadData()
self.comboL.backgroundTappedBlock = bgTapChk
self.comboL.controlTappedBlock = closeLAnim
self.comboL.controlTappedBlock = closeLAnim
self.comboL.selectedBlock = selectionChanged
self.comboR.backgroundTappedBlock = bgTapChk
self.comboR.controlTappedBlock = closeRAnim
self.comboR.selectedBlock = selectionChanged
@objc_method
    def doComboOpen_(self, vc) -> None:
        # Present a drawer VC: start fully transparent, then fade the dimmed
        # background in while the drawer slides open.
        semiclear = vc.view.backgroundColor.copy()
        vc.view.backgroundColor = UIColor.clearColor
        def compl() -> None:
            vc.view.backgroundColorAnimationToColor_duration_reverses_completion_(semiclear.autorelease(), 0.2, False, None)
            vc.openAnimated_(True)
        self.presentViewController_animated_completion_(vc, False, compl)
@objc_method
    def doComboClose_(self, vc) -> None:
        # Convenience wrapper: close a drawer with animation.
        self.doComboClose_animated_(vc, True)
@objc_method
    def doComboClose_animated_(self, vc, animated : bool) -> None:
        # Dismiss the presented drawer; when animated, the dismissal is slightly
        # delayed so the drawer's own close animation can start first.
        # NB: weak ref self.modalDrawerVC will be auto-cleared by obj-c runtime after it is dismissed
        if animated:
            utils.call_later(0.050, self.dismissViewControllerAnimated_completion_,True, None)
        else:
            self.dismissViewControllerAnimated_completion_(False, None)
        vc.closeAnimated_(animated)
@objc_method
    def onTapComboProxyL(self) -> None:
        # Tap on the left (type filter) proxy label: open its drawer.
        self.doComboOpen_(self.comboL)
@objc_method
    def onTapComboProxyR(self) -> None:
        # Tap on the right (status filter) proxy label: open its drawer.
        self.doComboOpen_(self.comboR)
class AddressData:
    """Snapshot of all wallet addresses, pre-bucketed by type and status.

    master is a 2-D list indexed as master[type][status] where type follows
    _TYPES ("Any","Receiving","Change") and status follows _STATUSES
    ("All","Funded","Unused","Used"); each bucket holds Entry namedtuples.
    """
    # Immutable per-address record displayed throughout the addresses UI.
    Entry = namedtuple("Entry", "address addr_str addr_idx label balance_str fiat_balance_str num_tx is_frozen balance is_change is_used base_unit is_watch_only num_utxos")
    def __init__(self, gui_parent):
        self.parent = gui_parent
        self.clear()
    def clear(self):
        # Reset to an empty type x status grid of buckets.
        self.show_fx = False
        self.master = [ [list() for s in range(0,len(_STATUSES))] for t in range(0, len(_TYPES)) ]
    def refresh(self):
        """Rebuild every bucket from the wallet's current address set."""
        t0 = time.time()
        self.clear()
        wallet = self.parent.wallet
        daemon = self.parent.daemon
        if wallet is None: return
        receiving_addresses = wallet.get_receiving_addresses()
        change_addresses = wallet.get_change_addresses()
        numAddresses = 0
        base_unit = self.parent.base_unit()
        is_watch_only = wallet.is_watching_only()
        # Fiat values are only computed when the exchange-rate subsystem is on.
        if daemon and daemon.fx and daemon.fx.is_enabled() and daemon.fx.get_fiat_address_config():
            fx = daemon.fx
            self.show_fx = True
        else:
            self.show_fx = False
            fx = None
        sequences = [0,1] if change_addresses else [0]
        from .coins import get_coin_counts
        for is_change in sequences:
            addr_list = change_addresses if is_change else receiving_addresses
            for n, address in enumerate(addr_list):
                numAddresses += 1
                num = wallet.get_num_tx(address)
                is_used = wallet.is_used(address)
                balance = sum(wallet.get_addr_balance(address))
                address_text = address.to_ui_string()
                label = wallet.labels.get(address.to_storage_string(), '')
                balance_text = self.parent.format_amount(balance, whitespaces=False)
                is_frozen = wallet.is_frozen(address)
                fiat_balance = (fx.value_str(balance, fx.exchange_rate()) + " " + fx.get_currency()) if fx else ""
                num_utxos = get_coin_counts([address])
                #Entry = "address addr_str addr_idx, label, balance_str, fiat_balance_str, num_tx, is_frozen, balance, is_change, is_used, base_unit is_watch_only num_utxos"
                item = AddressData.Entry(address, address_text, n, label, balance_text, fiat_balance, num,
                                         bool(is_frozen), balance, bool(is_change), bool(is_used), base_unit, is_watch_only, num_utxos)
                #_TYPES = ("Any","Receiving","Change")
                #_STATUSES = ("All", "Funded", "Unused", "Used")
                self.master[0][0].append(item) # item belongs in 'Any,All' regardless
                self.master[2 if item.is_change else 1][0].append(item) # append to either change or receiving of 'All' list
                if item.balance:
                    self.master[0][1].append(item) # item belongs in 'Any,Funded' regardless
                    self.master[2 if item.is_change else 1][1].append(item) # append to either change or receiving of 'Funded' list
                if item.is_used:
                    self.master[0][3].append(item) # item belongs in the 'Any,Used' always, if used
                    self.master[2 if item.is_change else 1][3].append(item) # append to either change or receiving of 'All' list
                else: # Unused list
                    self.master[0][2].append(item) # item belongs in the 'Any,Unused' always, if unused
                    self.master[2 if item.is_change else 1][2].append(item) # append to either change or receiving of 'All' list
        # sort addresses by balance, num_tx, and index, descending
        for i,l1 in enumerate(self.master):
            for j,l2 in enumerate(l1):
                l2.sort(key=lambda x: [x.balance,x.num_tx,0-x.addr_idx], reverse=True )
                #print(_TYPES[i],_STATUSES[j],"len",len(l2))
        utils.NSLog("fetched %d addresses from wallet in %f ms",numAddresses,(time.time()-t0)*1e3)
def present_modal_address_picker(callback, vc = None, comboPreset : list = None) -> None:
    # Present an AddressesVC in picker mode inside a nav controller; *callback*
    # receives the chosen AddressData.Entry, then the picker is dismissed.
    # comboPreset, when a 2-item (type, status) pair, pre-selects the filters.
    parent = gui.ElectrumGui.gui
    avc = AddressesVC.alloc().initWithMode_(ModePicker).autorelease()
    nav = utils.tintify(UINavigationController.alloc().initWithRootViewController_(avc).autorelease())
    avc.comboPreset = list(comboPreset) if isinstance(comboPreset, (tuple, list)) and len(comboPreset) == 2 else None
    def pickedAddress(entry) -> None:
        if callable(callback):
            callback(entry)
        nav.presentingViewController.dismissViewControllerAnimated_completion_(True, None)
    utils.add_callback(avc, 'on_picked', pickedAddress)
    parent.add_navigation_bar_close_to_modal_vc(avc, leftSide = True)
    if vc is None: vc = parent.get_presented_viewcontroller()
    vc.presentViewController_animated_completion_(nav, True, None)
def EntryForAddress(address : str) -> object:
    # Look up the cached AddressData.Entry for *address* (str or Address).
    return gui.ElectrumGui.gui.get_address_entry(address)
def PushDetail(address_or_entry : object, navController : ObjCInstance) -> ObjCInstance:
    """Push an AddressDetail VC for *address_or_entry* (str, Address, or Entry)
    onto *navController* and return the pushed view controller.

    Raises ValueError when no cached entry exists for the given address.
    """
    entry = None
    if isinstance(address_or_entry, (str,Address)): entry = EntryForAddress(address_or_entry)
    elif isinstance(address_or_entry, AddressData.Entry):
        entry = address_or_entry
    if not entry:
        # FIX: error text referenced a nonexistent 'PushDetailForAddress' --
        # corrected to name this function.
        raise ValueError('PushDetail -- missing entry for address!')
    addrDetail = AddressDetail.alloc().initWithAddress_(entry.address.to_storage_string()).autorelease()
    navController.pushViewController_animated_(addrDetail, True)
    return addrDetail
from typing import Any
class AddressesMgr(utils.DataMgr):
    """DataMgr backend: key None yields the full AddressData set; a str or
    Address key yields the single matching Entry (or None)."""
    def doReloadForKey(self, key : Any) -> Any:
        if key is None:
            # Full (re)load of all address data from the wallet.
            data = AddressData(gui.ElectrumGui.gui)
            data.refresh()
            utils.NSLog("AddressMgr refresh (full)")
            return data
        if key and isinstance(key, (str, Address)):
            wanted = Address.from_string(key) if isinstance(key, str) else key
            # Recursive self.get(None) returns the cached 'all' dataset or rebuilds it.
            data = self.get(None)
            if data:
                # master[0][0] is the 'Any type / All statuses' bucket: every address.
                return next((e for e in data.master[0][0] if e.address == wanted), None)
        return None
def _Get(domain = None) -> AddressData:
    """Fetch the cached AddressData (domain None) or a single Entry for *domain*."""
    # ObjC-bridged callers may hand us an NSObject; convert to a python value first.
    if isinstance(domain, ObjCInstance):
        domain = py_from_ns(domain)
    return gui.ElectrumGui.gui.sigAddresses.get(domain)
def _GetBBTitle(x = None) -> Any:
    """Title for the address-format toggle bar button.

    With x not None, returns the possibleTitles list for UIBarButtonItem sizing.
    Both branches currently yield the same neutral 'Toggle Format' text (the old
    'Show Legacy'/'Show CashAddr' labels were retired).
    """
    if x is not None:
        #return [ _("Show CashAddr"), _("Show Legacy") ]
        return [ _("Toggle Format"), _("Toggle Format") ]
    if gui.ElectrumGui.gui.prefs_get_use_cashaddr():
        return _("Toggle Format") #_("Show Legacy")
    return _("Toggle Format") #_("Show CashAddr")
def _ShowAddressContextMenu(entry, parentvc, ipadAnchor, toggleFreezeCallback = None):
    """Present the per-address action sheet (copy, request payment, freeze,
    spend-from, block explorer, key operations, ...) on *parentvc*."""
    parent = gui.ElectrumGui.gui
    if not parent.wallet:
        utils.NSLog("_ShowAddressContextMenu: wallet is None -- possibly backgrounded/closed wallet. Returning early.")
        return
    def on_block_explorer() -> None:
        parent.view_on_block_explorer(entry.address, 'addr')
    def on_request_payment() -> None:
        parent.jump_to_receive_with_address(entry.address)
    def on_private_key() -> None:
        def onPw(password : str) -> None:
            # present the private key view controller here.
            pk = None
            try:
                pk = parent.wallet.export_private_key(entry.address, password) if parent.wallet else None
            except Exception:
                # was a bare 'except:'; export fails e.g. on a bad password
                parent.show_error(str(sys.exc_info()[1]))
                return
            if pk:
                vc = private_key_dialog.PrivateKeyDialog.alloc().init().autorelease()
                pkentry = private_key_dialog.PrivateKeyEntry(entry.address, pk, entry.is_frozen, entry.is_change)
                utils.nspy_put_byname(vc, pkentry, 'entry')
                parentvc.navigationController.pushViewController_animated_(vc, True)
        parent.prompt_password_if_needed_asynch(onPw)
    def on_sign_verify() -> None:
        vc = sign_decrypt_dialog.Create_SignVerify_VC(entry.address)
        parentvc.navigationController.pushViewController_animated_(vc, True)
    def on_encrypt_decrypt() -> None:
        if not parent.wallet: return
        try:
            pubkey = parent.wallet.get_public_key(entry.address)
        except Exception:
            # was a bare 'except:'; no public key => no encrypt/decrypt option
            print("exception extracting public key:",str(sys.exc_info()[1]))
            return
        if pubkey is not None and not isinstance(pubkey, str):
            pubkey = pubkey.to_ui_string()
        if not pubkey:
            return
        vc = sign_decrypt_dialog.Create_EncryptDecrypt_VC(entry.address, pubkey)
        parentvc.navigationController.pushViewController_animated_(vc, True)
    def on_copy() -> None:
        parent.copy_to_clipboard(entry.addr_str, 'Address')
    # Base actions; more are inserted/appended below depending on entry state.
    actions = [
            [ _('Cancel') ],
            [ _('Copy Address'), on_copy ],
            [ _("Request payment"), on_request_payment ],
        ]
    watch_only = entry.is_watch_only
    if entry.num_utxos and parentvc.navigationController:
        from .coins import PushCoinsVC
        actions.insert(2, [_('Show Coins (UTXOs)'), PushCoinsVC, [entry.address], parentvc.navigationController])
    if isinstance(parentvc, AddressDetail):
        actions.insert(2, [ _('Share/Save QR...'), lambda: parentvc.onQRImgTap() ])
    if not watch_only:
        try:
            pubkey = parent.wallet.get_public_key(entry.address)
            pubkey = pubkey.to_ui_string() if pubkey and not isinstance(pubkey, str) else pubkey
            if pubkey:
                actions.insert(2, [ _('Copy Public key'), lambda: parent.copy_to_clipboard(pubkey, _('Public key')) ] )
        except Exception:
            # was a bare 'except:'; best-effort -- menu simply omits the pubkey item
            pass
    def onToggleFreeze() -> None:
        _ToggleFreeze(entry)
        if callable(toggleFreezeCallback):
            toggleFreezeCallback()
    actions.append([ _('Freeze') if not entry.is_frozen else _('Unfreeze'), onToggleFreeze ])
    if not watch_only and not entry.is_frozen and entry.balance > 0:
        actions.append([ _('Spend from this Address'), lambda: _SpendFrom(entry, vc = parentvc) ] )
    actions.append([ _("View on block explorer"), on_block_explorer ])
    if not watch_only:
        actions.append([ _('Private key'), on_private_key ] )
        if entry.address.kind == entry.address.ADDR_P2PKH:
            if not watch_only:
                actions.append([ _('Sign/verify Message'), on_sign_verify ] )
                actions.append([ _('Encrypt/decrypt Message'), on_encrypt_decrypt ] )
            else:
                actions.append([ _('Verify Message'), on_sign_verify ] )
    utils.show_alert(
        vc = parentvc,
        title = _("Options"),
        message = entry.addr_str,#[0:12] + "..." + entry.addr_str[-12:],
        actions = actions,
        cancel = _('Cancel'),
        style = UIAlertControllerStyleActionSheet,
        ipadAnchor = ipadAnchor
    )
def _ToggleFreeze(entry):
    """Flip the frozen state of *entry*'s address, persist it, and refresh the UI."""
    parent = gui.ElectrumGui.gui
    if not parent.wallet:
        return  # wallet closed/backgrounded -- nothing to do
    parent.wallet.set_frozen_state([entry.address], not entry.is_frozen)
    parent.wallet.storage.write()
    parent.refresh_components('addresses')
def _SpendFrom(entry, vc = None):
    # Jump to the send screen with this address's spendable coins preselected.
    # When nothing is spendable, diagnose why and show a targeted error.
    parent = gui.ElectrumGui.gui
    if parent.wallet:
        coins = parent.wallet.get_spendable_coins([entry.address], parent.config)
        if coins:
            parent.jump_to_send_with_spend_from(coins, vc = vc)
        else:
            # Figure out why no coins despite menu option -- and provide
            # a reasonable error message.
            coins = parent.wallet.get_addr_utxo(entry.address)
            msg = _('Address has no spendable coins')
            if coins:
                if all(bool(x['slp_token']) for x in coins.values()):
                    msg = _('Address contains only spend-locked SLP tokens')
                elif all(bool(x['is_frozen_coin']) for x in coins.values()):
                    msg = _('Address contains only frozen coins')
            parent.show_error(msg, title = _('Cannot Spend'))
| |
"""
Gate Unit tests
"""
import pytest
from anchore_engine.db import Image
from anchore_engine.services.policy_engine.engine.policy.gates.image_metadata import (
ImageMetadataAttributeCheckTrigger,
ImageMetadataGate,
)
from anchore_engine.subsys import logger
from tests.integration.services.policy_engine.engine.policy.gates import GateUnitTest
logger.enable_test_logging()
# Synthetic Image fixture shared by the gate tests below: a debian 9
# "node:latest" image (guessed dockerfile, 100 MiB size) with a full docker
# inspect payload so attribute checks (size, architecture, os_type, distro,
# layer_count, ...) have concrete values to match against.
test_image = Image()
test_image.distro_name = "debian"
test_image.distro_version = "9"
test_image.like_distro = "debian"
test_image.user_id = "0"
test_image.layers_json = ["sha256:a", "sha256:b", "sha256:c"]
test_image.layer_info_json = ["layer1", "layer2"]
test_image.dockerfile_contents = "FROM SCRATCH\nHEALTHcheck blah\n"
test_image.dockerfile_mode = "Guessed"
test_image.size = 100 * 1024 * 1024
# Trimmed-down 'docker inspect' output for a real node:latest image.
test_image.docker_data_json = {
    "Comment": "",
    "Container": "4e69ef98747345110dc23069be98ff0ae562cc83a187ff1bdd1d2e0048889679",
    "DockerVersion": "17.03.1-ce",
    "Parent": "",
    "Created": "2017-05-15T17:41:29.424239055Z",
    "Config": {
        "Tty": False,
        "Cmd": ["node"],
        "Volumes": None,
        "Domainname": "",
        "WorkingDir": "",
        "Image": "sha256:6f109e97451ad43719c0a1802153811ad2f02d912a1d15a2ed7f0728be3026b6",
        "Hostname": "200591939db7",
        "StdinOnce": False,
        "ArgsEscaped": True,
        "Labels": {},
        "AttachStdin": False,
        "User": "",
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "NPM_CONFIG_LOGLEVEL=info",
            "NODE_VERSION=7.10.0",
            "YARN_VERSION=0.24.4",
        ],
        "Entrypoint": False,
        "OnBuild": [],
        "AttachStderr": False,
        "AttachStdout": False,
        "OpenStdin": False,
    },
    "RepoDigests": [
        "node@sha256:ca55f4f5cb68a78c3ad9fe1ee13cba692906ec25dd73782800cbd4ae4b9fac45"
    ],
    "Author": "",
    "GraphDriver": {"Data": None, "Name": "aufs"},
    "Id": "sha256:6c792d9195914c8038f4cabd9356a5af47ead140b87682c8651edd55b010686c",
    "VirtualSize": 665664130,
    "Architecture": "amd64",
    "ContainerConfig": {
        "Tty": False,
        "Cmd": ["/bin/sh", "-c", "#(nop) ", 'CMD ["node"]'],
        "Volumes": None,
        "Domainname": "",
        "WorkingDir": "",
        "Image": "sha256:6f109e97451ad43719c0a1802153811ad2f02d912a1d15a2ed7f0728be3026b6",
        "Hostname": "200591939db7",
        "StdinOnce": False,
        "ArgsEscaped": True,
        "Labels": {},
        "AttachStdin": False,
        "User": "",
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "NPM_CONFIG_LOGLEVEL=info",
            "NODE_VERSION=7.10.0",
            "YARN_VERSION=0.24.4",
        ],
        "Entrypoint": None,
        "OnBuild": [],
        "AttachStderr": False,
        "AttachStdout": False,
        "OpenStdin": False,
    },
    "RepoTags": ["node:latest"],
    "Os": "linux",
    "RootFS": {
        "Layers": [
            "sha256:8d4d1ab5ff74fc361fb74212fff3b6dc1e6c16d1e1f0e8b44f9a9112b00b564f",
            "sha256:c59fa6cbcbd97c74330d140b99d12098d2d80c77533d35444d311c9393d129ce",
            "sha256:445ed6ee6867fb85175f4c784722576125ea6352219637b792e0bdf3d3357e9c",
            "sha256:e7b0b4cd055a4570908cd908865755362570be3157e5bdb64e93b8a5ca3a1b61",
            "sha256:2607b744b89df8bd9eb04c49319abc672b2cf1479ef63996d33c64033c988293",
            "sha256:0f20784b55ffa7d6d82533f54a0b6476e305a3b578bf9ed681bcb2a4f4f3e9dc",
            "sha256:a8d5a17bf5cccb8f640bf6ab662a7e0881e20f5335c93ec97a6dcd3a6942195b",
            "sha256:3e88edcc5f79b45da734bd31106a71913759a6d8d965dee50fad73bfcf378f26",
        ],
        "Type": "layers",
    },
    "Size": 665664130,
}
@pytest.mark.usefixtures("cls_test_data_env2")
class ImageMetadataGateTest(GateUnitTest):
    """Tests for ImageMetadataGate's attribute-check trigger.

    Exercises parameter validation and every supported comparison
    operator (=, !=, like, not_like, in, not_in) against the shared
    ``test_image`` fixture.
    """

    gate_clazz = ImageMetadataGate

    def _evaluate(self, attribute, check, value):
        """Initialize an attribute-check trigger, evaluate it against
        ``test_image``, log what fired and return the trigger."""
        t, gate, test_context = self.get_initialized_trigger(
            ImageMetadataAttributeCheckTrigger.__trigger_name__,
            attribute=attribute,
            check=check,
            value=value,
        )
        test_context = gate.prepare_context(test_image, test_context)
        t.evaluate(test_image, test_context)
        logger.info(("Fired: {}".format([x.json() for x in t.fired])))
        return t

    def _assert_fire_count(self, attribute, check, value, expected):
        """Assert the trigger fires exactly ``expected`` times."""
        t = self._evaluate(attribute, check, value)
        self.assertEqual(len(t.fired), expected)

    def test_imagemetadata_validator(self):
        # attribute name -> (value to compare against, whether the name is
        # accepted by the trigger's parameter validator)
        cases = {
            "size": ("100", True),
            "architecture": ("test", True),
            "os_type": ("rhel", True),
            "distro": ("centos", True),
            "distro_version": ("8", True),
            "like_distro": ("rhel", True),
            "layer_count": ("10", True),
            "blah": ("", False),
            "image_size": ("100000", False),
            "layer_size": ("100", False),
        }
        for attr, (val, expect_ok) in cases.items():
            try:
                t, gate, test_context = self.get_initialized_trigger(
                    ImageMetadataAttributeCheckTrigger.__trigger_name__,
                    attribute=attr,
                    check="=",
                    value=val,
                )
                # An invalid attribute that still produced a trigger is a
                # failure: this assertEqual(False, True) is reached only then.
                if not expect_ok and t:
                    self.assertEqual(expect_ok, bool(t))
            except Exception:
                # Invalid attribute names are expected to raise during
                # trigger initialization; anything else is a real failure.
                if not expect_ok:
                    logger.info(
                        (
                            "Caught expected exception on invalid attr name: {}".format(
                                attr
                            )
                        )
                    )
                else:
                    raise

    def test_imagemetadatatrigger_params(self):
        # attribute name -> expected value, taken from the test image
        # itself, so every "=" check must fire exactly once.
        cases = {
            "size": str(test_image.size),
            "architecture": "amd64",
            "os_type": "linux",
            "distro": test_image.distro_name,
            "distro_version": test_image.distro_version,
            "like_distro": test_image.like_distro,
            "layer_count": str(len(test_image.layers_json)),
        }
        for attribute, expected_value in cases.items():
            logger.info(
                ("Testing attr {} against {}".format(attribute, expected_value))
            )
            self._assert_fire_count(attribute, "=", expected_value, 1)

    def test_imagemetadatatrigger_distro_name(self):
        # test_image's distro is debian, so each positive case fires once
        # and each negative case fires zero times.
        logger.info("Testing =")
        self._assert_fire_count("distro", "=", "debian", 1)
        self._assert_fire_count("distro", "=", "mandriva", 0)

        logger.info("Testing !=")
        self._assert_fire_count("distro", "!=", "mandriva", 1)
        self._assert_fire_count("distro", "!=", "debian", 0)

        logger.info("Testing like")
        self._assert_fire_count("distro", "like", ".*ebia.*", 1)
        self._assert_fire_count("distro", "like", ".*entos.*", 0)

        logger.info("Testing not_like")
        self._assert_fire_count("distro", "not_like", ".*entos.*", 1)

        logger.info('Testing "in"')
        self._assert_fire_count("distro", "in", " centos , debian ", 1)
        logger.info('Testing "in" --fail')
        self._assert_fire_count("distro", "in", " centos , rhel ", 0)

        logger.info('Testing "not_in"')
        self._assert_fire_count("distro", "not_in", " centos , mandriva ", 1)
        logger.info('Testing "not_in" --fail')
        self._assert_fire_count("distro", "not_in", " debian , rhel ", 0)
| |
import re
import uuid
from collections import defaultdict, namedtuple
from collections.abc import Iterable
from functools import lru_cache
from urllib.parse import unquote
from sanic.exceptions import MethodNotSupported, NotFound
from sanic.views import CompositionView
# Immutable record of a registered route and its match metadata.
Route = namedtuple(
    "Route", ["handler", "methods", "pattern", "parameters", "name", "uri"]
)
# A single URL parameter: its name and the callable used to cast its value.
Parameter = namedtuple("Parameter", ["name", "cast"])

# Built-in parameter types: maps type name -> (cast callable, regex fragment).
REGEX_TYPES = {
    "string": (str, r"[^/]+"),
    "int": (int, r"-?\d+"),
    "number": (float, r"-?[0-9\\.]+"),
    "alpha": (str, r"[A-Za-z]+"),
    "path": (str, r"[^/].*?"),
    "uuid": (
        uuid.UUID,
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}",
    ),
}

# Maximum number of entries kept by the lru_cache-backed route lookups.
ROUTER_CACHE_SIZE = 1024
def url_hash(url):
    """Bucket key for dynamic-route lookup: the number of '/' characters
    (i.e. path-segment separators) in *url*."""
    return sum(1 for ch in url if ch == "/")
class RouteExists(Exception):
    """Raised when adding a route that conflicts with one already registered."""
    pass


class RouteDoesNotExist(Exception):
    """Raised when removing a route that was never registered."""
    pass


class ParameterNameConflicts(Exception):
    """Raised when a route URI declares the same parameter name twice."""
    pass
class Router:
    """Router supports basic routing with parameters and method checks

    Usage:

    .. code-block:: python

        @sanic.route('/my/url/<my_param>', methods=['GET', 'POST', ...])
        def my_route(request, my_param):
            do stuff...

    or

    .. code-block:: python

        @sanic.route('/my/url/<my_param:my_type>', methods['GET', 'POST', ...])
        def my_route_with_type(request, my_param: my_type):
            do stuff...

    Parameters will be passed as keyword arguments to the request handling
    function. Provided parameters can also have a type by appending :type to
    the <parameter>. Given parameter must be able to be type-casted to this.
    If no type is provided, a string is expected. A regular expression can
    also be passed in as the type. The argument given to the function will
    always be a string, independent of the type.
    """

    # Class-level defaults; every instance gets fresh containers in __init__.
    routes_static = None
    routes_dynamic = None
    routes_always_check = None
    # Matches the <name> / <name:type> placeholders inside a URI.
    parameter_pattern = re.compile(r"<(.+?)>")

    def __init__(self):
        self.routes_all = {}           # uri -> Route, every registered route
        self.routes_names = {}         # handler name -> (uri, Route), for url_for
        self.routes_static_files = {}  # static-file handler name -> (uri, Route)
        self.routes_static = {}        # parameterless uris, exact-match lookup
        self.routes_dynamic = defaultdict(list)  # url_hash(uri) -> [Route]
        self.routes_always_check = []  # "unhashable" routes, always scanned
        self.hosts = set()             # virtual hosts seen via add(host=...)

    @classmethod
    def parse_parameter_string(cls, parameter_string):
        """Parse a parameter string into its constituent name, type, and
        pattern

        For example::

            parse_parameter_string('param_one:[A-z]') ->
                ('param_one', str, '[A-z]')

        Note: the surrounding ``<`` and ``>`` are stripped by the caller's
        regex (``parameter_pattern``) before this method is invoked; this
        method only splits on the first ``:``.

        :param parameter_string: String to parse
        :return: tuple containing
            (parameter_name, parameter_type, parameter_pattern)
        """
        # We could receive NAME or NAME:PATTERN
        name = parameter_string
        pattern = "string"
        if ":" in parameter_string:
            name, pattern = parameter_string.split(":", 1)
            if not name:
                raise ValueError(
                    "Invalid parameter syntax: {}".format(parameter_string)
                )

        # Pull from pre-configured types; an unknown pattern name is treated
        # as a raw regex with a plain-str cast.
        default = (str, pattern)
        _type, pattern = REGEX_TYPES.get(pattern, default)

        return name, _type, pattern
    def add(
        self,
        uri,
        methods,
        handler,
        host=None,
        strict_slashes=False,
        version=None,
        name=None,
    ):
        """Add a handler to the route list

        :param uri: path to match
        :param methods: sequence of accepted method names. If none are
            provided, any method is allowed
        :param handler: request handler function.
            When executed, it should provide a response object.
        :param host: virtual host string, or iterable of host strings
        :param strict_slashes: strict to trailing slash
        :param version: current version of the route or blueprint. See
            docs for further details.
        :param name: user defined route name for url_for
        :return: Nothing
        """
        if version is not None:
            # Prefix the uri with "/v<version>" (leading "v" in the given
            # version is tolerated and stripped).
            version = re.escape(str(version).strip("/").lstrip("v"))
            uri = "/".join(["/v{}".format(version), uri.lstrip("/")])
        # add regular version
        self._add(uri, methods, handler, host, name)

        if strict_slashes:
            return

        if not isinstance(host, str) and host is not None:
            # we have gotten back to the top of the recursion tree where the
            # host was originally a list. By now, we've processed the strict
            # slashes logic on the leaf nodes (the individual host strings in
            # the list of host)
            return

        # Add versions with and without trailing /
        # NOTE(review): routes_all maps uri -> Route, so a hit here yields a
        # Route namedtuple and the `in` checks below test membership against
        # the tuple's fields rather than a method set — confirm intended.
        slashed_methods = self.routes_all.get(uri + "/", frozenset({}))
        unslashed_methods = self.routes_all.get(uri[:-1], frozenset({}))
        if isinstance(methods, Iterable):
            _slash_is_missing = all(
                method in slashed_methods for method in methods
            )
            _without_slash_is_missing = all(
                method in unslashed_methods for method in methods
            )
        else:
            _slash_is_missing = methods in slashed_methods
            _without_slash_is_missing = methods in unslashed_methods

        slash_is_missing = not uri[-1] == "/" and not _slash_is_missing
        without_slash_is_missing = (
            uri[-1] == "/" and not _without_slash_is_missing and not uri == "/"
        )
        # add version with trailing slash
        if slash_is_missing:
            self._add(uri + "/", methods, handler, host, name)
        # add version without trailing slash
        elif without_slash_is_missing:
            self._add(uri[:-1], methods, handler, host, name)
    def _add(self, uri, methods, handler, host=None, name=None):
        """Add a handler to the route list

        :param uri: path to match
        :param methods: sequence of accepted method names. If none are
            provided, any method is allowed
        :param handler: request handler function.
            When executed, it should provide a response object.
        :param name: user defined route name for url_for
        :return: Nothing
        """
        if host is not None:
            if isinstance(host, str):
                # Host-qualified routes are stored with the host prepended
                # to the uri, mirroring the lookup in _get().
                uri = host + uri
                self.hosts.add(host)
            else:
                if not isinstance(host, Iterable):
                    raise ValueError(
                        "Expected either string or Iterable of "
                        "host strings, not {!r}".format(host)
                    )
                # Recurse once per host; the recursion bottoms out in the
                # string branch above.
                for host_ in host:
                    self.add(uri, methods, handler, host_, name)
                return

        # Dict for faster lookups of if method allowed
        if methods:
            methods = frozenset(methods)

        parameters = []
        parameter_names = set()
        properties = {"unhashable": None}

        def add_parameter(match):
            # re.sub callback: replaces each <name[:type]> placeholder with
            # its capturing regex group while recording the parameter.
            name = match.group(1)
            name, _type, pattern = self.parse_parameter_string(name)

            if name in parameter_names:
                raise ParameterNameConflicts(
                    "Multiple parameter named <{name}> "
                    "in route uri {uri}".format(name=name, uri=uri)
                )
            parameter_names.add(name)

            parameter = Parameter(name=name, cast=_type)
            parameters.append(parameter)

            # Mark the whole route as unhashable if it has the hash key in it
            if re.search(r"(^|[^^]){1}/", pattern):
                properties["unhashable"] = True
            # Mark the route as unhashable if it matches the hash key
            elif re.search(r"/", pattern):
                properties["unhashable"] = True

            return "({})".format(pattern)

        pattern_string = re.sub(self.parameter_pattern, add_parameter, uri)
        pattern = re.compile(r"^{}$".format(pattern_string))

        def merge_route(route, methods, handler):
            # merge to the existing route when possible.
            if not route.methods or not methods:
                # method-unspecified routes are not mergeable.
                raise RouteExists("Route already registered: {}".format(uri))
            elif route.methods.intersection(methods):
                # already existing method is not overloadable.
                duplicated = methods.intersection(route.methods)
                raise RouteExists(
                    "Route already registered: {} [{}]".format(
                        uri, ",".join(list(duplicated))
                    )
                )
            # Fold both handlers into a CompositionView so each method
            # dispatches to its own callable.
            if isinstance(route.handler, CompositionView):
                view = route.handler
            else:
                view = CompositionView()
                view.add(route.methods, route.handler)
            view.add(methods, handler)
            route = route._replace(
                handler=view, methods=methods.union(route.methods)
            )
            return route

        if parameters:
            # TODO: This is too complex, we need to reduce the complexity
            if properties["unhashable"]:
                routes_to_check = self.routes_always_check
                ndx, route = self.check_dynamic_route_exists(
                    pattern, routes_to_check, parameters
                )
            else:
                routes_to_check = self.routes_dynamic[url_hash(uri)]
                ndx, route = self.check_dynamic_route_exists(
                    pattern, routes_to_check, parameters
                )
            if ndx != -1:
                # Pop the ndx of the route, no dups of the same route
                routes_to_check.pop(ndx)
        else:
            route = self.routes_all.get(uri)

        # prefix the handler name with the blueprint name
        # if available
        # special prefix for static files
        is_static = False
        if name and name.startswith("_static_"):
            is_static = True
            name = name.split("_static_", 1)[-1]

        if hasattr(handler, "__blueprintname__"):
            handler_name = "{}.{}".format(
                handler.__blueprintname__, name or handler.__name__
            )
        else:
            handler_name = name or getattr(handler, "__name__", None)

        if route:
            route = merge_route(route, methods, handler)
        else:
            route = Route(
                handler=handler,
                methods=methods,
                pattern=pattern,
                parameters=parameters,
                name=handler_name,
                uri=uri,
            )

        self.routes_all[uri] = route
        if is_static:
            # Only record the name mapping once per handler; a slash-variant
            # of an already-recorded uri is skipped.
            pair = self.routes_static_files.get(handler_name)
            if not (pair and (pair[0] + "/" == uri or uri + "/" == pair[0])):
                self.routes_static_files[handler_name] = (uri, route)
        else:
            pair = self.routes_names.get(handler_name)
            if not (pair and (pair[0] + "/" == uri or uri + "/" == pair[0])):
                self.routes_names[handler_name] = (uri, route)

        # File the route in exactly one lookup structure.
        if properties["unhashable"]:
            self.routes_always_check.append(route)
        elif parameters:
            self.routes_dynamic[url_hash(uri)].append(route)
        else:
            self.routes_static[uri] = route
@staticmethod
def check_dynamic_route_exists(pattern, routes_to_check, parameters):
"""
Check if a URL pattern exists in a list of routes provided based on
the comparison of URL pattern and the parameters.
:param pattern: URL parameter pattern
:param routes_to_check: list of dynamic routes either hashable or
unhashable routes.
:param parameters: List of :class:`Parameter` items
:return: Tuple of index and route if matching route exists else
-1 for index and None for route
"""
for ndx, route in enumerate(routes_to_check):
if route.pattern == pattern and route.parameters == parameters:
return ndx, route
else:
return -1, None
    def remove(self, uri, clean_cache=True, host=None):
        """Remove a previously registered route.

        :param uri: path of the route to remove
        :param clean_cache: when True, clear the lru_cache on _get so stale
            lookups cannot be served
        :param host: optional virtual host the route was registered under
        :raises RouteDoesNotExist: if the uri was never registered
        """
        if host is not None:
            # Host-qualified routes were stored with the host prepended.
            uri = host + uri
        try:
            route = self.routes_all.pop(uri)
            # Popping during iteration is safe here because we break
            # immediately after the single removal.
            for handler_name, pairs in self.routes_names.items():
                if pairs[0] == uri:
                    self.routes_names.pop(handler_name)
                    break
            for handler_name, pairs in self.routes_static_files.items():
                if pairs[0] == uri:
                    self.routes_static_files.pop(handler_name)
                    break

        except KeyError:
            raise RouteDoesNotExist("Route was not registered: {}".format(uri))

        # Remove from whichever lookup structure _add filed it in.
        if route in self.routes_always_check:
            self.routes_always_check.remove(route)
        elif (
            url_hash(uri) in self.routes_dynamic
            and route in self.routes_dynamic[url_hash(uri)]
        ):
            self.routes_dynamic[url_hash(uri)].remove(route)
        else:
            self.routes_static.pop(uri)

        if clean_cache:
            self._get.cache_clear()
@lru_cache(maxsize=ROUTER_CACHE_SIZE)
def find_route_by_view_name(self, view_name, name=None):
"""Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:param kwargs: additional params, usually for static files
:return: tuple containing (uri, Route)
"""
if not view_name:
return (None, None)
if view_name == "static" or view_name.endswith(".static"):
return self.routes_static_files.get(name, (None, None))
return self.routes_names.get(view_name, (None, None))
    def get(self, request):
        """Get a request handler based on the URL of the request, or raises an
        error

        :param request: Request object
        :return: handler, arguments, keyword arguments
        """
        # No virtual hosts specified; default behavior
        if not self.hosts:
            return self._get(request.path, request.method, "")
        # virtual hosts specified; try to match route to the host header
        try:
            return self._get(
                request.path, request.method, request.headers.get("Host", "")
            )
        # try default hosts
        except NotFound:
            return self._get(request.path, request.method, "")

    def get_supported_methods(self, url):
        """Get a list of supported methods for a url and optional host.

        :param url: URL string (including host)
        :return: frozenset of supported methods
        """
        route = self.routes_all.get(url)
        # if methods are None then this logic will prevent an error
        return getattr(route, "methods", None) or frozenset()
    @lru_cache(maxsize=ROUTER_CACHE_SIZE)
    def _get(self, url, method, host):
        """Get a request handler based on the URL of the request, or raises an
        error. Internal method for caching.

        :param url: request URL
        :param method: request method
        :param host: virtual host prefix ("" when none applies)
        :return: handler, arguments, keyword arguments, route uri
        :raises NotFound: no route matches the url
        :raises MethodNotSupported: a route matches but not for this method
        """
        url = unquote(host + url)
        # Check against known static routes
        route = self.routes_static.get(url)
        method_not_supported = MethodNotSupported(
            "Method {} not allowed for URL {}".format(method, url),
            method=method,
            allowed_methods=self.get_supported_methods(url),
        )
        if route:
            if route.methods and method not in route.methods:
                raise method_not_supported
            match = route.pattern.match(url)
        else:
            route_found = False
            # Move on to testing all regex routes
            for route in self.routes_dynamic[url_hash(url)]:
                match = route.pattern.match(url)
                route_found |= match is not None
                # Do early method checking
                if match and method in route.methods:
                    break
            else:
                # (for/else: reached only when no dynamic route broke out)
                # Lastly, check against all regex routes that cannot be hashed
                for route in self.routes_always_check:
                    match = route.pattern.match(url)
                    route_found |= match is not None
                    # Do early method checking
                    if match and method in route.methods:
                        break
                else:
                    # Route was found but the methods didn't match
                    if route_found:
                        raise method_not_supported
                    raise NotFound("Requested URL {} not found".format(url))

        # Cast each captured path value with its parameter's cast callable.
        # NOTE(review): groups(1) substitutes 1 for unmatched optional
        # groups before casting — confirm that is the intended default.
        kwargs = {
            p.name: p.cast(value)
            for value, p in zip(match.groups(1), route.parameters)
        }
        route_handler = route.handler
        if hasattr(route_handler, "handlers"):
            # CompositionView: pick the per-method handler.
            route_handler = route_handler.handlers[method]
        return route_handler, [], kwargs, route.uri
    def is_stream_handler(self, request):
        """ Handler for request is stream or not.

        :param request: Request object
        :return: bool
        """
        try:
            handler = self.get(request)[0]
        except (NotFound, MethodNotSupported):
            # Unroutable requests are simply "not streaming".
            return False
        if hasattr(handler, "view_class") and hasattr(
            handler.view_class, request.method.lower()
        ):
            # Class-based view: inspect the per-method function instead.
            handler = getattr(handler.view_class, request.method.lower())
        return hasattr(handler, "is_stream")
| |
"""
Sheet classes.
A Sheet is a two-dimensional arrangement of processing units,
typically modeling a neural region or a subset of cells in a neural
region. Any new Sheet classes added to this directory will
automatically become available for any model.
"""
# Imported here so that all Sheets will be in the same package
from topo.base.sheet import Sheet
from topo.base.projection import ProjectionSheet # pyflakes:ignore (API import)
from topo.base.cf import CFSheet
from topo.base.generatorsheet import GeneratorSheet
from topo.base.generatorsheet import ChannelGeneratorSheet # pyflakes:ignore (API import)
# Imported here for ease of access by users
from topo.base.boundingregion import BoundingBox # pyflakes:ignore (API import)
from topo.base.sheet import activity_type # pyflakes:ignore (API import)
import numpy
import topo
import param
from topo.base.cf import CFIter
from topo.base.projection import Projection
from topo.base.simulation import FunctionEvent, PeriodicEventSequence, EPConnectionEvent
class ActivityCopy(Sheet):
    """
    Copies incoming Activity patterns to its activity matrix and output port.

    Trivial Sheet class that is useful primarily as a placeholder for
    data that is computed elsewhere but that you want to appear as a
    Sheet, e.g. when wrapping an external simulation.
    """

    dest_ports=['Activity']
    src_ports=['Activity']

    def input_event(self,conn,data):
        # Stash the incoming pattern; it is consumed (and deleted) in
        # process_current_time.
        self.input_data=data

    def process_current_time(self):
        if hasattr(self, 'input_data'):
            # In-place zero-then-add keeps the same activity array object
            # alive (other code may hold references to it).
            self.activity*=0
            self.activity+=self.input_data
            self.send_output(src_port='Activity',data=self.activity)
            del self.input_data
class SequenceGeneratorSheet(GeneratorSheet):
    """
    Sheet that generates a timed sequence of patterns.

    This sheet will repeatedly generate the input_sequence, with the
    given onsets. The sequence is repeated every self.period time
    units. If the total length of the sequence is longer than
    self.period, a warning is issued and the sequence repeats
    immediately after completion.
    """

    input_sequence = param.List(default=[],
        doc="""The sequence of patterns to generate. Must be a list of
        (onset,generator) tuples. An empty list defaults to the
        single tuple: (0,self.input_generator), resulting in
        identical behavior to an ordinary GeneratorSheet.""")

    def __init__(self,**params):
        super(SequenceGeneratorSheet,self).__init__(**params)
        # Default: behave exactly like a plain GeneratorSheet.
        if not self.input_sequence:
            self.input_sequence = [(0,self.input_generator)]

    def start(self):
        assert self.simulation

        # Build one event per (onset, generator) pair that swaps in the
        # generator, followed by a generate() at the end of the sequence.
        event_seq = []
        for delay,gen in self.input_sequence:
            event_seq.append(FunctionEvent(self.simulation.convert_to_time_type(delay),self.set_input_generator,gen))
        event_seq.append(FunctionEvent(0,self.generate))
        now = self.simulation.time()
        # Repeat the whole sequence every self.period, starting at phase.
        self.event = PeriodicEventSequence(now+self.simulation.convert_to_time_type(self.phase),self.simulation.convert_to_time_type(self.period),event_seq)
        self.simulation.enqueue_event(self.event)
def compute_joint_norm_totals(projlist,active_units_mask=True):
    """
    Compute norm_total for each CF in each projection from a group to
    be normalized jointly.
    """
    # Assumes that all Projections in the list have the same r,c size
    assert len(projlist)>=1
    cf_iter = CFIter(projlist[0],active_units_mask=active_units_mask)
    for _, flat_idx in cf_iter():
        # Sum the norm_totals of the corresponding CFs across all
        # projections in the group...
        joint_total = numpy.add.reduce(
            [proj.flatcfs[flat_idx].norm_total for proj in projlist]
        )
        # ...and write the joint value back into every one of them.
        for proj in projlist:
            proj.flatcfs[flat_idx].norm_total = joint_total
class JointNormalizingCFSheet(CFSheet):
    """
    A type of CFSheet extended to support joint sum-based normalization.

    For L1 normalization, joint normalization means normalizing the
    sum of (the absolute values of) all weights in a set of
    corresponding CFs in different Projections, rather than only
    considering weights in the same CF.

    This class provides a mechanism for grouping Projections (see
    _port_match and _grouped_in_projections) and a learn() function
    that computes the joint sums. Joint normalization also requires
    having ConnectionField store and return a norm_total for each
    neuron, and having an TransferFn that will respect this norm_total
    rather than the strict total of the ConnectionField's weights. At
    present, CFPOF_DivisiveNormalizeL1 and
    CFPOF_DivisiveNormalizeL1_opt do use norm_total; others can be
    extended to do something similar if necessary.

    To enable joint normalization, you can declare that all the
    incoming connections that should be normalized together each
    have a dest_port of:

    dest_port=('Activity','JointNormalize', 'AfferentGroup1'),

    Then all those that have this dest_port will be normalized
    together, as long as an appropriate TransferFn is being used.
    """

    joint_norm_fn = param.Callable(default=compute_joint_norm_totals,doc="""
        Function to use to compute the norm_total for each CF in each
        projection from a group to be normalized jointly.""")

    # JABALERT: Should check that whenever a connection is added to a
    # group, it has the same no of cfs as the existing connections.
    def start(self):
        # Normalize once at startup over all units, not just active ones.
        self._normalize_weights(active_units_mask=False)

    # CEBALERT: rename active_units_mask and default to False
    def _normalize_weights(self,active_units_mask=True):
        """
        Apply the weights_output_fns for every group of Projections.

        If active_units_mask is True, only active units will have
        their weights normalized.
        """
        for key,projlist in self._grouped_in_projections('JointNormalize').items():
            # PEP 8: identity comparison for the None group key (was `== None`).
            if key is None:
                normtype='Individually'
            else:
                normtype='Jointly'
                # Only grouped projections share a joint norm_total.
                self.joint_norm_fn(projlist,active_units_mask)

            self.debug(normtype + " normalizing:")

            for p in projlist:
                p.apply_learn_output_fns(active_units_mask=active_units_mask)
                self.debug(' ',p.name)

    def learn(self):
        """
        Call the learn() method on every Projection to the Sheet, and
        call the output functions (jointly if necessary).
        """
        # Ask all projections to learn independently
        for proj in self.in_connections:
            if not isinstance(proj,Projection):
                self.debug("Skipping non-Projection "+proj.name)
            else:
                proj.learn()

        # Apply output function in groups determined by dest_port
        self._normalize_weights()
class JointNormalizingCFSheet_Continuous(JointNormalizingCFSheet):
    """
    CFSheet that runs continuously, with no 'resting' periods between pattern presentations.

    Note that learning occurs only when the time is a whole number.
    """
    def process_current_time(self):
        if self.new_input:
            self.new_input = False
            # Learning fires only on whole-number simulation times.
            # NOTE(review): nesting reconstructed from a whitespace-mangled
            # source; the commented-out activate()/else lines suggest the
            # activation scheduling was changed at some point — confirm
            # against upstream before relying on this structure.
            if(float(topo.sim.time()) % 1.0 == 0.0):
                #self.activate()
                if (self.plastic):
                    self.learn()
            #else:
            self.activate()
class SettlingCFSheet(JointNormalizingCFSheet):
"""
A JointNormalizingCFSheet implementing the idea of settling.
Breaks continuous time up into discrete iterations, each
consisting of a series of activations, up to a fixed number of
settling steps. Settling is controlled by the tsettle parameter;
once that number of settling steps has been reached, an external
input is required before the sheet will activate again.
See the LISSOM algorithm (Sirosh and Miikkulainen, Biological
Cybernetics 71:66-78, 1994) for one example of its usage.
"""
strict_tsettle = param.Parameter(default = None,doc="""
If non-None, delay sending output until activation_count reaches this value.""")
mask_init_time=param.Integer(default=5,bounds=(0,None),doc="""
Determines when a new mask is initialized in each new iteration.
The mask is reset whenever new input comes in. Once the
activation_count (see tsettle) reaches mask_init_time, the mask
is initialized to reflect the current activity profile.""")
tsettle=param.Integer(default=8,bounds=(0,None),doc="""
Number of times to activate the SettlingCFSheet sheet for each external input event.
A counter is incremented each time an input is received from any
source, and once the counter reaches tsettle, the last activation
step is skipped so that there will not be any further recurrent
activation. The next external (i.e., afferent or feedback)
event will then start the counter over again.""")
continuous_learning = param.Boolean(default=False, doc="""
Whether to modify the weights after every settling step.
If false, waits until settling is completed before doing learning.""")
precedence = param.Number(0.6)
post_initialization_weights_output_fns = param.HookList([],doc="""
If not empty, weights output_fns that will replace the
existing ones after an initial normalization step.""")
beginning_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the beginning of each iteration.""")
end_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the end of each iteration.""")
def __init__(self,**params):
super(SettlingCFSheet,self).__init__(**params)
self.__counter_stack=[]
self.activation_count = 0
self.new_iteration = True
def start(self):
self._normalize_weights(active_units_mask=False)
if len(self.post_initialization_weights_output_fns)>0:
for proj in self.in_connections:
if not isinstance(proj,Projection):
self.debug("Skipping non-Projection ")
else:
proj.weights_output_fns=self.post_initialization_weights_output_fns
def input_event(self,conn,data):
# On a new afferent input, clear the activity
if self.new_iteration:
for f in self.beginning_of_iteration: f()
self.new_iteration = False
self.activity *= 0.0
for proj in self.in_connections:
proj.activity *= 0.0
self.mask.reset()
super(SettlingCFSheet,self).input_event(conn,data)
### JABALERT! There should be some sort of warning when
### tsettle times the input delay is larger than the input period.
### Right now it seems to do strange things in that case (does it
### settle at all after the first iteration?), but of course that
### is arguably an error condition anyway (and should thus be
### flagged).
# CEBALERT: there is at least one bug in here for tsettle==0: see
# CB/JAB email "LISSOM tsettle question", 2010/03/22.
def process_current_time(self):
"""
Pass the accumulated stimulation through self.output_fns and
send it out on the default output port.
"""
if self.new_input:
self.new_input = False
if self.activation_count == self.mask_init_time:
self.mask.calculate()
if self.tsettle == 0:
# Special case: behave just like a CFSheet
self.activate()
self.learn()
elif self.activation_count == self.tsettle:
# Once we have been activated the required number of times
# (determined by tsettle), reset various counters, learn
# if appropriate, and avoid further activation until an
# external event arrives.
for f in self.end_of_iteration: f()
self.activation_count = 0
self.new_iteration = True # used by input_event when it is called
if (self.plastic and not self.continuous_learning):
self.learn()
else:
self.activate()
self.activation_count += 1
if (self.plastic and self.continuous_learning):
self.learn()
# print the weights of a unit
def printwts(self,x,y):
for proj in self.in_connections:
print proj.name, x, y
print proj.cfs[x,y].weights
def state_push(self,**args):
super(SettlingCFSheet,self).state_push(**args)
self.__counter_stack.append((self.activation_count,self.new_iteration))
def state_pop(self,**args):
super(SettlingCFSheet,self).state_pop(**args)
self.activation_count,self.new_iteration=self.__counter_stack.pop()
def send_output(self,src_port=None,data=None):
"""Send some data out to all connections on the given src_port."""
out_conns_on_src_port = [conn for conn in self.out_connections
if self._port_match(conn.src_port,[src_port])]
for conn in out_conns_on_src_port:
if self.strict_tsettle != None:
if self.activation_count < self.strict_tsettle:
if len(conn.dest_port)>2 and conn.dest_port[2] == 'Afferent':
continue
self.verbose("Sending output on src_port %s via connection %s to %s" %
(str(src_port), conn.name, conn.dest.name))
e=EPConnectionEvent(self.simulation.convert_to_time_type(conn.delay)+self.simulation.time(),conn,data)
self.simulation.enqueue_event(e)
# Export every Sheet subclass defined above, plus a few API names.
_public = list(set([_k for _k,_v in locals().items() if isinstance(_v,type) and issubclass(_v,Sheet)]))
_public += [
    "compute_joint_norm_totals",
    "BoundingBox",
    "activity_type",
]

# Automatically discover all .py files in this directory.
import os,fnmatch
__all__ = _public + [f.split('.py')[0] for f in os.listdir(__path__[0]) if fnmatch.fnmatch(f,'[!._]*.py')]
# NOTE: `del f` relies on Python 2 list comprehensions leaking their loop
# variable into the enclosing scope; under Python 3 this would raise
# NameError.
del f,os,fnmatch

# By default, avoid loading modules that rely on external libraries
# that might not be present on this system.
__all__.remove('ptztracker')
| |
import copy
from itertools import chain
import exchange
import lan
from processing import collect_array as ca
from processing import collect_device as cd
from processing import collect_id as ci
from processing import collect_loop as cl
class PlaceInReg(object):
    """Optimization pass that finds array references which can be cached
    in registers and records the rewrite to perform."""

    def __init__(self, ast):
        self.ast = ast
        # Set by place_in_reg: (optimizable_arrays, hoist_loop_list).
        self.PlaceInRegFinding = tuple()
        # lan AST expression giving the condition under which the
        # transformation is considered worthwhile.
        self.PlaceInRegCond = None
        self.perform_transformation = False
def place_in_reg(self):
""" Find all array references that can be cached in registers.
Then rewrite the code in this fashion.
"""
optimizable_arrays = dict()
hoist_loop_set = set()
ref_to_loop = ca.get_ref_to_loop(self.ast)
write_only = ca.get_write_only(self.ast)
subscript_no_id = ca.get_subscript_no_id(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
for n in ref_to_loop:
if n in write_only:
continue
ref1 = ref_to_loop[n]
sub1 = subscript_no_id[n]
for (ref, sub, i) in zip(ref1, sub1, range(len(ref1))):
if self._can_perform_optimization(ref, sub):
hoist_loop_set |= set(sub) - set(grid_indices)
try:
optimizable_arrays[n].append(i)
except KeyError:
optimizable_arrays[n] = [i]
hoist_loop_set = self._remove_unknown_loops(hoist_loop_set)
if len(hoist_loop_set) > 1:
print """ PlaceInReg: array references was inside two loops. No optimization. """
return
hoist_loop_list = list(hoist_loop_set)
if optimizable_arrays:
self._set_optimization_arg(optimizable_arrays, hoist_loop_list)
self._set_optimization_condition(optimizable_arrays, hoist_loop_list)
def _set_optimization_condition(self, optimizable_arrays, hoistloop):
num_ref_hoisted = len(list(chain.from_iterable(optimizable_arrays.values())))
(lower_limit, upper_limit) = cl.get_loop_limits(self.ast)
if hoistloop:
m = hoistloop[0]
lhs = lan.BinOp(lan.Id(upper_limit[m]), '-', lan.Id(lower_limit[m]))
else:
lhs = lan.Constant(1)
self.PlaceInRegCond = lan.BinOp(lan.BinOp(lhs, '*', lan.Constant(num_ref_hoisted)), '<', lan.Constant(40))
def _set_optimization_arg(self, optimizable_arrays, hoistloop):
self.PlaceInRegFinding = (optimizable_arrays, hoistloop)
def _remove_unknown_loops(self, insideloop):
loops = cl.get_inner_loops(self.ast)
return {k for k in insideloop if k in loops}
def _can_perform_optimization(self, loop_idx, sub_idx):
"""
# for each array, for each array ref, collect which loop, loop_idx, it is in
# and what indices, sub_idx, are in its subscript.
# if there is a grid_idx in sub_idx and there exists a loop_idx not in sub_idx
:param loop_idx:
:param sub_idx:
:return:
"""
grid_indices = cl.get_grid_indices(self.ast)
return set(sub_idx).intersection(set(grid_indices)) and \
set(loop_idx).difference(set(sub_idx))
def place_in_reg2(self, arr_dict):
self._insert_cache_in_reg(arr_dict)
self._replace_global_ref_with_reg_id(arr_dict)
def _insert_cache_in_reg(self, arr_dict):
initstats = []
# Create the loadings
types = ci.get_types(self.ast)
kernel = cd.get_kernel(self.ast)
kernel_stats = kernel.statements
for i, n in enumerate(arr_dict):
for m in arr_dict[n]:
regid = self._create_reg_var_id(m, n)
reg_type = types[n][0]
reg = lan.TypeId([reg_type], regid)
assign = self._create_reg_assignment(m, n, reg)
initstats.append(assign)
kernel_stats.insert(0, lan.GroupCompound(initstats))
def _replace_global_ref_with_reg_id(self, arr_dict):
# Replace the global Arefs with the register vars
loop_arrays = ca.get_loop_arrays(self.ast)
loop_arrays_parent = ca.get_loop_arrays_parent(self.ast)
for i, n in enumerate(arr_dict):
for m in arr_dict[n]:
idx = m
reg_id = self._create_reg_var_id(m, n)
parent = loop_arrays_parent[n][idx]
aref_old = loop_arrays[n][idx]
exchange_array_id_with_id = exchange.ExchangeArrayIdWithId(aref_old, reg_id)
exchange_array_id_with_id.visit(parent)
@staticmethod
def _create_reg_var_id(m, n):
return lan.Id(n + str(m) + '_reg')
def _create_reg_assignment(self, m, n, reg):
idx = m
loop_arrays = ca.get_loop_arrays(self.ast)
glob_array_ref = copy.deepcopy(loop_arrays[n][idx])
reg_dict = {'isReg': []}
glob_array_ref.extra = reg_dict
assign = lan.Assignment(reg, glob_array_ref)
return assign
def place_in_reg3(self):
""" Check if the arrayref is inside a loop and use a static
array for the allocation of the registers
"""
kernel = cd.get_kernel(self.ast)
kernel_stats = kernel.statements
self.place_in_reg()
if self.PlaceInRegFinding is ():
return
(optimizable_arrays, hoist_loop_list) = self.PlaceInRegFinding
self.perform_transformation = True
if not optimizable_arrays:
return
if not hoist_loop_list:
self.place_in_reg2(optimizable_arrays)
return
hoist_loop = hoist_loop_list[0]
if hoist_loop == '':
print "placeInReg3 only works when the ArrayRef is inside a loop"
print optimizable_arrays
return
initstats = self._create_reg_array_alloc(optimizable_arrays, hoist_loop)
# add the load loop to the initiation stage
loopstats = self._create_load_loop(hoist_loop, initstats)
# Create the loadings
for i, n in enumerate(optimizable_arrays):
for m in optimizable_arrays[n]:
regid = self._create_reg_array_var(n, hoist_loop)
assign = self._create_reg_assignment(m, n, regid)
loopstats.append(assign)
kernel_stats.insert(0, lan.GroupCompound(initstats))
# Replace the global Arefs with the register Arefs
loop_arrays = ca.get_loop_arrays(self.ast)
for i, n in enumerate(optimizable_arrays):
for m in optimizable_arrays[n]:
idx = m
regid = self._create_reg_array_var(n, hoist_loop)
aref_new = copy.deepcopy(regid)
aref_old = loop_arrays[n][idx]
# Copying the internal data of the two arefs
aref_old.name.name = aref_new.name.name
aref_old.subscript = aref_new.subscript
def _create_load_loop(self, hoist_loop, initstats):
loops = cl.get_inner_loops(self.ast)
loop = copy.deepcopy(loops[hoist_loop])
loopstats = []
loop.compound.statements = loopstats
initstats.append(loop)
return loopstats
@staticmethod
def _create_reg_array_var(n, hoist_loop):
regid = lan.ArrayRef(lan.Id(n + '_reg'), [lan.Id(hoist_loop)])
return regid
def _create_reg_array_alloc(self, optimizable_arrays, hoist_loop):
initstats = []
types = ci.get_types(self.ast)
(_, upper_limit) = cl.get_loop_limits(self.ast)
# Add allocation of registers to the initiation stage
for n in optimizable_arrays:
array_init = lan.ArrayTypeId([types[n][0]], lan.Id(n + '_reg'), [lan.Id(upper_limit[hoist_loop])])
initstats.append(array_init)
return initstats
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functional style sequence-to-sequence models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import random
import numpy as np
from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class Seq2SeqTest(test.TestCase):
def testRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
_, enc_state = rnn.static_rnn(
rnn_cell.GRUCell(2), inp, dtype=dtypes.float32)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testBasicRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testTiedRNNSeq2Seq(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4)
dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].shape)
def testEmbeddingRNNDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
cell = cell_fn()
_, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_rnn_decoder(
dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 2), res[0].shape)
res = sess.run([mem])
self.assertEqual(1, len(res))
self.assertEqual((2, 2), res[0].c.shape)
self.assertEqual((2, 2), res[0].h.shape)
  def testEmbeddingRNNSeq2Seq(self):
    """embedding_rnn_seq2seq in four configurations: tuple LSTM state,
    non-tuple state, external output projection, and feed_previous
    (which must ignore decoder inputs after the first step)."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
        cell = cell_fn()
        dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
            enc_inp,
            dec_inp,
            cell,
            num_encoder_symbols=2,
            num_decoder_symbols=5,
            embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        # Outputs are logits over the 5 decoder symbols.
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test with state_is_tuple=False.
        with variable_scope.variable_scope("no_tuple"):
          cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
          dec, mem = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_nt,
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2)
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          self.assertEqual((2, 5), res[0].shape)
          res = sess.run([mem])
          # Non-tuple state is c and h concatenated: (2, 4).
          self.assertEqual((2, 4), res[0].shape)
        # Test externally provided output projection.
        w = variable_scope.get_variable("proj_w", [2, 5])
        b = variable_scope.get_variable("proj_b", [5])
        with variable_scope.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              output_projection=(w, b))
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          # With an external projection, outputs stay at cell size (2, 2).
          self.assertEqual((2, 2), res[0].shape)
        # Test that previous-feeding model ignores inputs after the first.
        dec_inp2 = [
            constant_op.constant(
                0, dtypes.int32, shape=[2]) for _ in range(3)
        ]
        with variable_scope.variable_scope("other"):
          d3, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=constant_op.constant(True))
        with variable_scope.variable_scope("other_2"):
          d1, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=True)
        with variable_scope.variable_scope("other_3"):
          d2, _ = seq2seq_lib.embedding_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              feed_previous=True)
        sess.run([variables.global_variables_initializer()])
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
        # All three feed_previous variants must agree, regardless of the
        # (ignored) decoder inputs after step one.
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
  def testEmbeddingTiedRNNSeq2Seq(self):
    """embedding_tied_rnn_seq2seq: shared embedding for encoder/decoder,
    optional num_decoder_symbols, external projection, and feed_previous
    (which must ignore decoder inputs after the first step)."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        # Factory so each phase below gets a fresh cell instance.
        cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True)
        dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
            enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test when num_decoder_symbols is provided, the size of decoder output
        # is num_decoder_symbols.
        with variable_scope.variable_scope("decoder_symbols_seq2seq"):
          dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              num_decoder_symbols=3,
              embedding_size=2)
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          self.assertEqual((2, 3), res[0].shape)
        # Test externally provided output projection.
        w = variable_scope.get_variable("proj_w", [2, 5])
        b = variable_scope.get_variable("proj_b", [5])
        with variable_scope.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              embedding_size=2,
              output_projection=(w, b))
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          # External projection keeps the raw cell outputs: (2, 2).
          self.assertEqual((2, 2), res[0].shape)
        # Test that previous-feeding model ignores inputs after the first.
        dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3
        with variable_scope.variable_scope("other"):
          d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=constant_op.constant(True))
        with variable_scope.variable_scope("other_2"):
          d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=True)
        with variable_scope.variable_scope("other_3"):
          d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq(
              enc_inp,
              dec_inp2,
              cell(),
              num_symbols=5,
              embedding_size=2,
              feed_previous=True)
        sess.run([variables.global_variables_initializer()])
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
        # All feed_previous variants must agree despite differing inputs.
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
def testAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Create a new cell instance for the decoder, since it uses a
# different variable scope
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder1(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testDynamicAttentionDecoder2(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
inp = constant_op.constant(0.5, shape=[2, 2, 2])
enc_outputs, enc_state = rnn.dynamic_rnn(
cell, inp, dtype=dtypes.float32)
attn_states = enc_outputs
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(),
output_size=4, num_heads=2)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
def testAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
single_cell = lambda: rnn_cell.BasicLSTMCell( # pylint: disable=g-long-lambda
2, state_is_tuple=True)
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[single_cell() for _ in range(2)], state_is_tuple=True)
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testDynamicAttentionDecoderStateIsTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda
cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)])
cell = cell_fn()
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size])
for e in enc_outputs
], 1)
dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.attention_decoder(
dec_inp, enc_state, attn_states, cell_fn(), output_size=4)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 4), res[0].shape)
res = sess.run([mem])
self.assertEqual(2, len(res[0]))
self.assertEqual((2, 2), res[0][0].c.shape)
self.assertEqual((2, 2), res[0][0].h.shape)
self.assertEqual((2, 2), res[0][1].c.shape)
self.assertEqual((2, 2), res[0][1].h.shape)
def testEmbeddingAttentionDecoder(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inp = [constant_op.constant(0.5, shape=[2, 2])] * 2
cell_fn = lambda: rnn_cell.GRUCell(2)
cell = cell_fn()
enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32)
attn_states = array_ops.concat([
array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs
], 1)
dec_inp = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
# Use a new cell instance since the attention decoder uses a
# different variable scope.
dec, mem = seq2seq_lib.embedding_attention_decoder(
dec_inp,
enc_state,
attn_states,
cell_fn(),
num_symbols=4,
embedding_size=2,
output_size=3)
sess.run([variables.global_variables_initializer()])
res = sess.run(dec)
self.assertEqual(3, len(res))
self.assertEqual((2, 3), res[0].shape)
res = sess.run([mem])
self.assertEqual((2, 2), res[0].shape)
  def testEmbeddingAttentionSeq2Seq(self):
    """embedding_attention_seq2seq: tuple LSTM state, non-tuple state,
    and an externally supplied output projection."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        cell_fn = lambda: rnn_cell.BasicLSTMCell(2)
        cell = cell_fn()
        dec, mem = seq2seq_lib.embedding_attention_seq2seq(
            enc_inp,
            dec_inp,
            cell,
            num_encoder_symbols=2,
            num_decoder_symbols=5,
            embedding_size=2)
        sess.run([variables.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        # Outputs are logits over the 5 decoder symbols.
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test with state_is_tuple=False.
        with variable_scope.variable_scope("no_tuple"):
          # NOTE: this rebinds cell_fn, so the projection phase below also
          # builds state_is_tuple=False cells.
          cell_fn = functools.partial(
              rnn_cell.BasicLSTMCell, 2, state_is_tuple=False)
          cell_nt = cell_fn()
          dec, mem = seq2seq_lib.embedding_attention_seq2seq(
              enc_inp,
              dec_inp,
              cell_nt,
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2)
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          self.assertEqual((2, 5), res[0].shape)
          res = sess.run([mem])
          # Non-tuple state is c and h concatenated: (2, 4).
          self.assertEqual((2, 4), res[0].shape)
        # Test externally provided output projection.
        w = variable_scope.get_variable("proj_w", [2, 5])
        b = variable_scope.get_variable("proj_b", [5])
        with variable_scope.variable_scope("proj_seq2seq"):
          dec, _ = seq2seq_lib.embedding_attention_seq2seq(
              enc_inp,
              dec_inp,
              cell_fn(),
              num_encoder_symbols=2,
              num_decoder_symbols=5,
              embedding_size=2,
              output_projection=(w, b))
          sess.run([variables.global_variables_initializer()])
          res = sess.run(dec)
          self.assertEqual(3, len(res))
          # With an external projection, outputs stay at cell size (2, 2).
          self.assertEqual((2, 2), res[0].shape)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# # Test that previous-feeding model ignores inputs after the first.
# dec_inp2 = [
# constant_op.constant(
# 0, dtypes.int32, shape=[2]) for _ in range(3)
# ]
# with variable_scope.variable_scope("other"):
# d3, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell_fn(),
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=constant_op.constant(True))
# sess.run([variables.global_variables_initializer()])
# variable_scope.get_variable_scope().reuse_variables()
# cell = cell_fn()
# d1, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# d2, _ = seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp2,
# cell,
# num_encoder_symbols=2,
# num_decoder_symbols=5,
# embedding_size=2,
# feed_previous=True)
# res1 = sess.run(d1)
# res2 = sess.run(d2)
# res3 = sess.run(d3)
# self.assertAllClose(res1, res2)
# self.assertAllClose(res1, res3)
  def testOne2ManyRNNSeq2Seq(self):
    """one2many_rnn_seq2seq: one encoder feeding two decoders ("0" and
    "1" with 5/6 symbols, 3/4 steps), plus the feed_previous check that
    decoder inputs after the first step are ignored."""
    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "root", initializer=init_ops.constant_initializer(0.5)):
        enc_inp = [
            constant_op.constant(
                1, dtypes.int32, shape=[2]) for i in range(2)
        ]
        dec_inp_dict = {}
        dec_inp_dict["0"] = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(3)
        ]
        dec_inp_dict["1"] = [
            constant_op.constant(
                i, dtypes.int32, shape=[2]) for i in range(4)
        ]
        # Decoder name -> vocabulary size.
        dec_symbols_dict = {"0": 5, "1": 6}
        def EncCellFn():
          return rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
        def DecCellsFn():
          # One fresh cell per decoder, keyed like dec_symbols_dict.
          return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True))
                      for k in dec_symbols_dict)
        outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq(
            enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(),
            2, dec_symbols_dict, embedding_size=2))
        sess.run([variables.global_variables_initializer()])
        res = sess.run(outputs_dict["0"])
        self.assertEqual(3, len(res))
        self.assertEqual((2, 5), res[0].shape)
        res = sess.run(outputs_dict["1"])
        self.assertEqual(4, len(res))
        self.assertEqual((2, 6), res[0].shape)
        res = sess.run([state_dict["0"]])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        res = sess.run([state_dict["1"]])
        self.assertEqual((2, 2), res[0].c.shape)
        self.assertEqual((2, 2), res[0].h.shape)
        # Test that previous-feeding model ignores inputs after the first, i.e.
        # dec_inp_dict2 has different inputs from dec_inp_dict after the first
        # time-step.
        dec_inp_dict2 = {}
        dec_inp_dict2["0"] = [
            constant_op.constant(
                0, dtypes.int32, shape=[2]) for _ in range(3)
        ]
        dec_inp_dict2["1"] = [
            constant_op.constant(
                0, dtypes.int32, shape=[2]) for _ in range(4)
        ]
        with variable_scope.variable_scope("other"):
          outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq(
              enc_inp,
              dec_inp_dict2,
              EncCellFn(),
              DecCellsFn(),
              2,
              dec_symbols_dict,
              embedding_size=2,
              feed_previous=constant_op.constant(True))
        with variable_scope.variable_scope("other_2"):
          outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq(
              enc_inp,
              dec_inp_dict,
              EncCellFn(),
              DecCellsFn(),
              2,
              dec_symbols_dict,
              embedding_size=2,
              feed_previous=True)
        with variable_scope.variable_scope("other_3"):
          outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq(
              enc_inp,
              dec_inp_dict2,
              EncCellFn(),
              DecCellsFn(),
              2,
              dec_symbols_dict,
              embedding_size=2,
              feed_previous=True)
        sess.run([variables.global_variables_initializer()])
        res1 = sess.run(outputs_dict1["0"])
        res2 = sess.run(outputs_dict2["0"])
        res3 = sess.run(outputs_dict3["0"])
        # All feed_previous variants must produce identical outputs.
        self.assertAllClose(res1, res2)
        self.assertAllClose(res1, res3)
def testSequenceLoss(self):
with self.test_session() as sess:
logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=True,
average_across_batch=True)
res = sess.run(average_loss_per_example)
self.assertAllClose(1.60944, res)
average_loss_per_sequence = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=True)
res = sess.run(average_loss_per_sequence)
self.assertAllClose(4.828314, res)
total_loss = seq2seq_lib.sequence_loss(
logits,
targets,
weights,
average_across_timesteps=False,
average_across_batch=False)
res = sess.run(total_loss)
self.assertAllClose(9.656628, res)
def testSequenceLossByExample(self):
with self.test_session() as sess:
output_classes = 5
logits = [
constant_op.constant(
i + 0.5, shape=[2, output_classes]) for i in range(3)
]
targets = [
constant_op.constant(
i, dtypes.int32, shape=[2]) for i in range(3)
]
weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
average_loss_per_example = (seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=True))
res = sess.run(average_loss_per_example)
self.assertAllClose(np.asarray([1.609438, 1.609438]), res)
loss_per_sequence = seq2seq_lib.sequence_loss_by_example(
logits, targets, weights, average_across_timesteps=False)
res = sess.run(loss_per_sequence)
self.assertAllClose(np.asarray([4.828314, 4.828314]), res)
# TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse
# within a variable scope that already has a weights tensor.
#
# def testModelWithBucketsScopeAndLoss(self):
# """Test variable scope reuse is not reset after model_with_buckets."""
# classes = 10
# buckets = [(4, 4), (8, 8)]
# with self.test_session():
# # Here comes a sample Seq2Seq model using GRU cells.
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss):
# """Example sequence-to-sequence model that uses GRU cells."""
# def GRUSeq2Seq(enc_inp, dec_inp):
# cell = rnn_cell.MultiRNNCell(
# [rnn_cell.GRUCell(24) for _ in range(2)])
# return seq2seq_lib.embedding_attention_seq2seq(
# enc_inp,
# dec_inp,
# cell,
# num_encoder_symbols=classes,
# num_decoder_symbols=classes,
# embedding_size=24)
# targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
# return seq2seq_lib.model_with_buckets(
# enc_inp,
# dec_inp,
# targets,
# weights,
# buckets,
# GRUSeq2Seq,
# per_example_loss=per_example_loss)
# # Now we construct the copy model.
# inp = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# out = [
# array_ops.placeholder(
# dtypes.int32, shape=[None]) for _ in range(8)
# ]
# weights = [
# array_ops.ones_like(
# inp[0], dtype=dtypes.float32) for _ in range(8)
# ]
# with variable_scope.variable_scope("root"):
# _, losses1 = SampleGRUSeq2Seq(
# inp, out, weights, per_example_loss=False)
# # Now check that we did not accidentally set reuse.
# self.assertEqual(False, variable_scope.get_variable_scope().reuse)
# with variable_scope.variable_scope("new"):
  #       _, losses2 = SampleGRUSeq2Seq(
  #           inp, out, weights, per_example_loss=True)
  #     # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())
  def testModelWithBuckets(self):
    """Larger tests that does full sequence-to-sequence model training."""
    # We learn to copy 10 symbols in 2 buckets: length 4 and length 8.
    classes = 10
    buckets = [(4, 4), (8, 8)]
    perplexities = [[], []]  # Results for each bucket.
    # Fix every random source so the perplexity trend is reproducible.
    random_seed.set_random_seed(111)
    random.seed(111)
    np.random.seed(111)
    with self.test_session() as sess:
      # We use sampled softmax so we keep output projection separate.
      w = variable_scope.get_variable("proj_w", [24, classes])
      w_t = array_ops.transpose(w)
      b = variable_scope.get_variable("proj_b", [classes])
      # Here comes a sample Seq2Seq model using GRU cells.
      def SampleGRUSeq2Seq(enc_inp, dec_inp, weights):
        """Example sequence-to-sequence model that uses GRU cells."""
        def GRUSeq2Seq(enc_inp, dec_inp):
          # Two stacked 24-unit GRU cells with attention.
          cell = rnn_cell.MultiRNNCell(
              [rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True)
          return seq2seq_lib.embedding_attention_seq2seq(
              enc_inp,
              dec_inp,
              cell,
              num_encoder_symbols=classes,
              num_decoder_symbols=classes,
              embedding_size=24,
              output_projection=(w, b))
        # Targets are decoder inputs shifted by one (plus a trailing dummy 0).
        targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0]
        def SampledLoss(labels, logits):
          # sampled_softmax_loss expects labels of shape [batch, 1].
          labels = array_ops.reshape(labels, [-1, 1])
          return nn_impl.sampled_softmax_loss(
              weights=w_t,
              biases=b,
              labels=labels,
              inputs=logits,
              num_sampled=8,
              num_classes=classes)
        return seq2seq_lib.model_with_buckets(
            enc_inp,
            dec_inp,
            targets,
            weights,
            buckets,
            GRUSeq2Seq,
            softmax_loss_function=SampledLoss)
      # Now we construct the copy model.
      batch_size = 8
      inp = [
          array_ops.placeholder(
              dtypes.int32, shape=[None]) for _ in range(8)
      ]
      out = [
          array_ops.placeholder(
              dtypes.int32, shape=[None]) for _ in range(8)
      ]
      weights = [
          array_ops.ones_like(
              inp[0], dtype=dtypes.float32) for _ in range(8)
      ]
      with variable_scope.variable_scope("root"):
        _, losses = SampleGRUSeq2Seq(inp, out, weights)
      # One clipped-gradient Adam update op per bucket.
      updates = []
      params = variables.global_variables()
      optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
      for i in range(len(buckets)):
        full_grads = gradients_impl.gradients(losses[i], params)
        grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0)
        update = optimizer.apply_gradients(zip(grads, params))
        updates.append(update)
      sess.run([variables.global_variables_initializer()])
      # Train for a few steps on randomly chosen buckets, recording the
      # per-bucket perplexity after each step.
      steps = 6
      for _ in range(steps):
        bucket = random.choice(np.arange(len(buckets)))
        length = buckets[bucket][0]
        # Random symbols in [1, 9]; 0 is reserved as the "GO" symbol.
        i = [
            np.array(
                [np.random.randint(9) + 1 for _ in range(batch_size)],
                dtype=np.int32) for _ in range(length)
        ]
        # 0 is our "GO" symbol here.
        o = [np.array([0] * batch_size, dtype=np.int32)] + i
        feed = {}
        for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length],
                                  o[:length]):
          feed[i1.name] = i2
          feed[o1.name] = o2
        if length < 8:  # For the 4-bucket, we need the 5th as target.
          feed[out[length].name] = o[length]
        res = sess.run([updates[bucket], losses[bucket]], feed)
        perplexities[bucket].append(math.exp(float(res[1])))
      for bucket in range(len(buckets)):
        if len(perplexities[bucket]) > 1:  # Assert that perplexity went down.
          self.assertLess(perplexities[bucket][-1],  # 20% margin of error.
                          1.2 * perplexities[bucket][0])
  def testModelWithBooleanFeedPrevious(self):
    """Test the model behavior when feed_previous is True.
    For example, the following two cases have the same effect:
    - Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains
      a `embedding_rnn_decoder` with `feed_previous=True` and
      `update_embedding_for_previous=True`. The decoder is fed with "<Go>"
      and outputs "A, B, C".
    - Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder
      is fed with "<Go>, A, B".
    """
    # Model sizes are kept tiny so the test runs fast.
    num_encoder_symbols = 3
    num_decoder_symbols = 5
    batch_size = 2
    num_enc_timesteps = 2
    num_dec_timesteps = 3
    def TestModel(seq2seq):
      # Builds two copies of the given seq2seq model (feed_previous True and
      # False), syncs their variables, trains each once, and asserts that
      # the resulting variables stay equal.
      with self.test_session(graph=ops.Graph()) as sess:
        random_seed.set_random_seed(111)
        random.seed(111)
        np.random.seed(111)
        enc_inp = [
            constant_op.constant(
                i + 1, dtypes.int32, shape=[batch_size])
            for i in range(num_enc_timesteps)
        ]
        dec_inp_fp_true = [
            constant_op.constant(
                i, dtypes.int32, shape=[batch_size])
            for i in range(num_dec_timesteps)
        ]
        dec_inp_holder_fp_false = [
            array_ops.placeholder(
                dtypes.int32, shape=[batch_size])
            for _ in range(num_dec_timesteps)
        ]
        targets = [
            constant_op.constant(
                i + 1, dtypes.int32, shape=[batch_size])
            for i in range(num_dec_timesteps)
        ]
        weights = [
            constant_op.constant(
                1.0, shape=[batch_size]) for i in range(num_dec_timesteps)
        ]
        def ForwardBackward(enc_inp, dec_inp, feed_previous):
          # Builds the net under its own scope; returns decoder output,
          # the train op, and the variables belonging to that scope.
          scope_name = "fp_{}".format(feed_previous)
          with variable_scope.variable_scope(scope_name):
            dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous)
            net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                               scope_name)
          optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5)
          update_op = optimizer.minimize(
              seq2seq_lib.sequence_loss(dec_op, targets, weights),
              var_list=net_variables)
          return dec_op, update_op, net_variables
        dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward(
            enc_inp, dec_inp_fp_true, feed_previous=True)
        _, update_fp_false, variables_fp_false = ForwardBackward(
            enc_inp, dec_inp_holder_fp_false, feed_previous=False)
        sess.run(variables.global_variables_initializer())
        # We only check consistencies between the variables existing in both
        # the models with True and False feed_previous. Variables created by
        # the loop_function in the model with True feed_previous are ignored.
        v_false_name_dict = {
            v.name.split("/", 1)[-1]: v
            for v in variables_fp_false
        }
        matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]])
                             for v in variables_fp_true]
        for v_true, v_false in matched_variables:
          sess.run(state_ops.assign(v_false, v_true))
        # Take the symbols generated by the decoder with feed_previous=True as
        # the true input symbols for the decoder with feed_previous=False.
        dec_fp_true = sess.run(dec_op_fp_true)
        output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)
        dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(),
                                      output_symbols_fp_true[:-1]))
        sess.run(update_fp_true)
        sess.run(update_fp_false, {
            holder: inp
            for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false)
        })
        for v_true, v_false in matched_variables:
          self.assertAllClose(v_true.eval(), v_false.eval())
    # Six model variants: plain / tied / attention embedding seq2seq, each
    # with tuple-state and non-tuple-state LSTM cells.
    def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
      return seq2seq_lib.embedding_rnn_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_encoder_symbols,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
      return seq2seq_lib.embedding_rnn_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_encoder_symbols,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
      return seq2seq_lib.embedding_tied_rnn_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
      return seq2seq_lib.embedding_tied_rnn_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True)
      return seq2seq_lib.embedding_attention_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_encoder_symbols,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous):
      cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False)
      return seq2seq_lib.embedding_attention_seq2seq(
          enc_inp,
          dec_inp,
          cell,
          num_encoder_symbols,
          num_decoder_symbols,
          embedding_size=2,
          feed_previous=feed_previous)
    for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF,
                  EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple,
                  EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple):
      TestModel(model)
# Run the seq2seq test suite when executed as a script.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.ops.sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _assert_sparse_tensor_value(test_case, expected, actual):
  """Assert that *actual* matches *expected* field by field.

  Checks dtypes (indices and dense_shape must be int64; values must carry
  the expected dtype) as well as the contents of all three
  SparseTensorValue fields.
  """
  actual_indices = np.array(actual.indices)
  actual_values = np.array(actual.values)
  actual_shape = np.array(actual.dense_shape)
  # Indices of a well-formed SparseTensor are always int64.
  test_case.assertEqual(np.int64, actual_indices.dtype)
  test_case.assertAllEqual(expected.indices, actual.indices)
  # Values must agree in both dtype and content.
  test_case.assertEqual(np.array(expected.values).dtype, actual_values.dtype)
  test_case.assertAllEqual(expected.values, actual.values)
  # The dense shape is int64 as well.
  test_case.assertEqual(np.int64, actual_shape.dtype)
  test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class DenseToSparseTensorTest(test.TestCase):
  """Tests for `sparse_ops.dense_to_sparse_tensor`.

  Covers dtype handling (indices/dense_shape are int64, values keep the
  input dtype), the default and custom ignore values, and inputs whose
  shape or rank is statically unknown.

  Note: the removed NumPy aliases `np.bool` / `np.object` (gone since
  NumPy 1.24) were replaced with `np.bool_` / `np.object_`; the dtype
  comparisons are unchanged in behavior.
  """

  def test_dense_to_sparse_tensor_1d(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([1, 0, 2, 0])
      result = sess.run(st)
      self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.int32)
      self.assertEqual(result.dense_shape.dtype, np.int64)
      self.assertAllEqual([[0], [2]], result.indices)
      self.assertAllEqual([1, 2], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_1d_float(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([1.5, 0.0, 2.3, 0.0])
      result = sess.run(st)
      self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.float32)
      self.assertEqual(result.dense_shape.dtype, np.int64)
      self.assertAllEqual([[0], [2]], result.indices)
      self.assertAllClose([1.5, 2.3], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_1d_bool(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([True, False, True, False])
      result = sess.run(st)
      self.assertEqual(result.indices.dtype, np.int64)
      # np.bool_ replaces the np.bool alias removed in NumPy 1.24.
      self.assertEqual(result.values.dtype, np.bool_)
      self.assertEqual(result.dense_shape.dtype, np.int64)
      self.assertAllEqual([[0], [2]], result.indices)
      self.assertAllEqual([True, True], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_1d_str(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([b'qwe', b'', b'ewq', b''])
      result = sess.run(st)
      self.assertEqual(result.indices.dtype, np.int64)
      # np.object_ replaces the np.object alias removed in NumPy 1.24.
      self.assertEqual(result.values.dtype, np.object_)
      self.assertEqual(result.dense_shape.dtype, np.int64)
      self.assertAllEqual([[0], [2]], result.indices)
      self.assertAllEqual([b'qwe', b'ewq'], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_1d_str_special_ignore(self):
    with self.test_session() as sess:
      # With a custom ignore_value, the empty string is a real value.
      st = sparse_ops.dense_to_sparse_tensor(
          [b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
      result = sess.run(st)
      self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.object_)
      self.assertEqual(result.dense_shape.dtype, np.int64)
      self.assertAllEqual([[1], [2], [3]], result.indices)
      self.assertAllEqual([b'', b'ewq', b''], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_2d(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([[1, 2, 0, 0], [3, 4, 5, 0]])
      result = sess.run(st)
      self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
                          result.indices)
      self.assertAllEqual([1, 2, 3, 4, 5], result.values)
      self.assertAllEqual([2, 4], result.dense_shape)

  def test_dense_to_sparse_tensor_3d(self):
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor([[[1, 2, 0, 0], [3, 4, 5, 0]],
                                              [[7, 8, 0, 0], [9, 0, 0, 0]]])
      result = sess.run(st)
      self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
                           [1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
      self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
      self.assertAllEqual([2, 2, 4], result.dense_shape)

  def test_dense_to_sparse_tensor_unknown_1d_shape(self):
    with self.test_session() as sess:
      tensor = array_ops.placeholder(shape=[None], dtype=dtypes.int32)
      st = sparse_ops.dense_to_sparse_tensor(tensor)
      result = sess.run(st, feed_dict={tensor: [0, 100, 0, 3]})
      self.assertAllEqual([[1], [3]], result.indices)
      self.assertAllEqual([100, 3], result.values)
      self.assertAllEqual([4], result.dense_shape)

  def test_dense_to_sparse_tensor_unknown_3d_shape(self):
    with self.test_session() as sess:
      tensor = array_ops.placeholder(
          shape=[None, None, None], dtype=dtypes.int32)
      st = sparse_ops.dense_to_sparse_tensor(tensor)
      result = sess.run(st,
                        feed_dict={
                            tensor: [[[1, 2, 0, 0], [3, 4, 5, 0]],
                                     [[7, 8, 0, 0], [9, 0, 0, 0]]]
                        })
      self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
                           [1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
      self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
      self.assertAllEqual([2, 2, 4], result.dense_shape)

  def test_dense_to_sparse_unknown_rank(self):
    ph = array_ops.placeholder(dtype=dtypes.int32)
    with self.test_session() as sess:
      st = sparse_ops.dense_to_sparse_tensor(ph)
      result = sess.run(st, feed_dict={ph: [[1, 2, 0, 0], [3, 4, 5, 0]]})
      self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
                          result.indices)
      self.assertAllEqual([1, 2, 3, 4, 5], result.values)
      self.assertAllEqual([2, 4], result.dense_shape)
class SparseRowEnvelopeTest(test.TestCase):
  """Tests for `sparse_ops.sparse_row_envelope`."""

  def test_sparse_row_envelope(self):
    # Rows 0, 1 and 2 hold 1, 0 and 3 entries respectively.
    expected = [1, 0, 3]
    with self.test_session() as sess:
      sp_input = sparse_tensor.SparseTensor(
          indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
          values=[0, 1, 2, 3],
          dense_shape=[3, 3])
      envelope = sess.run(sparse_ops.sparse_row_envelope(sp_input))
      self.assertAllEqual(expected, envelope)

  def test_sparse_row_envelope_unsorted_indices(self):
    # Same content as above, but with indices given out of order.
    expected = [1, 0, 3]
    with self.test_session() as sess:
      sp_input = sparse_tensor.SparseTensor(
          indices=[[2, 0], [2, 2], [2, 1], [0, 0]],
          values=[0, 1, 2, 3],
          dense_shape=[3, 3])
      envelope = sess.run(sparse_ops.sparse_row_envelope(sp_input))
      self.assertAllEqual(expected, envelope)

  def test_sparse_row_envelope_empty_in_the_end(self):
    # Trailing rows without entries must still show up as zeros.
    expected = [1, 0, 3, 0, 0]
    with self.test_session() as sess:
      sp_input = sparse_tensor.SparseTensor(
          indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
          values=[0, 1, 2, 3],
          dense_shape=[5, 3])
      envelope = sess.run(sparse_ops.sparse_row_envelope(sp_input))
      self.assertAllEqual(expected, envelope)

  def test_sparse_row_envelope_empty_3d(self):
    # For rank-3 input, axes 1 and 2 are passed explicitly.
    expected = [1, 0, 3, 0, 0]
    with self.test_session() as sess:
      sp_input = sparse_tensor.SparseTensor(
          indices=[[0, 0, 0], [0, 2, 0], [0, 2, 1], [0, 2, 2]],
          values=[0, 1, 2, 3],
          dense_shape=[1, 5, 3])
      envelope = sess.run(sparse_ops.sparse_row_envelope(sp_input, 1, 2))
      self.assertAllEqual(expected, envelope)
class IndicatorToSparseIdsTest(test.TestCase):
  """Tests for `sparse_ops.indicators_to_sparse_ids`."""
  def test_indicators_to_sparse_ids_1d(self):
    indicators = (0, 0, 1, 0)
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      # Only column 2 is set, so the single id is 2.
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0,),),
          values=(2,),
          dense_shape=(1,),
      ), sparse_ids.eval())
  def test_indicators_to_sparse_ids_2d(self):
    indicators = (
        (0, 0, 1, 0),
        (1, 0, 0, 1),
    )
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (1, 1)),
          values=(2, 0, 3),
          dense_shape=(2, 2),
      ), sparse_ids.eval())
  def test_indicators_to_sparse_ids_3d(self):
    indicators = (
        ((0, 0, 1, 0, 0), (0, 0, 0, 0, 0)),
        ((1, 0, 0, 1, 0), (0, 0, 1, 0, 0)),
        ((0, 0, 0, 0, 0), (0, 0, 0, 0, 0)),
        ((1, 0, 0, 1, 1), (0, 0, 1, 0, 0)),
    )
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      # The last dense dimension is the max number of ids per row (3).
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=(
              (0, 0, 0),
              (1, 0, 0), (1, 0, 1), (1, 1, 0),
              (3, 0, 0), (3, 0, 1), (3, 0, 2), (3, 1, 0)
          ), values=(
              2,
              0, 3, 2,
              0, 3, 4, 2
          ), dense_shape=(4, 2, 3),
      ), sparse_ids.eval())
  def test_int16_to_sparse_ids_2d(self):
    indicators = (
        (0, 0, 1, 0),
        (1, 0, 0, 1),
    )
    # The requested dtype must be reflected in the values.
    sparse_ids = sparse_ops.indicators_to_sparse_ids(
        indicators, dtype=dtypes.int16)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (1, 1)),
          values=np.array((2, 0, 3), dtype=np.int16),
          dense_shape=(2, 2),
      ), sparse_ids.eval())
  def test_indicators_to_sparse_ids_ignore_value(self):
    indicators = (
        ((-1, -1, 10, -1), (-1, -1, -1, -1)),
        ((11, -1, -1, 12), (-1, -1, 13, -1)),
    )
    sparse_ids = sparse_ops.indicators_to_sparse_ids(
        indicators, ignore_value=-1)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
          values=(2, 0, 3, 2),
          dense_shape=(2, 2, 2),
      ), sparse_ids.eval())
  def test_string_indicators_to_sparse_ids(self):
    indicators = (
        (('', '', 'A', ''), ('', '', '', '')),
        (('B', '', '', 'C'), ('', '', 'D', '')),
    )
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
          values=(2, 0, 3, 2),
          dense_shape=(2, 2, 2),
      ), sparse_ids.eval())
  def test_string_indicators_to_sparse_ids_ignore_value(self):
    indicators = (
        (('x', 'x', 'A', 'x'), ('x', 'x', 'x', 'x')),
        (('B', 'x', 'x', 'C'), ('x', 'x', 'D', 'x')),
    )
    sparse_ids = sparse_ops.indicators_to_sparse_ids(
        indicators, ignore_value='x')
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
          values=(2, 0, 3, 2),
          dense_shape=(2, 2, 2),
      ), sparse_ids.eval())
  def test_indicators_to_sparse_ids_unknown_3d_shape(self):
    indicators_values = (
        ((0, 0, 1, 0), (0, 0, 0, 0)),
        ((1, 0, 0, 1), (0, 0, 1, 0)),
    )
    indicators = array_ops.placeholder(
        dtype=dtypes.int32, shape=(None, None, None))
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
          values=(2, 0, 3, 2),
          dense_shape=(2, 2, 2),
      ), sparse_ids.eval(feed_dict={indicators: indicators_values}))
  def test_indicators_to_sparse_ids_unknown_rank(self):
    indicators_values = (
        ((0, 0, 1, 0), (0, 0, 0, 0)),
        ((1, 0, 0, 1), (0, 0, 1, 0)),
    )
    # Placeholder with fully unknown shape (rank inferred at run time).
    indicators = array_ops.placeholder(dtype=dtypes.int32)
    sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
    with self.test_session():
      _assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
          indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
          values=(2, 0, 3, 2),
          dense_shape=(2, 2, 2),
      ), sparse_ids.eval(feed_dict={indicators: indicators_values}))
# Run the sparse_ops test suite when executed as a script.
if __name__ == '__main__':
  test.main()
| |
# -*- coding: utf-8 -*-
import re
import urllib2
import socket
import logging
import conf
import dbhandler
import sys
import exception
import time
HEADERS = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.66 Safari/537.36"}
##Extract the movie url
def parseMovie(page, startID):
    """Extract movie IDs from a list page and crawl each movie (Python 2).

    Returns True when the page was processed, False on an IMDb error page
    or when a downstream parse step fails.
    """
    # NOTE: logging.basicConfig only takes effect on its first call per
    # process; the repeated calls in this module are no-ops after that.
    logging.basicConfig(filename=conf.logPath,format="%(asctime)s:%(levelname)s:%(message)s",level=logging.DEBUG)
    movie_pat = re.compile(r"<a href=\"/title/tt([\d]*)/\" title=")
    error_pat = re.compile(r"<title>(IMDb: Error)</title>")#To test whether reach error page
    if error_pat.search(page):
        print "##parseMovie: Got error page, maybe it is done crawling"
        logging.warning("##parseMovie: Got error page, maybe it is done crawling")
        return False
    error_pat_2 = re.compile(r"<title>404 Error</title>")
    if error_pat_2.search(page):
        raise exception.requestLimitException("404 Error")
        # NOTE(review): unreachable — the raise above always exits first.
        return False
    # NOTE(review): loop variable `id` shadows the builtin of the same name.
    for id in movie_pat.finditer(page):
        # conf.movieID != "-1" means "skip ahead until this specific movie".
        if conf.movieID != "-1":
            if id.group(1) != conf.movieID:
                continue
            else:
                conf.movieID = "-1" #disable the movieID
        print "startID: " + str(startID) + " movieID: " + id.group(1)
        logging.info("startID: " + str(startID) + " movieID: " + id.group(1))
        url = "http://www.imdb.com/title/tt%s/"%(id.group(1))
        req = urllib2.Request(url, headers=HEADERS)
        # Retry loop: transient network errors retry immediately; rate
        # limiting (404 page) waits conf.waitTime seconds first.
        while(True):
            try:
                content = urllib2.urlopen(req, timeout=5).read()
                if parseMovieInfo(content, id.group(1)):
                    if parseRating(id.group(1)):
                        break
                    else:
                        return False
                else:
                    return False
            except urllib2.URLError:
                print "##parseMovie: Bad url or timeout, reconnecting..."
                logging.warning("##parseMovie: Bad url or timeout, reconnecting...")
                continue
            except socket.timeout:
                print "##parseMovie: Timeout, reconnecting..."
                logging.warning("##parseMovie: Timeout, reconnecting...")
                continue
            except exception.requestLimitException, e:
                print "##parseMovie: Request limit, wait " + str(conf.waitTime) + "s to connect again."
                logging.warning("##parseMovie: Request limit, wait " + str(conf.waitTime) + "s to connect again.")
                time.sleep(conf.waitTime)
                continue
            except socket.error:
                print "##parseMovie: socket.error, wait " + str(conf.waitTime) + "s to connect again."
                logging.warning("##parseMovie: socket.error, wait " + str(conf.waitTime) + "s to connect again.")
                time.sleep(conf.waitTime)
                continue
    return True
##Extract movie info
def parseMovieInfo(page, movieID):
    """Scrape title/year/rating/length/genres from a movie page and persist.

    Returns True on success, False when the DB write fails. Raises
    requestLimitException when IMDb serves its 404 rate-limit page.
    """
    logging.basicConfig(filename=conf.logPath,format="%(asctime)s:%(levelname)s:%(message)s",level=logging.DEBUG)
    error_pat_2 = re.compile(r"<title>404 Error</title>")
    if error_pat_2.search(page):
        raise exception.requestLimitException("404 Error")
        # NOTE(review): unreachable — the raise above always exits first.
        return False
    ##movie's title
    title_pat = re.compile(r"<h1 class=\"header\">[^>]*>([^<]*)</span>")
    title_re = title_pat.search(page)
    ##movie's release year
    # Two layouts are tried: the newer ref_=tt_ov_inf anchor first, then
    # the older <span class="nobr"> markup as a fallback.
    year_pat_1 = re.compile(r"ref_=tt_ov_inf\" >([\d]*)</a>\)</span>")
    year_pat_2 = re.compile(r"<span class=\"nobr\">(.*)</span>[^<]*</h1>")
    year_re = year_pat_1.search(page)
    if not year_re:
        year_re = year_pat_2.search(page)
    ##movie's average rating from imdb
    avRating_pat = re.compile(r"star-box-giga-star\">([^<]*)</div>")
    avRating_re = avRating_pat.search(page)
    ##movie's length and type(<=3)
    # The combined pattern captures the runtime plus up to three genre
    # links (groups 4, 6 and 8 after concatenation).
    length_pat_str = r"<div class=\"infobar\">([^<]*<span[^<]*</span>|)[^<]*<time[^>]*>[\s]*(?P<length>[^<\n]*)[\s]*</time>"
    type_pat_str = r"([^<]*<a[^<]*<span[^>]*>([^<]*)</span></a>[^<]*<span[^<]*</span>|)"
    type_pat_str_postfix = r"([^<]*<a[^<]*<span[^>]*>([^<]*)</span></a>|)"
    length_type_pat = re.compile(length_pat_str + type_pat_str + type_pat_str + type_pat_str_postfix)
    length_type_re = length_type_pat.search(page)
    title = ""
    year = ""
    avRating = ""
    length = ""
    type_ = ""
    if title_re:
        title = title_re.group(1)
    if year_re:
        year = year_re.group(1)
    if avRating_re:
        avRating = avRating_re.group(1)
    if length_type_re:
        length = length_type_re.group("length")
        if length_type_re.group(4):
            type_ = type_ + length_type_re.group(4)
        if length_type_re.group(6):
            type_ = type_ + "|" + length_type_re.group(6)
        if length_type_re.group(8):
            type_ = type_ + "|" + length_type_re.group(8)
    ##To avoid the "InvalidStringData" error when we insert the data into the db
    # Titles that fail unicode_escape decoding are stored as "".
    flag = True
    try:
        title_temp = title.decode("unicode_escape")
    except UnicodeDecodeError:
        flag = False
    if flag:
        title = title_temp
    else:
        title = ""
    info = {
        "title": title,
        "year": year,
        "avRating": avRating,
        "length": length,
        "type": type_,
        "movieID": movieID
    }
    try:
        dbhandler.writeMovieInfo(info)
    # NOTE(review): bare except hides the real DB error type; it is at
    # least logged via sys.exc_info() below.
    except:
        print "##writeMovieInfo db error:", sys.exc_info()[0]
        logging.error("##writeMovieInfo db error: " + str(sys.exc_info()[0]))
        return False
    print "title: " + title
    print "year: " + year
    print "avRating: " + avRating
    print "length: " + length
    print "type: " + type_
    print "movieID: " + movieID
    return True
##Extract the rating url
def parseRating(id):
    """Page through a movie's review pages (10 per page) and crawl ratings.

    Returns True when all review pages were processed, False when a page
    could not be parsed.
    """
    logging.basicConfig(filename=conf.logPath,format="%(asctime)s:%(levelname)s:%(message)s",level=logging.DEBUG)
    # NOTE(review): parameter `id` shadows the builtin of the same name.
    prefix = "http://www.imdb.com/title/tt"
    postfix = "/reviews?start="
    startID = 0
    # Resume support: conf.ratingStartID lets a crawl pick up mid-movie.
    if conf.ratingStartID != 0:
        startID = conf.ratingStartID
        conf.ratingStartID = 0
    ratingCount = 0
    while(True):
        print "RRRRRRRRRRRRRRRRRRRRRRRRating startID: " + str(startID)
        logging.info("Rating startID: " + str(startID))
        url = prefix + id + postfix + str(startID)
        req = urllib2.Request(url, headers=HEADERS)
        try:
            page = urllib2.urlopen(req, timeout=5).read()
            count = parseRatingInfo(page, id)
            if count == -1:
                print "##parseRating url: " + url
                logging.warning("##parseRating url: " + url)
                return False
            # A non-empty page means there may be more: advance by 10.
            if count != 0:
                startID += 10
                ratingCount += count
                continue
            else:
                print str(ratingCount) + " rating(s) have been crawled"
                logging.info(str(ratingCount) + " rating(s) have been crawled")
                break
        except urllib2.URLError:
            print "##parseRating: Bad url or timeout, reconnecting..."
            logging.warning("##parseRating: Bad url or timeout, reconnecting...")
            continue
        except socket.timeout:
            print "##parseRating: Timeout, reconnecting..."
            logging.warning("##parseRating: Timeout, reconnecting...")
            continue
        except exception.requestLimitException, e:
            print "##parseRating: Request limit, wait " + str(conf.waitTime) + "s to connect again."
            logging.warning("##parseRating: Request limit, wait " + str(conf.waitTime) + "s to connect again.")
            time.sleep(conf.waitTime)
            continue
        except socket.error:
            print "##parseRating: socket.error, wait " + str(conf.waitTime) + "s to connect again."
            logging.warning("##parseRating: socket.error, wait " + str(conf.waitTime) + "s to connect again.")
            time.sleep(conf.waitTime)
            continue
    return True
##Extract the rating info
def parseRatingInfo(page, movieID):
    """Scrape individual reviews from one review page and persist them.

    Returns the number of ratings stored (0 means last page reached),
    or -1 on a DB error. Raises requestLimitException on the 404 page.
    """
    logging.basicConfig(filename=conf.logPath,format="%(asctime)s:%(levelname)s:%(message)s",level=logging.DEBUG)
    error_pat_2 = re.compile(r"<title>404 Error</title>")
    if error_pat_2.search(page):
        raise exception.requestLimitException("404 Error")
        # NOTE(review): unreachable — the raise above always exits first.
        return -1
    # One combined pattern per review: topic, rating, author id, optional
    # location (group 5), and review date.
    topic_pat_str = r"<h2>([^<]*)</h2>[^<]*<[^a]*"
    rating_pat_str = r"alt=\"([\d]*)/10\"[^>]*><br>[^<]*<b>Author:"
    author_pat_str = r"</b>[^<]*<a href=\"/user/ur([\d]*)/\"[^<]*</a>"
    location_pat_str = r"([^<]*<small>from ([^<]*)</small>|)" #not necessary
    time_pat_str = r"[^s]*small>(?P<time>[^<]*)</small>"
    pat = re.compile(topic_pat_str +
                     rating_pat_str +
                     author_pat_str +
                     location_pat_str +
                     time_pat_str)
    count = 0 #To sum up ratings have been crawled
    for result in pat.finditer(page):
        topic = result.group(1)
        rating = result.group(2)
        author = result.group(3)
        location = ""
        if(result.group(5)):
            location = result.group(5)
        # NOTE(review): this local `time` shadows the imported time module
        # for the rest of the loop body (harmless here — time.sleep is not
        # used in this function).
        time = result.group("time")
        ##To avoid the "InvalidStringData" error when we insert the data into the db
        flag = True
        try:
            topic_temp = topic.decode("unicode_escape")
        except UnicodeDecodeError:
            flag = False
        if flag:
            topic = topic_temp
        else:
            topic = ""
        print "-------------" + movieID + "---------------"
        print "topic: " + topic
        print "rating: " + rating
        print "author: " + author
        print "location: " + location
        print "time: " + time
        print "movieID: " + movieID
        print "-----------------------------------"
        info = {
            "topic": topic,
            "rating": rating,
            "author": author,
            "location": location,
            "time": time,
            "movieID": movieID
        }
        try:
            dbhandler.writeRatingInfo(info)
        # NOTE(review): bare except hides the real DB error type.
        except:
            print "##writeRatingInfo db error:", sys.exc_info()[0]
            logging.error("##writeRatingInfo db error: " + str(sys.exc_info()[0]))
            return -1
        count += 1
    if(count == 0):
        print "Finish crawling ratings of movie: " + movieID
    return count
| |
import os
import shutil
import configparser
import requests
import datetime
import zipfile
import uuid
import re
import subprocess as sp
from flask import current_app as app
from aslo.service import activity as activity_service
from aslo.celery_app import logger
from .exceptions import ReleaseError, BuildProcessError, ScreenshotDoesNotExist
from . import gh
from . import i18n
from . import img
def get_bundle_path(bundle_name):
    """Return the path of *bundle_name* inside the configured bundle dir."""
    bundle_dir = app.config['BUILD_BUNDLE_DIR']
    return os.path.join(bundle_dir, bundle_name)
def xo_file_exists(assets):
    """Return the release asset that is a .xo bundle, or None.

    Args:
        assets: iterable of GitHub release asset dicts (each with a 'name').

    Returns:
        The first asset whose file name ends with '.xo', or None when the
        release carries no bundle.
    """
    for asset in assets:
        # Match the suffix only: a plain substring test would also accept
        # names such as 'notes.xo.txt'.
        if asset['name'].endswith('.xo'):
            logger.info('Attached xo file has been found.')
            return asset
    logger.info('No attached xo file has been found.')
    return None
def download_attached_xo(xo):
    """Stream the xo release asset into the temp bundle dir; return its path."""
    # save on blocks of 1024
    logger.info("Downloading {} file...".format(xo['name']))
    target_path = os.path.join(app.config['TEMP_BUNDLE_DIR'], xo['name'])
    response = requests.get(xo['browser_download_url'], stream=True)
    # Write the payload chunk by chunk to keep memory usage flat.
    with open(target_path, "wb") as out_file:
        for chunk in response.iter_content(chunk_size=1024):
            out_file.write(chunk)
    return target_path
def verify_and_extract_xo(tmp_bundle_path):
    """Validate a downloaded .xo bundle and unpack it to a temp directory.

    Args:
        tmp_bundle_path: path of the downloaded xo archive.

    Returns:
        Path of the extracted activity root (e.g. ``<uuid>/Name.Activity``).

    Raises:
        ReleaseError: when the archive has no activity.info, when a bundle
            with the same name already exists, when the extraction
            directory cannot be created, or when extraction fails.
    """

    def verify_xo(xo_archive):
        # A valid activity bundle always ships an activity.info file.
        logger.info('Searching for activity.info inside xo file.')
        valid = any(
            'activity.info' in filename for filename in xo_archive.namelist()
        )
        if not valid:
            raise ReleaseError('activity.info not found in xo file.')
        else:
            logger.info('activity.info file has been found in xo file.')
        # TODO: are we going to store this locally and/or in remote server?
        bundle_name = os.path.basename(tmp_bundle_path)
        bundle_path = get_bundle_path(bundle_name)
        if os.path.exists(bundle_path) and os.path.isfile(bundle_path):
            raise ReleaseError(
                'Bundle {} already exist.'.format(bundle_name)
            )

    def extract_xo(xo_archive):
        random_uuid = uuid.uuid4().hex
        extract_dir = os.path.join(app.config['TEMP_BUNDLE_DIR'], random_uuid)
        try:
            os.mkdir(extract_dir)
        except (IOError, FileExistsError) as e:
            raise ReleaseError(
                'Failed to created {} directory. Error: {}'
                .format(extract_dir, e)
            )
        # Find root_prefix for the xo archive. Usually it's Name.Activity
        archive_root_prefix = os.path.commonpath(xo_archive.namelist())
        try:
            xo_archive.extractall(path=extract_dir)
        except Exception as e:
            # BUG FIX: the old code swallowed this exception and then
            # returned an unbound local (UnboundLocalError). Fail loudly.
            logger.exception(e)
            raise ReleaseError(
                'Failed to extract {}. Error: {}'.format(tmp_bundle_path, e)
            )
        return os.path.join(extract_dir, archive_root_prefix)

    # Context manager closes the archive handle (previously leaked).
    with zipfile.ZipFile(tmp_bundle_path) as xo_archive:
        verify_xo(xo_archive)
        extraction_path = extract_xo(xo_archive)
    return extraction_path
def clone_repo(url, tag, repo_path):
    """Shallow-clone *url* at *tag* into the configured build directory.

    Raises:
        BuildProcessError: when the target directory is missing, a stale
            checkout cannot be removed, or the git command fails.
    """
    target_dir = app.config['BUILD_CLONE_REPO']
    if not os.path.isdir(target_dir):
        raise BuildProcessError('Directory %s does not exist' % target_dir)
    # A leftover checkout from a previous build has to go first.
    if os.path.isdir(repo_path):
        logger.info('Removing existing cloned repo %s', repo_path)
        try:
            shutil.rmtree(repo_path)
        except IOError as e:
            raise BuildProcessError(
                "Can't remove existing repo {}. Exception: {}"
                .format(repo_path, e)
            )
    git_cmd = ['git', '-c', 'advice.detachedHead=false', '-C', target_dir,
               'clone', '-b', tag, '--depth', '1', url]
    logger.info('Cloning repo %s', url)
    if sp.call(git_cmd) != 0:
        raise BuildProcessError('[%s] command has failed' % ' '.join(git_cmd))
def get_activity_metadata(repo_path):
    """Read and validate activity/activity.info; return its attributes.

    Raises:
        ReleaseError: when the file is missing, unparseable, lacks an
            [Activity] section, or misses a mandatory field.
    """

    def locate_metadata_file():
        # The metadata always lives at activity/activity.info.
        activity_file = os.path.join(repo_path, 'activity/activity.info')
        if not os.path.isfile(activity_file):
            raise ReleaseError(
                'Activity file %s does not exist' % activity_file
            )
        return activity_file

    def read_attributes(activity_file):
        parser = configparser.ConfigParser()
        if len(parser.read(activity_file)) == 0:
            raise ReleaseError('Error parsing metadata file')
        try:
            return dict(parser.items('Activity'))
        except configparser.NoSectionError as e:
            raise ReleaseError(
                'Error parsing metadata file. Exception message: %s' % e
            )

    def check_required(attributes):
        required = ('name', 'bundle_id', 'license',
                    'icon', 'activity_version')
        for attr in required:
            if attr not in attributes:
                raise ReleaseError(
                    '%s field missing in activity metadata' % attr
                )

    logger.info('Getting activity metadata from activity.info file.')
    attributes = read_attributes(locate_metadata_file())
    check_required(attributes)
    return attributes
def invoke_bundle_build(repo_path):
    """Build the activity bundle inside the build docker image.

    Returns:
        Path of the single .xo file produced under ``<repo_path>/dist``.

    Raises:
        BuildProcessError: when docker fails or dist/ does not contain
            exactly one file.
    """

    def locate_built_bundle():
        dist_dir = os.path.join(repo_path, 'dist')
        if os.path.isdir(dist_dir) and len(os.listdir(dist_dir)) == 1:
            logger.info('Bundle has been built successfully')
            return os.path.join(dist_dir, os.listdir(dist_dir)[0])
        else:
            raise BuildProcessError('Bundle file was not generated correctly')

    logger.info('Building bundle.')
    # Mount the checkout at /activity inside the build container.
    volume = repo_path + ':/activity'
    docker_cmd = ['docker', 'run', '--rm', '-v', volume,
                  app.config['BUILD_DOCKER_IMAGE']]
    logger.info('Running docker command: "%s"', ' '.join(docker_cmd))
    if sp.call(docker_cmd) != 0:
        raise BuildProcessError('Docker building process has failed')
    return locate_built_bundle()
def compare_version_in_bundlename_and_metadata(tmp_bundle_path, metadata):
    """Check that the version encoded in the bundle filename matches the
    ``activity_version`` declared in the activity metadata.

    Bundle filenames look like ``<name>-<version>.xo``.  Raises
    ReleaseError on mismatch (or when the filename does not parse).
    """
    bundle_name = os.path.basename(tmp_bundle_path)
    # Bug fix: use a raw string and escape the literal dots.  The previous
    # pattern '^\w+-(\d+.?\d*).xo$' let '.' match any character, so names
    # like 'foo-1x2Axo' would have been accepted as valid.
    match = re.search(r'^\w+-(\d+\.?\d*)\.xo$', bundle_name)
    bundle_version = match.group(1) if match else None
    if metadata['activity_version'] != bundle_version:
        raise ReleaseError(
            'Bundle filename version and activity metadata version '
            'does not match.'
        )
def get_sugar_details(activity, repo_path):
    """Heuristically determine sugar toolkit details for an activity.

    Returns a dict with ``is_gtk3``, ``is_web``, ``has_old_toolbars`` and
    the derived ``min_sugar_version``.
    """
    logger.info('Applying heuristic to determine min sugar supported version.')

    def is_gtk3():
        # Source-code signatures mapped to the GTK major version they imply.
        GTK3_IMPORT_TYPES = {'sugar3': 3, 'from gi.repository import Gtk': 3,
                             'sugar.': 2, 'import pygtk': 2,
                             'pygtk.require': 2}
        setup_py_path = os.path.join(repo_path, 'setup.py')
        # Bug fix: os.listdir() yields bare names; join them with repo_path
        # so the files are actually found (previously they were resolved
        # against the process cwd and the scan silently matched nothing).
        all_files = [os.path.join(repo_path, name)
                     for name in os.listdir(repo_path)]
        try_paths = [setup_py_path] + all_files
        for path in try_paths:
            if os.path.isfile(path):
                with open(path) as f:
                    text = f.read()
                for sign in GTK3_IMPORT_TYPES:
                    if sign in text:
                        version = GTK3_IMPORT_TYPES[sign]
                        return version == 3
        # Fallback to assuming GTK3
        return True

    def is_web():
        if 'exec' in activity:
            return activity['exec'] == 'sugar-activity-web'
        return False  # Fallback

    def has_old_toolbars():
        OLD_TOOLBAR_SIGNS = ['activity.ActivityToolbox', 'gtk.Toolbar']
        for name in os.listdir(repo_path):
            # Same cwd-vs-repo_path fix as in is_gtk3().
            path = os.path.join(repo_path, name)
            if os.path.isfile(path):
                with open(path) as f:
                    text = f.read()
                for sign in OLD_TOOLBAR_SIGNS:
                    if sign in text:
                        return True
        return False

    def determine_min_sugar_version(is_gtk3, is_web, has_old_toolbars):
        # Web activities need 0.100; GTK3 needs 0.96; GTK2 with the new
        # toolbar design needs 0.86, otherwise 0.82 is enough.
        min_sugar_version = '0.100' if is_web else (
            '0.96' if is_gtk3 else (
                '0.86' if not has_old_toolbars else '0.82'
            ))
        return min_sugar_version

    sugar = {}
    sugar['is_gtk3'] = is_gtk3()
    sugar['is_web'] = is_web()
    sugar['has_old_toolbars'] = has_old_toolbars()
    sugar['min_sugar_version'] = determine_min_sugar_version(
        sugar['is_gtk3'], sugar['is_web'], sugar['has_old_toolbars']
    )
    return sugar
def store_bundle(tmp_bundle_path):
    """Copy the built bundle into the permanent bundle directory.

    Makes the stored file world-readable so it can be served.  Raises
    ReleaseError if the copy (or chmod) fails.
    """
    try:
        shutil.copy2(tmp_bundle_path, app.config['BUILD_BUNDLE_DIR'])
        stored_bundle = os.path.join(
            app.config['BUILD_BUNDLE_DIR'],
            os.path.basename(tmp_bundle_path)
        )
        os.chmod(stored_bundle, 0o644)
    except IOError as e:
        # Bug fix: the message was passed as two positional arguments
        # ('...%s', e), so ReleaseError carried an unformatted tuple
        # instead of the interpolated message.
        raise ReleaseError(
            'Bundle copying has failed: %s' % e
        )
    logger.info('Bundle succesfully stored at %s', stored_bundle)
def clean_up(tmp_bundle_path, repo_path):
    """Remove the temporary bundle file and the cloned repository tree.

    Raises ReleaseError if either removal fails.
    """
    try:
        os.remove(tmp_bundle_path)
        shutil.rmtree(repo_path)
    except IOError as err:
        raise ReleaseError('Error removing file: %s' % err)
def handle_release(gh_json):
    """Process a GitHub release webhook payload end to end.

    Obtains a bundle (either an attached .xo asset or one built from the
    tagged source), collects activity metadata, translations and images,
    inserts the activity record into the database, stores the bundle file
    permanently, and cleans up temporary artefacts.
    """
    repo_url = gh_json['repository']['clone_url']
    repo_name = gh_json['repository']['name']
    release = gh_json['release']
    tag = release['tag_name']
    tag_commit = gh.find_tag_commit(gh_json['repository']['full_name'], tag)
    xo_asset = None
    # TODO: Extract message to constants file
    gh.comment_on_commit(
        tag_commit, "Build has started :hourglass_flowing_sand:"
    )
    if 'assets' in release and len(release['assets']) != 0:
        xo_asset = xo_file_exists(release['assets'])
    if xo_asset:
        # Release already ships a .xo bundle: download, verify and unpack it.
        logger.info('[bundle-release] No bundle building process needed.')
        tmp_bundle_path = download_attached_xo(xo_asset)
        repo_path = verify_and_extract_xo(tmp_bundle_path)
    else:
        # Source-only release: clone the tag and build the bundle in docker.
        logger.info('[sourcecode-release] Building bundle from source code.')
        repo_path = os.path.join(app.config['BUILD_CLONE_REPO'], repo_name)
        clone_repo(repo_url, tag, repo_path)
        tmp_bundle_path = invoke_bundle_build(repo_path)
    metadata = get_activity_metadata(repo_path)
    compare_version_in_bundlename_and_metadata(tmp_bundle_path, metadata)
    translations = i18n.get_translations(repo_path)
    if translations:
        metadata['i18n_name'] = i18n.translate_field(
            metadata['name'], translations
        )
        metadata['i18n_summary'] = i18n.translate_field(
            metadata.get('summary', ''), translations
        )
        # name and summary fields might have empty values or missing transl.
        if not metadata['i18n_name']:
            metadata['i18n_name'] = {'en': metadata['name']}
        if not metadata['i18n_summary']:
            metadata['i18n_summary'] = {'en': metadata.get('summary', '')}
    else:
        # No translations at all: fall back to the raw metadata values.
        metadata['i18n_name'] = {'en': metadata['name']}
        metadata['i18n_summary'] = {'en': metadata.get('summary', '')}
    metadata['repository'] = repo_url
    metadata['developers'] = gh.get_developers(
        gh_json['repository']['full_name']
    )
    metadata['icon_bin'] = img.get_icon(repo_path, metadata['icon'])
    try:
        screenshots = img.get_screenshots(repo_path, metadata['bundle_id'])
    except ScreenshotDoesNotExist as e:
        # Screenshots are optional: log and continue with an empty mapping.
        screenshots = {}
        logger.info(e)
    finally:
        # NOTE(review): if get_screenshots() raised anything other than
        # ScreenshotDoesNotExist, 'screenshots' would be unbound here and
        # this assignment would mask the real error with a NameError.
        metadata['screenshots'] = screenshots
    metadata['sugar'] = get_sugar_details(metadata, repo_path)
    metadata['release'] = {}
    metadata['release']['notes'] = gh.render_markdown(
        gh_json['release']['body']
    )
    metadata['release']['time'] = datetime.datetime.strptime(
        gh_json['release']['published_at'], '%Y-%m-%dT%H:%M:%SZ'
    )
    logger.info('Inserting activity into db.')
    activity_service.insert_activity(metadata)
    logger.info('Saving bundle.')
    store_bundle(tmp_bundle_path)
    logger.info('Cleaning up.')
    clean_up(tmp_bundle_path, repo_path)
| |
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
import wx
from tree import DragTreeCtrl, TreeItemData
from wh import xlt, Menu
from adm import images
from _pgsql import pgQuery
class Snippet:
    """A single SQL snippet, or a snippet group, stored in the snippet table."""

    def __init__(self, id, parent, name, text, sort):
        self.id = id
        self.parent = parent
        self.name = name
        self.text = text
        self.sort = sort
        self.treeitem = None   # tree item handle, set once shown in the tree
        self.prevText = None   # previous text, kept so "revert" can undo

    def IsGroup(self):
        """A group is a snippet that has no text of its own."""
        return not self.text
class SnippetTree(DragTreeCtrl):
    """Tree control showing stored SQL snippets, optionally grouped.

    Snippets are persisted in the server's snippet table; the tree supports
    adding, renaming, replacing, reverting, deleting and drag-reordering.
    If the server is not instrumented (no snippet table), a placeholder
    message is shown instead.
    """

    def __init__(self, parentWin, server, editor):
        DragTreeCtrl.__init__(self, parentWin, "Snippets", style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT)
        self.editor=editor
        self.server=server
        self.frame=parentWin
        self.snippets={}   # id -> Snippet for every row loaded from the table
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnTreeRightClick)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnTreeSelChanged)
        self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnTreeActivate)
        rootSnippets=[]
        if self.frame.snippet_table:
            # ORDER BY parent, sort keeps siblings in their stored order.
            set=self.server.GetCursor().ExecuteSet("SELECT * FROM %s ORDER BY parent, sort" % self.frame.snippet_table)
            for row in set:
                snippet=Snippet(row['id'], row['parent'], row['name'], row['snippet'], row['sort'])
                self.snippets[snippet.id]=snippet
                if not snippet.parent:
                    rootSnippets.append(snippet)
            # Insert root-level snippets, then their children recursively.
            for snippet in rootSnippets:
                if not snippet.parent:
                    self.AppendSnippet(snippet, parentItem=self.GetRootItem())
                    self.checkChildren(snippet)
            # Orphans (parent id no longer present) fall back to the root.
            for snippet in self.snippets.values():
                if not snippet.treeitem:
                    self.AppendSnippet(snippet, parentItem=self.GetRootItem())
        else:
            item=self.AppendItem(self.GetRootItem(), xlt("Snippets not available:"))
            item=self.AppendItem(item, xlt("Server not instrumented."))
        self.ExpandAll()

    def updateSnippet(self, snippet):
        """Persist all mutable fields of *snippet* back to the table."""
        query=pgQuery(self.frame.snippet_table, self.server.GetCursor())
        query.AddColVal('parent', snippet.parent)
        query.AddColVal('sort', snippet.sort)
        query.AddColVal('name', snippet.name)
        query.AddColVal('snippet', snippet.text)
        query.AddWhere('id', snippet.id)
        query.Update()

    def insertSnippet(self, snippet):
        """Insert *snippet* as a new row; stores and returns the new id."""
        query=pgQuery(self.frame.snippet_table, self.server.GetCursor())
        query.AddColVal('parent', snippet.parent)
        query.AddColVal('sort', snippet.sort)
        query.AddColVal('name', snippet.name)
        query.AddColVal('snippet', snippet.text)
        id=query.Insert("id")
        snippet.id=id
        return id

    def OnDelSnippet(self, evt):
        """Context-menu handler: delete the selected snippet (DB and tree)."""
        snippet=self.GetNode()
        if snippet:
            query=pgQuery(self.frame.snippet_table, self.server.GetCursor())
            query.AddWhere('id', snippet.id)
            query.Delete()
            self.Delete(snippet.treeitem)
            del self.snippets[snippet.id]
            self.frame.SetStatus(xlt("Snippet deleted."))

    def getSnippetName(self, snippet):
        """Return the display label: the name, or a truncated text preview."""
        if snippet.name:
            return snippet.name
        else:
            maxtextlen=80
            if len(snippet.text) < maxtextlen:
                return snippet.text
            else:
                return snippet.text[:maxtextlen] + "..."

    def AppendSnippet(self, snippet, text=None, parentItem=None):
        """Add a snippet to the tree (and, for new ones, to the database).

        *snippet* may be an existing Snippet instance or a plain name
        string; in the latter case a new Snippet row is created under the
        resolved parent with a sort value after its siblings.
        """
        if not parentItem:
            # Derive the parent item from the current selection: snippets
            # with text are leaves, so new entries go into their group.
            parent=self.GetNode()
            if parent:
                parentItem=parent.treeitem
                if parent.text:
                    group=self.snippets.get(parent.parent)
                    if group:
                        parentItem=group.treeitem
                    else:
                        parentItem=self.GetRootItem()
            else:
                parentItem=self.GetRootItem()
        if not isinstance(snippet, Snippet):
            # A bare name was passed: create and persist a new snippet.
            parent=0
            if parentItem:
                p=self.GetNode(parentItem)
                if p:
                    parent=p.id
            maxSort=1
            for s in self.snippets.values():
                if s.parent == parent and s.sort > maxSort:
                    maxSort=s.sort
            snippet=Snippet(None, parent, snippet, text, maxSort+1)
            self.insertSnippet(snippet)
        if snippet.IsGroup():
            image= images.GetModuleId(self, 'snippets')
        else:
            image= images.GetModuleId(self, 'snippet')
        item=self.AppendItem(parentItem, self.getSnippetName(snippet), image=image, selectedImage=image, data=TreeItemData(snippet))
        self.snippets[snippet.id] = snippet
        snippet.treeitem=item
        return True

    def CanReplace(self):
        """True if the selected snippet's text may be replaced from the editor."""
        if not self.frame.snippet_table:
            return False
        a,e=self.editor.GetSelection()
        # Require either a selection, multiple lines, or non-empty SQL.
        if a==e and self.editor.GetLineCount() < 2 and not self.frame.getSql():
            return False
        snippet=self.GetNode()
        return snippet and snippet.text

    def ReplaceSnippet(self, text):
        """Replace the selected snippet's text, keeping the old text for revert."""
        snippet=self.GetNode()
        if snippet:
            snippet.prevText=snippet.text
            snippet.text=text
            self.updateSnippet(snippet)
            self.frame.SetStatus(xlt("Snippet updated."))
        return False

    def OnReplaceSnippet(self, evt):
        """Context-menu handler: replace snippet text with the editor's SQL."""
        sql=self.frame.getSql()
        if sql:
            self.ReplaceSnippet(sql)

    def OnRenameSnippet(self, evt):
        """Context-menu handler: prompt for and store a new snippet name."""
        snippet=self.GetNode()
        if snippet:
            dlg=wx.TextEntryDialog(self, xlt("Name"), xlt("Rename snippet"), snippet.name)
            if dlg.ShowModal() == wx.ID_OK:
                snippet.name = dlg.GetValue()
                self.updateSnippet(snippet)
                self.SetItemText(snippet.treeitem, self.getSnippetName(snippet))
                self.frame.SetStatus(xlt("Snippet renamed."))

    def OnRevertSnippet(self, evt):
        """Context-menu handler: restore the snippet text saved before replace."""
        snippet=self.GetNode()
        if snippet and snippet.prevText:
            snippet.text=snippet.prevText
            snippet.prevText=None
            self.updateSnippet(snippet)
            self.frame.SetStatus(xlt("Snippet reverted."))
        return False

    def OnAddGroup(self, evt):
        """Context-menu handler: create a new top-level snippet group."""
        dlg=wx.TextEntryDialog(self, xlt("Group name"), xlt("Add group"))
        if dlg.ShowModal() == wx.ID_OK:
            name=dlg.GetValue()
            if name:
                self.AppendSnippet(name, parentItem=self.GetRootItem())

    def OnTreeSelChanged(self, evt):
        # Selection affects which menu entries are enabled.
        self.frame.updateMenu()

    def OnTreeRightClick(self, evt):
        """Build and show the context menu for the clicked item."""
        item, _flags=self.HitTest(evt.GetPosition())
        if item and item != self.GetSelection():
            self.SelectItem(item)
        cm=Menu(self.frame)
        if item:
            snippet=self.GetNode(item)
            if snippet.IsGroup():
                cm.Add(self.OnRenameSnippet, xlt("Rename"), xlt(("Rename group")))
                item=cm.Add(self.OnDelSnippet, xlt("Delete"), xlt(("Delete group")))
                # Groups that still contain snippets may not be deleted.
                for s in self.snippets.values():
                    if s.parent == snippet.id:
                        cm.Enable(item, False)
                        break;
            else:
                cm.Add(self.OnReplaceSnippet, xlt("Replace"), xlt(("Replace snippet text")))
                cm.Add(self.OnRenameSnippet, xlt("Rename"), xlt(("Rename snippet")))
                item=cm.Add(self.OnRevertSnippet, xlt("Revert"), xlt(("Revert snippet to previous text")))
                cm.Enable(item, snippet.prevText != None)
                cm.Add(self.OnDelSnippet, xlt("Delete"), xlt(("Delete snippet")))
            cm.AppendSeparator()
        cm.Add(self.OnAddGroup, xlt("Add group"), xlt(("Add group")))
        cm.Popup(evt)

    def ExecuteDrag(self, targetItem):
        """Finish a drag: re-parent/re-order the dragged snippet at the drop target.

        Dropping on a group moves the snippet into it; dropping on a leaf
        inserts after that leaf (sort between it and its next sibling);
        dropping nowhere appends at root level.
        """
        if targetItem: targetSnippet=self.GetNode(targetItem)
        else: targetSnippet=None
        snippet=self.GetNode(self.currentItem)
        parentItem=self.GetRootItem()
        image=self.GetItemImage(snippet.treeitem)
        if self.currentItem != targetItem and targetSnippet != snippet:
            self.Delete(snippet.treeitem)
            if targetSnippet:
                if targetSnippet.IsGroup():
                    parentItem=targetSnippet.treeitem
                    snippet.parent=targetSnippet.id
                else:
                    group=self.snippets.get(targetSnippet.parent)
                    snippet.parent=targetSnippet.parent
                    if group:
                        parentItem=group.treeitem
                    snippet.sort=targetSnippet.sort+1
                    nextItem=self.GetNextSibling(targetItem)
                    if nextItem:
                        nextSnippet=self.GetNode(nextItem)
                        # NOTE(review): this compares the parent *id* to the
                        # Snippet object — likely meant targetSnippet.id, so
                        # the midpoint-sort branch probably never triggers.
                        if nextSnippet and nextSnippet.parent == targetSnippet:
                            snippet.sort=(nextSnippet.sort + targetSnippet.sort)/2
                    item=self.InsertItem(parentItem, targetItem, self.getSnippetName(snippet), image=image, data=TreeItemData(snippet))
                    snippet.treeitem = item
                    targetSnippet=None
            else:
                # Dropped on empty space: snippet becomes a root-level entry.
                item=self.AppendItem(parentItem, self.getSnippetName(snippet), image=image, data=TreeItemData(snippet))
                snippet.treeitem = item
                snippet.parent=0
            if targetSnippet:
                # Dropped onto a group: append as its (last) child.
                self.AppendSnippet(snippet, None, parentItem)
            self.updateSnippet(snippet)
            self.checkChildren(snippet)

    def checkChildren(self, snippet):
        """Recursively (re)insert all children of *snippet* under its tree item."""
        for child in self.snippets.values():
            if child.parent == snippet.id:
                self.AppendSnippet(child, None, snippet.treeitem)
                self.checkChildren(child)

    def OnTreeActivate(self, evt):
        """Double-click: paste the snippet text into the editor."""
        snippet= self.GetNode()
        if snippet:
            self.editor.ReplaceSelection(snippet.text)
            self.frame.updateMenu()
        self.editor.SetFocus()
| |
##########################################################################
#
# Copyright (c) 2014-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import IECore
import Gaffer
import GafferTest
class NodeAlgoTest( GafferTest.TestCase ) :
    """Tests for the Gaffer.NodeAlgo utilities: userDefault metadata,
    presets, and the graph visitation/search helpers."""

    def testUserDefaults( self ) :
        """'userDefault' metadata is applied on request, never automatically."""
        node = GafferTest.AddNode()
        self.assertEqual( node["op1"].getValue(), 0 )
        self.assertFalse( Gaffer.NodeAlgo.hasUserDefault( node["op1"] ) )
        Gaffer.Metadata.registerValue( GafferTest.AddNode.staticTypeId(), "op1", "userDefault", IECore.IntData( 7 ) )
        self.assertTrue( Gaffer.NodeAlgo.hasUserDefault( node["op1"] ) )
        self.assertFalse( Gaffer.NodeAlgo.isSetToUserDefault( node["op1"] ) )
        Gaffer.NodeAlgo.applyUserDefaults( node )
        self.assertEqual( node["op1"].getValue(), 7 )
        self.assertTrue( Gaffer.NodeAlgo.isSetToUserDefault( node["op1"] ) )
        # even if it's registered, it doesn't get applied outside of the NodeMenu UI
        node2 = GafferTest.AddNode()
        self.assertEqual( node2["op1"].getValue(), 0 )
        Gaffer.NodeAlgo.applyUserDefaults( node2 )
        self.assertEqual( node2["op1"].getValue(), 7 )
        # they can also be applied to the plug directly
        node2["op1"].setValue( 1 )
        Gaffer.NodeAlgo.applyUserDefault( node2["op1"] )
        self.assertEqual( node2["op1"].getValue(), 7 )
        # the userDefault can be unregistered by overriding with None
        node3 = GafferTest.AddNode()
        Gaffer.Metadata.registerValue( GafferTest.AddNode.staticTypeId(), "op1", "userDefault", None )
        self.assertFalse( Gaffer.NodeAlgo.hasUserDefault( node3["op1"] ) )
        Gaffer.NodeAlgo.applyUserDefaults( node3 )
        self.assertEqual( node3["op1"].getValue(), 0 )
        # with no default registered any more, nothing counts as "at" it
        self.assertFalse( Gaffer.NodeAlgo.isSetToUserDefault( node["op1"] ) )

    def testCompoundPlugUserDefaults( self ) :
        """Defaults on child plugs ('p.s') work, and per-instance metadata wins."""
        node = GafferTest.CompoundPlugNode()
        self.assertEqual( node["p"]["s"].getValue(), "" )
        Gaffer.Metadata.registerValue( GafferTest.CompoundPlugNode.staticTypeId(), "p.s", "userDefault", IECore.StringData( "from the metadata" ) )
        self.assertFalse( Gaffer.NodeAlgo.isSetToUserDefault( node["p"]["s"] ) )
        Gaffer.NodeAlgo.applyUserDefaults( node )
        self.assertEqual( node["p"]["s"].getValue(), "from the metadata" )
        self.assertTrue( Gaffer.NodeAlgo.isSetToUserDefault( node["p"]["s"] ) )
        # override the metadata for this particular instance
        Gaffer.Metadata.registerValue( node["p"]["s"], "userDefault", IECore.StringData( "i am special" ) )
        self.assertFalse( Gaffer.NodeAlgo.isSetToUserDefault( node["p"]["s"] ) )
        Gaffer.NodeAlgo.applyUserDefaults( node )
        self.assertEqual( node["p"]["s"].getValue(), "i am special" )
        self.assertTrue( Gaffer.NodeAlgo.isSetToUserDefault( node["p"]["s"] ) )
        # this node still gets the original userDefault
        node2 = GafferTest.CompoundPlugNode()
        self.assertFalse( Gaffer.NodeAlgo.isSetToUserDefault( node2["p"]["s"] ) )
        Gaffer.NodeAlgo.applyUserDefaults( node2 )
        self.assertEqual( node2["p"]["s"].getValue(), "from the metadata" )
        self.assertTrue( Gaffer.NodeAlgo.isSetToUserDefault( node2["p"]["s"] ) )

    def testSeveralUserDefaults( self ) :
        """applyUserDefaults() accepts a list of nodes."""
        node = GafferTest.AddNode()
        node2 = GafferTest.AddNode()
        self.assertEqual( node["op1"].getValue(), 0 )
        self.assertEqual( node2["op1"].getValue(), 0 )
        Gaffer.Metadata.registerValue( GafferTest.AddNode.staticTypeId(), "op1", "userDefault", IECore.IntData( 1 ) )
        Gaffer.Metadata.registerValue( node2["op1"], "userDefault", IECore.IntData( 2 ) )
        Gaffer.NodeAlgo.applyUserDefaults( [ node, node2 ] )
        self.assertEqual( node["op1"].getValue(), 1 )
        self.assertEqual( node2["op1"].getValue(), 2 )

    def testUnsettableUserDefaults( self ) :
        """Plugs with an input connection can't take a default; they follow the input."""
        node = GafferTest.AddNode()
        node["op2"].setInput( node["op1"] )
        self.assertEqual( node["op1"].getValue(), 0 )
        self.assertEqual( node["op2"].getValue(), 0 )
        Gaffer.Metadata.registerValue( GafferTest.AddNode, "op1", "userDefault", IECore.IntData( 1 ) )
        Gaffer.Metadata.registerValue( GafferTest.AddNode, "op2", "userDefault", IECore.IntData( 2 ) )
        Gaffer.NodeAlgo.applyUserDefaults( node )
        self.assertEqual( node["op1"].getValue(), 1 )
        # op2 keeps tracking op1's value rather than receiving its own default
        self.assertEqual( node["op2"].getValue(), 1 )

    def testPresets( self ) :
        """'preset:<name>' metadata drives presets() / applyPreset() / currentPreset()."""
        node = GafferTest.AddNode()
        self.assertEqual( Gaffer.NodeAlgo.presets( node["op1"] ), [] )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), None )
        Gaffer.Metadata.registerValue( node["op1"], "preset:one", 1 )
        Gaffer.Metadata.registerValue( node["op1"], "preset:two", 2 )
        self.assertEqual( Gaffer.NodeAlgo.presets( node["op1"] ), [ "one", "two" ] )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), None )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "one" )
        self.assertEqual( node["op1"].getValue(), 1 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "one" )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "two" )
        self.assertEqual( node["op1"].getValue(), 2 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "two" )

    def testPresetsArray( self ) :
        """Parallel 'presetNames'/'presetValues' arrays also define presets."""
        node = GafferTest.AddNode()
        self.assertEqual( Gaffer.NodeAlgo.presets( node["op1"] ), [] )
        Gaffer.Metadata.registerValue(
            node["op1"], "presetNames",
            IECore.StringVectorData( [ "a", "b", "c" ] )
        )
        Gaffer.Metadata.registerValue(
            node["op1"], "presetValues",
            IECore.IntVectorData( [ 1, 2, 3 ] )
        )
        self.assertEqual( Gaffer.NodeAlgo.presets( node["op1"] ), [ "a", "b", "c" ] )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "a" )
        self.assertEqual( node["op1"].getValue(), 1 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "a" )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "b" )
        self.assertEqual( node["op1"].getValue(), 2 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "b" )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "c" )
        self.assertEqual( node["op1"].getValue(), 3 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "c" )
        # a preset registered individually should take precedence
        Gaffer.Metadata.registerValue( node["op1"], "preset:c", 10 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), None )
        Gaffer.NodeAlgo.applyPreset( node["op1"], "c" )
        self.assertEqual( node["op1"].getValue(), 10 )
        self.assertEqual( Gaffer.NodeAlgo.currentPreset( node["op1"] ), "c" )

    def __visitationGraph( self ) :
        """Build the three-level test graph used by all visitation tests.

        Names encode the level: L<level>_<index>.
        """
        # L1_1     L1_2
        #  |        |\
        #  |        | \
        #  |        |  \
        # L2_1    L2_2  L2_3
        #  |\       |    /
        #  | \      |   /
        #  |  \     |  /
        #  |   \    | /
        #  |    \   |/
        # L3_1   L3_2
        s = Gaffer.ScriptNode()
        s["L1_1"] = GafferTest.MultiplyNode()
        s["L1_2"] = GafferTest.AddNode()
        s["L2_1"] = GafferTest.AddNode()
        s["L2_2"] = GafferTest.MultiplyNode()
        s["L2_3"] = GafferTest.AddNode()
        s["L3_1"] = GafferTest.AddNode()
        s["L3_2"] = GafferTest.MultiplyNode()
        # extra input so L3_2 can receive all three level-2 outputs
        s["L3_2"]["op3"] = Gaffer.IntPlug()
        s["L2_1"]["op1"].setInput( s["L1_1"]["product"] )
        s["L2_2"]["op1"].setInput( s["L1_2"]["sum"] )
        s["L2_3"]["op1"].setInput( s["L1_2"]["sum"] )
        s["L3_1"]["op1"].setInput( s["L2_1"]["sum"] )
        s["L3_2"]["op1"].setInput( s["L2_1"]["sum"] )
        s["L3_2"]["op2"].setInput( s["L2_2"]["product"] )
        s["L3_2"]["op3"].setInput( s["L2_3"]["sum"] )
        return s

    class __CapturingVisitor( object ) :
        """Visitor that records every node it is called with and never prunes."""

        def __init__( self ) :
            self.visited = []

        def __call__( self, node ) :
            self.visited.append( node )
            return True

    def testVisitUpstream( self ) :
        """visitUpstream() walks input connections, breadth-first by default."""
        g = self.__visitationGraph()
        v = self.__CapturingVisitor()
        Gaffer.NodeAlgo.visitUpstream( g["L3_1"], v )
        self.assertEqual( v.visited, [ g["L2_1"], g["L1_1"] ] )
        del v.visited[:]
        Gaffer.NodeAlgo.visitUpstream( g["L3_2"], v )
        self.assertEqual( v.visited, [ g["L2_1"], g["L2_2"], g["L2_3"], g["L1_1"], g["L1_2"] ] )
        del v.visited[:]
        Gaffer.NodeAlgo.visitUpstream( g["L3_2"], v, order = Gaffer.NodeAlgo.VisitOrder.DepthFirst )
        self.assertEqual( v.visited, [ g["L2_1"], g["L1_1"], g["L2_2"], g["L1_2"], g["L2_3"] ] )

    def testVisitDownstream( self ) :
        """visitDownstream() walks output connections in the chosen order."""
        g = self.__visitationGraph()
        v = self.__CapturingVisitor()
        Gaffer.NodeAlgo.visitDownstream( g["L1_1"], v )
        self.assertEqual( v.visited, [ g["L2_1"], g["L3_1"], g["L3_2"] ] )
        del v.visited[:]
        Gaffer.NodeAlgo.visitDownstream( g["L1_2"], v )
        self.assertEqual( v.visited, [ g["L2_2"], g["L2_3"], g["L3_2"] ] )
        del v.visited[:]
        Gaffer.NodeAlgo.visitDownstream( g["L1_2"], v, order = Gaffer.NodeAlgo.VisitOrder.DepthFirst )
        self.assertEqual( v.visited, [ g["L2_2"], g["L3_2"], g["L2_3"] ] )

    def testVisitConnected( self ) :
        """visitConnected() walks both directions, visiting each node once."""
        g = self.__visitationGraph()
        v = self.__CapturingVisitor()
        Gaffer.NodeAlgo.visitConnected( g["L2_1"], v )
        self.assertEqual( v.visited, [ g["L1_1"], g["L3_1"], g["L3_2"], g["L2_2"], g["L2_3"], g["L1_2"] ] )
        v = self.__CapturingVisitor()
        Gaffer.NodeAlgo.visitConnected( g["L2_1"], v, order = Gaffer.NodeAlgo.VisitOrder.DepthFirst )
        self.assertEqual( v.visited, [ g["L1_1"], g["L3_1"], g["L3_2"], g["L2_2"], g["L1_2"], g["L2_3"] ] )

    def testFindUpstream( self ) :
        """findUpstream() returns the first matching node, or None."""
        g = self.__visitationGraph()
        isLevelOne = lambda node : node.getName().startswith( "L1" )
        self.assertEqual( Gaffer.NodeAlgo.findUpstream( g["L3_1"], isLevelOne ), g["L1_1"] )
        self.assertEqual( Gaffer.NodeAlgo.findUpstream( g["L3_2"], isLevelOne ), g["L1_1"] )
        self.assertEqual( Gaffer.NodeAlgo.findUpstream( g["L1_1"], isLevelOne ), None )

    def testFindDownstream( self ) :
        """findDownstream() returns the first matching node, or None."""
        g = self.__visitationGraph()
        isLevelThree = lambda node : node.getName().startswith( "L3" )
        self.assertEqual( Gaffer.NodeAlgo.findDownstream( g["L1_1"], isLevelThree ), g["L3_1"] )
        self.assertEqual( Gaffer.NodeAlgo.findDownstream( g["L1_2"], isLevelThree ), g["L3_2"] )
        self.assertEqual( Gaffer.NodeAlgo.findDownstream( g["L3_2"], isLevelThree ), None )

    def testFindConnected( self ) :
        """findConnected() searches both directions; the start node is excluded."""
        g = self.__visitationGraph()
        isLevelTwo = lambda node : node.getName().startswith( "L2" )
        self.assertEqual( Gaffer.NodeAlgo.findConnected( g["L1_1"], isLevelTwo ), g["L2_1"] )
        self.assertEqual( Gaffer.NodeAlgo.findConnected( g["L1_2"], isLevelTwo ), g["L2_2"] )
        self.assertEqual( Gaffer.NodeAlgo.findConnected( g["L2_1"], isLevelTwo ), g["L2_2"] )

    def testFindAllUpstream( self ) :
        """findAllUpstream() returns every match in visitation order."""
        g = self.__visitationGraph()
        isLevelOne = lambda node : node.getName().startswith( "L1" )
        self.assertEqual( Gaffer.NodeAlgo.findAllUpstream( g["L3_1"], isLevelOne ), [ g["L1_1"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllUpstream( g["L3_2"], isLevelOne ), [ g["L1_1"], g["L1_2"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllUpstream( g["L1_1"], isLevelOne ), [] )

    def testFindAllDownstream( self ) :
        """findAllDownstream() returns every match in visitation order."""
        g = self.__visitationGraph()
        isLevelThree = lambda node : node.getName().startswith( "L3" )
        self.assertEqual( Gaffer.NodeAlgo.findAllDownstream( g["L1_1"], isLevelThree ), [ g["L3_1"], g["L3_2"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllDownstream( g["L1_2"], isLevelThree ), [ g["L3_2"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllDownstream( g["L3_2"], isLevelThree ), [] )

    def testFindAllConnected( self ) :
        """findAllConnected() returns every match reachable in either direction."""
        g = self.__visitationGraph()
        isLevelTwo = lambda node : node.getName().startswith( "L2" )
        self.assertEqual( Gaffer.NodeAlgo.findAllConnected( g["L1_1"], isLevelTwo ), [ g["L2_1"], g["L2_2"], g["L2_3"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllConnected( g["L1_2"], isLevelTwo ), [ g["L2_2"], g["L2_3"], g["L2_1"] ] )
        self.assertEqual( Gaffer.NodeAlgo.findAllConnected( g["L2_1"], isLevelTwo ), [ g["L2_2"], g["L2_3"] ] )

    def testUpstreamNodes( self ) :
        """upstreamNodes() lists ancestors, optionally filtered by node type."""
        g = self.__visitationGraph()
        self.assertEqual( Gaffer.NodeAlgo.upstreamNodes( g["L3_1"] ), [ g["L2_1" ], g["L1_1"] ] )
        self.assertEqual( Gaffer.NodeAlgo.upstreamNodes( g["L3_1"], GafferTest.MultiplyNode ), [ g["L1_1"] ] )

    def testDownstreamNodes( self ) :
        """downstreamNodes() lists descendants, optionally filtered by node type."""
        g = self.__visitationGraph()
        self.assertEqual( Gaffer.NodeAlgo.downstreamNodes( g["L1_1"] ), [ g["L2_1" ], g["L3_1"], g["L3_2"] ] )
        self.assertEqual( Gaffer.NodeAlgo.downstreamNodes( g["L1_1"], GafferTest.MultiplyNode ), [ g["L3_2"] ] )

    def testConnectedNodes( self ) :
        """connectedNodes() lists all reachable nodes, optionally filtered."""
        g = self.__visitationGraph()
        self.assertEqual( Gaffer.NodeAlgo.connectedNodes( g["L1_1"] ), [ g["L2_1" ], g["L3_1"], g["L3_2"], g["L2_2"], g["L2_3"], g["L1_2"] ] )
        self.assertEqual( Gaffer.NodeAlgo.connectedNodes( g["L1_1"], GafferTest.MultiplyNode ), [ g["L3_2"], g["L2_2"] ] )

    def testBadVisitorReturnValue( self ) :
        """Visitors must return a bool; anything else raises."""
        g = self.__visitationGraph()
        with six.assertRaisesRegex( self, RuntimeError, r"Visitor must return a bool \(True to continue, False to prune\)" ) :
            Gaffer.NodeAlgo.visitUpstream( g["L3_1"], lambda node : None )

    def __boxedVisitationGraph( self ) :
        """The visitation graph with L1_1 and L2_3 each wrapped in a Box."""
        s = self.__visitationGraph()
        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["L1_1"] ] ) )
        b.setName( "Box_L1_1" )
        b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["L2_3"] ] ) )
        b.setName( "Box_L2_3" )
        return s

    def testVisitBoxedNodesDepthFirst( self ) :
        """Depth-first visitation descends through Boxes and their contents."""
        s = self.__boxedVisitationGraph()
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["Box_L1_1"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s[ "L2_1"], s["L3_1"], s["L3_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["Box_L1_1"]["L1_1"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s["Box_L1_1"], s[ "L2_1"], s["L3_1"], s["L3_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["L1_2"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s["L2_2"], s["L3_2"], s["Box_L2_3"], s["Box_L2_3"]["L2_3"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["Box_L2_3"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s[ "L1_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["Box_L2_3"]["L2_3"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s["Box_L2_3"], s[ "L1_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["L3_1"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s[ "L2_1"], s["Box_L1_1"], s["Box_L1_1"]["L1_1"] ]
        )

    def testVisitBoxedNodesBreadthFirst( self ) :
        """Breadth-first visitation also traverses Boxes and their contents."""
        s = self.__boxedVisitationGraph()
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["Box_L1_1"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s[ "L2_1"], s["L3_1"], s["L3_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["Box_L1_1"]["L1_1"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s["Box_L1_1"], s[ "L2_1"], s["L3_1"], s["L3_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.downstreamNodes( s["L1_2"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s["L2_2"], s["Box_L2_3"], s["Box_L2_3"]["L2_3"], s["L3_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["Box_L2_3"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s[ "L1_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["Box_L2_3"]["L2_3"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s["Box_L2_3"], s[ "L1_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["L3_1"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s[ "L2_1"], s["Box_L1_1"], s["Box_L1_1"]["L1_1"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["L3_2"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s["L2_1"], s[ "L2_2"], s["Box_L2_3"], s["Box_L2_3"]["L2_3"], s["Box_L1_1"], s["Box_L1_1"]["L1_1"], s["L1_2"] ]
        )

    def testVisitBoxedBranches( self ) :
        """Visitation follows promoted plugs across a Box boundary."""
        s = Gaffer.ScriptNode()
        s["b"] = Gaffer.Box()
        s["b"]["L1"] = GafferTest.AddNode()
        s["b"]["L2_1"] = GafferTest.AddNode()
        s["b"]["L2_2"] = GafferTest.AddNode()
        s["b"]["L3_1"] = GafferTest.AddNode()
        s["b"]["L1"]["op1"].setInput( s["b"]["L2_1"]["sum"] )
        s["b"]["L1"]["op2"].setInput( s["b"]["L2_2"]["sum"] )
        s["b"]["L2_1"]["op1"].setInput( s["b"]["L3_1"]["sum"] )
        s["n"] = GafferTest.AddNode()
        s["n"]["op1"].setInput( Gaffer.PlugAlgo.promote( s["b"]["L1"]["sum"] ) )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["n"], order = Gaffer.NodeAlgo.VisitOrder.DepthFirst ),
            [ s["b"], s["b"]["L1"], s["b"]["L2_1"], s["b"]["L3_1"], s["b"]["L2_2"] ]
        )
        self.assertEqual(
            Gaffer.NodeAlgo.upstreamNodes( s["n"], order = Gaffer.NodeAlgo.VisitOrder.BreadthFirst ),
            [ s["b"], s["b"]["L1"], s["b"]["L2_1"], s["b"]["L2_2"], s["b"]["L3_1"] ]
        )

    def tearDown( self ) :
        """Remove metadata registered at class scope so tests don't leak."""
        Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "op1", "userDefault" )
        Gaffer.Metadata.deregisterValue( GafferTest.AddNode, "op2", "userDefault" )
        Gaffer.Metadata.deregisterValue( GafferTest.CompoundPlugNode, "p.s", "userDefault" )
if __name__ == "__main__":
    # Bug fix: 'unittest' was referenced without being imported anywhere in
    # this module, so running the file directly raised NameError.
    import unittest
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
solace.auth
~~~~~~~~~~~
This module implements the auth system.
:copyright: (c) 2009 by Plurk Inc., see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
from threading import Lock
from werkzeug import import_string, redirect
from werkzeug.contrib.securecookie import SecureCookie
from datetime import datetime
from solace import settings
from solace.i18n import lazy_gettext
from solace.utils.support import UIException
from solace.utils.mail import send_email
_auth_system = None
_auth_select_lock = Lock()
def get_auth_system():
    """Return the lazily-created global auth system instance."""
    global _auth_system
    with _auth_select_lock:
        # Instantiate the configured auth system class on first use only.
        if _auth_system is None:
            system_cls = import_string(settings.AUTH_SYSTEM)
            _auth_system = system_cls()
        return _auth_system
def refresh_auth_system():
    """Tears down the auth system after a config change.

    The next call to get_auth_system() re-creates it from the (possibly
    changed) settings.
    """
    global _auth_system
    # Bug fix: the module-level lock is named _auth_select_lock; the
    # previous code referenced an undefined _auth_system_lock and raised
    # NameError whenever this function was called.
    with _auth_select_lock:
        _auth_system = None
def check_used_openids(identity_urls, ignored_owner=None):
    """Returns a set of all the identity URLs from the list of identity
    URLs that are already associated on the system.  If an owner is given,
    items owned by that user are excluded from the result.
    """
    query = _OpenIDUserMapping.query.filter(
        _OpenIDUserMapping.identity_url.in_(identity_urls)
    )
    if ignored_owner:
        query = query.filter(_OpenIDUserMapping.user != ignored_owner)
    return {mapping.identity_url for mapping in query.all()}
class LoginUnsucessful(UIException):
    """Raised if the login failed."""
    # NOTE(review): the class name misspells "Unsuccessful"; it is kept
    # as-is because callers elsewhere raise/catch it under this name.
class AuthSystemBase(object):
    """The base auth system.

    Most functionality is described in the methods and properties you have
    to override for subclasses. A special notice applies for user
    registration.

    Different auth systems may create users at different stages (first login,
    register etc.). At that point (where the user is created in the
    database) the system has to call `after_register` and pass it the user
    (and request) object. That method handles the confirmation mails and
    whatever else is required. If you do not want your auth system to send
    confirmation mails you still have to call the method but tell the user
    of your class to disable registration activation in the configuration.

    `after_register` should *not* be called if the registration process
    should happen transparently for the user. eg, the user has already
    registered somewhere else and the Solace account is created based on the
    already existing account on first login.
    """

    #: for auth systems that are managing the email externally this
    #: attributes has to set to `True`. In that case the user will
    #: be unable to change the email from the profile. (True for
    #: the plurk auth, possible OpenID support and more.)
    email_managed_external = False

    #: like `email_managed_external` but for the password
    password_managed_external = False

    #: set to True to indicate that this login system does not use
    #: a password. This will also affect the standard login form
    #: and the standard profile form.
    passwordless = False

    #: if you don't want to see a register link in the user interface
    #: for this auth system, you can disable it here.
    show_register_link = True

    @property
    def can_reset_password(self):
        """You can either override this property or leave the default
        implementation that should work most of the time. By default
        the auth system can reset the password if the password is not
        externally managed and not passwordless.
        """
        return not (self.passwordless or self.password_managed_external)

    def reset_password(self, request, user):
        # NOTE(review): this body is a verbatim copy of `after_register`
        # below (it sends the *registration* confirmation mail). It looks
        # like a copy-paste error — a password-reset implementation would
        # presumably send a reset/recovery mail instead. TODO confirm
        # against the upstream project before changing behavior.
        if settings.REGISTRATION_REQUIRES_ACTIVATION:
            user.is_active = False
            confirmation_url = url_for('core.activate_user', email=user.email,
                                       key=user.activation_key, _external=True)
            send_email(_(u'Registration Confirmation'),
                       render_template('mails/activate_user.txt', user=user,
                                       confirmation_url=confirmation_url),
                       user.email)
            request.flash(_(u'A mail was sent to %s with a link to finish the '
                            u'registration.') % user.email)
        else:
            request.flash(_(u'You\'re registered. You can login now.'))

    def before_register(self, request):
        """Invoked before the standard register form processing. This is
        intended to be used to redirect to an external register URL if
        if the syncronization is only one-directional. If this function
        returns a response object, Solace will abort standard registration
        handling.
        """

    def register(self, request):
        """Called like a view function with only the request. Has to do the
        register heavy-lifting. Auth systems that only use the internal
        database do not have to override this method. Implementers that
        override this function *have* to call `after_register` to finish
        the registration of the new user. If `before_register` is unnused
        it does not have to be called, otherwise as documented.
        """
        rv = self.before_register(request)
        if rv is not None:
            return rv
        form = RegistrationForm()
        if request.method == 'POST' and form.validate():
            user = User(form['username'], form['email'], form['password'])
            self.after_register(request, user)
            session.commit()
            # NOTE(review): `rv` is necessarily None here (checked above),
            # so this branch is dead. It was probably meant to capture the
            # return value of `after_register` — confirm before changing.
            if rv is not None:
                return rv
            return form.redirect('kb.overview')
        return render_template('core/register.html', form=form.as_widget())

    def after_register(self, request, user):
        """Handles activation."""
        if settings.REGISTRATION_REQUIRES_ACTIVATION:
            # Require the user to confirm via the mailed activation link.
            user.is_active = False
            confirmation_url = url_for('core.activate_user', email=user.email,
                                       key=user.activation_key, _external=True)
            send_email(_(u'Registration Confirmation'),
                       render_template('mails/activate_user.txt', user=user,
                                       confirmation_url=confirmation_url),
                       user.email)
            request.flash(_(u'A mail was sent to %s with a link to finish the '
                            u'registration.') % user.email)
        else:
            request.flash(_(u'You\'re registered. You can login now.'))

    def get_login_form(self):
        """Return the login form to be used by `login`."""
        return StandardLoginForm()

    def before_login(self, request):
        """If this login system uses an external login URL, this function
        has to return a redirect response, otherwise None. This is called
        before the standard form handling to allow redirecting to an
        external login URL. This function is called by the default
        `login` implementation.

        If the actual login happens here because of a back-redirect the
        system might raise a `LoginUnsucessful` exception.
        """

    def login(self, request):
        """Like `register` just for login."""
        form = self.get_login_form()

        # some login systems require an external login URL. For example
        # the one we use as Plurk.
        try:
            rv = self.before_login(request)
            if rv is not None:
                return rv
        except LoginUnsucessful, e:
            form.add_error(unicode(e))

        # only validate if the before_login handler did not already cause
        # an error. In that case there is not much win in validating
        # twice, it would clear the error added.
        if form.is_valid and request.method == 'POST' and form.validate():
            try:
                rv = self.perform_login(request, **form.data)
            except LoginUnsucessful, e:
                form.add_error(unicode(e))
            else:
                session.commit()
                if rv is not None:
                    return rv
                request.flash(_(u'You are now logged in.'))
                return form.redirect('kb.overview')

        return self.render_login_template(request, form)

    def perform_login(self, request, **form_data):
        """If `login` is not overridden, this is called with the submitted
        form data and might raise `LoginUnsucessful` so signal a login
        error.
        """
        raise NotImplementedError()

    def render_login_template(self, request, form):
        """Renders the login template"""
        return render_template('core/login.html', form=form.as_widget())

    def get_edit_profile_form(self, user):
        """Returns the profile form to be used by the auth system."""
        return StandardProfileEditForm(user)

    def edit_profile(self, request):
        """Invoked like a view and does the profile handling."""
        form = self.get_edit_profile_form(request.user)
        if request.method == 'POST' and form.validate():
            request.flash(_(u'Your profile was updated'))
            form.apply_changes()
            session.commit()
            return form.redirect(form.user)
        return self.render_edit_profile_template(request, form)

    def render_edit_profile_template(self, request, form):
        """Renders the template for the profile edit page."""
        return render_template('users/edit_profile.html',
                               form=form.as_widget())

    def logout(self, request):
        """This has to logout the user again. This method must not fail.
        If the logout requires the redirect to an external resource it
        might return a redirect response. That resource then should not
        redirect back to the logout page, but instead directly to the
        **current** `request.next_url`.

        Most auth systems do not have to implement this method. The
        default one calls `set_user(request, None)`.
        """
        self.set_user(request, None)

    def get_user(self, request):
        """If the user is logged in this method has to return the user
        object for the user that is logged in. Beware: the request
        class provides some attributes such as `user` and `is_logged_in`
        you may never use from this function to avoid recursion. The
        request object will call this function for those two attributes.

        If the user is not logged in, the return value has to be `None`.
        This method also has to check if the user was not banned. If the
        user is banned, it has to ensure that `None` is returned and
        should ensure that future requests do not trigger this method.

        Most auth systems do not have to implement this method.
        """
        user_id = request.session.get('user_id')
        if user_id is not None:
            user = User.query.get(user_id)
            if user is not None and user.is_banned:
                # Banned: drop the session key so subsequent requests do
                # not look the user up again; fall through to return None.
                del request.session['user_id']
            else:
                return user

    def set_user(self, request, user):
        """Can be used by the login function to set the user. This function
        should only be used for auth systems internally if they are not using
        an external session.
        """
        if user is None:
            request.session.pop('user_id', None)
        else:
            user.last_login = datetime.utcnow()
            request.session['user_id'] = user.id
class InternalAuth(AuthSystemBase):
    """Authenticate against the internal database."""

    def perform_login(self, request, username, password):
        """Look the user up by name, run the credential checks in order
        and log the user in on success.
        """
        candidate = User.query.filter_by(username=username).first()
        # Check order matters: each failure mode produces its own message.
        if candidate is None:
            raise LoginUnsucessful(_(u'No user named %s') % username)
        if not candidate.is_active:
            raise LoginUnsucessful(_(u'The user is not yet activated.'))
        if not candidate.check_password(password):
            raise LoginUnsucessful(_(u'Invalid password'))
        if candidate.is_banned:
            raise LoginUnsucessful(_(u'The user got banned from the system.'))
        self.set_user(request, candidate)
# the openid support will be only available if the openid library is installed.
# otherwise we create a dummy auth system that fails upon usage.
try:
    from solace._openid_auth import OpenIDAuth
except ImportError:
    class OpenIDAuth(AuthSystemBase):
        # Placeholder keeping the name importable; fails loudly only when
        # someone actually configures the OpenID auth system.
        def __init__(self):
            raise RuntimeError('python-openid library not installed but '
                               'required for openid support.')
# circular dependencies
from solace.application import url_for
from solace.models import User, _OpenIDUserMapping
from solace.database import session
from solace.i18n import _
from solace.forms import StandardLoginForm, RegistrationForm, \
StandardProfileEditForm
from solace.templating import render_template
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Test configuration generator
Usage: generateTestconf sourcedir testconfigurationfile targetdir
Generates a directory that includes models and data tables ready for a
test run. The files are originally located in sourcedir.
"""
import tema.eini.einiparser as einiparser
import shutil
import os
import stat
import sys
import tema.lsts.lsts as lsts
multipart_contents = r"""
[targets: type]
@TARGET@: rm
[targets: actionmachines[]]
@TARGET@: @ACTIONMACHINES@
[general: value]
type: single
result: rules.ext
taskswitchergen: TaskSwitcherGEN
taskswitchergenrm: TaskSwitcherGEN-rm
"""
multi_contents = r"""
[targets: conffile]
@TARGETS@
[general: value]
type: multi
result: combined-rules.ext
"""
GNUmakefile_contents = r"""
DEVICE=rm
RESULT=rules.ext
TASKSWITCHERGEN=TaskSwitcherGEN
TASKSWITCHERGENRM=TaskSwitcherGEN-rm
ACTIONMACHINELIST=$(filter-out $(TASKSWITCHERGEN), $(filter-out %-awgt, $(basename $(wildcard *.lsts))))
MAKEFILE_PATH:=$(shell python -c "import tema.modelutils;print tema.modelutils.MAKEFILE_PATH;")
include $(MAKEFILE_PATH)/GNUmakefile.include
"""
multitgt_GNUmakefile_contents = r"""
TARGETRULES=$(addsuffix /rules.ext, $(TARGETLIST))
RESULT=combined-rules.ext
PC_RULES_PATH:=$(shell python -c "import tema.modelutils;print tema.modelutils.PC_RULES_PATH;")
TEMA_ENGINE_HOME:=$(shell python -c "import os;import tema;print os.path.dirname(tema.__file__);")
MAKEFILE_PATH:=$(shell python -c "import tema.modelutils;print tema.modelutils.MAKEFILE_PATH;")
combined-rules.ext: $(TARGETRULES) multitarget-rules.ext
# remove duplicate rows and rename Messaging:start_awX -> Frank/Messaging:start_awX
cat $^ | python -c 'import sys; l=list(set(sys.stdin.readlines())); l.sort(); [sys.stdout.write(r) for r in l[::-1]]' | sed 's:^(\([a-zA-Z0-9_]*\)/\([^/]*\),\(.*\)-> "\(\2\):(\1/\2,\3-> "\1/\4:g' > $@
multitarget-rules.ext: multitarget-rules.rext
python $(TEMA_ENGINE_HOME)/modelutils/rextendedrules.py $< \
| python $(TEMA_ENGINE_HOME)/modelutils/renamerules.py >$@
multitarget-rules.rext: $(TARGETRULES) TargetSwitcher-awgt.lsts TargetSwitcher-rm.lsts Synchronizer-awgt.lsts Synchronizer-rm.lsts.nolayout $(PC_RULES_PATH)/GenericPCRules-Multitarget
$(RM) $@
for target in $(TARGETLIST); do \
egrep "^$$target/.+=\"" $$target/rules.ext >> $@; \
done
echo 'TargetSwitcher="TargetSwitcher-awgt.lsts"' >> $@
echo 'TargetSwitcher-rm="TargetSwitcher-rm.lsts"' >> $@
echo 'Synchronizer="Synchronizer-awgt.lsts"' >> $@
echo 'Synchronizer-rm="Synchronizer-rm.lsts.nolayout"' >> $@
cat $(PC_RULES_PATH)/GenericPCRules-Multitarget >> $@
Synchronizer.lsts:
@echo "Synchronizer.lsts is missing, check generateTestconf."
Synchronizer-rm.lsts:
@echo "Synchronizer-rm.lsts is missing, check generateTestconf."
%-awgt.lsts: %.lsts
python $(TEMA_ENGINE_HOME)/modelutils/gt.py $< $@ \
'P(s0,"sv$${.*}") -> P(s0,"sv$$=1.") T(s0,"start_sv$$=1.",s1) T(s1,"end_sv$$=1.",s0)' \
'T(s0,"aw$${.*}",s1)T(s0,"~aw$$=1.",s2) -> T(s0,"start_aw$$=1.",s_new)T(s_new,"end_aw$$=1.",s1)T(s_new,"~end_aw$$=1.",s2)' \
'T(s0,"aw$${.*}",s1)->T(s0,"start_aw$$=1.",s_new)T(s_new,"end_aw$$=1.",s1)' \
'T(s0,"WAKEts",s1) -> T(s0,"WAKEtsCANWAKE",s_new) T(s_new,"WAKEtsWAKE",s1)'
%.lsts.iso: %.lsts
iconv -f UTF-8 -t ISO-8859-1 $< > $@ || cat $< > $@
%.lsts.nolayout: %.lsts.iso
python $(TEMA_ENGINE_HOME)/modelutils/gt.py $< $@ \
'T(s0,"kw_return true",s1)T(s1,"end_aw$${.*}",s2)->T(s0,"end_aw$$=1.",s2)' \
'T(s0,"kw_return false",s1)T(s1,"end_aw$${.*}",s2)->T(s0,"~end_aw$$=1.",s2)'
%/rules.ext:
$(MAKE) -C $(subst /rules.ext,,$@)
mv $@ $@.orig
gawk -v TARGET=$(subst /rules.ext,,$@) '/^[0-9]+=/{print TARGET"/"gensub("=\"","=\""TARGET"/","1")}/") -> "/{s=gensub("\\(([0-9]+),\"","("TARGET"/\\1,\"","g"); print s} !(/^[0-9]+=/ || /") -> "/){print $$0}' < $@.orig > $@.pass1
gawk -v TARGET=$(subst /rules.ext,,$@) '/$(subst /rules.ext,,$@)\/[0-9]+=\"/{split($$0,a,"\""); newkey=gensub(".lsts.nolayout","","1",gensub("-awgt.lsts","","g",a[2]));keys[substr(a[1],1,length(a[1])-1)","]=newkey","; print newkey"=\""a[2]"\""} /") -> "/{for (k in keys) gsub(k,keys[k]); print $$0} !(/$(subst /rules.ext,,$@)\// || /") -> "/){print $$0}' < $@.pass1 > $@
TargetSwitcher.lsts:
python $(TEMA_ENGINE_HOME)/modelutils/generatetaskswitcher.py --am $(TARGETLIST) | sed -e 's/SLEEPts/SLEEPtgts/g' -e 's/WAKEts/WAKEtgts/g' > $@
TargetSwitcher-rm.lsts:
python $(TEMA_ENGINE_HOME)/modelutils/generatetaskswitcher.py --rm $(TARGETLIST) | sed -e "s/LaunchApp '\([^']*\)'/SetTarget \$$(OUT=\1.id)\$$/g" > $@
clean:
for tgt in $(TARGETLIST); do \
$(MAKE) -C $$tgt clean; \
done
$(RM) combined-rules.ext TargetSwitcher.lsts TargetSwitcher-rm.lsts
include $(MAKEFILE_PATH)/GNUmakefile-utils.include
"""
def error(msg):
    """Report *msg* on stderr and terminate the program with status 1."""
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
def generate_synchronizer(iterable_target_names):
    """generate_synchronizer returns lsts_writer object"""
    # Builds the multitarget synchronizer state machine: a fixed core
    # (begin synchronization -> unprime targets -> verification chain)
    # plus, per target, a verify action pair and success/failure states.
    outlsts = lsts.writer()
    actionnames = ["tau",
                   "WAKEtgt<Begin Synchronization>",
                   "REQALLtgt<Unprime Targets>",
                   "SLEEPtgt<End Synchronization: Success>",
                   "SLEEPtgt<End Synchronization: Failure>",
                   ]
    # State 2 ends the synchronization successfully; action 4 signals failure.
    syncsucc_state = 2
    syncfailed_action = 4
    # (dest_state, action_number)
    transitions = [[(1,1)],[(3,2)],[(0,3)]]
    stateprops = {}
    for t in iterable_target_names:
        # Action numbers are assigned in insertion order into actionnames.
        verifysucc_action = len(actionnames)
        actionnames.append("awVerify%s" % t)
        verifyfail_action = len(actionnames)
        actionnames.append("~awVerify%s" % t)
        reqprime_action = len(actionnames)
        actionnames.append("REQALLtgt<Prime %s>" % t)
        # State numbers likewise follow the current transition-list length.
        verifysucc_state = len(transitions)+1
        verifyfail_state = len(transitions)+2
        stateprops['successful verification of %s' % t] = [verifysucc_state]
        stateprops['failed verification of %s' % t] = [verifyfail_state]
        # from last failed verification state:
        transitions.append([(verifysucc_state, verifysucc_action),
                            (verifyfail_state, verifyfail_action)])
        # from verifysucc_state:
        transitions.append([(syncsucc_state, reqprime_action)])
        # from last failed verification state fail the whole thing:
        transitions.append([(0, syncfailed_action)])
    outlsts.set_actionnames(actionnames)
    outlsts.set_transitions(transitions)
    outlsts.set_stateprops(stateprops)
    return outlsts
def generate_synchronizer_rm(iterable_target_names):
    """returns lstswriter"""
    # Builds the refinement machine for the synchronizer: for each target a
    # start_awVerify/end_awVerify pair that checks whether syncTarget
    # matches the target id and returns true/false accordingly.
    outlsts = lsts.writer()
    actionnames = ["tau",
                   "kw_return false",
                   "kw_return true"]
    return_false_action = 1
    return_true_action = 2
    # transitions[0] is the initial state; a 5-state chain is appended per
    # target, with state numbers derived from the list length at append time.
    transitions = [[]]
    for t in iterable_target_names:
        start_aw_action = len(actionnames)
        actionnames.append("start_awVerify%s" % t)
        end_aw_action = len(actionnames)
        actionnames.append("end_awVerify%s" % t)
        is_true_action = len(actionnames)
        actionnames.append("kw_IsTrue $(OUT = (syncTarget == %s.id))$" % t)
        is_not_true_action = len(actionnames)
        actionnames.append("~kw_IsTrue $(OUT = (syncTarget == %s.id))$" % t)
        # from initial state
        transitions[0].append((len(transitions), start_aw_action))
        transitions.append([(len(transitions)+1, is_true_action),
                            (len(transitions)+3, is_not_true_action)])
        transitions.append([(len(transitions)+1, return_true_action)])
        transitions.append([(0, end_aw_action)])
        transitions.append([(len(transitions)+1, return_false_action)])
        transitions.append([(0, end_aw_action)])
    outlsts.set_actionnames(actionnames)
    outlsts.set_transitions(transitions)
    return outlsts
def copy_files(filelist, targetdir, rename_attarget_to=None, allow_nonexistence=False):
    """filelist is a list of strings, targetdir a string"""
    for f in filelist:
        # Probe the file first: skip directories (errno 21, EISDIR) and,
        # when allowed, missing files (errno 2, ENOENT); anything else
        # is re-raised.
        try: file(f)
        except IOError, e:
            if e.errno==21: continue # file is a directory
            elif e.errno==2 and allow_nonexistence: continue # file does not exist
            else: raise e
        try:
            if rename_attarget_to==None:
                shutil.copy(f, targetdir)
            else:
                # Substitute the @TARGET placeholder and write the result
                # under the bare file name inside targetdir.
                new_contents = file(f).read().replace('@TARGET',rename_attarget_to)
                # this might be removable code...
                # new_contents = new_contents.replace('"SLEEPapp<','"SLEEPapp<%s: ' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"WAKEapp<','"WAKEapp<%s: ' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"REQ<','"REQ<%s: ' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"REQALL<','"REQALL<%s: ' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"ALLOW<','"ALLOW<%s: ' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"SLEEPts"','"SLEEPts<%s>"' % (rename_attarget_to,))
                # new_contents = new_contents.replace('"WAKEts"','"WAKEts<%s>"' % (rename_attarget_to,))
                file(targetdir+"/"+f[f.rfind('/')+1:],"w").write(new_contents)
        except IOError:
            error("Failed to copy file %s to directory %s" % (f,targetdir))
def mkdir(dirname):
    """Create *dirname* if it does not exist; abort via error() on failure.

    An already-existing directory is accepted silently; an existing
    non-directory of the same name is an error.
    """
    try:
        try:
            if not stat.S_ISDIR(os.lstat(dirname)[stat.ST_MODE]):
                raise Exception("Illegal file type")
        except OSError, e:
            if e.errno == 2: # file dirname does not exist
                os.mkdir(dirname)
            else:
                raise e
    except Exception, e:
        error("Failed to create directory %s, %s" % (dirname,e))
def create_file(filename, file_contents):
    """Write *file_contents* to *filename*, aborting via error() on failure."""
    try:
        file(filename,"w").write(file_contents)
    except Exception, e:
        error("Failed to create file %s, %s" % (filename,e))
def generatetestconf(sourcedir,testconfigurationfile,targetdir):
    """Populate *targetdir* with models/data tables from *sourcedir*
    according to the parsed test configuration file.
    """
    targetdir = os.path.abspath(targetdir)
    result = einiparser.Parser().parse(file(testconfigurationfile))
    # Touch the mandatory sections so a malformed configuration fails fast
    # (KeyError) instead of deep inside the generation below.
    result['targets'], result['data']['datatables'], result['data']['localizationtables']
    assert('type' in result['targets'].fields())
    assert('actionmachines' in result['targets'].fields())
    curdir = os.getcwd()
    try:
        # Source files are referenced relative to sourcedir.
        os.chdir(sourcedir)
        # create directory structure
        mkdir(targetdir)
        for target in result['targets']:
            mkdir(targetdir+"/"+target)
            mkdir(targetdir+"/"+target+"/rm")
        create_file(targetdir + "/targets.td", "targets: " + str([target for target in result['targets'].keys()]))
        # copy datatables
        copy_files(result['data']['datatables']['names'], targetdir)
        # copy localization tables
        copy_files(result['data']['localizationtables']['names'], targetdir)
        # copy action machines
        for target in result['targets']:
            copy_files(result['targets'][target]['actionmachines'],
                       targetdir+"/"+target,
                       rename_attarget_to = target)
            copy_files([f.rsplit('.', 1)[0] + '.info' for f in result['targets'][target]['actionmachines']],
                       targetdir+"/"+target)
            # refinement-machine files come from the directory named by the
            # target's "type" field
            copy_files([result['targets'][target]['type']+"/"+f
                        for f in os.listdir(result['targets'][target]['type'])],
                       targetdir+"/"+target+"/rm",
                       rename_attarget_to = target)
            # Multipart conffile for model composing
            am = ",".join(result['targets'][target]['actionmachines'])
            create_file(os.path.join(targetdir,target,"compose.conf"),
                        multipart_contents.replace("@TARGET@",target).replace("@ACTIONMACHINES@",am))
            # GNUmakefile
            create_file(os.path.join(targetdir,target,"GNUmakefile"),
                        "TGTMAGIC=tgts %s\n%s" % (target,GNUmakefile_contents) )
        # Main conffile for model composing
        create_file(os.path.join(targetdir, "compose.conf" ),
                    multi_contents.replace("@TARGETS@","\n".join(["%s: compose.conf" % t for t in result['targets']])))
        create_file(os.path.join(targetdir, "GNUmakefile" ),
                    "TARGETLIST=" + " ".join([t for t in result['targets']]) \
                    + "\n" + multitgt_GNUmakefile_contents)
        # generate synchronizer-synchronizer and its refinement machine
        lstswriter = generate_synchronizer(result['targets'])
        lstswriter.write(file(targetdir+"/Synchronizer.lsts",'w'))
        lstswriter = generate_synchronizer_rm(result['targets'])
        lstswriter.write(file(targetdir+"/Synchronizer-rm.lsts",'w'))
    finally:
        # Always restore the original working directory.
        os.chdir(curdir)
def main():
    """Command-line entry point: sourcedir testconfigurationfile targetdir."""
    try:
        generatetestconf(*sys.argv[1:])
    except Exception, e:
        # On any failure print the usage text (module docstring) and exit 1.
        print __doc__
        error("Error: %s" % e)
if __name__ == '__main__':
main()
| |
"""Tests for Messaging Service Scripts end-to-end main process code."""
import asyncio
import functools
import itertools
import json
import multiprocessing as mp
import os
import typing
from typing import Any
import unittest
from unittest import mock
from perfkitbenchmarker.scripts.messaging_service_scripts.common import app
from perfkitbenchmarker.scripts.messaging_service_scripts.common import errors
from perfkitbenchmarker.scripts.messaging_service_scripts.common.e2e import latency_runner
from perfkitbenchmarker.scripts.messaging_service_scripts.common.e2e import main_process
from perfkitbenchmarker.scripts.messaging_service_scripts.common.e2e import protocol
from perfkitbenchmarker.scripts.messaging_service_scripts.common.e2e import publisher
from perfkitbenchmarker.scripts.messaging_service_scripts.common.e2e import receiver
from tests import pkb_common_test_case
# Expected aggregated end-to-end latency/acknowledge metrics fixture used by
# the latency-runner tests (one 500 ms e2e sample, one 1000 ms ack sample).
AGGREGATE_E2E_METRICS = {
    'e2e_latency_failure_counter': {
        'value': 0,
        'unit': '',
        'metadata': {}
    },
    'e2e_latency_mean': {
        'value': 500.0,
        'unit': 'milliseconds',
        'metadata': {
            'samples': [500]
        }
    },
    'e2e_latency_mean_without_cold_start': {
        'value': 500.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_latency_p50': {
        'value': 500.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_latency_p99': {
        'value': 500.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_latency_p99_9': {
        'value': 500.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_latency_percentage_received': {
        'value': 100.0,
        'unit': '%',
        'metadata': {}
    },
    'e2e_acknowledge_latency_failure_counter': {
        'value': 0,
        'unit': '',
        'metadata': {}
    },
    'e2e_acknowledge_latency_mean': {
        'value': 1000.0,
        'unit': 'milliseconds',
        'metadata': {
            'samples': [1000]
        }
    },
    'e2e_acknowledge_latency_mean_without_cold_start': {
        'value': 1000.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_acknowledge_latency_p50': {
        'value': 1000.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_acknowledge_latency_p99': {
        'value': 1000.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_acknowledge_latency_p99_9': {
        'value': 1000.0,
        'unit': 'milliseconds',
        'metadata': {}
    },
    'e2e_acknowledge_latency_percentage_received': {
        'value': 100.0,
        'unit': '%',
        'metadata': {}
    }
}
def AsyncTest(test_method):
  """Run an async method synchronously for testing."""

  @functools.wraps(test_method)
  def _RunSynchronously(self, *args, **kwargs):
    # Build the coroutine, then drive it to completion on a fresh loop.
    coro = test_method(self, *args, **kwargs)
    return asyncio.run(coro)

  return _RunSynchronously
def Just(value=None):
  """Wrap a value (by default None) in a future that returns immediately."""
  resolved = asyncio.Future()
  resolved.set_result(value)
  return resolved
def GetMockCoro(return_value=None):
  """Gets a Mock Coroutine."""
  # The mock wraps a real coroutine function so awaiting the call yields
  # return_value while call arguments are still recorded for assertions.
  async def _Stub(*args, **kwargs):  # pylint: disable=unused-argument
    return return_value

  return mock.Mock(wraps=_Stub)
class MessagingServiceScriptsE2EMainProcessTest(
    pkb_common_test_case.PkbCommonTestCase):
  """Tests for the main-process worker wrappers (publisher/receiver)."""

  def setUp(self):
    super().setUp()
    # Patch the multiprocessing primitives so no real subprocess/pipe is
    # created; workers under test get named mocks instead.
    self.pipe_mock = self.enter_context(
        mock.patch.object(mp, 'Pipe', side_effect=self._GetPipeMocks))
    self.process_mock = self.enter_context(mock.patch.object(mp, 'Process'))
    self.subprocess_mock = self.process_mock.return_value
    self.flags_mock = self.enter_context(mock.patch('absl.flags.FLAGS'))
    self.app_mock = self.enter_context(mock.patch.object(app, 'App'))

  def _GetPipeMocks(self):
    # Stand-in for mp.Pipe(): one named mock per connection end.
    return mock.Mock(name='pipe_writer'), mock.Mock(name='pipe_reader')

  # Typed accessors for the worker's pipe-end attributes (cast to Any so
  # mock-only helpers like _extract_mock_name don't upset type checkers).
  def _GetSubprocessInWriter(self, worker):
    return typing.cast(Any, getattr(worker, 'subprocess_in_writer'))

  def _GetSubprocessInReader(self, worker):
    return typing.cast(Any, getattr(worker, 'subprocess_in_reader'))

  def _GetSubprocessOutWriter(self, worker):
    return typing.cast(Any, getattr(worker, 'subprocess_out_writer'))

  def _GetSubprocessOutReader(self, worker):
    return typing.cast(Any, getattr(worker, 'subprocess_out_reader'))

  @mock.patch.object(
      main_process.BaseWorker, '_join_subprocess', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker, '_read_subprocess_output', return_value=Just())
  @AsyncTest
  async def testStartStop(self, read_subprocess_output_mock,
                          join_subprocess_mock):
    worker = main_process.PublisherWorker()
    # Two pipes are created: one for subprocess input, one for output.
    self.assertEqual(self.pipe_mock.call_count, 2)
    self.assertEqual(self._GetSubprocessInWriter(worker)._extract_mock_name(),
                     'pipe_writer')
    self.assertEqual(self._GetSubprocessInReader(worker)._extract_mock_name(),
                     'pipe_reader')
    self.assertEqual(self._GetSubprocessOutWriter(worker)._extract_mock_name(),
                     'pipe_writer')
    self.assertEqual(self._GetSubprocessOutReader(worker)._extract_mock_name(),
                     'pipe_reader')
    self.assertEqual(worker.subprocess_func, publisher.main)
    await worker.start()
    self.process_mock.assert_called_once_with(
        target=publisher.main,
        kwargs={
            'input_conn': worker.subprocess_in_reader,
            'output_conn': worker.subprocess_out_writer,
            'serialized_flags': self.flags_mock.flags_into_string(),
            'app': self.app_mock.get_instance.return_value,
            'pinned_cpus': None,
        })
    read_subprocess_output_mock.assert_called_once_with(protocol.Ready, None)
    await worker.stop()
    self.subprocess_mock.terminate.assert_called_once_with()
    join_subprocess_mock.assert_called_once_with(None)

  @mock.patch.object(
      main_process.BaseWorker, '_join_subprocess', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker, '_read_subprocess_output', return_value=Just())
  @AsyncTest
  async def testStartWithPinnedCpus(self, *_):
    # The pinned CPU set must be forwarded verbatim to the subprocess.
    worker = main_process.ReceiverWorker({3, 1, 4})
    await worker.start()
    self.process_mock.assert_called_once_with(
        target=receiver.main,
        kwargs={
            'input_conn': worker.subprocess_in_reader,
            'output_conn': worker.subprocess_out_writer,
            'serialized_flags': self.flags_mock.flags_into_string(),
            'app': self.app_mock.get_instance.return_value,
            'pinned_cpus': {3, 1, 4},
        })
    await worker.stop()

  @mock.patch.object(
      main_process.BaseWorker,
      '_join_subprocess',
      side_effect=(errors.EndToEnd.SubprocessTimeoutError, Just()))
  @mock.patch.object(
      main_process.BaseWorker, '_read_subprocess_output', return_value=Just())
  @AsyncTest
  async def testStopKill(self, read_subprocess_output_mock,
                         join_subprocess_mock):
    # First join times out -> worker escalates from terminate() to kill().
    worker = main_process.PublisherWorker()
    await worker.start()
    read_subprocess_output_mock.assert_called_once_with(protocol.Ready, None)
    await worker.stop()
    self.subprocess_mock.terminate.assert_called_once_with()
    self.subprocess_mock.kill.assert_called_once_with()
    join_subprocess_mock.assert_has_calls([mock.call(None), mock.call(None)])

  @mock.patch.object(asyncio, 'sleep', wraps=asyncio.sleep)
  @mock.patch.object(
      main_process.BaseWorker,
      '_join_subprocess',
      side_effect=(errors.EndToEnd.SubprocessTimeoutError, Just()))
  @AsyncTest
  async def testReadSubprocessOutput(self, _, sleep_mock):
    worker = main_process.PublisherWorker()
    with mock.patch.object(
        main_process.BaseWorker, '_read_subprocess_output',
        return_value=Just()):
      await worker.start()
    # First poll reports no data (worker sleeps once), second succeeds.
    worker.subprocess_out_reader.poll.side_effect = [False, True]
    worker.subprocess_out_reader.recv.return_value = 'hola'
    self.assertEqual(await worker._read_subprocess_output(str), 'hola')
    sleep_mock.assert_called_once_with(worker.SLEEP_TIME)
    self.assertEqual(self._GetSubprocessOutReader(worker).poll.call_count, 2)
    self._GetSubprocessOutReader(worker).recv.assert_called_once_with()
    await worker.stop()

  @mock.patch.object(
      main_process.BaseWorker,
      '_join_subprocess',
      side_effect=(errors.EndToEnd.SubprocessTimeoutError, Just()))
  @AsyncTest
  async def testReadSubprocessOutputTimeout(self, _):
    worker = main_process.PublisherWorker()
    with mock.patch.object(
        main_process.BaseWorker, '_read_subprocess_output',
        return_value=Just()):
      await worker.start()
    # Output never becomes available -> the 0.2 s timeout must fire.
    worker.subprocess_out_reader.poll.side_effect = itertools.repeat(False)
    self._GetSubprocessOutReader(worker).recv.assert_not_called()
    with self.assertRaises(errors.EndToEnd.SubprocessTimeoutError):
      await worker._read_subprocess_output(str, 0.2)
    await worker.stop()

  @mock.patch.object(
      main_process.BaseWorker,
      '_join_subprocess',
      side_effect=(errors.EndToEnd.SubprocessTimeoutError, Just()))
  @AsyncTest
  async def testReadSubprocessUnexpectedObject(self, _):
    worker = main_process.PublisherWorker()
    with mock.patch.object(
        main_process.BaseWorker, '_read_subprocess_output',
        return_value=Just()):
      await worker.start()
    # An int arrives where a str was expected -> typed error raised.
    worker.subprocess_out_reader.poll.return_value = True
    worker.subprocess_out_reader.recv.return_value = 42
    with self.assertRaises(errors.EndToEnd.ReceivedUnexpectedObjectError):
      await worker._read_subprocess_output(str)
    self._GetSubprocessOutReader(worker).recv.assert_called_once_with()
    await worker.stop()

  @mock.patch.object(main_process.BaseWorker, 'start', return_value=Just())
  @mock.patch.object(main_process.BaseWorker, 'stop', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker,
      '_read_subprocess_output',
      return_value=Just(protocol.AckPublish(publish_timestamp=1000)))
  @AsyncTest
  async def testPublish(self, read_subprocess_output_mock, *_):
    worker = main_process.PublisherWorker()
    await worker.start()
    # publish() sends a Publish command and returns the acked timestamp.
    self.assertEqual(await worker.publish(), 1000)
    self._GetSubprocessInWriter(worker).send.assert_called_once_with(
        protocol.Publish())
    read_subprocess_output_mock.assert_called_once_with(protocol.AckPublish,
                                                        None)
    await worker.stop()

  @mock.patch.object(main_process.BaseWorker, 'start', return_value=Just())
  @mock.patch.object(main_process.BaseWorker, 'stop', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker,
      '_read_subprocess_output',
      return_value=Just(protocol.AckPublish(publish_error='blahblah')))
  @AsyncTest
  async def testPublishError(self, *_):
    # An AckPublish carrying publish_error must surface as a typed failure.
    worker = main_process.PublisherWorker()
    await worker.start()
    with self.assertRaises(errors.EndToEnd.SubprocessFailedOperationError):
      await worker.publish()
    await worker.stop()

  @mock.patch.object(main_process.BaseWorker, 'start', return_value=Just())
  @mock.patch.object(main_process.BaseWorker, 'stop', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker, '_read_subprocess_output', return_value=Just())
  @AsyncTest
  async def testStartConsumption(self, read_subprocess_output_mock, *_):
    worker = main_process.ReceiverWorker()
    await worker.start()
    await worker.start_consumption()
    self._GetSubprocessInWriter(worker).send.assert_called_once_with(
        protocol.Consume())
    read_subprocess_output_mock.assert_called_once_with(protocol.AckConsume,
                                                        None)
    await worker.stop()

  @mock.patch.object(main_process.BaseWorker, 'start', return_value=Just())
  @mock.patch.object(main_process.BaseWorker, 'stop', return_value=Just())
  @mock.patch.object(
      main_process.BaseWorker,
      '_read_subprocess_output',
      return_value=Just(protocol.ReceptionReport(receive_error='blahblah')))
  @AsyncTest
  async def testReceive(self, *_):
    # A ReceptionReport carrying receive_error must surface as a failure.
    worker = main_process.ReceiverWorker()
    await worker.start()
    with self.assertRaises(errors.EndToEnd.SubprocessFailedOperationError):
      await worker.receive()
    await worker.stop()
class MessagingServiceScriptsEndToEndLatencyRunnerTest(
    pkb_common_test_case.PkbCommonTestCase):
  """Tests for EndToEndLatencyRunner and its CPU-pinning startup logic."""

  mock_coro = GetMockCoro()
  mock_sleep_coro = GetMockCoro()

  def setUp(self):
    super().setUp()
    self.publisher_mock = self.enter_context(
        mock.patch.object(main_process, 'PublisherWorker'))
    self.receiver_mock = self.enter_context(
        mock.patch.object(main_process, 'ReceiverWorker'))
    self.set_start_method_mock = self.enter_context(
        mock.patch.object(mp, 'set_start_method'))
    self.publisher_instance_mock = self.publisher_mock.return_value
    self.receiver_instance_mock = self.receiver_mock.return_value
    self.parent_mock = mock.Mock()
    self.parent_mock.attach_mock(self.publisher_instance_mock, 'publisher')
    # BUG FIX: the receiver instance mock (not the publisher mock a second
    # time) must be attached under the 'receiver' name, otherwise call-order
    # assertions on parent_mock never see the receiver's calls.
    self.parent_mock.attach_mock(self.receiver_instance_mock, 'receiver')

  def _SetupWorkerMocks(self, publish_timestamp, receive_timestamp,
                        ack_timestamp):
    """Configure both worker mocks to complete with the given timestamps."""
    self.publisher_instance_mock.start.return_value = Just()
    self.publisher_instance_mock.stop.return_value = Just()
    self.publisher_instance_mock.publish.return_value = Just(publish_timestamp)
    self.receiver_instance_mock.start.return_value = Just()
    self.receiver_instance_mock.stop.return_value = Just()
    self.receiver_instance_mock.start_consumption.return_value = Just()
    self.receiver_instance_mock.receive.return_value = Just(
        (receive_timestamp, ack_timestamp))

  @mock.patch.object(asyncio, 'run')
  @mock.patch.object(
      latency_runner.EndToEndLatencyRunner, '_async_run_phase', new=mock_coro)
  def testRunPhase(self, asyncio_run_mock):
    """run_phase delegates to _async_run_phase through asyncio.run."""
    runner = latency_runner.EndToEndLatencyRunner(mock.Mock())
    runner.run_phase(13, 14)
    asyncio_run_mock.assert_called_once()
    self.mock_coro.assert_called_once_with(13, 14)

  @mock.patch.object(asyncio, 'sleep', new=mock_sleep_coro)
  @mock.patch.object(latency_runner, 'print')
  @AsyncTest
  async def testAsyncRunPhase(self, print_mock):
    """_async_run_phase prints and returns the aggregated e2e metrics."""
    self._SetupWorkerMocks(1_000_000_000, 1_500_000_000, 2_000_000_000)
    runner = latency_runner.EndToEndLatencyRunner(mock.Mock())
    metrics = await runner._async_run_phase(1, 42)
    print_mock.assert_called_once_with(json.dumps(metrics))
    self.assertEqual(metrics, AGGREGATE_E2E_METRICS)

  @mock.patch.object(asyncio, 'sleep', new=mock_sleep_coro)
  @mock.patch.object(latency_runner, 'print')
  @mock.patch.object(os, 'sched_getaffinity', return_value={1, 2, 3, 4, 5, 6})
  @mock.patch.object(os, 'sched_setaffinity')
  @AsyncTest
  async def testPinnedCpus(self, sched_setaffinity_mock, sched_getaffinity_mock,
                           *_):
    """With enough CPUs, on_startup pins disjoint CPU sets to each process."""
    runner_cls = latency_runner.EndToEndLatencyRunner
    try:
      self._SetupWorkerMocks(1_000_000_000, 1_500_000_000, 2_000_000_000)
      self.publisher_mock.CPUS_REQUIRED = 1
      self.receiver_mock.CPUS_REQUIRED = 1
      runner_cls.on_startup()
      sched_getaffinity_mock.assert_called_once_with(0)
      main_pinned_cpus = runner_cls.MAIN_PINNED_CPUS
      publisher_pinned_cpus = runner_cls.PUBLISHER_PINNED_CPUS
      receiver_pinned_cpus = runner_cls.RECEIVER_PINNED_CPUS
      self.assertTrue(main_pinned_cpus, 'non-empty and non-none')
      self.assertTrue(publisher_pinned_cpus, 'non-empty and non-none')
      self.assertTrue(receiver_pinned_cpus, 'non-empty and non-none')
      # assertLess on sets checks proper-subset of the available CPUs.
      self.assertLess(main_pinned_cpus, {1, 2, 3, 4, 5, 6})
      self.assertLess(publisher_pinned_cpus, {1, 2, 3, 4, 5, 6})
      self.assertLess(receiver_pinned_cpus, {1, 2, 3, 4, 5, 6})
      self.assertLen(
          main_pinned_cpus | publisher_pinned_cpus | receiver_pinned_cpus,
          len(main_pinned_cpus) + len(publisher_pinned_cpus) +
          len(receiver_pinned_cpus), 'test for disjointness')
      sched_setaffinity_mock.assert_called_once_with(0, main_pinned_cpus)
      runner = latency_runner.EndToEndLatencyRunner(mock.Mock())
      await runner._async_run_phase(1, 42)
      self.publisher_mock.assert_called_once_with(publisher_pinned_cpus)
      self.receiver_mock.assert_called_once_with(receiver_pinned_cpus)
    finally:
      # Class-level pin state would leak into other tests; always reset it.
      runner_cls.MAIN_PINNED_CPUS = None
      runner_cls.PUBLISHER_PINNED_CPUS = None
      runner_cls.RECEIVER_PINNED_CPUS = None

  @mock.patch.object(asyncio, 'sleep', new=mock_sleep_coro)
  @mock.patch.object(latency_runner, 'print')
  @mock.patch.object(os, 'sched_getaffinity', return_value={1, 2})
  @mock.patch.object(os, 'sched_setaffinity')
  @AsyncTest
  async def testPinnedCpusNotEnoughCpus(self, sched_setaffinity_mock,
                                        sched_getaffinity_mock, *_):
    """With too few CPUs, on_startup skips pinning and workers get None."""
    self._SetupWorkerMocks(1_000_000_000, 1_500_000_000, 2_000_000_000)
    self.publisher_mock.CPUS_REQUIRED = 1
    self.receiver_mock.CPUS_REQUIRED = 1
    runner_cls = latency_runner.EndToEndLatencyRunner
    runner_cls.on_startup()
    sched_getaffinity_mock.assert_called_once_with(0)
    self.assertIsNone(runner_cls.MAIN_PINNED_CPUS)
    self.assertIsNone(runner_cls.PUBLISHER_PINNED_CPUS)
    self.assertIsNone(runner_cls.RECEIVER_PINNED_CPUS)
    sched_setaffinity_mock.assert_not_called()
    runner = latency_runner.EndToEndLatencyRunner(mock.Mock())
    await runner._async_run_phase(1, 42)
    self.publisher_mock.assert_called_once_with(None)
    self.receiver_mock.assert_called_once_with(None)
# Allow running this test module directly via the unittest runner.
if __name__ == '__main__':
  unittest.main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._protected_items_operations import build_create_or_update_request, build_delete_request, build_get_request
T = TypeVar('T')
# Optional response hook passed as the `cls` kwarg to each operation; it is
# invoked as cls(pipeline_response, deserialized, {}) and its return value is
# returned to the caller instead of the deserialized model.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ProtectedItemsOperations:
    """ProtectedItemsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.recoveryservicesbackup.activestamp.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        protected_item_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.ProtectedItemResource":
        """Provides the details of the backed up item. This is an asynchronous operation. To know the
        status of the operation,
        call the GetItemOperationResult API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the backed up item.
        :type fabric_name: str
        :param container_name: Container name associated with the backed up item.
        :type container_name: str
        :param protected_item_name: Backed up item name whose details are to be fetched.
        :type protected_item_name: str
        :param filter: OData filter options.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectedItemResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectedItemResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ProtectedItemResource"]
        # Callers may extend or override the default status-code -> exception
        # mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            protected_item_name=protected_item_name,
            filter=filter,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProtectedItemResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}'}  # type: ignore

    @distributed_trace_async
    async def create_or_update(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        protected_item_name: str,
        parameters: "_models.ProtectedItemResource",
        **kwargs: Any
    ) -> Optional["_models.ProtectedItemResource"]:
        """Enables backup of an item, or modifies the backup policy information of an already backed
        up item. This is an
        asynchronous operation. To know the status of the operation, call the GetItemOperationResult
        API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the backup item.
        :type fabric_name: str
        :param container_name: Container name associated with the backup item.
        :type container_name: str
        :param protected_item_name: Item name to be backed up.
        :type protected_item_name: str
        :param parameters: resource backed up item.
        :type parameters: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectedItemResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectedItemResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectedItemResource or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ProtectedItemResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]

        _json = self._serialize.body(parameters, 'ProtectedItemResource')

        request = build_create_or_update_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            protected_item_name=protected_item_name,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # A body is only deserialized for 200; other accepted codes return None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectedItemResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        protected_item_name: str,
        **kwargs: Any
    ) -> None:
        """Used to disable backup of an item within a container. This is an asynchronous operation. To
        know the status of the
        request, call the GetItemOperationResult API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the backed up item.
        :type fabric_name: str
        :param container_name: Container name associated with the backed up item.
        :type container_name: str
        :param protected_item_name: Backed up item to be deleted.
        :type protected_item_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            protected_item_name=protected_item_name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}'}  # type: ignore
| |
import time
import json
import threading
import struct
import urlparse
from xlog import getLogger
xlog = getLogger("x_tunnel")
from simple_http_client import HTTP_client
import utils
import base_container
import encrypt
import global_var as g
def encrypt_data(data):
    """Encrypt *data* with the configured password/method, or pass it through
    unchanged when encryption is disabled in the config."""
    if not g.config.encrypt_data:
        return data
    cipher = encrypt.Encryptor(g.config.encrypt_password, g.config.encrypt_method)
    return cipher.encrypt(data)
def decrypt_data(data):
    """Decrypt *data* with the configured password/method, or pass it through
    unchanged when encryption is disabled in the config."""
    if not g.config.encrypt_data:
        return data
    cipher = encrypt.Encryptor(g.config.encrypt_password, g.config.encrypt_method)
    return cipher.decrypt(data)
class ProxySession():
    """Multiplexes all local connections over encrypted HTTP round-trips to
    the x_tunnel server.

    Upload data is queued into ``upload_task_queue``; a pool of round-trip
    worker threads POSTs the queued blocks to the server and feeds downloaded
    blocks back through ``download_order_queue`` in sequence-number order.
    """

    def __init__(self):
        self.upload_task_queue = base_container.BlockSendPool(max_payload=g.config.block_max_size, send_delay=0)
        self.ack_pool = base_container.AckPool()
        self.mutex = threading.Lock()  # lock for conn_id, sn generation, on_road_num change,
        self.download_order_queue = base_container.BlockReceivePool(process_callback=self.download_data_processor)

        self.running = False
        self.start()

    def start(self):
        """(Re)initialise per-session state, log in, and spawn the round-trip
        worker threads. Returns False when login fails."""
        self.ack_pool.reset()
        self.download_order_queue.reset()
        self.roundtrip_thread = {}
        self.session_id = utils.generate_random_lowercase(8)
        self.last_conn_id = 0
        self.last_transfer_no = 0
        self.conn_list = {}
        self.transfer_list = {}
        self.last_roundtrip_time = 0
        self.on_road_num = 0
        self.last_download_data_time = 0
        self.traffic = 0

        if not self.login_session():
            xlog.warn("x-tunnel session not start")
            return False

        self.running = True
        self.upload_task_queue.start()

        server_port = g.server_port
        for i in range(0, g.config.concurent_thread_num):
            if g.config.port_range > 1:
                # Spread workers over the configured server port range.
                server_port += 1
                if server_port > g.server_port + g.config.port_range:
                    server_port = g.server_port
            server_address = (g.server_host, server_port)
            self.roundtrip_thread[i] = threading.Thread(target=self.normal_roundtrip_worker, args=(server_address,))
            self.roundtrip_thread[i].daemon = True
            self.roundtrip_thread[i].start()

    def stop(self):
        """Stop the session: close all connections, stop the upload queue and
        join every worker thread (except the calling thread itself)."""
        if not self.running:
            #xlog.warn("stop but not running")
            return
        self.running = False
        self.balance = 0
        self.close_all_connection()
        self.upload_task_queue.stop()

        #xlog.debug("begin join roundtrip_thread")
        for i in self.roundtrip_thread:
            # xlog.debug("begin join %d", i)
            rthead = self.roundtrip_thread[i]
            if rthead is threading.current_thread():
                # xlog.debug("%d is self", i)
                continue
            rthead.join()
            # xlog.debug("end join %d", i)
        #xlog.debug("end join roundtrip_thread")

    def reset(self):
        """Tear the session down and start a fresh one (new session_id)."""
        xlog.debug("session reset")
        self.stop()
        self.start()

    def status(self):
        """Return an HTML fragment describing the session state, for the
        local status page."""
        out_string = "session_id:%s<br>\n" % self.session_id
        out_string += "running:%d<br>\n" % self.running
        out_string += "last_roundtrip_time:%d<br>\n" % (time.time() - self.last_roundtrip_time)
        out_string += "last_download_data_time:%d<br>\n" % (time.time() - self.last_download_data_time)
        out_string += "last_conn_id:%d<br>\n" % self.last_conn_id
        out_string += "last_transfer_no:%d<br>\n" % self.last_transfer_no
        out_string += "on_road_num:%d<br>\n" % self.on_road_num
        out_string += "transfer_list:<br>\r\n"
        for transfer_no in sorted(self.transfer_list.iterkeys()):
            transfer = self.transfer_list[transfer_no]
            if "start" in self.transfer_list[transfer_no]:
                time_way = " t:" + str((time.time() - self.transfer_list[transfer_no]["start"]))
            else:
                time_way = ""
            out_string += "[%d] %s %s<br>\r\n" % (transfer_no, json.dumps(transfer), time_way)

        out_string += "<br>\n" + self.upload_task_queue.status()
        out_string += "<br>\n" + self.download_order_queue.status()
        out_string += "<br>\n" + self.ack_pool.status()
        for conn_id in self.conn_list:
            out_string += "<br>\n" + self.conn_list[conn_id].status()
        return out_string

    def login_session(self):
        """Register this session_id with the server (pack_type 1 handshake).

        Sends the session parameters and credentials; returns True when the
        server accepts (res == 0), False on any error.
        """
        if len(g.server_host) == 0 or g.server_port == 0:
            return False

        try:
            start_time = time.time()
            magic = "P"
            pack_type = 1
            upload_data_head = struct.pack("<cBB8sIHII", magic, g.protocol_version, pack_type, str(self.session_id),
                                           g.config.block_max_size, g.config.send_delay, g.config.windows_size,
                                           g.config.windows_ack)
            upload_data_head += struct.pack("<H", len(g.config.login_account)) + str(g.config.login_account)
            upload_data_head += struct.pack("<H", len(g.config.login_password)) + str(g.config.login_password)

            upload_post_data = encrypt_data(upload_data_head)

            http_client = HTTP_client((g.server_host, g.server_port), g.proxy, g.config.use_https,
                                      g.config.conn_life, cert=g.cert)
            content, status, heads = http_client.request(method="POST", path="data", data=upload_post_data,
                                                         timeout=g.config.roundtrip_timeout)

            time_cost = time.time() - start_time
            if status != 200:
                xlog.warn("login session fail, status:%r", status)
                return False

            if len(content) < 6:
                xlog.error("login data len:%d fail", len(content))
                return False

            info = decrypt_data(content)
            magic, protocol_version, pack_type, res, message_len = struct.unpack("<cBBBH", info[:6])
            message = info[6:]

            if magic != "P" or protocol_version != 1 or pack_type != 1:
                xlog.error("login_session time:%d head error:%s", 1000 * time_cost, utils.str2hex(info[:6]))
                return False

            if res != 0:
                xlog.warn("login_session time:%d fail, res:%d msg:%s", 1000 * time_cost, res, message)
                return False

            xlog.info("login_session time:%d msg:%s", 1000 * time_cost, message)
            return True
        except Exception as e:
            xlog.exception("login_session e:%r", e)
            return False

    def create_conn(self, sock, host, port):
        """Allocate a conn_id, send the 'create connection' command to the
        server and register the local Conn object. Returns the conn_id."""
        if not self.running:
            xlog.warn("session not running, can't connect")
            return

        self.mutex.acquire()
        self.last_conn_id += 1
        conn_id = self.last_conn_id
        self.mutex.release()

        seq = 0
        cmd_type = 0  # create connection
        sock_type = 0  # TCP
        data = struct.pack("<IBBH", seq, cmd_type, sock_type, len(host)) + host + struct.pack("<H", port)
        self.send_conn_data(conn_id, data)

        self.conn_list[conn_id] = base_container.Conn(self, conn_id, sock, host, port, g.config.windows_size,
                                                      g.config.windows_ack, is_client=True)
        return conn_id

    def close_all_connection(self):
        """Best-effort stop of every tracked connection."""
        xlog.info("start close all connection")
        conn_list = dict(self.conn_list)
        for conn_id in conn_list:
            try:
                xlog.debug("stopping conn_id:%d", conn_id)
                self.conn_list[conn_id].stop(reason="system reset")
            except Exception as e:
                xlog.warn("stopping conn_id:%d fail:%r", conn_id, e)
                pass
        # self.conn_list = {}
        xlog.debug("stop all connection finished")

    def remove_conn(self, conn_id):
        """Forget a connection; tolerates conn_id being already removed."""
        xlog.debug("remove conn_id:%d", conn_id)
        try:
            del self.conn_list[conn_id]
        except:
            pass

    def send_conn_data(self, conn_id, data, no_delay=False):
        """Queue one upload frame (type 2) carrying *data* for *conn_id*."""
        if not self.running:
            return

        # xlog.debug("upload conn_id:%d, len:%d", conn_id, len(data))
        buf = base_container.WriteBuffer()
        buf.append(struct.pack("<BII", 2, 4 + len(data), conn_id))
        buf.append(data)
        self.upload_task_queue.put(buf, no_delay)

    def download_data_processor(self, data):
        """Parse a downloaded block into per-connection payloads and deliver
        them to the matching Conn objects."""
        try:
            while len(data):
                data_type, data_len = struct.unpack("<BI", data.get(5))
                if data_type == 2: # data:
                    conn_id = struct.unpack("<I", data.get(4))[0]
                    payload = data.get_buf(data_len - 4)
                    if conn_id not in self.conn_list:
                        xlog.warn("DATA conn_id %d not in list", conn_id)
                    else:
                        # xlog.debug("down conn:%d len:%d", conn_id, len(payload))
                        self.conn_list[conn_id].put_cmd_data(payload)
                else:
                    raise Exception("process_block, unknown type:%d" % data_type)
        except Exception as e:
            xlog.exception("download_data_processor:%r", e)

    def touch_roundtrip(self):
        """Queue an empty payload so a blocked worker wakes up and performs
        a round-trip (used to pull pending download data)."""
        self.upload_task_queue.put("")

    def get_transfer_no(self):
        """Return the next monotonically increasing transfer number."""
        with self.mutex:
            self.last_transfer_no += 1
            transfer_no = self.last_transfer_no

        return transfer_no

    def normal_roundtrip_worker(self, server_address):
        """Worker thread main loop: take upload blocks + acks, POST them to
        *server_address*, then dispatch whatever the server sent back."""
        last_roundtrip_download_size = 0
        http_client = HTTP_client(server_address, g.proxy, g.config.use_https, g.config.conn_life, cert=g.cert)
        while self.running:
            # Decide whether to block waiting for upload data or to poll
            # immediately (polling keeps the download pipe busy).
            if self.on_road_num > g.config.concurent_thread_num * 0.8:
                block = True
            elif last_roundtrip_download_size > g.config.block_max_size:
                block = False
            elif len(self.conn_list) > 0 and self.on_road_num < 1:
                # keep at least one pulling thread
                block = False
            elif len(self.conn_list) > 0 and time.time() - self.last_download_data_time < 120 and \
                    self.on_road_num < g.config.concurent_thread_num * 0.1:
                # busy, have data download
                block = False
            else:
                block = True

            if block:
                get_timeout = 24 * 3600
            else:
                get_timeout = 0

            # self.transfer_list[transfer_no]["stat"] = "get local data"

            upload_data, send_sn = self.upload_task_queue.get(get_timeout)
            transfer_no = self.get_transfer_no()
            self.transfer_list[transfer_no] = {}
            self.transfer_list[transfer_no]["sn"] = send_sn
            send_data_len = len(upload_data)

            upload_ack_data = self.ack_pool.get()
            send_ack_len = len(upload_ack_data)

            magic = "P"
            pack_type = 2

            if self.on_road_num > g.config.concurent_thread_num * 0.8:
                server_timeout = 0
            else:
                server_timeout = g.config.roundtrip_timeout / 2

            upload_data_head = struct.pack("<cBB8sIIBIH", magic, g.protocol_version, pack_type, str(self.session_id),
                                           transfer_no,
                                           send_sn, server_timeout, send_data_len, send_ack_len)

            upload_post_buf = base_container.WriteBuffer(upload_data_head)
            upload_post_buf.append(upload_data)
            upload_post_buf.append(upload_ack_data)
            upload_post_data = str(upload_post_buf)
            upload_post_data = encrypt_data(upload_post_data)

            try_no = 0

            # Retry loop: the assembled request is resent until it succeeds
            # or the session stops.
            while self.running:
                try_no += 1
                sleep_time = min(try_no, 30)

                self.last_roundtrip_time = time.time()
                start_time = time.time()

                with self.mutex:
                    self.on_road_num += 1
                # xlog.debug("start roundtrip transfer_no:%d send_data_len:%d ack_len:%d", transfer_no, send_data_len, send_ack_len)
                try:
                    self.transfer_list[transfer_no]["try"] = try_no
                    self.transfer_list[transfer_no]["stat"] = "request"
                    self.transfer_list[transfer_no]["start"] = time.time()
                    content, status, response = http_client.request(method="POST", path="data", data=upload_post_data,
                                                                    timeout=g.config.roundtrip_timeout)

                    # 645 approximates per-request protocol overhead
                    # (headers etc.) for the traffic/quota accounting.
                    traffic = len(upload_post_data) + len(content) + 645
                    self.traffic += traffic
                    g.quota -= traffic
                except Exception as e:
                    xlog.exception("request except:%r retry %d", e, try_no)
                    time.sleep(sleep_time)
                    continue
                finally:
                    with self.mutex:
                        self.on_road_num -= 1

                if status == 405: # session_id not exist on server
                    if self.running:
                        xlog.warn("server session_id not exist, start reset session")
                        self.reset()
                    return
                elif status == 200:
                    recv_len = len(content)
                    if recv_len < 6:
                        # BUG FIX: the format string was missing the 'len:%d'
                        # placeholder for len(content) (6 placeholders for 7
                        # arguments), which made this log call itself fail.
                        xlog.error("roundtrip time:%d transfer_no:%d sn:%d send:%d len:%d status:%r retry:%d",
                                   (time.time() - start_time) * 1000, transfer_no, send_sn, send_data_len, len(content),
                                   status, try_no)
                        continue

                    content = decrypt_data(content)
                    data = base_container.ReadBuffer(content)

                    magic, version, pack_type = struct.unpack("<cBB", data.get(3))
                    if magic != "P" or version != g.protocol_version:
                        xlog.error("get data head:%s", utils.str2hex(content[:2]))
                        time.sleep(100)
                        break

                    if pack_type == 3: # error report
                        error_code, message_len = struct.unpack("<BH", data.get(3))
                        message = data.get(message_len)
                        xlog.warn("error report code:%d, msg:%s", error_code, message)
                        if error_code == 1: # no quota
                            xlog.warn("login x_server error:no quota")
                            self.stop()
                            return
                        else:
                            xlog.error("unknown error code:%d", error_code)
                            return

                    if pack_type != 2: # normal download traffic pack
                        xlog.error("pack type:%d", pack_type)
                        time.sleep(100)
                        break

                    sn, time_cost = struct.unpack("<II", data.get(8))
                    xlog.debug(
                        "roundtrip time:%d cost:%d transfer_no:%d send_sn:%d send:%d recv_sn:%d rcv:%d status:%r",
                        (time.time() - start_time) * 1000, time_cost, transfer_no, send_sn, send_data_len, sn,
                        len(content), status)

                    data_len = len(data)
                    if (sn > 0 and data_len == 0) or (sn == 0 and data_len > 0):
                        xlog.warn("get sn:%d len:%d %s", sn, data_len, data)

                    if sn:
                        self.last_download_data_time = time.time()
                        last_roundtrip_download_size = data_len
                        # xlog.debug("get sn:%d len:%d", sn, data_len)
                        self.download_order_queue.put(sn, data)

                        ack_pak = struct.pack("<Q", transfer_no)
                        self.ack_pool.put(ack_pak)
                    else:
                        last_roundtrip_download_size = 0

                    if send_data_len == 0 and data_len > g.config.block_max_size:
                        # Server has plenty of pending data: wake up more
                        # workers (up to half the pool) to pull it faster.
                        need_more_thread_num = int(g.config.concurent_thread_num * 0.5 - self.on_road_num)
                        if need_more_thread_num > 0:
                            for j in range(0, need_more_thread_num):
                                if self.on_road_num > g.config.concurent_thread_num * 0.5:
                                    break
                                self.touch_roundtrip()

                    break
                else:
                    xlog.warn("roundtrip time:%d transfer_no:%d send_sn:%d send:%d status:%r retry:%d",
                              (time.time() - start_time) * 1000, transfer_no, send_sn, send_data_len, status, try_no)
                    time.sleep(sleep_time)

            del self.transfer_list[transfer_no]
        xlog.info("roundtrip port:%d thread exit", server_address[1])
def calculate_quota_left(quota_list):
    """Return the total still-usable quota across all entries.

    The "current" entry counts only while its end_time is strictly in the
    future; each "backup" entry counts while its end_time has not passed.
    """
    now = int(time.time())
    total = 0

    if "current" in quota_list:
        current = quota_list["current"]
        if current["end_time"] > now:
            total += current["quota"]

    if "backup" in quota_list:
        total += sum(entry["quota"] for entry in quota_list["backup"]
                     if entry["end_time"] >= now)

    return total
def get_api_server_http_client():
    """Build an HTTP client pointed at the configured API server address."""
    parsed = urlparse.urlparse(g.config.api_server)
    return HTTP_client((parsed.hostname, parsed.port), g.proxy, g.config.use_https,
                       g.config.conn_life, cert=g.cert)
def call_api(path, req_info):
    """POST *req_info* as (optionally encrypted) JSON to the API server.

    Returns (True, info_dict) on success, or (False, reason_string) on any
    failure; every exception is caught and reported as a failure tuple.
    """
    try:
        start_time = time.time()
        upload_post_data = json.dumps(req_info)
        upload_post_data = encrypt_data(upload_post_data)

        http_client = get_api_server_http_client()
        content, status, heads = http_client.request(method="POST", path=path,
                                                     header={"Content-Type": "application/json"},
                                                     data=upload_post_data, timeout=g.config.roundtrip_timeout)

        time_cost = time.time() - start_time
        if status != 200:
            reason = "status:%r" % status
            # BUG FIX: log milliseconds like the success path below; the raw
            # seconds value was truncated to 0 by %d for sub-second calls.
            xlog.warn("api:%s fail:%s t:%d", path, reason, time_cost * 1000)
            return False, reason

        content = decrypt_data(content)
        try:
            info = json.loads(content)
        except Exception as e:
            xlog.warn("api:%s parse json:%s fail:%r", path, content, e)
            return False, "parse json fail"

        res = info["res"]
        if res != "success":
            xlog.warn("api:%s fail:%s", path, info["reason"])
            return False, info["reason"]

        xlog.info("api:%s success t:%d", path, time_cost * 1000)
        return True, info
    except Exception as e:
        xlog.exception("order e:%r", e)
        return False, "except:%r" % e
def request_balance(account, password, is_register=False, update_server=True):
    """Log in (or register) against the API server and refresh the global
    quota, balance and, optionally, the tunnel server address."""
    path = "login"
    if is_register:
        path = "register"
        xlog.info("request_balance register:%s", account)

    ok, info = call_api(path, {"account": account, "password": password})
    if not ok:
        return False, info

    g.quota_list = info["quota_list"]
    g.quota = calculate_quota_left(g.quota_list)
    if g.quota <= 0:
        xlog.warn("no quota")

    if update_server:
        g.server_host = str(info["host"])
        g.server_port = info["port"]

    g.balance = info["balance"]
    xlog.info("request_balance host:%s port:%d balance:%f quota:%f", g.server_host, g.server_port,
              g.balance, g.quota)
    return True, "success"
| |
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# Eigen is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# Alternatively, you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License and a copy of the GNU General Public License along with
# Eigen. If not, see <http://www.gnu.org/licenses/>.
# Pretty printers for Eigen::Matrix
# This is still pretty basic as the python extension to gdb is still pretty basic.
# It cannot handle complex eigen types and it doesn't support any of the other eigen types
# Such as quaternion or some other type.
# This code supports fixed size as well as dynamic size matrices
# To use it:
#
# * create a directory and put the file as well as an empty __init__.py in that directory
# * Create a ~/.gdbinit file, that contains the following:
import gdb
import re
import itertools
class EigenMatrixPrinter:
    """Pretty-print an Eigen::Matrix (fixed or dynamic size) inside gdb."""

    def __init__(self, val):
        """Extract element type, dimensions, storage order and data pointer.

        The gdb extension does not support value template arguments - they
        are parsed by hand out of the type's tag string.
        """
        type = val.type

        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        tag = self.type.tag
        regex = re.compile(r'\<.*\>')
        m = regex.findall(tag)[0][1:-1]
        template_params = m.split(',')
        # list() keeps the result indexable under Python 3, where map()
        # returns a lazy iterator instead of a list.
        template_params = list(map(lambda x: x.replace(" ", ""), template_params))

        # '-0x00000000000000001' is how gdb renders Eigen::Dynamic (-1);
        # dynamic dimensions live in the runtime storage instead.
        if template_params[1] == '-0x00000000000000001':
            self.rows = val['m_storage']['m_rows']
        else:
            self.rows = int(template_params[1])

        if template_params[2] == '-0x00000000000000001':
            self.cols = val['m_storage']['m_cols']
        else:
            self.cols = int(template_params[2])

        self.options = 0  # default value
        if len(template_params) > 3:
            self.options = template_params[3]

        self.rowMajor = (int(self.options) & 0x1)

        self.innerType = self.type.template_argument(0)

        self.val = val

        # Fixed size matrices have a struct as their storage, so we need to walk through this
        self.data = self.val['m_storage']['m_data']
        if self.data.type.code == gdb.TYPE_CODE_STRUCT:
            self.data = self.data['array']

        self.data = self.data.cast(self.innerType.pointer())

    class _iterator:
        """Walk the flat data pointer, yielding ('[row,col]', value) pairs."""

        def __init__(self, rows, cols, dataPtr, rowMajor):
            self.rows = rows
            self.cols = cols
            self.dataPtr = dataPtr
            self.currentRow = 0
            self.currentCol = 0
            self.rowMajor = rowMajor

        def __iter__(self):
            return self

        def next(self):
            row = self.currentRow
            col = self.currentCol
            if self.rowMajor == 0:
                # Column-major: consecutive elements advance down a column.
                if self.currentCol >= self.cols:
                    raise StopIteration

                self.currentRow = self.currentRow + 1
                if self.currentRow >= self.rows:
                    self.currentRow = 0
                    self.currentCol = self.currentCol + 1
            else:
                # Row-major: consecutive elements advance along a row.
                if self.currentRow >= self.rows:
                    raise StopIteration

                self.currentCol = self.currentCol + 1
                if self.currentCol >= self.cols:
                    self.currentCol = 0
                    self.currentRow = self.currentRow + 1

            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            if (self.cols == 1):  # if it's a column vector
                return ('[%d]' % (row,), item)
            elif (self.rows == 1):  # if it's a row vector
                return ('[%d]' % (col,), item)
            return ('[%d,%d]' % (row, col), item)

        # Python 3's iterator protocol uses __next__; keep 'next' so the
        # class still works with Python-2-based gdb builds.
        __next__ = next

    def children(self):
        return self._iterator(self.rows, self.cols, self.data, self.rowMajor)

    def to_string(self):
        return "Eigen::Matrix<%s,%d,%d,%s> (data ptr: %s)" % (self.innerType, self.rows, self.cols, "RowMajor" if self.rowMajor else "ColMajor", self.data)
class EigenQuaternionPrinter:
    "Print an Eigen Quaternion"

    def __init__(self, val):
        "Extract all the necessary information"
        # The gdb extension does not support value template arguments -
        # only the scalar type is needed here, via template_argument(0).
        quat_type = val.type
        if quat_type.code == gdb.TYPE_CODE_REF:
            quat_type = quat_type.target()
        self.type = quat_type.unqualified().strip_typedefs()
        self.innerType = self.type.template_argument(0)
        self.val = val
        # Quaternions have a struct as their storage, so we need to walk through this
        self.data = self.val['m_coeffs']['m_storage']['m_data']['array']
        self.data = self.data.cast(self.innerType.pointer())

    class _iterator:
        """Yield the four coefficients labelled x, y, z, w (storage order)."""

        def __init__(self, dataPtr):
            self.dataPtr = dataPtr
            self.currentElement = 0
            self.elementNames = ['x', 'y', 'z', 'w']

        def __iter__(self):
            return self

        def next(self):
            element = self.currentElement
            if self.currentElement >= 4:  # there are 4 elements in a quaternion
                raise StopIteration
            self.currentElement = self.currentElement + 1
            item = self.dataPtr.dereference()
            self.dataPtr = self.dataPtr + 1
            return ('[%s]' % (self.elementNames[element],), item)

        # Python 3 iterator protocol uses __next__.
        __next__ = next

    def children(self):
        return self._iterator(self.data)

    def to_string(self):
        return "Eigen::Quaternion<%s> (data ptr: %s)" % (self.innerType, self.data)
def build_eigen_dictionary ():
    """Populate the global regex -> printer-factory registry."""
    # The printer classes themselves act as factories: lookup_function
    # calls the stored object with the gdb value to be printed.
    pretty_printers_dict[re.compile('^Eigen::Quaternion<.*>$')] = EigenQuaternionPrinter
    pretty_printers_dict[re.compile('^Eigen::Matrix<.*>$')] = EigenMatrixPrinter
def register_eigen_printers(obj):
    """Register eigen pretty-printers with objfile Obj.

    When obj is None (e.g. sourced from .gdbinit) the printers are
    registered globally on the gdb module itself.
    """
    if obj is None:  # identity check; `== None` would invoke comparison hooks
        obj = gdb
    obj.pretty_printers.append(lookup_function)
def lookup_function(val):
    """Look up and return a pretty-printer that can print val, or None."""
    # Unwrap references and strip typedefs/qualifiers to get the
    # canonical struct tag the registry patterns are written against.
    val_type = val.type
    if val_type.code == gdb.TYPE_CODE_REF:
        val_type = val_type.target()
    val_type = val_type.unqualified().strip_typedefs()
    typename = val_type.tag
    if typename is None:
        # Non-struct / anonymous types carry no tag and cannot match.
        return None
    for pattern in pretty_printers_dict:
        if pattern.search(typename):
            return pretty_printers_dict[pattern](val)
    return None
# Module-level registry consulted by lookup_function; filled once at import time.
pretty_printers_dict = {}
build_eigen_dictionary ()
| |
#encoding: utf-8
'''
Examples:
[import [guideline]]
[tr [Notes]]
[List seq=[[getPhones [nokia_se40]] sep=[, ]] [
[Link target=[[item.url]] [[item.caption]]]
]]
[getPhones [nokia_se40]]
[formatNotes:=[
[List seq=[[getPhones [[#]]] sep=[, ]] [
[Link target=[[item.url]] [[item.caption]]]
]]
]]
'''
from itertools import chain
from collections import deque
import inspect
import re
from mrkev.parser import Parser
from mrkev.translator import CallBlock, CallParameter, BlockDefinition, BlockScope, Translator, formParameterName
class CustomContext(object):
    """Name-lookup scope backed by a plain dict of python values/callables.

    get() resolves dotted paths ('a.b.c') by walking nested mappings and
    always hands back a callable taking the interpreter, or None.
    """

    def __init__(self, ip, d):
        self.d = d
        self.ip = ip

    def get(self, name):
        segments = name.split('.')
        path = segments[1:]
        target = self.d.get(segments[0])
        if not target:
            return None
        # A callable root with a dotted path is evaluated first so the
        # path can be resolved against its result.
        if path and callable(target):
            target = target(self.ip)
        for key in path:
            if not target:
                break
            # Single-element lists are unwrapped transparently.
            if isinstance(target, list) and len(target) == 1:
                target = target[0]
            if hasattr(target, 'get'):
                target = target.get(key)
            else:
                return None
        if callable(target):
            return target
        # Wrap plain values so callers can treat every hit uniformly.
        return lambda ip: target
class ErrorFormatter(object):
    """Render the inline error messages the interpreter emits into output."""

    def formatBlockMissing(self, name):
        """Message for a block/parameter name that could not be resolved."""
        return u'[%s not found]' % (name,)

    def formatRecurrenceLimit(self, name, limit):
        """Message for exceeding the recursion limit (limit accepted but unused)."""
        return u'[recurrence limit for %s]' % (name,)
class ErrorBlock(object):
    """Placeholder inserted into the output stream when evaluation fails.

    Renders as its message and evaluates as False so getBoolean treats
    errors like missing values.
    """

    def __init__(self, msg):
        self.msg = msg

    def __unicode__(self):
        ''' print error to output
        '''
        return self.msg

    # Python 3 renders via __str__; alias keeps the same message there.
    __str__ = __unicode__

    def __nonzero__(self):
        ''' getBoolean evaluates errors as False
        '''
        return False

    # Python 3 truthiness protocol uses __bool__.
    __bool__ = __nonzero__
class Interpreter(object):
    """Evaluate a translated AST into a list of output fragments.

    Resolution uses two stacks: blockScopes (lexical block definitions,
    innermost first) and callScopes ((call, definition) pairs for the
    currently active block calls).
    """

    #greater limit than python stack size will lead to exceptions
    RECURRENCE_LIMIT = 30

    def __init__(self, ast, errorFormatter=None):
        self.ast = ast
        # Current call-nesting depth; checked against RECURRENCE_LIMIT.
        self.useCount = 0
        self.errorFormatter = errorFormatter or ErrorFormatter()
        self.currentLexicalScope = None
        self.blockScopes = deque()
        self.callScopes = deque()

    def evalToString(self):
        """Evaluate the whole AST and join all fragments into one string."""
        return ''.join(unicode(s) for s in self.eval(self.ast))

    def findBlock(self, block):
        """Resolve a block name against the scope stack, innermost first."""
        for c in self.blockScopes:
            value = c.get(block.name)
            if value is not None:
                return value
        return None

    def findParameter(self, block):
        """Resolve a parameter reference against the active call scopes."""
        for callBlock, blockDef in self.callScopes:
            if blockDef is block.lexicalScope:
                #get last calling of the same block
                if block.inDefaultParameter:
                    #prevents call cycles in default parameters
                    return callBlock.get(block.name)
                else:
                    return callBlock.get(block.name) or block.lexicalScope.get(block.name)
        return None

    def eval(self, block):
        """Dispatch on node type; always returns a list of fragments."""
        if isinstance(block, basestring):
            #strings
            res = [block]
        elif isinstance(block, list):
            #block content
            res = list(chain(*[self.eval(b) for b in block]))
        elif isinstance(block, CallBlock):
            res = self.evalCallBlock(block)
        elif isinstance(block, CallParameter):
            blocks = self.findParameter(block)
            if not blocks:
                msg = self.errorFormatter.formatBlockMissing(block.name)
                return [ErrorBlock(msg)]
            res = self.eval(blocks)
        elif isinstance(block, BlockScope):
            # A scope node brackets evaluation of its content.
            self.addBlockScope(block)
            res = self.eval(block.content)
            self.removeBlockScope()
        else:
            # Fallback: a python callable taking the interpreter.
            res = block(self)
            if not hasattr(res, '__iter__'):
                res = [res]
        return res

    def evalCallBlock(self, block):
        """Evaluate a named block call inside a fresh call scope."""
        name = block.name
        blockDef = self.findBlock(block)
        if not blockDef:
            msg = self.errorFormatter.formatBlockMissing(name)
            return [ErrorBlock(msg)]
        self.useCount += 1
        # NOTE(review): this early return skips the matching decrement, so
        # useCount stays elevated for the rest of the render once the limit
        # is hit -- confirm whether that is intentional.
        if self.useCount > self.RECURRENCE_LIMIT:
            return self.createRecurrenceLimit(name)
        if isinstance(blockDef, BlockDefinition):
            self.addCallScope(block, blockDef)
            res = self.eval(blockDef.content)
            self.removeCallScope()
        else:
            # Builtin (python-side) definition: no lexical scope to record.
            self.addCallScope(block, None)
            res = self.eval(blockDef)
            self.removeCallScope()
        self.useCount -= 1
        return res

    def createRecurrenceLimit(self, name):
        """Error fragment reported when RECURRENCE_LIMIT is exceeded."""
        msg = self.errorFormatter.formatRecurrenceLimit(name, self.RECURRENCE_LIMIT)
        return [ErrorBlock(msg)]

    def addBlockScope(self, blockScope):
        self.blockScopes.appendleft(blockScope)

    def removeBlockScope(self):
        self.blockScopes.popleft()

    def addCallScope(self, blockCall, blockDefinition):
        self.callScopes.appendleft((blockCall, blockDefinition))

    def removeCallScope(self):
        self.callScopes.popleft()

    def getValue(self, name, ifMissing=None):
        """Evaluate parameter *name*; substitute *ifMissing* on lookup errors."""
        res = self.eval(CallParameter(name, lexicalScope=None, inDefaultParameter=True))
        if ifMissing is not None and res and isinstance(res[0], ErrorBlock):
            res = ifMissing
        return res

    def getString(self, name):
        """Evaluate parameter *name* and join its fragments into one string."""
        return ''.join(unicode(s) for s in self.getValue(name, []))

    def getBoolean(self, name):
        '''convert block to boolean
        unknown or empty -> False
        '''
        res = self.getValue(name)
        return len(res) > 0 and all(res)

    def getGetLastCallParameters(self):
        """Names of the parameters passed to the innermost active call."""
        if self.callScopes:
            return self.callScopes[0][0].params.keys()
        else:
            return []
class MethodWrapper(object):
    """Adapt a plain python method to the interpreter's callable protocol.

    On construction the method's argument names (minus 'self') are
    recorded; on call each is fetched from the interpreter as a string
    and passed as a keyword argument.
    """

    def __init__(self, f):
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec when available (same .args attribute).
        getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        self.args = [n for n in getspec(f).args if n != 'self']
        self.f = f

    def __call__(self, ip):
        # The 'content' argument maps to the markup's body parameter '#'.
        formName = lambda a: formParameterName(a) if a != 'content' else '#'
        params = dict((a, ip.getString(formName(a))) for a in self.args)
        return self.f(**params)
class Template(object):
    ''' object for rendering markup which can be extended about parameters and functions
    string based methods has to start with 'm' and continue with upper case letter
    all parameters are converted to unicode
    and should return list or unicode
    e.g.
    def mHello(self, name):
        return 'Hello ' + name
    '''

    def __init__(self, code, errorFormatter=None):
        # Accept either raw markup text or an already parsed AST.
        if isinstance(code, basestring):
            code = Parser(code).parse()
        code = Translator().translate(code)
        self.interpreter = Interpreter(code, errorFormatter=errorFormatter)

    def render(self, **kwargs):
        """Render the template; each kwarg becomes a [$name] parameter."""
        context = self.createContext(kwargs)
        self.interpreter.addBlockScope(context)
        return self.interpreter.evalToString()

    def createContext(self, params):
        """Builtin scope: caller parameters, template functions, m* methods."""
        builtins = {}
        builtins.update(self._getParameters(params))
        builtins.update(self._getTemplateFunctions())
        builtins.update(self._getStringBasedMethods())
        return CustomContext(self.interpreter, builtins)

    def _getParameters(self, params):
        """Prefix caller parameters with '$' and add escape shorthands."""
        params = dict(('$'+k, v) for k, v in params.items())
        params.update({
            '(': u'[',
            ')': u']',
            'Sp': u' ',
        })
        return params

    def _getTemplateFunctions(self):
        """Core builtins available to every template."""
        return {
            'If': self.If,
            'List': self.List,
            'Split': self.Split,
            'html': TagGenerator(),
        }

    def _getStringBasedMethods(self):
        #find all methods starting with m[A-Z].*
        hasProperNameFormat = lambda k: len(k) > 2 and k[0] == 'm' and k[1].isupper()
        templateMethods = ((k[1:], getattr(self, k)) for k in dir(self) if callable(getattr(self, k)) and hasProperNameFormat(k))
        return dict((name, MethodWrapper(method)) for name, method in templateMethods)

    def List(self, ip):
        """[List]: repeat '#' for every item of '#Seq', joined by '#Sep'."""
        seq = ip.getValue('#Seq', [])
        sep = ip.getString('#Sep')
        if seq:
            # NOTE: these lambdas deliberately rely on late binding -- i and
            # x are the enumerate() loop variables assigned further down, so
            # each lookup reflects the current iteration.
            ip.addBlockScope(CustomContext(self.interpreter, {
                #do not use for styling, css 2.0 is powerfull enough
                '$Even': lambda _: i % 2 == 1,
                '$First': lambda _: i == 0,
                '$Item': lambda _: x,
                '$Last': lambda _: i+1 == len(seq),
                '$Odd': lambda _: i % 2 == 0,
                '$Order': lambda _: i+1,
            }))
            if sep:
                res = []
                for i, x in enumerate(seq):
                    res.append(ip.getValue('#'))
                    if i + 1 != len(seq):
                        res.append(sep)
            else:
                res = [ip.getValue('#') for i, x in enumerate(seq)]
            ip.removeBlockScope()
            return list(chain(*res))
        else:
            return ip.getValue('#IfEmpty', [])

    def Split(self, ip):
        """[Split]: split '#' content by '#Sep'; whole content if no sep."""
        content = ip.getString('#')
        sep = ip.getString('#Sep')
        if sep:
            res = content.split(sep)
        else:
            res = [content]
        return res

    def If(self, ip):
        """[If]: '#Then' when '#' evaluates truthy, otherwise '#Else'."""
        if ip.getBoolean('#'):
            return ip.getValue('#Then', [])
        else:
            return ip.getValue('#Else', [])
class TagGenerator:
    """Resolve any well-formed tag name to a callable rendering that HTML
    element; call parameters become attributes and '#' becomes the content."""

    TAG_NAME_RE = re.compile(r'^[a-zA-Z0-9]+(:[a-zA-Z0-9]+)?$')

    def get(self, name):
        def wrapper(ip):
            params = ip.getGetLastCallParameters()
            if not self.TAG_NAME_RE.match(name):
                return '[tag name "%s" invalid]' % name
            attrs = [(p, ip.getString(p)) for p in params if p != '#']
            if '#' not in params:
                # No content parameter: render a self-closing tag.
                return ['<', name, joinAttributes(attrs), '/>']
            body = ip.getString('#')
            return list(chain(('<', name, joinAttributes(attrs), '>'), body, ('</', name, '>')))
        return wrapper
def joinAttributes(attributes):
    """Render (param, value) pairs as ' name="value"' fragments.

    Pairs with falsy values are skipped; the first character of each
    parameter name (the call-parameter prefix) is stripped and the value
    is HTML-escaped.
    """
    fragments = [' %s="%s"' % (param[1:], escapeHtml(value))
                 for param, value in attributes if value]
    return ''.join(fragments)
def escapeHtml(s):
    """Escape HTML metacharacters so *s* is safe inside markup/attributes.

    The source version replaced each character with itself (the entities
    were evidently mangled), making the function a no-op; restore the
    proper entities. '&' must be escaped first so entities introduced by
    the later replacements are not re-escaped.
    """
    s = s.replace('&', '&amp;')
    s = s.replace('"', '&quot;')
    s = s.replace('>', '&gt;')
    s = s.replace('<', '&lt;')
    return s
| |
# -*- coding: utf-8 -*-
#
## copyright (C) 2018
# The Icecube Collaboration
#
# $Id$
#
# @version $Revision$
# @date $LastChangedDate$
# @author Hershal Pandya <hershal@udel.edu> Last changed by: $LastChangedBy$
#
from icecube import phys_services, dataclasses, icetray, recclasses
import numpy as np
import tables
from icecube.icetray.i3logging import log_fatal,log_warn
from llh_ratio_nd import get_slice_vector,log_likelihood_ratio
def signed_log(t):
    """Sign-preserving log10 transform: sign(t) * log10(|t| + 1).

    The +1 shift makes t == 0 map to 0; works elementwise on arrays.
    """
    magnitude = np.log10(np.absolute(t) + 1)
    return np.sign(t) * magnitude
def log_plus_one(t):
    """log10 of (t + 1); the shift makes t == 0 map to 0."""
    shifted = t + 1
    return np.log10(shifted)
def check_distinct_regions_add_up_to_full(distinct_regions_binedges, binedges, decimals=2):
    """Sanity-check that the per-region bin edges partition the full binning.

    Two checks, both comparing values rounded to *decimals*:
    1. the union of all regions' edges equals *binedges* per dimension;
    2. cyclically adjacent regions overlap in at most one edge per
       dimension (unless they span the full range).

    Raises Exception('Inconsistency found') on the first failure; returns
    None on success. Diagnostics use print(...) calls, which behave the
    same under Python 2 and 3 for a single argument.
    """
    # Dimension-wise union of every region's edges.
    combine_edges = []
    for i in range(len(distinct_regions_binedges)):
        for j in range(len(distinct_regions_binedges[i])):
            if i == 0:
                combine_edges.append(distinct_regions_binedges[i][j])
            else:
                combine_edges[j] = np.unique(np.sort(np.concatenate(
                    (combine_edges[j], distinct_regions_binedges[i][j]))))
    for i in range(len(binedges)):
        are_equal = (np.round(binedges[i], decimals=decimals) ==
                     np.round(combine_edges[i], decimals=decimals)).all()
        if not are_equal:
            print('DistinctRegionsBinEdges do not add up to binedges for this dimension')
            print('%s %s' % (combine_edges[i], binedges[i]))
            raise Exception('Inconsistency found')
    # Pairwise (cyclic neighbour) overlap check.
    for i in range(len(distinct_regions_binedges)):
        for j in range(len(distinct_regions_binedges[0])):
            next_one = i + 1 if i < len(distinct_regions_binedges) - 1 else 0
            intersection = np.intersect1d(
                np.round(distinct_regions_binedges[i][j], decimals=decimals),
                np.round(distinct_regions_binedges[next_one][j], decimals=decimals))
            # Sharing one boundary edge is fine; sharing the full axis is
            # fine; anything in between means the regions overlap.
            if len(intersection) > 1 and len(intersection) != len(binedges[j]):
                print('comparing "Distinct" regions %i and %i, dimension %i' % (i, next_one, j))
                print('These regions Intersect')
                print('binedges of region1 %s' % (distinct_regions_binedges[i],))
                print('binedges of region2 %s' % (distinct_regions_binedges[next_one],))
                raise Exception('Inconsistency found')
    return
class IceTop_LLHRatio(icetray.I3ConditionalModule):
    """
    Input takes I3VectorShieldHitRecords with following members:
    distance
    residual_time
    charge
    """

    def __init__(self,ctx):
        icetray.I3ConditionalModule.__init__(self, ctx)
        #common inputs
        self.AddParameter('Hits_I3VectorShieldHitRecord',
                          'Shield applied to Pulses Using a reco',
                          None)
        self.AddParameter('Unhits_I3VectorShieldHitRecord',
                          'Unhits from Shield and Charge/Time assigned false values',
                          None)
        self.AddParameter('Excluded_I3VectorShieldHitRecord',
                          'Containing Dist of Excluded Tanks and Charge/time assigned false values',
                          None)
        self.AddParameter('AngularReco_I3Particle',
                          'I3Particle from which cosZenith is to be drawn',
                          None)
        self.AddParameter('EnergyReco_I3Particle',
                          'I3Particle from which logEnergy is to be drawn',
                          None)
        self.AddParameter('LaputopParamsName',
                          'LaputopParams from which logS125 is to be drawn only accepted if EnergyReco_I3Particle not provided',
                          None)
        self.AddParameter('RunMode','Options: GeneratePDF / CalcLLHR',None)
        self.AddParameter('Output','Name of the output container','IceTopLLHR')
        # inputs for RunMode GeneratePDF (Configure reads these in that mode)
        self.AddParameter('OutputFileName','',None)
        self.AddParameter('BinEdges5D','[logE_edges, cosZen_edges, logQ_edges, signed_logT_edges, logRplusone_edges]',[])
        self.AddParameter('DistinctRegionsBinEdges3D',
                          'Disjoint Regions in Q, T, R PDF. e.g.Unhits/Excluded. [3dEdges1,3dEdges2,..]',
                          [])
        # inputs for RunMode CalcLLHR (Configure reads these in that mode)
        self.AddParameter('SigPDFInputFileName',
                          'Path to input file (Sig) made using GeneratePDF method in the previous run',None)
        self.AddParameter('BkgPDFInputFileName',
                          'Path to input file (Bkg) made using GeneratePDF method in the previous run',None)
        self.AddParameter('DecimalsForSanityCheck',
                          'Consistency checks will compare values rounded to these N decimals.Default:2',2)
        self.AddParameter('SubtractEventFromPDF',
                          'subtract the event from the PDF if it was used for generating the PDF. Default:None',None)
        return

    def Configure(self):
        """Read parameters and initialise mode-specific state.

        GeneratePDF: builds an empty 5-D histogram (self.hist).
        CalcLLHR: loads sig/bkg PDFs from the given files.
        """
        self.HitsName = self.GetParameter('Hits_I3VectorShieldHitRecord')
        self.UnhitsName = self.GetParameter('Unhits_I3VectorShieldHitRecord')
        self.ExcludedName = self.GetParameter('Excluded_I3VectorShieldHitRecord')
        self.AngularRecoName = self.GetParameter('AngularReco_I3Particle')
        self.EnergyRecoName = self.GetParameter('EnergyReco_I3Particle')
        self.LaputopParamsName = self.GetParameter('LaputopParamsName')
        self.RunMode = self.GetParameter('RunMode')
        self.Decimals= self.GetParameter('DecimalsForSanityCheck')
        if self.RunMode=='GeneratePDF':
            self.OutputName = self.GetParameter('OutputFileName')
            self.binedges = self.GetParameter('BinEdges5D')
            self.distinct_regions_binedges = self.GetParameter('DistinctRegionsBinEdges3D')
            # make sure distinct regions binedges make sense
            if len(self.distinct_regions_binedges)==0:
                # give the whole region as a distinct single region
                self.distinct_regions_binedges = [self.binedges[2:]]
            else:
                #check that each distinct region binedge is same shape as self.binedges i.e.
                for i in self.distinct_regions_binedges:
                    if np.shape(i)!=np.shape(self.binedges[2:]):
                        print 'shape of self.binedges[2:] :',np.shape(self.binedges[2:])
                        print 'shape of self.distinct_regions_binedges',np.shape(self.distinct_regions_binedges)
                        log_fatal('DistinctRegionBinEdges and BinEdges* not compatible')
                #check that joining all distinct regions gives total binedges
                check_distinct_regions_add_up_to_full(self.distinct_regions_binedges, self.binedges[2:],decimals=self.Decimals)
            self.labels = ['logE', 'cosZ', 'logQ', 'signedlogT', 'logRplusOne']
            #creates the self.hist
            self._init_hist()
        elif self.RunMode=='CalcLLHR':
            self.SigPDFInputName = self.GetParameter('SigPDFInputFileName')
            self.BkgPDFInputName = self.GetParameter('BkgPDFInputFileName')
            # this one should create self.bkg_hist, self.sig_hist, self.binedges, self.labels, self.distinct_regions_binedges
            self._load_PDF_from_file()
            self.SubtractEventFromPDF= self.GetParameter('SubtractEventFromPDF')
        self.objname = self.GetParameter('Output')
        return

    def Physics(self,frame):
        """Dispatch each physics frame to the handler for the active mode."""
        if self.RunMode=='GeneratePDF':
            self._GenPDFsPhysics(frame)
        elif self.RunMode=='CalcLLHR':
            self._CalcLLHRPhysics(frame)
        else:
            log_fatal('RunMode can only accept one these two inputs: GeneratePDF / CalcLLHR')
        self.PushFrame(frame)
        return

    def Finish(self):
        """GeneratePDF mode: write histogram, bin edges, labels and event
        count to the output HDF5 file (lz4hc-compressed carrays)."""
        if self.RunMode=='GeneratePDF':
            # generate the outputfile. save histogram.
            f=tables.open_file(self.OutputName,'w')
            f.create_carray('/', 'hist', obj=self.hist,filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
            for i in range(len(self.binedges)):
                f.create_carray('/', 'binedges_%i'%i,
                                obj=self.binedges[i],
                                filters=tables.Filters(complib='blosc:lz4hc',
                                                       complevel=1))
            for i in range(len(self.distinct_regions_binedges)):
                for j in range(len(self.distinct_regions_binedges[0])):
                    f.create_carray('/', 'region_%i_binedges_%i'%(i,j),
                                    obj=self.distinct_regions_binedges[i][j],
                                    filters=tables.Filters(complib='blosc:lz4hc',
                                                           complevel=1))
            f.create_carray('/', 'labels', obj=self.labels,filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
            f.create_carray('/', 'n_events', obj=[self.n_events],filters=tables.Filters(complib='blosc:lz4hc', complevel=1))
            f.close()
        return

    def _load_PDF_from_file(self):
        '''
        this part is hard wired for 5 dimensional PDFs
        '''
        f=tables.open_file(self.SigPDFInputName,'r')
        self.sig_hist = f.root.hist[:]
        self.binedges = [ f.root.binedges_0[:], f.root.binedges_1[:], f.root.binedges_2[:], f.root.binedges_3[:] , f.root.binedges_4[:]]
        self.distinct_regions_binedges = [ ]
        # NOTE(review): range(1) hard-codes reading a single distinct region
        # from the file, regardless of how many were written -- confirm.
        for r in range(1):
            region_binedges=[]
            for i in range(3):
                # NOTE(review): eval() on an internally built string; works,
                # but getattr(f.root, 'region_%i_binedges_%i' % (r, i)) would
                # avoid eval entirely.
                temp=eval('f.root.region_%i_binedges_%i[:]'%(r,i))
                region_binedges.append(temp)
            self.distinct_regions_binedges.append(region_binedges)
        self.labels = f.root.labels[:]
        f.close()
        f=tables.open_file(self.BkgPDFInputName,'r')
        self.bkg_hist = f.root.hist[:]
        binedges = [ f.root.binedges_0[:], f.root.binedges_1[:], f.root.binedges_2[:], f.root.binedges_3[:] , f.root.binedges_4[:]]
        labels = f.root.labels[:]
        f.close()
        if np.shape(self.sig_hist)!=np.shape(self.bkg_hist):
            print 'sig hist, bkg hist shapes dont match'
            # NOTE(review): sig_hist/bkg_hist are undefined here (the
            # attributes are self.sig_hist/self.bkg_hist); this diagnostic
            # path would raise NameError if ever taken.
            print 'sig hist shape',np.shape(sig_hist)
            print 'bkg hist shape',np.shape(bkg_hist)
            raise Exception('Inconsistency found')
        for i in range(len(binedges)):
            are_equal=(np.round(binedges[i],decimals=self.Decimals)==np.round(self.binedges[i],decimals=self.Decimals)).all()
            if not are_equal:
                print 'sig binedges dim %i'%i, self.binedges[i]
                print 'bkg binedges dim %i'%i, binedges[i]
                raise Exception('Sig and Bkg binedges are not equal')
        # labels mismatch only warns via prints; it does not abort.
        if (labels!=self.labels).any():
            print 'labels for sig and bkg are not same'
            print 'are you sure you are loading correct sig/bkg pdfs?'
        return

    def _init_hist(self):
        """Create the empty 5-D histogram and reset the event counter."""
        histogram_shape= np.array([len(i)-1 for i in self.binedges])
        self.hist=np.zeros(histogram_shape)
        self.n_events=0
        return

    def _fill(self,sample):
        """Histogram *sample* with the configured bin edges and accumulate."""
        h,edges=np.histogramdd(sample,self.binedges)
        if np.shape(h)!=np.shape(self.hist):
            log_fatal('initialized histogram and fill histogram dont match in shape')
        self.hist+= h
        self.n_events+=1
        return

    def _GenPDFsPhysics(self,frame):
        """GeneratePDF mode: add this event's tank records to the histogram."""
        in_array=self._create_in_array(frame)
        self._fill(in_array)
        return

    def _CalcLLHRPhysics(self,frame):
        """CalcLLHR mode: compute the sig/bkg log-likelihood ratio for this
        event and store the results as an I3MapStringDouble in the frame."""
        d={}
        d['llh_ratio']= 0.
        d['n_extrapolations_sig_PDF'] = 0.
        d['n_extrapolations_bkg_PDF'] = 0.
        d['llh_sig'] = 0.
        d['llh_bkg'] = 0.
        d['isGood'] = 0.
        # load event information
        in_array = self._create_in_array(frame)
        logE=in_array[0][0]
        coszen=in_array[0][1]
        # select Q, T, R dimensions, generate event histogram
        in_array = (in_array.T[2:]).T
        binedges = self.binedges[2:]
        event_hist,temp = np.histogramdd(in_array, binedges)
        # check if event logE and coszen lies within range of binedges
        # (out-of-range events get the zeroed defaults with isGood == 0)
        if logE>self.binedges[0][-1] or logE<self.binedges[0][0]:
            frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
            return
        if coszen>self.binedges[1][-1] or coszen<self.binedges[1][0]:
            frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
            return
        # find the logE and coszen bins select those bins in sig/bkg pdfs
        logEbincenters = np.array((self.binedges[0][1:] + self.binedges[0][:-1] )/2.)
        coszenbincenters = np.array((self.binedges[1][1:] + self.binedges[1][:-1] )/2.)
        dE = np.absolute(logEbincenters - logE)
        Ebin=np.where(np.amin(dE)==dE)[0][0]
        dcZ = np.absolute(coszenbincenters - coszen)
        cZbin = np.where(np.amin(dcZ)==dcZ)[0][0]
        sig_hist = self.sig_hist[Ebin][cZbin]
        bkg_hist = self.bkg_hist[Ebin][cZbin]
        # subtract the event from the PDF if it was used for generating the PDF
        if self.SubtractEventFromPDF:
            if self.SubtractEventFromPDF=='Sig':
                sig_hist = sig_hist - event_hist
                if (sig_hist<0).any():
                    log_fatal('Event subtraction led to negative values')
            if self.SubtractEventFromPDF=='Bkg':
                bkg_hist = bkg_hist - event_hist
                if (bkg_hist<0).any():
                    log_fatal('Event subtraction led to negative values')
        # normalize histogram, obtain PDFs
        sig_pdf = sig_hist/ np.sum(sig_hist)
        bkg_pdf = bkg_hist/ np.sum(bkg_hist)
        # calculate llh ratio for each region separately and add it up
        # separate calculation is done to avoid one region influencing
        # extrapolated values of empty pixels in the PDF in another region
        llh_map_sig=np.zeros_like(sig_hist)
        llh_map_bkg=np.zeros_like(bkg_hist)
        d['isGood']=1.
        for region_edges in self.distinct_regions_binedges:
            # obtain slice vector for the region of the PDF
            region_range = [ [i[0],i[-1]] for i in region_edges]
            slice_vector= get_slice_vector(binedges,region_range)
            temp = log_likelihood_ratio(heatmap1=sig_pdf[slice_vector],
                                        heatmap2=bkg_pdf[slice_vector],
                                        event_hist = event_hist[slice_vector])
            d['llh_ratio'] += temp[0]
            # all the rest are debugging variables. some will be stored in I3VectorMap.
            # not storing any histograms as output. Just numbers.
            d['n_extrapolations_sig_PDF'] += temp[1]
            d['n_extrapolations_bkg_PDF'] += temp[2]
            d['llh_sig'] += temp[5]
            d['llh_bkg'] += temp[6]
            extrapolated_sig_PDF = temp[3]
            extrapolated_bkg_PDF = temp[4]
            llh_map_sig[slice_vector]=temp[7]
            llh_map_bkg[slice_vector]=temp[8]
        frame.Put(self.objname,dataclasses.I3MapStringDouble(d))
        return

    def _create_in_array(self,frame):
        """Build the (n_tanks, 5) array [logE, cosZen, logQ, signedlogT,
        logRplusOne] from the hit, unhit and excluded tank records."""
        if self.EnergyRecoName:
            En = np.log10(frame[self.EnergyRecoName].energy)
        elif self.LaputopParamsName:
            En = frame[self.LaputopParamsName].value(recclasses.LaputopParameter.Log10_S125)
            # En = np.log10(frame[self.LaputopParamsName].s125)
        else:
            log_fatal('One of EnergyRecoName_I3Particle or LaputopParamsName needs to be given')
        ze = np.cos(frame[self.AngularRecoName].dir.zenith)
        hits = frame[self.HitsName]
        unhits = frame[self.UnhitsName]
        excluded = frame[self.ExcludedName]
        #hits_t, hits_q, hits_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in hits]).T
        hits_t = signed_log(np.array([hit.time_residual for hit in hits]))
        hits_q = np.log10(np.array([hit.charge for hit in hits]))
        hits_r = log_plus_one(np.array([hit.distance for hit in hits]))
        hits_E = np.ones_like(hits_r)*En
        hits_z = np.ones_like(hits_r)*ze
        #unhits_t, unhits_q, unhits_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in unhits]).T
        unhits_t = signed_log(np.array([hit.time_residual for hit in unhits]))
        unhits_q = np.log10(np.array([hit.charge for hit in unhits]))
        unhits_r = log_plus_one(np.array([hit.distance for hit in unhits]))
        unhits_E = np.ones_like(unhits_r)*En
        unhits_z = np.ones_like(unhits_r)*ze
        #excluded_t, excluded_q, excluded_r = np.array([[signed_log(hit.time_residual),np.log10(hit.charge), log_plus_one(hit.distance)] for hit in excluded]).T
        excluded_t = signed_log(np.array([hit.time_residual for hit in excluded]))
        excluded_q = np.log10(np.array([hit.charge for hit in excluded]))
        excluded_r = log_plus_one(np.array([hit.distance for hit in excluded]))
        excluded_E = np.ones_like(excluded_r)*En
        excluded_z = np.ones_like(excluded_r)*ze
        # ready data for entry to 5D hist
        t = np.concatenate( (hits_t, unhits_t, excluded_t) )
        q = np.concatenate( (hits_q, unhits_q, excluded_q) )
        r = np.concatenate( (hits_r, unhits_r, excluded_r) )
        E = np.concatenate( (hits_E, unhits_E, excluded_E) )
        z = np.concatenate( (hits_z, unhits_z, excluded_z) )
        # 162 = every IceTop tank must be accounted for exactly once
        # across hits + unhits + excluded.
        if len(t)!=162 or len(q)!=162 or len(r)!=162:
            print 'N_t %s N_q %s N_r %s'%(len(t),len(q),len(r))
            log_fatal('Total Tanks in Event not 162')
        if np.isnan(t).any() or np.isnan(q).any() or np.isnan(r).any():
            print 't',t
            print 'q',q
            print 'r',r
            log_warn('signed_time/logq/logr have nans')
        in_array=np.vstack([E,z,q,t,r]).T
        return in_array
| |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from sahara.plugins import provisioning as p
from sahara.utils import files as f
# --- Default package repositories -----------------------------------------
# Apt (Ubuntu precise) and yum (RHEL/CentOS 6) repository definitions plus
# signing-key locations for CDH 5.0.0 and Cloudera Manager 5.0.0. Used as
# defaults when the user does not override the repo URL configs below.
CDH5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cdh5'
                    '/ubuntu/precise/amd64/cdh precise-cdh5.0.0 contrib'
                    '\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
                    '/precise/amd64/cdh precise-cdh5.0.0 contrib')
DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cdh5/ubuntu'
                                    '/precise/amd64/cdh/archive.key')
CM5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cm5'
                   '/ubuntu/precise/amd64/cm precise-cm5.0.0 contrib'
                   '\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
                   '/precise/amd64/cm precise-cm5.0.0 contrib')
DEFAULT_CM5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cm5/ubuntu'
                                   '/precise/amd64/cm/archive.key')
CDH5_CENTOS_REPO = ('[cloudera-cdh5]'
                    '\nname=Cloudera\'s Distribution for Hadoop, Version 5'
                    '\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
                    '/x86_64/cdh/5.0.0/'
                    '\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
                    '/x86_64/cdh/RPM-GPG-KEY-cloudera'
                    '\ngpgcheck = 1')
CM5_CENTOS_REPO = ('[cloudera-manager]'
                   '\nname=Cloudera Manager'
                   '\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
                   '/x86_64/cm/5.0.0/'
                   '\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
                   '/x86_64/cm/RPM-GPG-KEY-cloudera'
                   '\ngpgcheck = 1')
# Default locations of the auxiliary jars/archives fetched onto the VMs.
DEFAULT_SWIFT_LIB_URL = ('https://repository.cloudera.com/artifactory/repo/org'
                         '/apache/hadoop/hadoop-openstack/2.3.0-cdh5.0.0'
                         '/hadoop-openstack-2.3.0-cdh5.0.0.jar')
DEFAULT_EXTJS_LIB_URL = 'http://extjs.com/deploy/ext-2.2.zip'
# --- Cluster-wide user-visible configs ------------------------------------
# These p.Config objects are collected by _get_cluster_plugin_configs().
CDH5_REPO_URL = p.Config(
    'CDH5 repo list URL', 'general', 'cluster', priority=1,
    default_value="")
CDH5_REPO_KEY_URL = p.Config(
    'CDH5 repo key URL (for debian-based only)', 'general', 'cluster',
    priority=1, default_value="")
CM5_REPO_URL = p.Config(
    'CM5 repo list URL', 'general', 'cluster', priority=1,
    default_value="")
CM5_REPO_KEY_URL = p.Config(
    'CM5 repo key URL (for debian-based only)', 'general', 'cluster',
    priority=1, default_value="")
ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
                        config_type='bool', priority=1,
                        default_value=True)
ENABLE_HBASE_COMMON_LIB = p.Config('Enable HBase Common Lib',
                                   'general', 'cluster', config_type='bool',
                                   priority=1, default_value=True)
SWIFT_LIB_URL = p.Config(
    'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
    default_value=DEFAULT_SWIFT_LIB_URL,
    description=("Library that adds Swift support to CDH. The file will be "
                 "downloaded from VM."))
EXTJS_LIB_URL = p.Config(
    "ExtJS library URL", 'general', 'cluster', priority=1,
    default_value=DEFAULT_EXTJS_LIB_URL,
    description=("Ext 2.2 library is required for Oozie Web Console. "
                 "The file will be downloaded from VM with oozie."))
AWAIT_AGENTS_TIMEOUT = p.Config(
    'Await Cloudera agents timeout', 'general', 'cluster', config_type='int',
    priority=1, default_value=300, is_optional=True,
    description="Timeout for Cloudera agents connecting to Coudera Manager, "
                "in seconds")
AWAIT_MANAGER_STARTING_TIMEOUT = p.Config(
    'Timeout for Cloudera Manager starting', 'general', 'cluster',
    config_type='int', priority=1, default_value=300, is_optional=True,
    description='Timeout for Cloudera Manager starting, in seconds')
def _get_cluster_plugin_configs():
    """Return the cluster-wide (non node-group) plugin config objects."""
    return [
        CDH5_REPO_URL,
        CDH5_REPO_KEY_URL,
        CM5_REPO_URL,
        CM5_REPO_KEY_URL,
        ENABLE_SWIFT,
        ENABLE_HBASE_COMMON_LIB,
        SWIFT_LIB_URL,
        EXTJS_LIB_URL,
        AWAIT_MANAGER_STARTING_TIMEOUT,
        AWAIT_AGENTS_TIMEOUT,
    ]
# ng wide configs
def _load_json(path_to_file):
    """Read the packaged resource at *path_to_file* and parse it as JSON."""
    return json.loads(f.get_file_text(path_to_file))
# Per-service / per-role raw config descriptions, loaded once at import time
# from the packaged JSON resources and turned into p.Config objects by
# _init_configs() below.
path_to_config = 'plugins/cdh/v5/resources/'
hdfs_confs = _load_json(path_to_config + 'hdfs-service.json')
namenode_confs = _load_json(path_to_config + 'hdfs-namenode.json')
datanode_confs = _load_json(path_to_config + 'hdfs-datanode.json')
secnamenode_confs = _load_json(path_to_config + 'hdfs-secondarynamenode.json')
yarn_confs = _load_json(path_to_config + 'yarn-service.json')
resourcemanager_confs = _load_json(
    path_to_config + 'yarn-resourcemanager.json')
nodemanager_confs = _load_json(path_to_config + 'yarn-nodemanager.json')
jobhistory_confs = _load_json(path_to_config + 'yarn-jobhistory.json')
oozie_service_confs = _load_json(path_to_config + 'oozie-service.json')
oozie_role_confs = _load_json(path_to_config + 'oozie-oozie.json')
hive_service_confs = _load_json(path_to_config + 'hive-service.json')
hive_metastore_confs = _load_json(path_to_config + 'hive-metastore.json')
hive_hiveserver_confs = _load_json(path_to_config + 'hive-hiveserver2.json')
hive_webhcat_confs = _load_json(path_to_config + 'hive-webhcat.json')
hue_service_confs = _load_json(path_to_config + 'hue-service.json')
hue_role_confs = _load_json(path_to_config + 'hue-hue.json')
spark_service_confs = _load_json(path_to_config + 'spark-service.json')
spark_role_confs = _load_json(path_to_config + 'spark-history.json')
zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json')
zookeeper_service_confs = _load_json(path_to_config + 'zookeeper-service.json')
hbase_confs = _load_json(path_to_config + 'hbase-service.json')
master_confs = _load_json(path_to_config + 'hbase-master.json')
regionserver_confs = _load_json(path_to_config + 'hbase-regionserver.json')
# Names listed here get priority 1 in _init_configs(); everything else is 2.
priority_one_confs = _load_json(path_to_config + 'priority-one-confs.json')
def _prepare_value(value):
if not value:
return ""
return value.replace('\n', ' ')
def _init_configs(confs, app_target, scope):
    """Build a p.Config for every raw config dict in *confs*.

    Configs whose name appears in priority_one_confs get priority 1;
    everything else gets priority 2.
    """
    configs = []
    for raw in confs:
        configs.append(p.Config(
            raw['name'], app_target, scope,
            priority=1 if raw['name'] in priority_one_confs else 2,
            default_value=_prepare_value(raw['value']),
            description=raw['desc'],
            is_optional=True))
    return configs
def _get_ng_plugin_configs():
    """Collect every node-group-scoped plugin config, in a fixed order."""
    # (raw configs, application target, scope) — order matters and mirrors
    # the original accumulation sequence.
    groups = [
        (hdfs_confs, 'HDFS', 'cluster'),
        (namenode_confs, 'NAMENODE', 'node'),
        (datanode_confs, 'DATANODE', 'node'),
        (secnamenode_confs, 'SECONDARYNAMENODE', 'node'),
        (yarn_confs, 'YARN', 'cluster'),
        (resourcemanager_confs, 'RESOURCEMANAGER', 'node'),
        (nodemanager_confs, 'NODEMANAGER', 'node'),
        (jobhistory_confs, 'JOBHISTORY', 'node'),
        (oozie_service_confs, 'OOZIE', 'cluster'),
        (oozie_role_confs, 'OOZIE', 'node'),
        (hive_service_confs, 'HIVE', 'cluster'),
        (hive_metastore_confs, 'HIVEMETASTORE', 'node'),
        (hive_hiveserver_confs, 'HIVESERVER', 'node'),
        (hive_webhcat_confs, 'WEBHCAT', 'node'),
        (hue_service_confs, 'HUE', 'cluster'),
        (hue_role_confs, 'HUE', 'node'),
        (spark_service_confs, 'SPARK_ON_YARN', 'cluster'),
        (spark_role_confs, 'SPARK_ON_YARN', 'node'),
        (zookeeper_service_confs, 'ZOOKEEPER', 'cluster'),
        (zookeeper_server_confs, 'ZOOKEEPER', 'node'),
        (hbase_confs, 'HBASE', 'cluster'),
        (master_confs, 'MASTER', 'node'),
        (regionserver_confs, 'REGIONSERVER', 'node'),
    ]
    cfg = []
    for confs, target, scope in groups:
        cfg += _init_configs(confs, target, scope)
    return cfg
def get_plugin_configs():
    """Return every plugin config: cluster-wide configs first, then node-group ones."""
    return _get_cluster_plugin_configs() + _get_ng_plugin_configs()
def _get_config_value(cluster, key):
return cluster.cluster_configs.get(
'general', {}).get(key.name, key.default_value)
def get_cdh5_repo_url(cluster):
    """Return the configured CDH5 package repository URL."""
    return _get_config_value(cluster, CDH5_REPO_URL)
def get_cdh5_key_url(cluster):
    """Return the configured CDH5 repository signing-key URL."""
    return _get_config_value(cluster, CDH5_REPO_KEY_URL)
def get_cm5_repo_url(cluster):
    """Return the configured Cloudera Manager 5 repository URL."""
    return _get_config_value(cluster, CM5_REPO_URL)
def get_cm5_key_url(cluster):
    """Return the configured Cloudera Manager 5 signing-key URL."""
    return _get_config_value(cluster, CM5_REPO_KEY_URL)
def is_swift_enabled(cluster):
    """Whether Swift integration is enabled for the cluster."""
    return _get_config_value(cluster, ENABLE_SWIFT)
def is_hbase_common_lib_enabled(cluster):
    """Whether the HBase common lib should be installed on the cluster."""
    return _get_config_value(cluster, ENABLE_HBASE_COMMON_LIB)
def get_swift_lib_url(cluster):
    """Return the URL of the Hadoop-Swift library jar."""
    return _get_config_value(cluster, SWIFT_LIB_URL)
def get_extjs_lib_url(cluster):
    """Return the URL of the ExtJS library (required by the Oozie web UI)."""
    return _get_config_value(cluster, EXTJS_LIB_URL)
| |
"""
dj-stripe Session Model Tests.
"""
from copy import deepcopy
from unittest.mock import patch
import pytest
import stripe
from django.test import TestCase
from djstripe.models import Session
from djstripe.settings import djstripe_settings
from tests import (
FAKE_BALANCE_TRANSACTION,
FAKE_CHARGE,
FAKE_CUSTOMER,
FAKE_INVOICE,
FAKE_PAYMENT_INTENT_I,
FAKE_PAYMENT_METHOD_I,
FAKE_PRODUCT,
FAKE_SESSION_I,
FAKE_SUBSCRIPTION,
AssertStripeFksMixin,
)
pytestmark = pytest.mark.django_db
class SessionTest(AssertStripeFksMixin, TestCase):
    """Tests for syncing a Checkout Session and for its string form."""

    # NOTE: @patch decorators are applied bottom-up, so the LAST decorator
    # (PaymentIntent.retrieve) supplies the FIRST mock parameter, and the
    # first decorator (BalanceTransaction.retrieve) supplies the last one.
    @patch(
        "stripe.BalanceTransaction.retrieve",
        return_value=deepcopy(FAKE_BALANCE_TRANSACTION),
        autospec=True,
    )
    @patch(
        "stripe.Subscription.retrieve",
        return_value=deepcopy(FAKE_SUBSCRIPTION),
        autospec=True,
    )
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE), autospec=True)
    @patch(
        "stripe.PaymentMethod.retrieve",
        return_value=deepcopy(FAKE_PAYMENT_METHOD_I),
        autospec=True,
    )
    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch(
        "stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE), autospec=True
    )
    @patch(
        "stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
    )
    @patch(
        "stripe.PaymentIntent.retrieve",
        return_value=deepcopy(FAKE_PAYMENT_INTENT_I),
        autospec=True,
    )
    def test_sync_from_stripe_data(
        self,
        payment_intent_retrieve_mock,
        customer_retrieve_mock,
        invoice_retrieve_mock,
        product_retrieve_mock,
        paymentmethod_card_retrieve_mock,
        charge_retrieve_mock,
        subscription_retrieve_mock,
        balance_transaction_retrieve_mock,
    ):
        """Syncing a fake session creates it with the expected FK graph."""
        session = Session.sync_from_stripe_data(deepcopy(FAKE_SESSION_I))
        # Every FK not listed here must be populated by the sync; the listed
        # ones are expected to stay blank for this fixture.
        self.assert_fks(
            session,
            expected_blank_fks={
                "djstripe.Charge.latest_upcominginvoice (related name)",
                "djstripe.Charge.application_fee",
                "djstripe.Charge.dispute",
                "djstripe.Charge.on_behalf_of",
                "djstripe.Charge.source_transfer",
                "djstripe.Charge.transfer",
                "djstripe.Customer.coupon",
                "djstripe.Customer.default_payment_method",
                "djstripe.Customer.subscriber",
                "djstripe.Invoice.default_payment_method",
                "djstripe.Invoice.default_source",
                "djstripe.PaymentIntent.on_behalf_of",
                "djstripe.PaymentIntent.payment_method",
                "djstripe.PaymentIntent.upcominginvoice (related name)",
                "djstripe.Subscription.default_payment_method",
                "djstripe.Subscription.default_source",
                "djstripe.Subscription.pending_setup_intent",
                "djstripe.Subscription.schedule",
                "djstripe.Session.subscription",
            },
        )

    # Same decorator stack (and therefore same mock-parameter order) as above.
    @patch(
        "stripe.BalanceTransaction.retrieve",
        return_value=deepcopy(FAKE_BALANCE_TRANSACTION),
        autospec=True,
    )
    @patch(
        "stripe.Subscription.retrieve",
        return_value=deepcopy(FAKE_SUBSCRIPTION),
        autospec=True,
    )
    @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE), autospec=True)
    @patch(
        "stripe.PaymentMethod.retrieve",
        return_value=deepcopy(FAKE_PAYMENT_METHOD_I),
        autospec=True,
    )
    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch(
        "stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE), autospec=True
    )
    @patch(
        "stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
    )
    @patch(
        "stripe.PaymentIntent.retrieve",
        return_value=deepcopy(FAKE_PAYMENT_INTENT_I),
        autospec=True,
    )
    def test___str__(
        self,
        payment_intent_retrieve_mock,
        customer_retrieve_mock,
        invoice_retrieve_mock,
        product_retrieve_mock,
        paymentmethod_card_retrieve_mock,
        charge_retrieve_mock,
        subscription_retrieve_mock,
        balance_transaction_retrieve_mock,
    ):
        """str() of a synced session is "<id=...>"."""
        session = Session.sync_from_stripe_data(deepcopy(FAKE_SESSION_I))
        self.assertEqual(f"<id={FAKE_SESSION_I['id']}>", str(session))
class TestSession:
    """pytest-style tests for Session._attach_objects_post_save_hook."""

    key = djstripe_settings.SUBSCRIBER_CUSTOMER_KEY

    @pytest.mark.parametrize(
        "metadata",
        [
            {},
            {"key1": "val1", key: "random"},
        ],
    )
    # flake8: noqa (C901)
    def test__attach_objects_post_save_hook(
        self, monkeypatch, fake_user, fake_customer, metadata
    ):
        """
        Test for Checkout Session _attach_objects_post_save_hook
        """
        user = fake_user
        customer = fake_customer

        # because create_for_user method adds subscriber; clear it so the
        # hook under test is what re-attaches it.
        # FIX: this previously assigned "customer.subcriber" (typo), which
        # only created a stray attribute and left the real subscriber set,
        # making the assertion below pass vacuously.
        customer.subscriber = None
        customer.save()

        # update metadata so the hook can map the session back to the user
        if metadata.get(self.key, ""):
            metadata[self.key] = user.id

        fake_stripe_session = deepcopy(FAKE_SESSION_I)
        fake_stripe_session["metadata"] = metadata

        def mock_checkout_session_get(*args, **kwargs):
            """Monkeypatched stripe.Session.retrieve"""
            return fake_stripe_session

        def mock_customer_get(*args, **kwargs):
            """Monkeypatched stripe.Customer.retrieve"""
            fake_customer = deepcopy(FAKE_CUSTOMER)
            return fake_customer

        def mock_payment_intent_get(*args, **kwargs):
            """Monkeypatched stripe.PaymentIntent.retrieve"""
            fake_payment_intent = deepcopy(FAKE_PAYMENT_INTENT_I)
            return fake_payment_intent

        def mock_invoice_get(*args, **kwargs):
            """Monkeypatched stripe.Invoice.retrieve"""
            return deepcopy(FAKE_INVOICE)

        def mock_payment_method_get(*args, **kwargs):
            """Monkeypatched stripe.PaymentMethod.retrieve"""
            fake_payment_intent = deepcopy(FAKE_PAYMENT_METHOD_I)
            return fake_payment_intent

        def mock_subscription_get(*args, **kwargs):
            """Monkeypatched stripe.Subscription.retrieve"""
            return deepcopy(FAKE_SUBSCRIPTION)

        def mock_balance_transaction_get(*args, **kwargs):
            """Monkeypatched stripe.BalanceTransaction.retrieve"""
            return deepcopy(FAKE_BALANCE_TRANSACTION)

        def mock_product_get(*args, **kwargs):
            """Monkeypatched stripe.Product.retrieve"""
            return deepcopy(FAKE_PRODUCT)

        def mock_charge_get(*args, **kwargs):
            """Monkeypatched stripe.Charge.retrieve"""
            return deepcopy(FAKE_CHARGE)

        # monkeypatch stripe.checkout.Session.retrieve, stripe.Customer.retrieve, stripe.PaymentIntent.retrieve
        monkeypatch.setattr(
            stripe.checkout.Session, "retrieve", mock_checkout_session_get
        )
        # NOTE(review): "modify" (not "retrieve") is patched here — looks like
        # the hook updates the customer's metadata via Customer.modify; confirm.
        monkeypatch.setattr(stripe.Customer, "modify", mock_customer_get)
        monkeypatch.setattr(stripe.PaymentIntent, "retrieve", mock_payment_intent_get)

        # because of Reverse o2o field sync due to PaymentIntent.sync_from_stripe_data..
        monkeypatch.setattr(stripe.Invoice, "retrieve", mock_invoice_get)
        monkeypatch.setattr(stripe.PaymentMethod, "retrieve", mock_payment_method_get)
        monkeypatch.setattr(stripe.Subscription, "retrieve", mock_subscription_get)
        monkeypatch.setattr(
            stripe.BalanceTransaction, "retrieve", mock_balance_transaction_get
        )
        monkeypatch.setattr(stripe.Product, "retrieve", mock_product_get)
        monkeypatch.setattr(stripe.Charge, "retrieve", mock_charge_get)

        # Invoke the sync to invoke _attach_objects_post_save_hook()
        session = Session.sync_from_stripe_data(fake_stripe_session)

        # refresh self.customer from db
        customer.refresh_from_db()

        assert session.customer.id == customer.id
        assert customer.subscriber == user

        if metadata.get(self.key, ""):
            assert customer.metadata == {self.key: metadata.get(self.key)}
        else:
            assert customer.metadata == {}
| |
#!/usr/bin/env python
# coding=utf-8
"""
coding=utf-8
A utility to make handling many resumes easier by automatically pulling contact information, required skills and
custom text fields. These results are then surfaced as a convenient summary CSV.
"""
import functools
import logging
import re
import sys
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import requests
import pandas as pd
# Python 2 only: re-expose the (deleted) setdefaultencoding hook so the rest
# of the module can mix byte and unicode strings without explicit decoding.
# NOTE(review): well-known hack; remove when porting to Python 3.
reload(sys)
sys.setdefaultencoding('utf8')
# Flask application and its flask-restful API wrapper.
app = Flask(__name__)
api = Api(app)
logging.basicConfig(level=logging.DEBUG)
'''
def main():
"""
Main method for ResumeParser. This utility will:
- Read in `data_path` and `output_path` from command line arguments
- Create a list of documents to scan
- Read the text from those documents
- Pull out desired information (e.g. contact info, skills, custom text fields)
- Output summary CSV
:return: None
:rtype: None
"""
logging.info('Begin Main')
url = 'http://achung.me/RahulSabnisResume-1.png'
resume_df = create_resume_df(url)
# Output to CSV
writer = pd.ExcelWriter('output.xlsx')
resume_df.to_excel(writer, 'Sheet1')
writer.save()
logging.info('End Main')
'''
def check_phone_number(string_to_search):
    """
    Find first phone number in the string_to_search

    :param string_to_search: A string to check for a phone number in
    :type string_to_search: str
    :return: A string with the digit groups joined by dashes (e.g.
        "415-555-1234", or "555-1234" when the area code is absent), or
        None if no phone number is found.
    :rtype: str
    """
    try:
        regular_expression = re.compile(r"\(?"  # open parenthesis
                                        r"(\d{3})?"  # area code
                                        r"\)?"  # close parenthesis
                                        r"[\s\.-]{0,2}?"  # area code, phone separator
                                        r"(\d{3})"  # 3 digit exchange
                                        r"[\s\.-]{0,2}"  # separator between 3 digit exchange, 4 digit local
                                        r"(\d{4})",  # 4 digit local
                                        re.IGNORECASE)
        result = re.search(regular_expression, string_to_search)
        if result:
            # FIX: the area-code group is optional and may be None; the old
            # "-".join(result.groups()) raised TypeError (and returned None)
            # for numbers without an area code. Drop empty groups instead.
            return "-".join(group for group in result.groups() if group)
    except Exception as exception_instance:  # py2.6+/py3 "as" syntax (was "except Exception, x")
        # str() guards against non-string input breaking the log line itself.
        logging.error('Issue parsing phone number: ' + str(string_to_search)
                      + str(exception_instance))
    return None
def check_email(string_to_search):
    """
    Find first email address in the string_to_search

    :param string_to_search: A string to check for an email address in
    :type string_to_search: str
    :return: A string containing the first email address, or None if no email address is found.
    :rtype: str
    """
    try:
        regular_expression = re.compile(r"[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}", re.IGNORECASE)
        result = re.search(regular_expression, string_to_search)
        if result:
            return result.group()
    except Exception as exception_instance:  # py2.6+/py3 "as" syntax
        # FIX: log message said "email number"; also str()-wrap the input.
        logging.error('Issue parsing email: ' + str(string_to_search) + str(exception_instance))
    return None
def check_address(string_to_search):
    """
    Find first physical address in the string_to_search

    Only matches California-style addresses: digits, then free text, ending
    in the token "CA".

    :param string_to_search: A string to check for a physical address in
    :type string_to_search: str
    :return: A string containing the first address, or None if no physical address is found.
    :rtype: str
    """
    try:
        regular_expression = re.compile(r"[0-9]+ [a-z0-9,\.# ]+\bCA\b", re.IGNORECASE)
        result = re.search(regular_expression, string_to_search)
        if result:
            return result.group()
    except Exception as exception_instance:  # py2.6+/py3 "as" syntax
        # FIX: copy-pasted log message said "email number"; this is an address.
        logging.error('Issue parsing address: ' + str(string_to_search) + str(exception_instance))
    return None
def term_count(string_to_search, term):
    """
    A utility function which counts the number of times `term` occurs in `string_to_search`

    :param string_to_search: A string which may or may not contain the term.
    :type string_to_search: str
    :param term: The term to search for the number of occurrences for
    :type term: str
    :return: The number of times the `term` occurs in the `string_to_search`
    :rtype: int
    """
    try:
        # *term* is treated as a regex pattern, matched case-insensitively.
        regular_expression = re.compile(term, re.IGNORECASE)
        result = re.findall(regular_expression, string_to_search)
        return len(result)
    except Exception as exception_instance:  # py2.6+/py3 "as" syntax
        logging.error('Issue parsing term: ' + str(term) + ' from string: ' + str(
            string_to_search) + ': ' + str(exception_instance))
        return 0
def term_match(string_to_search, term):
    """
    A utility function which return the first match to the `regex_pattern` in the `string_to_search`

    :param string_to_search: A string which may or may not contain the term.
    :type string_to_search: str
    :param term: The term to search for the number of occurrences for
    :type term: str
    :return: The first match of the `regex_pattern` in the `string_to_search`,
        or None when there is no match. If *term* has exactly one capture
        group, the group text is returned (re.findall semantics).
    :rtype: str
    """
    try:
        regular_expression = re.compile(term, re.IGNORECASE)
        result = re.findall(regular_expression, string_to_search)
        # No-match case: result[0] raises IndexError, handled below → None.
        return result[0]
    except Exception as exception_instance:  # py2.6+/py3 "as" syntax
        logging.error('Issue parsing term: ' + str(term) + ' from string: ' +
                      str(string_to_search) + ': ' + str(exception_instance))
        return None
def get_text_image(url):
    """OCR the image at *url* via the Google Cloud Vision TEXT_DETECTION API.

    Returns the detected full text with newlines flattened to spaces.
    Raises KeyError if the Vision response contains no fullTextAnnotation
    (e.g. no text detected or an API error payload).

    NOTE(review): the API key is hard-coded in source — it should be moved
    to configuration/environment and the committed key revoked.
    """
    URL = "https://vision.googleapis.com/v1/images:annotate?key="
    API = "AIzaSyAOrix8jeIDIJ9wVNufSqmIwCAgskcEoA8"
    # Single-image annotate request pointing Vision at the remote image URI.
    payload = {
        "requests": [
            {
                "image": {
                    "source": {
                        "imageUri":
                            url
                    }
                },
                "features": [
                    {
                        "type": "TEXT_DETECTION"
                    }
                ]
            }
        ]
    }
    r = requests.post(URL + API, json=payload)
    j = r.json()
    text = j['responses'][0]['fullTextAnnotation']['text']
    # Downstream regexes assume a single line of text.
    return text.replace('\n', ' ')
def create_resume_df(url):
    """
    This function creates a Pandas DF with one row for the resume image at
    *url*, and columns including the resume's raw text, contact details and
    per-skill keyword counts.

    This is achieved through the following steps:

     - OCR the image via get_text_image
     - Pull out desired information (e.g. contact info, skills, custom text fields)

    :param url: URL of a resume image to OCR and summarize.
    :type url: str
    :return: A Pandas DF with one row for the input resume
    :rtype: pd.DataFrame
    """
    resume_summary_df = pd.DataFrame()

    # Store metadata, raw text, and word count
    resume_summary_df["raw_text"] = [get_text_image(url)]
    resume_summary_df["num_words"] = resume_summary_df["raw_text"].apply(lambda x: len(x.split()))

    # Scrape contact information
    resume_summary_df["phone_number"] = resume_summary_df["raw_text"].apply(check_phone_number)
    resume_summary_df["area_code"] = resume_summary_df["phone_number"].apply(
        functools.partial(term_match, term=r"\d{3}"))
    resume_summary_df["email"] = resume_summary_df["raw_text"].apply(check_email)
    resume_summary_df["email_domain"] = resume_summary_df["email"].apply(functools.partial(term_match, term=r"@(.+)"))
    resume_summary_df["address"] = resume_summary_df["raw_text"].apply(check_address)
    resume_summary_df["linkedin"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"linkedin"))
    resume_summary_df["github"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"github"))

    # Scrape education information
    resume_summary_df["phd"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"ph.?d.?"))

    # Scrape skill information
    resume_summary_df["java_count"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"java"))
    resume_summary_df["python_count"] = resume_summary_df["raw_text"].apply(
        functools.partial(term_count, term=r"python"))
    resume_summary_df["R_count"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r" R[ ,]"))
    resume_summary_df["latex_count"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"latex"))
    resume_summary_df["stata_count"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"stata"))
    resume_summary_df["CS_count"] = resume_summary_df["raw_text"].apply(
        functools.partial(term_count, term=r"computer science"))
    resume_summary_df["mysql_count"] = resume_summary_df["raw_text"].apply(functools.partial(term_count, term=r"mysql"))
    resume_summary_df["ms_office"] = resume_summary_df["raw_text"].apply(
        functools.partial(term_count, term=r"microsoft office"))
    resume_summary_df["analytics"] = resume_summary_df["raw_text"].apply(
        functools.partial(term_count, term=r"analytics"))

    # Return enriched DF
    # FIX: was the Python-2-only "print resume_summary_df" statement; the
    # function-call form below behaves identically on Python 2 and 3.
    print(resume_summary_df)
    return resume_summary_df
# Request parser for the /parse endpoint; expects a single "url" argument.
parser = reqparse.RequestParser()
parser.add_argument('url')
class Parse(Resource):
    """REST resource that summarizes a resume image given its URL."""

    def post(self):
        """Parse the resume at the posted ``url``.

        Returns (json, 201) on success, or ("invalid url?", 400) when the
        URL is missing or processing fails.
        """
        args = parser.parse_args()
        try:
            url = args['url']
            resume_df = create_resume_df(url)
            return resume_df.to_json(), 201
        except Exception:
            # FIX: was a bare "except:" that silently trapped everything,
            # including SystemExit/KeyboardInterrupt, and hid the real error.
            logging.exception('Failed to parse resume for args: %s', args)
            return "invalid url?", 400
# Expose the parser at POST /parse.
api.add_resource(Parse, '/parse')
if __name__ == '__main__':
    app.run(debug=True)
| |
# Copyright (c) 2017 David Sorokin <david.sorokin@gmail.com>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
import os
from simulation.aivika.modeler.model_project import generate_cabal_file_impl
from simulation.aivika.modeler.model_project import generate_stack_file_impl
from simulation.aivika.modeler.model_project import generate_license_file_impl
from simulation.aivika.modeler.model_project import generate_readme_file_impl
from simulation.aivika.modeler.model_project import generate_setup_file_impl
from simulation.aivika.modeler.model_project import generate_lib_file_impl
class ModelException(Exception):
    """Raised when something is invalid when creating or processing the model."""

    def __init__(self, message):
        """Initializes a new instance with the given error *message*."""
        # Pass the message to Exception so args/str() follow the standard
        # exception protocol; also keep the legacy .message attribute.
        super(ModelException, self).__init__(message)
        self.message = message


class InvalidVariableException(ModelException):
    """Raised when the variable is invalid."""

    def __init__(self, message):
        """Initializes a new instance."""
        # Idiomatic super() call instead of ModelException.__init__(self, ...).
        super(InvalidVariableException, self).__init__(message)
class Model:
    """Abstract base type for simulation models; concrete models subclass it."""
class MainModel(Model):
    """The main simulation model.

    Accumulates Haskell pragmas, package/module imports, generated monadic
    actions and result sources, then can generate a stack-based Haskell
    (Aivika) project on disk, compile it and run it.
    """
    def __init__(self, base_comp = None):
        """Initializes a new simulation model.

        :param base_comp: optional basic computation type for the
            aivika-transformers variant; None means the plain IO model.
        """
        self._base_comp = base_comp
        self._pragmas = set()             # Haskell LANGUAGE pragmas
        self._package_imports = set()     # cabal build-depends package names
        self._package_locations = set()   # extra package locations (stack.yaml)
        self._extra_deps = set()          # extra-deps entries (stack.yaml)
        self._module_imports = set()      # Haskell "import ..." lines
        self._actions = []                # generated statements, in emission order
        self._sources = []                # result sources, in emission order
        self._var_names = set()           # variables already defined
        self._lazy_var_names = set()      # variables referenced before definition
        self._ports = set()               # ports checked by require_complete
        self._transact_types = set()      # transact data types to emit
        self._add_defaults()
    def _add_defaults(self):
        """Add the defaults."""
        # RecursiveDo enables the generated "mdo" block (forward references).
        self._pragmas.add('{-# LANGUAGE RecursiveDo #-}')
        self._package_imports.add('aivika')
        self._package_imports.add('aivika-transformers')
        self._extra_deps.add('aivika-5.2')
        self._extra_deps.add('aivika-transformers-5.2')
        # Plain models import Simulation.Aivika; transformer-based models
        # import the .Trans variant instead.
        if self._base_comp is None:
            self._module_imports.add('import Simulation.Aivika')
        else:
            self._module_imports.add('import Simulation.Aivika.Trans')
        self._module_imports.add('import Data.Monoid')
        self._module_imports.add('import Data.Functor')
        self._module_imports.add('import Control.Arrow')
        self._module_imports.add('import Control.Monad')
    def get_main_model(self):
        """Return the main model."""
        return self
    def get_base_comp(self):
        """Return the basic computation type."""
        return self._base_comp
    def get_var_prefix(self):
        """Return the variable prefix."""
        return ''
    def is_source_prefix_mangled(self):
        """Whether the source name prefix is mangled."""
        return False
    def get_source_prefix(self):
        """Return the source name prefix."""
        return ''
    def add_pragma(self, pragma):
        """Add the specified pragma."""
        self._pragmas.add(pragma)
    def add_package_import(self, package):
        """Add the specified package to import."""
        self._package_imports.add(package)
    def add_package_location(self, package_location):
        """Add the specified package location."""
        self._package_locations.add(package_location)
    def add_extra_dep(self, extra_dep):
        """Add the specified extra dependency."""
        self._extra_deps.add(extra_dep)
    def add_module_import(self, module):
        """Add the specified module to import."""
        self._module_imports.add(module)
    def add_var(self, name, comp):
        """Add a new variable with the specified definition.

        :raises InvalidVariableException: if *name* is already defined.
        """
        if name in self._var_names:
            raise InvalidVariableException('Variable ' + name + ' is already defined')
        elif name in self._lazy_var_names:
            # The variable was promised earlier via add_lazy_var; defining
            # it now resolves the promise.
            action = name + ' <- ' + comp
            self._lazy_var_names.remove(name)
            self._var_names.add(name)
            self.add_action(action)
        else:
            action = name + ' <- ' + comp
            self._var_names.add(name)
            self.add_action(action)
    def add_lazy_var(self, name):
        """Add a new variable that will be defined lazily.

        :raises InvalidVariableException: if *name* is already defined or
            already promised.
        """
        if name in self._var_names:
            raise InvalidVariableException('Variable ' + name + ' is already defined')
        elif name in self._lazy_var_names:
            raise InvalidVariableException('Variable ' + name + ' is already added as lazy')
        else:
            self._lazy_var_names.add(name)
    def add_action(self, action):
        """Add the specified action."""
        self._actions.append(action)
    def add_port(self, port):
        """Add the specified port for completeness test."""
        self._ports.add(port)
    def add_result_source(self, source):
        """Add the specified result source."""
        self._sources.append(source)
    def add_transact_type(self, transact_type):
        """Add the specified transact type."""
        self._transact_types.add(transact_type)
    def require_complete(self):
        """Require the model to be complete.

        :raises InvalidVariableException: if some lazy variable is still
            undefined, or some port is not fully bound.
        """
        if len(self._lazy_var_names) > 0:
            # Report the first unresolved lazy variable.
            for name in self._lazy_var_names:
                raise InvalidVariableException('Variable ' + name + ' is used but not defined')
        for port in self._ports:
            if not port.is_bound_to_input():
                raise InvalidVariableException('Variable ' + port.get_name() + ' must be bound to its input')
            if not port.is_bound_to_output():
                raise InvalidVariableException('Variable ' + port.get_name() + ' must be bound to its output')
    def run(self, specs, experiment = None, dirname = 'target'):
        """Generate, compile and run the project; return the exit status."""
        self.generate(specs = specs, experiment = experiment, dirname = dirname)
        cwd = os.getcwd()
        os.chdir(dirname)
        status = os.system('stack build')
        if status == 0:
            status = os.system('stack exec modeling-project-exe')
        os.chdir(cwd)
        # Open the experiment results only when the run itself succeeded.
        if (status == 0) and (not (experiment is None)):
            experiment.open()
        return status
    def compile(self, specs, experiment = None, dirname = 'target'):
        """Generate and compile the project."""
        self.generate(specs = specs, experiment = experiment, dirname = dirname)
        cwd = os.getcwd()
        os.chdir(dirname)
        status = os.system('stack build')
        os.chdir(cwd)
        return status
    def generate(self, specs, experiment = None, dirname = 'target'):
        """Generate the project files."""
        # Standard stack project layout: app/ for Main.hs, src/ for Lib.hs.
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if not os.path.exists(dirname + '/app'):
            os.makedirs(dirname + '/app')
        if not os.path.exists(dirname + '/src'):
            os.makedirs(dirname + '/src')
        # Let the experiment register its own dependencies before generation.
        if not (experiment is None):
            experiment.install(self)
        self._generate_model(specs, experiment, dirname + '/app/Main.hs')
        generate_cabal_file_impl(self, dirname + '/modeling-project.cabal')
        generate_stack_file_impl(self, dirname + '/stack.yaml')
        generate_license_file_impl(dirname + '/LICENSE.txt')
        generate_readme_file_impl(dirname + '/README.md')
        generate_setup_file_impl(dirname + '/Setup.hs')
        generate_lib_file_impl(dirname + '/src/Lib.hs')
    def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):
        """Generate the model file."""
        with open(filename, "w") as file:
            self._write_model(file, specs, experiment = experiment)
    def _write_model(self, file, specs, experiment = None):
        """Write the model file."""
        self.require_complete()
        # Pragmas first, then imports, then specs, types, model, and main.
        for pragma in self._pragmas:
            file.write(pragma)
            file.write('\n')
        if len(self._pragmas) > 0:
            file.write('\n')
        file.write('-- NOTE: This file was auto-generated by aivika-modeler 1.0\n')
        file.write('\n')
        for module_import in self._module_imports:
            file.write(module_import)
            file.write('\n')
        if len(self._module_imports) > 0:
            file.write('\n')
        file.write('specs =\n')
        specs.write(file, '  ')
        file.write('\n')
        self._write_transact_types(file)
        self._write_model_def(file)
        file.write('\n')
        # Without an experiment, emit a default main that prints the results
        # in stop time; otherwise let the experiment emit its own main.
        if experiment is None:
            file.write('main =\n')
            file.write('  printSimulationResultsInStopTime\n')
            file.write('    printResultSourceInEnglish\n')
            file.write('    model specs\n')
            file.write('\n')
        else:
            experiment.write(file)
            file.write('\n')
    def _write_model_def(self, file):
        """Write the model definition in the file."""
        file.write('model =')
        file.write('\n')
        self._write_model_code(file, '  ')
    def _write_model_code(self, file, indent = ''):
        """Write the code in the file."""
        file.write(indent)
        # "mdo" (RecursiveDo) lets generated actions forward-reference
        # variables defined later in the block.
        file.write('mdo --')
        file.write('\n')
        indent2 = indent + '  '
        for action in self._actions:
            file.write(indent2)
            file.write(action)
            file.write('\n')
        file.write(indent2)
        file.write('return $\n')
        file.write(indent2)
        file.write('  results\n')
        self._write_sources(file, indent2 + '    ')
        file.write('\n')
    def _write_sources(self, file, indent):
        """Write the result source list in file."""
        # Emit a Haskell list literal, one source per line.
        file.write(indent)
        file.write('[')
        first = True
        for source in self._sources:
            if first:
                first = False
                file.write(source)
            else:
                file.write(',\n')
                file.write(indent)
                file.write(' ')
                file.write(source)
        file.write(']')
    def _write_transact_types(self, file):
        """Add the transact types."""
        for tp in self._transact_types:
            tp.write(file)
            file.write('\n')
class SubModel(Model):
    """The sub-model.

    A thin namespace over the main model: it contributes a unique variable
    prefix and a source-name prefix, and delegates all registration calls
    to the main model.
    """
    # Class-wide counter used to build unique variable prefixes.
    _next_id = 1
    def __init__(self, model, name = None):
        """Initializes a new sub-model.

        :param model: the parent model (main model or another sub-model).
        :param name: optional human-readable name; when absent the source
            prefix is mangled (derived from the unique variable prefix).
        """
        self._main_model = model.get_main_model()
        self._model = model
        self._name = name
        self._var_prefix = '_sub_' + str(SubModel._next_id)
        SubModel._next_id += 1
        # A nameless sub-model — or one under an already-mangled parent —
        # cannot build a readable dotted prefix, so fall back to the
        # unique variable prefix.
        if (name is None) or model.is_source_prefix_mangled():
            self._source_prefix_mangled = True
            self._source_prefix = self._var_prefix
        else:
            self._source_prefix_mangled = False
            self._source_prefix = model.get_source_prefix() + name + '.'
    def get_main_model(self):
        """Return the main model."""
        return self._main_model
    def get_parent_model(self):
        """Return the parent model."""
        return self._model
    def get_base_comp(self):
        """Get the basic computation type."""
        return self._main_model.get_base_comp()
    def get_var_prefix(self):
        """Return the variable prefix."""
        return self._var_prefix
    def is_source_prefix_mangled(self):
        """Whether the source name prefix is mangled."""
        return self._source_prefix_mangled
    def get_source_prefix(self):
        """Return the source name prefix."""
        return self._source_prefix
    def add_pragma(self, pragma):
        """Add the specified pragma (delegates to the main model)."""
        self._main_model.add_pragma(pragma)
    def add_package_import(self, package):
        """Add the specified package to import (delegates to the main model)."""
        self._main_model.add_package_import(package)
    def add_package_location(self, package_location):
        """Add the specified package location (delegates to the main model)."""
        self._main_model.add_package_location(package_location)
    def add_extra_dep(self, extra_dep):
        """Add the specified extra dependency (delegates to the main model)."""
        self._main_model.add_extra_dep(extra_dep)
    def add_module_import(self, module):
        """Add the specified module to import (delegates to the main model)."""
        self._main_model.add_module_import(module)
    def add_var(self, name, comp):
        """Add a new variable with the specified definition (delegates to the main model)."""
        self._main_model.add_var(name, comp)
    def add_lazy_var(self, name):
        """Add a new variable that will be defined lazily (delegates to the main model)."""
        self._main_model.add_lazy_var(name)
    def add_action(self, action):
        """Add the specified action (delegates to the main model)."""
        self._main_model.add_action(action)
    def add_port(self, port):
        """Add the specified port for completeness test (delegates to the main model)."""
        self._main_model.add_port(port)
    def add_result_source(self, source):
        """Add the specified result source (delegates to the main model)."""
        self._main_model.add_result_source(source)
    def add_transact_type(self, transact_type):
        """Add the specified transact type (delegates to the main model)."""
        self._main_model.add_transact_type(transact_type)
| |
# The MIT License
#
# Copyright (c) 2007 Aldo Cortesi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys, itertools, copy
def _isStringLike(anobj):
try:
# Avoid succeeding expensively if anobj is large.
anobj[:0]+''
except:
return 0
else:
return 1
def _isSequenceLike(anobj):
if not hasattr(anobj, "next"):
if _isStringLike(anobj):
return 0
try:
anobj[:0]
except:
return 0
return 1
class Tree(object):
"""
A simple implementation of an ordered tree
"""
def __init__(self, children = None):
"""
:children A nested list specifying a tree of children
"""
self.children = []
if children:
self.addChildrenFromList(children)
self.parent = None
    def addChildrenFromList(self, children):
        """
        Add children to this node.

        :children A nested list specifying a tree of children

        The list format is [node1, [grandchild, ...], node2, ...]: a
        sequence directly following a node holds that node's own children.
        """
        # Pair each element with its successor: (None, c0), (c0, c1), ...,
        # (cN, None). The leading (None, c0) pair is skipped via the flag.
        skip = True
        v = zip(
            itertools.chain([None], children),
            itertools.chain(children, [None])
        )
        for i in v:
            if skip:
                # Skip the first pair, and any pair whose head is a child
                # list already consumed by the node that preceded it.
                skip = False
                continue
            self.addChild(i[0])
            if _isSequenceLike(i[1]):
                # The successor is a nested child list: attach it to the
                # node just added and skip it on the next iteration.
                i[0].addChildrenFromList(i[1])
                skip = True
def addChild(self, node):
"""
Add a child to this node.
:child A Tree object
"""
if not isinstance(node, Tree):
s = "Invalid tree specification: %s is not a Tree object."%repr(node)
raise ValueError(s)
self.children.append(node)
node.register(self)
    def register(self, parent):
        """
        Called after a node has been added to a parent.

        :child A Tree object

        Subclasses may override this hook; the base implementation only
        records the back-reference to the parent.
        """
        self.parent = parent
def index(self):
"""
Return the index of this node in the parent child list, based on
object identity.
"""
if not self.parent:
raise ValueError("Can not retrieve index of a node with no parent.")
lst = [id(i) for i in self.parent.children]
return lst.index(id(self))
def remove(self):
"""
Remove this node from its parent. Returns the index this node had
in the parent child list.
"""
idx = self.index()
del self.parent.children[idx:idx+1]
self.parent = None
return idx
def clear(self):
"""
Clear all the children of this node. Return a list of the removed
children.
"""
n = self.children[:]
for i in n:
i.remove()
return n
def replace(self, *nodes):
"""
Replace this node with a sequence of other nodes. This is
equivalent to deleting this node from the child list, and then
inserting the specified sequence in its place.
:nodes A sequence of Tree objects
"""
parent = self.parent
idx = self.remove()
parent.children[idx:idx] = nodes
for i in nodes:
i.register(parent)
    def reparent(self, node):
        """
        Inserts a node between the current node and its parent. Returns the
        specified parent node.

        :node A Tree object
        """
        # Splice *node* into our old slot, then hang ourselves under it.
        self.replace(node)
        node.addChild(self)
        return node
def isDescendantOf(self, node):
"""
Returns true if the specified node lies on the path to the root
from this node.
:node A Tree object
"""
return (node in self.pathToRoot())
def siblings(self):
"""
Generator yielding all siblings of this node, including this
node itself.
"""
if not self.parent:
yield self
else:
for i in self.parent.children:
yield i
def pathToRoot(self):
"""
Generator yielding all objects on the path from this node to the
root of the tree, including this node itself.
"""
itm = self
while 1:
yield itm
if itm.parent is not None:
itm = itm.parent
else:
break
def pathFromRoot(self):
"""
Generator yielding all nodes on the path to this node from the
root of the tree, including this node itself.
"""
l = list(self.pathToRoot())
for i in reversed(l):
yield i
    def getRoot(self):
        """
        Return the topmost node in the tree.
        """
        # Exhaust the upward traversal; the loop variable remains bound
        # to the last node yielded, i.e. the root.  pathToRoot always
        # yields at least ``self``, so ``i`` is always bound here.
        for i in self.pathToRoot():
            pass
        return i
def preOrder(self):
"""
Return a list of subnodes in PreOrder.
"""
yield self
# Take copy to make this robust under modification
for i in self.children[:]:
for j in i.preOrder():
yield j
def postOrder(self):
"""
Return a list of the subnodes in PostOrder.
"""
# Take copy to make this robust under modification
for i in self.children[:]:
for j in i.postOrder():
yield j
yield self
def _find(self, itr, *func, **kwargs):
for i in itr:
if kwargs:
kwpass = False
for k, v in kwargs.items():
if hasattr(i, k):
if not getattr(i, k) == v:
break
else:
break
else:
kwpass = True
else:
kwpass = True
if kwpass:
if all(map(lambda x: x(i), func)):
return i
return None
    def findChild(self, *func, **kwargs):
        """
        Find the first child matching all specified selectors in a
        pre-order traversal of this node's subnodes. Return None if no
        matching object is found.
        :func A list of selector functions, that accept a node, and return
        a boolean.
        :kwargs A dictionary of attribute selectors. Checks that matching
        attributes exist, and that their values are equal to the specified
        values.
        """
        # Note: preOrder() yields this node first, so the node itself
        # can be the match.
        return self._find(self.preOrder(), *func, **kwargs)
    def findParent(self, *func, **kwargs):
        """
        Find the first node matching func in a traversal to the root of the
        tree. Return None if no matching object is found.
        :func A list of selector functions, that accept a node, and return
        a boolean.
        :kwargs A dictionary of attribute selectors. Checks that matching
        attributes exist, and that their values are equal to the specified
        values.
        """
        # islice(..., 1, None) drops the first yielded node (this node
        # itself) so only true ancestors are considered.
        return self._find(
            itertools.islice(self.pathToRoot(), 1, None),
            *func,
            **kwargs
        )
    def findForwards(self, *func, **kwargs):
        """
        Search forwards in a preOrder traversal of the whole tree (not this
        node's subnodes). Return None if object not found.
        :func A list of selector functions, that accept a node, and return
        a boolean.
        :kwargs A dictionary of attribute selectors. Checks that matching
        attributes exist, and that their values are equal to the specified
        values.
        """
        # Advance a shared traversal iterator just past this node; the
        # remainder of the same iterator then yields exactly the nodes
        # that follow it in pre-order.
        itr = self.getRoot().preOrder()
        for i in itr:
            if i is self:
                break
        return self._find(itr, *func, **kwargs)
    def findBackwards(self, *func, **kwargs):
        """
        Search backwards in a preOrder traversal of the whole tree (not
        this node's subnodes). Return None if object not found.
        :func A list of selector functions, that accept a node, and return
        a boolean.
        :kwargs A dictionary of attribute selectors. Checks that matching
        attributes exist, and that their values are equal to the specified
        values.
        """
        # FIXME: Dreadfully inefficient...
        # Materialises the whole traversal, reverses it, and scans from
        # the element after this node, i.e. its pre-order predecessors.
        lst = list(self.getRoot().preOrder())
        lst.reverse()
        myIndex = lst.index(self)
        return self._find(lst[(myIndex+1):], *func, **kwargs)
def getPrevious(self):
"""
Find the previous node in the preOrder traversal of the tree.
"""
return self.findBackwards(lambda x: 1)
def getNext(self):
"""
Find the next node in the preOrder traversal of the tree.
"""
return self.findForwards(lambda x: 1)
def getDepth(self):
"""
Return the depth of this node, i.e. the number of nodes on the path
to the root.
"""
return len(list(self.pathToRoot()))
def findAttr(self, attr, default=None):
"""
Traverses the path to the root of the tree, looking for the
specified attribute. If it is found, return it, else return default.
:attr A string attribute name
:default Arbitrary default return value
"""
for i in self.pathToRoot():
if hasattr(i, attr):
return getattr(i, attr)
return default
def attrsToRoot(self, attr):
"""
Traverses the path from this node to the root of the tree, and
yields a value for each attribute. Nodes that do not have the
attribute and attribute values that test false are ignored.
:attr A string attribute name
"""
lst = []
for i in self.pathToRoot():
v = getattr(i, attr, None)
if v:
yield v
@staticmethod
def treeProp(name):
"""
Define a property whose value should be looked up on nodes between
this node and the root, inclusive. Returns the first matching
attribute. Raises ValueError if no matching attribute is found.
:name Property name
"""
def fget(self):
if self.__dict__.has_key(name):
return self.__dict__[name]
else:
if not self.parent:
raise ValueError, "Property %s not defined."%name
return getattr(self.parent, name)
def fset(self, value):
self.__dict__[name] = value
return property(fget, fset)
def dump(self, outf=sys.stdout):
"""
Dump a formatted representation of this tree to the specified file
descriptor.
:outf Output file descriptor.
"""
for i in self.preOrder():
print >> outf, "\t"*(i.getDepth()-1), repr(i)
def count(self):
"""
Number of nodes in this tree, including the root.
"""
return len(list(self.preOrder()))
def constructFromList(lst):
    """
    Build trees from a nested list of Tree objects.
    :lst a nested list of Tree objects
    Returns a list consisting of the nodes at the base of each tree. Trees
    are constructed "bottom-up", so all parent nodes for a particular node
    are guaranteed to exist when "addChild" is run.
    Raises ValueError if a nested sequence appears first or directly after
    another nested sequence, since it then has no node to attach to.
    """
    heads = []
    for i, val in enumerate(lst):
        if _isSequenceLike(val):
            if i == 0 or _isSequenceLike(lst[i-1]):
                # Fix: ``raise X, msg`` is Python-2-only syntax; the call
                # form raises the identical exception on both 2 and 3.
                raise ValueError("constructFromList: Invalid list.")
            lst[i-1].addChildrenFromList(val)
        else:
            heads.append(val)
    return heads
| |
#!/usr/bin/python3
# cloudpackages.py
#
# This script operates on *.product files for SUSE OpenStack cloud and will
# attempt to determine the provenance of all packages pulled in by a particular
# product. Alternatively, packages can be supplied on the command line using
# the -u option. For this script to work correctly, you need to:
#
# 1) Run it on an IBS working copy of a _product package (not needed for -u).
# 2) Have a working osc configuration that can be used against api.suse.de.
#
# usage:
#
# cloudpackages.py <product file> [ ... <product file> ]
# cloudpackages.py -u <project> <package> [ ... <package> ]
from __future__ import print_function
import optparse
import os
import re
import sys
import osc.conf
import osc.core
try:
from xml.etree import cElementTree as ET
except ImportError:
import cElementTree as ET
# Packages that cause trouble for some reason (usually conflicts) and that get
# their own buildinfo run. Full names are not needed: any package listed here
# will be matched using str.startswith().
# Keys of _BLACKLIST are SUSE OpenStack Cloud major release numbers; the
# values are package-name prefixes (matched via str.startswith) excluded
# from the generated spec in get_buildinfo().
_BLACKLIST = {
    7: [
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-admin",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-compute",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-controller",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-network",
        # Conflicts with ansible
        "ansible1",
        # nothing provides ardana-ceph
        "ardana-ceph",
        # nothing provides ardana-cephlm
        "ardana-cephlm",
        # nothing provides ardana-cinderlm
        "ardana-cinderlm",
        # nothing provides ardana-ui-common
        "ardana-ui-common",
        # nothing provides ardana-vmfactory
        "ardana-vmfactory",
        # provider ardana-ses obsoletes ardana-extensions-ses
        "ardana-extensions-ses",
        # Conflicts with SUSE branding
        "crowbar-core-branding-upstream",
        # Obsoleted by documentation-* packages
        "suse-openstack-cloud-upstream",
        # Obsoleted by documentation-* packages
        "suse-openstack-cloud-user",
        # nothing provides mongodb
        "mongodb",
        # provider python-docker obsoletes python-docker-py
        "python-docker-py",
        # python-pycryptodome conflicts with python-pycrypto
        "python-pycryptodome",
        # nothing provides ruby2.1-rubygem-bson-1_11
        "ruby2.1-rubygem-bson",
        # nothing provides ruby2.1-rubygem-mongo
        "ruby2.1-rubygem-mongo",
        # unresolvable: nothing provides python-urllib3 >= 1.20 needed by
        # python-botocore, (got version 1.16-3.9.2)
        "openstack-ec2-api",
        # unresolvable: nothing provides python-urllib3 >= 1.20 needed by
        # python-botocore, (got version 1.16-3.9.2)
        "python-ec2-api",
    ],
    8: [
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-admin",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-compute",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-controller",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-network",
        # Conflicts with ansible
        "ansible1",
        # nothing provides ardana-ceph
        "ardana-ceph",
        # nothing provides ardana-cephlm
        "ardana-cephlm",
        # nothing provides ardana-cinderlm
        "ardana-cinderlm",
        # nothing provides ardana-ui-common
        "ardana-ui-common",
        # nothing provides ardana-vmfactory
        "ardana-vmfactory",
        # provider ardana-ses obsoletes ardana-extensions-ses
        "ardana-extensions-ses",
        # Conflicts with SUSE branding
        "crowbar-core-branding-upstream",
        # Obsoleted by documentation-* packages
        "suse-openstack-cloud-upstream",
        # Obsoleted by documentation-* packages
        "suse-openstack-cloud-user",
        # nothing provides mongodb
        "mongodb",
        # provider python-docker obsoletes python-docker-py
        "python-docker-py",
        # python-pycryptodome conflicts with python-pycrypto
        "python-pycryptodome",
        # nothing provides ruby2.1-rubygem-bson-1_11
        "ruby2.1-rubygem-bson",
        # nothing provides ruby2.1-rubygem-mongo
        "ruby2.1-rubygem-mongo",
    ],
    9: [
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-admin",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-compute",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-controller",
        # have choice for product_flavor(suse-openstack-cloud-crowbar)
        # needed by suse-openstack-cloud-crowbar-release:
        # suse-openstack-cloud-crowbar-release-POOL
        # suse-openstack-cloud-crowbar-release-cd
        "patterns-cloud-network",
        # Conflicts with SUSE branding
        "crowbar-core-branding-upstream",
        # unresolvable: nothing provides octavia-test Devel:Cloud:9{,:Staging}
        "octavia-test",
    ],
}
# Mapping of IBS project to SUSE OpenStack Cloud release.
# Matched by prefix in get_cloud_version().
_PROJECT_VERSION = {
    "Devel:Cloud:7": 7,
    "Devel:Cloud:8": 8,
    "Devel:Cloud:9": 9,
    "SUSE:SLE-12-SP2:Update:Products:Cloud7": 7,
    "SUSE:SLE-12-SP3:Update:Products:Cloud8": 8,
    "SUSE:SLE-12-SP4:Update:Products:Cloud9": 9,
}
# Mapping of IBS project to repository to use.
# Matched by prefix in get_repository().
_PROJECT_REPO = {
    "Devel:Cloud:7": "SLE_12_SP2",
    "Devel:Cloud:8": "SLE_12_SP3",
    "Devel:Cloud:9": "SLE_12_SP4",
    "SUSE:SLE-12-SP2:Update:Products:Cloud7": "standard",
    "SUSE:SLE-12-SP3:Update:Products:Cloud8": "standard",
    "SUSE:SLE-12-SP4:Update:Products:Cloud9": "standard",
}
def get_cloud_version(project):
    """
    Determine the SUSE OpenStack Cloud version we are dealing with from an
    IBS project name.  Returns None for unknown projects.
    """
    for prefix, version in _PROJECT_VERSION.items():
        if project.startswith(prefix):
            return version
    return None
def get_repository(project):
    """
    Determine the package repository to use from an IBS project name.
    Returns None for unknown projects.
    """
    for prefix, repository in _PROJECT_REPO.items():
        if project.startswith(prefix):
            return repository
    return None
def find_groupfiles(product_file):
    """
    Extract a list of all group files being included (via xi:include) from
    a product file.  Returned paths are joined onto the product file's
    directory.
    """
    group_files = []
    # Fix: the original ended with ``f.close`` (no call parentheses), so
    # the file handle was never closed; a ``with`` block guarantees it.
    with open(product_file) as f:
        for line in f:
            if "xi:include" not in line:
                continue
            match = re.search(r'href="(.*\.group)"', line)
            if match:
                group_files.append(
                    os.path.join(
                        os.path.dirname(product_file), match.group(1)
                    )
                )
    return group_files
def find_packages(search_file):
    """
    Extract the set of package names being pulled in by a .product or
    .group file.  Returns an empty set if the file cannot be parsed.
    """
    packages = set()
    with open(search_file) as f:
        raw = f.read()
    # Group files may not have a DTD; wrap them so the parser accepts
    # multiple top-level elements.
    if not raw.startswith("<?"):
        raw = "<rootnode>\n" + raw + "</rootnode>"
    try:
        tree = ET.fromstring(raw)
    except Exception as e:
        # Fixes two bugs in the original error path: most exceptions have
        # no ``.msg`` attribute (AttributeError), and execution fell
        # through to use the unbound ``tree`` (NameError).
        print("Failed to parse %s" % search_file, file=sys.stderr)
        print(e, file=sys.stderr)
        return packages
    for node in tree.findall("package"):
        packages.add(node.attrib["name"])
    # Note: the original also called f.close() here, after the ``with``
    # block had already closed the file; that no-op has been dropped.
    return packages
def check_blacklist(package, cloud_version):
    """
    Return True if *package*'s name starts with any of the patterns in the
    black list for the given SUSE OpenStack Cloud version.
    """
    prefixes = _BLACKLIST[cloud_version]
    return any(package.startswith(prefix) for prefix in prefixes)
def get_buildinfo(project, api, repository, arch, packages):
    """
    Generate a spec from a list of packages and retrieve the buildinfo data for
    that spec from IBS.

    :project IBS project name (e.g. "Devel:Cloud:9")
    :api osc API URL
    :repository Repository name within the project
    :arch Target architecture
    :packages Set of package names; note this set is mutated in place below
        to work around known dependency-resolution choices.
    """
    cloud_version = get_cloud_version(project)
    spec = "Name: _product\n"
    # unresolvable: have choice for kvm needed by patterns-cloud-compute:
    # kvm qemu-kvm
    packages.add("qemu-kvm")
    # unresolvable: have choice for libpq.so.5()(64bit) needed by
    # python-psycopg2: libpq5 postgresql12-devel-mini
    packages.add("libpq5")
    if "patterns-cloud-ardana" in packages:
        # This needs special treatment: we have a choice between atftp and tftp
        # to fullfil cobbler's Requires.
        packages.add("tftp")
        if cloud_version <= 8:
            # have choice for ardana-installer-ui needed by
            # patterns-cloud-ardana: ardana-installer-ui
            # ardana-installer-ui-hpe,
            packages.add("ardana-installer-ui-hpe")
            # have choice for ardana-installer-ui needed by
            # patterns-cloud-ardana: ardana-installer-ui
            # ardana-installer-ui-hpe
            packages.add("ardana-opsconsole-ui-hpe")
            # have choice for venv-openstack-horizon-x86_64 needed by
            # patterns-cloud-ardana: venv-openstack-horizon-hpe-x86_64
            # venv-openstack-horizon-x86_64
            packages.discard("venv-openstack-horizon")
            packages.add("venv-openstack-horizon-hpe-x86_64")
    # Emit one BuildRequires per surviving package so the buildinfo query
    # resolves the full dependency set for us.
    for p in packages:
        if p.startswith("venv") and not p.endswith("-x86_64"):
            # These are listed in ardana.group but do not exist
            continue
        if check_blacklist(p, cloud_version):
            continue
        spec += "BuildRequires: %s\n" % p
    # initialize osc configuration
    osc.conf.get_config()
    buildinfo = osc.core.get_buildinfo(
        api, project, "_product", repository, arch, spec
    )
    return buildinfo
def process_package_args(arch, api, args):
    """
    Process a user provided IBS project/package(s) combination and print
    one "name version release project" line per resolved package.

    :arch Target architecture
    :api API URL to query
    :args Positional arguments: the IBS project followed by one or more
        package names.
    """
    # Fix: validate the parsed positional arguments rather than
    # ``sys.argv``.  The original ``len(sys.argv) < 2`` check could never
    # fire in -u mode (argv always contains the -u flag), which allowed
    # an IndexError below when no project/package was supplied.
    if len(args) < 2:
        print(
            "usage: %s -u <project> <package> [ ... <package> ]" % sys.argv[0]
        )
        sys.exit(1)
    project = args[0]
    packages = set(args[1:])
    repository = get_repository(project)
    packages_all = dict()
    buildinfo = get_buildinfo(project, api, repository, arch, packages)
    tree = ET.fromstring(buildinfo)
    bdeps = tree.findall("bdep")
    if len(bdeps) == 0:
        print(
            "Package list generated buildinfo without packages."
            " Raw buildinfo follows.",
            file=sys.stderr,
        )
        sys.stderr.buffer.write(buildinfo)
        return
    for node in bdeps:
        packages_all[node.attrib["name"]] = {
            "version": node.attrib["version"],
            "release": node.attrib["release"],
            "project": node.attrib["project"],
        }
    for p in sorted(packages_all.keys()):
        print(
            "%s %s %s %s"
            % (
                p,
                packages_all[p]["version"],
                packages_all[p]["release"],
                packages_all[p]["project"],
            )
        )
def process_product_files(arch, files=None):
    """
    Process one or more *.product files and print one
    "name version release project" line per resolved package.

    :arch Target architecture
    :files Optional list of product file paths; defaults to sys.argv[1:]
        for backward compatibility.  NOTE(review): that default still
        contains any option flags optparse saw — callers should prefer
        passing the parsed positional arguments explicitly.
    """
    if files is None:
        files = sys.argv[1:]
    if len(files) < 1:
        print("usage: %s <product file> [ ... <product file> ]" % sys.argv[0])
        sys.exit(1)
    for product_file in files:
        packages_all = dict()
        package_files = [product_file]
        package_files.extend(find_groupfiles(product_file))
        # Read the IBS project/API from the osc working copy metadata next
        # to the product file.
        if os.path.exists(os.path.dirname(product_file)):
            project = osc.core.store_read_project(
                os.path.dirname(product_file)
            )
            api = osc.core.store_read_apiurl(os.path.dirname(product_file))
        else:
            # relative path with no leading component
            project = osc.core.store_read_project(os.path.dirname(os.curdir))
            api = osc.core.store_read_apiurl(os.path.dirname(os.curdir))
        repository = get_repository(project)
        for package_file in package_files:
            packages = find_packages(package_file)
            buildinfo = get_buildinfo(project, api, repository, arch, packages)
            tree = ET.fromstring(buildinfo)
            bdeps = tree.findall("bdep")
            if len(bdeps) == 0:
                print(
                    "%s generated buildinfo without packages."
                    " Raw buildinfo follows."
                    % package_file,
                    file=sys.stderr,
                )
                sys.stderr.buffer.write(buildinfo)
                continue
            for node in bdeps:
                packages_all[node.attrib["name"]] = {
                    "version": node.attrib["version"],
                    "release": node.attrib["release"],
                    "project": node.attrib["project"],
                }
        for p in sorted(packages_all.keys()):
            print(
                "%s %s %s %s"
                % (
                    p,
                    packages_all[p]["version"],
                    packages_all[p]["release"],
                    packages_all[p]["project"],
                )
            )
# Command line handling: two modes are supported — product-file mode
# (default) and user-supplied package list mode (-u).
parser = optparse.OptionParser(
    version="0.1.0",
    description=(
        "This script operates on *.product files for SUSE OpenStack cloud"
        " and will attempt to determine the provenance (i.e. IBS project)"
        " of all packages pulled in by a particular product."
        " Alternatively, the -u option can be used to specify a project"
        " and list of packages"
    ),
    usage=(
        "\n %s <product file> [ ... <product file> ]\n"
        " %s -u <project> <package> [ ... <package> ]"
        % (sys.argv[0], sys.argv[0])
    ),
)
parser.add_option(
    "-u",
    "--user-packages",
    action="store_true",
    default=False,
    help="Instead of parsing product files,"
    " operate on a user provided list of packages.",
)
parser.add_option(
    "-A",
    "--api",
    default="https://api.suse.de",
    help="API URL to use in --user-packages mode.",
)
(options, args) = parser.parse_args(sys.argv[1:])
# The architecture queried is fixed to x86_64.
arch = "x86_64"
if options.user_packages:
    process_package_args(arch, options.api, args)
else:
    process_product_files(arch)
| |
# -*- coding: utf-8 -*-
import datetime
import json
from unittest import mock
from django.db.models import Q
from django.test import TestCase
from elasticsearch.serializer import JSONSerializer
from wagtail.search.backends.elasticsearch2 import Elasticsearch2SearchBackend, get_model_root
from wagtail.search.query import MATCH_ALL, Phrase
from wagtail.tests.search import models
from .elasticsearch_common_tests import ElasticsearchCommonSearchBackendTests
class TestElasticsearch2SearchBackend(ElasticsearchCommonSearchBackendTests, TestCase):
    # Runs the shared search-backend test suite against the
    # Elasticsearch 2 backend implementation.
    backend_path = 'wagtail.search.backends.elasticsearch2'
class TestElasticsearch2SearchQuery(TestCase):
    """
    Tests for the Elasticsearch 2 query compiler: each test compiles a
    Django queryset plus a search expression and checks the generated
    Elasticsearch DSL structure.
    """
    def assertDictEqual(self, a, b):
        # Compare via canonical (sorted-key) JSON so differing dict key
        # order cannot cause false negatives.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )
    query_compiler_class = Elasticsearch2SearchBackend.query_compiler_class
    def test_simple(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello")
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_match_all(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), MATCH_ALL)
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'match_all': {}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_and_operator(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", operator='and')
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials'], 'operator': 'and'}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_filter(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title="Test"), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'term': {'title_filter': 'Test'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_and_filter(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title="Test", publication_date=datetime.date(2017, 10, 18)), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'and': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        # Make sure field filters are sorted (as they can be in any order which may cause false positives)
        query = query_compiler.get_query()
        field_filters = query['filtered']['filter']['and'][1]['and']
        field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
        self.assertDictEqual(query, expected_result)
    def test_or_filter(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(Q(title="Test") | Q(publication_date=datetime.date(2017, 10, 18))), "Hello")
        # Make sure field filters are sorted (as they can be in any order which may cause false positives)
        query = query_compiler.get_query()
        field_filters = query['filtered']['filter']['and'][1]['or']
        field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'or': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query, expected_result)
    def test_negated_filter(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.exclude(publication_date=datetime.date(2017, 10, 18)), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'not': {'term': {'publication_date_filter': '2017-10-18'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_fields(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'])
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'match': {'title': {'query': 'Hello'}}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_fields_with_and_operator(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'], operator='and')
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'match': {'title': {'query': 'Hello', 'operator': 'and'}}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_multiple_fields(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title', 'content'])
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello'}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_multiple_fields_with_and_operator(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.all(), "Hello", fields=['title', 'content'], operator='and'
        )
        # Check it
        expected_result = {'filtered': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'query': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello', 'operator': 'and'}}
        }}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_exact_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title__exact="Test"), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'term': {'title_filter': 'Test'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_none_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title=None), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'missing': {'field': 'title_filter'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_isnull_true_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title__isnull=True), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'missing': {'field': 'title_filter'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_isnull_false_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title__isnull=False), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'exists': {'field': 'title_filter'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_startswith_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.filter(title__startswith="Test"), "Hello")
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'prefix': {'title_filter': 'Test'}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_gt_lookup(self):
        # This also tests conversion of python dates to strings
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.filter(publication_date__gt=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gt': '2014-04-29'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_lt_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.filter(publication_date__lt=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'lt': '2014-04-29'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_gte_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.filter(publication_date__gte=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gte': '2014-04-29'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_lte_lookup(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.filter(publication_date__lte=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'lte': '2014-04-29'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_range_lookup(self):
        start_date = datetime.datetime(2014, 4, 29)
        end_date = datetime.datetime(2014, 8, 19)
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.filter(publication_date__range=(start_date, end_date)), "Hello"
        )
        # Check it
        expected_result = {'filtered': {'filter': {'and': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gte': '2014-04-29', 'lte': '2014-08-19'}}}
        ]}, 'query': {'multi_match': {'query': 'Hello', 'fields': ['_all', '_partials']}}}}
        self.assertDictEqual(query_compiler.get_query(), expected_result)
    def test_custom_ordering(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.order_by('publication_date'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'asc'}]
        self.assertDictEqual(query_compiler.get_sort(), expected_result)
    def test_custom_ordering_reversed(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.order_by('-publication_date'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'desc'}]
        self.assertDictEqual(query_compiler.get_sort(), expected_result)
    def test_custom_ordering_multiple(self):
        # Create a query
        query_compiler = self.query_compiler_class(
            models.Book.objects.order_by('publication_date', 'number_of_pages'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'asc'}, {'number_of_pages_filter': 'asc'}]
        self.assertDictEqual(query_compiler.get_sort(), expected_result)
    def test_phrase_query(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), Phrase("Hello world"))
        # Check it
        expected_result = {'multi_match': {'fields': ['_all', '_partials'], 'query': "Hello world", 'type': 'phrase'}}
        self.assertDictEqual(query_compiler.get_inner_query(), expected_result)
    def test_phrase_query_single_field(self):
        # Create a query
        query_compiler = self.query_compiler_class(models.Book.objects.all(), Phrase("Hello world"), fields=['title'])
        # Check it
        expected_result = {'match_phrase': {'title': "Hello world"}}
        self.assertDictEqual(query_compiler.get_inner_query(), expected_result)
class TestElasticsearch2SearchResults(TestCase):
    """Tests for the results class of the Elasticsearch 2 backend.

    ``elasticsearch.Elasticsearch.search`` is mocked in every test, so these
    tests only verify the request parameters we send and how raw responses
    are converted back into Django model instances.
    """
    fixtures = ['search']

    def assertDictEqual(self, a, b):
        """Compare two structures through their JSON serialisation.

        The Elasticsearch ``JSONSerializer`` can encode values (e.g. dates)
        that the plain ``json`` module cannot, so both sides are dumped with
        its ``default`` hook before comparison.
        """
        default = JSONSerializer().default
        # Fix: the second operand used to be the bare ``json.dumps`` function
        # object (the call and its arguments were missing), so ``b`` was
        # never serialised and the comparison could never succeed.
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default),
            json.dumps(b, sort_keys=True, default=default)
        )

    def get_results(self):
        """Build a results object backed by a mocked query compiler."""
        backend = Elasticsearch2SearchBackend({})
        query_compiler = mock.MagicMock()
        query_compiler.queryset = models.Book.objects.all()
        query_compiler.get_query.return_value = 'QUERY'
        query_compiler.get_sort.return_value = None
        return backend.results_class(backend, query_compiler)

    def construct_search_response(self, results):
        """Fake an Elasticsearch response listing the given Book pks."""
        return {
            '_shards': {'failed': 0, 'successful': 5, 'total': 5},
            'hits': {
                'hits': [
                    {
                        '_id': 'searchtests_book:' + str(result),
                        '_index': 'wagtail',
                        '_score': 1,
                        '_type': 'searchtests_book',
                        'fields': {
                            'pk': [str(result)],
                        }
                    }
                    for result in results
                ],
                'max_score': 1,
                'total': len(results)
            },
            'timed_out': False,
            'took': 2
        }

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_basic_search(self, search):
        search.return_value = self.construct_search_response([])
        results = self.get_results()
        list(results)  # Performs search
        search.assert_any_call(
            body={'query': 'QUERY'},
            _source=False,
            fields='pk',
            index='wagtail__searchtests_book',
            scroll='2m',
            size=100
        )

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_get_single_item(self, search):
        # Need to return something to prevent index error
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        results[10]  # Performs search
        search.assert_any_call(
            from_=10,
            body={'query': 'QUERY'},
            _source=False,
            fields='pk',
            index='wagtail__searchtests_book',
            size=1
        )

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_slice_results(self, search):
        search.return_value = self.construct_search_response([])
        results = self.get_results()[1:4]
        list(results)  # Performs search
        search.assert_any_call(
            from_=1,
            body={'query': 'QUERY'},
            _source=False,
            fields='pk',
            index='wagtail__searchtests_book',
            size=3
        )

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_slice_results_multiple_times(self, search):
        search.return_value = self.construct_search_response([])
        # Chained slices must compose: [10:][:10] == offset 10, limit 10.
        results = self.get_results()[10:][:10]
        list(results)  # Performs search
        search.assert_any_call(
            from_=10,
            body={'query': 'QUERY'},
            _source=False,
            fields='pk',
            index='wagtail__searchtests_book',
            size=10
        )

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_slice_results_and_get_item(self, search):
        # Need to return something to prevent index error
        search.return_value = self.construct_search_response([1])
        results = self.get_results()[10:]
        # Indexing into an already-sliced result adds the offsets together.
        results[10]  # Performs search
        search.assert_any_call(
            from_=20,
            body={'query': 'QUERY'},
            _source=False,
            fields='pk',
            index='wagtail__searchtests_book',
            size=1
        )

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_returned(self, search):
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        self.assertEqual(results[0], models.Book.objects.get(id=1))

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_len_1(self, search):
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        self.assertEqual(len(results), 1)

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_len_2(self, search):
        search.return_value = self.construct_search_response([1, 2])
        results = self.get_results()
        self.assertEqual(len(results), 2)

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_duplicate_results(self, search):  # Duplicates will not be removed
        search.return_value = self.construct_search_response([1, 1])
        results = list(self.get_results())  # Must cast to list so we only create one query
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0], models.Book.objects.get(id=1))
        self.assertEqual(results[1], models.Book.objects.get(id=1))

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_order(self, search):
        search.return_value = self.construct_search_response(
            [1, 2, 3]
        )
        results = list(self.get_results())  # Must cast to list so we only create one query
        self.assertEqual(results[0], models.Book.objects.get(id=1))
        self.assertEqual(results[1], models.Book.objects.get(id=2))
        self.assertEqual(results[2], models.Book.objects.get(id=3))

    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_order_2(self, search):
        search.return_value = self.construct_search_response(
            [3, 2, 1]
        )
        results = list(self.get_results())  # Must cast to list so we only create one query
        self.assertEqual(results[0], models.Book.objects.get(id=3))
        self.assertEqual(results[1], models.Book.objects.get(id=2))
        self.assertEqual(results[2], models.Book.objects.get(id=1))
class TestElasticsearch2Mapping(TestCase):
    """Checks the ES2 mapping and indexed document generated for Book."""
    fixtures = ['search']

    def assertDictEqual(self, a, b):
        # Compare via JSON so values the plain json module cannot encode
        # (dates) go through the Elasticsearch serializer's default hook.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )

    def setUp(self):
        # Create ES mapping
        self.es_mapping = Elasticsearch2SearchBackend.mapping_class(models.Book)
        self.obj = models.Book.objects.get(id=4)

    def test_get_document_type(self):
        self.assertEqual(self.es_mapping.get_document_type(), 'searchtests_book')

    def test_get_mapping(self):
        # Build mapping
        mapping = self.es_mapping.get_mapping()
        # Check: every search/filter field of Book plus the pk/content_type
        # bookkeeping fields and the _partials autocomplete field.
        expected_result = {
            'searchtests_book': {
                'properties': {
                    'pk': {'index': 'not_analyzed', 'type': 'string', 'store': True, 'include_in_all': False},
                    'content_type': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                    '_partials': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'include_in_all': False, 'type': 'string'},
                    'title': {'type': 'string', 'boost': 2.0, 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_edgengrams': {'type': 'string', 'include_in_all': False, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_filter': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                    'authors': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'include_in_all': True},
                            'name_edgengrams': {'analyzer': 'edgengram_analyzer', 'include_in_all': False, 'search_analyzer': 'standard', 'type': 'string'},
                            'date_of_birth_filter': {'index': 'not_analyzed', 'type': 'date', 'include_in_all': False},
                        },
                    },
                    'authors_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False},
                    'publication_date_filter': {'index': 'not_analyzed', 'type': 'date', 'include_in_all': False},
                    'number_of_pages_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False},
                    'tags': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'include_in_all': True},
                            'slug_filter': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                        },
                    },
                    'tags_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False}
                }
            }
        }
        self.assertDictEqual(mapping, expected_result)

    def test_get_document_id(self):
        self.assertEqual(self.es_mapping.get_document_id(self.obj), 'searchtests_book:' + str(self.obj.pk))

    def test_get_document(self):
        # Get document
        document = self.es_mapping.get_document(self.obj)
        # Sort partials: their order is not deterministic, so normalise
        # before comparing against the expected document.
        if '_partials' in document:
            document['_partials'].sort()
        # Check
        expected_result = {
            'pk': '4',
            'content_type': ["searchtests.Book"],
            '_partials': ['J. R. R. Tolkien', 'The Fellowship of the Ring', 'The Fellowship of the Ring'],
            'title': 'The Fellowship of the Ring',
            'title_edgengrams': 'The Fellowship of the Ring',
            'title_filter': 'The Fellowship of the Ring',
            'authors': [
                {
                    'name': 'J. R. R. Tolkien',
                    'name_edgengrams': 'J. R. R. Tolkien',
                    'date_of_birth_filter': datetime.date(1892, 1, 3)
                }
            ],
            'authors_filter': [2],
            'publication_date_filter': datetime.date(1954, 7, 29),
            'number_of_pages_filter': 423,
            'tags': [],
            'tags_filter': []
        }
        self.assertDictEqual(document, expected_result)
class TestElasticsearch2MappingInheritance(TestCase):
    """Checks mapping/document generation for a model using multi-table
    inheritance (Novel inherits from Book)."""
    fixtures = ['search']

    def assertDictEqual(self, a, b):
        # Compare via JSON so dates go through the ES serializer's hook.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )

    def setUp(self):
        # Create ES mapping
        self.es_mapping = Elasticsearch2SearchBackend.mapping_class(models.Novel)
        self.obj = models.Novel.objects.get(id=4)

    def test_get_document_type(self):
        # Child models get a combined "<base>_<child>" document type.
        self.assertEqual(self.es_mapping.get_document_type(), 'searchtests_book_searchtests_novel')

    def test_get_mapping(self):
        # Build mapping
        mapping = self.es_mapping.get_mapping()
        # Check: child-only fields are namespaced with the child model's
        # label, while inherited Book fields keep their plain names.
        expected_result = {
            'searchtests_book_searchtests_novel': {
                'properties': {
                    # New
                    'searchtests_novel__setting': {'type': 'string', 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'searchtests_novel__protagonist': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'boost': 0.5, 'include_in_all': True},
                            'novel_id_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False}
                        }
                    },
                    'searchtests_novel__protagonist_id_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False},
                    'searchtests_novel__characters': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'boost': 0.25, 'include_in_all': True},
                        }
                    },
                    # Inherited
                    'pk': {'index': 'not_analyzed', 'type': 'string', 'store': True, 'include_in_all': False},
                    'content_type': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                    '_partials': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'include_in_all': False, 'type': 'string'},
                    'title': {'type': 'string', 'boost': 2.0, 'include_in_all': True, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_edgengrams': {'type': 'string', 'include_in_all': False, 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_filter': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                    'authors': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'include_in_all': True},
                            'name_edgengrams': {'analyzer': 'edgengram_analyzer', 'include_in_all': False, 'search_analyzer': 'standard', 'type': 'string'},
                            'date_of_birth_filter': {'index': 'not_analyzed', 'type': 'date', 'include_in_all': False},
                        },
                    },
                    'authors_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False},
                    'publication_date_filter': {'index': 'not_analyzed', 'type': 'date', 'include_in_all': False},
                    'number_of_pages_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False},
                    'tags': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'string', 'include_in_all': True},
                            'slug_filter': {'index': 'not_analyzed', 'type': 'string', 'include_in_all': False},
                        },
                    },
                    'tags_filter': {'index': 'not_analyzed', 'type': 'integer', 'include_in_all': False}
                }
            }
        }
        self.assertDictEqual(mapping, expected_result)

    def test_get_document_id(self):
        # This must be tests_searchtest instead of 'tests_searchtest_tests_searchtestchild'
        # as it uses the contents base content type name.
        # This prevents the same object being accidentally indexed twice.
        self.assertEqual(self.es_mapping.get_document_id(self.obj), 'searchtests_book:' + str(self.obj.pk))

    def test_get_document(self):
        # Build document
        document = self.es_mapping.get_document(self.obj)
        # Sort partials: ordering is not deterministic, normalise first.
        if '_partials' in document:
            document['_partials'].sort()
        # Sort characters the same way before comparing.
        if 'searchtests_novel__characters' in document:
            document['searchtests_novel__characters'].sort(key=lambda c: c['name'])
        # Check
        expected_result = {
            # New
            'searchtests_novel__setting': "Middle Earth",
            'searchtests_novel__protagonist': {
                'name': "Frodo Baggins",
                'novel_id_filter': 4
            },
            'searchtests_novel__protagonist_id_filter': 8,
            'searchtests_novel__characters': [
                {
                    'name': "Bilbo Baggins"
                },
                {
                    'name': "Frodo Baggins"
                },
                {
                    'name': "Gandalf"
                }
            ],
            # Changed: content_type lists the child first, then the base.
            'content_type': ["searchtests.Novel", "searchtests.Book"],
            '_partials': ['J. R. R. Tolkien', 'Middle Earth', 'The Fellowship of the Ring', 'The Fellowship of the Ring'],
            # Inherited
            'pk': '4',
            'title': 'The Fellowship of the Ring',
            'title_edgengrams': 'The Fellowship of the Ring',
            'title_filter': 'The Fellowship of the Ring',
            'authors': [
                {
                    'name': 'J. R. R. Tolkien',
                    'name_edgengrams': 'J. R. R. Tolkien',
                    'date_of_birth_filter': datetime.date(1892, 1, 3)
                }
            ],
            'authors_filter': [2],
            'publication_date_filter': datetime.date(1954, 7, 29),
            'number_of_pages_filter': 423,
            'tags': [],
            'tags_filter': []
        }
        self.assertDictEqual(document, expected_result)
@mock.patch('wagtail.search.backends.elasticsearch2.Elasticsearch')
class TestBackendConfiguration(TestCase):
    """Verifies how backend params are translated into Elasticsearch host
    dicts passed to the Elasticsearch client constructor."""

    def test_default_settings(self, Elasticsearch):
        # With no params the backend connects to localhost:9200, plain HTTP.
        Elasticsearch2SearchBackend(params={})
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': 'localhost',
                    'port': 9200,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': None
                }
            ],
            timeout=10
        )

    def test_hosts(self, Elasticsearch):
        # Explicit HOSTS entries are forwarded unchanged.
        Elasticsearch2SearchBackend(params={
            'HOSTS': [
                {
                    'host': '127.0.0.1',
                    'port': 9300,
                    'use_ssl': True,
                    'verify_certs': True,
                }
            ]
        })
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': '127.0.0.1',
                    'port': 9300,
                    'use_ssl': True,
                    'verify_certs': True,
                }
            ],
            timeout=10
        )

    def test_urls(self, Elasticsearch):
        # This test backwards compatibility with old URLS setting
        Elasticsearch2SearchBackend(params={
            'URLS': [
                'http://localhost:12345',
                'https://127.0.0.1:54321',
                'http://username:password@elasticsearch.mysite.com',
                'https://elasticsearch.mysite.com/hello',
            ],
        })
        # Each URL is parsed into host/port/prefix/ssl/auth; default ports
        # are 80 for http and 443 for https when none is given.
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': 'localhost',
                    'port': 12345,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': None,
                },
                {
                    'host': '127.0.0.1',
                    'port': 54321,
                    'url_prefix': '',
                    'use_ssl': True,
                    'verify_certs': True,
                    'http_auth': None,
                },
                {
                    'host': 'elasticsearch.mysite.com',
                    'port': 80,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': ('username', 'password')
                },
                {
                    'host': 'elasticsearch.mysite.com',
                    'port': 443,
                    'url_prefix': '/hello',
                    'use_ssl': True,
                    'verify_certs': True,
                    'http_auth': None,
                },
            ],
            timeout=10
        )
class TestGetModelRoot(TestCase):
    """get_model_root() must resolve any model to its base concrete model."""

    def test_root_model(self):
        from wagtail.core.models import Page
        # A root model is its own root.
        root = get_model_root(Page)
        self.assertEqual(root, Page)

    def test_child_model(self):
        from wagtail.core.models import Page
        from wagtail.tests.testapp.models import SimplePage
        # A direct subclass resolves to its concrete parent.
        root = get_model_root(SimplePage)
        self.assertEqual(root, Page)

    def test_grandchild_model(self):
        # MTIChildPage inherits from MTIBasePage which inherits from Page
        from wagtail.core.models import Page
        from wagtail.tests.testapp.models import MTIChildPage
        root = get_model_root(MTIChildPage)
        self.assertEqual(root, Page)
| |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Generic handlers for imports and exports.
"""
from dateutil.parser import parse
from flask import current_app
from sqlalchemy import and_
from sqlalchemy import or_
import re
import traceback
from ggrc import db
from ggrc.automapper import AutomapperGenerator
from ggrc.converters import errors
from ggrc.converters import get_exportables
from ggrc.login import get_current_user
from ggrc.models import Audit
from ggrc.models import CategoryBase
from ggrc.models import Contract
from ggrc.models import Assessment
from ggrc.models import ObjectPerson
from ggrc.models import Option
from ggrc.models import Person
from ggrc.models import Policy
from ggrc.models import Program
from ggrc.models import Regulation
from ggrc.models import Relationship
from ggrc.models import Request
from ggrc.models import Standard
from ggrc.models import all_models
from ggrc.models.reflection import AttributeInfo
from ggrc.models.relationship_helper import RelationshipHelper
from ggrc.rbac import permissions
MAPPING_PREFIX = "__mapping__:"
CUSTOM_ATTR_PREFIX = "__custom__:"
class ColumnHandler(object):
    """Base handler for a single import/export column.

    A handler knows how to parse a raw csv cell into a python value
    (``parse_item``), store that value on the object being imported
    (``set_obj_attr``) and render it back out for export (``get_value``).
    Subclasses override those hooks per column type.
    """

    def __init__(self, row_converter, key, **options):
        """Store per-column options from the object's column definition.

        Args:
            row_converter: converter for the row currently being processed.
            key: attribute name this column maps to.
            **options: column options — raw_value, validator, mandatory,
                default, description, display_name, unique and parse.
        """
        self.row_converter = row_converter
        self.key = key
        self.value = None
        self.raw_value = options.get("raw_value", "").strip()
        self.validator = options.get("validator")
        self.mandatory = options.get("mandatory", False)
        self.default = options.get("default")
        self.description = options.get("description", "")
        self.display_name = options.get("display_name", "")
        self.dry_run = row_converter.block_converter.converter.dry_run
        self.new_objects = self.row_converter.block_converter.converter.new_objects
        self.unique = options.get("unique", False)
        if options.get("parse"):
            self.set_value()

    def check_unique_consistency(self):
        """Returns true if no object exists with the same unique field."""
        if not self.unique:
            return
        if not self.value:
            return
        if not self.row_converter.obj:
            return
        nr_duplicates = self.row_converter.object_class.query.filter(and_(
            getattr(self.row_converter.object_class, self.key) == self.value,
            self.row_converter.object_class.id != self.row_converter.obj.id
        )).count()
        if nr_duplicates > 0:
            self.add_error(errors.DUPLICATE_VALUE,
                           column_name=self.key,
                           value=self.value)
            self.row_converter.set_ignore()

    def set_value(self):
        # Cache the parsed python value for later set_obj_attr calls.
        self.value = self.parse_item()

    def get_value(self):
        """Return the exportable value: the object attribute if present,
        otherwise the cached parsed value."""
        return getattr(self.row_converter.obj, self.key, self.value)

    def add_error(self, template, **kwargs):
        self.row_converter.add_error(template, **kwargs)

    def add_warning(self, template, **kwargs):
        self.row_converter.add_warning(template, **kwargs)

    def parse_item(self):
        """Parse the raw cell; the default is the stripped string itself."""
        return self.raw_value

    def set_obj_attr(self):
        """Copy the parsed value onto the imported object."""
        if not self.value:
            return
        try:
            setattr(self.row_converter.obj, self.key, self.value)
        except Exception:
            # Fix: was a bare ``except:`` which also swallowed SystemExit
            # and KeyboardInterrupt; any model-level failure is still
            # reported as an unknown error and logged with its traceback.
            self.row_converter.add_error(errors.UNKNOWN_ERROR)
            trace = traceback.format_exc()
            error = "Import failed with:\nsetattr({}, {}, {})\n{}".format(
                self.row_converter.obj, self.key, self.value, trace)
            current_app.logger.error(error)

    def get_default(self):
        """Return the column default, calling it first if it is a factory."""
        if callable(self.default):
            return self.default()
        return self.default

    def insert_object(self):
        """ For inserting fields such as custom attributes and mappings """
        pass
class DeleteColumnHandler(ColumnHandler):
    """Handler for the special "Delete" import column."""

    # this is a white list of objects that can be deleted in a cascade
    # e.g. deleting a Market can delete the associated ObjectOwner object too
    DELETE_WHITELIST = {"Relationship", "ObjectOwner", "ObjectPerson"}
    # Accepted cell values; only TRUE_VALUES trigger a delete and "force"
    # additionally authorises cascaded deletes.
    ALLOWED_VALUES = {"", "no", "false", "true", "yes", "force"}
    TRUE_VALUES = {"true", "yes", "force"}

    def get_value(self):
        # The delete column is never exported.
        return ""

    def parse_item(self):
        """Parse the cell into a boolean "should delete" flag and record
        it on the row converter."""
        if self.raw_value.lower() not in self.ALLOWED_VALUES:
            self.add_error(errors.WRONG_VALUE_ERROR, column_name=self.display_name)
            return False
        is_delete = self.raw_value.lower() in self.TRUE_VALUES
        # "force" is the only value that allows cascade deletes.
        self._allow_cascade = self.raw_value.lower() == "force"
        self.row_converter.is_delete = is_delete
        return is_delete

    def set_obj_attr(self):
        """Delete the row's object inside a nested transaction.

        The transaction is rolled back on dry runs or ignored rows and
        committed otherwise; committed deletes are also removed from the
        fulltext index.
        """
        if not self.value:
            return
        obj = self.row_converter.obj
        if self.row_converter.is_new:
            # An object created by this same import cannot be deleted.
            self.add_error(errors.DELETE_NEW_OBJECT_ERROR,
                           object_type=obj.type,
                           slug=obj.slug)
            return
        if self.row_converter.ignore:
            return
        tr = db.session.begin_nested()
        try:
            tr.session.delete(obj)
            # Count cascaded deletes, ignoring whitelisted bookkeeping
            # objects, to detect an unintended cascade.
            deleted = len([o for o in tr.session.deleted
                           if o.type not in self.DELETE_WHITELIST])
            if deleted > 1 and not self._allow_cascade:
                self.add_error(errors.DELETE_CASCADE_ERROR,
                               object_type=obj.type, slug=obj.slug)
        finally:
            if self.dry_run or self.row_converter.ignore:
                tr.rollback()
            else:
                indexer = self.row_converter.block_converter.converter.indexer
                if indexer is not None:
                    # Keep the fulltext index in sync with the deletes.
                    for o in tr.session.deleted:
                        indexer.delete_record(o.id, o.__class__.__name__, commit=False)
                tr.commit()
class StatusColumnHandler(ColumnHandler):
    """Maps a raw status cell onto one of the model's VALID_STATES."""

    def __init__(self, row_converter, key, **options):
        self.key = key
        self.valid_states = row_converter.object_class.VALID_STATES
        # Case-insensitive lookup from lowercased name to the real state.
        self.state_mappings = {str(s).lower(): s for s in self.valid_states}
        super(StatusColumnHandler, self).__init__(row_converter, key, **options)

    def parse_item(self):
        """Resolve the raw value to a valid state, reporting problems.

        Mandatory columns fall back to the first valid state with a
        warning; optional columns warn on unknown non-empty values.
        """
        normalized = self.raw_value.lower()
        status = self.state_mappings.get(normalized)
        if status is not None:
            return status
        if self.mandatory:
            if len(self.valid_states) > 0:
                self.add_warning(errors.WRONG_REQUIRED_VALUE,
                                 value=normalized[:20],
                                 column_name=self.display_name)
                return self.valid_states[0]
            self.add_error(errors.MISSING_VALUE_ERROR,
                           column_name=self.display_name)
            return None
        if normalized != "":
            self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        return status
class UserColumnHandler(ColumnHandler):
    """ Handler for primary and secondary contacts """

    def get_users_list(self):
        """Resolve each non-empty line of the cell to a Person.

        Unknown emails produce a warning and are skipped.
        """
        found = set()
        for line in filter(unicode.strip, self.raw_value.splitlines()):  # noqa
            email = line.strip().lower()
            person = self.get_person(email)
            if person:
                found.add(person)
            else:
                self.add_warning(errors.UNKNOWN_USER_WARNING, email=email)
        return list(found)

    def get_person(self, email):
        """Fetch a person by email, caching lookups in new_objects."""
        cache = self.row_converter.block_converter.converter.new_objects
        if email not in cache[Person]:
            cache[Person][email] = Person.query.filter_by(email=email).first()
        return cache[Person].get(email)

    def parse_item(self):
        """Parse a single-email cell, warning on unknown addresses."""
        email = self.raw_value.lower()
        person = self.get_person(email)
        if person:
            return person
        if email != "":
            self.add_warning(errors.UNKNOWN_USER_WARNING, email=email)
        elif self.mandatory:
            self.add_error(errors.MISSING_VALUE_ERROR,
                           column_name=self.display_name)
        return person

    def get_value(self):
        person = getattr(self.row_converter.obj, self.key)
        return person.email if person else self.value
class OwnerColumnHandler(UserColumnHandler):
    """Handler for the object owners list column."""

    def parse_item(self):
        """Parse the owners cell into a list of Person objects.

        Fix: previously duplicated the line-splitting/lookup logic of
        ``UserColumnHandler.get_users_list`` verbatim; now reuses it.
        Falls back to the current user (with a warning) when no valid
        owner is listed.
        """
        owners = set(self.get_users_list())
        if not owners:
            self.add_warning(errors.OWNER_MISSING)
            owners.add(get_current_user())
        return list(owners)

    def set_obj_attr(self):
        """Synchronise obj.owners with the parsed owner list."""
        try:
            # Fix: iterate over a copy — removing from the list while
            # iterating it skips the element following each removal.
            for person in list(self.row_converter.obj.owners):
                if person not in self.value:
                    self.row_converter.obj.owners.remove(person)
            for person in self.value:
                if person not in self.row_converter.obj.owners:
                    self.row_converter.obj.owners.append(person)
        except Exception:
            # Narrowed from a bare ``except:``.
            self.row_converter.add_error(errors.UNKNOWN_ERROR)
            trace = traceback.format_exc()
            error = "Import failed with:\nsetattr({}, {}, {})\n{}".format(
                self.row_converter.obj, self.key, self.value, trace)
            current_app.logger.error(error)

    def get_value(self):
        """Export all owner emails, one per line."""
        emails = [owner.email for owner in self.row_converter.obj.owners]
        return "\n".join(emails)
class SlugColumnHandler(ColumnHandler):
    """Handler for the object code (slug) column."""

    def parse_item(self):
        # Keep the slug exactly as entered; an empty cell becomes "".
        return self.raw_value if self.raw_value else ""
class DateColumnHandler(ColumnHandler):
    """Handler for date columns, parsed with ``dateutil.parser.parse``."""

    def parse_item(self):
        """Parse the raw cell into a datetime.

        Returns None (implicitly) and records an error when the value is
        not a recognisable date.
        """
        try:
            return parse(self.raw_value)
        except (ValueError, OverflowError, TypeError):
            # Narrowed from a bare ``except:`` to the exceptions dateutil
            # actually raises for unparsable input.
            # NOTE(review): this passes a literal message where every other
            # handler passes an ``errors`` template — confirm intended.
            self.add_error(
                u"Unknown date format, use YYYY-MM-DD or MM/DD/YYYY format")

    def get_value(self):
        """Render the stored date as MM/DD/YYYY for export."""
        date = getattr(self.row_converter.obj, self.key)
        if date:
            return date.strftime("%m/%d/%Y")
        return ""
class EmailColumnHandler(ColumnHandler):
    """Handler for email address columns."""

    def parse_item(self):
        # Emails are case insensitive: normalise to lowercase.
        email = self.raw_value
        return email.lower()
class TextColumnHandler(ColumnHandler):
    """ Single line text field handler """

    def parse_item(self):
        """ Remove multiple spaces and new lines from text """
        if not self.raw_value:
            return ""
        return self.clean_whitespaces(self.raw_value)

    def clean_whitespaces(self, value):
        """Collapse every whitespace run to a single space.

        A warning is recorded when the value actually changed.

        Returns:
            The cleaned value.
        """
        clean_value = re.sub(r'\s+', " ", value)
        if clean_value != value:
            self.add_warning(errors.WHITESPACE_WARNING,
                             column_name=self.display_name)
        # Fix: previously returned the original ``value``, discarding the
        # cleaned string computed above (and contradicting
        # RequiredTextColumnHandler, which checks the cleaned value).
        return clean_value
class RequiredTextColumnHandler(TextColumnHandler):
    """Single line text handler that rejects empty values."""

    def parse_item(self):
        cleaned = self.clean_whitespaces(self.raw_value or "")
        if not cleaned:
            self.add_error(errors.MISSING_VALUE_ERROR,
                           column_name=self.display_name)
        return cleaned
class TextareaColumnHandler(ColumnHandler):
    """ Multi line text field handler """

    def parse_item(self):
        """Collapse whitespace runs to single spaces and trim the ends."""
        if not self.raw_value:
            return ""
        collapsed = re.sub(r'\s+', " ", self.raw_value)
        return collapsed.strip()
class MappingColumnHandler(ColumnHandler):
    """ Handler for mapped objects """

    def __init__(self, row_converter, key, **options):
        self.key = key
        exportable = get_exportables()
        self.attr_name = options.get("attr_name", "")
        # Class of the objects this column maps to (e.g. Program, Control).
        self.mapping_object = exportable.get(self.attr_name)
        # Slugs of not-yet-committed objects created earlier in this import.
        self.new_slugs = row_converter.block_converter.converter.new_objects[
            self.mapping_object]
        # Keys prefixed with UNMAPPING_PREFIX remove mappings instead.
        self.unmap = self.key.startswith(AttributeInfo.UNMAPPING_PREFIX)
        super(MappingColumnHandler, self).__init__(row_converter, key, **options)

    def parse_item(self):
        """Resolve newline separated slugs to objects the user may map.

        Warns on insufficient update permission and on unknown slugs
        (unless the slug belongs to an object created earlier in this
        dry run).
        """
        class_ = self.mapping_object
        lines = set(self.raw_value.splitlines())
        slugs = filter(unicode.strip, lines)  # noqa
        objects = []
        for slug in slugs:
            obj = class_.query.filter(class_.slug == slug).first()
            if obj:
                if permissions.is_allowed_update_for(obj):
                    objects.append(obj)
                else:
                    self.add_warning(
                        errors.MAPPING_PERMISSION_ERROR,
                        object_type=class_._inflector.human_singular.title(),
                        slug=slug,
                    )
            elif not (slug in self.new_slugs and self.dry_run):
                self.add_warning(errors.UNKNOWN_OBJECT,
                                 object_type=class_._inflector.human_singular.title(),
                                 slug=slug)
        return objects

    def set_obj_attr(self):
        # Mappings are not plain attributes; just cache the parsed objects
        # for insert_object to act on.
        self.value = self.parse_item()

    def insert_object(self):
        """ Create a new mapping object """
        if self.dry_run or not self.value:
            return
        current_obj = self.row_converter.obj
        relationships = []
        for obj in self.value:
            mapping = Relationship.find_related(current_obj, obj)
            if not self.unmap and not mapping:
                mapping = Relationship(source=current_obj, destination=obj)
                relationships.append(mapping)
                db.session.add(mapping)
            elif self.unmap and mapping:
                db.session.delete(mapping)
        db.session.flush()
        # it is safe to reuse this automapper since no other objects will be
        # created while creating automappings and cache reuse yields significant
        # performance boost
        automapper = AutomapperGenerator(use_benchmark=False)
        for relation in relationships:
            automapper.generate_automappings(relation)
        # Mark as handled so a repeated call does not re-create mappings.
        self.dry_run = True

    def get_value(self):
        """Export the slugs (or emails, for people) of related objects."""
        if self.unmap:
            return ""
        related_slugs = []
        related_ids = RelationshipHelper.get_ids_related_to(
            self.mapping_object.__name__,
            self.row_converter.object_class.__name__,
            [self.row_converter.obj.id])
        if related_ids:
            related_objects = self.mapping_object.query.filter(
                self.mapping_object.id.in_(related_ids))
            # Fall back to email for objects that have no slug (Person).
            related_slugs = (getattr(o, "slug", getattr(o, "email", None))
                             for o in related_objects)
            related_slugs = [slug for slug in related_slugs if slug is not None]
        return "\n".join(related_slugs)

    def set_value(self):
        # Parsing is deferred to set_obj_attr; see above.
        pass
class ConclusionColumnHandler(ColumnHandler):
    """ Handler for design and operationally columns in ControlAssesments """

    def parse_item(self):
        # Case-insensitive lookup against the valid conclusion values;
        # anything unrecognised maps to "".
        lookup = {valid.lower(): valid
                  for valid in Assessment.VALID_CONCLUSIONS}
        return lookup.get(self.raw_value.lower(), "")
class OptionColumnHandler(ColumnHandler):
    """Handler for columns whose values live in the Option table."""

    def parse_item(self):
        # Options may be stored under either the plain key or a key
        # prefixed with the owning object's singular table name.
        prefixed_key = "{}_{}".format(
            self.row_converter.object_class._inflector.table_singular,
            self.key)
        return Option.query.filter(
            and_(Option.title == self.raw_value.strip(),
                 or_(Option.role == self.key,
                     Option.role == prefixed_key))).first()

    def get_value(self):
        option = getattr(self.row_converter.obj, self.key, None)
        if option is None:
            return ""
        # ``title`` may be a plain attribute or a callable.
        return option.title() if callable(option.title) else option.title
class CheckboxColumnHandler(ColumnHandler):
    """Handler for boolean (checkbox) columns.

    Exported values are "true"/"false", with "--" standing for an unset
    (None) state.
    """

    def parse_item(self):
        """ mandatory checkboxes will get evaluated to false on empty value """
        if self.raw_value == "":
            return False
        value = self.raw_value.lower() in ("yes", "true")
        if self.raw_value == "--":
            # "--" explicitly stores the unknown/unset state.
            value = None
        if self.raw_value.lower() not in ("yes", "true", "no", "false", "--"):
            self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        return value

    def get_value(self):
        val = getattr(self.row_converter.obj, self.key, False)
        if val is None:
            return "--"
        return "true" if val else "false"

    def set_obj_attr(self):
        """ handle set object for boolean values
        This is the only handler that will allow setting a None value"""
        try:
            setattr(self.row_converter.obj, self.key, self.value)
        except Exception:
            # Fix: narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            self.row_converter.add_error(errors.UNKNOWN_ERROR)
            trace = traceback.format_exc()
            error = "Import failed with:\nsetattr({}, {}, {})\n{}".format(
                self.row_converter.obj, self.key, self.value, trace)
            current_app.logger.error(error)
class ParentColumnHandler(ColumnHandler):
    """ handler for directly mapped columns """

    # Subclasses set this to the parent model class (e.g. Program, Audit).
    parent = None

    def __init__(self, row_converter, key, **options):
        super(ParentColumnHandler, self).__init__(row_converter, key, **options)

    def parse_item(self):
        """ get parent object """
        # pylint: disable=protected-access
        if self.raw_value == "":
            self.add_error(errors.MISSING_VALUE_ERROR, column_name=self.display_name)
            return None
        slug = self.raw_value
        # Prefer parents created earlier in this same import run.
        obj = self.new_objects.get(self.parent, {}).get(slug)
        if obj is None:
            obj = self.parent.query.filter(self.parent.slug == slug).first()
        if obj is None:
            self.add_error(errors.UNKNOWN_OBJECT,
                           object_type=self.parent._inflector.human_singular.title(),
                           slug=slug)
            return None
        # The importer must be allowed to create children in the parent's
        # context (unless conditional permissions apply for this type).
        context_id = None
        if hasattr(obj, "context_id") and \
           hasattr(self.row_converter.obj, "context_id"):
            context_id = obj.context_id
            if context_id is not None:
                name = self.row_converter.obj.__class__.__name__
                if not permissions.is_allowed_create(name, None, context_id) \
                   and not permissions.has_conditions('create', name):
                    self.add_error(errors.MAPPING_PERMISSION_ERROR,
                                   object_type=obj.type, slug=slug)
                    return None
        return obj

    def set_obj_attr(self):
        super(ParentColumnHandler, self).set_obj_attr()
        # inherit context
        obj = self.row_converter.obj
        parent = getattr(obj, self.key, None)
        if parent is not None and \
           hasattr(obj, "context_id") and \
           hasattr(parent, "context_id") and \
           parent.context_id is not None:
            obj.context_id = parent.context_id

    def get_value(self):
        # Export the parent's slug, or None when no parent is set.
        value = getattr(self.row_converter.obj, self.key, self.value)
        if not value:
            return None
        return value.slug
class ProgramColumnHandler(ParentColumnHandler):
    """Parent-column handler bound to the Program model."""

    def __init__(self, row_converter, key, **options):
        self.parent = Program
        super(ProgramColumnHandler, self).__init__(
            row_converter, key, **options)
class SectionDirectiveColumnHandler(MappingColumnHandler):
    """Handler for a section's parent directive column."""

    def get_directive_from_slug(self, directive_class, slug):
        # Prefer directives created earlier in the same import run.
        if slug in self.new_objects[directive_class]:
            return self.new_objects[directive_class][slug]
        return directive_class.query.filter_by(slug=slug).first()

    def parse_item(self):
        """ get a directive from slug """
        allowed_directives = [Policy, Regulation, Standard, Contract]
        if self.raw_value == "":
            return None
        slug = self.raw_value
        # Try each directive type in turn; first match wins.
        for directive_class in allowed_directives:
            directive = self.get_directive_from_slug(directive_class, slug)
            if directive is not None:
                return [directive]
        # NOTE(review): object_type="Program" looks like a copy-paste —
        # the slug searched here is a directive, not a program. Confirm
        # the intended error wording before changing user-facing output.
        self.add_error(errors.UNKNOWN_OBJECT, object_type="Program", slug=slug)
        return None

    def get_value(self):
        # Legacy field. With the new mapping system it is not possible to determine
        # which was the primary directive that has been mapped
        return ""
class ControlColumnHandler(MappingColumnHandler):
    """Mapping handler that also stores the single mapped control on the
    object's ``control`` attribute."""

    def insert_object(self):
        # Exactly one control must be provided in this column.
        if len(self.value) != 1:
            self.add_error(errors.WRONG_VALUE_ERROR, column_name="Control")
            return
        mapped_control = self.value[0]
        self.row_converter.obj.control = mapped_control
        MappingColumnHandler.insert_object(self)
class AuditColumnHandler(MappingColumnHandler):
    """Mapping handler hard-wired to the audit mapping column."""

    def __init__(self, row_converter, key, **options):
        # The caller-supplied key is ignored; this column always maps audits.
        audit_key = "{}audit".format(MAPPING_PREFIX)
        super(AuditColumnHandler, self).__init__(
            row_converter, audit_key, **options)
class RequestAuditColumnHandler(ParentColumnHandler):
    """Parent-column handler binding requests to their Audit."""

    def __init__(self, row_converter, key, **options):
        self.parent = Audit
        # The attribute key is always "audit", whatever the caller passed.
        super(RequestAuditColumnHandler, self).__init__(
            row_converter, "audit", **options)
class ObjectPersonColumnHandler(UserColumnHandler):
    """
    ObjectPerson handler for all specific columns such as "owner" or any other
    role. This handler will remove all people not listed in the value and will
    add people that are missing.
    """

    def parse_item(self):
        # One email per line; unknown emails warn and are skipped.
        return self.get_users_list()

    def set_obj_attr(self):
        # People links are persisted by insert_object, not as attributes.
        pass

    def get_value(self):
        """Export the emails of all people linked to this object."""
        object_person = db.session.query(ObjectPerson.person_id).filter_by(
            personable_id=self.row_converter.obj.id,
            personable_type=self.row_converter.obj.__class__.__name__)
        users = Person.query.filter(Person.id.in_(object_person))
        emails = [user.email for user in users]
        return "\n".join(emails)

    def remove_current_people(self):
        """Drop every existing person link on the current object."""
        ObjectPerson.query.filter_by(
            personable_id=self.row_converter.obj.id,
            personable_type=self.row_converter.obj.__class__.__name__).delete()

    def insert_object(self):
        """Replace the object's person links with the parsed list."""
        if self.dry_run or not self.value:
            return
        self.remove_current_people()
        for person in self.value:
            object_person = ObjectPerson(
                personable=self.row_converter.obj,
                person=person,
                context=self.row_converter.obj.context
            )
            db.session.add(object_person)
        # Mark handled so a repeated call becomes a no-op.
        self.dry_run = True
class PersonMappingColumnHandler(ObjectPersonColumnHandler):
    """
    This handler will only add people listed in self.value if they are not yet
    connected to the current object.
    """

    def remove_current_people(self):
        # Instead of deleting existing links, drop already-connected people
        # from the parsed list so that only new links get created.
        obj = self.row_converter.obj

        def already_linked(person):
            return ObjectPerson.query.filter_by(
                personable_id=obj.id,
                personable_type=obj.__class__.__name__,
                person=person).count()

        self.value = [person for person in self.value
                      if not already_linked(person)]
class PersonUnmappingColumnHandler(ObjectPersonColumnHandler):
  """
  This handler will only remove people listed in self.value if they are
  already connected to the current object.
  """

  def insert_object(self):
    """Delete the ObjectPerson row for each listed person (dry run: no-op)."""
    if self.dry_run or not self.value:
      return
    obj = self.row_converter.obj
    for person in self.value:
      ObjectPerson.query.filter_by(
          personable_id=obj.id,
          personable_type=obj.__class__.__name__,
          person=person,
      ).delete()
    self.dry_run = True
class CategoryColumnHandler(ColumnHandler):
  """Handler for newline-separated category name columns."""

  def parse_item(self):
    """Look up CategoryBase rows matching the given names.

    Names that do not match any stored category of the handler's base type
    produce a wrong-multi-value warning.
    """
    names = [part.strip() for part in self.raw_value.split("\n")]
    names = [name for name in names if name]
    if not names:
      return None
    categories = CategoryBase.query.filter(and_(
        CategoryBase.name.in_(names),
        CategoryBase.type == self.category_base_type
    )).all()
    found_names = {category.name.strip() for category in categories}
    for name in names:
      if name not in found_names:
        self.add_warning(errors.WRONG_MULTI_VALUE,
                         column_name=self.display_name,
                         value=name)
    return categories

  def set_obj_attr(self):
    """Store parsed categories on the row object unless parsing failed."""
    if self.value is not None:
      setattr(self.row_converter.obj, self.key, self.value)

  def get_value(self):
    """Render the object's categories as newline-separated names."""
    categories = getattr(self.row_converter.obj, self.key, self.value)
    return "\n".join(category.name for category in categories)
class ControlCategoryColumnHandler(CategoryColumnHandler):
  """Category handler restricted to the ControlCategory base type."""

  def __init__(self, row_converter, key, **options):
    self.category_base_type = "ControlCategory"
    super(ControlCategoryColumnHandler, self).__init__(
        row_converter, key, **options)
class ControlAssertionColumnHandler(CategoryColumnHandler):
  """Category handler restricted to the ControlAssertion base type."""

  def __init__(self, row_converter, key, **options):
    self.category_base_type = "ControlAssertion"
    super(ControlAssertionColumnHandler, self).__init__(
        row_converter, key, **options)
class RequestColumnHandler(ParentColumnHandler):
  """Parent-column handler whose parent model is Request."""

  def __init__(self, row_converter, key, **options):
    self.parent = Request
    super(RequestColumnHandler, self).__init__(
        row_converter, key, **options)
class DocumentsColumnHandler(ColumnHandler):
  """Handler for documents expressed as "title link" lines."""

  def get_value(self):
    """Render each attached document as "title link" on its own line."""
    docs = self.row_converter.obj.documents
    return "\n".join("{} {}".format(doc.title, doc.link) for doc in docs)

  def parse_item(self):
    """Parse lines of "title link" into unsaved Document models.

    A line without a space-separated link yields a wrong-value warning and
    is skipped. The link is taken from the last space-separated token.
    """
    documents = []
    for line in self.raw_value.splitlines():
      parts = line.rsplit(" ", 1)
      if len(parts) != 2:
        self.add_warning(errors.WRONG_VALUE, column_name=self.display_name)
        continue
      title, link = parts
      documents.append(
          all_models.Document(title=title.strip(), link=link.strip()))
    return documents

  def set_obj_attr(self):
    # Documents are assigned in insert_object instead.
    pass

  def insert_object(self):
    """Attach parsed documents to the row object (no-op on dry run)."""
    if self.dry_run or not self.value:
      return
    self.row_converter.obj.documents = self.value
    self.dry_run = True
class RequestTypeColumnHandler(ColumnHandler):
  """Handler for the request type column with case-insensitive matching."""

  def __init__(self, row_converter, key, **options):
    self.key = key
    valid_types = row_converter.object_class.VALID_TYPES
    # Map lowercased string form back to the canonical valid type.
    self.type_mappings = {str(valid).lower(): valid for valid in valid_types}
    super(RequestTypeColumnHandler, self).__init__(
        row_converter, key, **options)

  def parse_item(self):
    """Return the canonical request type for the raw cell value.

    Unknown values fall back to the default (new rows) or the currently
    stored value (existing rows), with a warning when the cell was
    non-empty.
    """
    raw = self.raw_value.lower()
    req_type = self.type_mappings.get(raw)
    if req_type is not None:
      return req_type
    req_type = self.get_default()
    if not self.row_converter.is_new:
      req_type = self.get_value()
    if raw:
      self.add_warning(errors.WRONG_VALUE,
                       value=raw[:20],
                       column_name=self.display_name)
    return req_type
| |
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import os
import warnings
import sys
import re
import pkgutil
from inspect import isgenerator
from functools import partial
import pytest
from sklearn.utils import all_estimators
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.estimator_checks import check_estimator
import sklearn
from sklearn.base import BiclusterMixin
from sklearn.decomposition import PCA
from sklearn.linear_model._base import LinearClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import SkipTest
from sklearn.utils.estimator_checks import (
_construct_instance,
_set_checking_parameters,
_get_check_estimator_ids,
check_class_weight_balanced_linear_classifier,
parametrize_with_checks,
check_n_features_in_after_fitting,
)
def test_all_estimator_no_base_class():
    """all_estimators must not expose abstract Base* classes."""
    for name, Estimator in all_estimators():
        assert not name.lower().startswith('base'), (
            "Base estimators such as {0} should not be included"
            " in all_estimators".format(name))
def _sample_func(x, y=1):
pass
@pytest.mark.parametrize("val, expected", [
    (partial(_sample_func, y=1), "_sample_func(y=1)"),
    (_sample_func, "_sample_func"),
    (partial(_sample_func, 'world'), "_sample_func"),
    (LogisticRegression(C=2.0), "LogisticRegression(C=2.0)"),
    (
        LogisticRegression(random_state=1, solver='newton-cg',
                           class_weight='balanced', warm_start=True),
        "LogisticRegression(class_weight='balanced',random_state=1,"
        "solver='newton-cg',warm_start=True)",
    ),
])
def test_get_check_estimator_ids(val, expected):
    """Ids for functions/partials/estimators must render deterministically."""
    assert _get_check_estimator_ids(val) == expected
def _tested_estimators():
    """Yield one constructed instance per testable (non-bicluster) estimator."""
    for _, Estimator in all_estimators():
        if issubclass(Estimator, BiclusterMixin):
            continue
        try:
            instance = _construct_instance(Estimator)
        except SkipTest:
            continue
        yield instance
@parametrize_with_checks(list(_tested_estimators()))
def test_estimators(estimator, check, request):
    """Run the common check suite on every tested estimator instance.

    Deprecation/convergence noise is silenced so only genuine check
    failures surface.
    """
    # FutureWarning was listed twice in the ignored categories; the
    # redundant duplicate has been removed (behavior unchanged).
    with ignore_warnings(category=(FutureWarning,
                                   ConvergenceWarning,
                                   UserWarning)):
        _set_checking_parameters(estimator)
        check(estimator)
def test_check_estimator_generate_only():
    """check_estimator(..., generate_only=True) must return a generator."""
    checks_generator = check_estimator(LogisticRegression(),
                                       generate_only=True)
    assert isgenerator(checks_generator)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
# ignore deprecated open(.., 'U') in numpy distutils
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in scikit-learn
    # This test requires Cython which is not necessarily there when running
    # the tests of an installed version of scikit-learn or when scikit-learn
    # is installed in editable mode by pip build isolation enabled.
    pytest.importorskip("Cython")
    cwd = os.getcwd()
    # setup.py lives one directory above the installed sklearn package.
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    if not os.path.exists(setup_filename):
        pytest.skip('setup.py not available')
    # XXX unreached code as of v0.22
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        # Simulate running "python setup.py config" from the command line.
        sys.argv = ['setup.py', 'config']
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            # Execute setup.py with __name__ == '__main__' so its
            # entry-point branch actually runs.
            with open('setup.py') as f:
                exec(f.read(), dict(__name__='__main__'))
    finally:
        # Restore argv and the working directory even if configure fails.
        sys.argv = old_argv
        os.chdir(cwd)
def _tested_linear_classifiers():
    """Yield (name, class) pairs of linear classifiers with class_weight."""
    classifiers = all_estimators(type_filter='classifier')
    with warnings.catch_warnings(record=True):
        for name, clazz in classifiers:
            # FIXME: skip estimators that require constructor parameters.
            if getattr(clazz, "_required_parameters", []):
                continue
            has_class_weight = 'class_weight' in clazz().get_params().keys()
            if has_class_weight and issubclass(clazz, LinearClassifierMixin):
                yield name, clazz
@pytest.mark.parametrize(
    "name, Classifier", _tested_linear_classifiers())
def test_class_weight_balanced_linear_classifiers(name, Classifier):
    """Delegate to the shared balanced class_weight check."""
    check_class_weight_balanced_linear_classifier(name, Classifier)
@ignore_warnings
def test_import_all_consistency():
    """Every name listed in a module's __all__ must actually exist there."""
    walker = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    submods = [modname for _, modname, _ in walker]
    for modname in submods + ['sklearn']:
        if ".tests." in modname:
            continue
        # Modules unavailable on PyPy are skipped outright.
        if IS_PYPY and ('_svmlight_format_io' in modname or
                        'feature_extraction._hashing_fast' in modname):
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            assert hasattr(package, name), (
                "Module '{0}' has no attribute '{1}'".format(modname, name))
def test_root_import_all_completeness():
    """Every public top-level submodule must be listed in sklearn.__all__."""
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup', 'conftest')
    for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
                                               onerror=lambda _: None):
        is_exempt = ('.' in modname or modname.startswith('_')
                     or modname in EXCEPTIONS)
        if is_exempt:
            continue
        assert modname in sklearn.__all__
def test_all_tests_are_importable():
    """Each contentful subpackage must contain an importable tests package."""
    # Externals, tests themselves and private packages are exempt.
    HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
    \.externals(\.|$)|
    \.tests(\.|$)|
    \._
    ''')
    lookup = {name: ispkg
              for _, name, ispkg
              in pkgutil.walk_packages(sklearn.__path__, prefix='sklearn.')}
    missing_tests = []
    for name, ispkg in lookup.items():
        if not ispkg:
            continue
        if HAS_TESTS_EXCEPTIONS.search(name):
            continue
        if name + '.tests' not in lookup:
            missing_tests.append(name)
    assert missing_tests == [], (
        '{0} do not have `tests` subpackages. Perhaps they require '
        '__init__.py or an add_subpackage directive in the parent '
        'setup.py'.format(missing_tests))
def test_class_support_removed():
    """Passing classes (not instances) must raise a TypeError."""
    msg = "Passing a class was deprecated.* isn't supported anymore"
    with pytest.raises(TypeError, match=msg):
        check_estimator(LogisticRegression)
    with pytest.raises(TypeError, match=msg):
        parametrize_with_checks([LogisticRegression])
def _generate_search_cv_instances():
    """Yield Grid/Randomized SearchCV instances over simple estimators."""
    # Plain estimators wrapped directly in a search.
    plain_cases = [
        (Ridge, {"alpha": [0.1, 1.0]}),
        (LogisticRegression, {"C": [0.1, 1.0]}),
    ]
    for SearchCV, (Estimator, param_grid) in zip(
            [GridSearchCV, RandomizedSearchCV], plain_cases):
        yield SearchCV(Estimator(), param_grid)

    # Pipelines (PCA + estimator) with pipeline-prefixed grids; these raise
    # rather than score on errors.
    pipeline_cases = [
        (Ridge, {"ridge__alpha": [0.1, 1.0]}),
        (LogisticRegression, {"logisticregression__C": [0.1, 1.0]}),
    ]
    for SearchCV, (Estimator, param_grid) in zip(
            [GridSearchCV, RandomizedSearchCV], pipeline_cases):
        search = SearchCV(make_pipeline(PCA(), Estimator()), param_grid)
        yield search.set_params(error_score="raise")
@parametrize_with_checks(list(_generate_search_cv_instances()))
def test_search_cv(estimator, check, request):
    """Run common checks on SearchCV instances.

    We have a separate test because those meta-estimators can accept a
    wide range of base estimators (classifiers, regressors, pipelines).
    """
    # FutureWarning was listed twice in the ignored categories; the
    # redundant duplicate has been removed (behavior unchanged).
    with ignore_warnings(
        category=(
            FutureWarning,
            ConvergenceWarning,
            UserWarning,
        )
    ):
        check(estimator)
# TODO: When more modules get added, we can remove it from this list to make
# sure it gets tested. After we finish each module we can move the checks
# into sklearn.utils.estimator_checks.check_n_features_in.
#
# check_estimators_partial_fit_n_features can either be removed or updated
# with the two more assertions:
# 1. `n_features_in_` is set during the first call to `partial_fit`.
# 2. More strict when it comes to the error message.
#
# check_classifiers_train would need to be updated with the error message
# Top-level sklearn module names whose estimators are still excluded from
# the n_features_in_ check below.
N_FEATURES_IN_AFTER_FIT_MODULES_TO_IGNORE = {
    'calibration',
    'compose',
    'covariance',
    'cross_decomposition',
    'discriminant_analysis',
    'ensemble',
    'feature_extraction',
    'feature_selection',
    'gaussian_process',
    'isotonic',
    'linear_model',
    'manifold',
    'mixture',
    'model_selection',
    'multiclass',
    'multioutput',
    'naive_bayes',
    'neighbors',
    'pipeline',
    'random_projection',
    'semi_supervised',
    'svm',
}
# Instances from every other module; parametrized in
# test_check_n_features_in_after_fitting below.
N_FEATURES_IN_AFTER_FIT_ESTIMATORS = [
    est for est in _tested_estimators() if est.__module__.split('.')[1] not in
    N_FEATURES_IN_AFTER_FIT_MODULES_TO_IGNORE
]
@pytest.mark.parametrize(
    "estimator", N_FEATURES_IN_AFTER_FIT_ESTIMATORS,
    ids=_get_check_estimator_ids)
def test_check_n_features_in_after_fitting(estimator):
    """n_features_in_ must be set and enforced once the estimator is fit."""
    _set_checking_parameters(estimator)
    check_n_features_in_after_fitting(estimator.__class__.__name__, estimator)
| |
from rest_framework import generics, status
from rest_framework.authentication import \
SessionAuthentication, \
TokenAuthentication
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.decorators import \
authentication_classes, \
permission_classes, \
api_view
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework.exceptions import NotAuthenticated
from analytics.serializers import *
def custom_exception_handler(exc):
    """Map NotAuthenticated to a 401 response; delegate everything else.

    DRF's default behavior turns NotAuthenticated into a 403; this handler
    returns a proper 401 instead, and falls back to the framework's
    exception_handler for all other exceptions.
    """
    if not isinstance(exc, NotAuthenticated):
        return exception_handler(exc)
    return Response(
        {'detail': 'Not authenticated'},
        status=status.HTTP_401_UNAUTHORIZED,
        exception=True
    )
@api_view(['GET'])
@authentication_classes((SessionAuthentication, TokenAuthentication))
@permission_classes((IsAuthenticated,))
def analytics_api_root(request):
    """
    The entry endpoint of our API.
    """
    # Route prefixes map to "<prefix>s" keys and "<prefix>-list" view names.
    prefixes = ['analyte', 'conjugate', 'buffer', 'isotype', 'sample-type']
    return Response({
        prefix + 's': reverse(prefix + '-list', request=request)
        for prefix in prefixes
    })
class LoginRequiredMixin(object):
    """
    View mixin to verify a user is logged in.
    """
    # Accept both browser sessions and API tokens; any authenticated user
    # passes the permission check.
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    permission_classes = (IsAuthenticated,)
class AdminRequiredMixin(object):
    """
    View mixin to verify a user is an administrator.
    """
    # NOTE(review): unlike LoginRequiredMixin, TokenAuthentication is not
    # accepted here — confirm whether admin actions are deliberately
    # session-only.
    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated, IsAdminUser)
@api_view(['GET'])
@authentication_classes((SessionAuthentication, TokenAuthentication))
@permission_classes((IsAuthenticated,))
def get_user_details(request):
    """Return basic account information for the authenticated user."""
    user = request.user
    return Response({
        'username': user.username,
        'email': user.email,
        'superuser': user.is_superuser,
        'staff': user.is_staff,
    })
class AnalyteList(generics.ListCreateAPIView):
    """
    API endpoint representing a list of analytes.
    """
    model = Analyte
    serializer_class = AnalyteSerializer
    filter_fields = ('name', 'subtrahend')

    def post(self, request, *args, **kwargs):
        """Create an analyte; only staff users are allowed."""
        if request.user.is_staff:
            return super(AnalyteList, self).post(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class AnalyteDetail(
        AdminRequiredMixin,
        generics.RetrieveUpdateDestroyAPIView):
    """
    API endpoint representing a single analyte.
    """
    model = Analyte
    serializer_class = AnalyteSerializer

    def put(self, request, *args, **kwargs):
        """Full update; staff only."""
        if request.user.is_staff:
            return super(AnalyteDetail, self).put(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)

    def patch(self, request, *args, **kwargs):
        # Partial updates are deliberately not supported.
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    def delete(self, request, *args, **kwargs):
        """Delete the analyte; staff only."""
        if request.user.is_staff:
            return super(AnalyteDetail, self).delete(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class ConjugateList(generics.ListCreateAPIView):
    """
    API endpoint representing a list of conjugates.
    """
    model = Conjugate
    serializer_class = ConjugateSerializer
    filter_fields = ('name',)

    def post(self, request, *args, **kwargs):
        """Create a conjugate; only staff users are allowed."""
        if request.user.is_staff:
            return super(ConjugateList, self).post(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class ConjugateDetail(
        AdminRequiredMixin,
        generics.RetrieveUpdateDestroyAPIView):
    """
    API endpoint representing a single conjugate.
    """
    model = Conjugate
    serializer_class = ConjugateSerializer

    def put(self, request, *args, **kwargs):
        """Full update; staff only."""
        if request.user.is_staff:
            return super(ConjugateDetail, self).put(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)

    def patch(self, request, *args, **kwargs):
        # Partial updates are deliberately not supported.
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    def delete(self, request, *args, **kwargs):
        """Delete the conjugate; staff only."""
        if request.user.is_staff:
            return super(ConjugateDetail, self).delete(
                request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class BufferList(generics.ListCreateAPIView):
    """
    API endpoint representing a list of buffers.
    """
    model = Buffer
    serializer_class = BufferSerializer
    filter_fields = ('name',)

    def post(self, request, *args, **kwargs):
        """Create a buffer; only staff users are allowed."""
        if request.user.is_staff:
            return super(BufferList, self).post(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class BufferDetail(
        AdminRequiredMixin,
        generics.RetrieveUpdateDestroyAPIView):
    """
    API endpoint representing a single buffer.
    """
    model = Buffer
    serializer_class = BufferSerializer

    def put(self, request, *args, **kwargs):
        """Full update; staff only."""
        if request.user.is_staff:
            return super(BufferDetail, self).put(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)

    def patch(self, request, *args, **kwargs):
        # Partial updates are deliberately not supported.
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    def delete(self, request, *args, **kwargs):
        """Delete the buffer; staff only."""
        if request.user.is_staff:
            return super(BufferDetail, self).delete(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class IsotypeList(generics.ListCreateAPIView):
    """
    API endpoint representing a list of isotypes.
    """
    model = Isotype
    serializer_class = IsotypeSerializer
    filter_fields = ('name',)

    def post(self, request, *args, **kwargs):
        """Create an isotype; only staff users are allowed."""
        if request.user.is_staff:
            return super(IsotypeList, self).post(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class IsotypeDetail(
        AdminRequiredMixin,
        generics.RetrieveUpdateDestroyAPIView):
    """
    API endpoint representing a single isotype.
    """
    model = Isotype
    serializer_class = IsotypeSerializer

    def put(self, request, *args, **kwargs):
        """Full update; staff only."""
        if request.user.is_staff:
            return super(IsotypeDetail, self).put(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)

    def patch(self, request, *args, **kwargs):
        # Partial updates are deliberately not supported.
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    def delete(self, request, *args, **kwargs):
        """Delete the isotype; staff only."""
        if request.user.is_staff:
            return super(IsotypeDetail, self).delete(request, *args, **kwargs)
        return Response(status=status.HTTP_403_FORBIDDEN)
class SampleTypeList(generics.ListCreateAPIView):
    """
    API endpoint representing a list of sample types.
    """
    model = SampleType
    serializer_class = SampleTypeSerializer
    filter_fields = ('name',)

    def post(self, request, *args, **kwargs):
        # Creation is restricted to staff users.
        if not request.user.is_staff:
            return Response(status=status.HTTP_403_FORBIDDEN)
        response = super(SampleTypeList, self).post(request, *args, **kwargs)
        return response
class SampleTypeDetail(
        AdminRequiredMixin,
        generics.RetrieveUpdateDestroyAPIView):
    """
    API endpoint representing a single sample type.
    """
    model = SampleType
    serializer_class = SampleTypeSerializer

    def put(self, request, *args, **kwargs):
        # Full updates are restricted to staff users.
        if not request.user.is_staff:
            return Response(status=status.HTTP_403_FORBIDDEN)
        return super(SampleTypeDetail, self).put(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        # Partial updates are deliberately not supported.
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    def delete(self, request, *args, **kwargs):
        # Deletion is restricted to staff users.
        if not request.user.is_staff:
            return Response(status=status.HTTP_403_FORBIDDEN)
        return super(SampleTypeDetail, self).delete(request, *args, **kwargs)
| |
import cookielib
import os
import re
import sys
import subprocess
import time
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import xbmcvfs
# get initial plugin settings
pluginhandle = int(sys.argv[1])
addon = xbmcaddon.Addon()
addonID = addon.getAddonInfo('id')
# add local lib to sys.path to import local library files
sys.path.append(os.path.join(xbmc.translatePath('special://home/addons/' + addonID), 'resources', 'lib'))
import avalon_kodi_netflix_interop_auth as auth
import avalon_kodi_netflix_menus as menus
import avalon_kodi_utils as utils
import avalon_kodi_netflix_interop_scrape as scraper
# read addon settings
username = addon.getSetting('username')
password = addon.getSetting('password')
# determine additional resource paths
# throttle ceiling passed to every scraper/menu call (requests per minute)
maxrequestsperminute = 50
# set paths for re-use throughout plugin
metaroot = xbmc.translatePath('special://profile/addon_data/' + addonID + '/meta')
playerpath = xbmc.translatePath('special://home/addons/' + addonID + '/resources/LaunchPlayer.exe')
cookiepath = xbmc.translatePath('special://profile/addon_data/' + addonID + '/cookies')
callstackpath = xbmc.translatePath('special://profile/addon_data/' + addonID + '/callstack')
apiurlpath = xbmc.translatePath('special://profile/addon_data/' + addonID + '/apiurl')
# create the metadata cache directory on first run
if not os.path.exists(metaroot):
    os.mkdir(metaroot)
# setup the cookies
cookiejar = cookielib.MozillaCookieJar()
if os.path.exists(cookiepath):
    cookiejar.load(cookiepath)
# get path parameters
params = utils.paramStringToDict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode',''))
genre = urllib.unquote_plus(params.get('genre',''))
genrename = urllib.unquote_plus(params.get('genrename', ''))
# NOTE(review): the URL parameter is named 'title' but holds a video id.
videoid = urllib.unquote_plus(params.get('title',''))
seriesid = urllib.unquote_plus(params.get('series',''))
seasonid = urllib.unquote_plus(params.get('seasonid', ''))
season = urllib.unquote_plus(params.get('season', ''))
track = urllib.unquote_plus(params.get('track',''))
# save the cookiejar - we're probably doin it too early here as a login hasn't occured
cookiejar.save(cookiepath)
# The real guts start here... which mode are we running?
if mode == 'listgenres':
    # list the genres found in /meta/genres/genres.json
    # def genres(addon, addonID, pluginhandle, metapath , viewpath , callstackpath, maxrequestsperminute, cookiepath, metaroot)
    menus.genres(addon, addonID, pluginhandle, os.path.join(metaroot, "genres", "genres.json"), sys.argv[0], callstackpath, maxrequestsperminute, cookiepath, metaroot)
elif mode == 'listsubgenres':
    # list the sub-genres of the genre specified by the genre parameter
    # def subGenres(addon, addonID, pluginhandle, metapath , viewpath , callstackpath, maxrequestsperminute, cookiepath, genreid):
    menus.subGenres(addon, addonID, pluginhandle, os.path.join(metaroot, "genres", genre + ".json"), sys.argv[0], callstackpath, maxrequestsperminute, cookiepath, genre)
elif mode == 'listgenretitles':
    # determine the genre's meta data file path
    genretitlesmetapath = os.path.join(metaroot, "genreTitles", genre + ".json")
    # is the genre file out-of-date or missing?
    updateGenreTitles = False
    if os.path.exists(genretitlesmetapath):
        # cache lifetime in seconds: one day times the "cacheage" setting
        oneday = 24 * 60 * 60
        if utils.fileIsOlderThan(genretitlesmetapath, (oneday * int(addon.getSetting("cacheage")))):
            updateGenreTitles = True
    else:
        updateGenreTitles = True
    # if the genre file is out-of-date or missing run the update script
    if updateGenreTitles:
        # do the settings call for a prompt before updating?
        if addon.getSetting("promptforcache") == "true":
            dialog = xbmcgui.Dialog()
            ret = dialog.yesno('Netflix', utils.translation(addon, 30200))
            if(ret):
                # run the script if the user says so...
                # 'xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateGenreTitles.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ',' + metaroot + ',' + genres[title] + ',' + title + ')'
                xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateGenreTitles.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ',' + metaroot + ',' + genre + ',' + genrename + ')')
        else:
            # run the script
            xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateGenreTitles.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ',' + metaroot + ',' + genre + ',' + genrename + ')')
    # regardless of inprogress updates list titles for specified genre that we already have data for...
    # def genreTitles(addon, addonID, pluginhandle, metapath , viewpath , callstackpath, maxrequestsperminute, cookiepath, genreid, metaroot):
    menus.genreTitles(addon, addonID, pluginhandle, genretitlesmetapath, sys.argv[0], callstackpath, maxrequestsperminute, cookiepath, genre, metaroot)
elif mode == 'listseasons':
    # determine the mata data folder for the series' title store
    metapath = os.path.join(metaroot, "Titles", seriesid)
    # list the seasons for the specified title
    # def seasons(addon, addonID, pluginhandle, metapath, viewpath , callstackpath, maxrequestsperminute, cookiepath, seriesid, metaroot)
    menus.seasons(addon, addonID, pluginhandle, metapath, sys.argv[0], callstackpath, maxrequestsperminute, cookiepath, seriesid, metaroot)
elif mode == 'listepisodes':
    # determine the meta data folder for the series' title store
    metapath = os.path.join(metaroot, "Titles", seriesid)
    # list the episodes for the specified series and season
    # def episodes(addon, addonid, pluginhandle, metapath, viewpath , callstackpath, maxreq , cookiepath, seriesid, seasonid, metaroot)
    menus.episodes(addon, addonID, pluginhandle, metapath, sys.argv[0], callstackpath, maxrequestsperminute, cookiepath, seriesid, seasonid, metaroot)
elif mode == 'playvideo':
    # play the video
    # first of all stop the kodi player if it is currently playing
    if(xbmc.Player().isPlaying()):
        xbmc.Player().stop()
    # NOTE(review): the bare except silently swallows launcher failures —
    # consider at least logging the error.
    try:
        subprocess.Popen(playerpath + ' /movieid=' + videoid, shell=False)
    except:
        pass
elif mode == 'playepisode':
    # play the episode
    # first of all stop the kodi player if it is currently playing
    if(xbmc.Player().isPlaying()):
        xbmc.Player().stop()
    # NOTE(review): credentials are passed on the external player's command
    # line, which is visible to other local processes.
    try:
        subprocess.Popen(playerpath + ' /movieid=' + videoid + ' /seriesid=' + seriesid + ' /savepath=' + os.path.join(metaroot, "titles", seriesid) + ' /un=' + username + ' /pw=' + password, shell=False)
    except:
        pass
elif mode == 'mylist':
    # list MyList titles
    # def myList(viewpath , pluginhandle, metaroot, addon)
    menus.myList(sys.argv[0], pluginhandle, metaroot, addon, callstackpath, maxrequestsperminute, cookiepath)
elif mode=='search':
    # create a keyboard dialog and take a search string
    keyboard = xbmc.Keyboard('', utils.translation(addon, 30203))
    keyboard.doModal()
    # if something is entered and submitted, get the text and pass it to the search menu
    if keyboard.isConfirmed() and keyboard.getText():
        search_string = keyboard.getText()
        menus.search(addon, addonID, pluginhandle, sys.argv[0], callstackpath, maxrequestsperminute, cookiejar, search_string, metaroot, cookiepath)
else:
    # Default mode: refresh caches if needed and show the main index.
    # clear any active states
    if os.path.exists(os.path.join(metaroot, "active")):
        for ffile in os.listdir(os.path.join(metaroot, "active")):
            os.remove(os.path.join(metaroot, "active", ffile))
    # check that the basic meta cache has been saved and has not expired
    UpdateGenres = False
    if os.path.exists(os.path.join(metaroot, "Genres", "genres.json")):
        oneday = 24 * 60 * 60
        if utils.fileIsOlderThan(os.path.join(metaroot, "Genres", "genres.json"), (oneday * int(addon.getSetting("cacheage")))):
            UpdateGenres = True
    else:
        UpdateGenres = True
    # skip the refresh when a genre scrape is already marked as running
    if os.path.exists(os.path.join(metaroot, "active", "scrape_genres")):
        UpdateGenres = False
    # check if MyList needs to be updated
    UpdateMyList = False
    if os.path.isdir(os.path.join(metaroot, "MyList")):
        oneday = 24 * 60 * 60
        for ffile in os.listdir(os.path.join(metaroot, "MyList")):
            if utils.fileIsOlderThan(os.path.join(metaroot, "MyList", ffile), (oneday * int(addon.getSetting("mylistage")))):
                UpdateMyList = True
        if UpdateMyList:
            print "Netflix: MyList is out-of-date"
        else:
            print "Netflix: MyList is up-to-date"
    else:
        print "Netflix: MyList data is not available"
        UpdateMyList = True
    # don't re-cache if already in progress - this will cause weird bounce on the available titles
    if os.path.exists(os.path.join(metaroot, "active", "scrape_mylist")):
        UpdateMyList = False
    # update genres
    if UpdateGenres:
        if addon.getSetting("promptforcache") == "true":
            dialog = xbmcgui.Dialog()
            ret = dialog.yesno('Netflix', utils.translation(addon, 30200))
            if(ret):
                # make sure we can login to the Netflix website
                # keep prompting for credentials until a login succeeds
                while not auth.login(username, password, cookiejar, callstackpath, maxrequestsperminute):
                    d = xbmcgui.Dialog()
                    addon.setSetting("username", d.input(utils.translation(addon, 30004)))
                    addon.setSetting("password", d.input(utils.translation(addon, 30005), type=xbmcgui.INPUT_ALPHANUM, option=xbmcgui.ALPHANUM_HIDE_INPUT))
                    username = addon.getSetting("username")
                    password = addon.getSetting("password")
                xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateGenres.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ',' + metaroot + ')')
        else:
            xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateGenres.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ',' + metaroot + ')')
    # update MyList
    if UpdateMyList:
        if addon.getSetting("promptformylist") == "true":
            dialog = xbmcgui.Dialog()
            ret = dialog.yesno('Netflix', utils.translation(addon, 30202))
            if ret:
                # make sure we can login to the Netflix website
                while not auth.login(username, password, cookiejar, callstackpath, maxrequestsperminute):
                    d = xbmcgui.Dialog()
                    addon.setSetting("username", d.input(utils.translation(addon, 30004)))
                    addon.setSetting("password", d.input(utils.translation(addon, 30005), type=xbmcgui.INPUT_ALPHANUM, option=xbmcgui.ALPHANUM_HIDE_INPUT))
                    username = addon.getSetting("username")
                    password = addon.getSetting("password")
                xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateMyList.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ', ' + metaroot + ')')
        else:
            xbmc.executebuiltin('xbmc.runscript(special://home/addons/' + addonID + '/resources/scripts/UpdateMyList.py, ' + addon.getSetting("username") + ', ' + addon.getSetting("password") + ', ' + addon.getSetting("cacheage") + ', ' + cookiepath + ', ' + callstackpath + ', ' + str(maxrequestsperminute) + ', ' + addonID + ', ' + metaroot + ')')
    # make sure the API url is upto date
    scraper.scrapeAPIURL(cookiejar, callstackpath, maxrequestsperminute, metaroot)
    # display the main index
    # def index(addon, addonID, pluginhandle, metapath, viewpath , callstackpath, maxrequestsperminute, cookiepath)
    menus.index(addon, addonID, pluginhandle, metaroot, sys.argv[0], callstackpath, maxrequestsperminute, cookiepath)
| |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
# Python < 2.4 lacks the builtin set type; fall back to sets.Set.
if sys.hexversion < 0x020400f0: from sets import Set as set
try:
	from xml.sax import make_parser
	from xml.sax.handler import ContentHandler
except ImportError:
	# xml.sax is optional; record its absence and stub out ContentHandler
	# so classes deriving from it still import.
	has_xml=False
	ContentHandler=object
else:
	has_xml=True
import os,sys
from waflib.Tools import c_preproc,cxx
from waflib import TaskGen,Task,Utils,Runner,Options,Node,Errors
from waflib.TaskGen import feature,after_method,extension
from waflib.Logs import error
# Header extensions searched when resolving a .moc file to its header.
MOC_H=['.h','.hpp','.hxx','.hh']
# Qt resource files (processed by rcc).
EXT_RCC=['.qrc']
# Qt Designer files (processed by uic).
EXT_UI=['.ui']
# C++ source extensions that may reference generated .moc files.
EXT_QT4=['.cpp','.cc','.cxx','.C']
class qxx(cxx.cxx):
	"""C++ compile task that schedules moc runs for its .moc dependencies."""
	def __init__(self,*k,**kw):
		Task.Task.__init__(self,*k,**kw)
		# Set to 1 once add_moc_tasks has created the moc tasks for this unit.
		self.moc_done=0
	def scan(self):
		"""Run the C preprocessor scan, moving .moc entries to raw deps."""
		(nodes,names)=c_preproc.scan(self)
		# NOTE(review): removing from `nodes` while iterating it can skip
		# the element following each removed one — confirm upstream intent.
		for x in nodes:
			if x.name.endswith('.moc'):
				nodes.remove(x)
				names.append(x.path_from(self.inputs[0].parent.get_bld()))
		return(nodes,names)
	def runnable_status(self):
		# Postpone compilation until the moc tasks exist and predecessors ran.
		if self.moc_done:
			return Task.Task.runnable_status(self)
		else:
			for t in self.run_after:
				if not t.hasrun:
					return Task.ASK_LATER
			self.add_moc_tasks()
			return Task.Task.runnable_status(self)
	def add_moc_tasks(self):
		"""Create one moc task per recorded .moc dependency of this source."""
		node=self.inputs[0]
		bld=self.generator.bld
		try:
			# Compute the signature so the cached value can be discarded; it
			# must be recomputed after the moc tasks are added.
			self.signature()
		except KeyError:
			pass
		else:
			delattr(self,'cache_sig')
		moctasks=[]
		mocfiles=[]
		try:
			tmp_lst=bld.raw_deps[self.uid()]
			bld.raw_deps[self.uid()]=[]
		except KeyError:
			tmp_lst=[]
		for d in tmp_lst:
			if not d.endswith('.moc'):
				continue
			if d in mocfiles:
				# duplicate .moc dependency; should not happen
				error("paranoia owns")
				continue
			mocfiles.append(d)
			h_node=None
			# optional override of the header extensions from the options
			# NOTE(review): if qt_header_ext is absent, `ext` is referenced
			# before assignment below — confirm the option is always defined.
			try:ext=Options.options.qt_header_ext.split()
			except AttributeError:pass
			if not ext:ext=MOC_H
			base2=d[:-4]
			# look for the matching header next to the source and on the
			# include paths; for/else raises when nothing is found
			for x in[node.parent]+self.generator.includes_nodes:
				for e in ext:
					h_node=x.find_node(base2+e)
					if h_node:
						break
				else:
					continue
				break
			else:
				raise Errors.WafError('no header found for %r which is a moc file'%d)
			m_node=h_node.change_ext('.moc')
			bld.node_deps[(self.inputs[0].parent.abspath(),m_node.name)]=h_node
			task=Task.classes['moc'](env=self.env,generator=self.generator)
			task.set_inputs(h_node)
			task.set_outputs(m_node)
			# inject the new task at the front of the scheduler queue
			gen=bld.producer
			gen.outstanding.insert(0,task)
			gen.total+=1
			moctasks.append(task)
		tmp_lst=bld.raw_deps[self.uid()]=mocfiles
		lst=bld.node_deps.get(self.uid(),())
		for d in lst:
			name=d.name
			if name.endswith('.moc'):
				task=Task.classes['moc'](env=self.env,generator=self.generator)
				task.set_inputs(bld.node_deps[(self.inputs[0].parent.abspath(),name)])
				task.set_outputs(d)
				gen=bld.producer
				gen.outstanding.insert(0,task)
				gen.total+=1
				moctasks.append(task)
		# compile only after every moc task has run
		self.run_after.update(set(moctasks))
		self.moc_done=1
	# reuse the plain cxx run method for the actual compilation
	run=Task.classes['cxx'].__dict__['run']
class trans_update(Task.Task):
    """Refresh a Qt translation source (.ts) from the code via lupdate."""
    run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}'
    color = 'BLUE'

# lupdate modifies the .ts targets in place in the source tree; tell waf
# that the outputs of this task class are updated rather than created.
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
    """SAX handler collecting the text content of each <file> element of a
    Qt resource (.qrc) document into ``self.files``."""
    def __init__(self):
        self.buf = []    # character chunks of the element currently being read
        self.files = []  # completed <file> entries, in document order

    def startElement(self, name, attrs):
        if name != 'file':
            return
        self.buf = []

    def endElement(self, name):
        if name != 'file':
            return
        text = ''.join(self.buf)
        self.files.append(str(text))

    def characters(self, cars):
        self.buf.append(cars)
def create_rcc_task(self, node):
    """Extension handler for .qrc files: run rcc to produce *_rc.cpp, then
    compile that generated source; returns the compilation task."""
    generated = node.change_ext('_rc.cpp')
    self.create_task('rcc', node, generated)
    compile_task = self.create_task('cxx', generated, generated.change_ext('.o'))
    if not hasattr(self, 'compiled_tasks'):
        self.compiled_tasks = []
    self.compiled_tasks.append(compile_task)
    return compile_task
def create_uic_task(self, node):
    """Extension handler for .ui files: create a 'ui4' task generating the
    corresponding ui_*.h header."""
    task = self.create_task('ui4', node)
    header_name = self.env['ui_PATTERN'] % node.name[:-3]
    task.outputs = [self.path.find_or_declare(header_name)]
def add_lang(self, node):
    """Extension handler for .ts files: accumulate them on ``self.lang`` for
    later processing by apply_qt4."""
    existing = self.to_list(getattr(self, 'lang', []))
    self.lang = existing + [node]
def apply_qt4(self):
    """
    Feature 'qt4' method (runs after 'apply_link'): create ts2qm translation
    tasks, optionally lupdate refresh tasks and a generated .qrc bundling the
    .qm files, then derive MOC_FLAGS from the -D/-I flags in CXXFLAGS.
    """
    if getattr(self, 'lang', None):
        qmtasks = []
        for x in self.to_list(self.lang):
            if isinstance(x, str):
                x = self.path.find_resource(x + '.ts')
            qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm')))
        if getattr(self, 'update', None) and Options.options.trans_qt4:
            # All C++ sources plus any .ui inputs feed the lupdate string scan.
            cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [
                a.inputs[0] for a in self.tasks
                if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')]
            for x in qmtasks:
                self.create_task('trans_update', cxxnodes, x.inputs)
        if getattr(self, 'langname', None):
            # Bundle the compiled .qm files into a generated resource and
            # link the resulting object into the final binary.
            qmnodes = [x.outputs[0] for x in qmtasks]
            rcnode = self.langname
            if isinstance(rcnode, str):
                rcnode = self.path.find_or_declare(rcnode + '.qrc')
            t = self.create_task('qm2rcc', qmnodes, rcnode)
            k = create_rcc_task(self, t.outputs[0])
            self.link_task.inputs.append(k.outputs[0])
    # Only preprocessor flags (-D/-I and MSVC /D //I) are relevant to moc.
    lst = []
    for flag in self.to_list(self.env['CXXFLAGS']):
        if len(flag) < 2:
            continue
        f = flag[0:2]
        if f in ['-D', '-I', '/D', '/I']:
            lst.append(flag)
    self.env['MOC_FLAGS'] = lst
def cxx_hook(self, node):
    # Extension handler for C++ sources: compile with the moc-aware 'qxx'
    # task class instead of the plain 'cxx' one.
    return self.create_compiled_task('qxx', node)
class rcc(Task.Task):
    """Run the Qt resource compiler on a .qrc file and scan it for the
    resource files it references."""
    color = 'BLUE'
    run_str = '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
    ext_out = ['.h']

    def scan(self):
        """Parse the .qrc input with SAX and return ``(nodes, names)``:
        resolved dependency nodes, plus raw names for entries not found."""
        node = self.inputs[0]
        parser = make_parser()
        curHandler = XMLHandler()
        parser.setContentHandler(curHandler)
        # FIX: close the file even when parsing raises; the original leaked
        # the handle on a malformed .qrc.
        fi = open(self.inputs[0].abspath())
        try:
            parser.parse(fi)
        finally:
            fi.close()
        nodes = []
        names = []
        root = self.inputs[0].parent
        for x in curHandler.files:
            nd = root.find_resource(x)
            if nd:
                nodes.append(nd)
            else:
                names.append(x)
        return (nodes, names)
class moc(Task.Task):
    """Run Qt's meta-object compiler on a header to produce a .moc file."""
    color = 'BLUE'
    run_str = '${QT_MOC} ${MOC_FLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui4(Task.Task):
    """Run uic on a Qt Designer .ui file to generate the ui_*.h header."""
    color = 'BLUE'
    run_str = '${QT_UIC} ${SRC} -o ${TGT}'
    ext_out = ['.h']
class ts2qm(Task.Task):
    """Compile a .ts translation source into a binary .qm via lrelease."""
    color = 'BLUE'
    run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
    """Generate a .qrc resource document listing the compiled .qm files."""
    color = 'BLUE'
    after = 'ts2qm'

    def run(self):
        """Write the .qrc file referencing every input, relative to the
        output's directory."""
        base = self.outputs[0].parent
        entries = []
        for k in self.inputs:
            entries.append('<file>%s</file>' % k.path_from(base))
        txt = '\n'.join(entries)
        code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
        self.outputs[0].write(code)
def configure(self):
    """
    Detect the Qt4 toolchain: locate qmake (preferring the newest version),
    query it for install paths, find the helper binaries (uic, moc, rcc,
    lrelease, lupdate) and register the Qt modules through pkg-config.
    """
    env = self.env
    opt = Options.options
    qtdir = getattr(opt, 'qtdir', '')
    qtbin = getattr(opt, 'qtbin', '')
    qtlibs = getattr(opt, 'qtlibs', '')
    useframework = getattr(opt, 'use_qt4_osxframework', True)
    paths = []
    if qtdir:
        qtbin = os.path.join(qtdir, 'bin')
    if not qtdir:
        qtdir = self.environ.get('QT4_ROOT', '')
        qtbin = os.path.join(qtdir, 'bin')
    if qtbin:
        paths = [qtbin]
    if not qtdir:
        # No explicit location given: search PATH plus the usual prefixes.
        paths = os.environ.get('PATH', '').split(os.pathsep)
        paths.append('/usr/share/qt4/bin/')
        try:
            lst = Utils.listdir('/usr/local/Trolltech/')
        except OSError:
            pass
        else:
            if lst:
                # Pick the lexicographically last (i.e. newest-named) SDK dir.
                lst.sort()
                lst.reverse()
                qtdir = '/usr/local/Trolltech/%s/' % lst[0]
                qtbin = os.path.join(qtdir, 'bin')
                paths.append(qtbin)
    # Keep the qmake candidate reporting the highest QT_VERSION.
    cand = None
    prev_ver = ['4', '0', '0']
    for qmk in ['qmake-qt4', 'qmake4', 'qmake']:
        try:
            qmake = self.find_program(qmk, path_list=paths)
        except self.errors.ConfigurationError:
            pass
        else:
            try:
                version = self.cmd_and_log([qmake, '-query', 'QT_VERSION']).strip()
            except self.errors.ConfigurationError:
                pass
            else:
                if version:
                    new_ver = version.split('.')
                    # NOTE(review): this compares lists of *strings*, so it is
                    # lexicographic per component (e.g. '10' < '9'); confirm
                    # this is acceptable for the Qt versions in play.
                    if new_ver > prev_ver:
                        cand = qmake
                        prev_ver = new_ver
    if cand:
        qmake = cand
    else:
        self.fatal('could not find qmake for qt4')
    self.env.QMAKE = qmake
    # Ask qmake for the canonical install locations.
    qtincludes = self.cmd_and_log([qmake, '-query', 'QT_INSTALL_HEADERS']).strip()
    qtdir = self.cmd_and_log([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
    qtbin = self.cmd_and_log([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep
    if not qtlibs:
        try:
            qtlibs = self.cmd_and_log([qmake, '-query', 'QT_INSTALL_LIBS']).strip()
        except Errors.WafError:
            qtlibs = os.path.join(qtdir, 'lib')
    def find_bin(lst, var):
        # Store the first program from lst found on 'paths' into env[var].
        for f in lst:
            try:
                ret = self.find_program(f, path_list=paths)
            except self.errors.ConfigurationError:
                pass
            else:
                env[var] = ret
                break
    find_bin(['uic-qt3', 'uic3'], 'QT_UIC3')
    find_bin(['uic-qt4', 'uic'], 'QT_UIC')
    if not env['QT_UIC']:
        self.fatal('cannot find the uic compiler for qt4')
    try:
        version = self.cmd_and_log(env['QT_UIC'] + " -version 2>&1").strip()
    except self.errors.ConfigurationError:
        self.fatal('your uic compiler is for qt3, add uic for qt4 to your path')
    # Normalize the two known banner formats down to the bare version string.
    version = version.replace('Qt User Interface Compiler ', '')
    version = version.replace('User Interface Compiler for Qt', '')
    if version.find(' 3.') != -1:
        self.msg('Checking for uic version', '(%s: too old)' % version, False)
        self.fatal('uic is too old')
    self.msg('Checking for uic version', '(%s)' % version)
    find_bin(['moc-qt4', 'moc'], 'QT_MOC')
    find_bin(['rcc'], 'QT_RCC')
    find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE')
    find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE')
    env['UIC3_ST'] = '%s -o %s'
    env['UIC_ST'] = '%s -o %s'
    env['MOC_ST'] = '-o'
    env['ui_PATTERN'] = 'ui_%s.h'
    env['QT_LRELEASE_FLAGS'] = ['-silent']
    # Register each Qt module (and its _debug variant) via pkg-config.
    vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split()
    vars_debug = [a + '_debug' for a in vars]
    if not 'PKG_CONFIG_PATH' in os.environ:
        os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib' % (qtlibs, qtlibs)
    for i in vars_debug + vars:
        try:
            self.check_cfg(package=i, args='--cflags --libs')
        except self.errors.ConfigurationError:
            pass
    def process_lib(vars_, coreval):
        # Drop library paths already provided by QtCore from the other
        # modules' LIBPATH variables to avoid duplicates on the link line.
        for d in vars_:
            var = d.upper()
            if var == 'QTCORE':
                continue
            value = env['LIBPATH_' + var]
            if value:
                core = env[coreval]
                accu = []
                for lib in value:
                    if lib in core:
                        continue
                    accu.append(lib)
                env['LIBPATH_' + var] = accu
    process_lib(vars, 'LIBPATH_QTCORE')
    process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG')
    if Options.options.want_rpath:
        def process_rpath(vars_, coreval):
            # Derive -Wl,--rpath flags from the (deduplicated) library paths.
            for d in vars_:
                var = d.upper()
                value = env['LIBPATH_' + var]
                if value:
                    core = env[coreval]
                    accu = []
                    for lib in value:
                        if var != 'QTCORE':
                            if lib in core:
                                continue
                        accu.append('-Wl,--rpath=' + lib)
                    env['RPATH_' + var] = accu
        process_rpath(vars, 'LIBPATH_QTCORE')
        process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG')
def options(opt):
    """Register the qt4 command-line options (rpath, moc header extension,
    Qt install locations, and OS X / translation toggles)."""
    opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries')
    opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext')
    # Let the user point at a specific Qt installation.
    for i in 'qtdir qtbin qtlibs'.split():
        opt.add_option('--' + i, type='string', default='', dest=i)
    if sys.platform == "darwin":
        opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework', default=True)
    opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
# Manual application of the waf decorators (this generated file avoids
# decorator syntax): bind the extension handlers and the 'qt4' feature.
extension(*EXT_RCC)(create_rcc_task)
extension(*EXT_UI)(create_uic_task)
extension('.ts')(add_lang)
feature('qt4')(apply_qt4)
after_method('apply_link')(apply_qt4)
extension(*EXT_QT4)(cxx_hook)
| |
"""
Miscellaneous function (re)definitions from the Py3.4+ standard library
for Python 2.6/2.7.
- math.ceil (for Python 2.7)
- collections.OrderedDict (for Python 2.6)
- collections.Counter (for Python 2.6)
- collections.ChainMap (for all versions prior to Python 3.3)
- itertools.count (for Python 2.6, with step parameter)
- subprocess.check_output (for Python 2.6)
- reprlib.recursive_repr (for Python 2.6+)
"""
import subprocess
from math import ceil as oldceil
from collections import Mapping, MutableMapping
from operator import itemgetter as _itemgetter, eq as _eq
import sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from future.utils import iteritems, itervalues, PY26, PY3
def ceil(x):
    """
    Return the ceiling of x as an int.
    This is the smallest integral value >= x.
    """
    result = oldceil(x)
    return int(result)
########################################################################
### reprlib.recursive_repr decorator from Py3.4
########################################################################
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
    'Decorator to make a repr function return fillvalue for a recursive call'
    def decorating_function(user_function):
        # (object id, thread id) pairs currently being repr'ed.
        active = set()
        def wrapper(self):
            marker = id(self), get_ident()
            if marker in active:
                return fillvalue
            active.add(marker)
            try:
                return user_function(self)
            finally:
                active.discard(marker)
        # Can't use functools.wraps() here because of bootstrap issues
        wrapper.__module__ = getattr(user_function, '__module__')
        wrapper.__doc__ = getattr(user_function, '__doc__')
        wrapper.__name__ = getattr(user_function, '__name__')
        wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
        return wrapper
    return decorating_function
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Doubly-linked-list node used internally by OrderedDict. __weakref__ is
    # required because the sentinel root is accessed through a weakref proxy.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # Backport of the Python 3.4 collections.OrderedDict implementation.
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.
    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    def __init__(*args, **kwds):
        '''Initialize an ordered dictionary. The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        # 'self' is extracted from *args so that a key literally named 'self'
        # can still be passed as a keyword argument.
        if not args:
            raise TypeError("descriptor '__init__' of 'OrderedDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: build the sentinel and the key->link map.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the tail element.
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # Unlink the head element.
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        # Detach the link, then re-insert at the chosen end.
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        sizeof = sys.getsizeof
        n = len(self) + 1                      # number of links including root
        size = sizeof(self.__dict__)           # instance dictionary
        size += sizeof(self.__map) * 2         # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n    # link objects
        size += sizeof(self.__root) * n        # proxy objects
        return size
    # Order-aware iteration comes from MutableMapping, which builds on
    # __iter__ above.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # Sentinel distinguishing "no default supplied" from an explicit None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value. If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    @recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        return self.__class__, (), inst_dict or None, None, iter(self.items())
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(map(_eq, self, other))
        return dict.__eq__(self, other)
# {{{ http://code.activestate.com/recipes/576611/ (r11)
try:
from operator import itemgetter
from heapq import nlargest
except ImportError:
pass
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
class Counter(dict):
    '''Dict subclass for counting hashable items. Sometimes called a bag
    or multiset. Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
    >>> c = Counter('abcdeabcdabcaba') # count elements from a string
    >>> c.most_common(3) # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c) # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements())) # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values()) # total of all counts
    15
    >>> c['a'] # count of letter 'a'
    5
    >>> for elem in 'shazam': # update counts from an iterable
    ... c[elem] += 1 # by adding 1 to each element's count
    >>> c['a'] # now there are seven 'a'
    7
    >>> del c['b'] # remove all 'b'
    >>> c['b'] # now there are zero 'b'
    0
    >>> d = Counter('simsalabim') # make another counter
    >>> c.update(d) # add in the second counter
    >>> c['a'] # now there are nine 'a'
    9
    >>> c.clear() # empty the counter
    >>> c
    Counter()
    Note: If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:
    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2 # reduce the count of 'b' by two
    >>> c.most_common() # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # Backport of the Python 3.4 collections.Counter implementation.
    # References:
    # http://en.wikipedia.org/wiki/Multiset
    # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    # http://code.activestate.com/recipes/259174/
    # Knuth, TAOCP Vol. II section 4.6.3
    def __init__(*args, **kwds):
        '''Create a new, empty Counter object. And if given, count elements
        from an input iterable. Or, initialize the count from another mapping
        of elements to their counts.
        >>> c = Counter() # a new, empty counter
        >>> c = Counter('gallahad') # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
        >>> c = Counter(a=4, b=2) # a new counter from keyword args
        '''
        # 'self' is extracted from *args so that an element literally named
        # 'self' can still be counted via keyword arguments.
        if not args:
            raise TypeError("descriptor '__init__' of 'Counter' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        super(Counter, self).__init__()
        self.update(*args, **kwds)
    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0
    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least. If n is None, then list all element counts.
        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
    def elements(self):
        '''Iterator over elements repeating each as many times as its count.
        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']
        # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements(): # loop over factors
        ... product *= factor # and multiply them
        >>> product
        1836
        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        return _chain.from_iterable(_starmap(_repeat, self.items()))
    # Override dict methods where necessary
    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
    def update(*args, **kwds):
        '''Like dict.update() but add counts instead of replacing them.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.update('witch') # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d) # add elements from another counter
        >>> c['h'] # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in the some of original untouched counts
        # being mixed-in with all of the other counts for a mismash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts. Instead, we implement straight-addition. Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if not args:
            raise TypeError("descriptor 'update' of 'Counter' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    super(Counter, self).update(iterable) # fast path when counter is empty
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)
    def subtract(*args, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero. Both the inputs and outputs are
        allowed to contain zero and negative counts.
        Source can be an iterable, a dictionary, or another Counter instance.
        >>> c = Counter('which')
        >>> c.subtract('witch') # subtract elements from another iterable
        >>> c.subtract(Counter('watch')) # subtract elements from another counter
        >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if not args:
            raise TypeError("descriptor 'subtract' of 'Counter' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        iterable = args[0] if args else None
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)
    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)
    def __reduce__(self):
        # Pickle as (class, (plain-dict,)) so unpickling rebuilds the counts.
        return self.__class__, (dict(self),)
    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            super(Counter, self).__delitem__(elem)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
    # Multiset-style mathematical operations discussed in:
    # Knuth TAOCP Volume II section 4.6.3 exercise 19
    # and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    # c += Counter()
    def __add__(self, other):
        '''Add counts from two counters.
        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.
        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result
    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.
        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result
    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.
        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
    def __pos__(self):
        'Adds an empty counter, effectively stripping negative and zero counts'
        return self + Counter()
    def __neg__(self):
        '''Subtracts from an empty counter. Strips positive and zero counts,
        and flips the sign on negative counts.
        '''
        return Counter() - self
    def _keep_positive(self):
        '''Internal method to strip elements with a negative or zero count'''
        nonpositive = [elem for elem, count in self.items() if not count > 0]
        for elem in nonpositive:
            del self[elem]
        return self
    def __iadd__(self, other):
        '''Inplace add from another counter, keeping only positive counts.
        >>> c = Counter('abbb')
        >>> c += Counter('bcc')
        >>> c
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        for elem, count in other.items():
            self[elem] += count
        return self._keep_positive()
    def __isub__(self, other):
        '''Inplace subtract counter, but keep only results with positive counts.
        >>> c = Counter('abbbc')
        >>> c -= Counter('bccd')
        >>> c
        Counter({'b': 2, 'a': 1})
        '''
        for elem, count in other.items():
            self[elem] -= count
        return self._keep_positive()
    def __ior__(self, other):
        '''Inplace union is the maximum of value from either counter.
        >>> c = Counter('abbb')
        >>> c |= Counter('bcc')
        >>> c
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        for elem, other_count in other.items():
            count = self[elem]
            if other_count > count:
                self[elem] = other_count
        return self._keep_positive()
    def __iand__(self, other):
        '''Inplace intersection is the minimum of corresponding counts.
        >>> c = Counter('abbb')
        >>> c &= Counter('bcc')
        >>> c
        Counter({'b': 1})
        '''
        for elem, count in self.items():
            other_count = other[elem]
            if other_count < count:
                self[elem] = other_count
        return self._keep_positive()
def check_output(*popenargs, **kwargs):
    """
    For Python 2.6 compatibility: see
    http://stackoverflow.com/questions/4814970/
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    captured = proc.communicate()[0]
    rc = proc.poll()
    if rc:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        # NOTE: Py2.6's CalledProcessError has no 'output' parameter, so the
        # captured output is intentionally not attached here.
        raise subprocess.CalledProcessError(rc, cmd)
    return captured
def count(start=0, step=1):
    """
    ``itertools.count`` in Py 2.6 doesn't accept a step
    parameter. This is an enhanced version of ``itertools.count``
    for Py2.6 equivalent to ``itertools.count`` in Python 2.7+.
    """
    current = start
    while True:
        yield current
        current += step
########################################################################
### ChainMap (helper for configparser and string.Template)
### From the Py3.4 source code. See also:
### https://github.com/kkxue/Py2ChainMap/blob/master/py2chainmap.py
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
# Py2 compatibility:
__nonzero__ = __bool__
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def popitem(self):
    'Remove and return a (key, value) pair from maps[0]; raise KeyError if maps[0] is empty.'
    front = self.maps[0]
    try:
        return front.popitem()
    except KeyError:
        raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
    'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
    front = self.maps[0]
    try:
        # *args may carry a default, mirroring dict.pop(key[, default]).
        return front.pop(key, *args)
    except KeyError:
        raise KeyError('Key not found in the first mapping: {!r}'.format(key))
def clear(self):
    'Empty maps[0] in place; the remaining maps are left untouched.'
    front = self.maps[0]
    front.clear()
# Back up our definitions above in case they're useful
_OrderedDict = OrderedDict
_Counter = Counter
_check_output = check_output
_count = count
_ceil = ceil
__count_elements = _count_elements
_recursive_repr = recursive_repr
_ChainMap = ChainMap

# Overwrite the definitions above with the usual ones
# from the standard library:
# (the pure-Python backports stay reachable via the underscore aliases)
if sys.version_info >= (2, 7):
    from collections import OrderedDict, Counter
    from subprocess import check_output
    from itertools import count
if sys.version_info >= (3, 0):
    from math import ceil
    from collections import _count_elements
if sys.version_info >= (3, 3):
    from reprlib import recursive_repr
    from collections import ChainMap
| |
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
from django.db.models.query import sql, QuerySet, Q
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.db.models.fields import GeometryField, PointField
from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery, GeoWhereNode
from django.contrib.gis.measure import Area, Distance
from django.contrib.gis.models import get_srid_info
qn = connection.ops.quote_name
# For backwards-compatibility; Q object should work just fine
# after queryset-refactor.
class GeoQ(Q): pass
class GeomSQL(object):
    "Simple wrapper object for geometric SQL."

    def __init__(self, geo_sql):
        # Keep the raw SQL fragment verbatim.
        self.sql = geo_sql

    def as_sql(self, *args, **kwargs):
        # Mirrors the as_sql() protocol of Django query nodes but ignores
        # every argument, returning the stored SQL untouched (this is how
        # quoting is bypassed).
        return self.sql
class GeoQuerySet(QuerySet):
    "The Geographic QuerySet."

    def __init__(self, model=None, query=None):
        super(GeoQuerySet, self).__init__(model=model, query=query)
        # Ensure a GeoQuery is used so spatial SQL generation is available.
        self.query = query or GeoQuery(self.model, connection)

    def area(self, tolerance=0.05, **kwargs):
        """
        Returns the area of the geographic field in an `area` attribute on
        each element of this GeoQuerySet.
        """
        # Performing setup here rather than in `_spatial_attribute` so that
        # we can get the units for `AreaField`.
        procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None))
        s = {'procedure_args' : procedure_args,
             'geo_field' : geo_field,
             'setup' : False,
             }
        if SpatialBackend.oracle:
            s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
            s['procedure_args']['tolerance'] = tolerance
            s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
        elif SpatialBackend.postgis:
            if not geo_field.geodetic:
                # Getting the area units of the geographic field.
                s['select_field'] = AreaField(Area.unit_attname(geo_field._unit_name))
            else:
                # TODO: Do we want to support raw number areas for geodetic fields?
                raise Exception('Area on geodetic coordinate systems not supported.')
        return self._spatial_attribute('area', s, **kwargs)

    def centroid(self, **kwargs):
        """
        Returns the centroid of the geographic field in a `centroid`
        attribute on each element of this GeoQuerySet.
        """
        return self._geom_attribute('centroid', **kwargs)

    def difference(self, geom, **kwargs):
        """
        Returns the spatial difference of the geographic field in a `difference`
        attribute on each element of this GeoQuerySet.
        """
        return self._geomset_attribute('difference', geom, **kwargs)

    def distance(self, geom, **kwargs):
        """
        Returns the distance from the given geographic field name to the
        given geometry in a `distance` attribute on each element of the
        GeoQuerySet.

        Keyword Arguments:
         `spheroid`  => If the geometry field is geodetic and PostGIS is
                        the spatial database, then the more accurate
                        spheroid calculation will be used instead of the
                        quicker sphere calculation.

         `tolerance` => Used only for Oracle. The tolerance is
                        in meters -- a default of 5 centimeters (0.05)
                        is used.
        """
        return self._distance_attribute('distance', geom, **kwargs)

    def envelope(self, **kwargs):
        """
        Returns a Geometry representing the bounding box of the
        Geometry field in an `envelope` attribute on each element of
        the GeoQuerySet.
        """
        return self._geom_attribute('envelope', **kwargs)

    def extent(self, **kwargs):
        """
        Returns the extent (aggregate) of the features in the GeoQuerySet.  The
        extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
        """
        convert_extent = None
        if SpatialBackend.postgis:
            def convert_extent(box, geo_field):
                # TODO: Parsing of BOX3D, Oracle support (patches welcome!)
                # Box text will be something like "BOX(-90.0 30.0, -85.0 40.0)";
                # parsing out and returning as a 4-tuple.
                ll, ur = box[4:-1].split(',')
                xmin, ymin = map(float, ll.split())
                xmax, ymax = map(float, ur.split())
                return (xmin, ymin, xmax, ymax)
        elif SpatialBackend.oracle:
            def convert_extent(wkt, geo_field):
                raise NotImplementedError
        return self._spatial_aggregate('extent', convert_func=convert_extent, **kwargs)

    def gml(self, precision=8, version=2, **kwargs):
        """
        Returns GML representation of the given field in a `gml` attribute
        on each element of the GeoQuerySet.
        """
        s = {'desc' : 'GML', 'procedure_args' : {'precision' : precision}}
        if SpatialBackend.postgis:
            # PostGIS AsGML() aggregate function parameter order depends on the
            # version -- uggh.
            major, minor1, minor2 = SpatialBackend.version
            if major >= 1 and (minor1 > 3 or (minor1 == 3 and minor2 > 1)):
                procedure_fmt = '%(version)s,%(geo_col)s,%(precision)s'
            else:
                procedure_fmt = '%(geo_col)s,%(precision)s,%(version)s'
            # BUG FIX: `procedure_fmt` was computed but never stored, so the
            # precision/version arguments were silently dropped and AsGML()
            # was always invoked with only the geometry column.
            s['procedure_fmt'] = procedure_fmt
            s['procedure_args'] = {'precision' : precision, 'version' : version}
        return self._spatial_attribute('gml', s, **kwargs)

    def intersection(self, geom, **kwargs):
        """
        Returns the spatial intersection of the Geometry field in
        an `intersection` attribute on each element of this
        GeoQuerySet.
        """
        return self._geomset_attribute('intersection', geom, **kwargs)

    def kml(self, **kwargs):
        """
        Returns KML representation of the geometry field in a `kml`
        attribute on each element of this GeoQuerySet.
        """
        s = {'desc' : 'KML',
             'procedure_fmt' : '%(geo_col)s,%(precision)s',
             'procedure_args' : {'precision' : kwargs.pop('precision', 8)},
             }
        return self._spatial_attribute('kml', s, **kwargs)

    def length(self, **kwargs):
        """
        Returns the length of the geometry field as a `Distance` object
        stored in a `length` attribute on each element of this GeoQuerySet.
        """
        return self._distance_attribute('length', None, **kwargs)

    def make_line(self, **kwargs):
        """
        Creates a linestring from all of the PointField geometries in the
        this GeoQuerySet and returns it.  This is a spatial aggregate
        method, and thus returns a geometry rather than a GeoQuerySet.
        """
        kwargs['geo_field_type'] = PointField
        kwargs['agg_field'] = GeometryField
        return self._spatial_aggregate('make_line', **kwargs)

    def mem_size(self, **kwargs):
        """
        Returns the memory size (number of bytes) that the geometry field takes
        in a `mem_size` attribute on each element of this GeoQuerySet.
        """
        return self._spatial_attribute('mem_size', {}, **kwargs)

    def num_geom(self, **kwargs):
        """
        Returns the number of geometries if the field is a
        GeometryCollection or Multi* Field in a `num_geom`
        attribute on each element of this GeoQuerySet; otherwise
        sets with None.
        """
        return self._spatial_attribute('num_geom', {}, **kwargs)

    def num_points(self, **kwargs):
        """
        Returns the number of points in the first linestring in the
        Geometry field in a `num_points` attribute on each element of
        this GeoQuerySet; otherwise sets with None.
        """
        return self._spatial_attribute('num_points', {}, **kwargs)

    def perimeter(self, **kwargs):
        """
        Returns the perimeter of the geometry field as a `Distance` object
        stored in a `perimeter` attribute on each element of this GeoQuerySet.
        """
        return self._distance_attribute('perimeter', None, **kwargs)

    def point_on_surface(self, **kwargs):
        """
        Returns a Point geometry guaranteed to lie on the surface of the
        Geometry field in a `point_on_surface` attribute on each element
        of this GeoQuerySet; otherwise sets with None.
        """
        return self._geom_attribute('point_on_surface', **kwargs)

    def scale(self, x, y, z=0.0, **kwargs):
        """
        Scales the geometry to a new size by multiplying the ordinates
        with the given x,y,z scale factors.
        """
        s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
             'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
             'select_field' : GeomField(),
             }
        return self._spatial_attribute('scale', s, **kwargs)

    def svg(self, **kwargs):
        """
        Returns SVG representation of the geographic field in a `svg`
        attribute on each element of this GeoQuerySet.
        """
        s = {'desc' : 'SVG',
             'procedure_fmt' : '%(geo_col)s,%(rel)s,%(precision)s',
             'procedure_args' : {'rel' : int(kwargs.pop('relative', 0)),
                                 'precision' : kwargs.pop('precision', 8)},
             }
        return self._spatial_attribute('svg', s, **kwargs)

    def sym_difference(self, geom, **kwargs):
        """
        Returns the symmetric difference of the geographic field in a
        `sym_difference` attribute on each element of this GeoQuerySet.
        """
        return self._geomset_attribute('sym_difference', geom, **kwargs)

    def translate(self, x, y, z=0.0, **kwargs):
        """
        Translates the geometry to a new location using the given numeric
        parameters as offsets.
        """
        s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
             'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
             'select_field' : GeomField(),
             }
        return self._spatial_attribute('translate', s, **kwargs)

    def transform(self, srid=4326, **kwargs):
        """
        Transforms the given geometry field to the given SRID.  If no SRID is
        provided, the transformation will default to using 4326 (WGS84).
        """
        if not isinstance(srid, (int, long)):  # Python 2: `long`
            raise TypeError('An integer SRID must be provided.')
        field_name = kwargs.get('field_name', None)
        tmp, geo_field = self._spatial_setup('transform', field_name=field_name)

        # Getting the selection SQL for the given geographic field.
        field_col = self._geocol_select(geo_field, field_name)

        # Why cascading substitutions? Because spatial backends like
        # Oracle and MySQL already require a function call to convert to text, thus
        # when there's also a transformation we need to cascade the substitutions.
        # For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )'
        geo_col = self.query.custom_select.get(geo_field, field_col)

        # Setting the key for the field's column with the custom SELECT SQL to
        # override the geometry column returned from the database.
        custom_sel = '%s(%s, %s)' % (SpatialBackend.transform, geo_col, srid)
        # TODO: Should we have this as an alias?
        # custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name))
        self.query.transformed_srid = srid # So other GeoQuerySet methods
        self.query.custom_select[geo_field] = custom_sel
        return self._clone()

    def union(self, geom, **kwargs):
        """
        Returns the union of the geographic field with the given
        Geometry in a `union` attribute on each element of this GeoQuerySet.
        """
        return self._geomset_attribute('union', geom, **kwargs)

    def unionagg(self, **kwargs):
        """
        Performs an aggregate union on the given geometry field.  Returns
        None if the GeoQuerySet is empty.  The `tolerance` keyword is for
        Oracle backends only.
        """
        kwargs['agg_field'] = GeometryField
        return self._spatial_aggregate('unionagg', **kwargs)

    ### Private API -- Abstracted DRY routines. ###
    def _spatial_setup(self, att, aggregate=False, desc=None, field_name=None, geo_field_type=None):
        """
        Performs set up for executing the spatial function.

        Returns a (procedure_args, geo_field) tuple; raises
        ImproperlyConfigured when the backend lacks the procedure and
        TypeError when no suitable geographic field is found.
        """
        # Does the spatial backend support this?
        func = getattr(SpatialBackend, att, False)
        if desc is None: desc = att
        if not func: raise ImproperlyConfigured('%s stored procedure not available.' % desc)

        # Initializing the procedure arguments.
        procedure_args = {'function' : func}

        # Is there a geographic field in the model to perform this
        # operation on?
        geo_field = self.query._geo_field(field_name)
        if not geo_field:
            raise TypeError('%s output only available on GeometryFields.' % func)

        # If the `geo_field_type` keyword was used, then enforce that
        # type limitation.
        if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
            raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))

        # Setting the procedure args.
        procedure_args['geo_col'] = self._geocol_select(geo_field, field_name, aggregate)
        return procedure_args, geo_field

    def _spatial_aggregate(self, att, field_name=None,
                           agg_field=None, convert_func=None,
                           geo_field_type=None, tolerance=0.0005):
        """
        DRY routine for calling aggregate spatial stored procedures and
        returning their result to the caller of the function.
        """
        # Constructing the setup keyword arguments.
        setup_kwargs = {'aggregate' : True,
                        'field_name' : field_name,
                        'geo_field_type' : geo_field_type,
                        }
        procedure_args, geo_field = self._spatial_setup(att, **setup_kwargs)

        if SpatialBackend.oracle:
            procedure_args['tolerance'] = tolerance
            # Adding in selection SQL for Oracle geometry columns.
            if agg_field is GeometryField:
                agg_sql = '%s' % SpatialBackend.select
            else:
                agg_sql = '%s'
            agg_sql = agg_sql % ('%(function)s(SDOAGGRTYPE(%(geo_col)s,%(tolerance)s))' % procedure_args)
        else:
            agg_sql = '%(function)s(%(geo_col)s)' % procedure_args

        # Wrapping our selection SQL in `GeomSQL` to bypass quoting, and
        # specifying the type of the aggregate field.
        self.query.select = [GeomSQL(agg_sql)]
        self.query.select_fields = [agg_field]

        try:
            # `asql` => not overriding `sql` module.
            asql, params = self.query.as_sql()
        except sql.datastructures.EmptyResultSet:
            return None

        # Getting a cursor, executing the query, and extracting the returned
        # value from the aggregate function.
        cursor = connection.cursor()
        cursor.execute(asql, params)
        result = cursor.fetchone()[0]

        # If the `agg_field` is specified as a GeometryField, then automatically
        # set up the conversion function.
        if agg_field is GeometryField and not callable(convert_func):
            if SpatialBackend.postgis:
                def convert_geom(hex, geo_field):
                    if hex: return SpatialBackend.Geometry(hex)
                    else: return None
            elif SpatialBackend.oracle:
                def convert_geom(clob, geo_field):
                    if clob: return SpatialBackend.Geometry(clob.read(), geo_field._srid)
                    else: return None
            convert_func = convert_geom

        # Returning the callback function evaluated on the result culled
        # from the executed cursor.
        if callable(convert_func):
            return convert_func(result, geo_field)
        else:
            return result

    def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
        """
        DRY routine for calling a spatial stored procedure on a geometry column
        and attaching its output as an attribute of the model.

        Arguments:
         att:
          The name of the spatial attribute that holds the spatial
          SQL function to call.

         settings:
          Dictionary of internal settings to customize for the spatial procedure.

        Public Keyword Arguments:

         field_name:
          The name of the geographic field to call the spatial
          function on.  May also be a lookup to a geometry field
          as part of a foreign key relation.

         model_att:
          The name of the model attribute to attach the output of
          the spatial function to.
        """
        # Default settings.
        settings.setdefault('desc', None)
        settings.setdefault('geom_args', ())
        settings.setdefault('geom_field', None)
        settings.setdefault('procedure_args', {})
        settings.setdefault('procedure_fmt', '%(geo_col)s')
        settings.setdefault('select_params', [])

        # Performing setup for the spatial column, unless told not to.
        if settings.get('setup', True):
            default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name)
            for k, v in default_args.iteritems(): settings['procedure_args'].setdefault(k, v)  # Python 2: iteritems()
        else:
            geo_field = settings['geo_field']

        # The attribute to attach to the model.
        if not isinstance(model_att, basestring): model_att = att  # Python 2: basestring

        # Special handling for any argument that is a geometry.
        for name in settings['geom_args']:
            # Using the field's get_db_prep_lookup() to get any needed
            # transformation SQL -- we pass in a 'dummy' `contains` lookup.
            where, params = geo_field.get_db_prep_lookup('contains', settings['procedure_args'][name])

            # Replacing the procedure format with that of any needed
            # transformation SQL.
            old_fmt = '%%(%s)s' % name
            new_fmt = where[0] % '%%s'
            settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
            settings['select_params'].extend(params)

        # Getting the format for the stored procedure.
        fmt = '%%(function)s(%s)' % settings['procedure_fmt']

        # If the result of this function needs to be converted.
        if settings.get('select_field', False):
            sel_fld = settings['select_field']
            if isinstance(sel_fld, GeomField) and SpatialBackend.select:
                self.query.custom_select[model_att] = SpatialBackend.select
            self.query.extra_select_fields[model_att] = sel_fld

        # Finally, setting the extra selection attribute with
        # the format string expanded with the stored procedure
        # arguments.
        return self.extra(select={model_att : fmt % settings['procedure_args']},
                          select_params=settings['select_params'])

    def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
        """
        DRY routine for GeoQuerySet distance attribute routines.
        """
        # Setting up the distance procedure arguments.
        procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))

        # If geodetic defaulting distance attribute to meters (Oracle and
        # PostGIS spherical distances return meters).  Otherwise, use the
        # units of the geometry field.
        if geo_field.geodetic:
            dist_att = 'm'
        else:
            dist_att = Distance.unit_attname(geo_field._unit_name)

        # Shortcut booleans for what distance function we're using.
        distance = func == 'distance'
        length = func == 'length'
        perimeter = func == 'perimeter'
        if not (distance or length or perimeter):
            raise ValueError('Unknown distance function: %s' % func)

        # The field's get_db_prep_lookup() is used to get any
        # extra distance parameters.  Here we set up the
        # parameters that will be passed in to field's function.
        lookup_params = [geom or 'POINT (0 0)', 0]

        # If the spheroid calculation is desired, either by the `spheroid`
        # keyword or when calculating the length of geodetic field, make
        # sure the 'spheroid' distance setting string is passed in so we
        # get the correct spatial stored procedure.
        if spheroid or (SpatialBackend.postgis and geo_field.geodetic and length):
            lookup_params.append('spheroid')
        where, params = geo_field.get_db_prep_lookup('distance_lte', lookup_params)

        # The `geom_args` flag is set to true if a geometry parameter was
        # passed in.
        geom_args = bool(geom)

        if SpatialBackend.oracle:
            if distance:
                procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
            elif length or perimeter:
                procedure_fmt = '%(geo_col)s,%(tolerance)s'
            procedure_args['tolerance'] = tolerance
        else:
            # Getting whether this field is in units of degrees since the field may have
            # been transformed via the `transform` GeoQuerySet method.
            if self.query.transformed_srid:
                u, unit_name, s = get_srid_info(self.query.transformed_srid)
                geodetic = unit_name in geo_field.geodetic_units
            else:
                geodetic = geo_field.geodetic

            if distance:
                if self.query.transformed_srid:
                    # Setting the `geom_args` flag to false because we want to handle
                    # transformation SQL here, rather than the way done by default
                    # (which will transform to the original SRID of the field rather
                    # than to what was transformed to).
                    geom_args = False
                    procedure_fmt = '%s(%%(geo_col)s, %s)' % (SpatialBackend.transform, self.query.transformed_srid)
                    if geom.srid is None or geom.srid == self.query.transformed_srid:
                        # If the geom parameter srid is None, it is assumed the coordinates
                        # are in the transformed units.  A placeholder is used for the
                        # geometry parameter.
                        procedure_fmt += ', %%s'
                    else:
                        # We need to transform the geom to the srid specified in `transform()`,
                        # so wrapping the geometry placeholder in transformation SQL.
                        procedure_fmt += ', %s(%%%%s, %s)' % (SpatialBackend.transform, self.query.transformed_srid)
                else:
                    # `transform()` was not used on this GeoQuerySet.
                    procedure_fmt = '%(geo_col)s,%(geom)s'

                if geodetic:
                    # Spherical distance calculation is needed (because the geographic
                    # field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
                    # procedures may only do queries from point columns to point geometries
                    # some error checking is required.
                    if not isinstance(geo_field, PointField):
                        raise TypeError('Spherical distance calculation only supported on PointFields.')
                    if not str(SpatialBackend.Geometry(buffer(params[0].wkb)).geom_type) == 'Point':  # Python 2: buffer()
                        raise TypeError('Spherical distance calculation only supported with Point Geometry parameters')
                    # The `function` procedure argument needs to be set differently for
                    # geodetic distance calculations.
                    if spheroid:
                        # Call to distance_spheroid() requires spheroid param as well.
                        procedure_fmt += ',%(spheroid)s'
                        procedure_args.update({'function' : SpatialBackend.distance_spheroid, 'spheroid' : where[1]})
                    else:
                        procedure_args.update({'function' : SpatialBackend.distance_sphere})
            elif length or perimeter:
                procedure_fmt = '%(geo_col)s'
                if geodetic and length:
                    # There's no `length_sphere`
                    procedure_fmt += ',%(spheroid)s'
                    procedure_args.update({'function' : SpatialBackend.length_spheroid, 'spheroid' : where[1]})

        # Setting up the settings for `_spatial_attribute`.
        s = {'select_field' : DistanceField(dist_att),
             'setup' : False,
             'geo_field' : geo_field,
             'procedure_args' : procedure_args,
             'procedure_fmt' : procedure_fmt,
             }
        if geom_args:
            s['geom_args'] = ('geom',)
            s['procedure_args']['geom'] = geom
        elif geom:
            # The geometry is passed in as a parameter because we handled
            # transformation conditions in this routine.
            s['select_params'] = [SpatialBackend.Adaptor(geom)]
        return self._spatial_attribute(func, s, **kwargs)

    def _geom_attribute(self, func, tolerance=0.05, **kwargs):
        """
        DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute (e.g., `centroid`, `point_on_surface`).
        """
        s = {'select_field' : GeomField(),}
        if SpatialBackend.oracle:
            s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
            s['procedure_args'] = {'tolerance' : tolerance}
        return self._spatial_attribute(func, s, **kwargs)

    def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
        """
        DRY routine for setting up a GeoQuerySet method that attaches a
        Geometry attribute and takes a Geometry parameter.  This is used
        for geometry set-like operations (e.g., intersection, difference,
        union, sym_difference).
        """
        s = {'geom_args' : ('geom',),
             'select_field' : GeomField(),
             'procedure_fmt' : '%(geo_col)s,%(geom)s',
             'procedure_args' : {'geom' : geom},
             }
        if SpatialBackend.oracle:
            s['procedure_fmt'] += ',%(tolerance)s'
            s['procedure_args']['tolerance'] = tolerance
        return self._spatial_attribute(func, s, **kwargs)

    def _geocol_select(self, geo_field, field_name, aggregate=False):
        """
        Helper routine for constructing the SQL to select the geographic
        column.  Takes into account if the geographic field is in a
        ForeignKey relation to the current model.
        """
        # If this is an aggregate spatial query, the flag needs to be
        # set on the `GeoQuery` object of this queryset.
        if aggregate: self.query.aggregate = True

        # Is this operation going to be on a related geographic field?
        if not geo_field in self.model._meta.fields:
            # If so, it'll have to be added to the select related information
            # (e.g., if 'location__point' was given as the field name).
            self.query.add_select_related([field_name])
            self.query.pre_sql_setup()
            rel_table, rel_col = self.query.related_select_cols[self.query.related_select_fields.index(geo_field)]
            return self.query._field_column(geo_field, rel_table)
        else:
            return self.query._field_column(geo_field)
| |
# bootstrapping setuptools
import ez_setup
ez_setup.use_setuptools()
import os
import sys
import textwrap
from distutils.errors import *
from distutils.command.clean import clean as _clean
from distutils.cmd import Command
from setuptools import setup
from distutils import log
from distutils.core import setup
class clean(_clean):
    """Also cleanup local temp files."""

    def run(self):
        # Run the stock distutils clean first.
        _clean.run(self)

        import fnmatch

        # kill temporary files
        patterns = [
            # generic tempfiles
            '*~', '*.bak', '*.pyc',

            # tempfiles generated by ANTLR runs
            't[0-9]*Lexer.py', 't[0-9]*Parser.py',
            '*.tokens', '*__.g',
            ]

        for path in ('antlr3', 'unittests', 'tests'):
            path = os.path.join(os.path.dirname(__file__), path)
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path, topdown=True):
                    # Collect every file in this directory that matches any
                    # of the patterns, then delete them one by one.
                    graveyard = []
                    for pat in patterns:
                        graveyard.extend(fnmatch.filter(files, pat))

                    for name in graveyard:
                        filePath = os.path.join(root, name)

                        try:
                            log.info("removing '%s'", filePath)
                            os.unlink(filePath)
                        except OSError, exc:  # Python 2 'except X, e' syntax
                            # Best-effort cleanup: log and keep going.
                            log.warn(
                                "Failed to delete '%s': %s",
                                filePath, exc
                                )
class TestError(DistutilsError):
    # Raised by the 'unittest' and 'functest' commands below when a test
    # suite fails or a test module cannot be loaded.
    pass
# grml.. the class name appears in the --help output:
# ...
# Options for 'CmdUnitTest' command
# ...
# so I have to use a rather ugly name...
class unittest(Command):
    """Run unit tests for package"""

    description = "run unit tests for package"

    user_options = [
        ('xml-output=', None,
         "Directory for JUnit compatible XML files."),
        ]
    boolean_options = []

    def initialize_options(self):
        self.xml_output = None

    def finalize_options(self):
        pass

    def run(self):
        # Tests live in a sibling 'unittests' directory, only present in a
        # development checkout.
        testDir = os.path.join(os.path.dirname(__file__), 'unittests')
        if not os.path.isdir(testDir):
            raise DistutilsFileError(
                "There is not 'unittests' directory. Did you fetch the "
                "development version?",
                )

        # NOTE: the local `import unittest` rebinds the name to the stdlib
        # module inside this method, shadowing this command class.
        # `imp` and `StringIO` are Python 2 modules.
        import glob
        import imp
        import unittest
        import traceback
        import StringIO

        suite = unittest.TestSuite()
        loadFailures = []

        # collect tests from all unittests/test*.py files
        testFiles = []
        for testPath in glob.glob(os.path.join(testDir, 'test*.py')):
            testFiles.append(testPath)
        testFiles.sort()
        for testPath in testFiles:
            # Module name is the filename without the '.py' extension.
            testID = os.path.basename(testPath)[:-3]

            try:
                modFile, modPathname, modDescription \
                         = imp.find_module(testID, [testDir])
                testMod = imp.load_module(
                    testID, modFile, modPathname, modDescription
                    )
                suite.addTests(
                    unittest.defaultTestLoader.loadTestsFromModule(testMod)
                    )
            except Exception:
                # Record the traceback; reported after the run so one broken
                # module doesn't abort the whole suite.
                buf = StringIO.StringIO()
                traceback.print_exc(file=buf)
                loadFailures.append(
                    (os.path.basename(testPath), buf.getvalue())
                    )

        if self.xml_output:
            # JUnit-compatible XML output (for CI servers).
            import xmlrunner
            runner = xmlrunner.XMLTestRunner(
                stream=open(os.path.join(self.xml_output, 'unittest.xml'), 'w'))
        else:
            runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(suite)

        for testName, error in loadFailures:
            sys.stderr.write('\n' + '='*70 + '\n')
            sys.stderr.write(
                "Failed to load test module %s\n" % testName
                )
            sys.stderr.write(error)
            sys.stderr.write('\n')

        if not result.wasSuccessful() or loadFailures:
            raise TestError(
                "Unit test suite failed!",
                )
class functest(Command):
    """Run functional tests for package"""

    description = "run functional tests for package"

    user_options = [
        ('testcase=', None,
         "testcase to run [default: run all]"),
        ('antlr-version=', None,
         "ANTLR version to use [default: HEAD (in ../../build)]"),
        ('antlr-jar=', None,
         "Explicit path to an antlr jar (overrides --antlr-version)"),
        ('xml-output=', None,
         "Directory for JUnit compatible XML files."),
        ]
    boolean_options = []

    def initialize_options(self):
        self.testcase = None
        self.antlr_version = 'HEAD'
        self.antlr_jar = None
        self.xml_output = None

    def finalize_options(self):
        pass

    def run(self):
        # `imp` and `StringIO` are Python 2 modules; the local
        # `import unittest` refers to the stdlib module here.
        import glob
        import imp
        import unittest
        import traceback
        import StringIO

        testDir = os.path.join(os.path.dirname(__file__), 'tests')
        if not os.path.isdir(testDir):
            raise DistutilsFileError(
                "There is not 'tests' directory. Did you fetch the "
                "development version?",
                )

        # make sure, relative imports from testcases work
        sys.path.insert(0, testDir)

        rootDir = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', '..'))

        # Build the Java CLASSPATH for the ANTLR tool invoked by the tests:
        # explicit jar > HEAD build tree > archived release jar.
        if self.antlr_jar is not None:
            classpath = [self.antlr_jar]
        elif self.antlr_version == 'HEAD':
            classpath = [
                os.path.join(rootDir, 'tool', 'target', 'classes'),
                os.path.join(rootDir, 'runtime', 'Java', 'target', 'classes')
                ]
        else:
            classpath = [
                os.path.join(rootDir, 'archive',
                             'antlr-%s.jar' % self.antlr_version)
                ]

        classpath.extend([
            os.path.join(rootDir, 'lib', 'antlr-2.7.7.jar'),
            os.path.join(rootDir, 'lib', 'stringtemplate-3.2.1.jar'),
            os.path.join(rootDir, 'lib', 'ST-4.0.2.jar'),
            os.path.join(rootDir, 'lib', 'junit-4.2.jar')
            ])
        os.environ['CLASSPATH'] = ':'.join(classpath)

        os.environ['ANTLRVERSION'] = self.antlr_version

        suite = unittest.TestSuite()
        loadFailures = []

        # collect tests from all tests/t*.py files
        testFiles = []
        test_glob = 't[0-9][0-9][0-9]*.py'
        for testPath in glob.glob(os.path.join(testDir, test_glob)):
            # Skip ANTLR-generated lexer/parser modules.
            if testPath.endswith('Lexer.py') or testPath.endswith('Parser.py'):
                continue

            # if a single testcase has been selected, filter out all other
            # tests
            if (self.testcase is not None
                and not os.path.basename(testPath)[:-3].startswith(self.testcase)):
                continue

            testFiles.append(testPath)

        testFiles.sort()
        for testPath in testFiles:
            testID = os.path.basename(testPath)[:-3]

            try:
                modFile, modPathname, modDescription \
                         = imp.find_module(testID, [testDir])
                testMod = imp.load_module(
                    testID, modFile, modPathname, modDescription)
                suite.addTests(
                    unittest.defaultTestLoader.loadTestsFromModule(testMod))
            except Exception:
                # Record the traceback; reported after the run so one broken
                # module doesn't abort the whole suite.
                buf = StringIO.StringIO()
                traceback.print_exc(file=buf)
                loadFailures.append(
                    (os.path.basename(testPath), buf.getvalue()))

        if self.xml_output:
            # JUnit-compatible XML output (for CI servers).
            import xmlrunner
            runner = xmlrunner.XMLTestRunner(
                stream=open(os.path.join(self.xml_output, 'functest.xml'), 'w'))
        else:
            runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(suite)

        for testName, error in loadFailures:
            sys.stderr.write('\n' + '='*70 + '\n')
            sys.stderr.write(
                "Failed to load test module %s\n" % testName
                )
            sys.stderr.write(error)
            sys.stderr.write('\n')

        if not result.wasSuccessful() or loadFailures:
            raise TestError(
                "Functional test suite failed!",
                )
setup(name='antlr_python_runtime',
version='3.4',
packages=['antlr3'],
author="Benjamin Niemann",
author_email="pink@odahoda.de",
url="http://www.antlr.org/",
download_url="http://www.antlr.org/download.html",
license="BSD",
description="Runtime package for ANTLR3",
long_description=textwrap.dedent('''\
This is the runtime package for ANTLR3, which is required to use parsers
generated by ANTLR3.
'''),
cmdclass={'unittest': unittest,
'functest': functest,
'clean': clean
},
)
| |
from zope.interface import implements #@UnresolvedImport
from twisted.internet.protocol import ReconnectingClientFactory
from txredis.protocol import Redis, RedisSubscriber, defer
from twisted.internet import interfaces
import uuid
import itertools
from txthoonk.types import Feed
try:
    # BUG FIX: the module is 'collections' -- the old 'collection' spelling
    # always raised ImportError, silently replacing OrderedDict with a plain
    # (unordered on older interpreters) dict.
    from collections import OrderedDict
except ImportError:
    # Very old interpreters (< 2.7) lack OrderedDict; degrade gracefully.
    OrderedDict = dict
class FeedExists(Exception):
    # Presumably raised when creating a feed whose name is already taken --
    # raise sites are outside this chunk; confirm at callers.
    pass
class FeedDoesNotExist(Exception):
    # Presumably raised when operating on a feed that does not exist --
    # raise sites are outside this chunk; confirm at callers.
    pass
class ThoonkBase(object):
    """
    Thoonk object base class.

    Wraps a txredis instance and forwards the Twisted IProtocol methods to
    it, so a ThoonkBase object can be handed to a transport/factory wherever
    a protocol is expected.
    """
    # Byte used to join the parts of a published message.
    SEPARATOR = "\x00"

    implements(interfaces.IProtocol)

    def __init__(self, redis):
        '''
        Constructor

        @param redis: the txredis instance
        '''
        self.set_redis(redis)
        # Random hex id for this client instance (used to tag messages).
        self._uuid = uuid.uuid4().hex

    def set_redis(self, redis):
        '''
        Set the txredis instance

        @param redis: the txredis instance
        '''
        self.redis = redis

    def dataReceived(self, data):
        """
        Called whenever data is received.

        Use this method to translate to a higher-level message.  Usually, some
        callback will be made upon the receipt of each complete protocol
        message.

        @param data: a string of indeterminate length.  Please keep in mind
            that you will probably need to buffer some data, as partial
            (or multiple) protocol messages may be received!  I recommend
            that unit tests for protocols call through to this method with
            differing chunk sizes, down to one byte at a time.
        """
        # Delegate straight to the underlying txredis protocol.
        self.redis.dataReceived(data)

    def connectionLost(self, reason):
        """
        Called when the connection is shut down.

        Clear any circular references here, and any external references
        to this Protocol.  The connection has been closed.  The C{reason}
        Failure wraps a L{twisted.internet.error.ConnectionDone} or
        L{twisted.internet.error.ConnectionLost} instance (or a subclass
        of one of those).

        @type reason: L{twisted.python.failure.Failure}
        """
        self.redis.connectionLost(reason)

    def makeConnection(self, transport):
        """
        Make a connection to a transport and a server.
        """
        self.redis.makeConnection(transport)

    def connectionMade(self):
        """
        Called when a connection is made.

        This may be considered the initializer of the protocol, because
        it is called when the connection is completed.  For clients,
        this is called once the connection to the server has been
        established; for servers, this is called after an accept() call
        stops blocking and a socket has been received.  If you need to
        send any greeting or initial message, do it here.
        """
        self.redis.connectionMade()
class ThoonkPub(ThoonkBase):
    '''
    Thoonk publisher class.

    Manages the set of known feeds in redis (set "feeds" plus one
    "feed.config:<name>" hash per feed) and announces feed lifecycle
    events on pub/sub channels ("newfeed", "delfeed").
    '''

    redis = Redis() # pydev: force code completion

    def __init__(self, *args, **kwargs):
        # self.feed(name) returns a Deferred firing with a Feed instance,
        # creating the feed in redis first if necessary.
        self.feed = self._get_feed_type(Feed, type_="feed")
        super(ThoonkPub, self).__init__(*args, **kwargs)

    def _get_feed_type(self, kls, type_):
        '''
        Returns a function in order to generate a specific feed type.

        @param kls: the python class of feed
        @param type_: the type of feed to be stored in the feed config.
        '''
        config = {'type': type_}

        def _create_type(feed_name):
            '''
            Creates a new feed of this type (or wraps an existing one).
            @param feed_name: the name of the feed.
            '''
            def _get_feed(*args):
                """Create a new instance of the passed class."""
                return kls(pub=self, name=feed_name)

            def _exists(ret):
                """
                Called when self.feed_exists returns.
                """
                if ret:
                    # Feed already present: wrap it immediately.
                    return _get_feed()
                # Otherwise create it first, then wrap it.
                d = self.create_feed(feed_name, config)
                d.addCallback(_get_feed)
                return d

            return self.feed_exists(feed_name).addCallback(_exists)

        return _create_type

    def _publish_channel(self, channel, *args):
        """Calls self.publish_channel appending self._uuid at end"""
        args = list(args) + [self._uuid]
        return self.publish_channel(channel, *args)

    def publish_channel(self, channel, *args):
        '''
        Publish on channel.
        @param channel: the channel where message will be published
        @param *args: a list that will compose the message
        '''
        # Fields are joined with the NUL separator; subscribers split on it.
        message = self.SEPARATOR.join(args)
        return self.redis.publish(channel, message)

    def create_feed(self, feed_name, config=None):
        """
        Create a new feed with a given configuration.

        The configuration is a dict, and should include a 'type'
        entry with the class of the feed type implementation.

        @param feed_name: The name of the new feed.
        @param config: A dictionary of configuration values (optional).
        @return: a Deferred firing once the config is stored, or failing
            with FeedExists if the name is already taken.
        """
        # BUG FIX: the default used to be the mutable literal ``config={}``,
        # which is shared between all calls of this method and could leak
        # one caller's entries into another's feed configuration.
        if config is None:
            config = {}

        def _set_config(ret):
            '''
            Called when self._publish_channel returns.
            '''
            return self.set_config(feed_name, config)

        def _publish(ret):
            """
            Called when redis.sadd returns.
            """
            if ret == 1:
                # The name was actually added: announce it, then store config.
                d = self._publish_channel("newfeed", feed_name)
                d.addCallback(_set_config)
                return d
            else:
                # sadd returned 0: the member already existed.
                return defer.fail(FeedExists())

        return self.redis.sadd("feeds", feed_name).addCallback(_publish)

    def delete_feed(self, feed_name):
        """
        Delete a given feed.

        Uses WATCH/MULTI/EXEC so the removal, config deletion and the
        "delfeed" announcement happen atomically; on a concurrent change
        the transaction aborts and the whole operation is retried.

        @param feed_name: The name of the feed.
        @return: a Deferred firing True on success, or failing with
            FeedDoesNotExist if the feed was absent.
        """
        hash_feed_config = "feed.config:%s" % feed_name

        def _exec_check(bulk_result):
            # All defers must be succeed
            assert all([a[0] for a in bulk_result])
            # assert number of commands
            assert len(bulk_result) == 7
            multi_result = bulk_result[-1][1]
            if multi_result:
                # transaction done :D
                # assert number commands in transaction
                assert len(multi_result) == 3
                # check if feed_name existed when was deleted
                exists = multi_result[0]
                if not exists:
                    return defer.fail(FeedDoesNotExist())
                return True
            # transaction fail :-(
            # repeat it
            return self.delete_feed(feed_name)

        defers = []
        # issue all commands in order to avoid concurrent calls
        defers.append(self.redis.watch("feeds"))                    #0
        defers.append(self.redis.watch(hash_feed_config))           #1
        # begin transaction
        defers.append(self.redis.multi())                           #2
        defers.append(self.redis.srem("feeds", feed_name))          #3 - #0
        defers.append(self.redis.delete(hash_feed_config))          #4 - #1
        defers.append(self._publish_channel("delfeed", feed_name))  #5 - #2
        # end transaction
        defers.append(self.redis.execute())                         #6
        return defer.DeferredList(defers).addCallback(_exec_check)

    def feed_exists(self, feed_name):
        """
        Check if a given feed exists.
        @param feed_name: The name of the feed.
        """
        return self.redis.sismember("feeds", feed_name)

    def set_config(self, feed_name, config):
        """
        Set the configuration for a given feed.
        @param feed_name: The name of the feed.
        @param config: A dictionary of configuration values.
        """
        def _exists(ret):
            if not ret:
                return defer.fail(FeedDoesNotExist())
            # One HSET per key; completion is the DeferredList of them all.
            dl = []
            for k, v in config.items():
                dl.append(self.redis.hset('feed.config:%s' % feed_name, k, v))
            return defer.DeferredList(dl)

        return self.feed_exists(feed_name).addCallback(_exists)

    def get_config(self, feed_name):
        """
        Get the configuration for a given feed.
        @param feed_name: The name of the feed.
        @return: A defer which callback function will have a config dict
            as the first argument
        """
        def _exists(ret):
            if not ret:
                return defer.fail(FeedDoesNotExist())
            return self.redis.hgetall('feed.config:%s' % feed_name)

        return self.feed_exists(feed_name).addCallback(_exists)

    def get_feed_names(self):
        """
        Return the set of known feeds.
        @return: a defer which callback function will have the set result
            as first argument
        """
        return self.redis.smembers("feeds")
class ThoonkPubFactory(ReconnectingClientFactory):
    '''
    Factory that wraps every new txredis connection in a ThoonkPub.

    Reconnection backoff is inherited from ReconnectingClientFactory and
    reset whenever a connection is successfully established.
    '''

    protocol = Redis
    protocol_wrapper = ThoonkPub

    def __init__(self, *args, **kwargs):
        '''
        Remember the arguments with which each protocol instance
        will be constructed.
        '''
        self._args = args
        self._kwargs = kwargs

    def buildProtocol(self, addr):
        """
        Called when a connection has been established to addr.

        If None is returned, the connection is assumed to have been refused,
        and the Port will close the connection.

        @type addr: (host, port)
        @param addr: The address of the newly-established connection
        @return: None if the connection was refused, otherwise an object
            providing L{IProtocol}.
        """
        # Successful connect: reset the reconnection backoff delay.
        self.resetDelay()
        wire_protocol = self.protocol(*self._args, **self._kwargs)
        return self.protocol_wrapper(wire_protocol)
class ThoonkSub(ThoonkBase):
    '''
    Thoonk Subscriber class.

    Keeps a registry of event handlers per redis channel and serialises
    SUBSCRIBE commands: only one subscription is in flight at a time, and
    further requests are queued on the pending Deferred.
    '''

    redis = RedisSubscriber() # pydev: force code completion

    def __init__(self, redis):
        '''
        Constructor
        @param redis: the txredis instance
        '''
        # Handler registry: an id generator, handlers grouped per channel,
        # and a reverse map from handler id back to its channel.
        # NOTE(review): register_handler calls ``.next()`` on id_gen, which
        # is Python 2 generator syntax; Python 3 needs next(...) -- confirm
        # the target interpreter before porting.
        self._handlers = {'id_gen': itertools.count(), #@UndefinedVariable
                          'channel_handlers': {},
                          'id2channel' : {}}
        # delay subscribe
        # State for the one-at-a-time SUBSCRIBE queue (see _sub_channel).
        self._subscribed = {'running': False,
                            'subscribed': {},
                            'running_for': None,
                            'defer': None}
        super(ThoonkSub, self).__init__(redis)

    def _get_sub_channel_cb(self, channel):
        '''
        Returns a callback in order to subscribe one channel.
        @param channel: the desired channel.
        '''
        # The queued callback ignores the result of the previous subscription.
        return lambda arg: self._sub_channel(channel)

    def _evt2channel(self, evt):
        '''
        Map Thoonk.py compatible event names onto redis channel names.
        @param evt: the event
        '''
        # Thoonk.py compatible events
        channel = evt
        if evt == "create":
            channel = "newfeed"
        elif evt == "delete":
            channel = "delfeed"
        return channel

    def _sub_channel(self, channel):
        """
        Subscribe to a channel using a defer.
        This call will queue channel subscriptions.
        @param channel: the desired channel.
        """
        if self._subscribed['subscribed'].get(channel):
            # already subcribed
            return defer.succeed(True)
        if self._subscribed['running']:
            # call it later, queue it
            # A SUBSCRIBE is already in flight: chain this request onto the
            # pending Deferred so subscriptions are issued one at a time.
            d = self._subscribed['defer']
            d.addCallback(self._get_sub_channel_cb(channel))
            return d

        def set_subscribed(*args):
            '''
            Called when channel was subscribed.
            '''
            self._subscribed['running'] = False
            self._subscribed['subscribed'][channel] = True
            return True

        # Mark the subscription as in flight; channelSubscribed will fire
        # the Deferred created below once redis confirms it.
        self._subscribed['running'] = True
        self.redis.subscribe(channel)
        d = defer.Deferred()
        self._subscribed['defer'] = d
        self._subscribed['running_for'] = channel
        return d.addCallback(set_subscribed)

    def set_redis(self, redis):
        '''
        Set the txredis instance
        @param redis: the txredis instance
        '''
        # FIXME: on (re)connect (re)subscribe all channels
        # Route the subscriber callbacks of txredis into this object.
        redis.messageReceived = self.messageReceived
        redis.channelSubscribed = self.channelSubscribed
        super(ThoonkSub, self).set_redis(redis)

    def register_handler(self, evt, handler):
        """
        Register a function to respond to feed events.

        Event types/handler params:
            - create handler(feedname)
            - newfeed handler(feedname)
            - delete handler(feedname)
            - delfeed handler(feedname)
            - feed.publish:[feed] handler(id, item)
            - feed.retract:[feed] handler(id)
            - feed.edit:[feed] handler(id, item)

        @param evt: The name of the feed event.
        @param handler: The function for handling the event.
        @return: a Deferred firing with the handler id (for remove_handler).
        """
        channel = self._evt2channel(evt)
        if not channel:
            return defer.succeed(None)

        def _register_callback(*args):
            """
            Called when channel was subscribed.
            """
            id_ = self._handlers['id_gen'].next()
            # store map id -> channel
            self._handlers['id2channel'][id_] = channel
            handlers = self._handlers['channel_handlers'].get(channel)
            if not handlers:
                # First handler for this channel; OrderedDict keeps
                # registration order for dispatch.
                handlers = self._handlers['channel_handlers'][channel] = OrderedDict()
            # store handler
            handlers[id_] = handler
            return id_

        return self._sub_channel(channel).addCallback(_register_callback)

    def remove_handler(self, id_):
        """
        Unregister a function that was registered via register_handler
        @param id_: the handler id
        """
        channel = self._handlers['id2channel'].get(id_)
        if not channel:
            # Unknown id: silently ignore (matches a double-remove).
            return
        del self._handlers['channel_handlers'][channel][id_]
        del self._handlers['id2channel'][id_]

    def messageReceived(self, channel, message):
        """
        Called when this connection is subscribed to a channel that
        has received a message published on it.
        """
        handlers = self._handlers['channel_handlers'].get(channel)
        if handlers is None:
            return
        for handler in handlers.values():
            # Split the payload back into the fields joined by SEPARATOR.
            args = message.split(self.SEPARATOR)
            handler(*args)

    def channelSubscribed(self, channel, numSubscriptions):
        """
        Called when a channel is subscribed to.

        Fires the Deferred created by _sub_channel for this channel.
        """
        # Must only be reached for the subscription this object issued.
        assert self._subscribed['running']
        assert self._subscribed['running_for'] == channel
        d = self._subscribed['defer']
        d.callback(True)
class ThoonkSubFactory(ThoonkPubFactory):
    '''
    Factory that wraps every new RedisSubscriber connection in a ThoonkSub.
    Reuses the construction/reconnection logic of ThoonkPubFactory.
    '''
    protocol = RedisSubscriber
    protocol_wrapper = ThoonkSub
| |
"""Test the cross_validation module"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import Scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
    """Dummy classifier to test the cross-validation.

    Checks that GridSearchCV didn't convert X to array.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # X must still be a plain Python list of the same length as Y.
        assert_true(isinstance(X, list))
        assert_true(len(X) == len(Y))
        return self

    def score(self, X=None, Y=None):
        # Deterministic score driven solely by the hyper-parameter.
        return 1. if self.foo_param > 1 else 0.

    def predict(self, T):
        return T.shape[0]
class MockClassifier(BaseEstimator):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0):
        self.a = a

    def fit(self, X, Y=None, sample_weight=None, class_prior=None):
        # Validate that fit_params were sliced consistently with X.
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level ``y`` fixture, not the
            # ``Y`` argument.  test_cross_val_score_fit_params builds
            # class_prior from the same global, so "fixing" this to ``Y``
            # would change what the assertion checks -- confirm before touching.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        return self

    def predict(self, T):
        # Number of test samples; only used as a smoke-test value.
        return T.shape[0]

    def score(self, X=None, Y=None):
        # Deterministic score depending only on parameter ``a``.
        return 1. / (1 + np.abs(self.a))
# Shared fixtures used by the tests and by MockClassifier.fit below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# NOTE(review): under Python 2 integer division this yields the labels
# [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]; on Python 3 ``/`` would produce floats.
y = np.arange(10) / 2
##############################################################################
# Tests
def test_kfold_valueerrors():
    """KFold/StratifiedKFold must validate n, n_folds and class sizes."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        y = [0, 0, 1, 1, 2]
        cval.StratifiedKFold(y, 3)
        # checking there was only one warning.
        assert_equal(len(w), 1)
        # checking it has the right type
        assert_equal(w[0].category, Warning)
        # checking it's the right warning. This might be a bad test since it's
        # a characteristic of the code and not a behavior
        assert_true("The least populated class" in str(w[0]))
    # Error when number of folds is <= 0
    assert_raises(ValueError, cval.KFold, 2, 0)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 1)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
def test_kfold_indices():
    """Every sample index must appear in exactly one KFold test fold."""
    test_folds = [test for _, test in cval.KFold(300, 3)]
    covered = np.sort(np.concatenate(test_folds))
    assert_array_equal(covered, np.arange(300))
def test_kfold_balance():
    """KFold test folds may differ in size by at most one and must sum to n."""
    for n_samples in range(11, 17):
        kf = cval.KFold(n_samples, 5)
        sizes = [len(test) for _, test in kf]
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_shuffle_kfold():
    """Shuffled KFold must permute the samples yet still cover all of them."""
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf1 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=True)
    kf2 = cval.KFold(300, 3, shuffle=True, random_state=0, indices=False)
    ind = np.arange(300)
    for kf in (kf1, kf2):
        all_folds = None
        for train, test in kf:
            # The train split must differ from each contiguous range, i.e.
            # shuffling actually happened.
            sorted_array = np.arange(100)
            assert_true(np.any(sorted_array != ind[train]))
            sorted_array = np.arange(101, 200)
            assert_true(np.any(sorted_array != ind[train]))
            sorted_array = np.arange(201, 300)
            assert_true(np.any(sorted_array != ind[train]))
            if all_folds is None:
                all_folds = ind[test].copy()
            else:
                all_folds = np.concatenate((all_folds, ind[test]))
        # The union of the test folds must be the full index range.
        all_folds.sort()
        assert_array_equal(all_folds, ind)
def test_shuffle_split():
    """Float, int and numpy/`six` integer test_size must split identically."""
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must validate class counts and set sizes."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Splits must preserve class proportions and partition the samples."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0, indices=True)
        for train, test in sss:
            # Both sides must see every class present in y.
            assert_array_equal(unique(y[train]), unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # train and test must partition the samples with no overlap.
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_iter_no_indices():
    """Mask mode and index mode must select the same test samples."""
    y = np.asarray([0, 1, 2] * 10)
    sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
    train_mask, test_mask = next(iter(sss1))
    sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
    train_indices, test_indices = next(iter(sss2))
    # Converting the boolean mask to indices must match the index variant.
    assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_cross_val_score():
    """cross_val_score must reproduce the estimator's own score per fold."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
    # test with X as list
    clf = MockListClassifier()
    scores = cval.cross_val_score(clf, X.tolist(), y)
def test_cross_val_score_precomputed():
    """A precomputed-kernel SVM must score exactly like a linear-kernel SVM."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    # Gram matrix of the linear kernel.
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params must be forwarded through cross_val_score to fit().

    MockClassifier.fit itself asserts the shapes of sample_weight and
    class_prior, so this only needs to run without raising.
    """
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A legacy score_func must be called once per fold (deprecated path)."""
    clf = MockClassifier()
    _score_func_args = []

    def score_func(y_test, y_predict):
        # Record each invocation so the per-fold call count can be checked.
        _score_func_args.append((y_test, y_predict))
        return 1.0

    # catch_warnings: score_func is deprecated and warns.
    with warnings.catch_warnings(record=True):
        score = cval.cross_val_score(clf, X, y, score_func=score_func)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """cross_val_score must reject objects that are not estimators."""
    class NotAnEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, NotAnEstimator(), X)
def test_train_test_split_errors():
    """train_test_split must reject inconsistent or ill-typed size options."""
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # train + test fractions exceeding 1.0
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # integer sizes exceeding the number of samples
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # arrays of mismatching first dimension
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """Dense, sparse and label arrays must be split consistently."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = range(10)
    split = cval.train_test_split(X, X_s, y)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    # Dense and sparse splits must select the same rows.
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # Row i of X starts with 10 * i, so labels must line up with rows.
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
    """Default, 'accuracy', 'f1' and legacy score_func scores must agree."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [1., 0.97, 0.90, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1", cv=5)
    assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
    # also test deprecated old way
    with warnings.catch_warnings(record=True):
        f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                         score_func=f1_score, cv=5)
    assert_array_almost_equal(f1_scores, [1., 0.97, 0.90, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Default, 'r2', 'mse' and legacy score_func scores must be consistent."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error
    mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="mse")
    expected_mse = np.array([763.07, 553.16, 274.38, 273.26, 1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    # (legacy score_func path is deprecated, hence catch_warnings)
    with warnings.catch_warnings(record=True):
        ev_scores = cval.cross_val_score(reg, X, y, cv=5,
                                         score_func=explained_variance_score)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: significant on real labels, not on random ones.

    Also checks label grouping, a custom Scorer, sparse input equivalence,
    and the deprecated score_func interface.
    """
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, "accuracy", cv)
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # Grouping all samples into one label must not change the result.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, "accuracy", cv, labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    scorer = Scorer(fbeta_score, beta=2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
        random_state=0)
    assert_almost_equal(score_label, .95, 2)
    assert_almost_equal(pvalue_label, 0.01, 3)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2, indices=True)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, "accuracy", cv_sparse,
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(svm, X, y,
                                                        "accuracy", cv)
    # With uninformative labels the score must be near chance and the
    # permutation p-value must be large.
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.4)
    # test with deprecated interface
    with warnings.catch_warnings(record=True):
        score, scores, pvalue = cval.permutation_test_score(
            svm, X, y, score_func=accuracy_score, cv=cv)
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.4)
def test_cross_val_generator_with_mask():
    """Smoke test: every mask-based CV generator must index X and y cleanly."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4, indices=False)
    lpo = cval.LeavePOut(4, 2, indices=False)
    kf = cval.KFold(4, 2, indices=False)
    skf = cval.StratifiedKFold(y, 2, indices=False)
    lolo = cval.LeaveOneLabelOut(labels, indices=False)
    lopo = cval.LeavePLabelOut(labels, 2, indices=False)
    ss = cval.ShuffleSplit(4, indices=False)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
        for train, test in cv:
            # Boolean masks must be usable directly as numpy fancy indices.
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
    """Smoke test: every index-based CV generator must index X and y cleanly."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4, indices=True)
    lpo = cval.LeavePOut(4, 2, indices=True)
    kf = cval.KFold(4, 2, indices=True)
    skf = cval.StratifiedKFold(y, 2, indices=True)
    lolo = cval.LeaveOneLabelOut(labels, indices=True)
    lopo = cval.LeavePLabelOut(labels, 2, indices=True)
    b = cval.Bootstrap(2)  # only in index mode
    ss = cval.ShuffleSplit(2, indices=True)
    for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
        for train, test in cv:
            # Integer index arrays must be usable as numpy fancy indices.
            X_train, X_test = X[train], X[test]
            y_train, y_test = y[train], y[test]
def test_cross_val_generator_mask_indices_same():
    """CV generators must yield equivalent splits for indices=True/False.

    Each mask-based split is converted to indices with ``np.where`` and
    compared against the index-based variant of the same generator.
    """
    y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
    labels = np.array([1, 1, 2, 3, 3, 3, 4])
    loo_mask = cval.LeaveOneOut(5, indices=False)
    loo_ind = cval.LeaveOneOut(5, indices=True)
    lpo_mask = cval.LeavePOut(10, 2, indices=False)
    lpo_ind = cval.LeavePOut(10, 2, indices=True)
    kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
    kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
    skf_mask = cval.StratifiedKFold(y, 3, indices=False)
    skf_ind = cval.StratifiedKFold(y, 3, indices=True)
    lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
    lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
    lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
    lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
    for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
                            (kf_mask, kf_ind), (skf_mask, skf_ind),
                            (lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
        for (train_mask, test_mask), (train_ind, test_ind) in \
                zip(cv_mask, cv_ind):
            # FIX: was ``np.where(mask == True)`` (flake8 E712);
            # np.where(mask) is equivalent on a boolean array and idiomatic.
            assert_array_equal(np.where(train_mask)[0], train_ind)
            assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
    """Bootstrap must reject train/test sizes above n or above ratio 1.0."""
    for bad_kwargs in ({'train_size': 100}, {'test_size': 100},
                       {'train_size': 1.1}, {'test_size': 1.1}):
        assert_raises(ValueError, cval.Bootstrap, 10, **bad_kwargs)
def test_bootstrap_test_sizes():
    """test_size may be a ratio, an absolute int, or None (half of n)."""
    assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
    assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
    assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
    """ShuffleSplit must reject out-of-range or conflicting size options."""
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    # train + test fractions exceeding 1.0
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    # non-numeric / unset sizes
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)
def test_shufflesplit_reproducible():
    """A seeded ShuffleSplit must yield the same splits on every iteration."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_cross_indices_exception():
    """check_cv must refuse mask-based CV generators when X is sparse."""
    X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    mask_based_cvs = [
        cval.LeaveOneOut(4, indices=False),
        cval.LeavePOut(4, 2, indices=False),
        cval.KFold(4, 2, indices=False),
        cval.StratifiedKFold(y, 2, indices=False),
        cval.LeaveOneLabelOut(labels, indices=False),
        cval.LeavePLabelOut(labels, 2, indices=False),
    ]
    for cv in mask_based_cvs:
        assert_raises(ValueError, cval.check_cv, cv, X, y)
| |
#!/usr/bin/env python
# Copyright (C) 2006-2010, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author: James Krycka
"""
This script uses py2exe to create dist\refl1d.exe and dist\refl1d_gui.exe for
running the Refl1D application in either CLI or GUI mode.
These executables start the application and import the rest of the application
code stored in library.zip. The python interpreter and other required python
packages and dlls are also placed in the zip file. Additional resource files
that are needed when Refl1D is run are copied to the dist directory tree. On
completion, the contents of the dist directory tree can be used by the Inno
Setup Compiler (via a separate script) to build a Windows installer/uninstaller
for deployment of the Refl1D application. For testing purposes, refl1d.exe or
refl1d_gui.exe can be run from the dist directory.
"""
import os
import sys
# Force build before continuing
os.system('"%s" setup.py build'%sys.executable)

# Remove the current directory from the python path
here = os.path.abspath(os.path.dirname(__file__))
sys.path = [p for p in sys.path if os.path.abspath(p) != here]

import glob

from distutils.core import setup
from distutils.util import get_platform

# Augment the setup interface with the py2exe command and make sure the py2exe
# option is passed to setup.
import py2exe

# Default to the py2exe command when the script is run without arguments.
if len(sys.argv) == 1:
    sys.argv.append('py2exe')

# Put the build lib on the start of the path.
# For packages with binary extensions, need platform. If it is a pure
# script library, use an empty platform string.
platform = '.%s-%s'%(get_platform(),sys.version[:3])
#platform = ''
build_lib = os.path.abspath('build/lib'+platform)
sys.path.insert(0, build_lib)
#print "\n".join(sys.path)

import wx
import matplotlib
# Select the wx backend before anything imports pyplot.
matplotlib.use('WXAgg')
#import periodictable

# Retrieve the application version string.
import osrefl
version = osrefl.__version__
# A manifest is required to be included in a py2exe image (or accessible as a
# file in the image directory) when wxPython is included so that the Windows XP
# theme is used when rendering wx widgets. The manifest must be matched to the
# version of Python that is being used.
#
# Create a manifest for use with Python 2.5 on Windows XP or Vista. It is
# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
manifest_for_python25 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
  <assemblyIdentity
    version="1.0.0.0"
    processorArchitecture="x86"
    name="%(prog)s"
    type="win32"
  />
  <description>%(prog)s</description>
  <dependency>
    <dependentAssembly>
      <assemblyIdentity
        type="win32"
        name="Microsoft.Windows.Common-Controls"
        version="6.0.0.0"
        processorArchitecture="X86"
        publicKeyToken="6595b64144ccf1df"
        language="*"
      />
    </dependentAssembly>
  </dependency>
</assembly>
"""
# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
# It additionally declares a dependency on the Microsoft.VC90.CRT assembly
# (the msvcr90 runtime used by those Python builds) and the UAC execution
# level for the process.
manifest_for_python26 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
  <assemblyIdentity
    version="5.0.0.0"
    processorArchitecture="x86"
    name="%(prog)s"
    type="win32">
  </assemblyIdentity>
  <description>%(prog)s</description>
  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
    <security>
      <requestedPrivileges>
        <requestedExecutionLevel
          level="asInvoker"
          uiAccess="false">
        </requestedExecutionLevel>
      </requestedPrivileges>
    </security>
  </trustInfo>
  <dependency>
    <dependentAssembly>
      <assemblyIdentity
        type="win32"
        name="Microsoft.VC90.CRT"
        version="9.0.21022.8"
        processorArchitecture="x86"
        publicKeyToken="1fc8b3b9a1e18e3b">
      </assemblyIdentity>
    </dependentAssembly>
  </dependency>
  <dependency>
    <dependentAssembly>
      <assemblyIdentity
        type="win32"
        name="Microsoft.Windows.Common-Controls"
        version="6.0.0.0"
        processorArchitecture="x86"
        publicKeyToken="6595b64144ccf1df"
        language="*">
      </assemblyIdentity>
    </dependentAssembly>
  </dependency>
</assembly>
"""
# Select the appropriate manifest to use for the interpreter we are running
# under; this script only supports Python 2.5 through 2.7.
if sys.version_info >= (3, 0) or sys.version_info < (2, 5):
    print "*** This script only works with Python 2.5, 2.6, or 2.7."
    sys.exit()
elif sys.version_info >= (2, 6):
    manifest = manifest_for_python26
elif sys.version_info >= (2, 5):
    manifest = manifest_for_python25
# Create a list of all files to include along side the executable being built
# in the dist directory tree. Each element of the data_files list is a tuple
# consisting of a path (relative to dist\) and a list of files in that path.
data_files = []
# Add resource files that need to reside in the same directory as the image.
data_files.append( ('.', [os.path.join('.', 'LICENSE.txt')]) )
data_files.append( ('.', [os.path.join('.', 'README.txt')]) )
data_files.append( ('.', [os.path.join('.', 'launch.bat')]) )
# Application-specific data files (disabled; kept from the original template).
#data_files += refl1d.data_files()
# Add data files from the matplotlib\mpl-data folder and its subfolders.
# For matplotlib prior to version 0.99 see the examples at the end of the file.
data_files += matplotlib.get_py2exe_datafiles()
# Add data files from the periodictable\xsf folder (disabled).
#data_files += periodictable.data_files()
# Add example directories and their files. An empty directory is ignored.
# Note that Inno Setup will determine where these files will be placed such as
# C:\My Documents\... instead of the installation folder.
for path in glob.glob(os.path.join('examples', '*')):
    if os.path.isdir(path):
        for file in glob.glob(os.path.join(path, '*.*')):
            data_files.append( (path, [file]) )
    else:
        data_files.append( ('examples', [path]) )
# Add PDF documentation to the dist staging directory, if it has been built.
pdf = os.path.join('doc', '_build','latex','OsRefl.pdf')
if os.path.isfile(pdf):
    data_files.append( ('doc', [pdf]) )
else:
    print "*** %s not found - building frozen image without it ***" %pdf
# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
# Python 2.6 or 2.7. This kit will be installed on the target system as part
# of the installation process for the frozen image. Note that the Python 2.5
# interpreter requires msvcr71.dll which is included in the Python25 package,
# however, Python 2.6 and 2.7 require the msvcr90.dll but they do not bundle it
# with the Python26 or Python27 package. Thus, for Python 2.6 and later, the
# appropriate dll must be present on the target system at runtime.
if sys.version_info >= (2, 6):
    pypath = os.path.dirname(sys.executable)
    data_files.append( ('.', [os.path.join(pypath, 'vcredist_x86.exe')]) )
# Specify required packages to bundle in the executable image.
packages = ['numpy', 'scipy', 'matplotlib', 'pytz',
            'osrefl'
           ]
# Specify files to include in the executable image.
includes = []
# Specify files to exclude from the executable image.
# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
# - We do not use ssl services so they are omitted.
# - We can safely exclude the TkAgg matplotlib backend because our app uses
#   "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
# - On the web it is widely recommended to exclude certain lib*.dll modules
#   but this does not seem necessary any more (but adding them does not hurt).
# - Python25 requires msvcr71.dll, however, Win XP includes this file.
# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
# - For some reason cygwin1.dll gets included by default, but it is not needed.
excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg', 'numpy.distutils.test']
dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
                'libgobject-2.0-0.dll',
                'libgdk-win32-2.0-0.dll',
                'tcl84.dll',
                'tk84.dll',
                'QtGui4.dll',
                'QtCore4.dll',
                'msvcr71.dll',
                'msvcp90.dll',
                'w9xpopen.exe',
                'cygwin1.dll']
class Target():
    """Metadata container for a py2exe build target.

    Every keyword argument passed to the constructor becomes an instance
    attribute; the module-level ``version`` string is always attached as
    the ``version`` attribute afterwards.
    """
    def __init__(self, **metadata):
        for attr_name, attr_value in metadata.items():
            setattr(self, attr_name, attr_value)
        self.version = version
# Describe the single console-mode executable (dist\osrefl.exe) to build.
clientCLI = Target(
    name = 'OsRefl',
    description = 'OsRefl CLI application',
    script = 'osrefl.py',  # module to run on application start
    dest_base = 'osrefl',  # file name part of the exe file to create
    icon_resources = [(1, os.path.join('icon', 'OSlog.ico'))],  # also need to specify in data_files
    bitmap_resources = [],
    # Resource type 24 is RT_MANIFEST; embed the Windows manifest selected
    # above with the program name substituted in.
    other_resources = [(24, 1, manifest % dict(prog='OsRefl'))] )
# Now we do the work to create a standalone distribution using py2exe.
#
# When the application is run in console mode, a console window will be created
# to receive any logging or error messages and the application will then create
# a separate GUI application window.
#
# When the application is run in windows mode, it will create a GUI application
# window and no console window will be provided. Output to stderr will be
# written to <app-image-name>.log.
setup(
    console=[clientCLI],
    options={'py2exe': {
                 'packages': packages,
                 'includes': includes,
                 'excludes': excludes,
                 'dll_excludes': dll_excludes,
                 'compressed': 1,   # standard compression
                 'optimize': 0,     # no byte-code optimization
                 'dist_dir': "dist",# where to put py2exe results
                 'xref': False,     # display cross reference (as html doc)
                 'bundle_files': 1, # bundle python25.dll in library.zip
                 }
             },
    # NOTE(review): this comment mentions "two exe's" but only one target
    # (clientCLI) is built here; the shared-library option is left disabled.
    # Do not put the shared library in the exe; keep a separate library.zip.
    ### zipfile=None, # bundle library.zip in exe
    data_files=data_files # list of files to copy to dist directory
    )
| |
import oauth2
import json
from datetime import date
import logging
from django.utils import timezone
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from ims_lti_py.tool_provider import DjangoToolProvider
from django.shortcuts import (
redirect,
get_object_or_404,
render
)
import waffle
from lti.utils import only_lti
from lti import app_settings as settings
from lti.models import LTIUser, CourseRef, LtiConsumer
from .outcomes import store_outcome_parameters
from ct.models import Course, Role, CourseUnit, Unit
from chat.models import EnrollUnitCode
from accounts.models import Profile
# Map LTI launch role names onto internal course roles. Administrators are
# treated as instructors; any role missing from this map falls back to
# Role.ENROLLED at the lookup sites below.
ROLES_MAP = {
    'Instructor': Role.INSTRUCTOR,
    'Administrator': Role.INSTRUCTOR,
    'Learner': Role.ENROLLED,
}
# LTI launch parameters that are preserved into LTIUser.extra_data
# (Moodle-style parameter names from the LTI POST body).
MOODLE_PARAMS = (
    'user_id',
    'context_id',
    'lis_person_name_full',
    'lis_person_name_given',
    'lis_person_name_family',
    'lis_person_sourcedid',
    'tool_consumer_instance_guid',
    'lis_person_contact_email_primary',
    'tool_consumer_info_product_family_code',
)
# Module-level logger for LTI request processing.
LOGGER = logging.getLogger(__name__)
@csrf_exempt
def lti_init(request, course_id=None, unit_id=None):
    """LTI init view.

    Analyze the LTI POST request, validate its OAuth signature against the
    matching LtiConsumer, store the launch parameters and validity flag in
    the session, and hand off to :func:`lti_redirect` on success.

    :param course_id: course id from launch url
    :param unit_id: unit id from launch url
    """
    if settings.LTI_DEBUG:
        LOGGER.debug(request.META)
        LOGGER.debug(request.POST)
    session = request.session
    # Code from ims_lti_py_django example: start from a clean session so no
    # state from a previous launch leaks into this one.
    session.clear()
    short_term_lti = request.POST.get('custom_short_term')
    instance_guid = request.POST.get('tool_consumer_instance_guid')
    consumer_key = request.POST.get('oauth_consumer_key')
    # Short-term launches look the consumer up by key only; otherwise the
    # consumer is resolved (or combined) via the instance guid as well.
    if short_term_lti:
        lti_consumer = LtiConsumer.objects.filter(consumer_key=consumer_key).first()
    else:
        lti_consumer = LtiConsumer.get_or_combine(instance_guid, consumer_key)
    if not lti_consumer:
        LOGGER.error('Consumer with key {} was not found.'.format(consumer_key))
        return render(
            request,
            'lti/error.html',
            {'message': 'LTI request is not valid'}
        )
    try:
        # Reject expired or mismatched consumer keys before doing the
        # (more expensive) OAuth signature check.
        if lti_consumer.expiration_date and lti_consumer.expiration_date < date.today():
            raise oauth2.Error('Consumer Key has expired.')
        if lti_consumer.consumer_key != consumer_key:
            raise oauth2.Error('Wrong Consumer Key: {}'.format(consumer_key))
        consumer_key = lti_consumer.consumer_key
        secret = lti_consumer.consumer_secret
        tool = DjangoToolProvider(consumer_key, secret, request.POST)
        is_valid = tool.is_valid_request(request)
        session['target'] = '_blank'
    except (oauth2.MissingSignature,
            oauth2.Error,
            KeyError,
            AttributeError) as err:
        # Any validation failure marks the launch invalid; the error text is
        # kept in the session for debug logging below.
        is_valid = False
        session['message'] = "{}".format(err)
        LOGGER.error(
            "Error during processing LTI request: {}".format(err.__str__())
        )
    # Persist the validity flag and a plain-dict copy of the launch POST so
    # downstream views (lti_redirect, create_courseref) can use them.
    session['is_valid'] = is_valid
    session['LTI_POST'] = {k: v for (k, v) in list(request.POST.items())}
    if settings.LTI_DEBUG:
        msg = 'session: is_valid = {}'.format(session.get('is_valid'))
        LOGGER.debug(msg)
        if session.get('message'):
            msg = 'session: message = {}'.format(session.get('message'))
            LOGGER.debug(msg)
    if not is_valid:
        return render(
            request,
            'lti/error.html',
            {'message': 'LTI request is not valid'}
        )
    return lti_redirect(request, lti_consumer, course_id, unit_id)
def lti_redirect(request, lti_consumer, course_id=None, unit_id=None):
    """Create user and redirect to Course.

    | Create LTIUser with all needed link to Django user
    | and/or UserSocialAuth.
    | Finally login Django user and redirect to Course.

    Instructors are sent to the instructor UI (or classic ct views, per the
    ``instructor_UI`` waffle switch); students are enrolled and sent to the
    chat or classic student UI (per the ``chat_ui`` waffle switch).

    :param lti_consumer: LtiConsumer resolved by :func:`lti_init`
    :param course_id: course id from launch url
    :param unit_id: unit id from launch url
    """
    # Launch parameters were stashed in the session by lti_init.
    request_dict = request.session['LTI_POST']
    context_id = request_dict.get('context_id')
    course_ref = CourseRef.objects.filter(context_id=context_id).first()
    user_id = request_dict.get('user_id', None)
    # Translate the comma-separated LTI roles into the de-duplicated set of
    # internal roles; unknown role names default to Role.ENROLLED.
    roles_from_request = request_dict.get('roles', '').split(',')
    roles = list(set((ROLES_MAP.get(role, Role.ENROLLED) for role in roles_from_request)))
    if not user_id:
        return render(
            request,
            'lti/error.html',
            {'message': 'There is not user_id required LTI param'}
        )
    # Find or create the LTI user for this (user_id, consumer) pair.
    user, created = LTIUser.objects.get_or_create(
        user_id=user_id,
        lti_consumer=lti_consumer
    )
    # Keep only the whitelisted Moodle-style launch params as extra data.
    extra_data = {k: v for (k, v) in list(request_dict.items())
                  if k in MOODLE_PARAMS}
    user.extra_data = json.dumps(extra_data)
    user.save()
    if not user.is_linked:
        user.create_links()
    user.login(request)
    # check user timezone and save it if not yet set or set incorrect timezone
    Profile.check_tz(request)
    # Resolve the target course: fall back to the CourseRef for this LTI
    # context, or let instructors create a new course; students get an error.
    if not course_id or not Course.objects.filter(id=course_id).exists():
        if course_ref:
            course_id = course_ref.course.id
        elif Role.INSTRUCTOR in roles:
            return redirect(reverse('lti:create_courseref'))
        else:
            return render(
                request,
                'lti/error.html',
                {'message': """You are trying to access Course that does not exists but
                Students can not create new Courses automatically"""}
            )
    user.enroll(roles, course_id)
    if Role.INSTRUCTOR in roles:
        # NOTE: waffle allow to disable\enable features in the project through admin UI.
        # Here we enable\disable redirecting user to instructor UI.
        if waffle.switch_is_active('instructor_UI'):
            if not unit_id:
                return redirect(reverse('ctms:course_view', args=(course_id,)))
            else:
                return redirect(reverse('ctms:courslet_view', args=(course_id, unit_id)))
        else:
            if not unit_id:
                return redirect(reverse('ct:course', args=(course_id,)))
            else:
                return redirect(reverse('ct:unit_tasks', args=(course_id, unit_id)))
    else:
        course = get_object_or_404(Course, id=course_id)
        unit = None
        try:
            unit = Unit.objects.get(id=unit_id)
            # NOTE(review): CourseUnit.objects.get may raise
            # CourseUnit.DoesNotExist, which is NOT caught below — confirm
            # whether that case can occur and is intended to 500.
            course_unit = CourseUnit.objects.get(unit=unit, course=course)
        except Unit.DoesNotExist:
            # Get first released CourseUnit by order if there is no Unit found.
            course_unit = course.courseunit_set.filter(
                releaseTime__isnull=False,
                releaseTime__lt=timezone.now()
            ).order_by('order').first()
        if not unit and not course_unit:
            return render(
                request,
                'lti/error.html',
                {'message': 'There are no units to display for that Course.'}
            )
        enroll_code = EnrollUnitCode.get_code(course_unit)
        # A courselet with no ordered lessons has nothing to show.
        if not course_unit.unit.unitlesson_set.filter(
            order__isnull=False
        ).exists():
            return render(
                request,
                'lti/error.html',
                {'message': 'There are no Lessons to display for that Courselet.'}
            )
        # Record the LTI outcome-service parameters so grades can be passed
        # back later. NOTE(review): these read request.POST directly while
        # the rest of this view reads the session copy (LTI_POST) — verify
        # this view is only ever reached from lti_init with the POST intact.
        params = {}
        params['course_id'] = course_id
        params['lis_result_sourcedid'] = request.POST.get('lis_result_sourcedid')
        params['lis_outcome_service_url'] = request.POST.get('lis_outcome_service_url')
        store_outcome_parameters(params, request.user, lti_consumer)
        if waffle.switch_is_active('chat_ui'):
            if not unit_id:
                return redirect(reverse('lms:course_view', kwargs={'course_id': course_id}))
            else:
                return redirect(reverse('chat:chat_enroll', kwargs={'enroll_key': enroll_code}))
        else:
            if not unit_id:
                return redirect(reverse('ct:course_student', args=(course_id,)))
            else:
                return redirect(reverse('ct:study_unit', args=(course_id, unit_id)))
@only_lti
def create_courseref(request):
    """
    Create CourseRef and Course entry based on context_title.

    Only a validated LTI session (session['is_valid']) may create a course;
    if a CourseRef already exists for this LTI context, redirect to the
    existing course (instructors) or home (everyone else). Otherwise create
    the Course, grant the requesting user the instructor role, and link the
    new CourseRef to the LTI context.
    """
    request_dict = request.session['LTI_POST']
    if not request.session.get('is_valid'):
        return redirect(reverse('ct:home'))
    context_id = request_dict.get('context_id')
    roles_from_request = request_dict.get('roles', '').split(',')
    roles = list(set((ROLES_MAP.get(role, Role.ENROLLED) for role in roles_from_request)))
    # Make sure this context_id is not used
    course_ref = CourseRef.objects.filter(context_id=context_id).first()
    if course_ref:
        if Role.INSTRUCTOR in roles:
            return redirect(reverse('ct:course', args=(course_ref.course.id,)))
        else:
            return redirect(reverse('ct:home'))
    # Create the course first so the Role and CourseRef rows below can
    # reference it.
    course = Course(
        title=request_dict.get('context_title', 'Course title for %s' % context_id),
        addedBy=request.user
    )
    course.save()
    role = Role(role=Role.INSTRUCTOR, course=course, user=request.user)
    role.save()
    course_id = course.id
    # Fall back to the HTTP host when the consumer did not send its guid.
    course_ref = CourseRef(
        course=course,
        context_id=context_id,
        tc_guid=request_dict.get('tool_consumer_instance_guid', request.META.get('HTTP_HOST'))
    )
    course_ref.save()
    course_ref.instructors.add(request.user)
    return redirect(reverse('ct:edit_course', args=(course_id,)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.